This commit is contained in:
devdatt 2026-02-18 02:54:30 +05:30
parent 31eac1f7a1
commit 130dfcdf56
4 changed files with 285 additions and 103 deletions

View File

@ -1,14 +1,10 @@
<?php
/*
Urmi you happy me happy licence
Copyright (c) 2026 shreebhattji
License text:
https://github.com/shreebhattji/Urmi/blob/main/licence.md
*/
include 'header.php'; ?>
<div class="containerindex">
@ -17,15 +13,23 @@ include 'header.php'; ?>
<h3>CPU (%)</h3>
<div class="chart-wrap"><canvas id="cpuChart"></canvas></div>
</div>
<div class="card">
<h3>RAM (%)</h3>
<div class="chart-wrap"><canvas id="ramChart"></canvas></div>
</div>
<div class="card wide">
<div class="card">
<h3>Intel iGPU (%)</h3>
<div class="chart-wrap"><canvas id="gpuChart"></canvas></div>
</div>
<div class="card">
<h3>Network (KB/s)</h3>
<div class="chart-wrap"><canvas id="netChart"></canvas></div>
</div>
<div class="card wide">
<div class="card">
<h3>Disk I/O (KB/s) & Disk %</h3>
<div class="chart-wrap"><canvas id="diskChart"></canvas></div>
</div>
@ -33,30 +37,30 @@ include 'header.php'; ?>
<div style="margin-top:12px; color:#9fb2d6; display:flex; justify-content:space-between;">
<div>Last update: <span id="lastUpdate"></span></div>
<div>CPU: <span id="lastCpu"></span>% · RAM: <span id="lastRam"></span>% · In: <span id="lastIn"></span>KB/s ·
Out: <span id="lastOut"></span>KB/s</div>
<div>
CPU: <span id="lastCpu"></span>% ·
RAM: <span id="lastRam"></span>% ·
GPU: <span id="lastGpu"></span>% ·
In: <span id="lastIn"></span>KB/s ·
Out: <span id="lastOut"></span>KB/s
</div>
<br>
<br>
<br>
<br>
</div>
<br><br><br><br>
</div>
<script>
const POLL_MS = 1000;
const JSON_URL = "metrics.json";
const toKB = v => Math.round(v / 1024);
function toKB(v) {
return Math.round(v / 1024);
}
const cpuChart = new Chart(document.getElementById('cpuChart').getContext('2d'), {
/* CPU */
const cpuChart = new Chart(document.getElementById('cpuChart'), {
type: 'line',
data: {
labels: [],
datasets: [{
label: 'CPU %',
data: [],
fill: false,
tension: 0.2
}]
},
@ -71,14 +75,15 @@ include 'header.php'; ?>
}
}
});
const ramChart = new Chart(document.getElementById('ramChart').getContext('2d'), {
/* RAM */
const ramChart = new Chart(document.getElementById('ramChart'), {
type: 'line',
data: {
labels: [],
datasets: [{
label: 'RAM %',
data: [],
fill: false,
tension: 0.2
}]
},
@ -93,20 +98,43 @@ include 'header.php'; ?>
}
}
});
const netChart = new Chart(document.getElementById('netChart').getContext('2d'), {
/* GPU */
// Line chart of Intel iGPU busy %; labels/data are filled by update()
// from metrics.json (j.igpu_percent) on every poll.
const gpuChart = new Chart(document.getElementById('gpuChart'), {
type: 'line',
data: {
labels: [],
datasets: [{
label: 'iGPU %',
data: [],
tension: 0.2
}]
},
options: {
responsive: true,
maintainAspectRatio: false,
// utilisation is a percentage, so pin the y axis to 0-100
scales: {
y: {
min: 0,
max: 100
}
}
}
});
/* Network */
const netChart = new Chart(document.getElementById('netChart'), {
type: 'line',
data: {
labels: [],
datasets: [{
label: 'Net In (KB/s)',
data: [],
fill: false,
tension: 0.2
},
{
label: 'Net Out (KB/s)',
data: [],
fill: false,
tension: 0.2
}
]
@ -121,27 +149,26 @@ include 'header.php'; ?>
}
}
});
const diskChart = new Chart(document.getElementById('diskChart').getContext('2d'), {
/* Disk */
const diskChart = new Chart(document.getElementById('diskChart'), {
type: 'line',
data: {
labels: [],
datasets: [{
label: 'Disk Read (KB/s)',
data: [],
fill: false,
tension: 0.2
},
{
label: 'Disk Write (KB/s)',
data: [],
fill: false,
tension: 0.2
},
{
label: 'Disk %',
data: [],
yAxisID: 'percent',
fill: false,
tension: 0.2
}
]
@ -174,16 +201,20 @@ include 'header.php'; ?>
const res = await fetch(JSON_URL + "?_=" + Date.now(), {
cache: 'no-store'
});
if (!res.ok) throw new Error('fetch fail ' + res.status);
if (!res.ok) throw new Error(res.status);
const j = await res.json();
const labels = j.timestamps.map(t => new Date(t).toLocaleTimeString());
cpuChart.data.labels = labels;
cpuChart.data.datasets[0].data = j.cpu_percent;
ramChart.data.labels = labels;
ramChart.data.datasets[0].data = j.ram_percent;
gpuChart.data.labels = labels;
gpuChart.data.datasets[0].data = j.igpu_percent;
netChart.data.labels = labels;
netChart.data.datasets[0].data = j.net_in_Bps.map(toKB);
netChart.data.datasets[1].data = j.net_out_Bps.map(toKB);
@ -195,23 +226,25 @@ include 'header.php'; ?>
cpuChart.update();
ramChart.update();
gpuChart.update();
netChart.update();
diskChart.update();
const last = labels.length - 1;
if (last >= 0) {
document.getElementById('lastUpdate').textContent = labels[last];
document.getElementById('lastCpu').textContent = j.cpu_percent[last];
document.getElementById('lastRam').textContent = j.ram_percent[last];
document.getElementById('lastIn').textContent = toKB(j.net_in_Bps[last]);
document.getElementById('lastOut').textContent = toKB(j.net_out_Bps[last]);
lastUpdate.textContent = labels[last];
lastCpu.textContent = j.cpu_percent[last];
lastRam.textContent = j.ram_percent[last];
lastGpu.textContent = j.igpu_percent[last];
lastIn.textContent = toKB(j.net_in_Bps[last]);
lastOut.textContent = toKB(j.net_out_Bps[last]);
}
} catch (e) {
console.error('update failed', e);
console.error(e);
}
}
setInterval(update, POLL_MS);
update();
</script>
<?php include 'footer.php'; ?>

View File

@ -745,7 +745,7 @@ function update_service($which_service)
case 'rtmp1';
update_service_backend("rtmp", "", "");
if ($service_rtmp0_multiple === "enable") {
$rtmp = "ffmpeg -hwaccel auto -hide_banner -fflags nobuffer -analyzeduration 3000000 -i ";
$rtmp = "ffmpeg -hide_banner -fflags nobuffer -analyzeduration 3000000 -i ";
if ($use_common_backend === "transcode_every_time") {
$rtmp .= $input_transcode_every_time;
} else {

View File

@ -247,25 +247,21 @@ EOL
cat > /usr/local/bin/nginx_system_monitor_sampler.py<< 'EOL'
#!/usr/bin/env python3
"""
Lightweight sampler for nginx static frontend.
"""
import time, json, os
import time, json, os, subprocess
from collections import deque
from datetime import datetime
import psutil
OUT_FILE="/var/www/encoder/metrics.json"
TMP_FILE=OUT_FILE+".tmp"
SAMPLE_INTERVAL = 10.0 # seconds between samples
HISTORY_SECONDS = 15 * 60 # 15 minutes
SAMPLE_INTERVAL=10.0
HISTORY_SECONDS=15*60
MAX_SAMPLES=int(HISTORY_SECONDS/SAMPLE_INTERVAL)
# circular buffers
timestamps=deque(maxlen=MAX_SAMPLES)
cpu_hist=deque(maxlen=MAX_SAMPLES)
ram_hist=deque(maxlen=MAX_SAMPLES)
gpu_hist=deque(maxlen=MAX_SAMPLES)
net_in_hist=deque(maxlen=MAX_SAMPLES)
net_out_hist=deque(maxlen=MAX_SAMPLES)
disk_read_hist=deque(maxlen=MAX_SAMPLES)
@ -276,26 +272,50 @@ _prev_net = psutil.net_io_counters()
_prev_disk=psutil.disk_io_counters()
_prev_time=time.time()
def igpu_percent():
    """Return the Intel iGPU busy percentage, or 0.0 if it cannot be read.

    Tries the sysfs ``gt_busy_percent`` file first (cheap, modern i915
    kernels), then falls back to a single 100 ms ``intel_gpu_top`` JSON
    snapshot.
    """
    # Fast path: modern kernels expose a plain percentage in sysfs.
    for card in ("card0", "card1", "card2"):
        path = f"/sys/class/drm/{card}/gt_busy_percent"
        if os.path.exists(path):
            try:
                # Context manager: the original bare open() leaked the handle.
                with open(path) as fh:
                    return float(fh.read().strip())
            except (OSError, ValueError):
                # Unreadable or malformed value — try the next card.
                pass
    # Fallback: one intel_gpu_top JSON snapshot.
    try:
        out = subprocess.check_output(
            ["intel_gpu_top", "-J", "-s", "100", "-o", "-"],
            stderr=subprocess.DEVNULL,
            timeout=1,
        )
        snap = json.loads(out.splitlines()[0])
        return float(snap["engines"]["Render/3D/0"]["busy"])
    except Exception:
        # Tool missing, timeout, or unexpected JSON shape: report idle
        # rather than crash the sampler loop. (Was a bare except:, which
        # also swallowed KeyboardInterrupt/SystemExit.)
        return 0.0
def sample_once():
global _prev_net,_prev_disk,_prev_time
now=time.time()
iso=datetime.fromtimestamp(now).isoformat(timespec='seconds')
cpu=psutil.cpu_percent(interval=None)
ram=psutil.virtual_memory().percent
gpu=igpu_percent()
net=psutil.net_io_counters()
disk=psutil.disk_io_counters()
try:
disk_percent=psutil.disk_usage("/").percent
except Exception:
except:
disk_percent=0.0
elapsed=now-_prev_time if _prev_time else SAMPLE_INTERVAL
if elapsed <= 0:
elapsed = SAMPLE_INTERVAL
if elapsed<=0: elapsed=SAMPLE_INTERVAL
in_rate = int(((net.bytes_recv - _prev_net.bytes_recv) / elapsed) * 8)
out_rate = int(((net.bytes_sent - _prev_net.bytes_sent) / elapsed) * 8)
in_rate=int(((net.bytes_recv-_prev_net.bytes_recv)/elapsed))
out_rate=int(((net.bytes_sent-_prev_net.bytes_sent)/elapsed))
read_rate=(disk.read_bytes-_prev_disk.read_bytes)/elapsed
write_rate=(disk.write_bytes-_prev_disk.write_bytes)/elapsed
@ -303,6 +323,7 @@ def sample_once():
timestamps.append(iso)
cpu_hist.append(round(cpu,2))
ram_hist.append(round(ram,2))
gpu_hist.append(round(gpu,2))
net_in_hist.append(int(in_rate))
net_out_hist.append(int(out_rate))
disk_read_hist.append(int(read_rate))
@ -318,6 +339,7 @@ def write_json_atomic():
"timestamps":list(timestamps),
"cpu_percent":list(cpu_hist),
"ram_percent":list(ram_hist),
"igpu_percent":list(gpu_hist),
"net_in_Bps":list(net_in_hist),
"net_out_Bps":list(net_out_hist),
"disk_read_Bps":list(disk_read_hist),
@ -326,8 +348,7 @@ def write_json_atomic():
"sample_interval":SAMPLE_INTERVAL,
"generated_at":datetime.utcnow().isoformat(timespec='seconds')+"Z"
}
with open(TMP_FILE, "w") as f:
json.dump(payload, f)
with open(TMP_FILE,"w") as f: json.dump(payload,f)
os.replace(TMP_FILE,OUT_FILE)
def main():
@ -335,14 +356,13 @@ def main():
_prev_net=psutil.net_io_counters()
_prev_disk=psutil.disk_io_counters()
_prev_time=time.time()
time.sleep(0.2) # warm-up
time.sleep(0.2)
while True:
try:
sample_once()
write_json_atomic()
except Exception as e:
# systemd journal will capture prints
print("Sampler error:",e)
time.sleep(SAMPLE_INTERVAL)

129
setup.sh
View File

@ -174,4 +174,133 @@ done
# Validate
mount -a
cat > /usr/local/bin/nginx_system_monitor_sampler.py<< 'EOL'
#!/usr/bin/env python3
import time, json, os, subprocess
from collections import deque
from datetime import datetime
import psutil
# Output location served by nginx; written via a temp file + rename so
# readers never see a half-written JSON document.
OUT_FILE="/var/www/encoder/metrics.json"
TMP_FILE=OUT_FILE+".tmp"
# One sample every 10 s, keeping 15 minutes of history.
SAMPLE_INTERVAL=10.0
HISTORY_SECONDS=15*60
MAX_SAMPLES=int(HISTORY_SECONDS/SAMPLE_INTERVAL)
# Ring buffers: once full, the oldest sample falls off automatically.
timestamps=deque(maxlen=MAX_SAMPLES)
cpu_hist=deque(maxlen=MAX_SAMPLES)
ram_hist=deque(maxlen=MAX_SAMPLES)
gpu_hist=deque(maxlen=MAX_SAMPLES)
net_in_hist=deque(maxlen=MAX_SAMPLES)
net_out_hist=deque(maxlen=MAX_SAMPLES)
disk_read_hist=deque(maxlen=MAX_SAMPLES)
disk_write_hist=deque(maxlen=MAX_SAMPLES)
disk_percent_hist=deque(maxlen=MAX_SAMPLES)
# Baselines for the delta/rate calculations; updated on every sample.
_prev_net=psutil.net_io_counters()
_prev_disk=psutil.disk_io_counters()
_prev_time=time.time()
def igpu_percent():
    """Return the Intel iGPU busy percentage, or 0.0 if it cannot be read.

    Tries the sysfs ``gt_busy_percent`` file first (cheap, modern i915
    kernels), then falls back to a single 100 ms ``intel_gpu_top`` JSON
    snapshot.
    """
    # Fast path: modern kernels expose a plain percentage in sysfs.
    for card in ("card0", "card1", "card2"):
        path = f"/sys/class/drm/{card}/gt_busy_percent"
        if os.path.exists(path):
            try:
                # Context manager: the original bare open() leaked the handle.
                with open(path) as fh:
                    return float(fh.read().strip())
            except (OSError, ValueError):
                # Unreadable or malformed value — try the next card.
                pass
    # Fallback: one intel_gpu_top JSON snapshot.
    try:
        out = subprocess.check_output(
            ["intel_gpu_top", "-J", "-s", "100", "-o", "-"],
            stderr=subprocess.DEVNULL,
            timeout=1,
        )
        snap = json.loads(out.splitlines()[0])
        return float(snap["engines"]["Render/3D/0"]["busy"])
    except Exception:
        # Tool missing, timeout, or unexpected JSON shape: report idle
        # rather than crash the sampler loop. (Was a bare except:, which
        # also swallowed KeyboardInterrupt/SystemExit.)
        return 0.0
def sample_once():
    """Take one sample of all system metrics and append it to the buffers.

    Rates are computed as deltas against the previous psutil counters,
    so the module-level _prev_* baselines are rebound on every call.
    """
    global _prev_net, _prev_disk, _prev_time
    now = time.time()
    iso = datetime.fromtimestamp(now).isoformat(timespec='seconds')
    cpu = psutil.cpu_percent(interval=None)  # non-blocking: % since last call
    ram = psutil.virtual_memory().percent
    gpu = igpu_percent()
    net = psutil.net_io_counters()
    disk = psutil.disk_io_counters()
    try:
        disk_percent = psutil.disk_usage("/").percent
    except OSError:
        # Was a bare except:; statvfs failures are OSError. Fall back to 0.
        disk_percent = 0.0
    # Guard against clock jumps: never divide by a non-positive interval.
    elapsed = now - _prev_time if _prev_time else SAMPLE_INTERVAL
    if elapsed <= 0:
        elapsed = SAMPLE_INTERVAL
    # Byte-per-second rates (the JSON keys are *_Bps).
    in_rate = int((net.bytes_recv - _prev_net.bytes_recv) / elapsed)
    out_rate = int((net.bytes_sent - _prev_net.bytes_sent) / elapsed)
    read_rate = (disk.read_bytes - _prev_disk.read_bytes) / elapsed
    write_rate = (disk.write_bytes - _prev_disk.write_bytes) / elapsed
    timestamps.append(iso)
    cpu_hist.append(round(cpu, 2))
    ram_hist.append(round(ram, 2))
    gpu_hist.append(round(gpu, 2))
    # in_rate/out_rate are already ints; the double int() was redundant.
    net_in_hist.append(in_rate)
    net_out_hist.append(out_rate)
    disk_read_hist.append(int(read_rate))
    disk_write_hist.append(int(write_rate))
    disk_percent_hist.append(round(disk_percent, 2))
    _prev_net = net
    _prev_disk = disk
    _prev_time = now
def write_json_atomic():
    """Publish the current history buffers as JSON, atomically.

    The payload is written to a temp file and then renamed over
    OUT_FILE, so nginx never serves a half-written document.
    """
    snapshot = {
        "timestamps": list(timestamps),
        "cpu_percent": list(cpu_hist),
        "ram_percent": list(ram_hist),
        "igpu_percent": list(gpu_hist),
        "net_in_Bps": list(net_in_hist),
        "net_out_Bps": list(net_out_hist),
        "disk_read_Bps": list(disk_read_hist),
        "disk_write_Bps": list(disk_write_hist),
        "disk_percent": list(disk_percent_hist),
        "sample_interval": SAMPLE_INTERVAL,
        "generated_at": datetime.utcnow().isoformat(timespec='seconds') + "Z",
    }
    with open(TMP_FILE, "w") as f:
        json.dump(snapshot, f)
    # rename within one filesystem is atomic
    os.replace(TMP_FILE, OUT_FILE)
def main():
    """Daemon entry point: prime the rate baselines, then sample forever."""
    global _prev_net, _prev_disk, _prev_time
    # Re-read the counters right before the loop so the first deltas
    # cover a small, known interval instead of module-import time.
    _prev_net = psutil.net_io_counters()
    _prev_disk = psutil.disk_io_counters()
    _prev_time = time.time()
    time.sleep(0.2)  # brief warm-up
    while True:
        try:
            sample_once()
            write_json_atomic()
        except Exception as exc:
            # prints end up in the systemd journal
            print("Sampler error:", exc)
        time.sleep(SAMPLE_INTERVAL)
if __name__=="__main__":
main()
EOL
sudo systemctl enable --now system-monitor.service
sudo systemctl restart system-monitor.service --no-pager
sudo reboot