This commit is contained in:
devdatt 2026-02-18 02:54:30 +05:30
parent 31eac1f7a1
commit 130dfcdf56
4 changed files with 285 additions and 103 deletions

View File

@ -1,14 +1,10 @@
<?php <?php
/* /*
Urmi you happy me happy licence Urmi you happy me happy licence
Copyright (c) 2026 shreebhattji Copyright (c) 2026 shreebhattji
License text: License text:
https://github.com/shreebhattji/Urmi/blob/main/licence.md https://github.com/shreebhattji/Urmi/blob/main/licence.md
*/ */
include 'header.php'; ?> include 'header.php'; ?>
<div class="containerindex"> <div class="containerindex">
@ -17,15 +13,23 @@ include 'header.php'; ?>
<h3>CPU (%)</h3> <h3>CPU (%)</h3>
<div class="chart-wrap"><canvas id="cpuChart"></canvas></div> <div class="chart-wrap"><canvas id="cpuChart"></canvas></div>
</div> </div>
<div class="card"> <div class="card">
<h3>RAM (%)</h3> <h3>RAM (%)</h3>
<div class="chart-wrap"><canvas id="ramChart"></canvas></div> <div class="chart-wrap"><canvas id="ramChart"></canvas></div>
</div> </div>
<div class="card wide">
<div class="card">
<h3>Intel iGPU (%)</h3>
<div class="chart-wrap"><canvas id="gpuChart"></canvas></div>
</div>
<div class="card">
<h3>Network (KB/s)</h3> <h3>Network (KB/s)</h3>
<div class="chart-wrap"><canvas id="netChart"></canvas></div> <div class="chart-wrap"><canvas id="netChart"></canvas></div>
</div> </div>
<div class="card wide">
<div class="card">
<h3>Disk I/O (KB/s) & Disk %</h3> <h3>Disk I/O (KB/s) & Disk %</h3>
<div class="chart-wrap"><canvas id="diskChart"></canvas></div> <div class="chart-wrap"><canvas id="diskChart"></canvas></div>
</div> </div>
@ -33,30 +37,30 @@ include 'header.php'; ?>
<div style="margin-top:12px; color:#9fb2d6; display:flex; justify-content:space-between;"> <div style="margin-top:12px; color:#9fb2d6; display:flex; justify-content:space-between;">
<div>Last update: <span id="lastUpdate"></span></div> <div>Last update: <span id="lastUpdate"></span></div>
<div>CPU: <span id="lastCpu"></span>% · RAM: <span id="lastRam"></span>% · In: <span id="lastIn"></span>KB/s · <div>
Out: <span id="lastOut"></span>KB/s</div> CPU: <span id="lastCpu"></span>% ·
RAM: <span id="lastRam"></span>% ·
GPU: <span id="lastGpu"></span>% ·
In: <span id="lastIn"></span>KB/s ·
Out: <span id="lastOut"></span>KB/s
</div>
</div> </div>
<br> <br><br><br><br>
<br>
<br>
<br>
</div> </div>
<script> <script>
const POLL_MS = 1000; const POLL_MS = 1000;
const JSON_URL = "metrics.json"; const JSON_URL = "metrics.json";
const toKB = v => Math.round(v / 1024);
function toKB(v) { /* CPU */
return Math.round(v / 1024); const cpuChart = new Chart(document.getElementById('cpuChart'), {
}
const cpuChart = new Chart(document.getElementById('cpuChart').getContext('2d'), {
type: 'line', type: 'line',
data: { data: {
labels: [], labels: [],
datasets: [{ datasets: [{
label: 'CPU %', label: 'CPU %',
data: [], data: [],
fill: false,
tension: 0.2 tension: 0.2
}] }]
}, },
@ -71,14 +75,15 @@ include 'header.php'; ?>
} }
} }
}); });
const ramChart = new Chart(document.getElementById('ramChart').getContext('2d'), {
/* RAM */
const ramChart = new Chart(document.getElementById('ramChart'), {
type: 'line', type: 'line',
data: { data: {
labels: [], labels: [],
datasets: [{ datasets: [{
label: 'RAM %', label: 'RAM %',
data: [], data: [],
fill: false,
tension: 0.2 tension: 0.2
}] }]
}, },
@ -93,20 +98,43 @@ include 'header.php'; ?>
} }
} }
}); });
const netChart = new Chart(document.getElementById('netChart').getContext('2d'), {
/* GPU */
const gpuChart = new Chart(document.getElementById('gpuChart'), {
type: 'line',
data: {
labels: [],
datasets: [{
label: 'iGPU %',
data: [],
tension: 0.2
}]
},
options: {
responsive: true,
maintainAspectRatio: false,
scales: {
y: {
min: 0,
max: 100
}
}
}
});
/* Network */
const netChart = new Chart(document.getElementById('netChart'), {
type: 'line', type: 'line',
data: { data: {
labels: [], labels: [],
datasets: [{ datasets: [{
label: 'Net In (KB/s)', label: 'Net In (KB/s)',
data: [], data: [],
fill: false,
tension: 0.2 tension: 0.2
}, },
{ {
label: 'Net Out (KB/s)', label: 'Net Out (KB/s)',
data: [], data: [],
fill: false,
tension: 0.2 tension: 0.2
} }
] ]
@ -121,27 +149,26 @@ include 'header.php'; ?>
} }
} }
}); });
const diskChart = new Chart(document.getElementById('diskChart').getContext('2d'), {
/* Disk */
const diskChart = new Chart(document.getElementById('diskChart'), {
type: 'line', type: 'line',
data: { data: {
labels: [], labels: [],
datasets: [{ datasets: [{
label: 'Disk Read (KB/s)', label: 'Disk Read (KB/s)',
data: [], data: [],
fill: false,
tension: 0.2 tension: 0.2
}, },
{ {
label: 'Disk Write (KB/s)', label: 'Disk Write (KB/s)',
data: [], data: [],
fill: false,
tension: 0.2 tension: 0.2
}, },
{ {
label: 'Disk %', label: 'Disk %',
data: [], data: [],
yAxisID: 'percent', yAxisID: 'percent',
fill: false,
tension: 0.2 tension: 0.2
} }
] ]
@ -174,16 +201,20 @@ include 'header.php'; ?>
const res = await fetch(JSON_URL + "?_=" + Date.now(), { const res = await fetch(JSON_URL + "?_=" + Date.now(), {
cache: 'no-store' cache: 'no-store'
}); });
if (!res.ok) throw new Error('fetch fail ' + res.status); if (!res.ok) throw new Error(res.status);
const j = await res.json(); const j = await res.json();
const labels = j.timestamps.map(t => new Date(t).toLocaleTimeString()); const labels = j.timestamps.map(t => new Date(t).toLocaleTimeString());
cpuChart.data.labels = labels; cpuChart.data.labels = labels;
cpuChart.data.datasets[0].data = j.cpu_percent; cpuChart.data.datasets[0].data = j.cpu_percent;
ramChart.data.labels = labels; ramChart.data.labels = labels;
ramChart.data.datasets[0].data = j.ram_percent; ramChart.data.datasets[0].data = j.ram_percent;
gpuChart.data.labels = labels;
gpuChart.data.datasets[0].data = j.igpu_percent;
netChart.data.labels = labels; netChart.data.labels = labels;
netChart.data.datasets[0].data = j.net_in_Bps.map(toKB); netChart.data.datasets[0].data = j.net_in_Bps.map(toKB);
netChart.data.datasets[1].data = j.net_out_Bps.map(toKB); netChart.data.datasets[1].data = j.net_out_Bps.map(toKB);
@ -195,23 +226,25 @@ include 'header.php'; ?>
cpuChart.update(); cpuChart.update();
ramChart.update(); ramChart.update();
gpuChart.update();
netChart.update(); netChart.update();
diskChart.update(); diskChart.update();
const last = labels.length - 1; const last = labels.length - 1;
if (last >= 0) { if (last >= 0) {
document.getElementById('lastUpdate').textContent = labels[last]; lastUpdate.textContent = labels[last];
document.getElementById('lastCpu').textContent = j.cpu_percent[last]; lastCpu.textContent = j.cpu_percent[last];
document.getElementById('lastRam').textContent = j.ram_percent[last]; lastRam.textContent = j.ram_percent[last];
document.getElementById('lastIn').textContent = toKB(j.net_in_Bps[last]); lastGpu.textContent = j.igpu_percent[last];
document.getElementById('lastOut').textContent = toKB(j.net_out_Bps[last]); lastIn.textContent = toKB(j.net_in_Bps[last]);
lastOut.textContent = toKB(j.net_out_Bps[last]);
} }
} catch (e) { } catch (e) {
console.error('update failed', e); console.error(e);
} }
} }
setInterval(update, POLL_MS); setInterval(update, POLL_MS);
update(); update();
</script> </script>
<?php include 'footer.php'; ?> <?php include 'footer.php'; ?>

View File

@ -745,7 +745,7 @@ function update_service($which_service)
case 'rtmp1'; case 'rtmp1';
update_service_backend("rtmp", "", ""); update_service_backend("rtmp", "", "");
if ($service_rtmp0_multiple === "enable") { if ($service_rtmp0_multiple === "enable") {
$rtmp = "ffmpeg -hwaccel auto -hide_banner -fflags nobuffer -analyzeduration 3000000 -i "; $rtmp = "ffmpeg -hide_banner -fflags nobuffer -analyzeduration 3000000 -i ";
if ($use_common_backend === "transcode_every_time") { if ($use_common_backend === "transcode_every_time") {
$rtmp .= $input_transcode_every_time; $rtmp .= $input_transcode_every_time;
} else { } else {

View File

@ -247,106 +247,126 @@ EOL
cat > /usr/local/bin/nginx_system_monitor_sampler.py<< 'EOL' cat > /usr/local/bin/nginx_system_monitor_sampler.py<< 'EOL'
#!/usr/bin/env python3 #!/usr/bin/env python3
""" import time, json, os, subprocess
Lightweight sampler for nginx static frontend.
"""
import time, json, os
from collections import deque from collections import deque
from datetime import datetime from datetime import datetime
import psutil import psutil
OUT_FILE = "/var/www/encoder/metrics.json" OUT_FILE="/var/www/encoder/metrics.json"
TMP_FILE = OUT_FILE + ".tmp" TMP_FILE=OUT_FILE+".tmp"
SAMPLE_INTERVAL = 10.0 # seconds between samples SAMPLE_INTERVAL=10.0
HISTORY_SECONDS = 15 * 60 # 15 minutes HISTORY_SECONDS=15*60
MAX_SAMPLES = int(HISTORY_SECONDS / SAMPLE_INTERVAL) MAX_SAMPLES=int(HISTORY_SECONDS/SAMPLE_INTERVAL)
# circular buffers timestamps=deque(maxlen=MAX_SAMPLES)
timestamps = deque(maxlen=MAX_SAMPLES) cpu_hist=deque(maxlen=MAX_SAMPLES)
cpu_hist = deque(maxlen=MAX_SAMPLES) ram_hist=deque(maxlen=MAX_SAMPLES)
ram_hist = deque(maxlen=MAX_SAMPLES) gpu_hist=deque(maxlen=MAX_SAMPLES)
net_in_hist = deque(maxlen=MAX_SAMPLES) net_in_hist=deque(maxlen=MAX_SAMPLES)
net_out_hist = deque(maxlen=MAX_SAMPLES) net_out_hist=deque(maxlen=MAX_SAMPLES)
disk_read_hist = deque(maxlen=MAX_SAMPLES) disk_read_hist=deque(maxlen=MAX_SAMPLES)
disk_write_hist = deque(maxlen=MAX_SAMPLES) disk_write_hist=deque(maxlen=MAX_SAMPLES)
disk_percent_hist = deque(maxlen=MAX_SAMPLES) disk_percent_hist=deque(maxlen=MAX_SAMPLES)
_prev_net = psutil.net_io_counters() _prev_net=psutil.net_io_counters()
_prev_disk = psutil.disk_io_counters() _prev_disk=psutil.disk_io_counters()
_prev_time = time.time() _prev_time=time.time()
def igpu_percent():
# fastest method (modern kernels)
for card in ("card0","card1","card2"):
p=f"/sys/class/drm/{card}/gt_busy_percent"
if os.path.exists(p):
try:
return float(open(p).read().strip())
except:
pass
# fallback: intel_gpu_top JSON snapshot
try:
out=subprocess.check_output(
["intel_gpu_top","-J","-s","100","-o","-"],
stderr=subprocess.DEVNULL,
timeout=1
)
j=json.loads(out.splitlines()[0])
return float(j["engines"]["Render/3D/0"]["busy"])
except:
return 0.0
def sample_once(): def sample_once():
global _prev_net, _prev_disk, _prev_time global _prev_net,_prev_disk,_prev_time
now = time.time() now=time.time()
iso = datetime.fromtimestamp(now).isoformat(timespec='seconds') iso=datetime.fromtimestamp(now).isoformat(timespec='seconds')
cpu = psutil.cpu_percent(interval=None)
ram = psutil.virtual_memory().percent cpu=psutil.cpu_percent(interval=None)
ram=psutil.virtual_memory().percent
gpu=igpu_percent()
net=psutil.net_io_counters()
disk=psutil.disk_io_counters()
net = psutil.net_io_counters()
disk = psutil.disk_io_counters()
try: try:
disk_percent = psutil.disk_usage("/").percent disk_percent=psutil.disk_usage("/").percent
except Exception: except:
disk_percent = 0.0 disk_percent=0.0
elapsed = now - _prev_time if _prev_time else SAMPLE_INTERVAL elapsed=now-_prev_time if _prev_time else SAMPLE_INTERVAL
if elapsed <= 0: if elapsed<=0: elapsed=SAMPLE_INTERVAL
elapsed = SAMPLE_INTERVAL
in_rate = int(((net.bytes_recv - _prev_net.bytes_recv) / elapsed) * 8) in_rate=int(((net.bytes_recv-_prev_net.bytes_recv)/elapsed))
out_rate = int(((net.bytes_sent - _prev_net.bytes_sent) / elapsed) * 8) out_rate=int(((net.bytes_sent-_prev_net.bytes_sent)/elapsed))
read_rate = (disk.read_bytes - _prev_disk.read_bytes) / elapsed read_rate=(disk.read_bytes-_prev_disk.read_bytes)/elapsed
write_rate = (disk.write_bytes - _prev_disk.write_bytes) / elapsed write_rate=(disk.write_bytes-_prev_disk.write_bytes)/elapsed
timestamps.append(iso) timestamps.append(iso)
cpu_hist.append(round(cpu, 2)) cpu_hist.append(round(cpu,2))
ram_hist.append(round(ram, 2)) ram_hist.append(round(ram,2))
gpu_hist.append(round(gpu,2))
net_in_hist.append(int(in_rate)) net_in_hist.append(int(in_rate))
net_out_hist.append(int(out_rate)) net_out_hist.append(int(out_rate))
disk_read_hist.append(int(read_rate)) disk_read_hist.append(int(read_rate))
disk_write_hist.append(int(write_rate)) disk_write_hist.append(int(write_rate))
disk_percent_hist.append(round(disk_percent, 2)) disk_percent_hist.append(round(disk_percent,2))
_prev_net = net _prev_net=net
_prev_disk = disk _prev_disk=disk
_prev_time = now _prev_time=now
def write_json_atomic(): def write_json_atomic():
payload = { payload={
"timestamps": list(timestamps), "timestamps":list(timestamps),
"cpu_percent": list(cpu_hist), "cpu_percent":list(cpu_hist),
"ram_percent": list(ram_hist), "ram_percent":list(ram_hist),
"net_in_Bps": list(net_in_hist), "igpu_percent":list(gpu_hist),
"net_out_Bps": list(net_out_hist), "net_in_Bps":list(net_in_hist),
"disk_read_Bps": list(disk_read_hist), "net_out_Bps":list(net_out_hist),
"disk_write_Bps": list(disk_write_hist), "disk_read_Bps":list(disk_read_hist),
"disk_percent": list(disk_percent_hist), "disk_write_Bps":list(disk_write_hist),
"sample_interval": SAMPLE_INTERVAL, "disk_percent":list(disk_percent_hist),
"generated_at": datetime.utcnow().isoformat(timespec='seconds') + "Z" "sample_interval":SAMPLE_INTERVAL,
"generated_at":datetime.utcnow().isoformat(timespec='seconds')+"Z"
} }
with open(TMP_FILE, "w") as f: with open(TMP_FILE,"w") as f: json.dump(payload,f)
json.dump(payload, f) os.replace(TMP_FILE,OUT_FILE)
os.replace(TMP_FILE, OUT_FILE)
def main(): def main():
global _prev_net, _prev_disk, _prev_time global _prev_net,_prev_disk,_prev_time
_prev_net = psutil.net_io_counters() _prev_net=psutil.net_io_counters()
_prev_disk = psutil.disk_io_counters() _prev_disk=psutil.disk_io_counters()
_prev_time = time.time() _prev_time=time.time()
time.sleep(0.2) # warm-up time.sleep(0.2)
while True: while True:
try: try:
sample_once() sample_once()
write_json_atomic() write_json_atomic()
except Exception as e: except Exception as e:
# systemd journal will capture prints print("Sampler error:",e)
print("Sampler error:", e)
time.sleep(SAMPLE_INTERVAL) time.sleep(SAMPLE_INTERVAL)
if __name__ == "__main__": if __name__=="__main__":
main() main()
EOL EOL

129
setup.sh
View File

@ -174,4 +174,133 @@ done
# Validate # Validate
mount -a mount -a
cat > /usr/local/bin/nginx_system_monitor_sampler.py<< 'EOL'
#!/usr/bin/env python3
import time, json, os, subprocess
from collections import deque
from datetime import datetime
import psutil
# Where the nginx frontend polls for metrics; written via TMP_FILE then
# os.replace() so readers never see a partially written file.
OUT_FILE="/var/www/encoder/metrics.json"
TMP_FILE=OUT_FILE+".tmp"
# One sample every 10 s over a rolling 15-minute window.
SAMPLE_INTERVAL=10.0
HISTORY_SECONDS=15*60
MAX_SAMPLES=int(HISTORY_SECONDS/SAMPLE_INTERVAL)
# Rolling history buffers; deque(maxlen=...) discards the oldest sample
# automatically once the window is full.
timestamps=deque(maxlen=MAX_SAMPLES)
cpu_hist=deque(maxlen=MAX_SAMPLES)
ram_hist=deque(maxlen=MAX_SAMPLES)
gpu_hist=deque(maxlen=MAX_SAMPLES)
net_in_hist=deque(maxlen=MAX_SAMPLES)
net_out_hist=deque(maxlen=MAX_SAMPLES)
disk_read_hist=deque(maxlen=MAX_SAMPLES)
disk_write_hist=deque(maxlen=MAX_SAMPLES)
disk_percent_hist=deque(maxlen=MAX_SAMPLES)
# Previous cumulative counter snapshots, used by sample_once() to derive
# per-second rates (re-primed again in main() before the loop starts).
_prev_net=psutil.net_io_counters()
_prev_disk=psutil.disk_io_counters()
_prev_time=time.time()
def igpu_percent():
    """Return the Intel iGPU busy percentage as a float.

    Tries the cheap sysfs ``gt_busy_percent`` file first (modern i915
    kernels), then falls back to a single short ``intel_gpu_top`` JSON
    snapshot. Returns 0.0 when no Intel GPU / tooling is available, so
    the sampler keeps working on any machine.
    """
    # Fast path: sysfs exposes an instantaneous busy percentage.
    for card in ("card0","card1","card2"):
        p=f"/sys/class/drm/{card}/gt_busy_percent"
        if os.path.exists(p):
            try:
                # 'with' closes the handle; the old open(p).read() leaked it.
                with open(p) as f:
                    return float(f.read().strip())
            except (OSError, ValueError):
                pass  # unreadable or non-numeric: try the next card / fallback
    # Fallback: one ~100 ms intel_gpu_top JSON sample written to stdout.
    try:
        out=subprocess.check_output(
            ["intel_gpu_top","-J","-s","100","-o","-"],
            stderr=subprocess.DEVNULL,
            timeout=1
        )
        j=json.loads(out.splitlines()[0])
        return float(j["engines"]["Render/3D/0"]["busy"])
    # Narrowed from bare 'except:' so SystemExit/KeyboardInterrupt still
    # propagate. Covers: tool missing (OSError), timeout/non-zero exit
    # (SubprocessError), empty output (IndexError), bad JSON (ValueError),
    # unexpected schema (KeyError/TypeError).
    except (OSError, subprocess.SubprocessError, ValueError,
            KeyError, IndexError, TypeError):
        return 0.0
def sample_once():
    """Take one CPU/RAM/GPU/network/disk measurement and append it to
    the module-level history deques.

    Rates are computed as deltas against the previous psutil counter
    snapshots divided by the wall-clock time elapsed since the last
    sample; the snapshots are then advanced.
    """
    global _prev_net,_prev_disk,_prev_time
    now=time.time()
    iso=datetime.fromtimestamp(now).isoformat(timespec='seconds')
    cpu=psutil.cpu_percent(interval=None)  # non-blocking: usage since last call
    ram=psutil.virtual_memory().percent
    gpu=igpu_percent()
    net=psutil.net_io_counters()
    disk=psutil.disk_io_counters()
    try:
        disk_percent=psutil.disk_usage("/").percent
    # Narrowed from bare 'except:' so SystemExit/KeyboardInterrupt propagate.
    except Exception:
        disk_percent=0.0
    elapsed=now-_prev_time if _prev_time else SAMPLE_INTERVAL
    if elapsed<=0:
        elapsed=SAMPLE_INTERVAL  # guard against clock steps going backwards
    # Counters are cumulative; convert the deltas to bytes-per-second.
    in_rate=int((net.bytes_recv-_prev_net.bytes_recv)/elapsed)
    out_rate=int((net.bytes_sent-_prev_net.bytes_sent)/elapsed)
    read_rate=(disk.read_bytes-_prev_disk.read_bytes)/elapsed
    write_rate=(disk.write_bytes-_prev_disk.write_bytes)/elapsed
    timestamps.append(iso)
    cpu_hist.append(round(cpu,2))
    ram_hist.append(round(ram,2))
    gpu_hist.append(round(gpu,2))
    net_in_hist.append(in_rate)    # already int; old code wrapped int() twice
    net_out_hist.append(out_rate)
    disk_read_hist.append(int(read_rate))
    disk_write_hist.append(int(write_rate))
    disk_percent_hist.append(round(disk_percent,2))
    _prev_net=net
    _prev_disk=disk
    _prev_time=now
def write_json_atomic():
    """Serialize the current history buffers to OUT_FILE as JSON.

    The payload is written to TMP_FILE first and swapped into place with
    os.replace(), so nginx never serves a half-written metrics.json.
    """
    from datetime import timezone  # local: module header only imports datetime
    payload={
        "timestamps":list(timestamps),
        "cpu_percent":list(cpu_hist),
        "ram_percent":list(ram_hist),
        "igpu_percent":list(gpu_hist),
        "net_in_Bps":list(net_in_hist),
        "net_out_Bps":list(net_out_hist),
        "disk_read_Bps":list(disk_read_hist),
        "disk_write_Bps":list(disk_write_hist),
        "disk_percent":list(disk_percent_hist),
        "sample_interval":SAMPLE_INTERVAL,
        # datetime.utcnow() is deprecated (3.12+); build the identical
        # "...Z" string from an aware UTC timestamp instead.
        "generated_at":datetime.now(timezone.utc).replace(tzinfo=None)
                       .isoformat(timespec='seconds')+"Z"
    }
    with open(TMP_FILE,"w") as f:
        json.dump(payload,f)
    os.replace(TMP_FILE,OUT_FILE)  # atomic rename on the same filesystem
def main():
    """Sampler entry point: prime the counters, then sample forever.

    Per-iteration failures are logged and swallowed so one bad sample
    does not kill the service and make systemd thrash on restarts.
    """
    global _prev_net,_prev_disk,_prev_time
    # Prime the baseline snapshots so the first delta is meaningful.
    _prev_net=psutil.net_io_counters()
    _prev_disk=psutil.disk_io_counters()
    _prev_time=time.time()
    time.sleep(0.2)  # brief warm-up before the first sample
    while True:
        try:
            sample_once()
            write_json_atomic()
        except Exception as e:
            # flush=True so the message reaches the systemd journal
            # promptly even when stdout is block-buffered (non-tty).
            print("Sampler error:",e,flush=True)
        time.sleep(SAMPLE_INTERVAL)

if __name__=="__main__":
    main()
EOL
# Enable the sampler at boot and start it now, then restart so the
# freshly written sampler script above is picked up.
sudo systemctl enable --now system-monitor.service
sudo systemctl restart system-monitor.service --no-pager
sudo reboot sudo reboot