diff --git a/encoder/index.php b/encoder/index.php
index df279e1..129037c 100755
--- a/encoder/index.php
+++ b/encoder/index.php
@@ -1,14 +1,10 @@
+
+
+
+
-
+
+
@@ -33,30 +37,30 @@ include 'header.php'; ?>
Last update: —
-
CPU: —% · RAM: —% · In: —KB/s ·
- Out: —KB/s
+
+ CPU: —% ·
+ RAM: —% ·
+ GPU: —% ·
+ In: —KB/s ·
+ Out: —KB/s
+
-
-
-
-
+
+
+
\ No newline at end of file
diff --git a/encoder/static.php b/encoder/static.php
index 38932a9..3b08ead 100755
--- a/encoder/static.php
+++ b/encoder/static.php
@@ -745,7 +745,7 @@ function update_service($which_service)
case 'rtmp1';
update_service_backend("rtmp", "", "");
if ($service_rtmp0_multiple === "enable") {
- $rtmp = "ffmpeg -hwaccel auto -hide_banner -fflags nobuffer -analyzeduration 3000000 -i ";
+ $rtmp = "ffmpeg -hide_banner -fflags nobuffer -analyzeduration 3000000 -i ";
if ($use_common_backend === "transcode_every_time") {
$rtmp .= $input_transcode_every_time;
} else {
diff --git a/install.sh b/install.sh
index 8d6dffc..b9d1eed 100755
--- a/install.sh
+++ b/install.sh
@@ -247,106 +247,126 @@ EOL
cat > /usr/local/bin/nginx_system_monitor_sampler.py<< 'EOL'
#!/usr/bin/env python3
-"""
-Lightweight sampler for nginx static frontend.
-"""
-
-import time, json, os
+import time, json, os, subprocess
from collections import deque
from datetime import datetime
import psutil
-OUT_FILE = "/var/www/encoder/metrics.json"
-TMP_FILE = OUT_FILE + ".tmp"
-SAMPLE_INTERVAL = 10.0 # seconds between samples
-HISTORY_SECONDS = 15 * 60 # 15 minutes
-MAX_SAMPLES = int(HISTORY_SECONDS / SAMPLE_INTERVAL)
+OUT_FILE="/var/www/encoder/metrics.json"   # final metrics file read by the frontend
+TMP_FILE=OUT_FILE+".tmp"                   # staging file for the atomic rename
+SAMPLE_INTERVAL=10.0                       # seconds between samples
+HISTORY_SECONDS=15*60                      # keep 15 minutes of history
+MAX_SAMPLES=int(HISTORY_SECONDS/SAMPLE_INTERVAL)
-# circular buffers
-timestamps = deque(maxlen=MAX_SAMPLES)
-cpu_hist = deque(maxlen=MAX_SAMPLES)
-ram_hist = deque(maxlen=MAX_SAMPLES)
-net_in_hist = deque(maxlen=MAX_SAMPLES)
-net_out_hist = deque(maxlen=MAX_SAMPLES)
-disk_read_hist = deque(maxlen=MAX_SAMPLES)
-disk_write_hist = deque(maxlen=MAX_SAMPLES)
-disk_percent_hist = deque(maxlen=MAX_SAMPLES)
+# fixed-size ring buffers; deque(maxlen=...) drops the oldest sample automatically
+timestamps=deque(maxlen=MAX_SAMPLES)
+cpu_hist=deque(maxlen=MAX_SAMPLES)
+ram_hist=deque(maxlen=MAX_SAMPLES)
+gpu_hist=deque(maxlen=MAX_SAMPLES)
+net_in_hist=deque(maxlen=MAX_SAMPLES)
+net_out_hist=deque(maxlen=MAX_SAMPLES)
+disk_read_hist=deque(maxlen=MAX_SAMPLES)
+disk_write_hist=deque(maxlen=MAX_SAMPLES)
+disk_percent_hist=deque(maxlen=MAX_SAMPLES)
-_prev_net = psutil.net_io_counters()
-_prev_disk = psutil.disk_io_counters()
-_prev_time = time.time()
+# previous counter snapshots for delta/elapsed rate computation
+_prev_net=psutil.net_io_counters()
+_prev_disk=psutil.disk_io_counters()
+_prev_time=time.time()
+
+def igpu_percent():
+    """Best-effort iGPU busy percentage; returns 0.0 when unavailable."""
+    # Fast path: sysfs busy counter.
+    # NOTE(review): mainline i915 does not expose gt_busy_percent (amdgpu uses
+    # device/gpu_busy_percent) -- confirm this path exists on the target kernel.
+    for card in ("card0","card1","card2"):
+        p=f"/sys/class/drm/{card}/gt_busy_percent"
+        if os.path.exists(p):
+            try:
+                with open(p) as f:  # close the fd; bare open().read() leaked it
+                    return float(f.read().strip())
+            except (OSError, ValueError):  # unreadable node or junk content
+                pass
+
+    # Fallback: one intel_gpu_top JSON snapshot (-s is the period in ms).
+    try:
+        out=subprocess.check_output(
+            ["intel_gpu_top","-J","-s","100","-o","-"],
+            stderr=subprocess.DEVNULL,
+            timeout=1
+        )
+        # NOTE(review): -J output may span multiple lines; parsing only the
+        # first line can raise and fall through to 0.0 -- verify on the host.
+        j=json.loads(out.splitlines()[0])
+        return float(j["engines"]["Render/3D/0"]["busy"])
+    except Exception:  # tool missing, timeout, or format change -> report idle
+        return 0.0
def sample_once():
-    global _prev_net, _prev_disk, _prev_time
-    now = time.time()
-    iso = datetime.fromtimestamp(now).isoformat(timespec='seconds')
-    cpu = psutil.cpu_percent(interval=None)
-    ram = psutil.virtual_memory().percent
+    # Take one CPU/RAM/GPU/net/disk sample and append it to the ring buffers.
+    global _prev_net,_prev_disk,_prev_time
+    now=time.time()
+    iso=datetime.fromtimestamp(now).isoformat(timespec='seconds')
+
+    cpu=psutil.cpu_percent(interval=None)
+    ram=psutil.virtual_memory().percent
+    gpu=igpu_percent()
+
+    net=psutil.net_io_counters()
+    disk=psutil.disk_io_counters()
-    net = psutil.net_io_counters()
-    disk = psutil.disk_io_counters()
    try:
-        disk_percent = psutil.disk_usage("/").percent
-    except Exception:
-        disk_percent = 0.0
+        disk_percent=psutil.disk_usage("/").percent
+    except Exception:  # narrow from bare except: don't swallow KeyboardInterrupt/SystemExit
+        disk_percent=0.0
-    elapsed = now - _prev_time if _prev_time else SAMPLE_INTERVAL
-    if elapsed <= 0:
-        elapsed = SAMPLE_INTERVAL
+    elapsed=now-_prev_time if _prev_time else SAMPLE_INTERVAL
+    if elapsed<=0: elapsed=SAMPLE_INTERVAL
-    in_rate = int(((net.bytes_recv - _prev_net.bytes_recv) / elapsed) * 8)
-    out_rate = int(((net.bytes_sent - _prev_net.bytes_sent) / elapsed) * 8)
+    # bytes/s now (JSON keys say *_Bps); the removed "* 8" reported bits/s
+    in_rate=int(((net.bytes_recv-_prev_net.bytes_recv)/elapsed))
+    out_rate=int(((net.bytes_sent-_prev_net.bytes_sent)/elapsed))
-    read_rate = (disk.read_bytes - _prev_disk.read_bytes) / elapsed
-    write_rate = (disk.write_bytes - _prev_disk.write_bytes) / elapsed
+    read_rate=(disk.read_bytes-_prev_disk.read_bytes)/elapsed
+    write_rate=(disk.write_bytes-_prev_disk.write_bytes)/elapsed
    timestamps.append(iso)
-    cpu_hist.append(round(cpu, 2))
-    ram_hist.append(round(ram, 2))
+    cpu_hist.append(round(cpu,2))
+    ram_hist.append(round(ram,2))
+    gpu_hist.append(round(gpu,2))
    net_in_hist.append(int(in_rate))
    net_out_hist.append(int(out_rate))
    disk_read_hist.append(int(read_rate))
    disk_write_hist.append(int(write_rate))
-    disk_percent_hist.append(round(disk_percent, 2))
+    disk_percent_hist.append(round(disk_percent,2))
-    _prev_net = net
-    _prev_disk = disk
-    _prev_time = now
+    _prev_net=net
+    _prev_disk=disk
+    _prev_time=now
def write_json_atomic():
-    payload = {
-        "timestamps": list(timestamps),
-        "cpu_percent": list(cpu_hist),
-        "ram_percent": list(ram_hist),
-        "net_in_Bps": list(net_in_hist),
-        "net_out_Bps": list(net_out_hist),
-        "disk_read_Bps": list(disk_read_hist),
-        "disk_write_Bps": list(disk_write_hist),
-        "disk_percent": list(disk_percent_hist),
-        "sample_interval": SAMPLE_INTERVAL,
-        "generated_at": datetime.utcnow().isoformat(timespec='seconds') + "Z"
+    # Serialize the ring buffers, then atomically replace the served file so
+    # readers never see a half-written JSON document.
+    payload={
+        "timestamps":list(timestamps),
+        "cpu_percent":list(cpu_hist),
+        "ram_percent":list(ram_hist),
+        "igpu_percent":list(gpu_hist),
+        "net_in_Bps":list(net_in_hist),
+        "net_out_Bps":list(net_out_hist),
+        "disk_read_Bps":list(disk_read_hist),
+        "disk_write_Bps":list(disk_write_hist),
+        "disk_percent":list(disk_percent_hist),
+        "sample_interval":SAMPLE_INTERVAL,
+        # same "YYYY-MM-DDTHH:MM:SSZ" string, without deprecated datetime.utcnow()
+        "generated_at":time.strftime("%Y-%m-%dT%H:%M:%SZ",time.gmtime())
    }
-    with open(TMP_FILE, "w") as f:
-        json.dump(payload, f)
-    os.replace(TMP_FILE, OUT_FILE)
+    with open(TMP_FILE,"w") as f:
+        json.dump(payload,f)
+    os.replace(TMP_FILE,OUT_FILE)
def main():
-    global _prev_net, _prev_disk, _prev_time
-    _prev_net = psutil.net_io_counters()
-    _prev_disk = psutil.disk_io_counters()
-    _prev_time = time.time()
-    time.sleep(0.2) # warm-up
+    # Re-prime the counter snapshots so the first sample's rates are sane.
+    global _prev_net,_prev_disk,_prev_time
+    _prev_net=psutil.net_io_counters()
+    _prev_disk=psutil.disk_io_counters()
+    _prev_time=time.time()
+    time.sleep(0.2)  # warm-up so cpu_percent(interval=None) has a baseline
    while True:
        try:
            sample_once()
            write_json_atomic()
        except Exception as e:
-            # systemd journal will capture prints
-            print("Sampler error:", e)
+            # flush: stdout to a journald pipe is block-buffered, so without
+            # it the error may never appear in the journal
+            print("Sampler error:",e,flush=True)
        time.sleep(SAMPLE_INTERVAL)
-if __name__ == "__main__":
+if __name__=="__main__":
    main()
EOL
diff --git a/setup.sh b/setup.sh
index 0e57fbc..b88e0ba 100644
--- a/setup.sh
+++ b/setup.sh
@@ -174,4 +174,133 @@ done
# Validate
mount -a
+cat > /usr/local/bin/nginx_system_monitor_sampler.py<< 'EOL'
+#!/usr/bin/env python3
+import time, json, os, subprocess
+from collections import deque
+from datetime import datetime
+import psutil
+
+OUT_FILE="/var/www/encoder/metrics.json"   # final metrics file read by the frontend
+TMP_FILE=OUT_FILE+".tmp"                   # staging file for the atomic rename
+SAMPLE_INTERVAL=10.0                       # seconds between samples
+HISTORY_SECONDS=15*60                      # keep 15 minutes of history
+MAX_SAMPLES=int(HISTORY_SECONDS/SAMPLE_INTERVAL)
+
+# fixed-size ring buffers; deque(maxlen=...) drops the oldest sample automatically
+timestamps=deque(maxlen=MAX_SAMPLES)
+cpu_hist=deque(maxlen=MAX_SAMPLES)
+ram_hist=deque(maxlen=MAX_SAMPLES)
+gpu_hist=deque(maxlen=MAX_SAMPLES)
+net_in_hist=deque(maxlen=MAX_SAMPLES)
+net_out_hist=deque(maxlen=MAX_SAMPLES)
+disk_read_hist=deque(maxlen=MAX_SAMPLES)
+disk_write_hist=deque(maxlen=MAX_SAMPLES)
+disk_percent_hist=deque(maxlen=MAX_SAMPLES)
+
+# previous counter snapshots for delta/elapsed rate computation
+_prev_net=psutil.net_io_counters()
+_prev_disk=psutil.disk_io_counters()
+_prev_time=time.time()
+
+def igpu_percent():
+    """Best-effort iGPU busy percentage; returns 0.0 when unavailable."""
+    # Fast path: sysfs busy counter.
+    # NOTE(review): mainline i915 does not expose gt_busy_percent (amdgpu uses
+    # device/gpu_busy_percent) -- confirm this path exists on the target kernel.
+    for card in ("card0","card1","card2"):
+        p=f"/sys/class/drm/{card}/gt_busy_percent"
+        if os.path.exists(p):
+            try:
+                with open(p) as f:  # close the fd; bare open().read() leaked it
+                    return float(f.read().strip())
+            except (OSError, ValueError):  # unreadable node or junk content
+                pass
+
+    # Fallback: one intel_gpu_top JSON snapshot (-s is the period in ms).
+    try:
+        out=subprocess.check_output(
+            ["intel_gpu_top","-J","-s","100","-o","-"],
+            stderr=subprocess.DEVNULL,
+            timeout=1
+        )
+        # NOTE(review): -J output may span multiple lines; parsing only the
+        # first line can raise and fall through to 0.0 -- verify on the host.
+        j=json.loads(out.splitlines()[0])
+        return float(j["engines"]["Render/3D/0"]["busy"])
+    except Exception:  # tool missing, timeout, or format change -> report idle
+        return 0.0
+
+def sample_once():
+    # Take one CPU/RAM/GPU/net/disk sample and append it to the ring buffers.
+    global _prev_net,_prev_disk,_prev_time
+    now=time.time()
+    iso=datetime.fromtimestamp(now).isoformat(timespec='seconds')
+
+    cpu=psutil.cpu_percent(interval=None)
+    ram=psutil.virtual_memory().percent
+    gpu=igpu_percent()
+
+    net=psutil.net_io_counters()
+    disk=psutil.disk_io_counters()
+
+    try:
+        disk_percent=psutil.disk_usage("/").percent
+    except Exception:  # narrow from bare except: don't swallow KeyboardInterrupt/SystemExit
+        disk_percent=0.0
+
+    elapsed=now-_prev_time if _prev_time else SAMPLE_INTERVAL
+    if elapsed<=0: elapsed=SAMPLE_INTERVAL
+
+    # bytes/s (JSON keys say *_Bps)
+    in_rate=int(((net.bytes_recv-_prev_net.bytes_recv)/elapsed))
+    out_rate=int(((net.bytes_sent-_prev_net.bytes_sent)/elapsed))
+
+    read_rate=(disk.read_bytes-_prev_disk.read_bytes)/elapsed
+    write_rate=(disk.write_bytes-_prev_disk.write_bytes)/elapsed
+
+    timestamps.append(iso)
+    cpu_hist.append(round(cpu,2))
+    ram_hist.append(round(ram,2))
+    gpu_hist.append(round(gpu,2))
+    net_in_hist.append(int(in_rate))
+    net_out_hist.append(int(out_rate))
+    disk_read_hist.append(int(read_rate))
+    disk_write_hist.append(int(write_rate))
+    disk_percent_hist.append(round(disk_percent,2))
+
+    _prev_net=net
+    _prev_disk=disk
+    _prev_time=now
+
+def write_json_atomic():
+    # Serialize the ring buffers, then atomically replace the served file so
+    # readers never see a half-written JSON document.
+    payload={
+        "timestamps":list(timestamps),
+        "cpu_percent":list(cpu_hist),
+        "ram_percent":list(ram_hist),
+        "igpu_percent":list(gpu_hist),
+        "net_in_Bps":list(net_in_hist),
+        "net_out_Bps":list(net_out_hist),
+        "disk_read_Bps":list(disk_read_hist),
+        "disk_write_Bps":list(disk_write_hist),
+        "disk_percent":list(disk_percent_hist),
+        "sample_interval":SAMPLE_INTERVAL,
+        # same "YYYY-MM-DDTHH:MM:SSZ" string, without deprecated datetime.utcnow()
+        "generated_at":time.strftime("%Y-%m-%dT%H:%M:%SZ",time.gmtime())
+    }
+    with open(TMP_FILE,"w") as f:
+        json.dump(payload,f)
+    os.replace(TMP_FILE,OUT_FILE)
+
+def main():
+    # Re-prime the counter snapshots so the first sample's rates are sane.
+    global _prev_net,_prev_disk,_prev_time
+    _prev_net=psutil.net_io_counters()
+    _prev_disk=psutil.disk_io_counters()
+    _prev_time=time.time()
+    time.sleep(0.2)  # warm-up so cpu_percent(interval=None) has a baseline
+
+    while True:
+        try:
+            sample_once()
+            write_json_atomic()
+        except Exception as e:
+            # flush: stdout to a journald pipe is block-buffered, so without
+            # it the error may never appear in the journal
+            print("Sampler error:",e,flush=True)
+        time.sleep(SAMPLE_INTERVAL)
+
+if __name__=="__main__":
+    main()
+EOL
+
+sudo systemctl enable --now system-monitor.service
+# restart picks up a changed unit file if the service was already running;
+# --no-pager is pointless on restart (it produces no paged output) -- dropped
+sudo systemctl restart system-monitor.service
+
+
sudo reboot
\ No newline at end of file