This commit is contained in:
devdatt 2026-02-18 04:40:42 +05:30
parent 8f072d967e
commit e5873e7dc3
1 changed file with 79 additions and 68 deletions

147
setup.sh
View File

@ -181,6 +181,7 @@ import json
import os import os
import subprocess import subprocess
import threading import threading
import re
import psutil import psutil
from collections import deque from collections import deque
from datetime import datetime, timezone from datetime import datetime, timezone
@ -193,116 +194,126 @@ HISTORY_SECONDS = 15 * 60
MAX_SAMPLES = int(HISTORY_SECONDS / SAMPLE_INTERVAL)

# ---------------- HISTORY BUFFERS ----------------
# One bounded deque per metric; maxlen gives us the sliding window for free.
keys = [
    "timestamps", "cpu_percent", "ram_percent", "gpu_total", "gpu_render",
    "gpu_video", "gpu_blitter", "gpu_videoenhance", "net_in_Bps",
    "net_out_Bps", "disk_read_Bps", "disk_write_Bps", "disk_percent",
]
hist = {metric: deque(maxlen=MAX_SAMPLES) for metric in keys}

# Latest per-engine GPU busy percentages, written by the monitor thread
# and snapshotted by the sampler.
gpu_data = {
    "total": 0.0,
    "Render/3D": 0.0,
    "Video": 0.0,
    "Blitter": 0.0,
    "VideoEnhance": 0.0,
}
gpu_lock = threading.Lock()  # guards gpu_data across the two threads
# ---------------- GPU MONITOR THREAD ----------------
def _engine_key(line):
    """Return the gpu_data key named on this intel_gpu_top output line, or None.

    Order matters: "VideoEnhance" contains "Video" as a substring, so it
    must be tested first or its samples get misattributed to "Video".
    """
    for name, alias in (("VideoEnhance", "vecs"), ("Render/3D", "rcs"),
                        ("Video", "vcs"), ("Blitter", "bcs")):
        if name in line or alias in line:
            return name
    return None


def gpu_monitor():
    """Stream `intel_gpu_top -J` forever, updating gpu_data with engine busy %.

    Intended for a daemon thread.  Note: intel_gpu_top needs root or
    CAP_PERFMON to report engine utilisation.
    """
    busy_re = re.compile(r'"busy":\s*([\d.]+)')  # hoisted out of the read loop
    cmd = ["stdbuf", "-oL", "intel_gpu_top", "-J", "-s", "1000"]
    while True:
        p = None
        try:
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.DEVNULL, text=True)
            # In -J (pretty-printed JSON) output the engine header line
            # ('"Render/3D/0": {') and its '"busy": X.XX' line are separate,
            # so remember the last engine header seen and attribute the next
            # busy value to it.  (The old same-line checks could never match.)
            current = None
            for line in p.stdout:
                key = _engine_key(line)
                if key is not None:
                    current = key
                    continue
                m = busy_re.search(line)
                if m and current is not None:
                    val = float(m.group(1))
                    with gpu_lock:
                        gpu_data[current] = val
                        # Headline figure = busiest engine right now.
                        # Exclude "total" itself, otherwise the max can
                        # never decrease once it has spiked.
                        gpu_data["total"] = max(
                            v for k, v in gpu_data.items() if k != "total")
                    current = None
        except (OSError, ValueError):
            pass  # tool missing/not permitted, or a malformed float
        finally:
            if p is not None:
                p.kill()
                p.wait()  # reap the child so we don't accumulate zombies
        time.sleep(5)  # cool down before relaunching intel_gpu_top
# ---------------- SAMPLING ----------------
# Baseline counters for rate computation; refreshed at the end of each sample.
_prev_net = psutil.net_io_counters()
_prev_disk = psutil.disk_io_counters()
_prev_time = time.time()


def sample_once():
    """Collect one CPU/RAM/GPU/network/disk sample into the history buffers.

    Rates are derived from the delta against the previous sample's counters;
    module-level _prev_* state is updated at the end of every call.
    """
    global _prev_net, _prev_disk, _prev_time
    now = time.time()
    elapsed = max(now - _prev_time, 0.001)  # avoid division by zero

    # System basics
    cpu = psutil.cpu_percent()
    ram = psutil.virtual_memory().percent

    # Network rates; max(0, ...) guards against kernel counter resets
    net = psutil.net_io_counters()
    in_rate = max(0, (net.bytes_recv - _prev_net.bytes_recv) / elapsed)
    out_rate = max(0, (net.bytes_sent - _prev_net.bytes_sent) / elapsed)

    # Disk rates & usage
    disk = psutil.disk_io_counters()
    read_rate = max(0, (disk.read_bytes - _prev_disk.read_bytes) / elapsed)
    write_rate = max(0, (disk.write_bytes - _prev_disk.write_bytes) / elapsed)
    try:
        d_perc = psutil.disk_usage('/').percent
    except OSError:  # narrowed from bare except: disk_usage raises OSError
        d_perc = 0.0

    # GPU data (thread-safe snapshot so the monitor thread can keep writing)
    with gpu_lock:
        g = gpu_data.copy()

    # Append to history buffers
    hist["timestamps"].append(
        datetime.fromtimestamp(now).isoformat(timespec='seconds'))
    hist["cpu_percent"].append(round(cpu, 2))
    hist["ram_percent"].append(round(ram, 2))
    hist["net_in_Bps"].append(int(in_rate))
    hist["net_out_Bps"].append(int(out_rate))
    hist["disk_read_Bps"].append(int(read_rate))
    hist["disk_write_Bps"].append(int(write_rate))
    hist["disk_percent"].append(round(d_perc, 2))
    hist["gpu_total"].append(round(g["total"], 2))
    hist["gpu_render"].append(round(g["Render/3D"], 2))
    hist["gpu_video"].append(round(g["Video"], 2))
    hist["gpu_blitter"].append(round(g["Blitter"], 2))
    hist["gpu_videoenhance"].append(round(g["VideoEnhance"], 2))

    # Save state for next tick
    _prev_net, _prev_disk, _prev_time = net, disk, now
def write_json_atomic():
    """Serialise the history buffers to TMP_FILE, then rename over OUT_FILE.

    os.replace is atomic on POSIX, so readers never observe partial JSON —
    but only if the temp file's bytes are on disk first, hence the fsync.
    Best-effort: failures are reported, not raised, so the sample loop lives.
    """
    payload = {k: list(v) for k, v in hist.items()}
    payload["sample_interval"] = SAMPLE_INTERVAL
    payload["generated_at"] = datetime.now(timezone.utc).isoformat(
        timespec='seconds').replace("+00:00", "Z")
    try:
        with open(TMP_FILE, "w") as f:
            json.dump(payload, f)
            f.flush()
            os.fsync(f.fileno())  # without this a crash can publish a truncated file
        os.replace(TMP_FILE, OUT_FILE)
    except Exception as e:
        print(f"File write error: {e}")
def main():
    """Start the GPU monitor thread, then sample and publish forever."""
    gpu_thread = threading.Thread(target=gpu_monitor, daemon=True)
    gpu_thread.start()
    print(f"Monitoring started. Writing to {OUT_FILE}...")
    while True:
        # A failed tick must not kill the daemon; report and keep going.
        try:
            sample_once()
            write_json_atomic()
        except Exception as e:
            print(f"Loop error: {e}")
        time.sleep(SAMPLE_INTERVAL)


if __name__ == "__main__":
    main()
EOL EOL
sudo systemctl enable --now system-monitor.service sudo systemctl enable --now system-monitor.service