update
This commit is contained in:
parent
e5873e7dc3
commit
dd71e27e3d
119
setup.sh
119
setup.sh
|
|
@ -194,7 +194,6 @@ HISTORY_SECONDS = 15 * 60
|
|||
# Number of retained samples: enough to cover HISTORY_SECONDS at SAMPLE_INTERVAL.
MAX_SAMPLES = int(HISTORY_SECONDS / SAMPLE_INTERVAL)

# ---------------- HISTORY BUFFERS ----------------
# One bounded deque per metric; old samples fall off the left automatically.
keys = [
    "timestamps", "cpu_percent", "ram_percent", "gpu_total", "gpu_render",
    "gpu_video", "gpu_blitter", "gpu_videoenhance", "net_in_Bps",
    "net_out_Bps", "disk_read_Bps", "disk_write_Bps", "disk_percent",
]
hist = {k: deque(maxlen=MAX_SAMPLES) for k in keys}

# State for rates: previous counter snapshots + timestamp for delta computation.
_prev_net = psutil.net_io_counters()
_prev_disk = psutil.disk_io_counters()
_prev_time = time.time()

# Latest per-engine GPU busy percentages, written by the monitor thread and
# read (snapshotted) by the sampler; guarded by gpu_lock.
gpu_data = {"total": 0.0, "render": 0.0, "video": 0.0, "blitter": 0.0, "ve": 0.0}
gpu_lock = threading.Lock()
||||
# ---------------- GPU MONITOR THREAD ----------------

# Engine name -> compiled pattern grabbing the engine's "busy" value.
# Hoisted to module scope so the hot read loop does no recompilation.
_ENGINE_PATTERNS = {
    "render": re.compile(r'"Render/3D":\s*{\s*"busy":\s*([\d\.]+)'),
    "video": re.compile(r'"Video":\s*{\s*"busy":\s*([\d\.]+)'),
    "blitter": re.compile(r'"Blitter":\s*{\s*"busy":\s*([\d\.]+)'),
    "ve": re.compile(r'"VideoEnhance":\s*{\s*"busy":\s*([\d\.]+)'),
}


def gpu_monitor():
    """Background thread: stream intel_gpu_top JSON and update gpu_data.

    Runs forever; on any failure (binary missing, process died, parse error)
    it backs off briefly and restarts the subprocess.

    NOTE(review): intel_gpu_top needs root or CAP_PERFMON to report engine
    utilization; without it the values stay at 0.
    """
    global gpu_data
    # -J emits JSON, -s 1000 samples once per second; stdbuf -oL keeps the
    # pipe line-buffered so each sample arrives as soon as it is printed.
    cmd = ["stdbuf", "-oL", "intel_gpu_top", "-J", "-s", "1000"]

    while True:
        try:
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.DEVNULL, text=True)
            for line in p.stdout:
                # NOTE(review): these patterns only match when an engine name
                # and its "busy" value share one line of output — confirm
                # against the intel_gpu_top JSON layout actually in use.
                with gpu_lock:
                    for key, pat in _ENGINE_PATTERNS.items():
                        m = pat.search(line)
                        if m:
                            gpu_data[key] = float(m.group(1))
                    # Total is the max load of any single engine.
                    gpu_data["total"] = max(gpu_data["render"], gpu_data["video"],
                                            gpu_data["blitter"], gpu_data["ve"])
        except Exception:
            # Cool down on error, then retry the subprocess.
            time.sleep(2)
|
||||
# ---------------- SAMPLING ----------------

def sample_once():
    """Take one sample of all system metrics and append it to the history buffers.

    Side effects: appends one value to every deque in ``hist`` and advances the
    module-level previous-counter state (_prev_net, _prev_disk, _prev_time).
    """
    global _prev_net, _prev_disk, _prev_time

    now = time.time()
    # Floor the elapsed time so counter deltas never divide by ~zero.
    elapsed = max(now - _prev_time, 0.1)

    # Grab system stats.
    cpu = psutil.cpu_percent()
    ram = psutil.virtual_memory().percent
    net = psutil.net_io_counters()
    disk = psutil.disk_io_counters()

    # Calculate per-second rates from the counter deltas.
    in_r = (net.bytes_recv - _prev_net.bytes_recv) / elapsed
    out_r = (net.bytes_sent - _prev_net.bytes_sent) / elapsed
    read_r = (disk.read_bytes - _prev_disk.read_bytes) / elapsed
    write_r = (disk.write_bytes - _prev_disk.write_bytes) / elapsed

    # GPU data (thread-safe snapshot — the monitor thread owns gpu_data).
    with gpu_lock:
        g = gpu_data.copy()

    # Log to deques. Rates are clamped at 0 so counter resets (e.g. after a
    # network interface restart) never produce negative spikes.
    hist["timestamps"].append(datetime.now().isoformat(timespec='seconds'))
    hist["cpu_percent"].append(round(cpu, 1))
    hist["ram_percent"].append(round(ram, 1))
    hist["net_in_Bps"].append(int(max(0, in_r)))
    hist["net_out_Bps"].append(int(max(0, out_r)))
    hist["disk_read_Bps"].append(int(max(0, read_r)))
    hist["disk_write_Bps"].append(int(max(0, write_r)))
    hist["disk_percent"].append(round(psutil.disk_usage('/').percent, 1))
    hist["gpu_total"].append(round(g["total"], 1))
    hist["gpu_render"].append(round(g["render"], 1))
    hist["gpu_video"].append(round(g["video"], 1))
    hist["gpu_blitter"].append(round(g["blitter"], 1))
    hist["gpu_videoenhance"].append(round(g["ve"], 1))

    # Save state for next tick.
    _prev_net, _prev_disk, _prev_time = net, disk, now
|
||||
def write_json_atomic():
    """Serialize the history buffers and atomically replace the output file.

    Writes to TMP_FILE first and then os.replace()s it over OUT_FILE, so a
    reader polling OUT_FILE never observes a partially written JSON document.
    """
    payload = {k: list(v) for k, v in hist.items()}
    payload["sample_interval"] = SAMPLE_INTERVAL
    # Zulu-suffixed UTC timestamp so consumers get an unambiguous time zone.
    payload["generated_at"] = datetime.now(timezone.utc).isoformat(timespec='seconds').replace("+00:00", "Z")
    with open(TMP_FILE, "w") as f:
        json.dump(payload, f)
    # Atomic on POSIX: rename within the same filesystem.
    os.replace(TMP_FILE, OUT_FILE)
||||
def main():
    """Entry point: start the GPU monitor thread and run the sampling loop forever."""
    # GPU metrics need elevated privileges (intel_gpu_top); warn, don't abort —
    # every other metric still works unprivileged.
    if os.geteuid() != 0:
        print("WARNING: Script not running as root. GPU metrics will be 0.")

    # Start GPU monitor (daemon: dies with the main thread).
    threading.Thread(target=gpu_monitor, daemon=True).start()

    print(f"Monitoring started. Writing to {OUT_FILE}...")
    while True:
        try:
            sample_once()
            write_json_atomic()
        except Exception as e:
            # Keep the loop alive: a transient failure (e.g. disk full)
            # shouldn't kill long-running monitoring.
            print(f"Error: {e}")
        time.sleep(SAMPLE_INTERVAL)
|
||||
if __name__ == "__main__":
    main()
|
|
|||
Loading…
Reference in New Issue