import multiprocessing as mp
import os
import signal
import sys
import time

# Read config from env (optional tuning)
RAMP_SECONDS = int(os.getenv("RAMP_SECONDS", "60"))  # How long to keep adding workers
MAX_WORKERS = int(os.getenv("MAX_WORKERS", str(RAMP_SECONDS)))  # Cap workers if needed


def burn_cpu(worker_id: int) -> None:
    """
    Busy loop that just eats CPU.
    Runs indefinitely until the container/pod is killed.
    """
    print(f"[worker-{worker_id}] starting CPU burn", flush=True)
    x = 0
    while True:
        # Simple nonsense math to keep CPU busy
        x = (x * x + 1) % 1_000_000_007


def handle_signal(signum, frame):
    print(f"[main] Received signal {signum}, exiting...", flush=True)
    sys.exit(0)


def main():
    # Handle graceful termination from Kubernetes
    signal.signal(signal.SIGTERM, handle_signal)
    signal.signal(signal.SIGINT, handle_signal)

    workers = []
    print(f"[main] Starting CPU ramp: up to {MAX_WORKERS} workers over {RAMP_SECONDS} seconds", flush=True)

    for _ in range(RAMP_SECONDS):
        if len(workers) >= MAX_WORKERS:
            print("[main] Reached MAX_WORKERS cap, no more workers will be started", flush=True)
            break
        worker_id = len(workers) + 1
        p = mp.Process(target=burn_cpu, args=(worker_id,), daemon=True)
        p.start()
        workers.append(p)
        print(f"[main] Started worker {worker_id}, total workers: {len(workers)}", flush=True)
        # Sleep 1 second between each worker → gradual ramp
        time.sleep(1)

    print("[main] Ramp finished, workers will continue burning CPU until pod is killed", flush=True)

    # Keep main alive so the processes stay alive
    for p in workers:
        p.join()


if __name__ == "__main__":
    main()
---
Dockerfile
# Simple, small-ish Python base image
FROM python:3.12-slim

WORKDIR /app

# Copy the CPU hog script
COPY cpu_hog.py .

# No extra deps, pure stdlib
ENTRYPOINT ["python", "cpu_hog.py"]
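
A minimal build-and-run sketch for local testing (the `cpu-hog` image tag and the env values below are just illustrative choices, not part of the files above; omit the `-e` flags to use the script's defaults):

# Build the image from the directory containing cpu_hog.py and the Dockerfile
docker build -t cpu-hog .

# Run with a shorter ramp and a worker cap, overriding the env defaults
docker run --rm -e RAMP_SECONDS=30 -e MAX_WORKERS=4 cpu-hog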