AI reporters API feed rss
that's why they're POOR - he is rich, 50,000 idiots 😂:Cry:
import os
import time
import torch
import signal
import sys
from datetime import datetime, timedelta

# Configuration
DURATION_MINUTES = 5
WARMUP_SECONDS = 10
BATCH_SIZE = 2048
SEQ_LEN = 1024
DTYPE = torch.float16  # Use half precision for higher throughput


class LoadTester:
    def __init__(self):
        self.device = torch.device('cuda:0')
        self.start_time = None
        self.end_time = None
        self.running = True
        # Signal handlers for graceful shutdown
        signal.signal(signal.SIGINT, self.signal_handler)
        signal.signal(signal.SIGTERM, self.signal_handler)

    def signal_handler(self, signum, frame):
        print(f"\n[INFO] Received signal {signum}, shutting down gracefully...")
        self.running = False

    def print_gpu_stats(self):
        if torch.cuda.is_available():
            gpu_mem = torch.cuda.memory_allocated(self.device) / 1024**3
            gpu_util = torch.cuda.utilization(self.device) if hasattr(torch.cuda, 'utilization') else 0
            print(f"[STATS] GPU Mem: {gpu_mem:.1f}GB, Util: {gpu_util:.0f}%")

    def warmup(self):
        print("[WARMUP] Starting GPU warmup...")
        torch.cuda.synchronize()
        start = time.time()
        while time.time() - start < WARMUP_SECONDS:
            # Allocate and compute large tensors
            a = torch.randn(BATCH_SIZE, SEQ_LEN, 128, dtype=DTYPE, device=self.device)
            b = torch.randn(BATCH_SIZE, SEQ_LEN, 128, dtype=DTYPE, device=self.device)
            c = torch.matmul(a, b.transpose(-2, -1))
            del a, b, c
            torch.cuda.empty_cache()
        torch.cuda.synchronize()
        print(f"[WARMUP] Complete ({WARMUP_SECONDS}s)")

    def benchmark_loop(self):
        print("[BENCHMARK] Starting 5-minute load test...")
        iteration = 0
        while self.running:
            iteration += 1

            # Phase 1: Matrix multiplications (compute intensive)
            a = torch.randn(BATCH_SIZE // 2, SEQ_LEN, 256, dtype=DTYPE, device=self.device)
            b = torch.randn(BATCH_SIZE // 2, SEQ_LEN, 256, dtype=DTYPE, device=self.device)
            for _ in range(3):
                c = torch.matmul(a, b.transpose(-2, -1))
            torch.cuda.synchronize()

            # Phase 2: Memory intensive allocations
            large_tensor = torch.empty(SEQ_LEN * 1024, dtype=DTYPE, device=self.device)
            large_tensor.normal_()

            # Phase 3: Attention-like operations
            q = torch.randn(SEQ_LEN, 64, dtype=DTYPE, device=self.device)
            k = torch.randn(SEQ_LEN, 64, dtype=DTYPE, device=self.device)
            v = torch.randn(SEQ_LEN, 64, dtype=DTYPE, device=self.device)
            scores = torch.matmul(q, k.transpose(-2, -1)) / (64 ** 0.5)
            attn = torch.softmax(scores, dim=-1)
            output = torch.matmul(attn, v)

            # Cleanup
            del a, b, c, large_tensor, q, k, v, scores, attn, output

            # Stats every 30 iterations
            if iteration % 30 == 0:
                elapsed = time.time() - self.start_time
                self.print_gpu_stats()
                print(f"[PROGRESS] Iteration {iteration}, Elapsed: {elapsed:.0f}s")

            # Check duration
            if time.time() - self.start_time >= DURATION_MINUTES * 60:
                self.running = False

        torch.cuda.synchronize()

    def run(self):
        print(f"[LOADTEST] Starting on {torch.cuda.get_device_name(0)}")
        print(f"[CONFIG] Duration: {DURATION_MINUTES}min, Batch: {BATCH_SIZE}, Seq: {SEQ_LEN}")
        self.warmup()
        self.start_time = time.time()
        self.benchmark_loop()
        self.end_time = time.time()

        # Final stats
        duration = self.end_time - self.start_time
        peak_mem = torch.cuda.max_memory_allocated(self.device) / 1024**3
        print(f"\n[COMPLETE] Load test finished!")
        print(f"[SUMMARY] Duration: {duration:.1f}s, Peak GPU Mem: {peak_mem:.1f}GB")
        print_gpu_info()


def print_gpu_info():
    if torch.cuda.is_available():
        print(f"GPU: {torch.cuda.get_device_name(0)}")
        print(f"CUDA Version: {torch.version.cuda}")
        print(f"GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f}GB")


if __name__ == "__main__":
    tester = LoadTester()
    try:
        tester.run()
    except KeyboardInterrupt:
        print("\n[INFO] Interrupted by user")
    finally:
        torch.cuda.empty_cache()
:Explosion:GPU CUDA:Green_skull: 🐍#python torch load test script - checks whether your test setup is correct and is actually loading the GPU. Works in a container too, not just on the bare-metal host. #linuxtsr

torchrun --nproc_per_node=1 --standalone loadtest.py

Run it only once all dependencies and requirements are installed properly. You can adjust these values yourself to suit your GPU:

# Configuration
DURATION_MINUTES = 5
WARMUP_SECONDS = 10
BATCH_SIZE = 8192
SEQ_LEN = 4096

:Wait What?:
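If you'd rather not hand-edit those values, here's a minimal sketch that sizes them from total VRAM - the thresholds and tier values are illustrative assumptions, not benchmarked cutoffs:

import torch

# Illustrative auto-sizing: scale BATCH_SIZE / SEQ_LEN with total VRAM.
# These thresholds are assumptions - tune them for your own card.
def pick_config():
    total_gb = torch.cuda.get_device_properties(0).total_memory / 1024**3
    if total_gb >= 24:       # e.g. 3090/4090-class cards
        return 8192, 4096
    elif total_gb >= 12:     # e.g. an RTX 3060 12GB
        return 2048, 1024
    else:                    # smaller cards
        return 1024, 512

BATCH_SIZE, SEQ_LEN = pick_config()
print(f"Using BATCH_SIZE={BATCH_SIZE}, SEQ_LEN={SEQ_LEN}")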
sample results if it runs

[LOADTEST] Starting on NVIDIA GeForce RTX 3060
[CONFIG] Duration: 0.2min, Batch: 2048, Seq: 1024
[WARMUP] Starting GPU warmup...
[WARMUP] Complete (8s)
[BENCHMARK] Starting 5-minute load test...
[STATS] GPU Mem: 0.0GB, Util: 99%
[PROGRESS] Iteration 30, Elapsed: 2s
[STATS] GPU Mem: 0.0GB, Util: 99%
[PROGRESS] Iteration 60, Elapsed: 5s
[STATS] GPU Mem: 0.0GB, Util: 99%
[PROGRESS] Iteration 90, Elapsed: 7s
[STATS] GPU Mem: 0.0GB, Util: 99%
[PROGRESS] Iteration 120, Elapsed: 9s
[STATS] GPU Mem: 0.0GB, Util: 99%
[PROGRESS] Iteration 150, Elapsed: 11s

[COMPLETE] Load test finished!
[SUMMARY] Duration: 12.0s, Peak GPU Mem: 5.0GB
GPU: NVIDIA GeForce RTX 3060
CUDA Version: 13.0
GPU Memory: 11.6GB
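The 0.0GB readings in [STATS] next to a 5.0GB peak look odd but are expected: the loop dels its tensors before printing stats, and torch.cuda.memory_allocated reports the current allocation while torch.cuda.max_memory_allocated tracks the peak. A minimal sketch of the difference (tensor size chosen just for illustration):

import torch

dev = torch.device('cuda:0')
x = torch.randn(1024, 1024, 1024, dtype=torch.float16, device=dev)  # ~2GiB tensor
print(f"current: {torch.cuda.memory_allocated(dev) / 1024**3:.1f}GB")      # ~2.0 while x is alive
del x
print(f"current: {torch.cuda.memory_allocated(dev) / 1024**3:.1f}GB")      # ~0.0 after the del
print(f"peak:    {torch.cuda.max_memory_allocated(dev) / 1024**3:.1f}GB")  # peak stays ~2.0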
creature is muted :Cry:
did anyone try this yet? https://web.nostrord.com/
https://app.flotilla.social/spaces/croissant.fiatjaf.com/pb9ej9715xqu - those who are live and can see this, join the test - temp relay for a group chat test, ongoing live now
ok thanks
can see ur nsec
how is the lead dev selected - based on app download/usage stats across web, APK, and Apple?
u really think PEDO has any credibility left in tweets wrt war
2nd hand / used products? Is payment escrowed, or does it come to the merchant immediately?
:Explosion:struck by Persian missile💣
swiss mafia
PEDO FURY
outright war crime from the elite executive branch
PEDOSTEIN FURY
captjack