ariG23498 HF Staff committed on
Commit 77f96d0 · verified · 1 Parent(s): 364d99c

Create memory-requirements-quantized-vs-dequantized.py

memory-requirements-quantized-vs-dequantized.py ADDED
@@ -0,0 +1,58 @@
+ # Measure and compare VRAM with and without MXFP4 dequantize
+ import gc
+ import torch
+ from transformers import AutoModelForCausalLM, Mxfp4Config
+
+ MODEL_ID = "openai/gpt-oss-20b"
+ DEVICE = "cuda:0"
+
+ def get_used_gb():
+     free, total = torch.cuda.mem_get_info()
+     return (total - free) / (1024**3), total / (1024**3)
+
+ def clear_memory():
+     del_vars = [k for k in list(globals().keys()) if k.startswith("_tmp_")]
+     for k in del_vars:
+         globals().pop(k, None)
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.synchronize()
+
+ assert torch.cuda.is_available(), "CUDA is not available."
+
+ # --- Dequantized (heavier) ---
+ clear_memory()
+ before_deq_used, total_gb = get_used_gb()
+ # dequantize=True unpacks the MXFP4 checkpoint to a higher-precision dtype at load time
+ qconf = Mxfp4Config(dequantize=True)
+ model_deq = AutoModelForCausalLM.from_pretrained(
+     MODEL_ID,
+     torch_dtype="auto",
+     device_map=DEVICE,
+     quantization_config=qconf,
+ ).eval()
+ after_deq_used, _ = get_used_gb()
+
+ # --- Quantized (lighter) ---
+ # No quantization_config here, so the checkpoint's native MXFP4 weights are kept.
+ del model_deq
+ clear_memory()
+ before_q_used, _ = get_used_gb()
+ model_q = AutoModelForCausalLM.from_pretrained(
+     MODEL_ID,
+     torch_dtype="auto",
+     device_map=DEVICE,
+ ).eval()
+ after_q_used, _ = get_used_gb()
+
+ print(f"[dequantized] used before: {before_deq_used:.2f} GB, after: {after_deq_used:.2f} GB / total {total_gb:.2f} GB")
+ print(f"[quantized ] used before: {before_q_used:.2f} GB, after: {after_q_used:.2f} GB / total {total_gb:.2f} GB")
+
+ # Make these available for plotting
+ mx_results = {
+     "total_gb": total_gb,
+     "after_dequantized_gb": after_deq_used,
+     "after_quantized_gb": after_q_used,
+ }
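+
+ # A minimal plotting sketch for mx_results (assumption: matplotlib is
+ # installed; the chart layout is illustrative, not part of the original
+ # script):
+ import matplotlib.pyplot as plt
+
+ labels = ["dequantized", "quantized"]
+ values = [mx_results["after_dequantized_gb"], mx_results["after_quantized_gb"]]
+ fig, ax = plt.subplots()
+ ax.bar(labels, values)
+ ax.axhline(mx_results["total_gb"], linestyle="--", label=f"total VRAM ({mx_results['total_gb']:.1f} GB)")
+ ax.set_ylabel("VRAM used (GB)")
+ ax.set_title("gpt-oss-20b: quantized vs dequantized memory")
+ ax.legend()
+ plt.show()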
+
+ # Outputs:
+ # [dequantized] used before: 0.41 GB, after: 43.18 GB / total 79.25 GB
+ # [quantized ] used before: 0.49 GB, after: 13.37 GB / total 79.25 GB
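+
+ # Rough sanity check of the dequantized figure (a sketch, assuming
+ # gpt-oss-20b holds ~21e9 parameters in bf16, i.e. 2 bytes each; the
+ # measured 43.18 GB also includes CUDA context and buffers):
+ n_params = 21e9
+ est_gb = n_params * 2 / 1024**3
+ print(f"bf16 weight estimate: ~{est_gb:.1f} GB")  # ~39 GB, in the same ballpark as the measurement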