ariG23498 (HF Staff) committed
Commit 165be40 · verified · 1 Parent(s): 90fd4a4

Create cb-bench.py

Files changed (1):
  1. cb-bench.py (+90, -0)

cb-bench.py ADDED
@@ -0,0 +1,90 @@
+ import os; os.environ["CUDA_VISIBLE_DEVICES"] = "3"
+ import argparse
+ import time
+
+ import datasets
+ import torch
+
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from transformers.generation import GenerationConfig
+
+
+ MODEL_ID = "Qwen/Qwen3-4B-Instruct-2507"
+
+
+ if __name__ == "__main__":
+     # Parse args
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--num-blocks", "-n", type=int, default=None)
+     parser.add_argument("--max-batch-tokens", "-b", type=int, default=None)
+     parser.add_argument(
+         "--attn", type=str, default="paged_attention|kernels-community/flash-attn", help="Attention implementation"
+     )
+     parser.add_argument("--samples", type=int, default=500)
+     args = parser.parse_args()
+
+     # Prepare model
+     model = AutoModelForCausalLM.from_pretrained(
+         MODEL_ID,
+         attn_implementation=args.attn,
+         dtype=torch.bfloat16,
+     )
+     model = model.cuda().eval()
+
+     # Prepare tokenizer and dataset
+     tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, padding_side="left")
+     dataset = datasets.load_dataset("openai/gsm8k", "socratic", split="test")
+     dataset = dataset.select(range(args.samples))
+     tokenized_datasets = dataset.map(lambda x: tokenizer(x["question"]), batched=True)
+     simple_batch_inputs = [item["input_ids"] for item in tokenized_datasets]
+
+     # Prepare generation config
+     generation_config = GenerationConfig(
+         max_new_tokens=512,
+         use_cuda_graph=False,  # Not supported for simple version
+         eos_token_id=tokenizer.eos_token_id,
+         pad_token_id=tokenizer.pad_token_id,
+         do_sample=False,
+         num_blocks=args.num_blocks,
+         max_batch_tokens=args.max_batch_tokens,
+     )
+
+     # Warmup iterations
+     _ = model.generate_batch(
+         inputs=simple_batch_inputs[: min(5, args.samples)],
+         generation_config=generation_config,
+         slice_inputs=True,
+     )
+
+     # Actual batch generation
+     print("--- Running CB Generation Example ---")
+     start_time = time.time()
+     batch_outputs = model.generate_batch(
+         inputs=simple_batch_inputs,
+         generation_config=generation_config,
+         slice_inputs=True,
+     )
+     end_time = time.time()
+     print("Done with batch generation.")
+
+     # Decode outputs
+     token_count = 0
+     for i, request in enumerate(batch_outputs):
+         input_text = tokenizer.decode(batch_outputs[request].prompt_ids, skip_special_tokens=True)
+         # Try to decode the output
+         try:
+             output_text = tokenizer.decode(batch_outputs[request].generated_tokens, skip_special_tokens=True)
+             token_count += len(batch_outputs[request].generated_tokens[1:])
+         except Exception as e:
+             print(f"Decoding failed for request {request}: {e}")
+             continue
+
+     # Compute stats and maybe print them
+     gen_time = end_time - start_time
+     tok_per_sec = token_count / gen_time
+     print("-" * 20)
+     print("--- Finished CB Generation Example ---\n")
+     print(f"CB generation took: {gen_time:.2f} seconds for {token_count} tokens. {tok_per_sec:.2f}tok/s")
+
+     # Example run: python cb-bench.py -n 512 -b 32 --samples 100
+     # CB generation took: 101.44 seconds for 49197 tokens. 484.97tok/s
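
The trailing comment records a single configuration. As a hedged illustration (not part of this commit), a small driver like the one below could sweep the script's own -n / -b flags over a grid; the grid values and the 100-sample setting are assumptions for the sketch, not recommendations.

import itertools
import subprocess

# Hypothetical sweep driver for cb-bench.py; grid values are illustrative assumptions.
NUM_BLOCKS = [256, 512, 1024]
MAX_BATCH_TOKENS = [32, 64, 128]

for num_blocks, max_batch_tokens in itertools.product(NUM_BLOCKS, MAX_BATCH_TOKENS):
    print(f"--- num_blocks={num_blocks}, max_batch_tokens={max_batch_tokens} ---")
    # Each run prints its own "CB generation took: ..." throughput line from cb-bench.py.
    subprocess.run(
        ["python", "cb-bench.py", "-n", str(num_blocks), "-b", str(max_batch_tokens), "--samples", "100"],
        check=True,
    )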