wenhuach committed on
Commit
7dc248b
·
verified ·
1 Parent(s): 1b14e5b

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -21,7 +21,7 @@ Please follow the license of the original model.
21
  ~~~python
22
  from transformers import AutoModelForCausalLM, AutoTokenizer
23
 
24
- quantized_model_dir = "Intel/DeepSeek-R1-0528-Qwen3-8B-int4-AutoRound-gptq-inc"
25
 
26
  model = AutoModelForCausalLM.from_pretrained(
27
  quantized_model_dir,
@@ -211,7 +211,7 @@ prompts = [
211
  "Hello, my name is",
212
  ]
213
  sampling_params = SamplingParams(temperature=0.8, top_p=0.95) ##change this to match official usage
214
- model_name = "Intel/DeepSeek-R1-0528-Qwen3-8B-int4-AutoRound-gptq-inc"
215
  llm = LLM(model=model_name, tensor_parallel_size=1)
216
 
217
  outputs = llm.generate(prompts, sampling_params)
 
21
  ~~~python
22
  from transformers import AutoModelForCausalLM, AutoTokenizer
23
 
24
+ quantized_model_dir = "Intel/DeepSeek-R1-0528-Qwen3-8B-int4-AutoRound"
25
 
26
  model = AutoModelForCausalLM.from_pretrained(
27
  quantized_model_dir,
 
211
  "Hello, my name is",
212
  ]
213
  sampling_params = SamplingParams(temperature=0.8, top_p=0.95) ##change this to match official usage
214
+ model_name = "Intel/DeepSeek-R1-0528-Qwen3-8B-int4-AutoRound"
215
  llm = LLM(model=model_name, tensor_parallel_size=1)
216
 
217
  outputs = llm.generate(prompts, sampling_params)