Update README.md
Browse files
README.md
CHANGED
@@ -21,7 +21,7 @@ Please follow the license of the original model.
 21   ~~~python
 22   from transformers import AutoModelForCausalLM, AutoTokenizer
 23
-24   quantized_model_dir = "Intel/DeepSeek-R1-0528-Qwen3-8B-int4-AutoRound
+24   quantized_model_dir = "Intel/DeepSeek-R1-0528-Qwen3-8B-int4-AutoRound"
 25
 26   model = AutoModelForCausalLM.from_pretrained(
 27       quantized_model_dir,
@@ -211,7 +211,7 @@ prompts = [
 211      "Hello, my name is",
 212  ]
 213  sampling_params = SamplingParams(temperature=0.8, top_p=0.95) ##change this to match official usage
-214  model_name = "Intel/DeepSeek-R1-0528-Qwen3-8B-int4-AutoRound
+214  model_name = "Intel/DeepSeek-R1-0528-Qwen3-8B-int4-AutoRound"
 215  llm = LLM(model=model_name, tensor_parallel_size=1)
 216
 217  outputs = llm.generate(prompts, sampling_params)