Update README.md
README.md (CHANGED):
@@ -63,11 +63,9 @@ You can use this model just as any other HuggingFace models:
 from transformers import AutoModelForCausalLM, AutoTokenizer
 model = AutoModelForCausalLM.from_pretrained('fla-hub/rwkv7-0.4B-world', trust_remote_code=True)
 tokenizer = AutoTokenizer.from_pretrained('fla-hub/rwkv7-0.4B-world', trust_remote_code=True)
-model = model.cuda()
+model = model.cuda()  # Supported on Nvidia/AMD/Intel, e.g. model.xpu()
 prompt = "What is a large language model?"
 messages = [
-    {"role": "user", "content": "Who are you?"},
-    {"role": "assistant", "content": "I am a GPT-3 based model."},
     {"role": "user", "content": prompt}
 ]
 text = tokenizer.apply_chat_template(
@@ -80,7 +78,11 @@ model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
 
 generated_ids = model.generate(
     **model_inputs,
-    max_new_tokens=
+    max_new_tokens=4096,
+    do_sample=True,
+    temperature=1.0,
+    top_p=0.3,
+    repetition_penalty=1.2
 )
 generated_ids = [
     output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
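For reference, a minimal runnable sketch of the example as it reads after this change. The `apply_chat_template` arguments (`tokenize=False`, `add_generation_prompt=True`) and the closing `batch_decode` step fall outside the hunks shown above, so they are assumed here from the standard `transformers` chat pattern rather than taken from the README itself:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained('fla-hub/rwkv7-0.4B-world', trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained('fla-hub/rwkv7-0.4B-world', trust_remote_code=True)
model = model.cuda()  # Supported on Nvidia/AMD/Intel, e.g. model.xpu()

prompt = "What is a large language model?"
messages = [
    {"role": "user", "content": prompt}
]
# Assumed standard chat-template call; the diff truncates before these arguments.
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=4096,
    do_sample=True,
    temperature=1.0,
    top_p=0.3,
    repetition_penalty=1.2
)
# Drop the prompt tokens so only the newly generated completion is decoded.
generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]  # assumed decode step
print(response)
```

The explicit settings (temperature 1.0 with a low top_p of 0.3 and a mild repetition penalty) are in line with sampling parameters commonly suggested for RWKV "World" models, and pinning them in the example makes generation reproducible instead of relying on generation-config defaults.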