Commit 3522113
Parent(s): b70b06b
Update README.md
README.md CHANGED

@@ -12,27 +12,21 @@ Bloom (2.5 B) Scientific Model fine-tuned on Zen knowledge
 #####


+```python
 from transformers import AutoTokenizer, AutoModelForCausalLM

-
-tokenizer = AutoTokenizer.from_pretrained("MultiTrickFox/bloom-2b5_Zen")
+tokenizer = AutoTokenizer.from_pretrained("MultiTrickFox/bloom-2b5_Zen")
 model = AutoModelForCausalLM.from_pretrained("MultiTrickFox/bloom-2b5_Zen")

-model
+model
 tokenizer.pad_token_id = tokenizer.eos_token_id

-
 generator = pipeline('text-generation', model=model, tokenizer=tokenizer)

+inp = [ """Today""", """Yesterday""" ]

-
-
-"""Yesterday"""
-]
-
-out = generator(
-inp.cuda(),
-do_sample=True,
+out = generator(
+inp, do_sample=True,

 temperature=.6,
 typical_p=.7,

@@ -43,5 +37,5 @@ out = generator(
 max_time=60, # seconds
 )

-
-
+for o in out: print(o[0]['generated_text'])
+```
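For reference, the snippet as committed still calls `pipeline` without importing it, keeps a stray bare `model` statement, and no longer places inputs on a device now that `inp.cuda()` is gone. Below is a minimal self-contained sketch of the intended usage, not the committed README verbatim; the added `pipeline` import and the `device=0` argument (a single-GPU assumption, omit it to run on CPU) are editorial.

```python
# Minimal sketch, assuming the standard transformers text-generation pipeline.
# device=0 is an assumption (single GPU); remove it to run on CPU.
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Load the fine-tuned checkpoint and its tokenizer from the Hub
tokenizer = AutoTokenizer.from_pretrained("MultiTrickFox/bloom-2b5_Zen")
model = AutoModelForCausalLM.from_pretrained("MultiTrickFox/bloom-2b5_Zen")

# As in the README, pad with the EOS token so batched prompts can be generated together
tokenizer.pad_token_id = tokenizer.eos_token_id

# Build the text-generation pipeline
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)

# Prompts go in as plain strings; the pipeline handles tokenization and batching
inp = ["Today", "Yesterday"]

out = generator(
    inp,
    do_sample=True,   # sample rather than decode greedily
    temperature=.6,   # sampling temperature from the README
    typical_p=.7,     # typical-decoding filter, as set in the README
    max_time=60,      # stop generating after 60 seconds per call
)

# One list of candidates per prompt; print the first candidate of each
for o in out:
    print(o[0]["generated_text"])
```

With a list of prompts, the pipeline returns one list of candidate generations per prompt, which is why the final loop indexes `o[0]`.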