Update README.md

README.md
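The hunk picks up at README line 30, just after the model has generated output for an unmasked input. Below is a minimal sketch of the setup the snippet assumes, reconstructed from the hunk's truncated context line and the identical `generate()` call that appears in full later in the snippet; the checkpoint name, tokenizer kwargs, and source sentence are illustrative assumptions, not part of this commit:

```python
# Illustrative setup (assumed; defined earlier in the README, outside this
# hunk). The checkpoint name is a guess at an mBART-style multilingual model
# that uses <2xx> language tags.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("ai4bharat/IndicBART", do_lower_case=False, use_fast=False, keep_accents=True)
model = AutoModelForSeq2SeqLM.from_pretrained("ai4bharat/IndicBART")

# Encode a source sentence; the trailing </s> plus a <2xx> tag marks the
# target language, matching the format used in the masked example below.
inp = tokenizer("I am a boy </s> <2en>", add_special_tokens=False, return_tensors="pt", padding=True).input_ids

# Beam-search generation, seeded so the decoder starts with the <2en> tag
# (this is the hunk's context line, shown truncated in the diff).
model_output = model.generate(inp, use_cache=True, num_beams=4, max_length=20, min_length=1, early_stopping=True, pad_token_id=tokenizer.pad_token_id, decoder_start_token_id=tokenizer(["<2en>"], add_special_tokens=False).input_ids[0][0])
```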
```python
# Decode to get output strings

decoded_output=tokenizer.decode(model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)

print(decoded_output) # I am a boy

# What if we mask?

inp = tokenizer("I am [MASK] </s> <2en>", add_special_tokens=False, return_tensors="pt", padding=True).input_ids

model_output=model.generate(inp, use_cache=True, num_beams=4, max_length=20, min_length=1, early_stopping=True, pad_token_id=tokenizer.pad_token_id, decoder_start_token_id=tokenizer(["<2en>"], add_special_tokens=False).input_ids[0][0])

decoded_output=tokenizer.decode(model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)

print(decoded_output) # I am happy
```
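In both calls, `decoder_start_token_id` seeds the decoder with the `<2en>` language tag, forcing generation to come out in English; with `[MASK]` in the input, the same `generate()` call performs infilling, and beam search fills the blank with a plausible word ("happy" above). The inline expression simply looks up that tag's vocabulary id, a small sketch of which is shown here for clarity:

```python
# What the inline expression in generate() resolves to: the vocabulary id
# of the <2en> target-language tag (first token of the first batch entry).
start_id = tokenizer(["<2en>"], add_special_tokens=False).input_ids[0][0]
```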