EdmundYi's picture
Update model_wrappers/model_b.py (#5)
cbecf26 verified
raw
history blame
374 Bytes
from transformers import pipeline
# Text-generation pipeline backed by distilgpt2. The model weights are
# downloaded/loaded once at module import time and reused by run_model_b.
model_b = pipeline("text-generation", model="distilgpt2")
def run_model_b(prompt: str) -> str:
    """Generate a sampled text continuation of *prompt* with distilgpt2.

    Sampling is enabled (temperature/top-k/top-p), so repeated calls with
    the same prompt return different text.

    Args:
        prompt: Seed text for the model to continue.

    Returns:
        The generated text; per the transformers text-generation pipeline,
        this includes the original prompt as a prefix.
    """
    # max_new_tokens bounds only the *generated* tokens. The previous
    # max_length=100 counted the prompt's tokens toward the limit, so a
    # prompt near or over 100 tokens produced little or no new text.
    output = model_b(
        prompt,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.8,
        top_k=50,
        top_p=0.95,
    )
    # The pipeline returns a list of dicts, one per generated sequence;
    # only one sequence is requested, so take the first.
    return output[0]["generated_text"]