import torch
from transformers import AutoModelForCausalLM, GPT2TokenizerFast

# Load the fine-tuned model and tokenizer
model_path = "../models/recipe-gpt"
tokenizer = GPT2TokenizerFast.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)
model.eval()  # set to inference mode (disables dropout)

# Device-agnostic setup: prefer CUDA, then Apple Silicon (MPS), else CPU
if torch.cuda.is_available():
    device = "cuda"
elif torch.backends.mps.is_available():
    device = "mps"
else:
    device = "cpu"
# print("Using device:", device)
model.to(device)

# Generate recipe directions from a list of ingredients
def generate_recipe(ingredients, max_length=300, temperature=0.8, top_k=50, top_p=0.95):
    # Build the prompt in the same "<start>/Ingredients:/Directions:" format used in training
    prompt = "<start>\nIngredients:\n"
    for ing in ingredients:
        prompt += f"- {ing}\n"
    prompt += "Directions:\n"

    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs.input_ids.to(device)
    attention_mask = inputs.attention_mask.to(device)

    # Sample with the caller-supplied decoding settings
    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            attention_mask=attention_mask,
            do_sample=True,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            max_length=max_length,
            eos_token_id=tokenizer.convert_tokens_to_ids("<end>"),
        )

    # Keep special tokens so the "<end>" marker survives for the split below
    generated = tokenizer.decode(output_ids[0], skip_special_tokens=False)

    # Extract only the directions block
    if "Directions:" in generated:
        generated = generated.split("Directions:")[1]
    if "<end>" in generated:
        generated = generated.split("<end>")[0]
    return generated.strip()

def get_ingredients_from_user():
    user_input = input("Enter ingredients (comma-separated): ")
    ingredients = [i.strip() for i in user_input.split(",") if i.strip()]
    return ingredients

# Try generation
ingredients = get_ingredients_from_user()
recipe = generate_recipe(ingredients)
print("\nGenerated Recipe Directions:\n")
print(recipe)
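
# The sampling knobs in generate_recipe() can be tuned per call; as a sketch, a
# lower temperature and tighter top_k give more conservative directions
# (this ingredient list is hypothetical, shown only for illustration):
#   recipe = generate_recipe(["flour", "sugar", "eggs"], temperature=0.5, top_k=20)
#   print(recipe)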