import os

# Set a custom Hugging Face cache directory (optional).
# This must be set before importing transformers so the cache path takes effect.
os.environ["HF_HOME"] = "/tmp/huggingface"

import gradio as gr
from transformers import pipeline

# Load the text-generation pipeline with the fine-tuned GPT-2 model
pipe = pipeline("text-generation", model="Abu180/ie-gpt2-textgen")


def generate_answer(question):
    prompt = f"Question: {question}\nAnswer:"
    output = pipe(prompt, max_new_tokens=100)[0]["generated_text"]
    return output.replace(prompt, "")  # remove the prompt from the generated text


# Gradio interface
iface = gr.Interface(
    fn=generate_answer,
    inputs=gr.Textbox(placeholder="Ask your Industrial Engineering question..."),
    outputs="text",
    title="IE Answer Generator",
    description="Ask any Industrial Engineering question. The model is fine-tuned on IE Q&A data and generates an answer.",
)

iface.launch()