import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# FinGPT base model; weights are downloaded from the Hugging Face hub on
# first run and cached locally afterwards.
model_name = "AI4FinanceFoundation/FinGPT-Foundation"

tokenizer = AutoTokenizer.from_pretrained(model_name)
# device_map="auto" lets accelerate place the weights on GPU when one is
# available and fall back to CPU otherwise.
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
def answer(query: str) -> str:
    """Generate a FinGPT completion for a user's financial question.

    Args:
        query: Free-form question text from the Gradio textbox.

    Returns:
        The generated answer only. ``return_full_text=False`` keeps the
        pipeline from echoing the prompt at the start of the output,
        which the original code displayed back to the user.
    """
    query = query.strip()
    if not query:
        # Avoid running generation on an empty prompt.
        return "Please enter a question."
    # do_sample=True -> output is stochastic; no seed is fixed here.
    outputs = pipe(
        query,
        max_new_tokens=300,
        do_sample=True,
        return_full_text=False,
    )
    return outputs[0]["generated_text"]
if __name__ == "__main__":
    # Guarded so importing this module (e.g. in tests) does not start
    # a web server; launch() blocks and serves the UI locally.
    gr.Interface(
        fn=answer,
        inputs="text",
        outputs="text",
        title="FinGPT QA",
    ).launch()