xkakashi committed on
Commit
4644a82
·
verified ·
1 Parent(s): 52d91fd

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +16 -8
app.py CHANGED
# Previous revision of the app: a single-turn FinGPT question-answering UI.
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Load the foundation model and tokenizer once at startup;
# device_map="auto" lets accelerate place layers on available devices.
model_name = "AI4FinanceFoundation/FinGPT-Foundation"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)


def answer(query):
    """Generate a free-form answer for a single finance question."""
    result = pipe(query, max_new_tokens=300, do_sample=True)
    return result[0]["generated_text"]


gr.Interface(fn=answer, inputs="text", outputs="text", title="FinGPT QA").launch()
 
 
 
 
 
 
 
 
 
 
 
 
# Gradio chat app for the FinGPT multi-task LLaMA-2 LoRA model.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Single source of truth for the checkpoint id (it was previously
# repeated verbatim in both from_pretrained calls).
MODEL_ID = "FinGPT/fingpt-mt_llama2-7b_lora"

# trust_remote_code=True: the repo ships custom model code that must be
# executed to load the checkpoint. device_map="auto" lets accelerate
# spread layers across the available devices.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID, trust_remote_code=True, device_map="auto"
)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# NOTE(review): unused — conversation state is held in gr.State inside the
# Blocks UI, not here. Kept only so any external reference to this module
# attribute keeps working.
chat_history = []
 
 
11
 
12
def chat(user_input, history):
    """Run one conversational turn against the FinGPT pipeline.

    Parameters
    ----------
    user_input : str
        The user's new message.
    history : str
        Transcript of prior turns ("User: ...\\nAssistant: ..." lines),
        or "" / falsy on the first turn.

    Returns
    -------
    tuple[str, str]
        (assistant_reply, updated_transcript).
    """
    prompt = (history + "\nUser: " + user_input) if history else ("User: " + user_input)
    # A text-generation pipeline returns prompt + continuation in
    # "generated_text". The original returned that full text as the reply,
    # so every answer echoed the whole conversation and the transcript
    # re-embedded the prompt each turn (quadratic growth). Keep only the
    # newly generated continuation.
    full_text = pipe(prompt, max_new_tokens=300, do_sample=True)[0]["generated_text"]
    reply = full_text[len(prompt):].strip() if full_text.startswith(prompt) else full_text
    history = prompt + "\nAssistant: " + reply
    return reply, history
17
+
18
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    txt = gr.Textbox(placeholder="Ask a finance question...", show_label=False)
    # Running transcript string fed to chat(); the Chatbot holds the
    # (user, assistant) pairs for display.
    state = gr.State("")

    def _respond(message, pairs, transcript):
        """Submit handler: run one turn and update chatbot/state/textbox.

        The original wired a lambda that did `chatbot + [...]` — adding a
        list to the Chatbot *component* object, a TypeError at runtime —
        and called chat() twice per submit, doubling inference cost. This
        handler receives the chatbot's current value as an input, calls
        chat() exactly once, and clears the textbox.
        """
        reply, transcript = chat(message, transcript)
        pairs = (pairs or []) + [(message, reply)]
        return pairs, transcript, ""

    txt.submit(_respond, [txt, chatbot, state], [chatbot, state, txt])
    demo.launch()