xkakashi commited on
Commit
7827a37
·
verified ·
1 Parent(s): 26a5f67

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +8 -10
app.py CHANGED
@@ -1,24 +1,22 @@
1
 
 
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
 
4
- base_model = "meta‑llama/Llama‑2‑7b‑chat‑hf" # base tokenizer
5
- adapter = "FinGPT/fingpt‑mt_llama2‑7b_lora"
6
 
7
- tokenizer = AutoTokenizer.from_pretrained(base_model)
8
  model = AutoModelForCausalLM.from_pretrained(adapter, device_map="auto")
9
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
10
 
11
- chat_history = []
12
-
13
  def chat(user_input, history):
14
  prompt = (history + "\nUser: " + user_input) if history else ("User: " + user_input)
15
- output = pipe(prompt, max_new_tokens=300, do_sample=True)[0]["generated_text"]
16
- history = prompt + "\nAssistant: " + output
17
- return output, history
18
 
19
  with gr.Blocks() as demo:
20
  chatbot = gr.Chatbot()
21
- txt = gr.Textbox(placeholder="Ask a finance question...", show_label=False)
22
  state = gr.State("")
23
- txt.submit(lambda msg, hist: (chatbot + [(msg, chat(msg, hist)[0])], chat(msg, hist)[1]), [txt, state], [chatbot, state])
24
  demo.launch()
 
# FinGPT finance-chat demo: load the tokenizer from the base Llama-2 chat
# model and the weights from the FinGPT repo, then expose them through a
# text-generation pipeline (consumed by chat() and the Gradio UI below).
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Hugging Face Hub model ids.
base = "meta-llama/Llama-2-7b-chat-hf"       # source of the tokenizer
adapter = "FinGPT/fingpt-mt_llama2-7b_lora"  # FinGPT LoRA adapter repo

tokenizer = AutoTokenizer.from_pretrained(base)
# NOTE(review): "adapter" names a PEFT/LoRA adapter repository. Loading it
# directly with AutoModelForCausalLM does not merge it onto the base model's
# weights — confirm whether peft (e.g. AutoPeftModelForCausalLM) is required
# here instead.
model = AutoModelForCausalLM.from_pretrained(adapter, device_map="auto")
# device_map="auto" shards/places the model across available devices.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
12
def chat(user_input, history):
    """Generate one assistant reply and extend the running transcript.

    Args:
        user_input: the user's new message.
        history: the plain-text transcript so far ("" on the first turn).

    Returns:
        A ``(reply, new_history)`` tuple: the assistant's reply text, and
        the transcript with this User/Assistant exchange appended.
    """
    prompt = (history + "\nUser: " + user_input) if history else ("User: " + user_input)
    generated = pipe(prompt, max_new_tokens=256, do_sample=True)[0]["generated_text"]
    # The text-generation pipeline echoes the prompt at the start of
    # "generated_text" by default (return_full_text=True). Strip it so the
    # reply — and therefore the stored transcript — contains only the new
    # completion, instead of duplicating the whole prompt on every turn.
    reply = generated[len(prompt):] if generated.startswith(prompt) else generated
    reply = reply.strip()
    return reply, prompt + "\nAssistant: " + reply
 
with gr.Blocks() as demo:
    # Chat UI: rendered (user, assistant) pairs live in the Chatbot;
    # the plain-text transcript fed back to chat() lives in State.
    chatbot = gr.Chatbot()
    txt = gr.Textbox(placeholder="Ask a finance question...")
    state = gr.State("")

    def _respond(message, transcript, pairs):
        # Run the model exactly once per submit. The original lambda called
        # chat() twice — with do_sample=True the displayed reply and the
        # reply saved into the transcript came from two different sampled
        # generations (and cost double) — and it did `chatbot + [...]`,
        # which adds a list to the Chatbot *component object* rather than
        # its current value (a runtime TypeError).
        reply, new_transcript = chat(message, transcript)
        # Returning "" for the textbox clears it after each submit.
        return pairs + [(message, reply)], new_transcript, ""

    # The chatbot's current value is passed in as an input component so the
    # handler can append to it.
    txt.submit(_respond, [txt, state, chatbot], [chatbot, state, txt])
    demo.launch()