Upload app.py with huggingface_hub
app.py
CHANGED
@@ -1,24 +1,22 @@
 
+import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
-
-adapter = "FinGPT/fingpt
+base = "meta-llama/Llama-2-7b-chat-hf"
+adapter = "FinGPT/fingpt-mt_llama2-7b_lora"
 
-tokenizer = AutoTokenizer.from_pretrained(
+tokenizer = AutoTokenizer.from_pretrained(base)
 model = AutoModelForCausalLM.from_pretrained(adapter, device_map="auto")
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
-chat_history = []
-
 def chat(user_input, history):
     prompt = (history + "\nUser: " + user_input) if history else ("User: " + user_input)
-    output = pipe(prompt, max_new_tokens=
-
-    return output, history
+    output = pipe(prompt, max_new_tokens=256, do_sample=True)[0]["generated_text"]
+    return output, prompt + "\nAssistant: " + output
 
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot()
-    txt = gr.Textbox(placeholder="Ask a finance question..."
+    txt = gr.Textbox(placeholder="Ask a finance question...")
     state = gr.State("")
-    txt.submit(lambda
+    txt.submit(lambda m, h: (chatbot + [(m, chat(m, h)[0])], chat(m, h)[1]), [txt, state], [chatbot, state])
 demo.launch()
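Note that the uploaded file still passes the LoRA adapter repo straight to AutoModelForCausalLM.from_pretrained. That only works on recent transformers releases with peft installed, since the FinGPT repo ships adapter weights rather than a full checkpoint. A minimal sketch of the more portable explicit pattern, assuming the same base and adapter IDs as the commit (PeftModel comes from the peft library and is not part of this file):

# Sketch only, not part of this commit: load the base model first,
# then attach the FinGPT LoRA weights with peft.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

base = "meta-llama/Llama-2-7b-chat-hf"
adapter = "FinGPT/fingpt-mt_llama2-7b_lora"

tokenizer = AutoTokenizer.from_pretrained(base)
base_model = AutoModelForCausalLM.from_pretrained(
    base, torch_dtype=torch.float16, device_map="auto"
)
# Apply the LoRA adapter on top of the base Llama-2 model.
model = PeftModel.from_pretrained(base_model, adapter)

Either way, meta-llama/Llama-2-7b-chat-hf is a gated repo, so the Space needs an access token for an account with Llama-2 access granted.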
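The new submit wiring also has two remaining problems: the lambda concatenates the gr.Chatbot component object with a list (a TypeError at runtime; the current history has to arrive as an event input), and it calls chat twice, running generation twice per message. On top of that, a text-generation pipeline's generated_text echoes the prompt, so the visible reply would repeat the whole transcript. A sketch of a handler that avoids all three, assuming Gradio's tuple-style Chatbot history; respond is a hypothetical helper, not in the commit:

# Sketch of a corrected submit handler; `respond` is hypothetical.
def respond(message, pairs, history):
    prompt = (history + "\nUser: " + message) if history else ("User: " + message)
    full = pipe(prompt, max_new_tokens=256, do_sample=True)[0]["generated_text"]
    # The pipeline output starts with the prompt; keep only the new tail.
    reply = full[len(prompt):].strip()
    # Return updated chat pairs, updated transcript state, and "" to clear the box.
    return pairs + [(message, reply)], prompt + "\nAssistant: " + reply, ""

txt.submit(respond, [txt, chatbot, state], [chatbot, state, txt])

This also runs the model exactly once per message, which matters on a 7B checkpoint.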