gustavokuklinski committed
Commit 5569e88 · verified · 1 Parent(s): 03d93bc

Update app.py

Files changed (1)
  1. app.py +24 -32
app.py CHANGED
@@ -1,6 +1,12 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
 
+# Load the model and tokenizer outside the function to avoid
+# reloading it on every call.
+model_name = "gustavokuklinski/aeon-360m"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
 
 def respond(
     message,
@@ -9,40 +15,32 @@ def respond(
     max_tokens,
     temperature,
     top_p,
-    hf_token: gr.OAuthToken,
 ):
-    """
-    For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-    """
-    client = InferenceClient(token=hf_token.token, model="gustavokuklinski/aeon-360m")
-
     messages = [{"role": "system", "content": system_message}]
-
     messages.extend(history)
-
     messages.append({"role": "user", "content": message})
 
-    response = ""
-
-    for message in client.chat_completion(
+    # Apply the chat template to format the messages
+    # The jinja template from the model card can be used
+    input_ids = tokenizer.apply_chat_template(
         messages,
-        max_tokens=max_tokens,
-        stream=True,
+        add_generation_prompt=True,
+        return_tensors="pt"
+    )
+
+    # Generate the response
+    outputs = model.generate(
+        input_ids,
+        max_new_tokens=max_tokens,
         temperature=temperature,
        top_p=top_p,
-    ):
-        choices = message.choices
-        token = ""
-        if len(choices) and choices[0].delta.content:
-            token = choices[0].delta.content
+    )
 
-        response += token
-        yield response
+    # Decode the generated text and yield the response
+    response = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
+    yield response
 
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
+# The rest of your Gradio ChatInterface code remains the same
 chatbot = gr.ChatInterface(
     respond,
     type="messages",
@@ -60,11 +58,5 @@ chatbot = gr.ChatInterface(
     ],
 )
 
-with gr.Blocks() as demo:
-    with gr.Sidebar():
-        gr.LoginButton()
-    chatbot.render()
-
-
 if __name__ == "__main__":
-    demo.launch()
+    chatbot.launch()
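
One behavioral difference worth noting: the old `InferenceClient` loop streamed tokens into the UI (`stream=True`, yielding a growing `response`), while the new code yields the full reply only once `model.generate()` returns. Also, `generate()` decodes greedily unless `do_sample=True` is set, so as committed the `temperature` and `top_p` values are passed but have no effect. Below is a minimal sketch (not part of this commit) of how streaming could be restored locally with transformers' `TextIteratorStreamer`; it assumes the module-level `tokenizer` and `model` from the new app.py, and the `respond_streaming` name is hypothetical.

# A sketch of a drop-in streaming replacement for respond(), assuming
# the module-level `tokenizer` and `model` defined in the new app.py.
from threading import Thread

from transformers import TextIteratorStreamer

def respond_streaming(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    # skip_prompt=True keeps the formatted chat prompt out of the streamed text.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    # generate() blocks, so it runs in a background thread while this
    # generator drains the streamer. do_sample=True makes temperature
    # and top_p actually take effect (generate() is greedy by default).
    Thread(target=model.generate, kwargs=dict(
        input_ids=input_ids,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        streamer=streamer,
    )).start()

    response = ""
    for chunk in streamer:
        response += chunk
        yield response  # gr.ChatInterface re-renders on each partial response

Because the signature matches the committed respond(), it could be passed to gr.ChatInterface unchanged.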