yuvrajpant56 committed on
Commit
76a64ca
·
verified ·
1 Parent(s): 2ce8469

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -14
app.py CHANGED
@@ -1,21 +1,19 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
- import torch
4
 
5
  model_id = "yuvrajpant56/Mistral_Posttrain_SFT"
6
-
7
- # Load model & tokenizer
8
- tokenizer = AutoTokenizer.from_pretrained(model_id)
9
- model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
10
 
11
  def generate_text(prompt):
12
- inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
13
- outputs = model.generate(**inputs, max_new_tokens=100)
14
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
15
 
16
- gr.Interface(fn=generate_text,
17
- inputs="text",
18
- outputs="text",
19
- title="Mistral SFT Text Generator",
20
- description="Type a prompt and let the fine-tuned Mistral model generate the rest."
 
21
  ).launch()
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
 
3
 
4
# Hub repository id of the fine-tuned Mistral SFT model served remotely.
model_id = "yuvrajpant56/Mistral_Posttrain_SFT"
# Remote Inference API client — no local model weights are downloaded or loaded.
client = InferenceClient(model_id)
 
 
 
6
 
7
def generate_text(prompt):
    """Generate a completion for *prompt* via the hosted Inference API.

    Streams tokens from the remote endpoint and concatenates them into one
    string, which is returned to the Gradio interface.

    Args:
        prompt: The user-supplied text to complete.

    Returns:
        The generated continuation as a single string.
    """
    response = ""
    # BUG FIX: with stream=True and the default details=False,
    # InferenceClient.text_generation yields plain str tokens, not
    # TextGenerationStreamOutput objects — `token.token` raised
    # AttributeError on every token. Accumulate the string directly.
    for token in client.text_generation(prompt, stream=True, max_new_tokens=100):
        response += token
    return response
12
 
13
# Build the web UI — one text box in, generated text out — then serve it.
demo = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Mistral SFT Text Generator",
    description="Type a prompt and let the fine-tuned Mistral model generate the rest.",
)
demo.launch()