JPeace18 commited on
Commit
3f8d8b4
·
verified ·
1 Parent(s): c5b96a8

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -0
app.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import torch  # fix: torch.argmax is used below but torch was never imported (NameError)
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

# Load the pretrained model and tokenizer.
# NOTE(review): this model id names a ViT image-classification LoRA checkpoint
# (food101), but it is loaded as an extractive question-answering model —
# confirm the intended checkpoint; a QA model such as a SQuAD fine-tune is
# probably what was meant.
model_name = "JPeace18/vit-base-patch16-224-in21k-finetuned-lora-food101"  # Replace with your desired model
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Define the function to generate the model's answer
def get_answer(question):
    """Run extractive QA on *question* and return the decoded answer span.

    NOTE(review): extractive QA models normally expect a (question, context)
    pair; here only the question is tokenized, so the model can only select
    a span from the question text itself — confirm this is intended.

    Parameters
    ----------
    question : str
        The user's question, tokenized as the sole model input.

    Returns
    -------
    str
        The detokenized answer span between the argmax start and end logits.
    """
    inputs = tokenizer(question, return_tensors="pt")
    outputs = model(**inputs)
    # Use the tensors' own .argmax() method: the original called
    # torch.argmax without importing torch, which raised NameError.
    answer_start = outputs.start_logits.argmax()
    answer_end = outputs.end_logits.argmax() + 1  # end index is exclusive when slicing
    tokens = tokenizer.convert_ids_to_tokens(
        inputs["input_ids"][0][answer_start:answer_end]
    )
    return tokenizer.convert_tokens_to_string(tokens)
+
18
+ # Create the Gradio interface
19
+ iface = gr.Interface(
20
+ fn=get_answer,
21
+ inputs="text",
22
+ outputs="text",
23
+ live=True,
24
+ title="Question Answering Model",
25
+ description="Ask a question, and the model will provide an answer.",
26
+ )
27
+
28
+ # Launch the Gradio interface
29
+ iface.launch()