Update app.py
app.py CHANGED
@@ -4,7 +4,7 @@ from googletrans import Translator
 import gradio as gr
 
 # Load a Pre-trained Medical Question Answering Model
-model_name = "deepset/
+model_name = "deepset/roberta-base-squad2"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForQuestionAnswering.from_pretrained(model_name)
 
@@ -28,9 +28,12 @@ def translate_to_bangla(text):
 def chatbot_response(user_input):
     """Generates a response based on the medical question-answering model."""
 
+    print(f"User Input (Bangla): {user_input}")
+
     translated_input = translate_to_english(user_input)  # Translate to English
+    print(f"Translated Input (English): {translated_input}")
 
-    # Define a fixed
+    # Define a fixed medical context
     context = """
     Fever can be caused by infections, dehydration, or viral illnesses.
     It is recommended to rest, stay hydrated, and take paracetamol if necessary.
@@ -43,14 +46,18 @@ def chatbot_response(user_input):
     with torch.no_grad():
         outputs = model(**inputs)
 
-    # Extract
+    # Extract answer span
     answer_start = outputs.start_logits.argmax()
     answer_end = outputs.end_logits.argmax() + 1
     response_text = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0][answer_start:answer_end]))
 
-
+    print(f"Generated Response (English): {response_text}")
+
+    # Translate response back to Bangla
     translated_response = translate_to_bangla(response_text)
 
+    print(f"Final Response (Bangla): {translated_response}")
+
     return translated_response
 
 # Create Gradio Interface
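
For orientation, this commit only touches the model setup and chatbot_response; the translate_to_english / translate_to_bangla helpers and the Gradio wiring it calls into sit outside the hunks shown above. The sketch below is a guess at how those surrounding pieces of app.py might look, assuming the synchronous googletrans Translator API and a plain gr.Interface; every name not visible in the diff (the translator instance, the textbox labels, the demo variable, the launch block) is illustrative, not the Space's actual code.

# Sketch of the app.py pieces referenced by, but not shown in, the diff above (assumptions, not the Space's code).
import gradio as gr
from googletrans import Translator

translator = Translator()  # assumed module-level instance shared by both helpers

def translate_to_english(text):
    # Assumed helper: Bangla ("bn") -> English ("en") via googletrans
    return translator.translate(text, src="bn", dest="en").text

def translate_to_bangla(text):
    # Assumed helper: English ("en") -> Bangla ("bn") via googletrans
    return translator.translate(text, src="en", dest="bn").text

# chatbot_response(user_input) is the function edited in this commit (see the diff above).

# Create Gradio Interface
demo = gr.Interface(
    fn=chatbot_response,                          # defined in the diff above
    inputs=gr.Textbox(label="Question (Bangla)"),
    outputs=gr.Textbox(label="Answer (Bangla)"),
    title="Bangla Medical QA Chatbot",
)

if __name__ == "__main__":
    demo.launch()

One caveat: newer googletrans releases expose an asynchronous Translator, so the synchronous .translate(...) calls in the sketch assume one of the older releases (for example 3.1.0a0 or 4.0.0rc1); which version the Space actually pins is not visible in this diff.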