"""Cardiac ECG Analysis System.

A Gradio app that analyzes an uploaded ECG image with GPT-4.1, optionally loads
patient history from a file, generates a structured clinical assessment, and
offers a follow-up consultation chat. For informational use only; it is not a
medical device.
"""

import base64
import datetime
import io
import os
import re
import traceback

import gradio as gr
import pandas as pd
from openai import OpenAI
from PIL import Image

# Create the OpenAI client once at startup. If the API key is missing or invalid,
# the UI still loads and each handler reports the problem instead of crashing.
try:
    openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
except Exception as e:
    print(f"Error initializing OpenAI client: {e}")
    openai_client = None
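
# Assumptions about the runtime environment (not enforced by the code above):
# the OPENAI_API_KEY environment variable must be set before launch, e.g.
#   export OPENAI_API_KEY="..."
# and the installed `openai` package must be a 1.x release recent enough to
# provide the Responses API (`client.responses.create`) used below.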


def encode_image(image):
    """Return a base64-encoded JPEG string for a PIL image or an image file path, or None."""
    if isinstance(image, Image.Image):
        buffered = io.BytesIO()
        # JPEG has no alpha channel, so convert palette/alpha modes first.
        if image.mode in ('RGBA', 'P', 'LA'):
            image = image.convert('RGB')
        image.save(buffered, format="JPEG")
        image_bytes = buffered.getvalue()
        return base64.b64encode(image_bytes).decode("utf-8")
    elif isinstance(image, str) and os.path.exists(image):
        with open(image, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode("utf-8")
    return None
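
# Illustrative usage (hypothetical file name), matching how the encoded string
# is embedded in the data URL built in analyze_ecg_image() below:
#   b64 = encode_image("sample_ecg.jpg")
#   data_url = f"data:image/jpeg;base64,{b64}"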


def process_patient_history(file):
    """Read patient history from an uploaded .txt, .csv, or Excel file and return it as plain text."""
    if file is None:
        return ""

    try:
        file_path = file.name
        file_ext = os.path.splitext(file_path)[1].lower()

        if file_ext == '.txt':
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
            return content

        elif file_ext in ['.csv', '.xlsx', '.xls']:
            if file_ext == '.csv':
                df = pd.read_csv(file_path)
            else:
                try:
                    df = pd.read_excel(file_path)
                except ImportError:
                    return "Error: `openpyxl` is required to read .xlsx files. Install it with `pip install openpyxl`."
                except Exception as e_excel:
                    return f"Error reading Excel file: {e_excel}"

            # Flatten the first data row into "Column: value" lines.
            formatted_data = "PATIENT INFORMATION:\n\n"
            if not df.empty:
                for column in df.columns:
                    value = df.iloc[0].get(column, 'N/A')
                    formatted_data += f"{column}: {str(value)}\n"
            else:
                formatted_data += "Spreadsheet is empty or its format was not recognized."

            return formatted_data

        else:
            return f"Unsupported file format ({file_ext}). Please upload a .txt, .csv, or Excel (.xlsx/.xls) file."

    except AttributeError:
        return "Error: Could not get file path from Gradio File object. Ensure a file was uploaded."
    except FileNotFoundError:
        return f"Error: File not found at path: {file_path}"
    except Exception as e:
        print(f"Error processing patient history file:\n{traceback.format_exc()}")
        return f"Error processing patient history file: {str(e)}"
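
# Illustrative spreadsheet layout (hypothetical column names). Only the first
# data row is read, and each column becomes a "Column: value" line:
#   Name,   Age, Medications, Symptoms
#   J. Doe, 64,  Metoprolol,  Palpitations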


def analyze_ecg_image(image):
    """Analyze an ECG image with GPT-4.1 via the Responses API and return an HTML report."""
    if image is None:
        return "<strong style='color:red'>No image provided.</strong>"

    if openai_client is None:
        return "<strong style='color:red'>OpenAI client not initialized. Check API Key.</strong>"

    # Accept a PIL image, a file path, or a file-like object with a .name attribute.
    if not isinstance(image, Image.Image):
        try:
            if isinstance(image, str) and os.path.exists(image):
                image = Image.open(image)
            elif hasattr(image, 'name'):
                image = Image.open(image.name)
            else:
                return f"<strong style='color:red'>Unrecognized image input format: {type(image)}</strong>"
        except Exception as e:
            print(f"Error opening image:\n{traceback.format_exc()}")
            return f"<strong style='color:red'>Error opening image: {str(e)}</strong>"

    try:
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        base64_image = encode_image(image)
        if not base64_image:
            return "<strong style='color:red'>Failed to encode image.</strong>"

        vision_prompt = f"""Analyze this ECG image carefully. You are a cardiologist analyzing an electrocardiogram (ECG).

IMPORTANT: Only report what you can actually see clearly displayed in this specific ECG screen. Do not include any measurements or values that are not visible or not displayed digitally in the image. Only create sections for values that are actually shown in the image.

Look for and extract visible measurements from the ECG display, which may include:
- Heart rate (if displayed digitally)
- Any numeric measurements shown on the screen
- Visible rhythm patterns
- Any clearly labeled values or measurements

Report exact numerical values where visible. If a value is not displayed or not visible, DO NOT include that section at all in your response.

Format your response strictly like this:
<h3>ECG Report</h3>
<ul>
<li><strong>Analysis Time:</strong> {timestamp}</li>
<!-- Only include the following if they are visible in the image -->
<!-- If Heart Rate is displayed: -->
<li><strong>Heart Rate:</strong> [visible value] bpm</li>
<!-- If other measurements are visible: -->
<!-- Add only visible measurements as list items -->
</ul>
<h3>Visible Findings</h3>
<ul>
<li>[Only observations of what is actually visible in the waveform]</li>
<li>[Only visible abnormalities, if any]</li>
</ul>
<h3>Visual Assessment</h3>
<p>[Brief summary based ONLY on what is visible in this specific ECG display]</p>

Critical rules:
- Do NOT add sections for measurements not visible in the image
- Do NOT write "Not determinable from image" for any parameter
- Only include data that you can actually see in this ECG screen
- Report only the exact values or descriptions visible in the image
- If certain standard ECG parameters are not shown, simply don't include them
"""

        response = openai_client.responses.create(
            model="gpt-4.1",
            input=[
                {
                    "role": "user",
                    "content": [
                        {"type": "input_text", "text": vision_prompt},
                        {
                            "type": "input_image",
                            "image_url": f"data:image/jpeg;base64,{base64_image}",
                        },
                    ],
                }
            ]
        )

        ecg_analysis = response.output_text

        # Convert any stray Markdown in the response into the expected HTML tags.
        ecg_analysis = re.sub(r'\*\*(.*?)\*\*', r'<strong>\1</strong>', ecg_analysis)
        ecg_analysis = re.sub(r'^\s*#+\s+(.*?)\s*$', r'<h3>\1</h3>', ecg_analysis, flags=re.MULTILINE)
        ecg_analysis = re.sub(r'^\s*[\*-]\s+(.*?)\s*$', r'<li>\1</li>', ecg_analysis, flags=re.MULTILINE)

        if not ("<h3>" in ecg_analysis and "<ul>" in ecg_analysis):
            print(f"Warning: GPT-4.1 response might not be in the expected HTML format:\n{ecg_analysis[:500]}...")

        return ecg_analysis

    except Exception as e:
        print(f"Error during GPT-4.1 ECG analysis:\n{traceback.format_exc()}")
        error_type = type(e).__name__
        return f"<strong style='color:red'>Error analyzing ECG image with GPT-4.1 ({error_type}):</strong> {str(e)}"
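
# Quick manual check (hypothetical path; requires a valid API key and network
# access), since analyze_ecg_image() also accepts a plain file path:
#   print(analyze_ecg_image("sample_ecg.png"))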


def generate_assessment(ecg_analysis, patient_history=None):
    """Combine the ECG analysis and optional patient history into an HTML clinical assessment."""
    if openai_client is None:
        return "<strong style='color:red'>OpenAI client not initialized. Check API Key.</strong>"

    if not ecg_analysis or ecg_analysis.startswith("<strong style='color:red'>"):
        return "<strong style='color:red'>Cannot generate assessment. Please analyze a valid ECG image first.</strong>"

    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # Strip HTML tags so the model receives plain text rather than markup.
    clean_ecg_analysis = re.sub('<[^>]+>', '', ecg_analysis)

    prompt_parts = [
        "You are a highly trained cardiologist assistant AI. Synthesize information from the ECG analysis and patient history (if provided) into a clinical assessment.",
        "Focus on integrating the findings and suggesting potential implications and recommendations.",
        "Format your response strictly using the specified HTML structure.",
        "\nECG ANALYSIS SUMMARY (Provided):\n" + clean_ecg_analysis,
    ]

    if patient_history and patient_history.strip():
        prompt_parts.append("\nPATIENT HISTORY (Provided):\n" + patient_history)
    else:
        prompt_parts.append("\nPATIENT HISTORY: Not provided.")

    prompt_parts.append(f"\nASSESSMENT TIMESTAMP: {timestamp}")

    prompt_parts.append("""
Format your assessment using ONLY the following HTML structure:
<h3>Summary of Integrated Findings</h3>
<ul>
<li>[Combine key ECG findings with relevant patient history points]</li>
<li>[Finding 2]</li>
</ul>
<h3>Key Abnormalities and Concerns</h3>
<ul>
<li>[List specific significant abnormalities from the ECG]</li>
<li>[Use <span style="color:red"> for urgent/critical concerns]</li>
</ul>
<h3>Potential Clinical Implications</h3>
<ul>
<li>[Suggest possible underlying conditions or risks]</li>
<li>[Implication 2]</li>
</ul>
<h3>Recommendations for Physician Review</h3>
<ul>
<li>[Suggest next steps or urgency]</li>
<li>[Recommendation 2]</li>
</ul>
<h3>Differential Considerations (Optional)</h3>
<ul>
<li>[List possible alternative explanations if applicable]</li>
<li>[Differential 2]</li>
</ul>
Important Instructions:
- Adhere strictly to the HTML format
- Do NOT use markdown formatting
- Base your assessment ONLY on the provided information
- Do NOT make definitive diagnoses
""")
    prompt = "\n".join(prompt_parts)

    try:
        assessment_completion = openai_client.responses.create(
            model="gpt-4.1",
            instructions="You are a medical AI assistant specialized in cardiology. Generate a structured clinical assessment based on the provided ECG and patient data, formatted in HTML for physician review. Highlight urgent findings appropriately. Avoid definitive diagnoses.",
            input=prompt
        )

        assessment_text = assessment_completion.output_text

        # Convert any stray Markdown in the response into the expected HTML tags.
        assessment_text = re.sub(r'\*\*(.*?)\*\*', r'<strong>\1</strong>', assessment_text)
        assessment_text = re.sub(r'^\s*#+\s+(.*?)\s*$', r'<h3>\1</h3>', assessment_text, flags=re.MULTILINE)

        # Fall back to a raw dump if the response does not look like the requested HTML.
        if not ("<h3>" in assessment_text and "<ul>" in assessment_text):
            print(f"Warning: GPT-4.1 assessment response might not be in the expected HTML format:\n{assessment_text[:500]}...")
            processed_text = assessment_text.replace('\n', '<br>')
            assessment_text = f"<h3>Assessment (Raw Output)</h3><p>{processed_text}</p>"

        return assessment_text

    except Exception as e:
        print(f"Error during GPT-4.1 assessment generation:\n{traceback.format_exc()}")
        error_type = type(e).__name__
        return f"<strong style='color:red'>Error generating assessment with GPT-4.1 ({error_type}):</strong> {str(e)}"
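
# Note: in the event wiring below, generate_assessment() receives the raw HTML
# shown in the "ECG Analysis Results" panel; the tag-stripping above turns it
# back into plain text before it is sent to the model.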


def doctor_chat(message, chat_history, ecg_analysis, patient_history, assessment):
    """Answer a doctor's follow-up question using only the ECG analysis, patient history, and assessment as context."""
    if openai_client is None:
        chat_history.append((message, "<strong style='color:red'>Cannot start chat. OpenAI client not initialized. Check API Key.</strong>"))
        return "", chat_history

    if not ecg_analysis or ecg_analysis.startswith("<strong style='color:red'>"):
        chat_history.append((message, "<strong style='color:red'>Cannot start chat. Please analyze a valid ECG image first.</strong>"))
        return "", chat_history

    if not message.strip():
        return "", chat_history

    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # Strip HTML tags so the model receives plain-text context.
    clean_ecg = re.sub('<[^>]+>', '', ecg_analysis)
    clean_assessment = re.sub('<[^>]+>', '', assessment) if assessment and not assessment.startswith("<strong style='color:red'>") else "Assessment not available or failed."
    clean_history = patient_history if patient_history and patient_history.strip() else "No patient history provided."

    context = f"""CURRENT TIMESTAMP: {timestamp}
=== BEGIN PATIENT CONTEXT ===
PATIENT HISTORY:
{clean_history}
ECG ANALYSIS SUMMARY:
{clean_ecg}
GENERATED ASSESSMENT SUMMARY:
{clean_assessment}
=== END PATIENT CONTEXT ===
Based *only* on the patient context provided above, answer the doctor's questions concisely and professionally. If the information needed to answer is not in the context, explicitly state that. Do not invent information or access external knowledge.
"""

    messages = [
        {
            "role": "system",
            "content": f"You are a specialized cardiology AI assistant conversing with a doctor. Your knowledge is strictly limited to the patient information provided in the context below. Answer questions based *only* on this context.\n\n{context}"
        }
    ]

    # Keep only the most recent turns so the prompt stays a manageable size.
    history_limit = 5
    for user_msg, assistant_msg in chat_history[-history_limit:]:
        messages.append({"role": "user", "content": [{"type": "input_text", "text": user_msg}]})
        if isinstance(assistant_msg, str) and not assistant_msg.startswith("<strong style='color:red'>"):
            messages.append({"role": "assistant", "content": assistant_msg})

    messages.append({"role": "user", "content": [{"type": "input_text", "text": message}]})

    try:
        system_prompt = messages[0]["content"]

        # Flatten the remaining turns (doctor questions and prior assistant replies)
        # into a single text block for the Responses API call below.
        conversation_parts = []
        for msg in messages[1:]:
            if msg["role"] == "user":
                if isinstance(msg["content"], list):
                    for content in msg["content"]:
                        if isinstance(content, dict) and content.get("type") == "input_text":
                            conversation_parts.append(content["text"])
                else:
                    conversation_parts.append(str(msg["content"]))
            else:
                conversation_parts.append(msg["content"])

        combined_input = "\n\n".join(conversation_parts)

        chat_completion = openai_client.responses.create(
            model="gpt-4.1",
            instructions=system_prompt,
            input=combined_input
        )

        response = chat_completion.output_text

        # Light HTML formatting for display in the Chatbot component.
        response = re.sub(r'\*\*(.*?)\*\*', r'<strong>\1</strong>', response)
        response = response.replace('\n', '<br>')

        chat_history.append((message, response))
        return "", chat_history
    except Exception as e:
        print(f"Error during GPT-4.1 chat:\n{traceback.format_exc()}")
        error_type = type(e).__name__
        error_message = f"<strong style='color:red'>Error in chat ({error_type}):</strong> {str(e)}"
        chat_history.append((message, error_message))
        return "", chat_history
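
# Note: chat_history arrives from the Chatbot component as a list of
# (user_message, assistant_message) pairs (tuple-style history), which is why
# this function reads and appends pairs rather than role/content dictionaries.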


with gr.Blocks(title="Cardiac ECG Analysis System", theme=gr.themes.Soft()) as app:

    gr.Markdown("# 🫀 Cardiac ECG Analysis System")
    gr.Markdown("Upload an ECG image and optional patient history for AI-assisted analysis, assessment, and consultation.")

    with gr.Tabs():
        with gr.TabItem("💻 Main Interface"):
            with gr.Row():
                with gr.Column(scale=1):
                    with gr.Group():
                        gr.Markdown("### ECG Image Upload")
                        ecg_image = gr.Image(type="pil", label="Upload ECG Image", height=300)
                        gr.Markdown("**Vision Model: GPT-4.1**")
                        analyze_button = gr.Button("Analyze ECG Image", variant="primary")

                    with gr.Group():
                        gr.Markdown("### Patient Information")
                        patient_history_text = gr.Textbox(
                            lines=8,
                            label="Patient History (Manual Entry or Loaded from File)",
                            placeholder="Enter relevant patient details OR upload a file and click Load."
                        )
                        patient_history_file = gr.File(
                            label="Upload Patient History File (Optional)",
                            file_types=[".txt", ".csv", ".xlsx", ".xls"]
                        )
                        load_history_button = gr.Button("Load Patient History from File")

                    with gr.Group():
                        gr.Markdown("### 🧠 Generate Assessment")
                        gr.Markdown("**Assessment/Chat Model: GPT-4.1**")
                        assess_button = gr.Button("Generate Assessment", variant="primary")

                with gr.Column(scale=1):
                    with gr.Group():
                        gr.Markdown("### ECG Analysis Results")
                        ecg_analysis_output = gr.HTML(label="ECG Analysis", elem_id="ecg-analysis")

                    with gr.Group():
                        gr.Markdown("### Medical Assessment")
                        assessment_output = gr.HTML(label="Assessment", elem_id="assessment-output")

            gr.Markdown("---")
            gr.Markdown("## 👨‍⚕️ Doctor's Consultation Chat")
            gr.Markdown("Ask follow-up questions based on the analysis and assessment above.")

            with gr.Group():
                chatbot = gr.Chatbot(
                    label="Consultation Log",
                    height=450,
                    bubble_full_width=False,
                    show_label=False
                )
                with gr.Row():
                    message = gr.Textbox(
                        label="Your Question",
                        placeholder="Type your question here and press Enter or click Send...",
                        scale=4,
                        show_label=False,
                        container=False,
                    )
                    chat_button = gr.Button("Send", scale=1, variant="primary")

        with gr.TabItem("ℹ️ Instructions & Disclaimer"):
            gr.Markdown("""
## How to Use This Application
1. **Upload ECG:** Go to the "Main Interface" tab. Upload an ECG image using the designated area.
2. **Analyze ECG:** Click the **Analyze ECG Image** button. The system will analyze using GPT-4.1 and show results.
3. **Add Patient History (Optional):**
    * Type relevant details directly into the "Patient History" text box.
    * OR, upload a `.txt`, `.csv`, or `.xlsx` file and click **Load Patient History from File**.
4. **Generate Assessment:** Click the **Generate Assessment** button. Results appear in the "Medical Assessment" box.
5. **Consult:** Use the chat interface to ask follow-up questions about the analysis and assessment.

---

## Important Disclaimer
* **Not a Medical Device:** This tool is for informational purposes only. It is **NOT** a certified medical device.
* **AI Limitations:** AI models can make mistakes, misinterpret images, or generate inaccurate information.
* **Professional Judgment Required:** All outputs must be reviewed by a qualified healthcare professional.
* **No Liability:** Use this tool at your own risk. The creators assume no liability for any decisions made based on its output.
""")

    # Wire the UI controls to their handler functions.
    analyze_button.click(
        fn=analyze_ecg_image,
        inputs=[ecg_image],
        outputs=ecg_analysis_output
    )

    load_history_button.click(
        fn=process_patient_history,
        inputs=[patient_history_file],
        outputs=[patient_history_text]
    )

    assess_button.click(
        fn=generate_assessment,
        inputs=[ecg_analysis_output, patient_history_text],
        outputs=assessment_output
    )

    chat_button.click(
        fn=doctor_chat,
        inputs=[message, chatbot, ecg_analysis_output, patient_history_text, assessment_output],
        outputs=[message, chatbot]
    )

    message.submit(
        fn=doctor_chat,
        inputs=[message, chatbot, ecg_analysis_output, patient_history_text, assessment_output],
        outputs=[message, chatbot]
    )


if __name__ == "__main__":
    print("===== Application Startup =====")
    print(f"Attempting to launch Gradio app at {datetime.datetime.now()}")
    app.launch()
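
    # Sketch of optional launch settings (assumptions, not part of the original
    # behaviour): instead of the bare app.launch() above, request queuing can be
    # enabled for overlapping model calls, and the server can bind to all
    # interfaces when running in a container, e.g.
    #   app.queue()
    #   app.launch(server_name="0.0.0.0", server_port=7860)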