# ===================== IMPORTS =====================
import streamlit as st
import os
import json
import difflib
import pickle
import torch
import numpy as np
from PIL import Image
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from transformers import CLIPProcessor, CLIPModel
from sentence_transformers import SentenceTransformer, CrossEncoder
from langdetect import detect

# ===================== PATHS =====================
Main_py = "Main_py"
model_path = os.path.join(Main_py, "best_cnn_model_finetuned.keras")
label_path = os.path.join(Main_py, "label_encoder.pkl")
json_path = os.path.join(Main_py, "banana_disease_knowledge_base_updated_shuffled.json")

# ===================== LOAD MODELS & DATA =====================
@st.cache_resource
def load_cnn_clip_kb():
    """Load the fine-tuned CNN, label encoder, knowledge base, and CLIP filter (cached across reruns)."""
    model = load_model(model_path)
    with open(label_path, "rb") as f:
        le = pickle.load(f)
    with open(json_path, "r", encoding="utf-8") as f:
        kb_data = json.load(f)
    clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    return model, le, kb_data, clip_model, clip_processor


@st.cache_resource
def load_nlp_models():
    """Load the multilingual sentence embedder and cross-encoder for text-based diagnosis."""
    embedder = SentenceTransformer("sentence-transformers/paraphrase-xlm-r-multilingual-v1")
    cross_encoder = CrossEncoder("cross-encoder/mmarco-mMiniLMv2-L12-H384-v1")
    return embedder, cross_encoder


model, le, kb_data, clip_model, clip_processor = load_cnn_clip_kb()
embedder, cross_encoder = load_nlp_models()

# ===================== CLIP FILTER =====================
def verify_image_with_clip(image_path):
    """Reject uploads that do not look like a banana leaf before running the CNN."""
    prompts = ["a photo of a banana leaf", "a photo of something that is not a banana leaf"]
    try:
        image = Image.open(image_path).convert("RGB")
    except Exception as e:
        return ("REJECTED", f"Invalid image file: {e}", 0.0)

    inputs = clip_processor(text=prompts, images=image, return_tensors="pt", padding=True)
    with torch.no_grad():
        outputs = clip_model(**inputs)
    probs = outputs.logits_per_image.softmax(dim=1).cpu().numpy()[0]
    banana_score, not_banana_score = probs[0], probs[1]

    # Accept only when the "banana leaf" probability clearly dominates the alternative.
    rejection_factor = 3.0
    if banana_score >= not_banana_score * rejection_factor:
        return ("ACCEPTED", "banana leaf", banana_score)
    return ("REJECTED", "Not a banana leaf", not_banana_score)

# ===================== CNN PREDICTION =====================
def predict_disease(image_path, target_size=(224, 224)):
    """Run the CNN on a single image and return (label, confidence, PIL image)."""
    image = load_img(image_path, target_size=target_size)
    img_array = img_to_array(image) / 255.0
    img_array = np.expand_dims(img_array, axis=0)
    preds = model.predict(img_array)[0]
    idx = np.argmax(preds)
    label = le.inverse_transform([idx])[0]
    confidence = preds[idx]
    return label, confidence, image

# ===================== FUZZY MARATHI OUTPUT =====================
def match_disease_name_fuzzy(predicted_name):
    """Match the CNN label against knowledge-base disease names, tolerating small spelling differences."""
    disease_names = [entry["Disease"].strip().lower() for entry in kb_data]
    matches = difflib.get_close_matches(predicted_name.strip().lower(), disease_names, n=1, cutoff=0.5)
    if matches:
        for entry in kb_data:
            if entry["Disease"].strip().lower() == matches[0]:
                return entry
    return None


def get_marathi_recommendation_fuzzy(predicted_disease, confidence=None):
    """Build the Marathi recommendation dict for a predicted disease, or return None if it is unknown."""
    entry = match_disease_name_fuzzy(predicted_disease)
    if entry:
        return {
            "पिक": entry.get("Crop", "केळी"),
            "रोग": entry.get("Local_Name", {}).get("mr", predicted_disease),
            "लक्षणे": entry.get("Symptoms_MR", ""),
            "कारण": entry.get("Cause_MR", ""),
            "किटकनाशके": entry.get("Pesticide_MR", ""),
            "किटकनाशक शिफारस": entry.get("Pesticide_Recommendation", {}).get("mr", ""),
            "नियंत्रण पद्धती": entry.get("Management_MR", ""),
            "रोगजन्य घटक": entry.get("Pathogen", ""),
            "विश्वासार्हता": f"{confidence:.2%}" if confidence is not None else "N/A",
        }
    return None

# ===================== NLP PREDICTION =====================
def detect_language(query: str) -> str:
    """Detect Marathi or Hindi; fall back to English for everything else (or on detection failure)."""
    try:
        lang = detect(query)
        return lang if lang in ["mr", "hi"] else "en"
    except Exception:
        return "en"


def predict_disease_from_text(query: str):
    """Score the symptom query against every knowledge-base entry and return the best match."""
    lang = detect_language(query)
    symptom_key = f"Symptoms_{lang.upper()}" if lang != "en" else "Symptoms"

    # Cross-encoder scores each (query, KB symptoms) pair directly.
    pairs = [[query, entry.get(symptom_key, "")] for entry in kb_data]
    scores = cross_encoder.predict(pairs)
    best_idx = int(np.argmax(scores))

    if scores[best_idx] < 0.2:
        return {
            "message": {
                "mr": "हा रोग आमच्या डेटाबेसमध्ये नाही.",
                "hi": "यह रोग हमारे डेटाबेस में नहीं है।",
                "en": "This disease is not in our database.",
            }[lang]
        }

    entry = kb_data[best_idx]
    return {
        "पिक": entry.get("Crop", "केळी"),
        "रोग": entry["Local_Name"].get(lang, entry["Disease"]),
        "लक्षणे": entry.get(symptom_key, ""),
        "कारण": entry.get(f"Cause_{lang.upper()}", entry.get("Cause", "")),
        "किटकनाशक शिफारस": entry.get("Pesticide_Recommendation", {}).get(lang, ""),
        "किटकनाशके": entry.get("Pesticide", ""),
        "रोगजन्य घटक": entry.get("Pathogen", ""),
        "नियंत्रण पद्धती": entry.get(f"Management_{lang.upper()}", entry.get("Management_Practices", "")),
    }

# ===================== STREAMLIT UI =====================
st.set_page_config(page_title="🍌 Banana Disease Detection (CNN + NLP)", layout="centered")
st.title("केळीच्या पानांवरील रोगांचे निदान")
st.markdown("प्रतिमा किंवा लक्षणे वापरून केळीवरील रोगांचे निदान करा (मराठी, हिंदी, इंग्रजी भाषांमध्ये).")

option = st.radio("इनपुट पद्धत निवडा:", ["Image Only", "Text Only", "Both"])

# ===================== IMAGE FLOW =====================
if option in ["Image Only", "Both"]:
    st.subheader("प्रतिमा अपलोड करा")
    uploaded_img = st.file_uploader("JPG / PNG", type=["jpg", "jpeg", "png"])
    if uploaded_img:
        # Persist the upload to a temporary file so CLIP and the CNN can read it from disk.
        temp_path = "temp_uploaded.jpg"
        with open(temp_path, "wb") as f:
            f.write(uploaded_img.getbuffer())

        st.image(temp_path, caption="अपलोड केलेली प्रतिमा", use_column_width=True)
        st.info("CLIP मॉडेलद्वारे पडताळणी करत आहे...")
        status, reason, clip_conf = verify_image_with_clip(temp_path)

        if status == "REJECTED":
            st.error(f"CLIP नकार: {reason} [विश्वासार्हता: {clip_conf:.2f}]")
        else:
            st.success(f"CLIP मंजूरी: शक्यतो केळीचे पान [विश्वासार्हता: {clip_conf:.2f}]")
            pred_disease, cnn_conf, img = predict_disease(temp_path)
            st.markdown(f"**ओळखलेला रोग:** {pred_disease} (विश्वासार्हता: {cnn_conf:.2%})")

            marathi_info = get_marathi_recommendation_fuzzy(pred_disease, cnn_conf)
            if marathi_info:
                st.subheader("मराठी शिफारस:")
                for k, v in marathi_info.items():
                    st.markdown(f"**{k}**: {v}")
            else:
                st.warning("ज्ञानतळात रोगासाठी माहिती नाही.")

            st.image(img, caption=f"{pred_disease} ({cnn_conf:.2%})", use_column_width=True)

        os.remove(temp_path)

# ===================== TEXT FLOW =====================
if option in ["Text Only", "Both"]:
    st.subheader("लक्षणे लिहा")
    symptoms = st.text_area("लक्षणे (मराठी / हिंदी / इंग्रजी):")
    if symptoms and st.button("रोग ओळखा"):
        result = predict_disease_from_text(symptoms)
        if "message" in result:
            st.warning(result["message"])
        else:
            st.subheader("शिफारस:")
            for k, v in result.items():
                st.markdown(f"**{k}**: {v}")