import os
import json

from langchain_groq import ChatGroq


def get_groq_client():
    """Initialize and return a Groq chat client.

    Reads the API key from the GROQ_API_KEY environment variable. When the
    key is missing a warning is printed but a client is still returned, so
    the failure surfaces on the first request rather than at construction
    time (matches the original best-effort behavior).

    Returns:
        ChatGroq: client configured for the llama-3.3-70b-versatile model.
    """
    groq_key = os.environ.get('GROQ_API_KEY')
    if not groq_key:
        print("Warning: GROQ_API_KEY not found in environment variables.")
    return ChatGroq(model="llama-3.3-70b-versatile", api_key=groq_key)


def answer_plant_question_with_groq(question, context):
    """
    Use Groq to answer a question about plants using the provided context.

    Args:
        question: The user's question.
        context: Context information from the database (truncated to the
            first 4000 characters to bound prompt size).

    Returns:
        str: the model's answer, or an error message on failure.
    """
    llm = get_groq_client()

    # Bound the prompt size. BUG FIX: the old code put the comment
    # "# Limit context size" inside the f-string literal, so that text was
    # being sent to the model as part of the prompt. Truncating into a
    # named variable keeps the comment out of the prompt.
    truncated_context = context[:4000]

    prompt = f"""
    You are a plant disease expert. Answer the following question about plant diseases and treatments.
    Use only the information provided in the context. If you don't know the answer, say so.

    Context:
    {truncated_context}

    Question: {question}

    Answer with the following format:
    Crop: [crop name if identified]
    Disease: [disease name if identified]
    Treatment: [treatment recommendations]
    Medicine: [chemical control options]
    """

    try:
        response = llm.invoke(prompt)
        return response.content
    except Exception as e:
        return f"Error getting response from Groq: {str(e)}\n\nPlease check your API key and try again."


def _parse_analysis_response(text):
    """Parse the model's JSON reply into the analysis result dict.

    Extracts the outermost {...} span from *text* (models often wrap JSON in
    prose or code fences) and normalizes it to the expected schema. Falls
    back to the generic canned analysis when no valid JSON can be recovered,
    preserving the original function's output shape.

    Args:
        text: raw response content from the model.

    Returns:
        dict with list-valued keys "likely_diseases", "treatments",
        "prevention".
    """
    start = text.find('{')
    end = text.rfind('}')
    if start != -1 and end > start:
        try:
            parsed = json.loads(text[start:end + 1])
            return {
                "likely_diseases": list(parsed.get("likely_diseases", [])),
                "treatments": list(parsed.get("treatments", [])),
                "prevention": list(parsed.get("prevention", [])),
            }
        except (json.JSONDecodeError, TypeError, AttributeError):
            # Reply was not valid JSON / not an object — fall through.
            pass
    # Fallback: the generic structure the original implementation returned.
    return {
        "likely_diseases": ["Detected disease"],
        "treatments": ["Remove affected leaves",
                       "Apply appropriate fungicide",
                       "Ensure proper spacing between plants"],
        "prevention": ["Crop rotation", "Proper sanitation", "Resistant varieties"],
    }


def analyze_plant_disease_with_groq(image_description, symptoms):
    """
    Use Groq to analyze plant disease based on image description and symptoms.

    Args:
        image_description: Description of the plant image.
        symptoms: Known symptoms.

    Returns:
        dict with keys "likely_diseases", "treatments", "prevention"
        (each a list of strings); all lists empty when the API call fails.
    """
    llm = get_groq_client()

    prompt = f"""
    You are a plant disease expert. Analyze this plant image description and symptoms.

    Image description: {image_description}

    Observed symptoms: {symptoms}

    Provide a detailed analysis with the following information:
    1. List of likely diseases (up to 3)
    2. Recommended treatments for each disease
    3. Prevention measures

    Format your response as JSON with the following structure:
    {{
        "likely_diseases": ["disease1", "disease2", "disease3"],
        "treatments": ["treatment1", "treatment2", "treatment3"],
        "prevention": ["prevention1", "prevention2", "prevention3"]
    }}
    """

    try:
        response = llm.invoke(prompt)
        # FIX: the old code ignored the response and returned a hard-coded
        # mock; now the model's JSON reply is actually parsed.
        return _parse_analysis_response(response.content)
    except Exception as e:
        print(f"Error getting response from Groq: {str(e)}")
        return {
            "likely_diseases": [],
            "treatments": [],
            "prevention": []
        }