Update app.py
app.py CHANGED
@@ -1,3 +1,4 @@
 from flask import Flask, render_template, request, jsonify, Response, stream_with_context
 from google import genai
 from google.genai import types
@@ -12,57 +13,21 @@ from pathlib import Path
 
 app = Flask(__name__)
 
-#
 GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
-TELEGRAM_BOT_TOKEN = "
-TELEGRAM_CHAT_ID = "
-
-client = genai.Client(api_key=GOOGLE_API_KEY)
-
-# Prompt de base
-BASE_PROMPT = r"""
-# 🔍 GÉNÉRATEUR DE CORRECTION MATHÉMATIQUE (Version Directe)
-
-## 🎓 VOTRE RÔLE
-Vous êtes **Mariam-MATHEX-PRO**, un expert en mathématiques chargé de fournir des corrections. Votre objectif est d'être clair, précis et d'aller droit au but.
-
-## 📊 FORMAT D'ENTRÉE ET SORTIE
-**ENTRÉE:** L'énoncé d'un exercice mathématique (niveau Terminale/Supérieur).
-**SORTIE:** UNIQUEMENT la correction de l'exercice **en français** avec rendu LaTeX.
-
-## 🛠️ INSTRUCTIONS POUR LA CORRECTION
-1. **STRUCTURATION DE LA RÉPONSE :**
-   Organisez la solution en étapes logiques claires.
-   Si l'exercice comporte plusieurs questions ou parties, traitez-les séquentiellement.
-
-2. **DÉTAIL DU PROCÉDÉ DE CALCUL :**
-   Pour chaque étape significative, montrez les calculs.
-   Écrivez les calculs intermédiaires importants.
-
-3. **EXPLICATIONS TRÈS BRÈVES :**
-   Chaque étape doit avoir une explication textuelle très concise.
-
-4. **RÉSULTATS :**
-   Indiquez clairement les résultats intermédiaires et le résultat final.
-
-## 🔧 RENDU MATHÉMATIQUE
-5. **RENDU MATHÉMATIQUE :**
-   Utilisez LaTeX pour toutes les expressions mathématiques.
-
-## ✅ OBJECTIF PRINCIPAL
-Fournir une correction mathématique textuelle **en français** qui va droit au but.
-"""
-
-# Extension du prompt
-CODE_EXTENSION = r"""
-## 🧮 EXIGENCES TECHNIQUES (MODE CALCULATRICE ACTIVÉ)
-
-
-
-Vérifiez vos calculs analytiques par du numérique en Python.
-"""
 
 class AgentSystem:
     def __init__(self):
@@ -74,36 +39,82 @@ class AgentSystem:
         try:
             self.prompts_dir.mkdir(exist_ok=True)
 
             default_prompts = {
-                "step1_initial_solution.md": """### Core Instructions ###
-* **Rigor is Paramount:** Your primary goal is to produce a complete and rigorously justified solution.
-
-
 
-                "step2_self_improvement.md": """You are a world-class mathematician.
-You have just produced the following draft solution.
-Your task is to review it carefully, identify flaws or gaps, and produce a new, improved solution.
 
 ### Draft Solution ###
 [The initial solution attempt will be inserted here]
 
 ### Your Task ###
-Provide the improved version of the solution.
-
-                "step3_verification.md": """You are an expert mathematician and a meticulous grader.
-Your task is to verify the provided solution step by step.
-
-### Problem ###
-[The mathematical problem will be inserted here]
-
-### Solution ###
-[The solution to be verified will be inserted here]
 
-
-
 """,
 
-                "step5_correction.md": """You are a brilliant mathematician
 
 ### Verification Report ###
 [The full verification report will be inserted here]
@@ -112,75 +123,70 @@ Act as an IMO grader. Generate a summary and a detailed verification log.
 [The previous solution attempt will be inserted here]
 
 ### Task ###
-Provide a new
 """
             }
 
             for filename, content in default_prompts.items():
                 prompt_file = self.prompts_dir / filename
-
-                prompt_file.write_text(content, encoding='utf-8')
-                prompts[filename.replace('.md', '')] = content
 
             for prompt_file in self.prompts_dir.glob("*.md"):
                 prompts[prompt_file.stem] = prompt_file.read_text(encoding='utf-8')
-
         except Exception as e:
             print(f"Error loading prompts: {e}")
         return prompts
 
     def extract_problem_text(self, img_str):
         try:
             response = client.models.generate_content(
-                model=
                 contents=[
                     {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
-                    "
                 ],
-                config=types.GenerateContentConfig(temperature=0.
             )
-
-            for part in response.candidates[0].content.parts:
-                if hasattr(part, 'text') and part.text:
-                    problem_text += part.text
-            return problem_text.strip()
         except Exception as e:
             print(f"Error extracting problem text: {e}")
-            return "[
 
-    def run_agent_step(self, step_name, prompt
         try:
             config = types.GenerateContentConfig(
-                temperature=0.
-
             )
-
-
-            response = client.models.generate_content_stream(
-                model="gemini-2.5-flash",
                 contents=[prompt],
-                config=config
             )
-
-            for chunk in response:
-                for part in chunk.candidates[0].content.parts:
-                    if hasattr(part, 'text') and part.text:
-                        result += part.text
-            return result.strip()
         except Exception as e:
             print(f"Error in agent step {step_name}: {e}")
-            return f"[
 
-def send_to_telegram(image_data, caption="Nouvelle image
     try:
         url = f"https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}/sendPhoto"
         files = {'photo': ('image.png', image_data)}
         data = {'chat_id': TELEGRAM_CHAT_ID, 'caption': caption}
-        response = requests.post(url, files=files, data=data)
         return response.status_code == 200
     except Exception as e:
         print(f"Exception Telegram: {e}")
         return False
 
 @app.route('/')
 def index():
@@ -188,116 +194,71 @@ def index():
 
 @app.route('/solve', methods=['POST'])
 def solve():
     try:
         image_data = request.files['image'].read()
-
-        use_extended_reasoning = request.form.get('use_extended_reasoning', 'false').lower() == 'true'
-
-        img = Image.open(io.BytesIO(image_data))
-        send_to_telegram(image_data, "Nouvelle image reçue")
 
-
-        img.save(buffered, format="PNG")
-        img_str = base64.b64encode(buffered.getvalue()).decode()
 
         def generate():
             try:
-
-
-
-                # Étape
-                yield f'data: {json.dumps({"content": "
-
-
-                )
-
-
-                step3_prompt = agent_system.prompts["step3_verification"].replace(
-                    "[The mathematical problem will be inserted here]", problem_text
-                ).replace(
-                    "[The solution to be verified will be inserted here]", improved_solution
-                )
-                verification_result = agent_system.run_agent_step("step3", step3_prompt, False)
-                yield f'data: {json.dumps({"content": verification_result, "type": "text"})}\n\n'
-
-                needs_correction = (
-                    "Critical Error" in verification_result
-                    or "Justification Gap" in verification_result
-                    or "invalid" in verification_result.lower()
-                )
-
-                if needs_correction:
-                    yield f'data: {json.dumps({"content": "# 🛠️ ÉTAPE 5: CORRECTION\n\n", "type": "text"})}\n\n'
                     step5_prompt = agent_system.prompts["step5_correction"].replace(
-                        "[The full verification report will be inserted here]",
                     ).replace(
-                        "[The previous solution attempt will be inserted here]",
                     )
-
-
-                    yield f'data: {json.dumps({"content": corrected_solution, "type": "text"})}\n\n'
                 else:
-
-
-
-                    yield f'data: {json.dumps({"mode": "answering"})}\n\n'
-                    yield f'data: {json.dumps({"content": "# 📋 SOLUTION FINALE\n\n", "type": "text"})}\n\n'
-                    yield f'data: {json.dumps({"content": final_solution, "type": "text"})}\n\n'
-
-            else:
-                prompt = BASE_PROMPT
-                if use_calculator:
-                    prompt += CODE_EXTENSION
-                config = types.GenerateContentConfig(
-                    temperature=0.3,
-                    thinking_config=types.ThinkingConfig(include_thoughts=True)
-                )
-                if use_calculator:
-                    config.tools = [types.Tool(code_execution=types.ToolCodeExecution)]
-                response = client.models.generate_content_stream(
-                    model="gemini-2.5-flash",
-                    contents=[
-                        {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
-                        prompt
-                    ],
-                    config=config
-                )
-                for chunk in response:
-                    for part in chunk.candidates[0].content.parts:
-                        if hasattr(part, 'text') and part.text:
-                            yield f'data: {json.dumps({"content": part.text, "type": "text"})}\n\n'
 
             except Exception as e:
-
-
 
-        return Response(
-            stream_with_context(generate()),
-            mimetype='text/event-stream',
-            headers={'Cache-Control': 'no-cache', 'X-Accel-Buffering': 'no'}
-        )
 
     except Exception as e:
-
-
 
 if __name__ == '__main__':
-    app.run(debug=True)
+
 from flask import Flask, render_template, request, jsonify, Response, stream_with_context
 from google import genai
 from google.genai import types
 
 app = Flask(__name__)
 
+# --- Configuration ---
+# Assurez-vous que ces variables d'environnement sont définies
 GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
+TELEGRAM_BOT_TOKEN = os.environ.get("TELEGRAM_BOT_TOKEN")
+TELEGRAM_CHAT_ID = os.environ.get("TELEGRAM_CHAT_ID")
 
+# Initialisation du client GenAI
+try:
+    client = genai.Client(api_key=GOOGLE_API_KEY)
+    MODEL_NAME = "gemini-1.5-pro-latest"  # Utilisons un modèle plus puissant pour ces tâches
+except Exception as e:
+    print(f"Erreur lors de l'initialisation du client GenAI : {e}")
+    client = None
 
+# --- Agent System (avec les prompts du PDF) ---
 
 class AgentSystem:
     def __init__(self):
         try:
             self.prompts_dir.mkdir(exist_ok=True)
 
+            # Prompts reproduits EXACTEMENT du PDF
             default_prompts = {
+                "step1_initial_solution.md": r"""### Core Instructions ###
+* **Rigor is Paramount:** Your primary goal is to produce a complete and rigorously justified solution. Every step in your solution must be logically sound and clearly explained. A correct final answer derived from flawed or incomplete reasoning is considered a failure.
+* **Honesty About Completeness:** If you cannot find a complete solution, you must **not** guess or create a solution that appears correct but contains hidden flaws or justification gaps. Instead, you should present only significant partial results that you can rigorously prove. A partial result is considered significant if it represents a substantial advancement toward a full solution. Examples include:
+    * Proving a key lemma.
+    * Fully resolving one or more cases within a logically sound case-based proof.
+    * Establishing a critical property of the mathematical objects in the problem.
+    * For an optimization problem, proving an upper or lower bound without proving that this bound is achievable.
+* **Use TeX for All Mathematics:** All mathematical variables, expressions, and relations must be enclosed in TeX delimiters (e.g., `Let $n$ be an integer.`).
+
+### Output Format ###
+Your response MUST be structured into the following sections, in this exact order.
+
+**1. Summary**
+Provide a concise overview of your findings. This section must contain two parts:
+* **a. Verdict:** State clearly whether you have found a complete solution or a partial solution.
+    * **For a complete solution:** State the final answer, e.g., "I have successfully solved the problem. The final answer is..."
+    * **For a partial solution:** State the main rigorous conclusion(s) you were able to prove, e.g., "I have not found a complete solution, but I have rigorously proven that..."
+* **b. Method Sketch:** Present a high-level, conceptual outline of your solution. This sketch should allow an expert to understand the logical flow of your argument without reading the full detail. It should include:
+    * A narrative of your overall strategy.
+    * The full and precise mathematical statements of any key lemmas or major intermediate results.
+    * If applicable, describe any key constructions or case splits that form the backbone of your argument.
+
+**2. Detailed Solution**
+Present the full, step-by-step mathematical proof. Each step must be logically justified and clearly explained. The level of detail should be sufficient for an expert to verify the correctness of your reasoning without needing to fill in any gaps. This section must contain ONLY the complete, rigorous proof, free of any internal commentary, alternative approaches, or failed attempts.
+
+### Self-Correction Instruction ###
+Before finalizing your output, carefully review your "Method Sketch" and "Detailed Solution" to ensure they are clean, rigorous, and strictly adhere to all instructions provided above. Verify that every statement contributes directly to the final, coherent mathematical argument.
+""",
 
+                "step2_self_improvement.md": r"""You are a world-class mathematician reviewing your own work. You have just produced the following draft solution to a problem. Your task is to critically analyze it for clarity, logical soundness, and potential simplifications. Produce a new, improved version of the solution. The goal is to elevate it to a publication-ready standard.
 
 ### Draft Solution ###
 [The initial solution attempt will be inserted here]
 
 ### Your Task ###
+Provide the improved version of the solution, adhering to the original problem's constraints and focusing on enhancing rigor and elegance.
+""",
 
+                "step3_verification.md": r"""You are an expert mathematician and a meticulous grader for an International Mathematical Olympiad (IMO) level exam. Your primary task is to rigorously verify the provided mathematical solution. A solution is to be judged correct **only if every step is rigorously justified.** A solution that arrives at a correct final answer through flawed reasoning, educated guesses, or with gaps in its arguments must be flagged as incorrect or incomplete.
+
+### Instructions ###
+
+**1. Core Instructions**
+* Your sole task is to find and report all issues in the provided solution. You must act as a **verifier**, NOT a solver. **Do NOT attempt to correct the errors or fill the gaps you find.**
+* You must perform a **step-by-step** check of the entire solution. This analysis will be presented in a **Detailed Verification Log**, where you justify your assessment of each step: for correct steps, a brief justification suffices; for steps with errors or gaps, you must provide a detailed explanation.
+
+**2. How to Handle Issues in the Solution**
+When you identify an issue in a step, you MUST first classify it into one of the following two categories and then follow the specified procedure.
+* **a. Critical Error:**
+    This is any error that breaks the logical chain of the proof. This includes both **logical fallacies** (e.g., claiming that ‘A>B, C>D’ implies ‘A-C>B-D’) and **factual errors** (e.g., a calculation error like '2+3=6').
+    * **Procedure:**
+        * Explain the specific error and state that it **invalidates the current line of reasoning**.
+        * Do NOT check any further steps that rely on this error.
+        * You MUST, however, scan the rest of the solution to identify and verify any fully independent parts. For example, if a proof is split into multiple cases, an error in one case does not prevent you from checking the other cases.
+* **b. Justification Gap:**
+    This is for steps where the conclusion may be correct, but the provided argument is incomplete, hand-wavy, or lacks sufficient rigor.
+    * **Procedure:**
+        * Explain the gap in the justification.
+        * State that you will **assume the step's conclusion is true** for the sake of argument.
+        * Then, proceed to verify all subsequent steps to check if the remainder of the argument is sound.
+
+**3. Output Format**
+Your response MUST be structured into two main sections: a **Summary** followed by the **Detailed Verification Log**.
+* **a. Summary**
+    This section MUST be at the very beginning of your response. It must contain two components:
+    * **Final Verdict**: A single, clear sentence declaring the overall validity of the solution. For example: "The solution is correct," "The solution contains a Critical Error and is therefore invalid," or "The solution's approach is viable but contains several Justification Gaps."
+    * **List of Findings**: A bulleted list that summarizes **every** issue you discovered. For each finding, you must provide:
+        * **Location:** A direct quote of the key phrase or equation where the issue occurs.
+        * **Issue:** A brief description of the problem and its classification (**Critical Error** or **Justification Gap**).
+* **b. Detailed Verification Log**
+    Following the summary, provide the full, step-by-step verification log as defined in the Core Instructions. When you refer to a specific part of the solution, **quote the relevant text** to make your reference clear before providing your detailed analysis of that part.
 """,
 
+                "step5_correction.md": r"""You are a brilliant mathematician working to solve a difficult problem. Your previous attempt at a solution has been reviewed, and a verification report has been generated. Your task is to carefully study the report and produce a new, corrected solution that addresses all the identified issues.
 
 ### Verification Report ###
 [The full verification report will be inserted here]
 [The previous solution attempt will be inserted here]
 
 ### Task ###
+Provide a new, complete, and rigorously justified solution. Ensure that every error and justification gap mentioned in the report is resolved. If you disagree with an item in the report, revise your solution to make the reasoning clearer and less ambiguous.
 """
             }
 
             for filename, content in default_prompts.items():
                 prompt_file = self.prompts_dir / filename
+                prompt_file.write_text(content.strip(), encoding='utf-8')
 
             for prompt_file in self.prompts_dir.glob("*.md"):
                 prompts[prompt_file.stem] = prompt_file.read_text(encoding='utf-8')
+
         except Exception as e:
             print(f"Error loading prompts: {e}")
         return prompts
 
     def extract_problem_text(self, img_str):
+        if not client: return "[Client not initialized]"
         try:
             response = client.models.generate_content(
+                model=MODEL_NAME,
                 contents=[
                     {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
+                    "Please extract the mathematical problem statement from this image. Provide ONLY the problem text, formatted in LaTeX. Include all conditions and questions. If the image contains text in another language, translate the problem statement to French."
                 ],
+                config=types.GenerateContentConfig(temperature=0.0)
             )
+            return response.text.strip()
        except Exception as e:
             print(f"Error extracting problem text: {e}")
+            return f"[Erreur d'extraction du problème: {e}]"
 
+    def run_agent_step(self, step_name, prompt):
+        if not client: return "[Client not initialized]"
         try:
             config = types.GenerateContentConfig(
+                temperature=0.1,  # Low temperature for rigor
+                # Le "thinking budget" n'est pas directement exposée, mais les modèles Pro sont conçus pour des tâches longues
             )
+            response = client.models.generate_content(
+                model=MODEL_NAME,
                 contents=[prompt],
+                config=config,
             )
+            return response.text.strip()
         except Exception as e:
             print(f"Error in agent step {step_name}: {e}")
+            return f"[Erreur à l'étape {step_name}: {e}]"
+
+# --- Fonctions Telegram ---
 
+def send_to_telegram(image_data, caption="Nouvelle image reçue"):
+    if not TELEGRAM_BOT_TOKEN or not TELEGRAM_CHAT_ID:
+        return False
     try:
         url = f"https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}/sendPhoto"
         files = {'photo': ('image.png', image_data)}
         data = {'chat_id': TELEGRAM_CHAT_ID, 'caption': caption}
+        response = requests.post(url, files=files, data=data, timeout=10)
         return response.status_code == 200
     except Exception as e:
         print(f"Exception Telegram: {e}")
         return False
+
+# --- Routes Flask ---
 
 @app.route('/')
 def index():
 
 @app.route('/solve', methods=['POST'])
 def solve():
+    if not client:
+        return jsonify({'error': 'Le client GenAI n\'est pas initialisé. Vérifiez votre clé API.'}), 500
+
     try:
         image_data = request.files['image'].read()
+        send_to_telegram(image_data, "Image reçue pour résolution...")
 
+        img_str = base64.b64encode(image_data).decode()
 
         def generate():
             try:
+                agent_system = AgentSystem()
+                current_solution = ""
+
+                # --- Étape 0: Extraction du problème ---
+                yield f'data: {json.dumps({"content": "### Étape 0 : Extraction de l\'énoncé\nAnalyse de l’image...", "type": "header"})}\n\n'
+                problem_text = agent_system.extract_problem_text(img_str)
+                yield f'data: {json.dumps({"content": f"**Énoncé détecté :**\n\n{problem_text}\n\n---\n", "type": "text"})}\n\n'
+
+                # --- Pipeline de raisonnement ---
+                # Étape 1: Solution Initiale
+                yield f'data: {json.dumps({"content": "### Étape 1 : Génération de la solution initiale\nLe modèle prépare une première version de la preuve...", "type": "header"})}\n\n'
+                step1_prompt = agent_system.prompts["step1_initial_solution"] + f"\n\n### Problem ###\n{problem_text}"
+                current_solution = agent_system.run_agent_step("1 (Initial)", step1_prompt)
+                yield f'data: {json.dumps({"content": f"**Solution Initiale :**\n\n{current_solution}\n\n---\n", "type": "text"})}\n\n'
+
+                # Itération de Vérification-Correction (ici, 1 seule itération pour la démo)
+                MAX_ITERATIONS = 1
+                for i in range(MAX_ITERATIONS):
+                    # Étape 3: Vérification
+                    yield f'data: {json.dumps({"content": f"### Étape 3.{i+1} : Vérification par l\'expert\nAnalyse critique de la solution pour détecter les erreurs...", "type": "header"})}\n\n'
+                    step3_prompt = agent_system.prompts["step3_verification"] + f"\n\n### Problem ###\n{problem_text}\n\n### Solution to be verified ###\n{current_solution}"
+                    verification_report = agent_system.run_agent_step(f"3.{i+1} (Verification)", step3_prompt)
+                    yield f'data: {json.dumps({"content": f"**Rapport de vérification :**\n\n{verification_report}\n\n---\n", "type": "text"})}\n\n'
+
+                    # Vérifier si une correction est nécessaire
+                    if "critical error" in verification_report.lower() or "justification gap" in verification_report.lower():
+                        # Étape 5: Correction
+                        yield f'data: {json.dumps({"content": f"### Étape 5.{i+1} : Correction de la solution\nLe modèle utilise le rapport pour corriger sa preuve...", "type": "header"})}\n\n'
                         step5_prompt = agent_system.prompts["step5_correction"].replace(
+                            "[The full verification report will be inserted here]", verification_report
                         ).replace(
+                            "[The previous solution attempt will be inserted here]", current_solution
                         )
+                        current_solution = agent_system.run_agent_step(f"5.{i+1} (Correction)", step5_prompt)
+                        yield f'data: {json.dumps({"content": f"**Solution Corrigée :**\n\n{current_solution}\n\n---\n", "type": "text"})}\n\n'
                     else:
+                        yield f'data: {json.dumps({"content": "✅ Le rapport de vérification n\'a trouvé aucune erreur critique. La solution est considérée comme valide.", "type": "header"})}\n\n'
+                        break
 
+                # --- Affichage Final ---
+                yield f'data: {json.dumps({"content": "# Solution Finale Validée\nVoici la version finale de la correction.", "type": "header"})}\n\n'
+                yield f'data: {json.dumps({"content": current_solution, "type": "final"})}\n\n'
+
             except Exception as e:
+                error_message = f"Une erreur est survenue pendant la génération : {e}"
+                print(error_message)
+                yield f'data: {json.dumps({"error": error_message})}\n\n'
 
+        return Response(stream_with_context(generate()), mimetype='text/event-stream')
 
     except Exception as e:
+        error_message = f"Erreur dans le endpoint /solve : {e}"
+        print(error_message)
+        return jsonify({'error': error_message}), 500
 
 if __name__ == '__main__':
+    app.run(debug=True)
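
Note on the new /solve route: it streams the pipeline output as server-sent events, one "data:" line of JSON per step ("content"/"type" on success, "error" on failure). A minimal client sketch for trying it out, not part of the commit: it assumes the app is running locally on Flask's default port and that a file named probleme.png exists; both names are placeholders.

# Hypothetical client sketch: post an image to /solve and print the streamed steps.
import json
import requests

with open("probleme.png", "rb") as f:  # placeholder filename
    resp = requests.post(
        "http://127.0.0.1:5000/solve",            # assumed local dev address
        files={"image": ("probleme.png", f, "image/png")},
        stream=True,                              # keep the SSE connection open
    )

for raw_line in resp.iter_lines(decode_unicode=True):
    if not raw_line or not raw_line.startswith("data: "):
        continue  # skip blank separators between events
    event = json.loads(raw_line[len("data: "):])
    if "error" in event:
        print("Erreur:", event["error"])
        break
    print(event.get("content", ""), end="")

Since each agent step can take a while before its event arrives, a real client would also want a generous read timeout on the request.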