Update chat.py
chat.py (CHANGED)
@@ -16,6 +16,40 @@ db=get_db()
 conversation_history = {}
 hf_client = InferenceClient(token=HF_TOKEN)

 try:
     from langchain_community.embeddings import HuggingFaceEmbeddings
     embedding_model = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL)
@@ -67,6 +101,14 @@ async def chat(request: Request):
     conversation_id = data.get("conversation_id")
     skip_save = data.get("skip_save", False)


     if not skip_save and conversation_id and current_user:
         db.messages.insert_one({
@@ -77,17 +119,6 @@ async def chat(request: Request):
             "timestamp": datetime.utcnow()
         })

-
-
-    if not user_message:
-        raise HTTPException(status_code=400, detail="Le champ 'message' est requis.")
-
-    current_user = None
-    try:
-        current_user = await get_current_user(request)
-    except HTTPException:
-        pass
-
     current_tokens = 0
     message_tokens = 0
     if current_user and conversation_id:
@@ -98,17 +129,19 @@ async def chat(request: Request):
         if conv:
             current_tokens = conv.get("token_count", 0)
             message_tokens = int(len(user_message.split()) * 1.3)
-    MAX_TOKENS = 2000
     if current_tokens + message_tokens > MAX_TOKENS:
         return JSONResponse({
             "error": "token_limit_exceeded",
-            "message":
             "tokens_used": current_tokens,
             "tokens_limit": MAX_TOKENS
         }, status_code=403)

-
-
     is_history_question = any(
         phrase in user_message.lower()
         for phrase in [
@@ -147,84 +180,56 @@ async def chat(request: Request):
                 "ce que j'ai demandé", "j'ai dit quoi", "quelles questions",
                 "c'était quoi ma", "quelle était ma", "mes questions"
             ]) or re.search(r"(?:quelle|quelles|quoi).*?(\d+)[a-z]{2}.*?question", q_text.lower()) \
-
-
-
-
             if not is_meta:
                 actual_questions.append(q_text)

         if not actual_questions:
-
-
-            })

-
-
-
-            })

-
-
-
-
-
-
-
-
-
-
-
-
-            })

-        match_nth = re.search(r"(?:quelle|quelles|quoi).*?(\d+)[a-z]{2}.*?question", user_message.lower())
-        if match_nth:
-            try:
-                question_number = int(match_nth.group(1))
-                if 0 < question_number <= len(actual_questions):
-                    return JSONResponse({
-                        "response": f"Votre {question_number}{'ère' if question_number == 1 else 'ème'} question était : « {actual_questions[question_number-1]} »"
-                    })
-                else:
-                    return JSONResponse({
-                        "response": f"Vous n'avez pas encore posé {question_number} questions dans cette conversation."
-                    })
-            except:
-                pass
-
-        question_number = None
-        if any(p in user_message.lower() for p in ["deuxième question", "2ème question", "2eme question", "seconde question"]):
-            question_number = 2
-        else:
-            match = re.search(r'(\d+)[eèiéê]*m*e* question', user_message.lower())
-            if match:
-                try:
-                    question_number = int(match.group(1))
-                except:
-                    pass
-
-        if question_number is not None:
-            if 0 < question_number <= len(actual_questions):
-                suffix = "ère" if question_number == 1 else "ème"
-                return JSONResponse({
-                    "response": f"Votre {question_number}{suffix} question était : « {actual_questions[question_number-1]} »"
-                })
             else:
-
-
-

-
-
-
-
-        else:
-            question_list = "\n".join([f"{i+1}. {q}" for i, q in enumerate(actual_questions)])
-            return JSONResponse({
-                "response": f"Voici les questions que vous avez posées dans cette conversation :\n\n{question_list}"
-            })

     context = None
     if not is_history_question and embedding_model:
@@ -232,18 +237,19 @@ async def chat(request: Request):
     if context and conversation_id:
         conversation_history[conversation_id].append(f"Contexte : {context}")

-    if conversation_id:
-        conversation_history[conversation_id].append(f"Question : {user_message}")
-
     system_prompt = (
-
-
-
-
-
-
-
-

     enriched_context = ""

@@ -295,72 +301,104 @@ async def chat(request: Request):

     if conversation_id and len(conversation_history.get(conversation_id, [])) > 0:
         history = conversation_history[conversation_id]
-
-
-
-
-

-
-
-

     messages.append({"role": "user", "content": user_message})

-
-        completion = hf_client.chat.completions.create(
-            model="mistralai/Mistral-7B-Instruct-v0.3",
-            messages=messages,
-            max_tokens=1024,
-            temperature=0.7
-        )
-        bot_response = completion.choices[0].message["content"].strip()
-        if bot_response.endswith((".", "!", "?")) == False and len(bot_response) > 500:
-            bot_response += "\n\n(Note: Ma réponse a été limitée par des contraintes de taille. N'hésitez pas à me demander de poursuivre si vous souhaitez plus d'informations.)"
-    except Exception:
         try:
-
             model="mistralai/Mistral-7B-Instruct-v0.3",
-
-
-            temperature=0.7
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            "sender": "bot",
-            "text": bot_response,
-            "timestamp": datetime.utcnow()
-        })

-
-
-
-        "
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
 conversation_history = {}
 hf_client = InferenceClient(token=HF_TOKEN)

+
+def save_bot_response(conversation_id, current_user, text, current_tokens=0, message_tokens=0):
+    if not conversation_id or not current_user:
+        print(" Impossible de sauvegarder la réponse")
+        return None
+
+    try:
+        message_id = db.messages.insert_one({
+            "conversation_id": conversation_id,
+            "user_id": str(current_user["_id"]),
+            "sender": "bot",
+            "text": text,
+            "timestamp": datetime.utcnow()
+        }).inserted_id
+
+        response_tokens = int(len(text.split()) * 1.3) if text else 0
+        total_tokens = current_tokens + message_tokens + response_tokens
+
+        db.conversations.update_one(
+            {"_id": ObjectId(conversation_id)},
+            {"$set": {
+                "last_message": text[:100] + ("..." if len(text) > 100 else ""),
+                "updated_at": datetime.utcnow(),
+                "token_count": total_tokens
+            }}
+        )
+
+        print(f"Réponse du bot sauvegardée : {message_id}")
+        return message_id
+    except Exception as e:
+        print(f" Erreur: {str(e)}")
+        return None
+
+
 try:
     from langchain_community.embeddings import HuggingFaceEmbeddings
     embedding_model = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL)
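
Worth noting: the helper estimates token usage with a simple word-count heuristic rather than a real tokenizer, so the stored token_count is approximate. A quick sketch of the arithmetic, for illustration only (the sample text is arbitrary):

# Rough estimate used throughout this commit: ~1.3 tokens per whitespace-separated word.
text = "Je suis désolé, je rencontre actuellement des difficultés techniques"
estimated = int(len(text.split()) * 1.3)   # 9 words -> int(11.7) -> 11
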
     conversation_id = data.get("conversation_id")
     skip_save = data.get("skip_save", False)

+    if not user_message:
+        raise HTTPException(status_code=400, detail="Le champ 'message' est requis.")
+
+    current_user = None
+    try:
+        current_user = await get_current_user(request)
+    except HTTPException:
+        pass

     if not skip_save and conversation_id and current_user:
         db.messages.insert_one({
             "timestamp": datetime.utcnow()
         })

     current_tokens = 0
     message_tokens = 0
     if current_user and conversation_id:
         if conv:
             current_tokens = conv.get("token_count", 0)
             message_tokens = int(len(user_message.split()) * 1.3)
     if current_tokens + message_tokens > MAX_TOKENS:
+        error_message = "⚠️ **Limite de taille de conversation atteinte**\n\nCette conversation est devenue trop longue. Pour continuer à discuter, veuillez créer une nouvelle conversation."
+
+        if conversation_id and current_user:
+            save_bot_response(conversation_id, current_user, error_message, current_tokens, message_tokens)
+
         return JSONResponse({
             "error": "token_limit_exceeded",
+            "message": error_message,
             "tokens_used": current_tokens,
             "tokens_limit": MAX_TOKENS
         }, status_code=403)

     is_history_question = any(
         phrase in user_message.lower()
         for phrase in [
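
For reference, a small client-side sketch of what the token-limit branch above returns. It assumes the route is mounted at /chat and that the `requests` package is available; neither assumption comes from this commit, and the conversation_id value is a placeholder:

# Sketch only: shape of the 403 payload produced when the conversation exceeds MAX_TOKENS.
import requests

resp = requests.post("http://localhost:8000/chat",
                     json={"message": "Bonjour", "conversation_id": "..."})
if resp.status_code == 403 and resp.json().get("error") == "token_limit_exceeded":
    print(resp.json()["message"])                                  # the Markdown warning defined above
    print(resp.json()["tokens_used"], "/", resp.json()["tokens_limit"])
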
                 "ce que j'ai demandé", "j'ai dit quoi", "quelles questions",
                 "c'était quoi ma", "quelle était ma", "mes questions"
             ]) or re.search(r"(?:quelle|quelles|quoi).*?(\d+)[a-z]{2}.*?question", q_text.lower()) \
+                or re.search(r"derni[eè]re question", q_text.lower()) \
+                or re.search(r"premi[eè]re question", q_text.lower()) \
+                or re.search(r"question pr[eé]c[eé]dente", q_text.lower()) \
+                or re.search(r"(toutes|liste|quelles|quoi).*questions", q_text.lower())
             if not is_meta:
                 actual_questions.append(q_text)

+        history_response = ""
+
         if not actual_questions:
+            history_response = "Vous n'avez pas encore posé de question dans cette conversation. C'est notre premier échange."
+        else:

+            if any(phrase in user_message.lower() for phrase in ["question précédente", "dernière question"]) and len(actual_questions) > 1:
+                prev_question = actual_questions[-1] if actual_questions else "Aucune question précédente trouvée."
+                history_response = f"**Votre question précédente était :**\n\n\"{prev_question}\""

+            elif any(phrase in user_message.lower() for phrase in ["première question", "1ère question", "1ere question"]):
+                first_question = actual_questions[0] if actual_questions else "Aucune première question trouvée."
+                history_response = f"**Votre première question était :**\n\n\"{first_question}\""
+
+            elif re.search(r"(\d+)[èeme]{1,3}", user_message.lower()):
+                match = re.search(r"(\d+)[èeme]{1,3}", user_message.lower())
+                if match:
+                    question_num = int(match.group(1))
+                    if 0 < question_num <= len(actual_questions):
+                        specific_question = actual_questions[question_num-1]
+                        history_response = f"**Votre question n°{question_num} était :**\n\n\"{specific_question}\""
+                    else:
+                        history_response = f"Je ne trouve pas de question n°{question_num} dans notre conversation. Vous n'avez posé que {len(actual_questions)} question(s)."

             else:
+                history_response = "**Voici les questions que vous avez posées dans cette conversation :**\n\n"
+                for i, question in enumerate(actual_questions, 1):
+                    history_response += f"{i}. {question}\n"
+
+                if len(actual_questions) > 3:
+                    history_response += f"\nVous avez posé {len(actual_questions)} questions dans cette conversation."
+
+        if conversation_id:
+            conversation_history[conversation_id].append(f"Réponse : {history_response}")
+
+        if conversation_id and current_user:
+            save_bot_response(conversation_id, current_user, history_response, current_tokens, message_tokens)
+            print(f"Réponse à la question d'historique sauvegardée pour conversation {conversation_id}")

+        return JSONResponse({"response": history_response})
+
+    if conversation_id:
+        conversation_history[conversation_id].append(f"Question : {user_message}")

     context = None
     if not is_history_question and embedding_model:
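
A small sketch of how the n-th-question pattern used above resolves against a typical message; the sample string is illustrative only:

# Same pattern as in the branch above: digits followed by an ordinal suffix such as "eme"/"ème".
import re

sample = "c'était quoi ma 2eme question ?"
match = re.search(r"(\d+)[èeme]{1,3}", sample.lower())
if match:
    print(int(match.group(1)))   # -> 2, i.e. actual_questions[1] would be returned
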
     if context and conversation_id:
         conversation_history[conversation_id].append(f"Contexte : {context}")

     system_prompt = (
+        "Tu es un chatbot spécialisé dans la santé mentale, et plus particulièrement la schizophrénie. "
+        "Tu réponds de façon fiable, claire et empathique, en t'appuyant uniquement sur des sources médicales et en français. "
+        "IMPORTANT: Fais particulièrement attention aux questions de suivi. Si l'utilisateur pose une question qui ne précise "
+        "pas clairement le sujet mais qui fait suite à votre échange précédent, comprends que cette question fait référence "
+        "au contexte de la conversation précédente. Par exemple, si l'utilisateur demande 'Comment les traite-t-on?' après "
+        "avoir parlé des symptômes positifs de la schizophrénie, ta réponse doit porter spécifiquement sur le traitement "
+        "des symptômes positifs, et non sur la schizophrénie en général. IMPORTANT: Vise tes réponses sous forme de Markdown. "
+        "IMPÉRATIF: Structure tes réponses en Markdown, utilisant **des gras** pour les points importants, "
+        "des titres avec ## pour les sections principales, des listes à puces avec * pour énumérer des points, "
+        "et > pour les citations importantes. Cela rend ton contenu plus facile à lire et à comprendre."
+
+    )

     enriched_context = ""


     if conversation_id and len(conversation_history.get(conversation_id, [])) > 0:
         history = conversation_history[conversation_id]
+
+        user_messages = []
+        bot_messages = []
+
+        for i in range(len(history)):
+            if i < len(history) and history[i].startswith("Question :"):
+                user_text = history[i].replace("Question : ", "")
+                user_messages.append(user_text)

+            if i+1 < len(history) and history[i+1].startswith("Réponse :"):
+                bot_text = history[i+1].replace("Réponse : ", "")
+                bot_messages.append(bot_text)
+
+        valid_pairs = min(len(user_messages), len(bot_messages))
+
+        for i in range(valid_pairs):
+            messages.append({"role": "user", "content": user_messages[i]})
+            messages.append({"role": "assistant", "content": bot_messages[i]})

     messages.append({"role": "user", "content": user_message})

+    async def generate_stream():
         try:
+            collected_response = ""
+
+            yield "data: {\"type\": \"start\"}\n\n"
+
+            completion_stream = hf_client.chat.completions.create(
                 model="mistralai/Mistral-7B-Instruct-v0.3",
+                messages=messages,
+                max_tokens=1024,
+                temperature=0.7,
+                stream=True
             )
+            chunk_buffer = ""
+            chunk_count = 0
+            MAX_CHUNKS_BEFORE_SEND = 3
+            for chunk in completion_stream:
+                if chunk.choices and chunk.choices[0].delta.content:
+                    content = chunk.choices[0].delta.content
+                    collected_response += content
+                    chunk_buffer += content
+                    chunk_count += 1
+
+                    if chunk_count >= MAX_CHUNKS_BEFORE_SEND or '\n' in content:
+                        yield f"data: {json.dumps({'content': chunk_buffer})}\n\n"
+                        chunk_buffer = ""
+                        chunk_count = 0

+            if chunk_buffer:
+                yield f"data: {json.dumps({'content': chunk_buffer})}\n\n"
+
+            if collected_response.endswith((".", "!", "?")) == False and len(collected_response) > 500:
+                suffix = "\n\n(Note: Ma réponse a été limitée par des contraintes de taille. N'hésitez pas à me demander de poursuivre si vous souhaitez plus d'informations.)"
+                collected_response += suffix
+                yield f"data: {json.dumps({'content': suffix})}\n\n"
+
+            if conversation_id:
+                conversation_history[conversation_id].append(f"Réponse : {collected_response}")
+
+                if len(conversation_history[conversation_id]) > 50:
+                    conversation_history[conversation_id] = conversation_history[conversation_id][-50:]
+
+            if conversation_id and current_user:
+                save_bot_response(conversation_id, current_user, collected_response, current_tokens, message_tokens)
+
+            yield "data: {\"type\": \"end\"}\n\n"
+
+        except Exception as e:
+            error_message = str(e)
+            print(f"❌ Streaming error: {error_message}")
+
+            try:
+                fallback = hf_client.text_generation(
+                    model="mistralai/Mistral-7B-Instruct-v0.3",
+                    prompt=f"<s>[INST] {system_prompt}\n\nQuestion: {user_message} [/INST]",
+                    max_new_tokens=512,
+                    temperature=0.7
+                )
+                yield f"data: {json.dumps({'content': fallback})}\n\n"
+
+                if conversation_id:
+                    conversation_history[conversation_id].append(f"Réponse : {fallback}")
+
+                if conversation_id and current_user:
+                    save_bot_response(conversation_id, current_user, fallback, current_tokens, message_tokens)
+
+            except Exception as fallback_error:
+                print(f" Erreur: {str(fallback_error)}")
+                error_response = "Je suis désolé, je rencontre actuellement des difficultés techniques"
+                yield f"data: {json.dumps({'content': error_response})}\n\n"
+
+                if conversation_id and current_user:
+                    save_bot_response(conversation_id, current_user, error_response, current_tokens, message_tokens)
+
+            yield "data: {\"type\": \"end\"}\n\n"
+
+    return StreamingResponse(
+        generate_stream(),
+        media_type="text/event-stream"
+    )
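
Finally, a minimal consumer sketch for the new streaming response. It assumes the route is mounted at /chat and that the `requests` package is available; neither assumption is part of this commit:

# Reads the text/event-stream produced by generate_stream() above.
import json
import requests

with requests.post("http://localhost:8000/chat",
                   json={"message": "Quels sont les symptômes positifs ?"},
                   stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue
        payload = json.loads(line[len("data: "):])
        if payload.get("type") in ("start", "end"):
            continue                                  # framing events, no content
        print(payload.get("content", ""), end="", flush=True)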