AxL95 committed
Commit c5d86f7 · verified · 1 Parent(s): a9df640

Upload 18 files

frontend/src/components/ChatInterface.jsx CHANGED
@@ -1,6 +1,6 @@
 import React, { useState, useRef, useEffect } from 'react';
-import axios from 'axios';
 import ReactMarkdown from 'react-markdown';
+import { InferenceClient } from "@huggingface/inference";
 import Avatar from './Avatar.jsx';
 import '../App.css';
 
@@ -8,8 +8,10 @@ const ChatInterface = ({ messages = [], setMessages = () => {}, onMessageSent =
   const [inputMessage, setInputMessage] = useState('');
   const [isLoading, setIsLoading] = useState(false);
   const messagesEndRef = useRef(null);
-  const textareaRef = useRef(null);
-
+  const textareaRef = useRef(null);
+
+  // Initialise the Hugging Face client (the token should ideally come from a .env file)
+  const hfClient = new InferenceClient(process.env.REACT_APP_HF_TOKEN || "your_token_here");
 
   const isMarkdown = (text) => /[#*_>`-]/.test(text);
 
@@ -17,7 +19,6 @@ const ChatInterface = ({ messages = [], setMessages = () => {}, onMessageSent =
     messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
   };
 
-
   useEffect(() => {
     scrollToBottom();
   }, [messages]);
@@ -28,11 +29,27 @@ const ChatInterface = ({ messages = [], setMessages = () => {}, onMessageSent =
 
       onMessageSent(message);
 
-      const response = await axios.post('https://15af0837fca124cf6d.gradio.live/gradio_api/run/predict', {
-        data: [message, null],
-        fn_index: 0
+      // Call the Mistral model through Hugging Face
+      const chatCompletion = await hfClient.chatCompletion({
+        provider: "together", // "novita" or other providers can also be used
+        model: "mistralai/Mistral-7B-Instruct-v0.3",
+        messages: [
+          {
+            role: "system",
+            content: "You are an expert medical assistant specialised exclusively in schizophrenia. Answer only questions about schizophrenia. If the question concerns another medical topic, politely decline to answer."
+          },
+          {
+            role: "user",
+            content: message
+          }
+        ],
+        max_tokens: 512,
+        temperature: 0.7,
       });
-      const botResponse = response.data.data[0];
+
+      // Extract the response text
+      const botResponse = chatCompletion.choices[0].message.content;
+
       setMessages(prev => [
         ...prev,
         { sender: 'user', text: message },
@@ -50,6 +67,8 @@ const ChatInterface = ({ messages = [], setMessages = () => {}, onMessageSent =
     }
   };
 
+
+
   const handleSubmit = (e) => {
     e.preventDefault();
     if (inputMessage.trim() === '') return;
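
For reference, a minimal standalone sketch of the call pattern this commit introduces (not part of the committed file). It assumes @huggingface/inference is installed and that a token is available as HF_TOKEN (the component itself reads process.env.REACT_APP_HF_TOKEN); the provider, model and generation parameters simply mirror the diff above.

import { InferenceClient } from "@huggingface/inference";

// Hypothetical token source for this sketch only
const client = new InferenceClient(process.env.HF_TOKEN);

// Ask the same Mistral model a question, using the parameters from the diff
async function askSchizophreniaAssistant(question) {
  const completion = await client.chatCompletion({
    provider: "together",
    model: "mistralai/Mistral-7B-Instruct-v0.3",
    messages: [
      { role: "system", content: "You are a medical assistant specialised exclusively in schizophrenia." },
      { role: "user", content: question },
    ],
    max_tokens: 512,
    temperature: 0.7,
  });
  // Same extraction path used in ChatInterface.jsx
  return completion.choices[0].message.content;
}

askSchizophreniaAssistant("What are the early warning signs?").then(console.log);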