sakthi07 committed
Commit 1079835 · verified · 1 Parent(s): 9935eea

Update streamlit_app.py

Files changed (1)
  1. streamlit_app.py +92 -98
streamlit_app.py CHANGED
@@ -1,98 +1,92 @@
- # app.py
- import os
-
-
- # Make Streamlit write config locally (avoids PermissionError in Spaces)
- os.environ["STREAMLIT_BROWSER_GATHER_USAGE_STATS"] = "false" # disable usage stats
- os.environ["STREAMLIT_CONFIG_DIR"] = os.getcwd() # store Streamlit configs locally
-
- # os.environ["STREAMLIT_CONFIG_DIR"] = ".streamlit"
- os.environ["STREAMLIT_LOG_FOLDER"] = ".streamlit"
- os.environ["STREAMLIT_BROWSER_GATHER_USAGE_STATS"] = "false"
-
-
- import streamlit as st
- # from dotenv import load_dotenv
- from langchain.chains import ConversationalRetrievalChain
- from langchain.memory import ConversationBufferMemory
- from langchain_community.vectorstores import FAISS
- from langchain.embeddings.openai import OpenAIEmbeddings
- from langchain.chat_models import ChatOpenAI
-
- # ------------------ Load environment variables ------------------
- # load_dotenv()
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
-
- # ------------------ Paths ------------------
- VECTORSTORE_PATH = os.path.join("storage", "faiss_index") # folder containing index.faiss and index.pkl
-
- # ------------------ Load vectorstore ------------------
- @st.cache_resource
- def load_vectorstore(path):
-     if not os.path.exists(path):
-         st.error(f"FAISS index not found at {path}. Please run ingest.py first.")
-         return None
-     embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
-     vectorstore = FAISS.load_local(
-         path,
-         embeddings,
-         allow_dangerous_deserialization=True
-     )
-     return vectorstore
-
- vectorstore = load_vectorstore(VECTORSTORE_PATH)
- if vectorstore is None:
-     st.stop()
-
- # ------------------ Load LLM ------------------
- @st.cache_resource
- def load_llm():
-     llm = ChatOpenAI(
-         model_name="gpt-3.5-turbo",
-         temperature=0,
-         openai_api_key=OPENAI_API_KEY
-     )
-     return llm
-
- llm = load_llm()
-
- # ------------------ Memory ------------------
- memory = ConversationBufferMemory(
-     memory_key="chat_history",
-     return_messages=True
- )
-
- # ------------------ Conversational Retrieval Chain ------------------
- qa_chain = ConversationalRetrievalChain.from_llm(
-     llm=llm,
-     retriever=vectorstore.as_retriever(search_kwargs={"k": 3}),
-     memory=memory,
-     output_key="answer"
- )
-
- # ------------------ Streamlit UI ------------------
- st.title("💉 Diabetes Chatbot")
- st.write("Chat with the bot about diabetes. It remembers your questions during this session!")
-
- # Initialize chat history
- if "chat_history" not in st.session_state:
-     st.session_state["chat_history"] = []
-
- # ------------------ Chat Interface ------------------
- user_input = st.chat_input("Type your question here...")
-
- if user_input:
-     # Display user message instantly
-     st.session_state["chat_history"].append((user_input, None))
-
-     # Run QA chain and generate answer
-     with st.spinner("Bot is thinking..."):
-         result = qa_chain({"question": user_input, "chat_history": st.session_state["chat_history"]})
-         answer = result["answer"]
-     # Update the last user message with the bot response
-     st.session_state["chat_history"][-1] = (user_input, answer)
-
- # Display chat history using Streamlit chat messages
- for q, a in st.session_state["chat_history"]:
-     st.chat_message("user").write(q)
-     st.chat_message("assistant").write(a)
 
+ # app.py
+ import os
+
+
+
+
+
+ import streamlit as st
+ # from dotenv import load_dotenv
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.memory import ConversationBufferMemory
+ from langchain_community.vectorstores import FAISS
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.chat_models import ChatOpenAI
+
+ # ------------------ Load environment variables ------------------
+ # load_dotenv()
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+
+ # ------------------ Paths ------------------
+ VECTORSTORE_PATH = os.path.join("storage", "faiss_index") # folder containing index.faiss and index.pkl
+
+ # ------------------ Load vectorstore ------------------
+ @st.cache_resource
+ def load_vectorstore(path):
+     if not os.path.exists(path):
+         st.error(f"FAISS index not found at {path}. Please run ingest.py first.")
+         return None
+     embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
+     vectorstore = FAISS.load_local(
+         path,
+         embeddings,
+         allow_dangerous_deserialization=True
+     )
+     return vectorstore
+
+ vectorstore = load_vectorstore(VECTORSTORE_PATH)
+ if vectorstore is None:
+     st.stop()
+
+ # ------------------ Load LLM ------------------
+ @st.cache_resource
+ def load_llm():
+     llm = ChatOpenAI(
+         model_name="gpt-3.5-turbo",
+         temperature=0,
+         openai_api_key=OPENAI_API_KEY
+     )
+     return llm
+
+ llm = load_llm()
+
+ # ------------------ Memory ------------------
+ memory = ConversationBufferMemory(
+     memory_key="chat_history",
+     return_messages=True
+ )
+
+ # ------------------ Conversational Retrieval Chain ------------------
+ qa_chain = ConversationalRetrievalChain.from_llm(
+     llm=llm,
+     retriever=vectorstore.as_retriever(search_kwargs={"k": 3}),
+     memory=memory,
+     output_key="answer"
+ )
+
+ # ------------------ Streamlit UI ------------------
+ st.title("💉 Diabetes Chatbot")
+ st.write("Chat with the bot about diabetes. It remembers your questions during this session!")
+
+ # Initialize chat history
+ if "chat_history" not in st.session_state:
+     st.session_state["chat_history"] = []
+
+ # ------------------ Chat Interface ------------------
+ user_input = st.chat_input("Type your question here...")
+
+ if user_input:
+     # Display user message instantly
+     st.session_state["chat_history"].append((user_input, None))
+
+     # Run QA chain and generate answer
+     with st.spinner("Bot is thinking..."):
+         result = qa_chain({"question": user_input, "chat_history": st.session_state["chat_history"]})
+         answer = result["answer"]
+     # Update the last user message with the bot response
+     st.session_state["chat_history"][-1] = (user_input, answer)
+
+ # Display chat history using Streamlit chat messages
+ for q, a in st.session_state["chat_history"]:
+     st.chat_message("user").write(q)
+     st.chat_message("assistant").write(a)