yuanjunchai committed
Commit 952b7e0 · Parent(s): 97b8b6c
update deep learning method whose test accuracy is 0.4180
Files changed:
- app2.py +76 -0
- design diagram.md +15 -0
- function.py +125 -0
- open_ai_key.txt +1 -0
- pinecone_api.txt +1 -0
app2.py
ADDED
@@ -0,0 +1,76 @@
# app.py
import streamlit as st
from function import Head_Agent


def init_chatbot():
    # Read the API keys from local files and build the agent pipeline.
    with open('open_ai_key.txt', 'r', encoding='utf-8') as file:
        openai_key = file.readline().strip()
    with open('pinecone_api.txt', 'r', encoding='utf-8') as file:
        pinecone_key = file.readline().strip()

    pinecone_index_name = 'machine-learning-index'

    return Head_Agent(openai_key, pinecone_key, pinecone_index_name)


# ------------- Streamlit -------------------
st.title("My Streamlit Chatbot with Greetings")

# Build the chatbot once and cache it across Streamlit reruns.
if "chatbot" not in st.session_state:
    st.session_state["chatbot"] = init_chatbot()

chatbot = st.session_state["chatbot"]

user_query = st.text_input("Please enter your question:")

greeting_keywords = {
    "hi", "hello", "hey", "how are you", "how r u", "yo", "good morning", "good evening", "good afternoon"
}

if st.button("Send"):
    if not user_query.strip():
        st.warning("Please enter valid content.")
    else:
        # 1. Short-circuit plain greetings without touching the LLM pipeline.
        normalized_input = user_query.lower().strip()
        if normalized_input in greeting_keywords:
            greet_response = "Hello there! How can I assist you today?"
            st.write("Robot: ", greet_response)

            chatbot.conv_history.append(f"User: {user_query}")
            chatbot.conv_history.append(f"Robot: {greet_response}")

        else:
            # 2. Filter obnoxious queries before doing any retrieval.
            if chatbot.obnoxious_agent.check_query(user_query):
                st.write("Robot: Your question is inappropriate, please try another one.")

            else:
                # Retrieve related documents from Pinecone, then answer with GPT.
                docs = chatbot.query_agent.query_vector_store(user_query)
                matches = docs["matches"]

                response = chatbot.answering_agent.generate_response(
                    user_query,
                    matches,
                    chatbot.conv_history
                )

                chatbot.conv_history.append(f"User: {user_query}")
                chatbot.conv_history.append(f"Robot: {response}")

                # 3. Check whether the generated answer is relevant to the query.
                conversation_context = (
                    f"User query: {user_query}\n"
                    f"Retrieved document summaries: {response}"
                )
                relevance = chatbot.relevant_agent.get_relevance(conversation_context)

                if relevance.strip().lower() == "no":
                    st.write("Robot (answer may not be relevant):", response)
                else:
                    st.write("Robot:", response)

st.write("---")
st.subheader("Conversation History")
for msg in chatbot.conv_history:
    st.write(msg)
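For a quick check outside the UI, the same pipeline can be driven headlessly. A minimal sketch, assuming the two key files sit next to the script and the `machine-learning-index` index is already populated (the query string is just an example):

```python
# Headless smoke test for the Head_Agent pipeline (sketch, not part of the app).
from function import Head_Agent

with open('open_ai_key.txt', 'r', encoding='utf-8') as f:
    openai_key = f.readline().strip()
with open('pinecone_api.txt', 'r', encoding='utf-8') as f:
    pinecone_key = f.readline().strip()

chatbot = Head_Agent(openai_key, pinecone_key, 'machine-learning-index')

query = "What is gradient descent?"  # example query
if not chatbot.obnoxious_agent.check_query(query):
    matches = chatbot.query_agent.query_vector_store(query)["matches"]
    print(chatbot.answering_agent.generate_response(query, matches, chatbot.conv_history))
```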
design diagram.md
ADDED
@@ -0,0 +1,15 @@
```mermaid
flowchart TB
    A["User enters query in Streamlit UI"] --> B{Is it a Greeting?}
    B -- Yes --> C["Display 'Hello there!'"]
    B -- No --> D{Check with Obnoxious_Agent}
    D -- Yes --> E["Display 'Question is inappropriate'"]
    D -- No --> F["Query_Agent retrieves docs from Pinecone"]
    F --> G["Answering_Agent uses GPT to generate answer"]
    G --> H{"Relevant_Documents_Agent checks doc relevance"}
    H -- Yes --> I["Final response displayed to user"]
    H -- No --> M["Final response displayed with a not-relevant warning"]
```
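The branches above map one-to-one onto the logic in `app2.py`. Condensed into a single function, the routing looks roughly like this (a sketch only; `route_query` is a hypothetical helper built on the `Head_Agent` from `function.py`):

```python
# Hypothetical condensation of the flowchart; mirrors the branching in app2.py.
GREETINGS = {"hi", "hello", "hey", "how are you", "yo"}

def route_query(chatbot, query: str) -> str:
    if query.lower().strip() in GREETINGS:           # B -- Yes --> C
        return "Hello there! How can I assist you today?"
    if chatbot.obnoxious_agent.check_query(query):   # D -- Yes --> E
        return "Your question is inappropriate, please try another one."
    matches = chatbot.query_agent.query_vector_store(query)["matches"]  # F
    answer = chatbot.answering_agent.generate_response(                 # G
        query, matches, chatbot.conv_history
    )
    relevance = chatbot.relevant_agent.get_relevance(                   # H
        f"User query: {query}\nRetrieved document summaries: {answer}"
    )
    if relevance.strip().lower() == "no":            # H -- No --> M
        return f"(answer may not be relevant) {answer}"
    return answer                                    # H -- Yes --> I
```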
function.py
ADDED
@@ -0,0 +1,125 @@
# function.py

# NOTE: this module targets the pre-1.0 openai package (openai.ChatCompletion
# was removed in openai>=1.0) and the classic langchain OpenAIEmbeddings import.
import openai
from pinecone import Pinecone
from langchain.embeddings import OpenAIEmbeddings


class Obnoxious_Agent:
    """Classifies whether a query is obnoxious/inappropriate via a Yes/No prompt."""

    def __init__(self, client) -> None:
        self.client = client
        self.prompt = "Determine whether the following query is obnoxious or inappropriate (answer only Yes or No):"

    def set_prompt(self, prompt):
        self.prompt = prompt

    def extract_action(self, response_text) -> bool:
        # Treat any answer containing "yes" as a positive (obnoxious) verdict.
        return "yes" in response_text.lower()

    def check_query(self, query):
        full_prompt = f"{self.prompt}\nQuery: {query}"
        response = self.client.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": full_prompt}],
            max_tokens=10
        )
        content = response["choices"][0]["message"]["content"]
        return self.extract_action(content)


class Query_Agent:
    """Embeds a query and retrieves the top-k matches from the Pinecone index."""

    def __init__(self, pinecone_index, openai_client, embeddings) -> None:
        self.pinecone_index = pinecone_index
        self.client = openai_client
        self.embeddings = embeddings

    def query_vector_store(self, query, k=5):
        query_vector = self.embeddings.embed_query(query)

        result = self.pinecone_index.query(
            vector=query_vector,
            top_k=k,
            include_metadata=True
        )
        return result


class Answering_Agent:
    """Generates an answer from the retrieved documents and the dialog history."""

    def __init__(self, openai_client) -> None:
        self.client = openai_client

    def generate_response(self, query, docs, conv_history):
        # Each match is expected to carry its source text in metadata["text"].
        docs_texts = []
        for doc in docs:
            metadata = doc.get("metadata", {})
            text = metadata.get("text", "")
            docs_texts.append(text)

        docs_text = "\n".join(docs_texts)
        history_text = "\n".join(conv_history)

        full_prompt = (
            "Answer the user query based on the following related documents and dialog history.\n"
            f"Related documents:\n{docs_text}\n"
            f"Conversation history:\n{history_text}\n"
            f"User query: {query}\nAnswer:"
        )
        response = self.client.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": full_prompt}],
            max_tokens=150
        )
        content = response["choices"][0]["message"]["content"]
        return content.strip()


class Relevant_Documents_Agent:
    """Judges whether the retrieved documents are relevant to the user's query."""

    def __init__(self, openai_client) -> None:
        self.client = openai_client

    def get_relevance(self, conversation) -> str:
        prompt = (
            "You are a highly skilled assistant. Please determine if the returned documents "
            "are directly relevant to the user's query. Respond with 'Yes' if you believe the "
            "documents are relevant, or 'No' if you believe they are not.\n\n"
            "Context:\n"
            f"{conversation}\n\n"
            "Please respond with 'Yes' or 'No' only."
        )
        response = self.client.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=10
        )
        content = response["choices"][0]["message"]["content"]
        return content.strip()


class Head_Agent:
    """Wires the OpenAI client, the Pinecone index, and the four sub-agents together."""

    def __init__(self, openai_key, pinecone_key, pinecone_index_name) -> None:
        openai.api_key = openai_key

        pc = Pinecone(api_key=pinecone_key)
        self.pinecone_index = pc.Index(pinecone_index_name)

        self.embeddings = OpenAIEmbeddings(openai_api_key=openai_key)

        self.openai_client = openai

        self.obnoxious_agent = Obnoxious_Agent(self.openai_client)
        self.query_agent = Query_Agent(self.pinecone_index, self.openai_client, self.embeddings)
        self.answering_agent = Answering_Agent(self.openai_client)
        self.relevant_agent = Relevant_Documents_Agent(self.openai_client)

        self.conv_history = []
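`Answering_Agent.generate_response` only works if each vector in the index was upserted with its chunk text under `metadata["text"]`. A sketch of the expected upsert shape (the id and text are placeholders; `pinecone_index` and `embeddings` are the handles created in `Head_Agent.__init__`):

```python
# Hypothetical upsert showing the metadata layout function.py relies on.
pinecone_index.upsert(vectors=[{
    "id": "doc-0001",                                      # placeholder id
    "values": embeddings.embed_query("chunk of the source document"),
    "metadata": {"text": "chunk of the source document"},  # read back by generate_response
}])
# query_vector_store() then returns a result like:
# {"matches": [{"id": "doc-0001", "score": 0.87,
#               "metadata": {"text": "chunk of the source document"}}]}
```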
open_ai_key.txt
ADDED
@@ -0,0 +1 @@
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
pinecone_api.txt
ADDED
@@ -0,0 +1 @@
pcsk_2fYVS5_RLwpM61jCGwKQZugPhauxdgmsPpU3V3oxRP16oVGgEaEsbeUoEDH8BRXbrMxXv1