Upload app.py with huggingface_hub
app.py CHANGED
@@ -71,7 +71,7 @@ from datetime import datetime
 # Fetch secrets from Hugging Face Spaces
 api_key = os.environ["API_KEY"]
 endpoint = os.environ["OPENAI_API_BASE"]
-llama_api_key = os.environ['…
+llama_api_key = os.environ['LLAMA_API_KEY']
 mem0_api_key = os.environ['mem0']
 
 # Initialize the OpenAI embedding function for Chroma
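
Aside: each secret configured under the Space's Settings surfaces in app.py as an environment variable. A minimal defensive-lookup sketch (the require_secret helper is illustrative, not part of app.py):

import os

def require_secret(name: str) -> str:
    # Fail fast with a readable error instead of a bare KeyError
    # when a secret was not configured in the Space settings.
    value = os.environ.get(name)
    if not value:
        raise RuntimeError(f"Secret '{name}' is not set for this Space.")
    return value

llama_api_key = require_secret("LLAMA_API_KEY")
mem0_api_key = require_secret("mem0")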
@@ -122,7 +122,7 @@ class AgentState(TypedDict):
 
 def expand_query(state):
     """
-    Expands the user query to improve retrieval of…
+    Expands the user query to improve retrieval of climate-related information.
 
     Args:
         state (Dict): The current state of the workflow, containing the user query.
@@ -131,8 +131,8 @@ def expand_query(state):
         Dict: The updated state with the expanded query.
     """
     print("---------Expanding Query---------")
-    system_message = '''You are a…
-    Convert the user query into something that…
+    system_message = '''You are a domain expert assisting in answering questions related to climate-related information.
+    Convert the user query into something that a climate professional would understand. Use domain related words.
     Perform query expansion on the question received. If there are multiple common ways of phrasing a user question \
     or common synonyms for key words in the question, make sure to return multiple versions \
     of the query with the different phrasings.
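
The node body falls outside the hunk; a plausible sketch of how expand_query could call the model with this system message, assuming a LangChain ChatOpenAI client (the llm handle, model name, and state keys are assumptions):

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", api_key=api_key, base_url=endpoint)  # model name assumed

def expand_query(state):
    # Rephrase the user's question with domain vocabulary and common
    # synonyms, as instructed by system_message above.
    result = llm.invoke([
        ("system", system_message),
        ("human", state["query"]),
    ])
    return {**state, "expanded_query": result.content}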
@@ -210,7 +210,7 @@ def retrieve_context(state):
 
 def craft_response(state: Dict) -> Dict:
     """
-    Generates a response using the retrieved context, focusing on…
+    Generates a response using the retrieved context, focusing on climate solutions.
 
     Args:
         state (Dict): The current state of the workflow, containing the query and retrieved context.
@@ -219,7 +219,7 @@ def craft_response(state: Dict) -> Dict:
         Dict: The updated state with the generated response.
     """
     print("---------craft_response---------")
-    system_message = '''you are a smart…
+    system_message = '''you are a smart climate specialist. Use the context and feedback to respond to the query.
     The answer you provide must come from the context and feedback provided.
     If information provided is not enough to answer the query response with 'I don't know the answer. Not in my records'''
 
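
Likewise for craft_response; a sketch under the same assumptions (the llm handle and the context state key are assumed):

from typing import Dict

def craft_response(state: Dict) -> Dict:
    # Answer strictly from the retrieved context, per system_message;
    # the prompt itself tells the model when to say "I don't know".
    result = llm.invoke([
        ("system", system_message),
        ("human", f"Query: {state['query']}\n\nContext: {state['context']}"),
    ])
    return {**state, "response": result.content}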
@@ -498,13 +498,13 @@ def agentic_rag(query: str):
 #================================ Guardrails ===========================#
 llama_guard_client = Groq(api_key=llama_api_key)
 # Function to filter user input with Llama Guard
-def filter_input_with_llama_guard(user_input, model="llama-guard-3-8b"):
+def filter_input_with_llama_guard(user_input, model="llama-guard-4-12b"):
     """
     Filters user input using Llama Guard to ensure it is safe.
 
     Parameters:
     - user_input: The input provided by the user.
-    - model: The Llama Guard model to be used for filtering (default is "llama-guard-3-8b").
+    - model: The Llama Guard model to be used for filtering (default is "llama-guard-4-12b").
 
     Returns:
     - The filtered and safe input.
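
The function body is elided; Llama Guard is usually invoked as a plain chat completion through the Groq SDK, so a sketch along these lines (message framing and normalization are assumptions):

def filter_input_with_llama_guard(user_input, model="llama-guard-4-12b"):
    # Llama Guard replies "safe", or "unsafe" plus the violated category
    # code (e.g. "unsafe\nS6"), when shown the user's message.
    response = llama_guard_client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": user_input}],
    )
    # Collapse whitespace so "unsafe\nS6" compares as "unsafe S6",
    # matching the checks in climate_streamlit below.
    return " ".join(response.choices[0].message.content.split())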
@@ -524,7 +524,7 @@ def filter_input_with_llama_guard(user_input, model="llama-guard-3-8b"):
 
 #============================= Adding Memory to the agent using mem0 ===============================#
 
-class …
+class ClimateBot:
     def __init__(self):
         """
         Initialize the NutritionBot class, setting up memory, the LLM client, tools, and the agent executor.
@@ -586,7 +586,7 @@ class AgentState(TypedDict):
 
 def expand_query(state):
     """
-    Expands the user query to improve retrieval of…
+    Expands the user query to improve retrieval of climate-related information.
 
     Args:
         state (Dict): The current state of the workflow, containing the user query.
@@ -595,8 +595,8 @@ def expand_query(state):
         Dict: The updated state with the expanded query.
     """
     print("---------Expanding Query---------")
-    system_message = '''You are a…
-    Convert the user query into something that a…
+    system_message = '''You are a domain expert assisting in answering questions related to climate-related information.
+    Convert the user query into something that a climate professional would understand. Use domain related words.
     Perform query expansion on the question received. If there are multiple common ways of phrasing a user question \
     or common synonyms for key words in the question, make sure to return multiple versions \
     of the query with the different phrasings.
@@ -675,7 +675,7 @@ def retrieve_context(state):
 
 def craft_response(state: Dict) -> Dict:
     """
-    Generates a response using the retrieved context, focusing on…
+    Generates a response using the retrieved context, focusing on climate solutions.
 
     Args:
         state (Dict): The current state of the workflow, containing the query and retrieved context.
@@ -684,7 +684,7 @@ def craft_response(state: Dict) -> Dict:
         Dict: The updated state with the generated response.
     """
     print("---------craft_response---------")
-    system_message = '''you are a smart…
+    system_message = '''you are a smart climate specialist. Use the context and feedback to respond to the query.
     The answer you provide must come from the context and feedback provided.
     If information provided is not enough to answer the query response with 'I don't know the answer. Not in my records'''
 
@@ -964,13 +964,13 @@ def agentic_rag(query: str):
 #================================ Guardrails ===========================#
 llama_guard_client = Groq(api_key=llama_api_key)
 # Function to filter user input with Llama Guard
-def filter_input_with_llama_guard(user_input, model="llama-guard-3-8b"):
+def filter_input_with_llama_guard(user_input, model="llama-guard-4-12b"):
     """
     Filters user input using Llama Guard to ensure it is safe.
 
     Parameters:
     - user_input: The input provided by the user.
-    - model: The Llama Guard model to be used for filtering (default is "llama-guard-3-8b").
+    - model: The Llama Guard model to be used for filtering (default is "llama-guard-4-12b").
 
     Returns:
     - The filtered and safe input.
@@ -990,10 +990,10 @@ def filter_input_with_llama_guard(user_input, model="llama-guard-3-8b"):
 
 #============================= Adding Memory to the agent using mem0 ===============================#
 
-class climateBot:
+class ClimateBot:
     def __init__(self):
         """
-        Initialize the…
+        Initialize the ClimateBot class, setting up memory, the LLM client, tools, and the agent executor.
         """
 
         # Initialize a memory client to store and retrieve customer interactions
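
The MemoryClient import and call sites sit outside the hunks; a sketch of the usual mem0 platform wiring (the method names on ClimateBot are illustrative, not confirmed by the diff):

from mem0 import MemoryClient

class ClimateBot:
    def __init__(self):
        # Store and retrieve per-user interactions in mem0.
        self.memory = MemoryClient(api_key=mem0_api_key)

    def store_customer_interaction(self, user_id: str, message: str, response: str):
        # Persist the exchange so later sessions can reference it.
        self.memory.add(
            [{"role": "user", "content": message},
             {"role": "assistant", "content": response}],
            user_id=user_id,
        )

    def get_customer_history(self, user_id: str, query: str):
        # Fetch memories relevant to the current query.
        return self.memory.search(query, user_id=user_id)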
@@ -1011,17 +1011,17 @@ class climateBot:
         tools = [agentic_rag]
 
         # Define the system prompt to set the behavior of the chatbot
-        system_prompt = """You are a caring and knowledgeable Climate Agent, specializing in climate…
+        system_prompt = """You are a caring and knowledgeable Climate Agent, specializing in climate-related guidance. Your goal is to provide accurate, empathetic, and tailored climate related solutions while ensuring a seamless customer experience.
         Guidelines for Interaction:
         Maintain a polite, professional, and reassuring tone.
-        Show genuine empathy for customer concerns and…
+        Show genuine empathy for customer concerns and climate challenges.
         Reference past interactions to provide personalized and consistent advice.
-        Engage with the customer by asking about their…
+        Engage with the customer by asking about their company size, industry, and location before offering recommendations.
         Ensure consistent and accurate information across conversations.
         If any detail is unclear or missing, proactively ask for clarification.
-        Always use the agentic_rag tool to retrieve up-to-date and evidence-based climate…
+        Always use the agentic_rag tool to retrieve up-to-date and evidence-based climate insights.
         Keep track of ongoing issues and follow-ups to ensure continuity in support.
-        Your primary goal is to help customers make informed climate…
+        Your primary goal is to help customers make informed climate impact decisions that align with their company size, industry and location.
 
         """
 
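
The rest of __init__ is not shown; a sketch of how system_prompt and the agentic_rag tool are commonly assembled into a LangChain tool-calling agent (the self.client LLM handle is assumed from the docstring):

from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages([
    ("system", system_prompt),
    ("human", "{input}"),
    ("placeholder", "{agent_scratchpad}"),  # tool-call history goes here
])
agent = create_tool_calling_agent(self.client, tools, prompt)
self.agent_executor = AgentExecutor(agent=agent, tools=tools)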
@@ -1141,10 +1141,10 @@ class climateBot:
 #=====================User Interface using streamlit ===========================#
 def climate_streamlit():
     """
-    A Streamlit-based UI for the Climate Agent.
+    A Streamlit-based UI for the Climate Specialist Agent.
     """
-    st.title("…
-    st.write("Ask me anything about climate…
+    st.title("Climate Specialist")
+    st.write("Ask me anything about climate change, causes, impact, solutions and more.")
     st.write("Type 'exit' to end the conversation.")
 
     # Initialize session state for chat history and user_id if they don't exist
@@ -1162,7 +1162,7 @@ def climate_streamlit():
         st.session_state.user_id = user_id
         st.session_state.chat_history.append({
             "role": "assistant",
-            "content": f"Welcome, {user_id}! How can I help you climate…
+            "content": f"Welcome, {user_id}! How can I help you with climate solutions today?"
         })
         st.session_state.login_submitted = True  # Set flag to trigger rerun
     if st.session_state.get("login_submitted", False):
@@ -1181,7 +1181,7 @@ def climate_streamlit():
         st.session_state.chat_history.append({"role": "user", "content": "exit"})
         with st.chat_message("user"):
             st.write("exit")
-        goodbye_msg = "Goodbye! Feel free to return if you have more questions about climate…
+        goodbye_msg = "Goodbye! Feel free to return if you have more questions about climate change."
         st.session_state.chat_history.append({"role": "assistant", "content": goodbye_msg})
         with st.chat_message("assistant"):
             st.write(goodbye_msg)
@@ -1201,7 +1201,7 @@ def climate_streamlit():
     if filtered_result in ["safe", "unsafe S6", "unsafe S7"]:  # Blanks #3, #4, #5: Fill in with allowed safe statuses (e.g., "safe", "unsafe S7", "unsafe S6")
         try:
             if 'chatbot' not in st.session_state:
-                st.session_state.chatbot =…
+                st.session_state.chatbot = ClimateBot()  # Blank #6: Fill in with the chatbot class initialization (e.g., ClimateBot)
             response = st.session_state.chatbot.handle_customer_query(st.session_state.user_id, user_query)
             # Blank #7: Fill in with the method to handle queries (e.g., handle_customer_query)
             st.write(response)
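
For orientation, a sketch of the chat loop these fragments belong to, reconstructed from the hunks above (the prompt text and the unsafe-input message are assumptions):

user_query = st.chat_input("Ask a climate question...")
if user_query and user_query.lower() != "exit":
    with st.chat_message("user"):
        st.write(user_query)
    # Screen the input with Llama Guard before it reaches the agent.
    filtered_result = filter_input_with_llama_guard(user_query)
    if filtered_result in ["safe", "unsafe S6", "unsafe S7"]:
        try:
            if 'chatbot' not in st.session_state:
                st.session_state.chatbot = ClimateBot()
            response = st.session_state.chatbot.handle_customer_query(
                st.session_state.user_id, user_query
            )
            with st.chat_message("assistant"):
                st.write(response)
        except Exception:
            st.write("Sorry, something went wrong while answering. Please try again.")
    else:
        st.write("Sorry, that request was flagged as unsafe, so I can't answer it.")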