# app.py
import streamlit as st

from function import Head_Agent


def init_chatbot():
    # Read the API keys from local files (one key per file, first line only).
    with open('open_ai_key.txt', 'r', encoding='utf-8') as file:
        openai_key = file.readline().strip()
    with open('pinecone_api.txt', 'r', encoding='utf-8') as file:
        pinecone_key = file.readline().strip()
    pinecone_index_name = 'machine-learning-index'
    return Head_Agent(openai_key, pinecone_key, pinecone_index_name)


# ------------- Streamlit UI -------------
st.title("My Streamlit Chatbot with Greetings")

# Create the chatbot once and keep it across reruns via session state.
if "chatbot" not in st.session_state:
    st.session_state["chatbot"] = init_chatbot()
chatbot = st.session_state["chatbot"]

user_query = st.text_input("Please enter your question:")

greeting_keywords = {
    "hi", "hello", "hey", "how are you", "how r u", "yo",
    "good morning", "good evening", "good afternoon",
}

if st.button("Send"):
    if not user_query.strip():
        st.warning("Please enter a question.")
    else:
        # Handle simple greetings without calling the retrieval pipeline.
        normalized_input = user_query.lower().strip()
        if normalized_input in greeting_keywords:
            greet_response = "Hello there! How can I assist you today?"
            st.write("Robot: ", greet_response)
            chatbot.conv_history.append(f"User: {user_query}")
            chatbot.conv_history.append(f"Robot: {greet_response}")
        else:
            # Filter out inappropriate queries before answering.
            if chatbot.obnoxious_agent.check_query(user_query):
                st.write("Robot: Your question is inappropriate, please try another one.")
            else:
                # Retrieve matching documents from the vector store and generate an answer.
                docs = chatbot.query_agent.query_vector_store(user_query)
                matches = docs["matches"]
                response = chatbot.answering_agent.generate_response(
                    user_query, matches, chatbot.conv_history
                )
                chatbot.conv_history.append(f"User: {user_query}")
                chatbot.conv_history.append(f"Robot: {response}")

                # Check whether the generated answer is relevant to the user's query.
                conversation_context = (
                    f"User query: {user_query}\n"
                    f"Retrieved document summaries: {response}"
                )
                relevance = chatbot.relevant_agent.get_relevance(conversation_context)
                if relevance.strip().lower() == "no":
                    st.write("Robot (answer may not be relevant):", response)
                else:
                    st.write("Robot:", response)

st.write("---")
st.subheader("Conversation History")
for msg in chatbot.conv_history:
    st.write(msg)