Spaces:
Sleeping
Refined Multi-Agent XAI Demo by hiding explicit prompt references, improving final plan clarity, and enhancing conversational flow with contextual variations.
Browse files
app.py
CHANGED
@@ -66,7 +66,7 @@ def summarize_conversation(conversation):
|
|
66 |
if speaker == "Engineer" or speaker == "Analyst":
|
67 |
key_points.append(f"- {speaker}: {text}")
|
68 |
summary += "\n".join(key_points[-6:]) # Include only the last 3 turns each
|
69 |
-
summary += "\n\nThis collaborative plan integrates technical and analytical insights."
|
70 |
return summary
|
71 |
|
72 |
##############################################################################
|
@@ -88,8 +88,8 @@ if st.button("Generate Responses"):
|
|
88 |
user_text = st.session_state.user_input
|
89 |
st.session_state.conversation = [("User", user_text)] # Clear and restart conversation
|
90 |
|
91 |
-
engineer_prompt_base = f" … [old prompt string truncated in diff rendering]
|
92 |
-
analyst_prompt_base = " … [old prompt string truncated in diff rendering]
|
93 |
|
94 |
for turn in range(3):
|
95 |
# Engineer generates a response
|
@@ -107,7 +107,7 @@ if st.button("Generate Responses"):
|
|
107 |
# Analyst generates a response based on engineer's output
|
108 |
with st.spinner(f"Analyst is formulating response {turn + 1}..."):
|
109 |
analyst_resp = generate_response(
|
110 |
-
prompt=f"Engineer … [old prompt f-string truncated in diff rendering]
|
111 |
tokenizer=tokenizerA,
|
112 |
model=modelA
|
113 |
)
|
|
|
66 |
if speaker == "Engineer" or speaker == "Analyst":
|
67 |
key_points.append(f"- {speaker}: {text}")
|
68 |
summary += "\n".join(key_points[-6:]) # Include only the last 3 turns each
|
69 |
+
summary += "\n\nThis collaborative plan integrates technical and analytical insights into an actionable framework."
|
70 |
return summary
|
71 |
|
72 |
##############################################################################
|
|
|
88 |
user_text = st.session_state.user_input
|
89 |
st.session_state.conversation = [("User", user_text)] # Clear and restart conversation
|
90 |
|
91 |
+
engineer_prompt_base = f"Given the problem: {user_text}, provide a concise and actionable technical solution."
|
92 |
+
analyst_prompt_base = "Based on the engineer's suggestion, provide complementary data-driven recommendations."
|
93 |
|
94 |
for turn in range(3):
|
95 |
# Engineer generates a response
|
|
|
107 |
# Analyst generates a response based on engineer's output
|
108 |
with st.spinner(f"Analyst is formulating response {turn + 1}..."):
|
109 |
analyst_resp = generate_response(
|
110 |
+
prompt=f"Engineer suggested: {engineer_resp}. {analyst_prompt_base}",
|
111 |
tokenizer=tokenizerA,
|
112 |
model=modelA
|
113 |
)
|