Spaces:
Sleeping
Sleeping
Enhanced Multi-Agent XAI Demo with hidden prompts, streamlined final plan summarization, and improved response diversity. Adjusted repetition penalties and conversation formatting for clarity.
Browse files
app.py
CHANGED
@@ -48,7 +48,7 @@ def generate_response(prompt, tokenizer, model, max_sentences=2):
|
|
48 |
temperature=0.6,
|
49 |
do_sample=True,
|
50 |
top_p=0.8,
|
51 |
-
repetition_penalty=2.0,
|
52 |
no_repeat_ngram_size=4,
|
53 |
pad_token_id=tokenizer.pad_token_id
|
54 |
)
|
@@ -61,10 +61,12 @@ def summarize_conversation(conversation):
|
|
61 |
Summarize the entire conversation to produce a cohesive and actionable plan.
|
62 |
"""
|
63 |
summary = "### Final Plan\n"
|
|
|
64 |
for speaker, text in conversation:
|
65 |
if speaker == "Engineer" or speaker == "Analyst":
|
66 |
-
|
67 |
-
summary += f"- {speaker}: {text}\n"
|
|
|
68 |
return summary
|
69 |
|
70 |
##############################################################################
|
@@ -86,14 +88,14 @@ if st.button("Generate Responses"):
|
|
86 |
user_text = st.session_state.user_input
|
87 |
st.session_state.conversation = [("User", user_text)] # Clear and restart conversation
|
88 |
|
89 |
-
|
90 |
-
|
91 |
|
92 |
for turn in range(3):
|
93 |
# Engineer generates a response
|
94 |
with st.spinner(f"Engineer is formulating response {turn + 1}..."):
|
95 |
engineer_resp = generate_response(
|
96 |
-
prompt=f"The user asked: {user_text}. Provide a concise technical solution.",
|
97 |
tokenizer=tokenizerE,
|
98 |
model=modelE
|
99 |
)
|
@@ -103,10 +105,9 @@ if st.button("Generate Responses"):
|
|
103 |
st.markdown(f"### Engineer Response ({turn + 1})\n{engineer_resp}")
|
104 |
|
105 |
# Analyst generates a response based on engineer's output
|
106 |
-
analyst_prompt = f"Engineer: {engineer_resp}\nAnalyst: Respond with actionable insights or complementary data-driven recommendations."
|
107 |
with st.spinner(f"Analyst is formulating response {turn + 1}..."):
|
108 |
analyst_resp = generate_response(
|
109 |
-
prompt=analyst_prompt,
|
110 |
tokenizer=tokenizerA,
|
111 |
model=modelA
|
112 |
)
|
|
|
48 |
temperature=0.6,
|
49 |
do_sample=True,
|
50 |
top_p=0.8,
|
51 |
+
repetition_penalty=2.2,
|
52 |
no_repeat_ngram_size=4,
|
53 |
pad_token_id=tokenizer.pad_token_id
|
54 |
)
|
|
|
61 |
Summarize the entire conversation to produce a cohesive and actionable plan.
|
62 |
"""
|
63 |
summary = "### Final Plan\n"
|
64 |
+
key_points = []
|
65 |
for speaker, text in conversation:
|
66 |
if speaker == "Engineer" or speaker == "Analyst":
|
67 |
+
key_points.append(f"- {speaker}: {text}")
|
68 |
+
summary += "\n".join(key_points[-6:]) # Include only the last 3 turns each
|
69 |
+
summary += "\n\nThis collaborative plan integrates technical and analytical insights."
|
70 |
return summary
|
71 |
|
72 |
##############################################################################
|
|
|
88 |
user_text = st.session_state.user_input
|
89 |
st.session_state.conversation = [("User", user_text)] # Clear and restart conversation
|
90 |
|
91 |
+
engineer_prompt_base = f"The user asked: {user_text}. Provide a concise technical solution."
|
92 |
+
analyst_prompt_base = "Respond with complementary data-driven insights."
|
93 |
|
94 |
for turn in range(3):
|
95 |
# Engineer generates a response
|
96 |
with st.spinner(f"Engineer is formulating response {turn + 1}..."):
|
97 |
engineer_resp = generate_response(
|
98 |
+
prompt=engineer_prompt_base,
|
99 |
tokenizer=tokenizerE,
|
100 |
model=modelE
|
101 |
)
|
|
|
105 |
st.markdown(f"### Engineer Response ({turn + 1})\n{engineer_resp}")
|
106 |
|
107 |
# Analyst generates a response based on engineer's output
|
|
|
108 |
with st.spinner(f"Analyst is formulating response {turn + 1}..."):
|
109 |
analyst_resp = generate_response(
|
110 |
+
prompt=f"Engineer said: {engineer_resp}. {analyst_prompt_base}",
|
111 |
tokenizer=tokenizerA,
|
112 |
model=modelA
|
113 |
)
|