6Genix committed on
Commit
52f0fa0
·
1 Parent(s): 1df2849

Refactored Multi-Agent XAI Demo to hide prompts, limit response length, and enable a natural conversational flow between the Engineer and Analyst. Finalized with a cohesive plan summarizing the dialogue.

Browse files
Files changed (1) hide show
  1. app.py +37 -61
app.py CHANGED
@@ -36,15 +36,10 @@ tokenizerA, modelA = load_model_analyst()
36
  # ENGINEER / ANALYST GENERATION
37
  ##############################################################################
38
 
39
- def generate_engineer_response(user_text, tokenizer, model):
40
  """
41
- Generate a concise technical response from the Engineer based on the user's input.
42
  """
43
- prompt = f"""
44
- User: {user_text}
45
-
46
- Engineer: Provide a technical solution or approach that directly addresses the user's problem.
47
- """
48
  inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
49
  outputs = model.generate(
50
  inputs["input_ids"],
@@ -57,43 +52,19 @@ Engineer: Provide a technical solution or approach that directly addresses the u
57
  no_repeat_ngram_size=4,
58
  pad_token_id=tokenizer.pad_token_id
59
  )
60
- return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
61
-
62
def generate_analyst_response(engineer_output, tokenizer, model):
    """Produce the Analyst's short reply to the Engineer's output.

    Frames the Engineer's text in a role-tagged prompt, samples up to 50
    new tokens from the model, and returns the decoded text with special
    tokens removed and surrounding whitespace stripped.
    """
    analyst_prompt = f"""
Engineer: {engineer_output}

Analyst: Respond with actionable insights or complementary data-driven recommendations.
"""
    encoded = tokenizer(analyst_prompt, return_tensors="pt", padding=True, truncation=True)
    # Conservative sampling settings to keep replies brief and curb repetition.
    generation_kwargs = dict(
        attention_mask=encoded["attention_mask"],
        max_new_tokens=50,  # Restrict length
        temperature=0.6,
        do_sample=True,
        top_p=0.8,
        repetition_penalty=2.0,
        no_repeat_ngram_size=4,
        pad_token_id=tokenizer.pad_token_id,
    )
    generated = model.generate(encoded["input_ids"], **generation_kwargs)
    decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
    return decoded.strip()
84
 
85
def summarize_conversation(conversation):
    """Build the final-plan summary from the first Engineer and Analyst turns.

    Scans the (speaker, text) pairs for the first message from each agent
    role and formats both into a markdown plan. A role that never spoke
    contributes an empty perspective rather than raising.
    """
    def _first_message(role):
        # First utterance by `role`, or "" if that role never spoke.
        for speaker, text in conversation:
            if speaker == role:
                return text
        return ""

    parts = [
        "### Final Plan\n",
        f"- **Engineer Perspective:**\n {_first_message('Engineer')}\n\n",
        f"- **Analyst Perspective:**\n {_first_message('Analyst')}\n\n",
        "This collaborative plan ensures a balance of technical and analytical insights.",
    ]
    return "".join(parts)
98
 
99
  ##############################################################################
@@ -115,29 +86,34 @@ if st.button("Generate Responses"):
115
  user_text = st.session_state.user_input
116
  st.session_state.conversation = [("User", user_text)] # Clear and restart conversation
117
 
118
- # Engineer generates a response
119
- with st.spinner("Engineer is formulating a solution..."):
120
- engineer_resp = generate_engineer_response(
121
- user_text=user_text,
122
- tokenizer=tokenizerE,
123
- model=modelE
124
- )
125
- st.session_state.conversation.append(("Engineer", engineer_resp))
126
-
127
- # Display Engineer response immediately
128
- st.markdown(f"### Engineer Response\n{engineer_resp}")
129
-
130
- # Analyst generates a response based on engineer's output
131
- with st.spinner("Analyst is analyzing data and providing insights..."):
132
- analyst_resp = generate_analyst_response(
133
- engineer_output=engineer_resp,
134
- tokenizer=tokenizerA,
135
- model=modelA
136
- )
137
- st.session_state.conversation.append(("Analyst", analyst_resp))
138
-
139
- # Display Analyst response immediately
140
- st.markdown(f"### Analyst Response\n{analyst_resp}")
 
 
 
 
 
141
 
142
  # Summarize the final plan
143
  with st.spinner("Generating the final plan..."):
 
36
  # ENGINEER / ANALYST GENERATION
37
  ##############################################################################
38
 
39
+ def generate_response(prompt, tokenizer, model, max_sentences=2):
40
  """
41
+ Generate a concise response based on the provided prompt.
42
  """
 
 
 
 
 
43
  inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
44
  outputs = model.generate(
45
  inputs["input_ids"],
 
52
  no_repeat_ngram_size=4,
53
  pad_token_id=tokenizer.pad_token_id
54
  )
55
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
56
+ # Limit to max_sentences by splitting and rejoining
57
+ return " ".join(response.split(".")[:max_sentences]) + "."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
 
59
def summarize_conversation(conversation):
    """Summarize the conversation into a cohesive, actionable final plan.

    Args:
        conversation: Sequence of (speaker, text) pairs. Only "Engineer"
            and "Analyst" turns are included, in their original order;
            user turns are omitted from the plan.

    Returns:
        A markdown-formatted summary string.
    """
    agents = {"Engineer", "Analyst"}  # membership test instead of chained ==
    # Build the bullet list in one pass and join once, avoiding repeated
    # string concatenation.
    bullets = [
        f"- **{speaker}:** {text}\n"
        for speaker, text in conversation
        if speaker in agents
    ]
    return (
        "### Final Plan\n"
        + "".join(bullets)
        + "\nThis collaborative plan integrates technical and analytical insights."
    )
69
 
70
  ##############################################################################
 
86
  user_text = st.session_state.user_input
87
  st.session_state.conversation = [("User", user_text)] # Clear and restart conversation
88
 
89
+ engineer_prompt = f"User: {user_text}\nEngineer: Provide a technical solution or approach that directly addresses the user's problem."
90
+ analyst_prompt = ""
91
+
92
+ for turn in range(3):
93
+ # Engineer generates a response
94
+ with st.spinner(f"Engineer is formulating response {turn + 1}..."):
95
+ engineer_resp = generate_response(
96
+ prompt=engineer_prompt,
97
+ tokenizer=tokenizerE,
98
+ model=modelE
99
+ )
100
+ st.session_state.conversation.append(("Engineer", engineer_resp))
101
+
102
+ # Display Engineer response
103
+ st.markdown(f"### Engineer Response ({turn + 1})\n{engineer_resp}")
104
+
105
+ # Analyst generates a response based on engineer's output
106
+ analyst_prompt = f"Engineer: {engineer_resp}\nAnalyst: Respond with actionable insights or complementary data-driven recommendations."
107
+ with st.spinner(f"Analyst is formulating response {turn + 1}..."):
108
+ analyst_resp = generate_response(
109
+ prompt=analyst_prompt,
110
+ tokenizer=tokenizerA,
111
+ model=modelA
112
+ )
113
+ st.session_state.conversation.append(("Analyst", analyst_resp))
114
+
115
+ # Display Analyst response
116
+ st.markdown(f"### Analyst Response ({turn + 1})\n{analyst_resp}")
117
 
118
  # Summarize the final plan
119
  with st.spinner("Generating the final plan..."):