6Genix committed
Commit d753076 · Parent(s): 8357c9c

Iterating to improve models' responses.

Files changed (1): app.py (+40 -15)
app.py CHANGED
@@ -30,13 +30,13 @@ tokenizerA, modelA = load_model_analyst()
 
 def generate_engineer_response(user_text, tokenizer, model):
     """
-    Engineer generates a concise approach or solution based on user input.
+    As an Engineer, generate a concise approach or solution based on user input.
     """
     prompt = f"""
-User text: {user_text}
-
-Provide a technical approach or solution.
-"""
+User text: {user_text}
+Provide a technical approach or solution.
+
+"""
     inputs = tokenizer.encode(prompt, return_tensors="pt")
     outputs = model.generate(
         inputs,
@@ -51,14 +51,13 @@ Provide a technical approach or solution.
 
 def generate_analyst_response(user_text, engineer_output, tokenizer, model):
     """
-    Analyst provides an approach or solution based on user input and engineer's output.
+    As an Analyst, provide an approach or solution based on user input and engineer's output.
     """
     prompt = f"""
-User text: {user_text}
-
 Engineer provided the following: {engineer_output}
 
 Provide an approach or solution from a data-centric perspective.
+
 """
     inputs = tokenizer.encode(prompt, return_tensors="pt")
     outputs = model.generate(
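Note on the two prompt changes above: both docstrings now name the agent's role, and the analyst prompt drops its `User text: {user_text}` line, so after this commit the analyst is conditioned only on the engineer's reply (its `user_text` parameter goes unused). A quick way to see what the new analyst prompt renders to; the `engineer_output` value here is a made-up sample, not from the commit:

# Render the post-commit analyst prompt for a sample engineer reply.
engineer_output = "Use a key-value cache and batch the embedding lookups."
prompt = f"""
Engineer provided the following: {engineer_output}

Provide an approach or solution from a data-centric perspective.

"""
print(prompt)  # the user's original text no longer appears in this prompt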
@@ -81,26 +80,52 @@ st.title("Multi-Agent System with XAI Demo")
 if "conversation" not in st.session_state:
     st.session_state.conversation = []
 
-user_input = st.text_input("Enter a question/scenario:")
+if "user_input" not in st.session_state:
+    st.session_state.user_input = ""
+
+st.text_area("User Input:", value=st.session_state.user_input, height=100, max_chars=None, key="user_input")
 
 if st.button("Start/Continue Conversation"):
-    if user_input.strip():
-        # 1) Engineer
+    if st.session_state.user_input.strip():
+        user_text = st.session_state.user_input
+        st.session_state.conversation.append(("User", user_text))
+
+        # Engineer generates a response
         engineer_resp = generate_engineer_response(
-            user_text=user_input,
+            user_text=user_text,
             tokenizer=tokenizerE,
             model=modelE
         )
         st.session_state.conversation.append(("Engineer", engineer_resp))
 
-        # 2) Analyst
+        # Analyst generates a response based on engineer's output
         analyst_resp = generate_analyst_response(
-            user_text=user_input,
+            user_text=user_text,
             engineer_output=engineer_resp,
             tokenizer=tokenizerA,
             model=modelA
         )
         st.session_state.conversation.append(("Analyst", analyst_resp))
 
+        # Limit the conversation to 3 exchanges between Engineer and Analyst
+        for _ in range(2):
+            engineer_resp = generate_engineer_response(
+                user_text=analyst_resp,
+                tokenizer=tokenizerE,
+                model=modelE
+            )
+            st.session_state.conversation.append(("Engineer", engineer_resp))
+
+            analyst_resp = generate_analyst_response(
+                user_text=engineer_resp,
+                engineer_output=engineer_resp,
+                tokenizer=tokenizerA,
+                model=modelA
+            )
+            st.session_state.conversation.append(("Analyst", analyst_resp))
+
 for speaker, text in st.session_state.conversation:
-    st.markdown(f"**{speaker}:** {text}")
+    if speaker == "User":
+        st.markdown(f"**{speaker}:** {text}")
+    else:
+        st.markdown(f"<div style='display:none'>{speaker}: {text}</div>", unsafe_allow_html=True)
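The last hunk replaces the single Engineer-then-Analyst pass with a fixed three-exchange loop. Two details worth flagging: inside the loop each agent is prompted only with the other's previous reply, so the original user input leaves the prompts after round one; and the loop's analyst call passes `engineer_resp` as both `user_text` and `engineer_output`, which is harmless only because the analyst prompt now ignores `user_text`. A minimal sketch of the control flow with generation stubbed out; `agent_reply` is a hypothetical stand-in for the two `generate_*` functions, not part of the commit:

# Minimal sketch of the commit's conversation loop, generation stubbed out.
def agent_reply(role: str, text: str) -> str:
    return f"[{role}] reply to: {text[:40]}..."

conversation = []
user_text = "How should we deduplicate the training data?"  # sample input
conversation.append(("User", user_text))

# First exchange: engineer sees the user, analyst sees the engineer.
engineer_resp = agent_reply("Engineer", user_text)
conversation.append(("Engineer", engineer_resp))
analyst_resp = agent_reply("Analyst", engineer_resp)
conversation.append(("Analyst", analyst_resp))

# Two more rounds: each agent is prompted with the other's last reply.
for _ in range(2):
    engineer_resp = agent_reply("Engineer", analyst_resp)
    conversation.append(("Engineer", engineer_resp))
    analyst_resp = agent_reply("Analyst", engineer_resp)
    conversation.append(("Analyst", analyst_resp))

for speaker, text in conversation:
    print(f"{speaker}: {text}")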
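The rendering change at the bottom of the hunk keeps every turn in `st.session_state.conversation` but shows only the `User` turns; Engineer and Analyst output is wrapped in a `display:none` div, so it reaches the page markup without being visible. If the intent is to keep agent turns inspectable rather than fully hidden, a collapsed `st.expander` is one alternative; this is a sketch against the same `(speaker, text)` session-state layout, not part of the commit:

# Sketch: surface agent turns in collapsed expanders instead of hiding
# them in an invisible div.
import streamlit as st

for speaker, text in st.session_state.conversation:
    if speaker == "User":
        st.markdown(f"**{speaker}:** {text}")
    else:
        with st.expander(f"{speaker} response"):
            st.markdown(text)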