6Genix committed on
Commit
4375e7a
·
1 Parent(s): 838db58

Addressed NameError.

Browse files
Files changed (1) hide show
  1. app.py +36 -41
app.py CHANGED
@@ -1,23 +1,54 @@
1
  import streamlit as st
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  @st.cache_resource
5
  def load_agentA():
6
- # e.g., DistilGPT2
7
  tokenizerA = AutoTokenizer.from_pretrained("distilgpt2")
8
  modelA = AutoModelForCausalLM.from_pretrained("distilgpt2")
9
  return tokenizerA, modelA
10
 
11
  @st.cache_resource
12
  def load_agentB():
13
- # e.g., GPT-Neo 125M
14
  tokenizerB = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
15
  modelB = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
16
  return tokenizerB, modelB
17
 
 
18
  tokenizerA, modelA = load_agentA()
19
  tokenizerB, modelB = load_agentB()
20
 
 
21
  st.title("True Multi-Agent Conversation")
22
 
23
  # We store the conversation as a list of (speaker, text).
@@ -27,12 +58,11 @@ if "conversation" not in st.session_state:
27
  user_input = st.text_input("Enter a question or scenario:")
28
 
29
  if st.button("Start/Continue Conversation"):
30
- # 1) The user’s prompt goes to Agent A first.
31
  if len(st.session_state.conversation) == 0:
32
  st.session_state.conversation.append(("User", user_input))
33
  else:
34
- # If conversation is ongoing, you can treat this user_input differently,
35
- # or ignore if you want to keep the user out after the initial scenario.
36
  st.session_state.conversation.append(("User", user_input))
37
 
38
  # --- AGENT A Step ---
@@ -55,39 +85,4 @@ if st.button("Start/Continue Conversation"):
55
 
56
  # Display the entire conversation so far
57
  for speaker, text in st.session_state.conversation:
58
- st.markdown(f"**{speaker}:** {text}")
59
-
60
-
61
- def generate_response(agent_name, model, tokenizer, conversation):
62
- """
63
- Takes the entire conversation as context, plus the agent name,
64
- and runs a single inference call for that agent.
65
- """
66
- # 1) Build a textual prompt from conversation
67
- # e.g. A simple approach: just concatenate everything
68
- # focusing on the last few messages to avoid token limit issues
69
- prompt_text = build_prompt(conversation, agent_name)
70
-
71
- inputs = tokenizer.encode(prompt_text, return_tensors="pt")
72
- outputs = model.generate(
73
- inputs,
74
- max_length=200,
75
- temperature=0.7,
76
- do_sample=True,
77
- top_p=0.9
78
- )
79
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
80
-
81
-
82
- def build_prompt(conversation, agent_name):
83
- """
84
- Construct a single prompt that includes the entire conversation so far,
85
- labeling each line with speaker, and ends with the new agent's label.
86
- """
87
- text_blocks = []
88
- for speaker, text in conversation:
89
- text_blocks.append(f"{speaker}: {text}")
90
-
91
- # Now add the new agent's label at the end, so the model continues from there
92
- text_blocks.append(f"{agent_name}:")
93
- return "\n".join(text_blocks)
 
1
  import streamlit as st
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
4
def build_prompt(conversation, agent_name):
    """
    Flatten the conversation into a single prompt string.

    Each (speaker, text) pair becomes one "speaker: text" line, and the
    prompt ends with "agent_name:" so the model continues as that agent.

    Parameters
    ----------
    conversation : list[tuple[str, str]]
        (speaker, text) pairs accumulated so far.
    agent_name : str
        Label of the agent that should speak next.

    Returns
    -------
    str
        Newline-joined prompt ending in the new agent's label.
    """
    lines = [f"{who}: {said}" for who, said in conversation]
    # The trailing bare label cues the model to speak as this agent.
    lines.append(f"{agent_name}:")
    return "\n".join(lines)
16
+
17
def generate_response(agent_name, model, tokenizer, conversation):
    """
    Run one inference call and return the model's text for *agent_name*.

    Parameters
    ----------
    agent_name : str
        Label of the agent that should speak next (e.g. "AgentA").
    model : transformers causal-LM used for generation.
    tokenizer : matching transformers tokenizer.
    conversation : list[tuple[str, str]]
        (speaker, text) pairs accumulated so far.

    Returns
    -------
    str
        Full decoded text (prompt plus continuation), special tokens removed.
    """
    prompt_text = build_prompt(conversation, agent_name)
    inputs = tokenizer.encode(prompt_text, return_tensors="pt")
    outputs = model.generate(
        inputs,
        # Bound only the NEW tokens. The previous max_length=200 counted the
        # prompt as well, so the reply silently shrank — and eventually
        # vanished — as the conversation grew past 200 tokens.
        max_new_tokens=200,
        temperature=0.7,
        do_sample=True,
        top_p=0.9,
        # GPT-2-family tokenizers define no pad token; without an explicit
        # pad_token_id, generate() emits a fallback warning on every call.
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
32
+
33
@st.cache_resource
def load_agentA():
    """Fetch and cache the tokenizer/model pair backing Agent A (DistilGPT2)."""
    checkpoint = "distilgpt2"
    tok = AutoTokenizer.from_pretrained(checkpoint)
    lm = AutoModelForCausalLM.from_pretrained(checkpoint)
    return tok, lm
39
 
40
@st.cache_resource
def load_agentB():
    """Fetch and cache the tokenizer/model pair backing Agent B (GPT-Neo-125M)."""
    checkpoint = "EleutherAI/gpt-neo-125M"
    tok = AutoTokenizer.from_pretrained(checkpoint)
    lm = AutoModelForCausalLM.from_pretrained(checkpoint)
    return tok, lm
46
 
47
+ # Load agents
48
  tokenizerA, modelA = load_agentA()
49
  tokenizerB, modelB = load_agentB()
50
 
51
+ # Streamlit app starts here
52
  st.title("True Multi-Agent Conversation")
53
 
54
  # We store the conversation as a list of (speaker, text).
 
58
  user_input = st.text_input("Enter a question or scenario:")
59
 
60
  if st.button("Start/Continue Conversation"):
61
+ # 1) If this is the first message, add the user input
62
  if len(st.session_state.conversation) == 0:
63
  st.session_state.conversation.append(("User", user_input))
64
  else:
65
+ # If conversation is ongoing, append user’s new input
 
66
  st.session_state.conversation.append(("User", user_input))
67
 
68
  # --- AGENT A Step ---
 
85
 
86
  # Display the entire conversation so far
87
  for speaker, text in st.session_state.conversation:
88
+ st.markdown(f"**{speaker}:** {text}")