6Genix committed
Commit 838db58 · 1 Parent(s): 703a1e6

Reconfigured app to introduce a true multi-agent system.

Files changed (1)
  1. app.py +76 -29
app.py CHANGED
@@ -2,45 +2,92 @@ import streamlit as st
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
 @st.cache_resource
-def load_model():
-    tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
-    model = AutoModelForCausalLM.from_pretrained("distilgpt2")
-    return tokenizer, model
-
-tokenizer, model = load_model()
-
-st.title("Multi-Agent Dialogue Simulator")
-user_input = st.text_input("Enter a scenario or question:")
-
-if st.button("Generate Collaboration"):
-    # Create a custom prompt with two roles
-    prompt = f"""
-The following is a conversation between two agents:
-Agent A: A Lean Six Sigma process re-engineer.
-Agent B: An AI/data scientist.
-
-They discuss how to solve the user's challenge:
-
-User scenario: {user_input}
-
-Agent A: Let's break down the problem step by step.
-Agent B:
 """
-    # Generate the conversation
-    inputs = tokenizer.encode(prompt, return_tensors="pt")
     outputs = model.generate(
-        inputs,
-        max_length=300,
-        min_length=50,
         temperature=0.7,
         do_sample=True,
-        top_p=0.9,
-        repetition_penalty=1.2
     )
-    raw_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-    # Post-process to split or isolate Agent B's portion
-    # (For simplicity, we'll just display raw_text)
-    st.markdown("**Conversation**:")
-    st.write(raw_text)
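The removed version's closing comment mentions post-processing to isolate Agent B's portion but never implements it. A minimal sketch of what that step could look like, assuming raw_text still contains the full prompt (the helper name isolate_agent_b is illustrative, not part of the commit):

def isolate_agent_b(raw_text: str) -> str:
    # Everything after the last "Agent B:" label is the new completion.
    reply = raw_text.rsplit("Agent B:", 1)[-1]
    # Stop at the next speaker label, in case the model kept role-playing.
    for label in ("Agent A:", "User scenario:"):
        if label in reply:
            reply = reply.split(label, 1)[0]
    return reply.strip()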
 
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
 @st.cache_resource
+def load_agentA():
+    # e.g., DistilGPT2
+    tokenizerA = AutoTokenizer.from_pretrained("distilgpt2")
+    modelA = AutoModelForCausalLM.from_pretrained("distilgpt2")
+    return tokenizerA, modelA
+
+@st.cache_resource
+def load_agentB():
+    # e.g., GPT-Neo 125M
+    tokenizerB = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
+    modelB = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
+    return tokenizerB, modelB
+
+tokenizerA, modelA = load_agentA()
+tokenizerB, modelB = load_agentB()
+
+# Helpers are defined before the UI code below, since Streamlit reruns the
+# script top to bottom and the button handler calls them directly.
+def build_prompt(conversation, agent_name):
+    """
+    Construct a single prompt that includes the entire conversation so far,
+    labeling each line with its speaker, and ends with the new agent's label.
+    """
+    text_blocks = []
+    for speaker, text in conversation:
+        text_blocks.append(f"{speaker}: {text}")
+    # Add the new agent's label at the end, so the model continues from there.
+    text_blocks.append(f"{agent_name}:")
+    return "\n".join(text_blocks)
+
+def generate_response(agent_name, model, tokenizer, conversation):
+    """
+    Takes the entire conversation as context, plus the agent name,
+    and runs a single inference call for that agent.
+    """
+    # Build a textual prompt from the conversation. A simple approach:
+    # concatenate everything, focusing on the last few messages to avoid
+    # token limit issues.
+    prompt_text = build_prompt(conversation, agent_name)
+    inputs = tokenizer.encode(prompt_text, return_tensors="pt")
     outputs = model.generate(
+        inputs,
+        max_new_tokens=200,
         temperature=0.7,
         do_sample=True,
+        top_p=0.9,
+        pad_token_id=tokenizer.eos_token_id
     )
+    # Decode only the newly generated tokens, so the reply doesn't repeat
+    # the entire conversation it was prompted with.
+    return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True).strip()
+
+st.title("True Multi-Agent Conversation")
+
+# We store the conversation as a list of (speaker, text) tuples.
+if "conversation" not in st.session_state:
+    st.session_state.conversation = []
 
+user_input = st.text_input("Enter a question or scenario:")
 
+if st.button("Start/Continue Conversation"):
+    # The user's prompt goes to Agent A first. On later turns you could
+    # treat user_input differently, or ignore it to keep the user out
+    # after the initial scenario.
+    st.session_state.conversation.append(("User", user_input))
 
+    # --- AGENT A step ---
+    agentA_text = generate_response(
+        agent_name="Agent A",
+        model=modelA,
+        tokenizer=tokenizerA,
+        conversation=st.session_state.conversation
+    )
+    st.session_state.conversation.append(("Agent A", agentA_text))
+
+    # --- AGENT B step ---
+    agentB_text = generate_response(
+        agent_name="Agent B",
+        model=modelB,
+        tokenizer=tokenizerB,
+        conversation=st.session_state.conversation
+    )
+    st.session_state.conversation.append(("Agent B", agentB_text))
 
+# Display the entire conversation so far
+for speaker, text in st.session_state.conversation:
+    st.markdown(f"**{speaker}:** {text}")
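The comment in generate_response mentions focusing on the last few messages to avoid token-limit issues, but build_prompt concatenates every turn. A minimal windowing variant, assuming a fixed turn cutoff (the name build_prompt_windowed, the max_turns parameter, and the value 8 are illustrative assumptions, chosen to keep prompts inside distilgpt2's 1024-token context window):

def build_prompt_windowed(conversation, agent_name, max_turns=8):
    # Keep only the most recent turns so the assembled prompt stays
    # well inside the smaller model's context window.
    recent = conversation[-max_turns:]
    text_blocks = [f"{speaker}: {text}" for speaker, text in recent]
    text_blocks.append(f"{agent_name}:")
    return "\n".join(text_blocks)

For example, with two turns stored in session state:

conversation = [
    ("User", "How do we cut changeover time on line 3?"),
    ("Agent A", "Start by mapping the current changeover steps."),
]
print(build_prompt_windowed(conversation, "Agent B"))
# User: How do we cut changeover time on line 3?
# Agent A: Start by mapping the current changeover steps.
# Agent B: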