6Genix committed on
Commit
703a1e6
·
1 Parent(s): e2760f6

Updated policy to include kick-start.

Browse files
Files changed (1) hide show
  1. app.py +12 -9
app.py CHANGED
@@ -15,25 +15,28 @@ user_input = st.text_input("Enter a scenario or question:")
15
  if st.button("Generate Collaboration"):
16
  # Create a custom prompt with two roles
17
  prompt = f"""
18
- The following is a conversation between two agents.
19
- Agent A is a Lean Six Sigma process re-engineer.
20
- Agent B is an AI/data scientist.
21
- They discuss how to solve the user's challenge.
22
 
23
- User question/scenario: {user_input}
24
 
25
- Agent A:
 
 
 
26
  """
27
 
28
  # Generate the conversation
29
  inputs = tokenizer.encode(prompt, return_tensors="pt")
30
  outputs = model.generate(
31
  inputs,
32
- max_length=200,
33
- num_return_sequences=1,
34
  temperature=0.7,
35
  do_sample=True,
36
- top_p=0.9
 
37
  )
38
  raw_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
39
 
 
15
  if st.button("Generate Collaboration"):
16
  # Create a custom prompt with two roles
17
  prompt = f"""
18
+ The following is a conversation between two agents:
19
+ Agent A: A Lean Six Sigma process re-engineer.
20
+ Agent B: An AI/data scientist.
 
21
 
22
+ They discuss how to solve the user's challenge:
23
 
24
+ User scenario: {user_input}
25
+
26
+ Agent A: Let's break down the problem step by step.
27
+ Agent B:
28
  """
29
 
30
  # Generate the conversation
31
  inputs = tokenizer.encode(prompt, return_tensors="pt")
32
  outputs = model.generate(
33
  inputs,
34
+ max_length=300,
35
+ min_length=50,
36
  temperature=0.7,
37
  do_sample=True,
38
+ top_p=0.9,
39
+ repetition_penalty=1.2
40
  )
41
  raw_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
42