6Genix committed on
Commit
abd0aa0
·
1 Parent(s): 548eb84

Updated the controller

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -98,7 +98,7 @@ Analyst_Instructions: <...>
98
  inputs = tokenizer.encode(prompt, return_tensors="pt")
99
  outputs = model.generate(
100
  inputs,
101
- max_length=128, # keep it short
102
  temperature=0.7,
103
  do_sample=True,
104
  top_p=0.9,
@@ -131,7 +131,7 @@ If out of scope/unethical, politely refuse.
131
  inputs = tokenizer.encode(prompt, return_tensors="pt")
132
  outputs = model.generate(
133
  inputs,
134
- max_length=128,
135
  temperature=0.7,
136
  do_sample=True,
137
  top_p=0.9,
@@ -163,7 +163,7 @@ If out of scope/unethical, politely refuse.
163
  inputs = tokenizer.encode(prompt, return_tensors="pt")
164
  outputs = model.generate(
165
  inputs,
166
- max_length=128,
167
  temperature=0.7,
168
  do_sample=True,
169
  top_p=0.9,
 
98
  inputs = tokenizer.encode(prompt, return_tensors="pt")
99
  outputs = model.generate(
100
  inputs,
101
+ max_length=256, # Extend length for better outputs
102
  temperature=0.7,
103
  do_sample=True,
104
  top_p=0.9,
 
131
  inputs = tokenizer.encode(prompt, return_tensors="pt")
132
  outputs = model.generate(
133
  inputs,
134
+ max_length=256, # Extend length for detailed outputs
135
  temperature=0.7,
136
  do_sample=True,
137
  top_p=0.9,
 
163
  inputs = tokenizer.encode(prompt, return_tensors="pt")
164
  outputs = model.generate(
165
  inputs,
166
+ max_length=256, # Extend length for detailed outputs
167
  temperature=0.7,
168
  do_sample=True,
169
  top_p=0.9,