import json
import requests
from openai import OpenAI
# vLLM is optional; it is only needed for local inference in
# BaseAgent_SFT (local path) and BaseAgent_Open below.
try:
    from vllm import LLM, SamplingParams
except ImportError:
    LLM = SamplingParams = None
class BaseAgent:
    """Chat agent backed by the OpenAI API, with optional history and JSON output."""

    def __init__(self, system_prompt="", use_history=True, temp=0.5, top_p=0.95):
        self.use_history = use_history
        self.client = OpenAI()
        self.system = system_prompt
        self.temp = temp
        self.top_p = top_p
        self.input_tokens_count = 0
        self.output_tokens_count = 0
        self.messages = []
        if self.system:
            self.messages.append({"role": "system", "content": system_prompt})
    def __call__(self, message, parse=False):
        self.messages.append({"role": "user", "content": message})
        result = self.generate(message, parse)
        self.messages.append({"role": "assistant", "content": result})
        if parse:
            try:
                result = self.parse_json(result)
            except json.JSONDecodeError as e:
                raise ValueError(f"Failed to parse JSON from model output:\n{result}") from e
        return result
    def generate(self, message, json_format):
        if self.use_history:
            input_messages = self.messages
        else:
            input_messages = [
                {"role": "system", "content": self.system},
                {"role": "user", "content": message},
            ]
        kwargs = {}
        if json_format:
            # Constrain the model to emit a single JSON object.
            kwargs["response_format"] = {"type": "json_object"}
        response = self.client.chat.completions.create(
            model="gpt-4o-2024-08-06",
            messages=input_messages,
            temperature=self.temp,
            top_p=self.top_p,
            **kwargs,
        )
        self.update_tokens_count(response)
        return response.choices[0].message.content
    def parse_json(self, response):
        return json.loads(response)

    def add(self, message: dict):
        self.messages.append(message)

    def update_tokens_count(self, response):
        self.input_tokens_count += response.usage.prompt_tokens
        self.output_tokens_count += response.usage.completion_tokens

    def show_usage(self):
        print(f"Total input tokens used: {self.input_tokens_count}\n"
              f"Total output tokens used: {self.output_tokens_count}")
class BaseAgent_SFT:
    """Agent for a fine-tuned model, served over an OpenAI-compatible HTTP
    endpoint or loaded locally through vLLM."""

    def __init__(self, system_prompt="", use_history=True, temp=0, top_p=1,
                 model_name_or_path="http://0.0.0.0:12333/v1/chat/completions"):
        self.use_history = use_history
        if not model_name_or_path.startswith("http"):
            # Local mode: load the checkpoint in-process with vLLM.
            self.client = LLM(model=model_name_or_path, tokenizer=model_name_or_path,
                              gpu_memory_utilization=0.5, tensor_parallel_size=1)
            self.tokenizer = self.client.get_tokenizer()
            self.api = False
        else:
            # API mode: self.client holds the endpoint URL.
            self.client = model_name_or_path
            self.model_name = "eval-agent"
            self.api = True
        self.system = system_prompt
        self.temp = temp
        self.top_p = top_p
        self.input_tokens_count = 0
        self.output_tokens_count = 0
        self.messages = []
        if self.system:
            self.messages.append({"role": "system", "content": system_prompt})
    def __call__(self, message):
        self.messages.append({"role": "user", "content": message})
        result = self.generate(message)
        self.messages.append({"role": "assistant", "content": result})
        return result

    def generate(self, message):
        if self.use_history:
            input_messages = self.messages
        else:
            input_messages = [
                {"role": "system", "content": self.system},
                {"role": "user", "content": message},
            ]
        if self.api:
            payload = {
                "model": self.model_name,
                "messages": input_messages,
                "max_tokens": 1024,
                "temperature": self.temp,
                "top_p": self.top_p,
                "stream": False,
            }
            # Retry up to three times on transport or response-shape errors.
            for _ in range(3):
                try:
                    response = requests.post(self.client, json=payload, timeout=120)
                    response.raise_for_status()
                    result = response.json()
                    return result["choices"][0]["message"]["content"]
                except requests.exceptions.RequestException as e:
                    print(f"❌ API request failed: {e}")
                    continue
                except (KeyError, IndexError) as e:
                    print(f"❌ Unexpected response format: {e}")
                    continue
            return None
        else:
            # vLLM's generate() takes a prompt string, so render the chat
            # messages with the tokenizer's chat template first (mirroring
            # BaseAgent_Open below).
            prompt = self.tokenizer.apply_chat_template(
                input_messages, tokenize=False, add_generation_prompt=True
            )
            response = self.client.generate(
                prompt,
                sampling_params=SamplingParams(
                    max_tokens=1024,
                    temperature=self.temp,
                    top_p=self.top_p,
                    n=1,
                ),
            )
            return response[0].outputs[0].text
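
# Example usage for BaseAgent_SFT (a sketch): the default endpoint assumes an
# OpenAI-compatible server is already running at http://0.0.0.0:12333 and
# serving a model registered as "eval-agent"; the local path is a placeholder.
#
#   sft_agent = BaseAgent_SFT(system_prompt="You are an evaluation agent.")
#   reply = sft_agent("Evaluate the following trajectory: ...")
#
#   # Or load a checkpoint in-process via vLLM instead of calling an API:
#   local_agent = BaseAgent_SFT(model_name_or_path="/path/to/checkpoint")
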
class BaseAgent_Open:
    """Agent backed by an open-weights model loaded locally through vLLM."""

    def __init__(self, system_prompt="", use_history=True, temp=0, top_p=1,
                 model_name_or_path="Qwen/Qwen2.5-3B-Instruct"):
        self.use_history = use_history
        self.client = LLM(model=model_name_or_path, tokenizer=model_name_or_path,
                          gpu_memory_utilization=0.5, tensor_parallel_size=1)
        self.tokenizer = self.client.get_tokenizer()
        self.system = system_prompt
        self.temp = temp
        self.top_p = top_p
        self.messages = []
        if self.system:
            self.messages.append({"role": "system", "content": system_prompt})

    def __call__(self, message):
        self.messages.append({"role": "user", "content": message})
        result = self.generate(message)
        self.messages.append({"role": "assistant", "content": result})
        return result

    def generate(self, message):
        if self.use_history:
            input_messages = self.messages
        else:
            input_messages = [
                {"role": "system", "content": self.system},
                {"role": "user", "content": message},
            ]
        # Convert messages to a prompt string using the tokenizer's chat template.
        prompt = self.tokenizer.apply_chat_template(
            input_messages,
            tokenize=False,
            add_generation_prompt=True
        )
        response = self.client.generate(
            prompt,
            sampling_params=SamplingParams(
                max_tokens=1024,
                temperature=self.temp,
                top_p=self.top_p,
                n=1,
            ),
        )
        return response[0].outputs[0].text
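
# Example usage for BaseAgent_Open (a sketch): loading Qwen/Qwen2.5-3B-Instruct
# through vLLM requires a GPU with enough free memory for
# gpu_memory_utilization=0.5.
#
#   open_agent = BaseAgent_Open(system_prompt="You are a helpful assistant.")
#   reply = open_agent("Summarize the evaluation criteria in one paragraph.")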