"""
Evaluation script for the fine-tuned Qwen2.5-3B evaluation agent model.

This script evaluates the trained model on various tasks, including:
- VBench evaluation (text-to-video generation quality assessment)
- T2I-CompBench evaluation (text-to-image generation quality assessment)
- Open-ended evaluation queries

The model uses a CoT-like reasoning format for quality assessment.
"""

import json
import argparse
import time
from typing import Dict, List, Any, Optional

import requests
from tqdm import tqdm
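
# The client below talks to an OpenAI-compatible chat-completions endpoint.
# A typical way to serve the fine-tuned checkpoint with vLLM is sketched here;
# the checkpoint path is a placeholder and the exact command/flags depend on
# your vLLM version, so adjust as needed:
#
#   python -m vllm.entrypoints.openai.api_server \
#       --model /path/to/qwen2.5-3b-eval-agent \
#       --served-model-name eval-agent \
#       --port 12333
#
# Example invocations of this script (filename assumed):
#
#   python test_eval_agent.py --test_tasks
#   python test_eval_agent.py --num_samples 100 --output_path evaluation_results.json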


class EvalAgentTester:
    """Tester for the fine-tuned evaluation agent model."""

    def __init__(self, model_url: str = "http://0.0.0.0:12333/v1/chat/completions",
                 model_name: str = "eval-agent"):
        """
        Initialize the evaluation tester.

        Args:
            model_url: URL of the model server (launched via vLLM)
            model_name: Name of the served model
        """
        self.model_url = model_url
        self.model_name = model_name
        self.test_data = {}

    def load_test_data(self, data_path: str = "data/postprocess_20250819/ea_cot_dataset_10k.json"):
        """Load the test dataset. Returns True on success, False otherwise."""
        try:
            with open(data_path, 'r', encoding='utf-8') as f:
                self.test_data = json.load(f)
            print(f"Loaded {len(self.test_data)} test samples from {data_path}")
        except FileNotFoundError:
            print(f"Test data file not found: {data_path}")
            print("Please run the data preprocessing script first.")
            return False
        except json.JSONDecodeError as e:
            print(f"Error parsing JSON file: {e}")
            return False
        return True
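
    # Each test sample is assumed to follow the Alpaca-style layout produced by the
    # preprocessing script; the field names below match what evaluate_sample() reads,
    # while the concrete values are purely illustrative:
    #
    #   {
    #       "instruction": "How accurately does the model generate specific object classes ...?",
    #       "input": "",
    #       "output": "Sub-aspect: ...\nTool: ...\nThought: ...",
    #       "system": "You are an expert in evaluating video generation models. ...",
    #       "history": [["earlier user turn", "earlier assistant turn"]]
    #   }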

    def call_model(self, instruction: str, input_text: str = "", system: str = "",
                   history: Optional[List] = None, max_tokens: int = 2048,
                   temperature: float = 0.7) -> Optional[str]:
        """
        Call the fine-tuned model via its OpenAI-compatible API.

        Args:
            instruction: Main instruction/question
            input_text: Additional input context
            system: System prompt
            history: Conversation history as (user, assistant) pairs
            max_tokens: Maximum response tokens
            temperature: Sampling temperature

        Returns:
            Model response text, or None if the request failed
        """
        messages = []

        if system:
            messages.append({"role": "system", "content": system})

        if history:
            for human, assistant in history:
                messages.append({"role": "user", "content": human})
                messages.append({"role": "assistant", "content": assistant})

        user_content = instruction
        if input_text:
            user_content = f"{instruction}\n\n{input_text}"

        messages.append({"role": "user", "content": user_content})

        payload = {
            "model": self.model_name,
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "stream": False
        }

        try:
            response = requests.post(self.model_url, json=payload, timeout=60)
            response.raise_for_status()

            result = response.json()
            return result["choices"][0]["message"]["content"]
        except requests.exceptions.RequestException as e:
            print(f"API request failed: {e}")
            return None
        except (KeyError, IndexError) as e:
            print(f"Unexpected response format: {e}")
            return None
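
    # For reference, the OpenAI-compatible response parsed above looks roughly like
    # this (abbreviated to the fields call_model() actually reads):
    #
    #   {
    #       "choices": [
    #           {"message": {"role": "assistant", "content": "Sub-aspect: ..."}}
    #       ]
    #   }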

    def evaluate_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
        """
        Evaluate a single sample.

        Args:
            sample: Test sample with instruction, input, output, system, history

        Returns:
            Evaluation result with prediction and metadata
        """
        instruction = sample.get("instruction", "")
        input_text = sample.get("input", "")
        expected_output = sample.get("output", "")
        system = sample.get("system", "")
        history = sample.get("history", [])

        prediction = self.call_model(
            instruction=instruction,
            input_text=input_text,
            system=system,
            history=history
        )

        result = {
            "instruction": instruction,
            "input": input_text,
            "expected_output": expected_output,
            "prediction": prediction,
            "system": system,
            "history": history,
            "success": prediction is not None
        }

        return result

    def run_evaluation(self, num_samples: int = 100, save_results: bool = True,
                       output_path: str = "evaluation_results.json") -> Dict[str, Any]:
        """
        Run evaluation on a subset of the test data.

        Args:
            num_samples: Number of samples to evaluate
            save_results: Whether to save results to file
            output_path: Path to save results

        Returns:
            Evaluation summary and per-sample results
        """
        if not self.test_data:
            print("No test data loaded. Please call load_test_data() first.")
            return {}

        # The dataset may be stored as a list of samples or as a dict keyed by ID.
        if isinstance(self.test_data, list):
            test_samples = self.test_data[:num_samples]
        else:
            test_samples = list(self.test_data.values())[:num_samples]

        print(f"Evaluating {len(test_samples)} samples...")

        results = []
        successful_calls = 0
        failed_calls = 0

        for sample in tqdm(test_samples, desc="Evaluating"):
            result = self.evaluate_sample(sample)
            results.append(result)

            if result["success"]:
                successful_calls += 1
            else:
                failed_calls += 1

            # Brief pause between requests to avoid overloading the server.
            time.sleep(0.1)

        summary = {
            "total_samples": len(test_samples),
            "successful_calls": successful_calls,
            "failed_calls": failed_calls,
            "success_rate": successful_calls / len(test_samples) if test_samples else 0,
            "results": results
        }

        if save_results:
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(summary, f, indent=2, ensure_ascii=False)
            print(f"Results saved to {output_path}")

        return summary

    def analyze_results(self, results: Dict[str, Any]) -> None:
        """
        Analyze and print evaluation results.

        Args:
            results: Results from run_evaluation()
        """
        if not results:
            print("No results to analyze.")
            return

        print("\n" + "="*50)
        print("EVALUATION SUMMARY")
        print("="*50)

        print(f"Total samples evaluated: {results['total_samples']}")
        print(f"Successful API calls: {results['successful_calls']}")
        print(f"Failed API calls: {results['failed_calls']}")
        print(f"Success rate: {results['success_rate']:.2%}")

        if results['results']:
            print("\n" + "-"*50)
            print("SAMPLE RESULTS")
            print("-"*50)

            successful_results = [r for r in results['results'] if r['success']]
            for i, result in enumerate(successful_results[:3]):
                print(f"\nSample {i+1}:")
                print(f"Instruction: {result['instruction'][:100]}...")
                print(f"Input: {result['input'][:50]}..." if result['input'] else "Input: (empty)")
                print(f"Expected: {result['expected_output'][:100]}...")
                print(f"Predicted: {result['prediction'][:100]}..." if result['prediction'] else "Predicted: (failed)")

        print("\n" + "="*50)

    def test_specific_tasks(self) -> None:
        """Test the model on specific evaluation tasks."""
        print("\n" + "="*50)
        print("TESTING SPECIFIC EVALUATION TASKS")
        print("="*50)

        print("\n1. Testing VBench-style video evaluation:")
        vbench_instruction = "How accurately does the model generate specific object classes as described in the text prompt?"
        vbench_system = """
You are an expert in evaluating video generation models. Your task is to dynamically explore the model's capabilities step by step, simulating the process of human exploration.

Dynamic evaluation refers to initially providing a preliminary focus based on the user's question, and then continuously adjusting what aspects to focus on according to the intermediate evaluation results.

Please provide your analysis using the following format:
Sub-aspect: The specific aspect you want to focus on.
Tool: The evaluation tool you choose to use.
Thought: Detailed explanation of your reasoning.
"""

        response = self.call_model(
            instruction=vbench_instruction,
            system=vbench_system
        )
        print(f"Response: {response[:500]}..." if response else "Failed to get response")

        print("\n2. Testing T2I-CompBench-style image evaluation:")
        t2i_instruction = "How well does the model handle color accuracy in generated images?"
        t2i_system = """
You are an expert evaluator for text-to-image generation models. Evaluate the model's performance on color accuracy.

Provide your assessment with reasoning and specific examples.
"""

        response = self.call_model(
            instruction=t2i_instruction,
            system=t2i_system
        )
        print(f"Response: {response[:500]}..." if response else "Failed to get response")

        print("\n3. Testing open-ended evaluation:")
        open_instruction = "What are the key strengths and weaknesses of this image generation model?"

        response = self.call_model(
            instruction=open_instruction
        )
        print(f"Response: {response[:500]}..." if response else "Failed to get response")

        print("\n" + "="*50)
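
# Programmatic usage sketch (assumes an "eval-agent" server is already running; see
# the launch example near the top of this file):
#
#   tester = EvalAgentTester()
#   if tester.load_test_data():
#       summary = tester.run_evaluation(num_samples=10, save_results=False)
#       tester.analyze_results(summary)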


def main():
    """Main evaluation function."""
    parser = argparse.ArgumentParser(description="Evaluate the fine-tuned Qwen2.5-3B evaluation agent")
    parser.add_argument("--model_url", default="http://0.0.0.0:12333/v1/chat/completions",
                        help="URL of the model server")
    parser.add_argument("--model_name", default="eval-agent",
                        help="Name of the served model")
    parser.add_argument("--data_path", default="data/postprocess_20250819/ea_cot_dataset_10k.json",
                        help="Path to test dataset")
    parser.add_argument("--num_samples", type=int, default=50,
                        help="Number of samples to evaluate")
    parser.add_argument("--output_path", default="evaluation_results.json",
                        help="Path to save evaluation results")
    parser.add_argument("--test_tasks", action="store_true",
                        help="Run specific task tests instead of full evaluation")

    args = parser.parse_args()

    print("Initializing Evaluation Agent Tester...")
    tester = EvalAgentTester(model_url=args.model_url, model_name=args.model_name)

    if args.test_tasks:
        tester.test_specific_tasks()
    else:
        print(f"Loading test data from {args.data_path}...")
        if not tester.load_test_data(args.data_path):
            print("Failed to load test data. Exiting.")
            return

        print("Starting evaluation...")
        results = tester.run_evaluation(
            num_samples=args.num_samples,
            save_results=True,
            output_path=args.output_path
        )

        tester.analyze_results(results)

        print(f"\nEvaluation complete! Results saved to {args.output_path}")


if __name__ == "__main__":
    main()