#!/usr/bin/env python3
"""
Interactive console for the fine-tuned Qwen2.5-3B evaluation agent model.

This script provides an interactive chat interface to experiment with the
trained model for various evaluation tasks, including VBench, T2I-CompBench,
and open-ended queries.
"""

import json
import requests
import argparse
import sys
from typing import List, Dict, Any, Optional
import readline  # For better input handling (line editing and history in input())


class InteractiveEvalAgent:
    """Interactive console for the evaluation agent model."""

    def __init__(self,
                 model_url: str = "http://0.0.0.0:12334/v1/chat/completions",
                 model_name: str = "eval-agent"):
        """
        Initialize the interactive console.

        Args:
            model_url: URL of the model server
            model_name: Name of the served model
        """
        self.model_url = model_url
        self.model_name = model_name
        self.conversation_history = []
        self.system_prompts = self._load_system_prompts()
        self.current_system = ""

    def _load_system_prompts(self) -> Dict[str, str]:
        """Load predefined system prompts for different evaluation tasks."""
        return {
            "vbench": """
You are an expert in evaluating video generation models. Your task is to dynamically explore the model's capabilities step by step, simulating the process of human exploration. Dynamic evaluation refers to initially providing a preliminary focus based on the user's question, and then continuously adjusting what aspects to focus on according to the intermediate evaluation results.

Below are the currently available evaluation tools:
• Subject Consistency - Assesses whether a subject maintains consistent appearance throughout the video
• Background Consistency - Assesses whether the background scene remains consistent throughout the video
• Motion Smoothness - Evaluates whether motion is smooth and natural
• Aesthetic Quality - Assesses the aesthetic quality of the generated video
• Imaging Quality - Assesses distortion levels (over-exposure, noise, blur)
• Overall Consistency - Evaluates alignment between video and input prompt
• Multiple Objects - Evaluates ability to generate multiple objects in one scene
• Object Class - Assesses ability to generate specific object classes accurately
• Dynamic Degree - Evaluates level of motion in the video
• Human Action - Assesses whether humans perform described actions accurately
• Color - Assesses whether colors match prompt specifications
• Spatial Relationship - Assesses spatial arrangement of objects
• Scene - Evaluates whether video represents the intended scene

Please provide analysis using this format:
Sub-aspect: The specific aspect you want to focus on
Tool: The evaluation tool you choose to use
Thought: Detailed explanation of your reasoning
""",
            "t2i_compbench": """
You are an expert evaluator for text-to-image generation models specializing in compositional understanding. Your role is to assess how well models understand and generate images based on compositional aspects like:
- Attribute binding (colors, shapes, textures)
- Object relationships and spatial arrangements
- Complex scene composition
- Multiple object interactions

Provide detailed analysis with specific examples and reasoning for your assessments.
""",
            "open_ended": """
You are an expert AI model evaluator with deep knowledge of generative models, computer vision, and machine learning evaluation methodologies. Provide comprehensive, insightful analysis of model capabilities, limitations, and performance characteristics. Support your analysis with specific examples and technical reasoning.
""",
            "general": """
# INSTRUCTIONS
You are an expert evaluation agent for text-to-video models. Your goal is to answer the user’s evaluation question through iterative, evidence-based probing. Follow the protocol below exactly. Produce no text outside the specified tags.
In each step, you must conduct reasoning inside <think> and </think> first every time before you get new information. During the reasoning, you should think about the sub-aspect to reason based on the user's query and the previous observation. Then, you should decide which evaluation tool to use to evaluate the model, by generating tool_name. The tool will be automatically executed, and the evaluation results will be returned to you enclosed in eval_results. You should analyze the evaluation results and recursively call the tool to evaluate the model until you have enough information to answer the user's query. After you think you have enough information to answer the user's query, you should generate summary to answer the user's query.

## Loop protocol (3–6 rounds typical; may stop earlier if sufficient)
Each round:
1) Reason PRIVATELY first in <think>:
   State the user’s question briefly. Propose ONE sub-aspect to probe next based on prior observations (or lack thereof). Justify the choice and define the minimal scenario/prompt complexity needed to test it. If previous results were weak on simple cases, stay simple; otherwise increase complexity stepwise.

2) Call EXACTLY ONE evaluation tool by name:
   <tool_name>TOOL_NAME</tool_name>
   - Choose only one tool per round.
   - Only use the tools provided.

3) The system will return:
   <eval_results>…sampled prompts, numeric scores, and/or video samples…</eval_results>

4) Upon receiving <eval_results>, start the next round (go to step 1), refining the sub-aspect based on the new evidence.


## TOOLS
Below are the currently available evaluation tools, and the description of each tool in the format of
{
    "tool_name": "tool_description"
}

The available tools are:
{
    "Subject Consistency": "This tool assesses whether a subject (e.g., a person, car, or cat) maintains consistent appearance throughout the video.",
    "Background Consistency": "This tool assesses whether the background scene remains consistent throughout the video.",
    "Motion Smoothness": "This tool evaluates whether the motion in the generated video is smooth and natural, following the physical laws of the real world. It focuses on the fluidity of movements rather than the visual consistency of subjects or backgrounds.",
    "Aesthetic Quality": "This tool can be used to assess the aesthetic quality of the generated video.",
    "Imaging Quality": "This tool assesses the level of distortion in the generated frames, including factors such as over-exposure, noise, and blur, to determine the overall clarity and visual fidelity.",
    "Appearance Style": "This tool assesses the consistency of the visual style (e.g., oil painting, black and white, watercolor) throughout the video, ensuring alignment with the specified look.",
    "Temporal Style": "This tool evaluates the consistency of temporal styles in the video, such as camera motions and other time-based effects, ensuring they align with the intended style.",
    "Overall Consistency": "This tool can evaluate the alignment between the generated video and the input prompt, i.e., whether the generation follows the prompt.",
    "Multiple Objects": "This tool can be used to evaluate the model’s ability to generate two different objects simultaneously in one scene.",
    "Object Class": "This tool assesses the model’s ability to generate specific classes of objects described in the text prompt accurately.",
    "Dynamic Degree": "This tool evaluates the level of motion in the video, assessing whether it contains significant dynamic movements, rather than being overly static.",
    "Human Action": "This tool assesses whether human subjects in the generated videos accurately perform the specific actions described in the text prompts.",
    "Color": "This tool assesses whether the colors of synthesized objects match the specifications provided in the text prompt.",
    "Spatial Relationship": "This tool assesses whether the spatial arrangement of objects matches the positioning and relationships described in the text prompt.",
    "Scene": "This tool evaluates whether the synthesized video accurately represents the intended scene described in the text prompt."
}

Initially, you will receive a query from the user. You will enter a loop with the following two options:

**Option 1**: In this option, you will propose a sub-aspect to focus on based on the user's query.

**Option 2**: If you feel that you have gathered sufficient information to answer the user's query, you may choose this option.

When performing the evaluation, you must follow the following rules strictly:
# Tag grammar (STRICT)
- Allowed tags: <think>, <tool_name>, <eval_results>, <summary>
- Tags are lowercase and must be properly paired. Never emit a closing tag that doesn’t match the most recent unclosed opening tag.
- You MUST generate <think> before you generate <tool_name> or <summary>.
- You MUST NOT generate <eval_results> yourself; it will be provided to you by the system after each tool call.
- After you output </summary>, STOP and output nothing else.
""",
            "prompt-sys": """
You are a prompt engineer for a video generation model, capable of selecting appropriate prompts based on the user's given theme or description.

To be successful, it is very important to follow these rules:
1. You only need to focus on the user's input and select the appropriate prompts for video generation based on the latest input.
2. When selecting prompts, it's important to consider and explain why each prompt was chosen.
3. For each query, please provide 3-9 prompts with diverse content, all of which should be highly relevant to the query.
4. Avoid using explicit generation-related instructions in the prompt, such as “generate a…”.

You will receive a prompt list. Please select the prompts to be used for this round from the list. Provide the chosen prompts in the following format:
{
    "Step 1": {
        "Prompt": "The chosen prompt",
        "Thought": "Explain why this prompt was chosen"
    },
    "Step 2": {
        "Prompt": "The chosen prompt",
        "Thought": "Explain why this prompt was chosen"
    },
    "Step 3": {
        "Prompt": "The chosen prompt",
        "Thought": "Explain why this prompt was chosen"
    },
    ...
}
Please ensure the output is in JSON format.
""",
            "prompt-sys-open": """
You are a prompt engineer for a video generation model. You will be given a list of prompts. You need to select the prompts to be used for this round from the list.

## RULES
1. Only focus on the user's original question and select the appropriate prompts for video generation based on the latest input.
2. Each time you select a prompt, you need to explain why you selected this prompt.
3. For each query, provide 3-9 prompts, all of which should be highly relevant to the query, while being as diverse as possible.
4. DO NOT use explicit generation-related instructions in the prompt, such as “generate a…”.

You will receive a prompt list. Please select the prompts to be used for this round from the list. Provide the chosen prompts in the following format:
{
    "Step 1": {
        "Prompt": "The chosen prompt",
        "Thought": "Explain why this prompt was chosen",
        "auxiliary_info": "(optional, only if the provided prompt has auxiliary information) The auxiliary information for the prompt."
    },
    "Step 2": {
        "Prompt": "The chosen prompt",
        "Thought": "Explain why this prompt was chosen",
        "auxiliary_info": "(optional, only if the provided prompt has auxiliary information) The auxiliary information for the prompt."
    },
    "Step 3": {
        "Prompt": "The chosen prompt",
        "Thought": "Explain why this prompt was chosen",
        "auxiliary_info": "(optional, only if the provided prompt has auxiliary information) The auxiliary information for the prompt."
    },
    ...
}
Please ensure the output is in JSON format.
""",
        }

    def call_model(self, message: str, system: str = "",
                   temperature: float = 0.7, max_tokens: int = 2048,
                   use_history: bool = True) -> Optional[str]:
        """
        Call the model with a message.

        Args:
            message: User message
            system: System prompt
            temperature: Sampling temperature
            max_tokens: Maximum tokens in response
            use_history: Whether to include conversation history

        Returns:
            Model response or None if error
        """
        messages = []

        # Add system prompt if provided
        if system:
            messages.append({"role": "system", "content": system})

        # Add conversation history if requested
        if use_history and self.conversation_history:
            for entry in self.conversation_history:
                messages.append({"role": "user", "content": entry["user"]})
                messages.append({"role": "assistant", "content": entry["assistant"]})

        # Add current message
        messages.append({"role": "user", "content": message})

        payload = {
            "model": self.model_name,
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "stream": False
        }

        try:
            response = requests.post(self.model_url, json=payload, timeout=120)
            response.raise_for_status()

            result = response.json()
            assistant_response = result["choices"][0]["message"]["content"]

            # Add to conversation history
            if use_history:
                self.conversation_history.append({
                    "user": message,
                    "assistant": assistant_response
                })

            return assistant_response

        except requests.exceptions.RequestException as e:
            print(f"❌ API request failed: {e}")
            return None
        except (KeyError, IndexError) as e:
            print(f"❌ Unexpected response format: {e}")
            return None

    def print_banner(self):
        """Print welcome banner."""
        print("=" * 70)
        print("🤖 INTERACTIVE EVALUATION AGENT CONSOLE")
        print("=" * 70)
        print("Fine-tuned Qwen2.5-3B for Text-to-Image/Video Quality Assessment")
        print(f"Model URL: {self.model_url}")
        print(f"Model Name: {self.model_name}")
        print("=" * 70)
        print()
        print("Available commands:")
        print("  /help          - Show this help message")
        print("  /system        - Set system prompt (vbench/t2i_compbench/open_ended/general)")
        print("  /clear         - Clear conversation history")
        print("  /history       - Show conversation history")
        print("  /examples      - Show example prompts")
        print("  /quit or /exit - Exit the console")
        print()
        print("Current system prompt: general")
        print("=" * 70)
        print()

    def print_examples(self):
        """Print example prompts for different evaluation tasks."""
        print("📝 EXAMPLE PROMPTS:")
        print("-" * 50)
        print("\n🎬 VBench (Video Evaluation):")
        print("• How accurately does the model generate specific object classes?")
        print("• How well does the model maintain subject consistency?")
        print("• How smooth are the motions in generated videos?")
        print("• How aesthetically pleasing are the generated videos?")
        print("\n🖼️ T2I-CompBench (Image Evaluation):")
        print("• How well does the model handle color accuracy?")
        print("• Can the model generate multiple objects with correct spatial relationships?")
        print("• How accurate is attribute binding in generated images?")
        print("• How well does the model handle complex scene composition?")
        print("\n💭 Open-ended Evaluation:")
        print("• What are the key strengths and weaknesses of this model?")
        print("• Compare this model's performance to other generation models")
        print("• Analyze the failure cases and suggest improvements")
        print("• What scenarios is this model best suited for?")
        print("-" * 50)

    def handle_command(self, user_input: str) -> bool:
        """
        Handle special commands.

        Args:
            user_input: User input string

        Returns:
            True if should continue, False if should exit
        """
        user_input = user_input.strip()

        if user_input in ["/quit", "/exit"]:
            print("👋 Goodbye!")
            return False

        elif user_input == "/help":
            self.print_banner()

        elif user_input == "/clear":
            self.conversation_history.clear()
            print("🗑️ Conversation history cleared.")

        elif user_input == "/history":
            if not self.conversation_history:
                print("📝 No conversation history.")
            else:
                print(f"📝 Conversation History ({len(self.conversation_history)} exchanges):")
                print("-" * 50)
                for i, entry in enumerate(self.conversation_history, 1):
                    print(f"\n[{i}] User: {entry['user'][:100]}...")
                    print(f"[{i}] Assistant: {entry['assistant'][:200]}...")
                print("-" * 50)

        elif user_input == "/examples":
            self.print_examples()

        elif user_input.startswith("/system"):
            parts = user_input.split(maxsplit=1)
            if len(parts) < 2:
                print("Available system prompts: vbench, t2i_compbench, open_ended, general, prompt-sys, prompt-sys-open")
            else:
                system_name = parts[1].strip()
                if system_name in self.system_prompts:
                    self.current_system = system_name
                    print(f"✅ System prompt set to: {system_name}")
                else:
                    print(f"❌ Unknown system prompt: {system_name}")
                    print("Available: vbench, t2i_compbench, open_ended, general, prompt-sys, prompt-sys-open")

        else:
            print(f"❌ Unknown command: {user_input}")

        return True

    def run(self):
        """Run the interactive console."""
        self.print_banner()

        # Test connection first
        print("🔗 Testing connection to model server...")
        test_response = self.call_model("Hello", use_history=False)
        if test_response is None:
            print("❌ Failed to connect to model server. Please check:")
            print(f"  - Server is running at {self.model_url}")
            print("  - Model is properly loaded")
            return
        else:
            print("✅ Connected successfully!")
        print()

        while True:
            try:
                # Show current system prompt in the input prompt
                system_indicator = f"[{self.current_system}]" if self.current_system else "[general]"
                user_input = input(f"🤖 {system_indicator} You: ").strip()

                if not user_input:
                    continue

                # Handle commands
                if user_input.startswith("/"):
                    if not self.handle_command(user_input):
                        break
                    continue

                # Get system prompt
                system_prompt = self.system_prompts.get(self.current_system, self.system_prompts["general"])

                print("\n🤔 Thinking...")

                # Call model
                response = self.call_model(user_input, system=system_prompt)

                if response:
                    print(f"\n🤖 Assistant:\n{response}\n")
                else:
                    print("❌ Failed to get response from model.\n")

            except KeyboardInterrupt:
                print("\n\n👋 Interrupted. Goodbye!")
                break
            except EOFError:
                print("\n\n👋 EOF. Goodbye!")
                break


def main():
    """Main function."""
    parser = argparse.ArgumentParser(description="Interactive console for evaluation agent")
    parser.add_argument("--model_url", default="http://0.0.0.0:12334/v1/chat/completions",
                        help="URL of the model server")
    parser.add_argument("--model_name", default="eval-agent",
                        help="Name of the served model")
    parser.add_argument("--model_port", default=None,
                        help="Port of the model server")
    args = parser.parse_args()

    if args.model_port is not None:
        args.model_url = f"http://0.0.0.0:{args.model_port}/v1/chat/completions"

    console = InteractiveEvalAgent(
        model_url=args.model_url,
        model_name=args.model_name
    )
    console.run()


if __name__ == "__main__":
    main()
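

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not executed by this script). It assumes an
# OpenAI-compatible chat-completions server is already running at the default
# URL, e.g. a vLLM instance serving the fine-tuned checkpoint under the name
# "eval-agent" on port 12334; the exact serving command depends on your stack
# and is not prescribed here.
#
# Interactive use (replace <this_script>.py with the actual filename):
#
#     python <this_script>.py --model_port 12334 --model_name eval-agent
#
# Programmatic (non-interactive) use, relying only on the class defined above:
#
#     agent = InteractiveEvalAgent()
#     reply = agent.call_model(
#         "How well does the model maintain subject consistency?",
#         system=agent.system_prompts["vbench"],
#         temperature=0.2,
#         use_history=False,
#     )
#     print(reply)
# ---------------------------------------------------------------------------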