Datasets:

ArXiv:
File size: 4,963 Bytes
9f3bc09
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
import json
import os
import re
from typing import Optional

def parse_json(json_str):
    """Parse *json_str* into a Python object, tolerating common LLM-output quirks.

    Strategy, in order:
      1. Plain ``json.loads``.
      2. If that fails, extract the payload from a markdown ```json fenced block
         and retry.
      3. If that still fails, strip ASCII control characters that are illegal
         inside JSON strings (0x00-0x08, 0x0B, 0x0C, 0x0E-0x1F, 0x7F) — while
         keeping structural whitespace such as \\n and \\t — and retry once more.

    Returns the decoded object; re-raises the final parsing exception (after
    printing debug context) when every attempt fails.
    """
    try:
        return json.loads(json_str)
    except Exception as e1:
        # First attempt: the payload may be wrapped in a markdown ```json fence.
        json_match = re.search(r'```json\n(.*?)\n```', json_str, re.DOTALL)
        if json_match:
            json_str = json_match.group(1)
            try:
                return json.loads(json_str)
            except json.JSONDecodeError:
                # Was a bare `except:`; narrow it and fall through to cleanup.
                pass

        # Second attempt: remove only control characters that shouldn't appear in
        # JSON strings; newlines/tabs that are part of JSON formatting survive.
        cleaned_str = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]', '', json_str)

        try:
            return json.loads(cleaned_str)
        except Exception as e2:
            # Surface both failures to help debug malformed model output.
            print(f"DEBUG: Failed to parse JSON. Original error: {e1}")
            print(f"DEBUG: After cleanup error: {e2}")
            print(f"DEBUG: Original JSON string (first 500 chars): {repr(json_str[:500])}")
            print(f"DEBUG: Cleaned JSON string (first 500 chars): {repr(cleaned_str[:500])}")
            raise

def _levenshtein(a: str, b: str) -> int:
    """Edit distance between *a* and *b* (classic two-row DP)."""
    if len(a) < len(b):
        a, b = b, a  # ensure b is the shorter string so the row stays small
    previous = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        current = [i]
        for j, cb in enumerate(b, 1):
            current.append(min(
                previous[j] + 1,              # deletion
                current[j - 1] + 1,           # insertion
                previous[j - 1] + (ca != cb)  # substitution (0 if chars match)
            ))
        previous = current
    return previous[-1]


def most_similar_string(prompt, string_list) -> dict:
    """Return the entry of *string_list* whose "Prompt" is closest to *prompt*.

    Each item of *string_list* is a dict with (at least) a "Prompt" key.
    Ties are broken in favor of the earliest entry.

    NOTE: the original body called ``Levenshtein.distance`` although the
    ``Levenshtein`` package is never imported in this file (NameError at
    runtime); the stdlib DP helper above computes the identical metric.
    """
    similarities = [_levenshtein(prompt, item["Prompt"]) for item in string_list]
    return string_list[similarities.index(min(similarities))]


def check_and_fix_prompt(chosed_prompts, prompt_list) -> dict:
    """Snap each chosen prompt onto its closest canonical entry in *prompt_list*.

    *chosed_prompts* maps keys to dicts carrying "Thought" and "Prompt";
    presumably *prompt_list* holds the canonical prompt dicts — confirm with
    caller. The returned dict maps each key to a copy of the best-matching
    canonical entry with that key's "Thought" attached.

    BUG FIX: the original assigned "Thought" directly onto the dict returned by
    most_similar_string, mutating *prompt_list* in place; when two keys matched
    the same canonical prompt, the later "Thought" overwrote the earlier one in
    both results. Copying the matched entry keeps each result independent.
    """
    results_dict = {}
    for key, item in chosed_prompts.items():
        sim_item = dict(most_similar_string(item["Prompt"], prompt_list))
        sim_item["Thought"] = item["Thought"]
        results_dict[key] = sim_item
    return results_dict


def format_dimension_as_string(df, dimension_name) -> str:
    """Render one rubric row of *df* as a single descriptive string.

    *df* is expected to carry the columns 'Dimension', 'Very High', 'High',
    'Moderate', 'Low', 'Very Low' (assumed from usage — confirm with caller).
    The 'Dimension' column is normalized IN PLACE to snake_case lookup keys
    (e.g. "Subject Consistency" -> "subject_consistency") before the lookup.

    Returns the formatted string, or a "No data found" message when
    *dimension_name* has no matching row.
    """
    # BUG FIX: the original used Series.replace(" ", "_"), which only replaces
    # cells that are *exactly* a single space — spaces inside values were never
    # converted, so "subject_consistency"-style lookups always missed.
    # .str.replace performs the intended substring substitution.
    df["Dimension"] = df["Dimension"].str.lower().str.replace(" ", "_", regex=False)
    row = df.loc[df['Dimension'] == dimension_name]
    if row.empty:
        return f"No data found for dimension: {dimension_name}"

    formatted_string = (
        f"{row['Dimension'].values[0]}: "
        f"Very High -> {row['Very High'].values[0]}, "
        f"High -> {row['High'].values[0]}, "
        f"Moderate -> {row['Moderate'].values[0]}, "
        f"Low -> {row['Low'].values[0]}, "
        f"Very Low -> {row['Very Low'].values[0]}"
    )

    return formatted_string

def extract_between_tags(text, tag) -> Optional[str]:
    """Return the content of the first <tag>...</tag> span in *text*, or None.

    Matching is non-greedy and spans newlines (re.DOTALL). The return
    annotation is Optional because a missing tag yields None (the original
    annotation claimed a plain str).
    """
    # re.escape keeps regex metacharacters in *tag* from corrupting the
    # pattern; identical behavior for the plain alphanumeric tags used here.
    pattern = f'<{re.escape(tag)}>(.*?)</{re.escape(tag)}>'
    match = re.search(pattern, text, re.DOTALL)
    return match.group(1) if match else None


def format_plans(plans_str) -> dict:
    '''
    Split a planner response into its tagged parts.

    Input looks like
    "<think>The user's query is about ...</think><tool>Multiple Objects</tool>"
    or the summarizing variant with a <summary> tag instead of <tool>.

    Returns {"thought": ..., "summary": ...} when a closing </summary> tag is
    present, otherwise {"thought": ..., "tool": ...}; missing tags yield None.
    '''
    # The second key depends on which kind of response this is.
    trailing_tag = "summary" if '</summary>' in plans_str else "tool"
    return {
        "thought": extract_between_tags(plans_str, "think"),
        trailing_tag: extract_between_tags(plans_str, trailing_tag),
    }

def save_json(content, file_path):
    """Serialize *content* to *file_path* as JSON (4-space indent) and report where it went."""
    serialized = json.dumps(content, indent=4)
    with open(file_path, 'w') as out_file:
        out_file.write(serialized)
    print(f"Results are saved to {os.path.abspath(file_path)}")

def tool_existence(tool_name):
    """Map a free-form tool name to its canonical snake_case id, or None if unknown.

    Accepts variants such as "Subject Consistency", "subject consistency",
    "subject_consistency", or prefixed forms like "advanced_subject_consistency".
    """
    known_tools = ["subject consistency", "background consistency", "motion smoothness", "aesthetic quality", "imaging quality", "appearance style", "temporal style", "overall consistency", "multiple objects", "object class", "dynamic degree", "human action", "color", "spatial relationship", "scene"]

    # Normalize casing and underscores so substring matching works uniformly.
    normalized = tool_name.lower().replace("_", " ")
    matched = next((t for t in known_tools if t in normalized), None)
    return matched.replace(" ", "_") if matched is not None else None

def compute_score(pred, gt):
    """Score *pred* against ground truth *gt*.

    Runs *pred* through ``parse_answer`` (defined elsewhere in the project),
    extracts the first standalone uppercase letter from the result, and returns
    1.0 if it equals *gt* case-insensitively, else 0.0. Presumably *gt* is a
    single option letter like "A" — confirm against callers.

    Returns 0.0 when parsing yields None or no uppercase letter is found.
    """
    answer = parse_answer(pred)
    if answer is None:
        return 0.

    # Was a bare try/except that silently swallowed all errors; an explicit
    # None check on the match keeps the same "no letter -> 0" behavior.
    match = re.search(r"\s*([A-Z])\s*", answer, re.DOTALL)
    if match is None:
        return 0.

    # group(1) is a single [A-Z] character, so strip() is a no-op — dropped.
    return 1. if match.group(1).lower() == gt.strip().lower() else 0.