#!/usr/bin/env python3
"""
Improved script to find all JSON files starting with 'chat_history_output' 
and copy them to data/preprocess folder with shorter filenames.
"""

import os
import shutil
import glob
import hashlib

# Model directory names recognized in result paths (shared by the short-name
# generation below and the per-model summary at the end).
KNOWN_MODELS = {'vc2', 'vc09', 'modelscope', 'latte1', 'vc10-large',
                'sdxl-1.0', 'sd-2.1', 'sd-1.4', 'sd-3'}

def generate_short_name(original_path, counter):
    """Generate a short, unique filename."""
    # Extract key parts
    path_parts = original_path.split(os.sep)
    
    # Find relevant parts
    model_name = None
    dimension = None
    for part in path_parts:
        if part in KNOWN_MODELS:
            model_name = part
        if part.startswith('2024-') and 'How_' in part:
            # Directory names look like '2024-MM-DD-How_...'; strip the date
            # prefix to recover the question text.
            question = part.split('-', 3)[-1] if len(part.split('-', 3)) > 3 else part
            # Keep the first few words as a short, readable dimension tag.
            words = question.replace('How_', '').replace('_', ' ').split()[:3]
            dimension = '_'.join(words).replace('?', '')
    
    # Create hash of full path for uniqueness
    path_hash = hashlib.md5(original_path.encode()).hexdigest()[:8]
    
    # Build short name
    parts = []
    if model_name:
        parts.append(model_name)
    if dimension:
        parts.append(dimension[:30])  # Limit dimension length
    parts.append(f"hash_{path_hash}")
    parts.append(f"id_{counter:04d}")
    
    return f"{'_'.join(parts)}.json"

def find_and_copy_chat_history_files():
    """Find all chat_history_output*.json files and copy them to data/preprocess."""
    
    # Source directory
    # source_dir = "/home/data2/sltian/code/evaluation_agent_dev/ea-data/agent/vbench_results"
    source_dir = "/home/data2/sltian/code/evaluation_agent_dev/ea-data/agent/t2i_results"
    
    # Destination directory
    dest_dir = "/home/data2/sltian/code/evaluation_agent_dev/data/preprocess-t2i"
    
    # Create destination directory if it doesn't exist
    os.makedirs(dest_dir, exist_ok=True)
    
    # Find all *chat_history*.json files recursively
    pattern = os.path.join(source_dir, "**/*chat_history*.json")
    chat_files = glob.glob(pattern, recursive=True)
    
    print(f"Found {len(chat_files)} *chat_history*.json files")
    
    copied_files = []
    counter = 1
    
    for file_path in chat_files:
        # Generate a short filename
        short_filename = generate_short_name(file_path, counter)
        
        # Destination file path
        dest_file = os.path.join(dest_dir, short_filename)
        
        # Copy the file
        try:
            shutil.copy2(file_path, dest_file)
            copied_files.append((file_path, dest_file))
            print(f"Copied [{counter:4d}]: {os.path.basename(file_path)} -> {short_filename}")
            counter += 1
        except Exception as e:
            print(f"Error copying {file_path}: {e}")
    
    print(f"\nSuccessfully copied {len(copied_files)} files to {dest_dir}")
    
    # Create a detailed mapping file
    mapping_file = os.path.join(dest_dir, "detailed_file_mapping.txt")
    with open(mapping_file, "w") as f:
        f.write("Short Filename -> Original Path\n")
        f.write("=" * 80 + "\n")
        for orig, copied in copied_files:
            short_name = os.path.basename(copied)
            f.write(f"{short_name} -> {orig}\n")
    
    print(f"Created detailed file mapping at: {mapping_file}")
    
    # Create a summary by model
    summary_file = os.path.join(dest_dir, "summary_by_model.txt")
    model_counts = {}
    for orig, copied in copied_files:
        path_parts = orig.split(os.sep)
        model = None
        for part in path_parts:
            if part in KNOWN_MODELS:
                model = part
                break
        if model:
            model_counts[model] = model_counts.get(model, 0) + 1
    
    with open(summary_file, "w") as f:
        f.write("Summary by Model\n")
        f.write("=" * 30 + "\n")
        for model, count in sorted(model_counts.items()):
            f.write(f"{model}: {count} files\n")
        f.write(f"\nTotal: {sum(model_counts.values())} files\n")
    
    print(f"Created summary at: {summary_file}")

if __name__ == "__main__":
    find_and_copy_chat_history_files()
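
# Usage note: run the script directly. The filename below is hypothetical
# (the original script name is not given), and the hard-coded absolute paths
# above point at the original author's machine, so adjust source_dir and
# dest_dir before running:
#   python3 copy_chat_history_files.py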