# try_single/try_single.py — IWR-Bench dataset loading script.
# Uploaded via huggingface_hub (revision f7b470c).
import os
import json
import datasets
from pathlib import Path
# Short description shown on the dataset card.
_DESCRIPTION = "IWR-Bench."
# Project homepage URL (not provided yet).
_HOMEPAGE = ""
# SPDX-style license identifier for the dataset.
_LICENSE = "apache-2.0"
# BibTeX entry for the IWR-Bench paper (arXiv:2509.24709).
_CITATION = """\
@misc{chen2025iwrbenchlvlmsreconstructinteractive,
title={IWR-Bench: Can LVLMs reconstruct interactive webpage from a user interaction video?},
author={Yang Chen and Minghao Liu and Yufan Shen and Yunwen Li and Tianyuan Huang and Xinyu Fang and Tianyu Zheng and Wenxuan Huang and Cheng Yang and Daocheng Fu and Jianbiao Mei and Rong Wu and Yunfei Zhao and Licheng Wen and Xuemeng Yang and Song Mao and Qunshu Lin and Zhi Yu and Yongliang Shen and Yu Qiao and Botian Shi},
year={2025},
eprint={2509.24709},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2509.24709},
}
"""
class IWRBench(datasets.GeneratorBasedBuilder):
    """Loading script for the IWR-Bench dataset.

    Reads ``metadata.jsonl`` located next to this script. Each JSON line
    describes one example: a user-interaction video, its classification,
    recording details, the source website, and an annotated action
    sequence whose steps may carry a checkpoint screenshot.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` (schema + metadata)."""
        features = datasets.Features({
            "id": datasets.Value("int64"),
            "classification": {
                "interaction_complexity": datasets.Value("string"),
                "visual_complexity": datasets.Value("string"),
                "domain": datasets.Value("string"),
                "sub_domain": datasets.Value("string"),
            },
            "recording_details": {
                "resolution": datasets.Value("string"),
                "environment": datasets.Value("string"),
            },
            "source_website": datasets.Value("string"),
            "action_sequence": datasets.Sequence({
                "type": datasets.Value("string"),
                "parameters": {
                    "key": datasets.Value("string"),
                    "direction": datasets.Value("string"),
                    "scroll_page": datasets.Value("string"),
                    "description": datasets.Value("string"),
                },
                "visual_evaluation_flag": datasets.Value("bool"),
                "checkpoint_screenshot": datasets.Image(),
                "logical_assertion": datasets.Value("string"),
            }),
            # BUG FIX: this field holds video files (populated from
            # `video_path` in _generate_examples); the original declared
            # it as datasets.Audio(decode=True), which would attempt to
            # audio-decode the recordings. Use the Video feature instead.
            "video": datasets.Video(),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Locate `metadata.jsonl` beside this script and declare the
        single TRAIN split.

        Raises:
            FileNotFoundError: if `metadata.jsonl` is missing.
        """
        data_dir = os.path.dirname(os.path.abspath(__file__))
        metadata_file = os.path.join(data_dir, "metadata.jsonl")
        if not os.path.exists(metadata_file):
            raise FileNotFoundError(
                f"`metadata.jsonl` not found in {data_dir}."
            )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": metadata_file,
                    "base_path": data_dir,
                },
            ),
        ]

    def _generate_examples(self, filepath, base_path):
        """Yield ``(key, example)`` pairs from the JSONL metadata file.

        Relative ``checkpoint_screenshot_path`` / ``video_path`` entries
        are resolved against *base_path* so the Image/Video features can
        load them.
        """
        with open(filepath, "r", encoding="utf-8") as f:
            for key, line in enumerate(f):
                # Robustness: tolerate blank/whitespace-only lines, which
                # would otherwise crash json.loads with a JSONDecodeError.
                if not line.strip():
                    continue
                data = json.loads(line)

                processed_sequence = []
                for step in data.get("action_sequence", []):
                    screenshot_path_str = step.get("checkpoint_screenshot_path")
                    screenshot_full_path = (
                        str(Path(base_path) / Path(screenshot_path_str))
                        if screenshot_path_str
                        else None
                    )
                    params = step.get("parameters", {})
                    # Project onto the fixed parameter schema declared in
                    # _info(); absent keys become None.
                    processed_params = {
                        "key": params.get("key"),
                        "direction": params.get("direction"),
                        "scroll_page": params.get("scroll_page"),
                        "description": params.get("description"),
                    }
                    processed_sequence.append({
                        "type": step.get("type"),
                        "parameters": processed_params,
                        "visual_evaluation_flag": step.get("visual_evaluation_flag"),
                        "checkpoint_screenshot": screenshot_full_path,
                        "logical_assertion": step.get("logical_assertion"),
                    })

                video_path_str = data.get("video_path")
                video_full_path = (
                    str(Path(base_path) / Path(video_path_str))
                    if video_path_str
                    else None
                )
                yield key, {
                    "id": data.get("id"),
                    "classification": data.get("classification"),
                    "recording_details": data.get("recording_details"),
                    "source_website": data.get("source_website"),
                    "action_sequence": processed_sequence,
                    "video": video_full_path,
                }