| images (list, length 1) | data_source (string, 2 classes) | reward_model (dict) | extra_info (dict) | prompt (list, length 2) | ability (string, 3 classes) | env_name (string, 1 class) |
|---|---|---|---|---|---|---|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the pants is white."
}
|
{
"answer": "The color of the pants is white.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2409126.jpg",
"index": "sealvqa_vaw_1",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the pants?"
},
{
"from": "gpt",
"value": "The color of the pants is white."
}
],
"question": "What is the color of the pants?",
"question_id": "sealvqa_vaw_1",
"search": 1,
"target_instances": [
{
"bbox": [
421.6666666666667,
115,
17.666666666666686,
32.66666666666666
],
"instance_id": "2409126013",
"name": "pants"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the pants?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "No, the color of the design is not black."
}
|
{
"answer": "No, the color of the design is not black.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2381719.jpg",
"index": "sealvqa_vaw_10",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the design black?"
},
{
"from": "gpt",
"value": "No, the color of the design is not black."
}
],
"question": "Is the color of the design black?",
"question_id": "sealvqa_vaw_10",
"search": 1,
"target_instances": [
{
"bbox": [
341.6666666666667,
174.16666666666666,
76,
46.00000000000003
],
"instance_id": "2381719002",
"name": "design"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the color of the design black?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the building is blue."
}
|
{
"answer": "The color of the building is blue.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2383501.jpg",
"index": "sealvqa_vaw_13",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the building?"
},
{
"from": "gpt",
"value": "The color of the building is blue."
}
],
"question": "What is the color of the building?",
"question_id": "sealvqa_vaw_13",
"search": 1,
"target_instances": [
{
"bbox": [
403.3333333333333,
30.833333333333336,
76,
122.66666666666666
],
"instance_id": "2383501004",
"name": "building"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the building?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the lighthouse is red."
}
|
{
"answer": "The color of the lighthouse is red.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2395045.jpg",
"index": "sealvqa_vaw_20",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the lighthouse?"
},
{
"from": "gpt",
"value": "The color of the lighthouse is red."
}
],
"question": "What is the color of the lighthouse?",
"question_id": "sealvqa_vaw_20",
"search": 1,
"target_instances": [
{
"bbox": [
233.33333333333334,
47.5,
38.49999999999997,
76
],
"instance_id": "2395045002",
"name": "lighthouse"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the lighthouse?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the plane is white."
}
|
{
"answer": "The color of the plane is white.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2398589.jpg",
"index": "sealvqa_vaw_25",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the plane?"
},
{
"from": "gpt",
"value": "The color of the plane is white."
}
],
"question": "What is the color of the plane?",
"question_id": "sealvqa_vaw_25",
"search": 1,
"target_instances": [
{
"bbox": [
223.33333333333331,
162.5,
51.83333333333337,
26.833333333333314
],
"instance_id": "2398589000",
"name": "plane"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the plane?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "No, the color of the painting is not orange."
}
|
{
"answer": "No, the color of the painting is not orange.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2399355.jpg",
"index": "sealvqa_vaw_27",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the painting orange?"
},
{
"from": "gpt",
"value": "No, the color of the painting is not orange."
}
],
"question": "Is the color of the painting orange?",
"question_id": "sealvqa_vaw_27",
"search": 1,
"target_instances": [
{
"bbox": [
113,
16,
56,
59
],
"instance_id": "2399355000",
"name": "painting"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the color of the painting orange?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the van is red."
}
|
{
"answer": "The color of the van is red.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2415548.jpg",
"index": "sealvqa_vaw_31",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the van?"
},
{
"from": "gpt",
"value": "The color of the van is red."
}
],
"question": "What is the color of the van?",
"question_id": "sealvqa_vaw_31",
"search": 1,
"target_instances": [
{
"bbox": [
140,
62.5,
96,
96.83333333333331
],
"instance_id": "2415548001",
"name": "van"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the van?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The material of the pillow is fluffy."
}
|
{
"answer": "The material of the pillow is fluffy.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2396597.jpg",
"index": "sealvqa_vaw_32",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the material of the pillow?"
},
{
"from": "gpt",
"value": "The material of the pillow is fluffy."
}
],
"question": "What is the material of the pillow?",
"question_id": "sealvqa_vaw_32",
"search": 1,
"target_instances": [
{
"bbox": [
100,
146.66666666666666,
105.16666666666666,
67.66666666666669
],
"instance_id": "2396597001",
"name": "pillow"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the material of the pillow?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "No, the color of the roof is not brown."
}
|
{
"answer": "No, the color of the roof is not brown.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2326987.jpg",
"index": "sealvqa_vaw_36",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the roof brown?"
},
{
"from": "gpt",
"value": "No, the color of the roof is not brown."
}
],
"question": "Is the color of the roof brown?",
"question_id": "sealvqa_vaw_36",
"search": 1,
"target_instances": [
{
"bbox": [
256.41,
237.54000000000002,
29.859999999999957,
15.430000000000007
],
"instance_id": "2326987003",
"name": "roof"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the color of the roof brown?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "No, the material of the doll is not fluffy."
}
|
{
"answer": "No, the material of the doll is not fluffy.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2321166.jpg",
"index": "sealvqa_vaw_38",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the material of the doll fluffy?"
},
{
"from": "gpt",
"value": "No, the material of the doll is not fluffy."
}
],
"question": "Is the material of the doll fluffy?",
"question_id": "sealvqa_vaw_38",
"search": 1,
"target_instances": [
{
"bbox": [
405,
203.33333333333334,
93.5,
106.00000000000003
],
"instance_id": "2321166001",
"name": "doll"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the material of the doll fluffy?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the boat is black."
}
|
{
"answer": "The color of the boat is black.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2399497.jpg",
"index": "sealvqa_vaw_43",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the boat?"
},
{
"from": "gpt",
"value": "The color of the boat is black."
}
],
"question": "What is the color of the boat?",
"question_id": "sealvqa_vaw_43",
"search": 1,
"target_instances": [
{
"bbox": [
182,
198,
109,
25
],
"instance_id": "2399497004",
"name": "boat"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the boat?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the shoes is black."
}
|
{
"answer": "The color of the shoes is black.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2371600.jpg",
"index": "sealvqa_vaw_45",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the shoes?"
},
{
"from": "gpt",
"value": "The color of the shoes is black."
}
],
"question": "What is the color of the shoes?",
"question_id": "sealvqa_vaw_45",
"search": 1,
"target_instances": [
{
"bbox": [
382.5,
282.5,
61,
36
],
"instance_id": "2371600003",
"name": "shoes"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the shoes?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the jacket is blue."
}
|
{
"answer": "The color of the jacket is blue.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2376230.jpg",
"index": "sealvqa_vaw_47",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the jacket?"
},
{
"from": "gpt",
"value": "The color of the jacket is blue."
}
],
"question": "What is the color of the jacket?",
"question_id": "sealvqa_vaw_47",
"search": 1,
"target_instances": [
{
"bbox": [
284,
123,
54,
134
],
"instance_id": "2376230010",
"name": "jacket"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the jacket?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the scissors is orange."
}
|
{
"answer": "The color of the scissors is orange.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2360507.jpg",
"index": "sealvqa_vaw_49",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the scissors?"
},
{
"from": "gpt",
"value": "The color of the scissors is orange."
}
],
"question": "What is the color of the scissors?",
"question_id": "sealvqa_vaw_49",
"search": 1,
"target_instances": [
{
"bbox": [
154.375,
191.25,
74.74999999999997,
39.125
],
"instance_id": "2360507005",
"name": "scissors"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the scissors?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the pole is gray."
}
|
{
"answer": "The color of the pole is gray.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2394494.jpg",
"index": "sealvqa_vaw_53",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the pole?"
},
{
"from": "gpt",
"value": "The color of the pole is gray."
}
],
"question": "What is the color of the pole?",
"question_id": "sealvqa_vaw_53",
"search": 1,
"target_instances": [
{
"bbox": [
167.66,
148.84666666666666,
20.366666666666674,
112.77333333333334
],
"instance_id": "2394494001",
"name": "pole"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the pole?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the statue is golden."
}
|
{
"answer": "The color of the statue is golden.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2377841.jpg",
"index": "sealvqa_vaw_56",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the statue?"
},
{
"from": "gpt",
"value": "The color of the statue is golden."
}
],
"question": "What is the color of the statue?",
"question_id": "sealvqa_vaw_56",
"search": 1,
"target_instances": [
{
"bbox": [
254.19,
204.795,
38.74000000000001,
45.95500000000001
],
"instance_id": "2377841001",
"name": "statue"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the statue?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the lines is yellow."
}
|
{
"answer": "The color of the lines is yellow.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2362978.jpg",
"index": "sealvqa_vaw_70",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the lines?"
},
{
"from": "gpt",
"value": "The color of the lines is yellow."
}
],
"question": "What is the color of the lines?",
"question_id": "sealvqa_vaw_70",
"search": 1,
"target_instances": [
{
"bbox": [
310,
312.5,
71,
21.833333333333314
],
"instance_id": "2362978002",
"name": "lines"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the lines?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the deck is red."
}
|
{
"answer": "The color of the deck is red.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2405871.jpg",
"index": "sealvqa_vaw_72",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the deck?"
},
{
"from": "gpt",
"value": "The color of the deck is red."
}
],
"question": "What is the color of the deck?",
"question_id": "sealvqa_vaw_72",
"search": 1,
"target_instances": [
{
"bbox": [
322.5,
238.33333333333334,
59.33333333333337,
44.33333333333334
],
"instance_id": "2405871001",
"name": "deck"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the deck?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the clock is black."
}
|
{
"answer": "The color of the clock is black.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2396840.jpg",
"index": "sealvqa_vaw_78",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the clock?"
},
{
"from": "gpt",
"value": "The color of the clock is black."
}
],
"question": "What is the color of the clock?",
"question_id": "sealvqa_vaw_78",
"search": 1,
"target_instances": [
{
"bbox": [
120,
196,
104,
81
],
"instance_id": "2396840000",
"name": "clock"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the clock?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the pants is gray."
}
|
{
"answer": "The color of the pants is gray.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2325841.jpg",
"index": "sealvqa_vaw_79",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the pants?"
},
{
"from": "gpt",
"value": "The color of the pants is gray."
}
],
"question": "What is the color of the pants?",
"question_id": "sealvqa_vaw_79",
"search": 1,
"target_instances": [
{
"bbox": [
228,
325,
74,
49
],
"instance_id": "2325841004",
"name": "pants"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the pants?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the pants is tan."
}
|
{
"answer": "The color of the pants is tan.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2415947.jpg",
"index": "sealvqa_vaw_80",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the pants?"
},
{
"from": "gpt",
"value": "The color of the pants is tan."
}
],
"question": "What is the color of the pants?",
"question_id": "sealvqa_vaw_80",
"search": 1,
"target_instances": [
{
"bbox": [
361,
246,
39,
34
],
"instance_id": "2415947006",
"name": "pants"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the pants?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "Yes, the face expression of the dog is sad."
}
|
{
"answer": "Yes, the face expression of the dog is sad.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2316222.jpg",
"index": "sealvqa_vaw_81",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the face expression of the dog sad?"
},
{
"from": "gpt",
"value": "Yes, the face expression of the dog is sad."
}
],
"question": "Is the face expression of the dog sad?",
"question_id": "sealvqa_vaw_81",
"search": 1,
"target_instances": [
{
"bbox": [
282.5,
59.375,
69.125,
73.5
],
"instance_id": "2316222003",
"name": "dog"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the face expression of the dog sad?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_general
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the pots is black."
}
|
{
"answer": "The color of the pots is black.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2402021.jpg",
"index": "sealvqa_vaw_85",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the pots?"
},
{
"from": "gpt",
"value": "The color of the pots is black."
}
],
"question": "What is the color of the pots?",
"question_id": "sealvqa_vaw_85",
"search": 1,
"target_instances": [
{
"bbox": [
243.7233333333333,
393.9466666666666,
45.879999999999995,
69.56666666666672
],
"instance_id": "2402021000",
"name": "pots"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the pots?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the tail is white."
}
|
{
"answer": "The color of the tail is white.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2400624.jpg",
"index": "sealvqa_vaw_86",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the tail?"
},
{
"from": "gpt",
"value": "The color of the tail is white."
}
],
"question": "What is the color of the tail?",
"question_id": "sealvqa_vaw_86",
"search": 1,
"target_instances": [
{
"bbox": [
300,
192,
59,
60
],
"instance_id": "2400624004",
"name": "tail"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the tail?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "No, the color of the rim is not silver."
}
|
{
"answer": "No, the color of the rim is not silver.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2413749.jpg",
"index": "sealvqa_vaw_87",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the rim silver?"
},
{
"from": "gpt",
"value": "No, the color of the rim is not silver."
}
],
"question": "Is the color of the rim silver?",
"question_id": "sealvqa_vaw_87",
"search": 1,
"target_instances": [
{
"bbox": [
44.375,
23.750000000000004,
71,
101
],
"instance_id": "2413749002",
"name": "rim"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the color of the rim silver?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the poles is gray."
}
|
{
"answer": "The color of the poles is gray.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2409728.jpg",
"index": "sealvqa_vaw_88",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the poles?"
},
{
"from": "gpt",
"value": "The color of the poles is gray."
}
],
"question": "What is the color of the poles?",
"question_id": "sealvqa_vaw_88",
"search": 1,
"target_instances": [
{
"bbox": [
460,
98.33333333333333,
26,
66.00000000000001
],
"instance_id": "2409728002",
"name": "poles"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the poles?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The pattern of the shorts is patterned."
}
|
{
"answer": "The pattern of the shorts is patterned.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2406342.jpg",
"index": "sealvqa_vaw_100",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the pattern of the shorts?"
},
{
"from": "gpt",
"value": "The pattern of the shorts is patterned."
}
],
"question": "What is the pattern of the shorts?",
"question_id": "sealvqa_vaw_100",
"search": 1,
"target_instances": [
{
"bbox": [
47,
249,
113,
83
],
"instance_id": "2406342001",
"name": "shorts"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the pattern of the shorts?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the car is gray."
}
|
{
"answer": "The color of the car is gray.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2414148.jpg",
"index": "sealvqa_vaw_102",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the car?"
},
{
"from": "gpt",
"value": "The color of the car is gray."
}
],
"question": "What is the color of the car?",
"question_id": "sealvqa_vaw_102",
"search": 1,
"target_instances": [
{
"bbox": [
440,
163,
59,
91
],
"instance_id": "2414148004",
"name": "car"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the car?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "Yes, the material of the plates is ceramic."
}
|
{
"answer": "Yes, the material of the plates is ceramic.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2392935.jpg",
"index": "sealvqa_vaw_103",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the material of the plates ceramic?"
},
{
"from": "gpt",
"value": "Yes, the material of the plates is ceramic."
}
],
"question": "Is the material of the plates ceramic?",
"question_id": "sealvqa_vaw_103",
"search": 1,
"target_instances": [
{
"bbox": [
125.83333333333333,
297.5,
125.16666666666667,
36
],
"instance_id": "2392935004",
"name": "plates"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the material of the plates ceramic?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the tie is green."
}
|
{
"answer": "The color of the tie is green.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2385400.jpg",
"index": "sealvqa_vaw_111",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the tie?"
},
{
"from": "gpt",
"value": "The color of the tie is green."
}
],
"question": "What is the color of the tie?",
"question_id": "sealvqa_vaw_111",
"search": 1,
"target_instances": [
{
"bbox": [
352,
101,
127,
44
],
"instance_id": "2385400006",
"name": "tie"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the tie?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the pants is gray."
}
|
{
"answer": "The color of the pants is gray.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2383513.jpg",
"index": "sealvqa_vaw_116",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the pants?"
},
{
"from": "gpt",
"value": "The color of the pants is gray."
}
],
"question": "What is the color of the pants?",
"question_id": "sealvqa_vaw_116",
"search": 1,
"target_instances": [
{
"bbox": [
1,
188,
35,
89
],
"instance_id": "2383513003",
"name": "pants"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the pants?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The pose of the person is walking."
}
|
{
"answer": "The pose of the person is walking.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2325858.jpg",
"index": "sealvqa_vaw_121",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the pose of the person?"
},
{
"from": "gpt",
"value": "The pose of the person is walking."
}
],
"question": "What is the pose of the person?",
"question_id": "sealvqa_vaw_121",
"search": 1,
"target_instances": [
{
"bbox": [
48.449999999999996,
265.05,
54.580000000000005,
138.37
],
"instance_id": "2325858002",
"name": "person"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the pose of the person?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the shirt is gray."
}
|
{
"answer": "The color of the shirt is gray.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2414477.jpg",
"index": "sealvqa_vaw_122",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the shirt?"
},
{
"from": "gpt",
"value": "The color of the shirt is gray."
}
],
"question": "What is the color of the shirt?",
"question_id": "sealvqa_vaw_122",
"search": 1,
"target_instances": [
{
"bbox": [
461.6666666666667,
87.5,
35.99999999999994,
91
],
"instance_id": "2414477004",
"name": "shirt"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the shirt?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the box is dark red."
}
|
{
"answer": "The color of the box is dark red.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/3670.jpg",
"index": "sealvqa_vaw_124",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the box?"
},
{
"from": "gpt",
"value": "The color of the box is dark red."
}
],
"question": "What is the color of the box?",
"question_id": "sealvqa_vaw_124",
"search": 1,
"target_instances": [
{
"bbox": [
526,
216,
58,
57
],
"instance_id": "3670005",
"name": "box"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the box?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the sign is black."
}
|
{
"answer": "The color of the sign is black.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/1591821.jpg",
"index": "sealvqa_vaw_125",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the sign?"
},
{
"from": "gpt",
"value": "The color of the sign is black."
}
],
"question": "What is the color of the sign?",
"question_id": "sealvqa_vaw_125",
"search": 1,
"target_instances": [
{
"bbox": [
574,
240,
57,
97
],
"instance_id": "1591821007",
"name": "sign"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the sign?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the uniform is red."
}
|
{
"answer": "The color of the uniform is red.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2409363.jpg",
"index": "sealvqa_vaw_131",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the uniform?"
},
{
"from": "gpt",
"value": "The color of the uniform is red."
}
],
"question": "What is the color of the uniform?",
"question_id": "sealvqa_vaw_131",
"search": 1,
"target_instances": [
{
"bbox": [
72.5,
21.666666666666668,
56.833333333333314,
160.16666666666669
],
"instance_id": "2409363005",
"name": "uniform"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the uniform?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "No, the material of the spatula is not wooden."
}
|
{
"answer": "No, the material of the spatula is not wooden.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2366543.jpg",
"index": "sealvqa_vaw_132",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the material of the spatula wooden?"
},
{
"from": "gpt",
"value": "No, the material of the spatula is not wooden."
}
],
"question": "Is the material of the spatula wooden?",
"question_id": "sealvqa_vaw_132",
"search": 1,
"target_instances": [
{
"bbox": [
180,
204,
72,
126
],
"instance_id": "2366543003",
"name": "spatula"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the material of the spatula wooden?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "Yes, the color of the label is white."
}
|
{
"answer": "Yes, the color of the label is white.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2354617.jpg",
"index": "sealvqa_vaw_137",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the label white?"
},
{
"from": "gpt",
"value": "Yes, the color of the label is white."
}
],
"question": "Is the color of the label white?",
"question_id": "sealvqa_vaw_137",
"search": 1,
"target_instances": [
{
"bbox": [
0,
92.87666666666667,
113.82333333333334,
27.180000000000007
],
"instance_id": "2354617007",
"name": "label"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the color of the label white?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the cow is tan."
}
|
{
"answer": "The color of the cow is tan.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2384657.jpg",
"index": "sealvqa_vaw_140",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the cow?"
},
{
"from": "gpt",
"value": "The color of the cow is tan."
}
],
"question": "What is the color of the cow?",
"question_id": "sealvqa_vaw_140",
"search": 1,
"target_instances": [
{
"bbox": [
133,
135,
126,
53
],
"instance_id": "2384657001",
"name": "cow"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the cow?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the chair is black."
}
|
{
"answer": "The color of the chair is black.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2401500.jpg",
"index": "sealvqa_vaw_141",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the chair?"
},
{
"from": "gpt",
"value": "The color of the chair is black."
}
],
"question": "What is the color of the chair?",
"question_id": "sealvqa_vaw_141",
"search": 1,
"target_instances": [
{
"bbox": [
324.1666666666667,
230,
32.66666666666663,
30.16666666666663
],
"instance_id": "2401500002",
"name": "chair"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the chair?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "Yes, the pattern of the lines is patterned."
}
|
{
"answer": "Yes, the pattern of the lines is patterned.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2409869.jpg",
"index": "sealvqa_vaw_144",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the pattern of the lines patterned?"
},
{
"from": "gpt",
"value": "Yes, the pattern of the lines is patterned."
}
],
"question": "Is the pattern of the lines patterned?",
"question_id": "sealvqa_vaw_144",
"search": 1,
"target_instances": [
{
"bbox": [
419.1666666666667,
320.83333333333337,
79.33333333333331,
53.5
],
"instance_id": "2409869005",
"name": "lines"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the pattern of the lines patterned?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_general
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the t-shirt is white."
}
|
{
"answer": "The color of the t-shirt is white.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2400460.jpg",
"index": "sealvqa_vaw_150",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the t-shirt?"
},
{
"from": "gpt",
"value": "The color of the t-shirt is white."
}
],
"question": "What is the color of the t-shirt?",
"question_id": "sealvqa_vaw_150",
"search": 1,
"target_instances": [
{
"bbox": [
133.125,
250,
52.375,
129.25000000000006
],
"instance_id": "2400460001",
"name": "t-shirt"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the t-shirt?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "No, the color of the line is not orange."
}
|
{
"answer": "No, the color of the line is not orange.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2362283.jpg",
"index": "sealvqa_vaw_153",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the line orange?"
},
{
"from": "gpt",
"value": "No, the color of the line is not orange."
}
],
"question": "Is the color of the line orange?",
"question_id": "sealvqa_vaw_153",
"search": 1,
"target_instances": [
{
"bbox": [
2.2,
81.95,
41.699999999999996,
9.249999999999986
],
"instance_id": "2362283002",
"name": "line"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the color of the line orange?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the tail is maroon."
}
|
{
"answer": "The color of the tail is maroon.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2402406.jpg",
"index": "sealvqa_vaw_160",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the tail?"
},
{
"from": "gpt",
"value": "The color of the tail is maroon."
}
],
"question": "What is the color of the tail?",
"question_id": "sealvqa_vaw_160",
"search": 1,
"target_instances": [
{
"bbox": [
255.83333333333337,
50.833333333333336,
33.49999999999994,
34.333333333333336
],
"instance_id": "2402406003",
"name": "tail"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the tail?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "Yes, the activity of the woman is drinking."
}
|
{
"answer": "Yes, the activity of the woman is drinking.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/4472.jpg",
"index": "sealvqa_vaw_163",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the activity of the woman drinking?"
},
{
"from": "gpt",
"value": "Yes, the activity of the woman is drinking."
}
],
"question": "Is the activity of the woman drinking?",
"question_id": "sealvqa_vaw_163",
"search": 1,
"target_instances": [
{
"bbox": [
375.44,
135.20000000000002,
53.00000000000006,
97.71999999999997
],
"instance_id": "4472000",
"name": "woman"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the activity of the woman drinking?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_general
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The material of the fixture is chrome."
}
|
{
"answer": "The material of the fixture is chrome.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2388914.jpg",
"index": "sealvqa_vaw_164",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the material of the fixture?"
},
{
"from": "gpt",
"value": "The material of the fixture is chrome."
}
],
"question": "What is the material of the fixture?",
"question_id": "sealvqa_vaw_164",
"search": 1,
"target_instances": [
{
"bbox": [
21.875,
324.375,
98.50000000000001,
59.125
],
"instance_id": "2388914000",
"name": "fixture"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the material of the fixture?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
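Each `extra_info` cell above stores its `target_instances` boxes as absolute `[x, y, width, height]` pixels (per `bbox_format`), while the `image_zoom_in_tool` schema embedded in every prompt expects `[x1, y1, x2, y2]` corners. A minimal conversion sketch, assuming plain Python dicts shaped like the `target_instances` entries (the helper names are illustrative, not part of the dataset):

```python
# Sketch only: turn a VAW-style [x, y, width, height] box into the
# [x1, y1, x2, y2] corners that the image_zoom_in_tool schema expects.

def xywh_to_xyxy(bbox):
    """bbox: [x, y, w, h] in absolute pixels -> [x1, y1, x2, y2]."""
    x, y, w, h = bbox
    return [x, y, x + w, y + h]

def make_zoom_call(instance):
    """Build a tool-call payload from one target_instances entry."""
    return {
        "name": "image_zoom_in_tool",
        "arguments": {
            "bbox_2d": xywh_to_xyxy(instance["bbox"]),
            "label": instance["name"],  # optional label field in the schema
        },
    }

# The 'fixture' instance from the row above (width rounded from the stored value):
fixture = {"bbox": [21.875, 324.375, 98.5, 59.125], "name": "fixture"}
print(make_zoom_call(fixture))
# arguments["bbox_2d"] -> [21.875, 324.375, 120.375, 383.5]
```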
|
sealvqa_vaw
|
{
"ground_truth": "No, the color of the car is not white."
}
|
{
"answer": "No, the color of the car is not white.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2933.jpg",
"index": "sealvqa_vaw_168",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the car white?"
},
{
"from": "gpt",
"value": "No, the color of the car is not white."
}
],
"question": "Is the color of the car white?",
"question_id": "sealvqa_vaw_168",
"search": 1,
"target_instances": [
{
"bbox": [
254,
448,
39,
44
],
"instance_id": "2933007",
"name": "car"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the color of the car white?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
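The system prompt repeated in each row asks the model to wrap its tool invocation in `<tool_call>...</tool_call>` tags containing a single JSON object. A small parsing sketch for that strict format, assuming downstream tooling in Python (the regex and function name are assumptions, not part of the dataset):

```python
import json
import re

# Sketch only: grab the first <tool_call> JSON object from a reply that
# follows the strict <think>/<tool_call>/<answer> format these prompts request.
TOOL_CALL_RE = re.compile(r"<tool_call>\s*(\{.*?\})\s*</tool_call>", re.DOTALL)

def parse_tool_call(response_text):
    """Return the tool call as a dict, or None when the reply has no tool call."""
    match = TOOL_CALL_RE.search(response_text)
    return json.loads(match.group(1)) if match else None

reply = (
    "<think>The car is small; zoom in.</think> "
    '<tool_call>{"name": "image_zoom_in_tool", '
    '"arguments": {"bbox_2d": [254, 448, 293, 492], "label": "car"}}</tool_call>'
)
print(parse_tool_call(reply)["arguments"]["bbox_2d"])  # [254, 448, 293, 492]
```

The example corners correspond to the `car` box `[254, 448, 39, 44]` from the row above once width and height are added to the origin.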
|
sealvqa_vaw
|
{
"ground_truth": "The color of the shoe is gray."
}
|
{
"answer": "The color of the shoe is gray.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2319837.jpg",
"index": "sealvqa_vaw_171",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the shoe?"
},
{
"from": "gpt",
"value": "The color of the shoe is gray."
}
],
"question": "What is the color of the shoe?",
"question_id": "sealvqa_vaw_171",
"search": 1,
"target_instances": [
{
"bbox": [
187.59,
435.12,
28.194999999999993,
8.770000000000039
],
"instance_id": "2319837003",
"name": "shoe"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the shoe?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the rock is black."
}
|
{
"answer": "The color of the rock is black.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2388720.jpg",
"index": "sealvqa_vaw_174",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the rock?"
},
{
"from": "gpt",
"value": "The color of the rock is black."
}
],
"question": "What is the color of the rock?",
"question_id": "sealvqa_vaw_174",
"search": 1,
"target_instances": [
{
"bbox": [
127.5,
292.5,
139.33333333333331,
43.5
],
"instance_id": "2388720000",
"name": "rock"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the rock?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the traffic light is green."
}
|
{
"answer": "The color of the traffic light is green.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2374825.jpg",
"index": "sealvqa_vaw_176",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the traffic light?"
},
{
"from": "gpt",
"value": "The color of the traffic light is green."
}
],
"question": "What is the color of the traffic light?",
"question_id": "sealvqa_vaw_176",
"search": 1,
"target_instances": [
{
"bbox": [
8.333333333333334,
40.833333333333336,
39.333333333333336,
11
],
"instance_id": "2374825002",
"name": "traffic light"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the traffic light?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the light is white."
}
|
{
"answer": "The color of the light is white.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/1101.jpg",
"index": "sealvqa_vaw_178",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the light?"
},
{
"from": "gpt",
"value": "The color of the light is white."
}
],
"question": "What is the color of the light?",
"question_id": "sealvqa_vaw_178",
"search": 1,
"target_instances": [
{
"bbox": [
241.06666666666666,
45.86666666666667,
53.26666666666665,
20.199999999999996
],
"instance_id": "1101008",
"name": "light"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the light?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the shirt is tan."
}
|
{
"answer": "The color of the shirt is tan.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2368335.jpg",
"index": "sealvqa_vaw_179",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the shirt?"
},
{
"from": "gpt",
"value": "The color of the shirt is tan."
}
],
"question": "What is the color of the shirt?",
"question_id": "sealvqa_vaw_179",
"search": 1,
"target_instances": [
{
"bbox": [
102,
134,
65,
44
],
"instance_id": "2368335007",
"name": "shirt"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the shirt?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
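The tool description in these prompts says the zoom works by cropping the requested region; the rows ship only the JSON schema, so any backend is an implementation detail. One possible sketch using Pillow (an assumed dependency, not specified by the dataset):

```python
from PIL import Image

def image_zoom_in_tool(image_path, bbox_2d, label=None):
    """Sketch of a crop-based backend: return the [x1, y1, x2, y2] region."""
    x1, y1, x2, y2 = bbox_2d
    with Image.open(image_path) as img:
        return img.crop((int(x1), int(y1), int(x2), int(y2)))

# Hypothetical call for the shirt instance in the row above
# ([102, 134, 65, 44] in xywh becomes [102, 134, 167, 178] in corners):
# crop = image_zoom_in_tool("gqa/images/2368335.jpg", [102, 134, 167, 178], "shirt")
```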
|
sealvqa_vaw
|
{
"ground_truth": "The color of the shirt is red."
}
|
{
"answer": "The color of the shirt is red.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2321292.jpg",
"index": "sealvqa_vaw_186",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the shirt?"
},
{
"from": "gpt",
"value": "The color of the shirt is red."
}
],
"question": "What is the color of the shirt?",
"question_id": "sealvqa_vaw_186",
"search": 1,
"target_instances": [
{
"bbox": [
112.5,
188.33333333333331,
67.66666666666666,
56.00000000000003
],
"instance_id": "2321292001",
"name": "shirt"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the shirt?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The activity of the woman is drinking."
}
|
{
"answer": "The activity of the woman is drinking.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/4472.jpg",
"index": "sealvqa_vaw_187",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the activity of the woman?"
},
{
"from": "gpt",
"value": "The activity of the woman is drinking."
}
],
"question": "What is the activity of the woman?",
"question_id": "sealvqa_vaw_187",
"search": 1,
"target_instances": [
{
"bbox": [
375.44,
135.20000000000002,
53.00000000000006,
97.71999999999997
],
"instance_id": "4472000",
"name": "woman"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the activity of the woman?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
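Each row also carries a `reward_model.ground_truth` string alongside the prompt, and the prompt demands that the final answer sit inside `<answer>...</answer>`. A naive scoring sketch under an assumed exact-match rule (the actual reward function is not specified in these rows):

```python
import re

def extract_answer(response_text):
    """Pull the text between <answer> tags, or None if the tags are missing."""
    match = re.search(r"<answer>(.*?)</answer>", response_text, re.DOTALL)
    return match.group(1).strip() if match else None

def exact_match_reward(response_text, ground_truth):
    """1.0 on a case-insensitive exact match, else 0.0 (deliberately naive)."""
    answer = extract_answer(response_text)
    return 1.0 if answer and answer.lower() == ground_truth.lower() else 0.0

reply = "<think>...</think> <answer>The activity of the woman is drinking.</answer>"
print(exact_match_reward(reply, "The activity of the woman is drinking."))  # 1.0
```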
|
sealvqa_vaw
|
{
"ground_truth": "The color of the car is white."
}
|
{
"answer": "The color of the car is white.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2379809.jpg",
"index": "sealvqa_vaw_188",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the car?"
},
{
"from": "gpt",
"value": "The color of the car is white."
}
],
"question": "What is the color of the car?",
"question_id": "sealvqa_vaw_188",
"search": 1,
"target_instances": [
{
"bbox": [
356,
18,
89,
54
],
"instance_id": "2379809009",
"name": "car"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the car?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "Yes, the color of the rock hill is brown."
}
|
{
"answer": "Yes, the color of the rock hill is brown.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2399492.jpg",
"index": "sealvqa_vaw_189",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the rock hill brown?"
},
{
"from": "gpt",
"value": "Yes, the color of the rock hill is brown."
}
],
"question": "Is the color of the rock hill brown?",
"question_id": "sealvqa_vaw_189",
"search": 1,
"target_instances": [
{
"bbox": [
211.66666666666666,
65,
76.83333333333334,
54.33333333333333
],
"instance_id": "2399492008",
"name": "rock hill"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the color of the rock hill brown?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "Yes, the color of the surfboard is white."
}
|
{
"answer": "Yes, the color of the surfboard is white.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2410358.jpg",
"index": "sealvqa_vaw_191",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the surfboard white?"
},
{
"from": "gpt",
"value": "Yes, the color of the surfboard is white."
}
],
"question": "Is the color of the surfboard white?",
"question_id": "sealvqa_vaw_191",
"search": 1,
"target_instances": [
{
"bbox": [
266.875,
182.5,
47.25,
31.625
],
"instance_id": "2410358004",
"name": "surfboard"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the color of the surfboard white?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "Yes, the color of the cow is cream colored."
}
|
{
"answer": "Yes, the color of the cow is cream colored.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2384657.jpg",
"index": "sealvqa_vaw_193",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the cow cream colored?"
},
{
"from": "gpt",
"value": "Yes, the color of the cow is cream colored."
}
],
"question": "Is the color of the cow cream colored?",
"question_id": "sealvqa_vaw_193",
"search": 1,
"target_instances": [
{
"bbox": [
133,
135,
126,
53
],
"instance_id": "2384657001",
"name": "cow"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the color of the cow cream colored?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the towel is blue."
}
|
{
"answer": "The color of the towel is blue.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2409101.jpg",
"index": "sealvqa_vaw_197",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the towel?"
},
{
"from": "gpt",
"value": "The color of the towel is blue."
}
],
"question": "What is the color of the towel?",
"question_id": "sealvqa_vaw_197",
"search": 1,
"target_instances": [
{
"bbox": [
153,
120,
41,
120
],
"instance_id": "2409101007",
"name": "towel"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the towel?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "Yes, the color of the pillow is tan."
}
|
{
"answer": "Yes, the color of the pillow is tan.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2036.jpg",
"index": "sealvqa_vaw_200",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the pillow tan?"
},
{
"from": "gpt",
"value": "Yes, the color of the pillow is tan."
}
],
"question": "Is the color of the pillow tan?",
"question_id": "sealvqa_vaw_200",
"search": 1,
"target_instances": [
{
"bbox": [
405,
198,
135,
23
],
"instance_id": "2036007",
"name": "pillow"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the color of the pillow tan?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the lamp is brown."
}
|
{
"answer": "The color of the lamp is brown.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/4924.jpg",
"index": "sealvqa_vaw_203",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the lamp?"
},
{
"from": "gpt",
"value": "The color of the lamp is brown."
}
],
"question": "What is the color of the lamp?",
"question_id": "sealvqa_vaw_203",
"search": 1,
"target_instances": [
{
"bbox": [
42.913333333333334,
228.17333333333335,
58.56666666666667,
39.72666666666663
],
"instance_id": "4924001",
"name": "lamp"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the lamp?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The material of the mountains is snowy."
}
|
{
"answer": "The material of the mountains is snowy.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2398915.jpg",
"index": "sealvqa_vaw_204",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the material of the mountains?"
},
{
"from": "gpt",
"value": "The material of the mountains is snowy."
}
],
"question": "What is the material of the mountains?",
"question_id": "sealvqa_vaw_204",
"search": 1,
"target_instances": [
{
"bbox": [
125,
259.16666666666663,
111.83333333333334,
32.666666666666686
],
"instance_id": "2398915001",
"name": "mountains"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the material of the mountains?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The material of the shower is stainless steel."
}
|
{
"answer": "The material of the shower is stainless steel.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2385677.jpg",
"index": "sealvqa_vaw_208",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the material of the shower?"
},
{
"from": "gpt",
"value": "The material of the shower is stainless steel."
}
],
"question": "What is the material of the shower?",
"question_id": "sealvqa_vaw_208",
"search": 1,
"target_instances": [
{
"bbox": [
238.25333333333336,
81.27333333333334,
57.22333333333333,
158.53666666666666
],
"instance_id": "2385677001",
"name": "shower"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the material of the shower?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the top is blue."
}
|
{
"answer": "The color of the top is blue.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2409014.jpg",
"index": "sealvqa_vaw_210",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the top?"
},
{
"from": "gpt",
"value": "The color of the top is blue."
}
],
"question": "What is the color of the top?",
"question_id": "sealvqa_vaw_210",
"search": 1,
"target_instances": [
{
"bbox": [
143.125,
216.875,
80.50000000000003,
84.25
],
"instance_id": "2409014000",
"name": "top"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the top?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the box is red."
}
|
{
"answer": "The color of the box is red.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2376858.jpg",
"index": "sealvqa_vaw_211",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the box?"
},
{
"from": "gpt",
"value": "The color of the box is red."
}
],
"question": "What is the color of the box?",
"question_id": "sealvqa_vaw_211",
"search": 1,
"target_instances": [
{
"bbox": [
0.555,
303.03000000000003,
37.63,
121.99000000000001
],
"instance_id": "2376858001",
"name": "box"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the box?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the bird is red."
}
|
{
"answer": "The color of the bird is red.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2409370.jpg",
"index": "sealvqa_vaw_221",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the bird?"
},
{
"from": "gpt",
"value": "The color of the bird is red."
}
],
"question": "What is the color of the bird?",
"question_id": "sealvqa_vaw_221",
"search": 1,
"target_instances": [
{
"bbox": [
75.465,
225.22500000000002,
87.57999999999998,
82.89999999999998
],
"instance_id": "2409370001",
"name": "bird"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the bird?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "No, the color of the scissors is not orange."
}
|
{
"answer": "No, the color of the scissors is not orange.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2360507.jpg",
"index": "sealvqa_vaw_223",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the scissors orange?"
},
{
"from": "gpt",
"value": "No, the color of the scissors is not orange."
}
],
"question": "Is the color of the scissors orange?",
"question_id": "sealvqa_vaw_223",
"search": 1,
"target_instances": [
{
"bbox": [
50.625,
178.75,
62.875,
21.62499999999997
],
"instance_id": "2360507041",
"name": "scissors"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the color of the scissors orange?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The material of the pants is khaki."
}
|
{
"answer": "The material of the pants is khaki.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2400540.jpg",
"index": "sealvqa_vaw_224",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the material of the pants?"
},
{
"from": "gpt",
"value": "The material of the pants is khaki."
}
],
"question": "What is the material of the pants?",
"question_id": "sealvqa_vaw_224",
"search": 1,
"target_instances": [
{
"bbox": [
225,
190,
59,
70
],
"instance_id": "2400540003",
"name": "pants"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the material of the pants?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
] |
sealvqa_vaw_attribute
|
visual_toolbox_v2
|
|
sealvqa_vaw
|
{
"ground_truth": "The color of the shoes is gray."
}
|
{
"answer": "The color of the shoes is gray.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2387243.jpg",
"index": "sealvqa_vaw_227",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the shoes?"
},
{
"from": "gpt",
"value": "The color of the shoes is gray."
}
],
"question": "What is the color of the shoes?",
"question_id": "sealvqa_vaw_227",
"search": 1,
"target_instances": [
{
"bbox": [
200,
162.5,
38.5,
18.5
],
"instance_id": "2387243001",
"name": "shoes"
}
]
}
|
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the color of the shoes?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
]
ability: sealvqa_vaw_attribute
env_name: visual_toolbox_v2

---

data_source: sealvqa_vaw
reward_model:
{
"ground_truth": "No, the state of the sauce is not full."
}
extra_info:
{
"answer": "No, the state of the sauce is not full.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2318649.jpg",
"index": "sealvqa_vaw_235",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the state of the sauce full?"
},
{
"from": "gpt",
"value": "No, the state of the sauce is not full."
}
],
"question": "Is the state of the sauce full?",
"question_id": "sealvqa_vaw_235",
"search": 1,
"target_instances": [
{
"bbox": [
364,
1,
48,
18
],
"instance_id": "2318649006",
"name": "sauce"
}
]
}
prompt:
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the state of the sauce full?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
]
ability: sealvqa_vaw_general
env_name: visual_toolbox_v2

---

data_source: sealvqa_vaw
reward_model:
{
"ground_truth": "The material of the wall is brick."
}
extra_info:
{
"answer": "The material of the wall is brick.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2414286.jpg",
"index": "sealvqa_vaw_242",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the material of the wall?"
},
{
"from": "gpt",
"value": "The material of the wall is brick."
}
],
"question": "What is the material of the wall?",
"question_id": "sealvqa_vaw_242",
"search": 1,
"target_instances": [
{
"bbox": [
0.8333333333333334,
39.16666666666667,
59.33333333333333,
82.66666666666666
],
"instance_id": "2414286001",
"name": "wall"
}
]
}
prompt:
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nWhat is the material of the wall?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
]
ability: sealvqa_vaw_attribute
env_name: visual_toolbox_v2

---

data_source: sealvqa_vaw
reward_model:
{
"ground_truth": "Yes, the color of the clouds is white."
}
extra_info:
{
"answer": "Yes, the color of the clouds is white.",
"bbox_format": "absolute_xywh",
"bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format",
"conversation_turns": 2,
"data_split": "train",
"dataset_type": "vaw_attribute",
"has_targets": true,
"image_path": "gqa/images/2367134.jpg",
"index": "sealvqa_vaw_243",
"original_conversations": [
{
"from": "human",
"value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the clouds white?"
},
{
"from": "gpt",
"value": "Yes, the color of the clouds is white."
}
],
"question": "Is the color of the clouds white?",
"question_id": "sealvqa_vaw_243",
"search": 1,
"target_instances": [
{
"bbox": [
3.3333333333333335,
0.8333333333333334,
172.66666666666666,
31.000000000000004
],
"instance_id": "2367134001",
"name": "clouds"
}
]
}
prompt:
[
{
"content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>",
"role": "system"
},
{
"content": "<image>\nIs the color of the clouds white?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ",
"role": "user"
}
]
ability: sealvqa_vaw_attribute
env_name: visual_toolbox_v2
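The preview rows above all share the same record schema (data_source, reward_model, extra_info, prompt, ability, env_name), so a short sketch of how such a record might be inspected can be helpful. The snippet below is only a hypothetical illustration, not an official loader: the `record` literal mirrors the fields of the shoes example shown above (values copied from that row), and `summarize` is an invented helper name used purely for demonstration.

```python
# Minimal sketch (assumption: rows are available as plain Python dicts with
# the same fields as the preview above). Not an official loading utility.
record = {
    "data_source": "sealvqa_vaw",
    "ability": "sealvqa_vaw_attribute",
    "env_name": "visual_toolbox_v2",
    "reward_model": {"ground_truth": "The color of the shoes is gray."},
    "extra_info": {
        "question": "What is the color of the shoes?",
        "image_path": "gqa/images/2387243.jpg",
        "bbox_format": "absolute_xywh",
        "target_instances": [
            {"bbox": [200, 162.5, 38.5, 18.5],
             "instance_id": "2387243001",
             "name": "shoes"},
        ],
    },
    "prompt": [
        {"role": "system", "content": "..."},  # tool-use system prompt, truncated here
        {"role": "user", "content": "<image>\nWhat is the color of the shoes?"},
    ],
}


def summarize(row: dict) -> str:
    """Return a one-line summary of a preview-style row."""
    info = row["extra_info"]
    answer = row["reward_model"]["ground_truth"]
    targets = ", ".join(t["name"] for t in info.get("target_instances", []))
    return f"[{row['ability']}] {info['question']} -> {answer} (targets: {targets})"


print(summarize(record))
# [sealvqa_vaw_attribute] What is the color of the shoes? -> The color of the shoes is gray. (targets: shoes)
```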