{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!git clone https://github.com/google-research/google-research.git\n",
    "# %pip (not !pip) installs into the running kernel's environment.\n",
    "%pip install -q tensorflow"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Download images and export step actions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "CgoFvmqCtps3",
    "outputId": "64381a1a-5e3d-4ce4-92dd-6a11e43f61ed"
   },
   "outputs": [],
   "source": [
    "# -*- coding: utf-8 -*-\n",
    "\"\"\"Convert AndroidControl TFRecord episodes into an instruction/action JSON dataset.\n",
    "\n",
    "Pipeline:\n",
    "  1. Read tf.train.Example episodes from gs://gresearch/android_control.\n",
    "  2. Map each step's raw action dict onto a small canonical action vocabulary.\n",
    "  3. Save each step screenshot as a PNG and emit one chat-style record per step.\n",
    "\"\"\"\n",
    "import io\n",
    "import json\n",
    "from pathlib import Path\n",
    "from typing import Any, Dict, List, Optional, Tuple\n",
    "\n",
    "import tensorflow as tf\n",
    "from PIL import Image\n",
    "\n",
    "# =========================\n",
    "# TF.Feature -> Python value conversion\n",
    "# =========================\n",
    "def feature_to_list_bytes(f: tf.train.Feature) -> List[bytes]:\n",
    "    \"\"\"Return the bytes_list payload of a Feature as a plain list.\"\"\"\n",
    "    return list(f.bytes_list.value)\n",
    "\n",
    "def feature_to_list_int(f: tf.train.Feature) -> List[int]:\n",
    "    \"\"\"Return the int64_list payload of a Feature as a plain list.\"\"\"\n",
    "    return list(f.int64_list.value)\n",
    "\n",
    "def feature_to_list_float(f: tf.train.Feature) -> List[float]:\n",
    "    \"\"\"Return the float_list payload of a Feature as a plain list.\"\"\"\n",
    "    return list(f.float_list.value)\n",
    "\n",
    "def get_feature(example: tf.train.Example, key: str) -> Optional[tf.train.Feature]:\n",
    "    \"\"\"Return the named feature of an Example, or None when the key is absent.\"\"\"\n",
    "    fmap = example.features.feature\n",
    "    return fmap[key] if key in fmap else None\n",
    "\n",
    "# =========================\n",
    "# Extract coordinates from action (supports both normalized/pixel)\n",
    "# =========================\n",
    "def extract_points_from_action(\n",
    "    action: Dict[str, Any],\n",
    "    img_w: int,\n",
    "    img_h: int\n",
    ") -> List[Tuple[int, int]]:\n",
    "    \"\"\"Collect every (x, y) coordinate an action dict may carry, in pixels.\n",
    "\n",
    "    Handles several schema variants: top-level x/y, nested point dicts\n",
    "    (point/click/tap/...), from/to and start/end pairs, and flat\n",
    "    start_x/start_y/end_x/end_y keys.\n",
    "\n",
    "    Returns:\n",
    "        [(x_px, y_px), ...] with duplicates removed, first-seen order kept.\n",
    "        A tap yields one point, a drag/swipe yields start and end, and an\n",
    "        action with no coordinates yields an empty list.\n",
    "    \"\"\"\n",
    "    pts: List[Tuple[int, int]] = []\n",
    "\n",
    "    def to_px(x: float, y: float, normalized: Optional[bool] = None) -> Tuple[int, int]:\n",
    "        # Heuristic: values inside [0, 1] are assumed to be normalized\n",
    "        # coordinates (a genuine pixel tap at (1, 1) would be misread,\n",
    "        # but that is a negligible corner of the screen).\n",
    "        if normalized is None:\n",
    "            normalized = (0.0 <= x <= 1.0 and 0.0 <= y <= 1.0)\n",
    "        if normalized:\n",
    "            return (int(round(x * img_w)), int(round(y * img_h)))\n",
    "        return (int(round(x)), int(round(y)))\n",
    "\n",
    "    # 1) Top-level x, y\n",
    "    if \"x\" in action and \"y\" in action:\n",
    "        pts.append(to_px(float(action[\"x\"]), float(action[\"y\"]), None))\n",
    "\n",
    "    # 2) point / click / tap / press / long_press / long_tap\n",
    "    for k in [\"point\", \"click\", \"tap\", \"press\", \"long_press\", \"long_tap\"]:\n",
    "        if k in action and isinstance(action[k], dict):\n",
    "            px = action[k]\n",
    "            if \"x\" in px and \"y\" in px:\n",
    "                pts.append(to_px(float(px[\"x\"]), float(px[\"y\"]), None))\n",
    "        if k in action and isinstance(action[k], list):\n",
    "            for px in action[k]:\n",
    "                if isinstance(px, dict) and \"x\" in px and \"y\" in px:\n",
    "                    pts.append(to_px(float(px[\"x\"]), float(px[\"y\"]), None))\n",
    "\n",
    "    # 3) from/to, start/end\n",
    "    for a, b in [(\"from\", \"to\"), (\"start\", \"end\")]:\n",
    "        if a in action and b in action and isinstance(action[a], dict) and isinstance(action[b], dict):\n",
    "            ax, ay = action[a].get(\"x\"), action[a].get(\"y\")\n",
    "            bx, by = action[b].get(\"x\"), action[b].get(\"y\")\n",
    "            if ax is not None and ay is not None and bx is not None and by is not None:\n",
    "                pts.append(to_px(float(ax), float(ay), None))\n",
    "                pts.append(to_px(float(bx), float(by), None))\n",
    "\n",
    "    # 4) start_x/start_y/end_x/end_y\n",
    "    cand = {\"start_x\": None, \"start_y\": None, \"end_x\": None, \"end_y\": None}\n",
    "    found = False\n",
    "    for ck in cand.keys():\n",
    "        if ck in action:\n",
    "            cand[ck] = float(action[ck])\n",
    "            found = True\n",
    "    if found and cand[\"start_x\"] is not None and cand[\"start_y\"] is not None:\n",
    "        pts.append(to_px(cand[\"start_x\"], cand[\"start_y\"], None))\n",
    "        if cand[\"end_x\"] is not None and cand[\"end_y\"] is not None:\n",
    "            pts.append(to_px(cand[\"end_x\"], cand[\"end_y\"], None))\n",
    "\n",
    "    # Remove duplicates while preserving first-seen order.\n",
    "    uniq: List[Tuple[int, int]] = []\n",
    "    seen = set()\n",
    "    for p in pts:\n",
    "        if p not in seen:\n",
    "            uniq.append(p)\n",
    "            seen.add(p)\n",
    "    return uniq\n",
    "\n",
    "# =========================\n",
    "# Parse episode from TF Example\n",
    "# =========================\n",
    "def load_episode_from_example(ex: tf.train.Example) -> Dict[str, Any]:\n",
    "    \"\"\"Unpack one AndroidControl episode Example into a plain dict.\n",
    "\n",
    "    Raises:\n",
    "        AssertionError: when the per-step lists are inconsistent\n",
    "            (actions/step_instructions must be one shorter than screenshots).\n",
    "    \"\"\"\n",
    "    f = ex.features.feature\n",
    "\n",
    "    screenshots_bytes = feature_to_list_bytes(f[\"screenshots\"])\n",
    "    a11y_bytes_list = feature_to_list_bytes(f[\"accessibility_trees\"])\n",
    "    widths = feature_to_list_int(f[\"screenshot_widths\"])\n",
    "    heights = feature_to_list_int(f[\"screenshot_heights\"])\n",
    "\n",
    "    actions_json_list = [b.decode(\"utf-8\") for b in feature_to_list_bytes(f[\"actions\"])]\n",
    "    step_insts = [b.decode(\"utf-8\") for b in feature_to_list_bytes(f[\"step_instructions\"])]\n",
    "    actions = [json.loads(s) for s in actions_json_list]\n",
    "\n",
    "    goal = f[\"goal\"].bytes_list.value[0].decode(\"utf-8\")\n",
    "    # episode_id is stored either as an int64 or as a numeric byte string.\n",
    "    episode_id = int(f[\"episode_id\"].int64_list.value[0]) if f[\"episode_id\"].int64_list.value else int(\n",
    "        f[\"episode_id\"].bytes_list.value[0].decode(\"utf-8\")\n",
    "    )\n",
    "\n",
    "    assert len(screenshots_bytes) == len(widths) == len(heights), \"screenshot/width/height length mismatch\"\n",
    "    assert len(actions) == len(step_insts) == (len(screenshots_bytes) - 1), \"actions/step_instructions must equal screenshots-1\"\n",
    "\n",
    "    return {\n",
    "        \"episode_id\": episode_id,\n",
    "        \"goal\": goal,\n",
    "        \"screenshots\": screenshots_bytes,\n",
    "        \"a11y\": a11y_bytes_list,\n",
    "        \"widths\": widths,\n",
    "        \"heights\": heights,\n",
    "        \"actions\": actions,\n",
    "        \"step_instructions\": step_insts,\n",
    "    }\n",
    "\n",
    "# =========================\n",
    "# Action mapping & utilities\n",
    "# =========================\n",
    "def _center_xy(w: int, h: int) -> Tuple[int, int]:\n",
    "    \"\"\"Screen-center fallback coordinate.\"\"\"\n",
    "    return (int(round(w / 2)), int(round(h / 2)))\n",
    "\n",
    "def _norm_dir(d: Optional[str]) -> str:\n",
    "    \"\"\"Normalize assorted direction spellings to up/down/left/right (default: down).\"\"\"\n",
    "    if not d:\n",
    "        return \"down\"\n",
    "    d = str(d).lower()\n",
    "    if d in [\"up\", \"down\", \"left\", \"right\"]:\n",
    "        return d\n",
    "    if d in [\"u\", \"top\"]:\n",
    "        return \"up\"\n",
    "    if d in [\"d\", \"bottom\"]:\n",
    "        return \"down\"\n",
    "    if d in [\"l\"]:\n",
    "        return \"left\"\n",
    "    if d in [\"r\"]:\n",
    "        return \"right\"\n",
    "    return \"down\"\n",
    "\n",
    "def map_action(\n",
    "    action: Dict[str, Any],\n",
    "    w: int,\n",
    "    h: int,\n",
    "    pts: List[Tuple[int, int]],\n",
    ") -> Optional[Dict[str, Any]]:\n",
    "    \"\"\"Map a raw action dict onto the canonical action vocabulary.\n",
    "\n",
    "    Allowed mappings:\n",
    "        click         -> {\"type\": \"touch\", \"x\": , \"y\": }\n",
    "        long_press    -> {\"type\": \"long_touch\", \"x\": , \"y\": }\n",
    "        input_text    -> {\"type\": \"set_text\", \"text\": \"...\", \"x\": , \"y\": }\n",
    "        scroll        -> {\"type\": \"scroll\", \"direction\": \"up|down|left|right\", \"x\": , \"y\": }\n",
    "        navigate_home -> {\"type\": \"press\", \"key\": \"home\"}\n",
    "        navigate_back -> {\"type\": \"press\", \"key\": \"back\"}\n",
    "\n",
    "    Returns None for any other action type (open_app, wait, ...) so the\n",
    "    caller can skip the step.\n",
    "    \"\"\"\n",
    "    atype = (action.get(\"action_type\") or action.get(\"type\") or action.get(\"action\") or \"\").lower()\n",
    "    # Fall back to the screen center when the action carries no coordinates.\n",
    "    x, y = (pts[0] if pts else _center_xy(w, h))\n",
    "\n",
    "    if atype in [\"click\", \"tap\", \"press\", \"click_view\"]:\n",
    "        return {\"type\": \"touch\", \"x\": x, \"y\": y}\n",
    "\n",
    "    if atype in [\"long_press\", \"long_tap\", \"long_click\"]:\n",
    "        return {\"type\": \"long_touch\", \"x\": x, \"y\": y}\n",
    "\n",
    "    if atype in [\"input_text\", \"set_text\", \"type_text\", \"enter_text\", \"text\"]:\n",
    "        text = action.get(\"text\") or action.get(\"input_text\") or action.get(\"value\") or \"\"\n",
    "        return {\"type\": \"set_text\", \"text\": str(text), \"x\": x, \"y\": y}\n",
    "\n",
    "    if atype in [\"scroll\", \"swipe\"]:\n",
    "        # Use the midpoint of a drag gesture when both endpoints are known.\n",
    "        if len(pts) >= 2:\n",
    "            cx = (pts[0][0] + pts[1][0]) // 2\n",
    "            cy = (pts[0][1] + pts[1][1]) // 2\n",
    "        else:\n",
    "            cx, cy = _center_xy(w, h)\n",
    "        return {\"type\": \"scroll\", \"direction\": _norm_dir(action.get(\"direction\")), \"x\": cx, \"y\": cy}\n",
    "\n",
    "    if atype in [\"navigate_home\", \"home\", \"press_home\"]:\n",
    "        return {\"type\": \"press\", \"key\": \"home\"}\n",
    "\n",
    "    if atype in [\"navigate_back\", \"back\", \"press_back\"]:\n",
    "        return {\"type\": \"press\", \"key\": \"back\"}\n",
    "\n",
    "    # Others (open_app, wait, etc.) -> skip saving\n",
    "    return None\n",
    "\n",
    "def save_clean_image(img_bytes: bytes, episode_id: int, step_idx: int, base_dir: str = \"and_ctrl\") -> str:\n",
    "    \"\"\"Save a screenshot as out_episode_{EP}_step_{STEP:03d}.png (without overlay).\n",
    "\n",
    "    Returns the path relative to the notebook root, e.g.\n",
    "    \"and_ctrl/out_episode_1_step_000.png\".\n",
    "    \"\"\"\n",
    "    Path(base_dir).mkdir(parents=True, exist_ok=True)\n",
    "    fname = f\"out_episode_{episode_id}_step_{step_idx:03d}.png\"\n",
    "    fpath = Path(base_dir) / fname\n",
    "    Image.open(io.BytesIO(img_bytes)).convert(\"RGB\").save(fpath)\n",
    "    # Return just the relative path from base_dir\n",
    "    return f\"{base_dir}/{fname}\"\n",
    "\n",
    "# =========================\n",
    "# Export messages to JSON\n",
    "# =========================\n",
    "def export_messages(ds, limit_episodes: int = 5, out_json: str = \"and_ctrl.json\", image_dir: str = \"and_ctrl\"):\n",
    "    \"\"\"Extract allowed actions from the first N episodes of a TFRecordDataset\n",
    "    and save them in chat-request format to out_json.\n",
    "    \"\"\"\n",
    "    all_items: List[Dict[str, Any]] = []\n",
    "    ep_cnt = 0\n",
    "\n",
    "    for raw in ds:\n",
    "        ex = tf.train.Example()\n",
    "        ex.ParseFromString(raw.numpy())\n",
    "        ep = load_episode_from_example(ex)\n",
    "\n",
    "        ep_id = ep[\"episode_id\"]\n",
    "        for i, (action, inst) in enumerate(zip(ep[\"actions\"], ep[\"step_instructions\"])):\n",
    "            w, h = ep[\"widths\"][i], ep[\"heights\"][i]\n",
    "            img_bytes = ep[\"screenshots\"][i]\n",
    "            pts = extract_points_from_action(action, w, h)\n",
    "            mapped = map_action(action, w, h, pts)\n",
    "            if not mapped:\n",
    "                continue  # unsupported action type -> skip the step\n",
    "\n",
    "            img_path = save_clean_image(img_bytes, ep_id, i, base_dir=image_dir)\n",
    "\n",
    "            all_items.append({\n",
    "                \"messages\": [\n",
    "                    {\"role\": \"user\", \"content\": f\"\\n{inst}\"},\n",
    "                    # Save as Python dict string (single quotes), not JSON\n",
    "                    {\"role\": \"assistant\", \"content\": str(mapped)}\n",
    "                ],\n",
    "                \"images\": [img_path]\n",
    "            })\n",
    "\n",
    "        ep_cnt += 1\n",
    "        if ep_cnt >= limit_episodes:\n",
    "            break\n",
    "\n",
    "    with open(out_json, \"w\", encoding=\"utf-8\") as f:\n",
    "        json.dump(all_items, f, ensure_ascii=False, indent=2)\n",
    "\n",
    "    print(f\"[DONE] episodes processed: {ep_cnt}, items saved: {len(all_items)} → {out_json}\")\n",
    "\n",
    "# =========================\n",
    "# Main entry point\n",
    "# =========================\n",
    "def main():\n",
    "    # Adjust path pattern if needed\n",
    "    filenames = tf.io.gfile.glob('gs://gresearch/android_control/android_control*')\n",
    "    # Fail loudly instead of silently iterating an empty dataset.\n",
    "    if not filenames:\n",
    "        raise FileNotFoundError(\"no TFRecord shards matched gs://gresearch/android_control/android_control*\")\n",
    "    ds = tf.data.TFRecordDataset(filenames, compression_type='GZIP')\n",
    "    export_messages(ds, limit_episodes=50, out_json=\"and_ctrl.json\", image_dir=\"and_ctrl\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "9wgmlG4CxlkL",
    "outputId": "8dfce9bd-7003-4637-98ad-6843f2aeb9f9"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "46.5 GiB gs://gresearch/android_control/android_control*\n"
     ]
    }
   ],
   "source": [
    "!gsutil du -sh gs://gresearch/android_control/android_control*\n"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "gpuType": "T4",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "and_ctrl",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}