Datasets:

Modalities:
Text
Formats:
webdataset
ArXiv:
Libraries:
Datasets
WebDataset
License:
juannat7 committed on
Commit
33d09a9
·
verified ·
1 Parent(s): acd6154

Upload /process_causaldynamics.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. process_causaldynamics.py +43 -36
process_causaldynamics.py CHANGED
@@ -1,49 +1,56 @@
1
  import os
2
- import glob
3
  import tarfile
 
 
4
 
5
def extract_archives(shards_dir: str, target_base: str):
    """
    Unpack every .tar.gz archive found in `shards_dir` into `target_base`.

    Internal archive paths are preserved, so files land wherever the
    tarball's members say they should, relative to `target_base`.

    Args:
        shards_dir (str): Directory holding the .tar.gz shards
            (e.g., "inputs" or "outputs").
        target_base (str): Destination directory for the extracted files
            (e.g., "data").
    """
    # Guard clause: a missing shard directory is not an error here.
    if not os.path.isdir(shards_dir):
        print(f"Directory '{shards_dir}' not found. Skipping.")
        return

    pattern = os.path.join(shards_dir, "*.tar.gz")
    archive_paths = sorted(glob.glob(pattern))
    if not archive_paths:
        print(f"No archives found in '{shards_dir}'.")
        return

    for archive_path in archive_paths:
        print(f"Extracting '{archive_path}' into '{target_base}'...")
        # NOTE(review): extractall() trusts member paths; fine for this
        # trusted dataset, but consider tarfile's filter="data" (3.12+)
        # if archives could ever come from an untrusted source.
        tar = tarfile.open(archive_path, 'r:gz')
        try:
            tar.extractall(path=target_base)
        finally:
            tar.close()
    print(f"Done extracting archives from '{shards_dir}'.\n")
 
 
 
 
 
 
 
28
 
29
def main():
    """
    Download and extract CausalDynamics data from HuggingFace.

    Extracts every shard tarball found under ./inputs and ./outputs
    into a shared ./data directory.

    Example usage:
        python process_causaldynamics.py
    """
    cwd = os.getcwd()
    # All archives, from both shard directories, land in one place.
    destination = os.path.join(cwd, "data")
    os.makedirs(destination, exist_ok=True)

    # Data tarballs first, then eval tarballs — same order as before.
    for shard_name in ("inputs", "outputs"):
        extract_archives(os.path.join(cwd, shard_name), destination)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
# Entry point: allow importing this module without running the pipeline.
if __name__ == "__main__":
    main()
 
 
1
  import os
 
2
  import tarfile
3
+ import shutil
4
+ import requests
5
 
6
def download_file(url: str, dest: str):
    """Download a file from a URL to a destination path.

    Skips the download entirely when `dest` already exists, making
    re-runs of the script cheap. The response body is streamed in
    8 KiB chunks so large tarballs never sit fully in memory.

    Args:
        url (str): Source URL to fetch.
        dest (str): Local filesystem path to write the file to.

    Raises:
        requests.HTTPError: If the server responds with a 4xx/5xx status.
        requests.exceptions.Timeout: If the server stalls past the timeout.
    """
    if os.path.exists(dest):
        print(f"File '{dest}' already exists. Skipping download.")
        return
    print(f"Downloading {url} → {dest}")
    # Fix: requests has no default timeout — without one a stalled
    # connection would hang the script indefinitely. 60 s covers slow
    # mirrors while still failing fast on a dead server.
    response = requests.get(url, stream=True, timeout=60)
    response.raise_for_status()
    with open(dest, 'wb') as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)
    print(f"Downloaded: {dest}")
18
+
19
def extract_tar_gz(filepath: str, target_dir: str):
    """Extract a .tar.gz archive to a target directory."""
    print(f"Extracting {filepath} → {target_dir}")
    # NOTE(review): extractall() trusts member paths; acceptable for this
    # trusted dataset, but use filter="data" (3.12+) for untrusted input.
    archive = tarfile.open(filepath, "r:gz")
    try:
        archive.extractall(path=target_dir)
    finally:
        archive.close()
    print(f"Extracted: {filepath}")
25
 
26
def main():
    """Download and extract the CausalDynamics dataset from HuggingFace.

    For each stage ("inputs", "outputs") and each subset tarball, this
    downloads the archive into a stage-local directory, extracts it into
    ./data, and finally removes the stage directories to reclaim disk.

    Example usage:
        python process_causaldynamics.py
    """
    base_url = "https://huggingface.co/datasets/kausable/CausalDynamics/resolve/main"
    subsets = ["climate", "simple", "coupled"]
    stages = ["inputs", "outputs"]

    base_dir = os.getcwd()
    target_base = os.path.join(base_dir, "data")
    os.makedirs(target_base, exist_ok=True)

    for stage in stages:
        stage_dir = os.path.join(base_dir, stage)
        os.makedirs(stage_dir, exist_ok=True)

        for subset in subsets:
            filename = f"{subset}.tar.gz"
            # Fix: the remote path must end with the shard's file name —
            # the scraped source had a garbled "(unknown)" placeholder here.
            # `dest_path` below confirms the file is addressed by `filename`.
            url = f"{base_url}/{stage}/{filename}"
            dest_path = os.path.join(stage_dir, filename)

            download_file(url, dest_path)
            extract_tar_gz(dest_path, target_base)

    # Clean up: once everything is extracted into `target_base`, the raw
    # tarball directories are no longer needed.
    for stage in stages:
        dir_to_remove = os.path.join(base_dir, stage)
        if os.path.isdir(dir_to_remove):
            shutil.rmtree(dir_to_remove)
            print(f"Removed: {dir_to_remove}")
53
 
54
# Script entry point — guarded so importing the module has no side effects.
if __name__ == "__main__":
    main()
56
+