Commit d0916a6 (parent: 9fd97a0)
Sricharan Reddy Varra committed: updates to downloading, dataset updates
ark_example.py  CHANGED
@@ -21,6 +21,8 @@ import os
 import datasets
 import pathlib
 import glob
+import pyarrow
+import pprint
 
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
@@ -48,17 +50,17 @@ _LICENSE = "https://github.com/angelolab/ark-analysis/blob/main/LICENSE"
 
 
 _URL_DATA = {
-    "…
-    "…
-    "…
+    "image_data": "./data/image_data.zip",
+    "cell_table": "./data/segmentation/cell_table.zip",
+    "deepcell_output": "./data/segmentation/deepcell_output.zip",
 }
 
 _URL_DATASET_CONFIGS = {
-    "nb1": {"…
+    "nb1": {"image_data": _URL_DATA["image_data"]},
     "nb2": {
-        "…
-        "…
-        "…
+        "image_data": _URL_DATA["image_data"],
+        "cell_table": _URL_DATA["cell_table"],
+        "deepcell_output": _URL_DATA["deepcell_output"],
     },
 }
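The new `_URL_DATA` values are archive paths relative to the dataset repository root, and each `_URL_DATASET_CONFIGS` entry selects the subset of archives a given notebook needs. A minimal sketch of that lookup (the dicts are copied from the hunk above; `archives_for` is a hypothetical helper, the builder does this lookup inline in `_split_generators`):

import pathlib

_URL_DATA = {
    "image_data": "./data/image_data.zip",
    "cell_table": "./data/segmentation/cell_table.zip",
    "deepcell_output": "./data/segmentation/deepcell_output.zip",
}

_URL_DATASET_CONFIGS = {
    "nb1": {"image_data": _URL_DATA["image_data"]},
    "nb2": {name: _URL_DATA[name] for name in _URL_DATA},
}

def archives_for(config_name: str) -> dict:
    # Hypothetical helper mirroring `urls = _URL_DATASET_CONFIGS[self.config.name]`.
    return _URL_DATASET_CONFIGS[config_name]

print(archives_for("nb1"))  # {'image_data': './data/image_data.zip'}
print(archives_for("nb2"))  # all three archives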
@@ -112,12 +114,12 @@ class ArkExample(datasets.GeneratorBasedBuilder):
 
     def _info(self):
         # This is the name of the configuration selected in BUILDER_CONFIGS above
-        if self.config.name …
-            features = datasets.Features(
-                …
-                …
+        if self.config.name in ["nb1", "nb2", "nb3", "nb4"]:
+            features = datasets.Features(
+                {f: datasets.Value("string") for f in _URL_DATASET_CONFIGS[self.config.name].keys()}
+            )
         else:
-            …
+            ValueError("dataset name is incorrect.")
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
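Each archive key in the selected config becomes a plain string feature, since the generated examples carry local paths rather than the data itself. Two caveats are visible in the hunk: the `else` branch constructs a `ValueError` without raising it, so an unknown name falls through with `features` unbound, and `_info` accepts `nb3`/`nb4` even though `_URL_DATASET_CONFIGS` only defines `nb1` and `nb2` at this point, so those names would fail at the dict lookup. A minimal sketch of the presumably intended behavior (the `raise` is an assumption about intent, not what this commit does):

import datasets

def build_features(config_name: str, url_configs: dict) -> datasets.Features:
    # One string-valued column per archive; values are extracted local paths.
    if config_name in url_configs:
        return datasets.Features(
            {key: datasets.Value("string") for key in url_configs[config_name]}
        )
    # The commit builds the exception but never raises it; raising is the
    # conventional fix (assumption).
    raise ValueError(f"dataset name {config_name!r} is incorrect.")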
@@ -142,29 +144,19 @@ class ArkExample(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = _URL_DATASET_CONFIGS[self.config.name]
-        …
+        data_dirs = {}
+        for data_name, url in urls.items():
+            dl_path = pathlib.Path(dl_manager.download_and_extract(url))
+            data_dirs[data_name] = dl_path
 
         return [
             datasets.SplitGenerator(
                 name=self.config.name,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={"…
+                gen_kwargs={"dataset_paths": data_dirs},
             ),
         ]
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, …
-
-        # Get all TMA paths
-        file_paths = list(pathlib.Path(filepath / "input_data").glob("*"))
-
-        # Loop over all the TMAs
-        for fp in file_paths:
-
-            # Get the file Name
-            fn = fp.stem
-
-            if self.config.name == "fovs":
-                yield fn, {
-                    "Data Path": filepath.as_posix(),
-                }
+    def _generate_examples(self, dataset_paths: dict[str, pathlib.Path]):
+        yield self.config.name, dataset_paths
data/{input_data.zip → image_data.zip}  RENAMED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:ed9e347c43a846d4fb795d5c139fbb9dd3b1dc112df39a8ff90a79455b8d1420
 size 400326580
data/segmentation/deepcell_output.zip  CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:1de1fc3a72b500f2862d194b1f2c832af9097cb5139c35d1c5fdda1fc7178473
 size 916593
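Both `.zip` entries are Git LFS pointer files: the repository stores only the object's `oid sha256:` and `size` lines, while the archive bytes live in LFS storage, so the diffs above each change a single pointer line (the rename also tracks the input_data.zip → image_data.zip path change in `_URL_DATA`). A small sketch for verifying a downloaded archive against its pointer (oid and size copied from the deepcell_output.zip diff; the local path is an assumption):

import hashlib
import pathlib

def sha256_of(path: pathlib.Path, chunk_size: int = 1 << 20) -> str:
    # Stream in chunks so multi-hundred-MB archives don't load into memory.
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

archive = pathlib.Path("data/segmentation/deepcell_output.zip")
assert archive.stat().st_size == 916593
assert sha256_of(archive) == (
    "1de1fc3a72b500f2862d194b1f2c832af9097cb5139c35d1c5fdda1fc7178473"
)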