"""Build CF-MS (co-fractionation mass spectrometry) datasets from .elut
elution tables and PPI pair lists, and save them as Parquet files."""

from __future__ import annotations

import argparse
import os
from typing import Dict, Iterable, Iterator, List, Optional, Tuple

from tqdm.auto import tqdm

from datasets import (
    ClassLabel,
    Dataset,
    DatasetDict,
    Features,
    Sequence,
    Value,
    disable_progress_bars,
)

# Silence the datasets library's internal progress bars; tqdm handles progress here.
disable_progress_bars()


# Default .elut elution tables, resolved relative to --base_dir.
DEFAULT_ELUT_FILES = [
    "CB660_neural_stem_cell_IEX_Wan_2015.elut",
    "G166_glioma_stem_cell_IEX_Wan_2015_Hs_HCW_2.elut",
    "G166_glioma_stem_cell_IEX_Wan_2015_Hs_HCW_3.elut",
    "IEX_Wan_2015_Hs_IEX_1.elut",
    "IEX_Wan_2015_Hs_IEX_2.elut",
    "IEX_Wan_2015_Hs_HCW_4.elut",
    "IEX_Wan_2015_Hs_HCW_5.elut",
    "IEX_Wan_2015_Hs_HCW_6.elut",
    "IEX_Wan_2015_Hs_HCW_7.elut",
    "IEX_Wan_2015_Hs_HCW_8.elut",
    "IEX_Wan_2015_Hs_HCW_9.elut",
    "HEK_293_T_cells_SEC_Mallam_2019_C1.elut",
    "HEK_293_T_cells_SEC_Mallam_2019_C2.elut",
    "NTera2_embryonal_carcinoma_stem_cells_IEX_Moutaoufik_2019_2_R1.elut",
    "NTera2_embryonal_carcinoma_stem_cells_IEX_Moutaoufik_2019_2_R2.elut",
    "NTera2_embryonal_carcinoma_stem_cells_IEX_Moutaoufik_2019_R1.elut",
    "NTera2_embryonal_carcinoma_stem_cells_IEX_Moutaoufik_2019_R2.elut",
    "NTera2_embryonal_carcinoma_stem_cells_SEC_Moutaoufik_2019_2_R1.elut",
    "NTera2_embryonal_carcinoma_stem_cells_SEC_Moutaoufik_2019_2_R2.elut",
    "NTera2_embryonal_carcinoma_stem_cells_SEC_Moutaoufik_2019_R1.elut",
    "NTera2_embryonal_carcinoma_stem_cells_SEC_Moutaoufik_2019_R2.elut",
    "T98G_glioblastoma_multiforme_cells_SEC_Conelly_2018_Bio1.elut",
    "T98G_glioblastoma_multiforme_cells_SEC_Conelly_2018_Bio2.elut",
    "U2OS_cells_SEC_Kirkwood_2013_rep1.elut",
    "U2OS_cells_SEC_Kirkwood_2013_rep2.elut",
    "U2OS_cells_SEC_Kirkwood_2013_rep3.elut",
    "U2OS_cells_SEC_Larance_2016_PT3281S1.elut",
    "U2OS_cells_SEC_Larance_2016_PT3441S1.elut",
    "U2OS_cells_SEC_Larance_2016_PT3442S1.elut",
    "U2OS_cells_SEC_Larance_2016_PT3701S1.elut",
    "U2OS_cells_SEC_Larance_2016_PTSS3801.elut",
    "U2OS_cells_SEC_Larance_2016_PTSS3802.elut",
]


# Default positive/negative pair files per split, resolved relative to --base_dir.
DEFAULT_PAIR_FILES = {
    "train": (
        "intact_complex_merge_20230309.train_ppis.txt",
        "intact_complex_merge_20230309.neg_train_ppis.txt",
    ),
    "test": (
        "intact_complex_merge_20230309.test_ppis.txt",
        "intact_complex_merge_20230309.neg_test_ppis.txt",
    ),
}


def read_elut_table(path: str) -> Tuple[str, List[str], Dict[str, List[int]]]:
    """Parse a tab-separated .elut file.

    Returns (experiment_id, fraction_names, table), where experiment_id is the
    file basename without extension and table maps each UniProt ID to its
    per-fraction counts. Missing or unparseable values become 0.
    """
    experiment_id = os.path.splitext(os.path.basename(path))[0]
    table: Dict[str, List[int]] = {}

    with open(path, "r", encoding="utf-8", errors="ignore") as f:
        # Header row: first column is the protein ID, the rest are fraction names.
        first = f.readline().rstrip("\n\r")
        if not first:
            return experiment_id, [], {}
        cols = first.split("\t")
        fraction_names = cols[1:]

        for line in f:
            line = line.rstrip("\n\r")
            if not line:
                continue
            toks = line.split("\t")
            if not toks:
                continue
            uniprot_id = toks[0]
            trace: List[int] = []
            for x in toks[1:]:
                if not x:
                    trace.append(0)
                else:
                    try:
                        trace.append(int(float(x)))
                    except ValueError:
                        trace.append(0)
            table[uniprot_id] = trace

    return experiment_id, fraction_names, table
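
# A minimal sketch of the .elut layout read_elut_table expects (tab-separated;
# IDs and values illustrative, not from any real file):
#
#     <header>\tF1\tF2\tF3
#     P12345\t0\t12\t3
#     Q67890\t5\t0\t0
#
# which parses into fraction_names == ["F1", "F2", "F3"] and
# table == {"P12345": [0, 12, 3], "Q67890": [5, 0, 0]}.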


def read_pair_file(path: str) -> Iterable[Tuple[str, str]]:
    """Yield (id1, id2) pairs from a whitespace-separated pair list.

    Blank lines, '#' comment lines, and rows without exactly two tokens are skipped.
    """
    with open(path, "r", encoding="utf-8", errors="ignore") as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            parts = line.split()
            if len(parts) != 2:
                continue
            yield parts[0], parts[1]
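
# A minimal sketch of a pair file accepted by read_pair_file (IDs illustrative):
#
#     # optional comment
#     P12345 Q67890
#     O11111 P22222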


def build_proteins_dataset(elut_files: List[str]) -> Dataset:
    """Build the 'proteins' view: one example per (experiment, protein) with its full trace."""
    features = Features(
        {
            "experiment_id": Value("string"),
            "uniprot_id": Value("string"),
            "fraction_names": Sequence(Value("string")),
            "trace": Sequence(Value("int32")),
        }
    )

    def gen() -> Iterator[dict]:
        with tqdm(total=None, unit="ex", desc="proteins: examples") as pbar:
            for elut_path in elut_files:
                exp_id, frac_names, table = read_elut_table(elut_path)
                for uid, trace in tqdm(table.items(),
                                       desc=os.path.basename(elut_path),
                                       leave=False):
                    yield {
                        "experiment_id": exp_id,
                        "uniprot_id": uid,
                        "fraction_names": frac_names,
                        "trace": trace,
                    }
                    pbar.update(1)

    return Dataset.from_generator(gen, features=features)
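
# Each 'proteins' example pairs one protein's trace with its experiment's
# fraction names, e.g. (values illustrative):
#
#     {"experiment_id": "U2OS_cells_SEC_Kirkwood_2013_rep1",
#      "uniprot_id": "P12345",
#      "fraction_names": ["F1", "F2", "F3"],
#      "trace": [0, 12, 3]}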


def build_pairs_dataset(
    elut_files: List[str],
    split_posneg: Dict[str, Tuple[List[str], List[str]]],
) -> DatasetDict:
    """Build the 'pairs' view: one example per (experiment, protein pair).

    A pair from a pos/neg file yields one example for every experiment in which
    both proteins were observed; experiments missing either protein are skipped.
    """
    features = Features(
        {
            "experiment_id": Value("string"),
            "uniprot_id1": Value("string"),
            "uniprot_id2": Value("string"),
            "elut_trace1": Sequence(Value("int32")),
            "elut_trace2": Sequence(Value("int32")),
            "label": ClassLabel(names=["neg", "pos"]),
        }
    )

    # Load every elution table up front so pair lookups are in-memory dict hits.
    elut_tables: Dict[str, Tuple[List[str], Dict[str, List[int]]]] = {}
    for elut_path in tqdm(elut_files, desc="pairs: elution files"):
        try:
            exp_id, frac_names, table = read_elut_table(elut_path)
            elut_tables[exp_id] = (frac_names, table)
        except Exception as e:
            tqdm.write(f"WARNING: Failed to read {elut_path}: {e}")

    def make_split_gen(split: str, pos_files: List[str], neg_files: List[str]) -> Iterator[dict]:
        with tqdm(unit="ex", desc=f"{split}: examples") as pbar:

            def emit_for_pair(a: str, b: str, label: str) -> Iterator[dict]:
                for exp_id, (_fracs, table) in elut_tables.items():
                    ta = table.get(a)
                    tb = table.get(b)
                    if ta is None or tb is None:
                        continue
                    yield {
                        "experiment_id": exp_id,
                        "uniprot_id1": a,
                        "uniprot_id2": b,
                        "elut_trace1": ta,
                        "elut_trace2": tb,
                        "label": label,
                    }
                    pbar.update(1)

            for pf in pos_files:
                for a, b in read_pair_file(pf):
                    yield from emit_for_pair(a, b, "pos")

            for nf in neg_files:
                for a, b in read_pair_file(nf):
                    yield from emit_for_pair(a, b, "neg")

    dataset_dict = {}
    for split, (pos_paths, neg_paths) in split_posneg.items():
        dataset_dict[split] = Dataset.from_generator(
            make_split_gen,
            gen_kwargs={"split": split, "pos_files": pos_paths, "neg_files": neg_paths},
            features=features,
        )
    return DatasetDict(dataset_dict)
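
# Note: the ClassLabel feature encodes the string labels yielded above to
# integer ids ("neg" -> 0, "pos" -> 1), so downstream consumers see ints.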


def existing(paths: List[str]) -> List[str]:
    """Filter out empty or nonexistent paths."""
    return [p for p in paths if p and os.path.exists(p)]


def resolve_elut_files(base_dir: str, user_elut: Optional[List[str]]) -> List[str]:
    """Return user-supplied .elut paths, or the defaults found under base_dir."""
    if user_elut:
        return existing([os.path.abspath(p) for p in user_elut])

    candidates = [os.path.join(base_dir, f) for f in DEFAULT_ELUT_FILES]
    files = existing(candidates)
    if not files:
        raise FileNotFoundError(
            "No .elut files found. Provide --elut or place defaults under --base_dir."
        )
    return files


def resolve_pair_files(
    base_dir: str,
    train_pos: Optional[List[str]],
    train_neg: Optional[List[str]],
    test_pos: Optional[List[str]],
    test_neg: Optional[List[str]],
) -> Dict[str, Tuple[List[str], List[str]]]:
    """Map each split to (positive pair files, negative pair files).

    User-supplied paths take precedence; otherwise the defaults under base_dir
    are used. Raises if no pair file actually exists on disk.
    """
    out: Dict[str, Tuple[List[str], List[str]]] = {}
    if any([train_pos, train_neg, test_pos, test_neg]):
        if train_pos or train_neg:
            out["train"] = (
                existing([os.path.abspath(p) for p in train_pos or []]),
                existing([os.path.abspath(p) for p in train_neg or []]),
            )
        if test_pos or test_neg:
            out["test"] = (
                existing([os.path.abspath(p) for p in test_pos or []]),
                existing([os.path.abspath(p) for p in test_neg or []]),
            )
    else:
        for split, (pos_path, neg_path) in DEFAULT_PAIR_FILES.items():
            out[split] = (
                existing([os.path.join(base_dir, pos_path)]),
                existing([os.path.join(base_dir, neg_path)]),
            )
    # The branches above always populate the splits they touch, so check for
    # actual files rather than for an empty dict.
    if not any(pos or neg for pos, neg in out.values()):
        raise ValueError("No pair files found for the 'pairs' view.")
    return out


def main():
    ap = argparse.ArgumentParser(
        description="Build a CF-MS dataset from .elut tables and pair-list .txt files, "
                    "in either the 'pairs' or the 'proteins' configuration."
    )
    ap.add_argument("--view", choices=["proteins", "pairs"], default="pairs",
                    help="Dataset configuration to build (default: pairs).")
    ap.add_argument("--base_dir", type=str, default=os.getcwd(),
                    help="Directory searched for default files when paths are not "
                         "provided via --elut or --{train/test}_{pos/neg}.")
    ap.add_argument("--elut", type=str, nargs="*",
                    help="Path(s) to .elut files. If omitted, defaults under --base_dir are used.")
    ap.add_argument("--train_pos", type=str, nargs="*",
                    help="Path(s) to positive TRAINING pairs .txt")
    ap.add_argument("--train_neg", type=str, nargs="*",
                    help="Path(s) to negative TRAINING pairs .txt")
    ap.add_argument("--test_pos", type=str, nargs="*",
                    help="Path(s) to positive TEST pairs .txt")
    ap.add_argument("--test_neg", type=str, nargs="*",
                    help="Path(s) to negative TEST pairs .txt")
    ap.add_argument("--out_dir", type=str, default=os.getcwd(),
                    help="Output directory for dataset .parquet files")
    args = ap.parse_args()

    base_dir = os.path.abspath(args.base_dir)
    os.makedirs(args.out_dir, exist_ok=True)

    elut_files = resolve_elut_files(base_dir, args.elut)

    if args.view == "proteins":
        ds = build_proteins_dataset(elut_files)
        ds.to_parquet(os.path.join(args.out_dir, "proteins_indiv_elut.parquet"))
        print(f"[ok] proteins dataset saved to {args.out_dir}")
    else:
        split_posneg = resolve_pair_files(
            base_dir,
            args.train_pos, args.train_neg,
            args.test_pos, args.test_neg,
        )
        dataset_dict = build_pairs_dataset(elut_files, split_posneg)
        # Write one Parquet file per split; resolve_pair_files may yield only one.
        for split, ds in dataset_dict.items():
            ds.to_parquet(os.path.join(args.out_dir, f"pairs_elut_{split.upper()}.parquet"))
            print(f"[ok] {split}: {len(ds):,} examples")
        print(f"[ok] pairs dataset saved to {args.out_dir}")


if __name__ == "__main__":
    main()
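
# Example invocations (script name and paths are illustrative):
#
#   python build_cfms_dataset.py --view proteins --base_dir /data/cfms --out_dir out/
#   python build_cfms_dataset.py --view pairs --elut a.elut b.elut \
#       --train_pos pos_train.txt --train_neg neg_train.txt \
#       --test_pos pos_test.txt --test_neg neg_test.txt --out_dir out/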