MrLight commited on
Commit
5ff8348
·
verified ·
1 Parent(s): 153491a

Delete msmarco-passage.py

Browse files
Files changed (1) hide show
  1. msmarco-passage.py +0 -105
msmarco-passage.py DELETED
@@ -1,105 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """MsMarco Passage dataset."""
18
-
19
- import json
20
-
21
- import datasets
22
-
23
# BibTeX citation for the MS MARCO dataset paper (Bajaj et al., 2018).
_CITATION = """
@misc{bajaj2018ms,
      title={MS MARCO: A Human Generated MAchine Reading COmprehension Dataset},
      author={Payal Bajaj and Daniel Campos and Nick Craswell and Li Deng and Jianfeng Gao and Xiaodong Liu
      and Rangan Majumder and Andrew McNamara and Bhaskar Mitra and Tri Nguyen and Mir Rosenberg and Xia Song
      and Alina Stoica and Saurabh Tiwary and Tong Wang},
      year={2018},
      eprint={1611.09268},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

# Short description shown on the dataset page.
_DESCRIPTION = "dataset load script for MSMARCO Passage"

# Per-split download URLs; each points to a gzipped JSON-lines file
# hosted on the Hugging Face Hub (Tevatron/msmarco-passage repo).
_DATASET_URLS = {
    'train': "https://huggingface.co/datasets/Tevatron/msmarco-passage/resolve/main/train.jsonl.gz",
    'dev': "https://huggingface.co/datasets/Tevatron/msmarco-passage/resolve/main/dev.jsonl.gz",
    'dl19': "https://huggingface.co/datasets/Tevatron/msmarco-passage/resolve/main/dl19.jsonl.gz",
    'dl20': "https://huggingface.co/datasets/Tevatron/msmarco-passage/resolve/main/dl20.jsonl.gz",
}
45
-
46
-
47
class MsMarcoPassage(datasets.GeneratorBasedBuilder):
    """Dataset builder for the MS MARCO passage retrieval splits.

    Each example is a query paired with lists of positive and negative
    passages, read from gzipped JSON-lines files.
    """

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(version=VERSION,
                               description="MS MARCO passage train/dev datasets"),
    ]

    def _info(self):
        # Schema shared by positive and negative passage entries.
        passage_schema = {
            'docid': datasets.Value('string'),
            'title': datasets.Value('string'),
            'text': datasets.Value('string'),
        }
        features = datasets.Features({
            'query_id': datasets.Value('string'),
            'query': datasets.Value('string'),
            'positive_passages': [dict(passage_schema)],
            'negative_passages': [dict(passage_schema)],
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # User-supplied data files take precedence over the hosted archives.
        if self.config.data_files:
            downloaded_files = self.config.data_files
        else:
            downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)

        splits = []
        for split_name in downloaded_files:
            source = downloaded_files[split_name]
            # A split may map to a single path or a list of paths;
            # normalize to a list for the generator.
            file_list = [source] if isinstance(source, str) else source
            splits.append(
                datasets.SplitGenerator(
                    name=split_name,
                    gen_kwargs={"files": file_list},
                )
            )
        return splits

    def _generate_examples(self, files):
        """Yield (query_id, example) pairs from JSON-lines files."""
        for filepath in files:
            with open(filepath, encoding="utf-8") as f:
                for line in f:
                    data = json.loads(line)
                    # Absent or null passage lists become empty lists so the
                    # emitted schema is uniform across examples.
                    for key in ('positive_passages', 'negative_passages'):
                        if data.get(key) is None:
                            data[key] = []
                    yield data['query_id'], data