yuvalkirstain committed on
Commit
b7a4e16
·
1 Parent(s): 98850b9

Upload dataset_infos.json

Browse files
Files changed (1) hide show
  1. dataset_infos.json +1 -1
dataset_infos.json CHANGED
@@ -1 +1 @@
1
- {"yuvalkirstain--scrolls_t5": {"description": "\nSCROLLS: Standardized CompaRison Over Long Language Sequences.\nA suite of natural language datasets that require reasoning over long texts.\nhttps://scrolls-benchmark.com/\n\nQasper (Dasigi et al., 2021) is a question answering dataset over NLP papers filtered from the Semantic Scholar Open Research Corpus (S2ORC).\nQuestions were written by NLP practitioners after reading only the title and abstract of the papers, \nwhile another set of NLP practitioners annotated the answers given the entire document.\nQasper contains abstractive, extractive, and yes/no questions, as well as unanswerable ones.", "citation": "\n@inproceedings{dasigi-etal-2021-dataset,\n title = \"A Dataset of Information-Seeking Questions and Answers Anchored in Research Papers\",\n author = \"Dasigi, Pradeep and\n Lo, Kyle and\n Beltagy, Iz and\n Cohan, Arman and\n Smith, Noah A. and\n Gardner, Matt\",\n booktitle = \"Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies\",\n month = jun,\n year = \"2021\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2021.naacl-main.365\",\n doi = \"10.18653/v1/2021.naacl-main.365\",\n pages = \"4599--4610\",\n abstract = \"Readers of academic research papers often read with the goal of answering specific questions. Question Answering systems that can answer those questions can make consumption of the content much more efficient. However, building such tools requires data that reflect the difficulty of the task arising from complex reasoning about claims made in multiple parts of a paper. In contrast, existing information-seeking question answering datasets usually contain questions about generic factoid-type information. We therefore present Qasper, a dataset of 5049 questions over 1585 Natural Language Processing papers. 
Each question is written by an NLP practitioner who read only the title and abstract of the corresponding paper, and the question seeks information present in the full text. The questions are then answered by a separate set of NLP practitioners who also provide supporting evidence to answers. We find that existing models that do well on other QA tasks do not perform well on answering these questions, underperforming humans by at least 27 F1 points when answering them from entire papers, motivating further research in document-grounded, information-seeking QA, which our dataset is designed to facilitate.\",\n}\n\n@article{ TODO citation here\n}\nNote that each SCROLLS dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n", "homepage": "https://allenai.org/project/qasper", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "pid": {"dtype": "string", "id": null, "_type": "Value"}, "input": {"dtype": "string", "id": null, "_type": "Value"}, "output": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": null, "config_name": null, "version": null, "splits": {"train": {"name": "train", "num_bytes": 6349735, "num_examples": 2567, "dataset_name": "scrolls_t5"}, "validation": {"name": "validation", "num_bytes": 4286597, "num_examples": 1726, "dataset_name": "scrolls_t5"}, "test": {"name": "test", "num_bytes": 4286597, "num_examples": 1726, "dataset_name": "scrolls_t5"}}, "download_checksums": null, "download_size": 3387382, "post_processing_size": null, "dataset_size": 14922929, "size_in_bytes": 18310311}}
 
1
+ {"yuvalkirstain--scrolls_t5": {"description": "\nSCROLLS: Standardized CompaRison Over Long Language Sequences.\nA suite of natural language datasets that require reasoning over long texts.\nhttps://scrolls-benchmark.com/\n\nSummScreenFD (Chen et al., 2021) is a summarization dataset in the domain of TV shows (e.g. Friends, Game of Thrones).\nGiven a transcript of a specific episode, the goal is to produce the episode's recap.\nThe original dataset is divided into two complementary subsets, based on the source of its community contributed transcripts. \nFor SCROLLS, we use the ForeverDreaming (FD) subset, as it incorporates 88 different shows, \nmaking it a more diverse alternative to the TV MegaSite (TMS) subset, which has only 10 shows. \nCommunity-authored recaps for the ForeverDreaming transcripts were collected from English Wikipedia and TVMaze.", "citation": "\n@misc{chen2021summscreen,\n title={SummScreen: A Dataset for Abstractive Screenplay Summarization}, \n author={Mingda Chen and Zewei Chu and Sam Wiseman and Kevin Gimpel},\n year={2021},\n eprint={2104.07091},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n\n@article{ TODO citation here\n}\nNote that each SCROLLS dataset has its own citation. 
Please see the source to\nget the correct citation for each contained dataset.\n", "homepage": "https://github.com/mingdachen/SummScreen", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "pid": {"dtype": "string", "id": null, "_type": "Value"}, "input": {"dtype": "string", "id": null, "_type": "Value"}, "output": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": null, "config_name": null, "version": null, "splits": {"train": {"name": "train", "num_bytes": 8598272, "num_examples": 3673, "dataset_name": "scrolls_t5"}, "validation": {"name": "validation", "num_bytes": 800298, "num_examples": 338, "dataset_name": "scrolls_t5"}, "test": {"name": "test", "num_bytes": 800298, "num_examples": 338, "dataset_name": "scrolls_t5"}}, "download_checksums": null, "download_size": 6358446, "post_processing_size": null, "dataset_size": 10198868, "size_in_bytes": 16557314}}