Datasets:
Tasks: Text Generation
Modalities: Text
Sub-tasks: language-modeling
Languages: English
Size: 10K - 100K
ArXiv:
Tags: question-generation
License:
update
- .gitattributes +31 -0
- data/processed/dev00.jsonl +0 -0
- data/processed/dev01.jsonl +0 -0
- data/processed/dev02.jsonl +0 -0
- data/processed/dev03.jsonl +0 -0
- data/processed/test00.jsonl +0 -0
- data/processed/test01.jsonl +0 -0
- data/processed/test02.jsonl +0 -0
- data/processed/test03.jsonl +0 -0
- data/processed/train00.jsonl +0 -0
- data/processed/train01.jsonl +0 -0
- data/processed/train02.jsonl +0 -0
- data/processed/train03.jsonl +0 -0
- data/processed/train04.jsonl +0 -0
- data/processed/train05.jsonl +0 -0
- data/processed/train06.jsonl +0 -0
- data/processed/train07.jsonl +0 -0
- data/processed/train08.jsonl +0 -0
- data/processed/train09.jsonl +0 -0
- data/processed/train10.jsonl +0 -0
- data/processed/train11.jsonl +0 -0
- data/processed/train12.jsonl +0 -0
- data/processed/train13.jsonl +0 -0
- data/processed/train14.jsonl +0 -0
- data/processed/train15.jsonl +0 -0
- data/processed/train16.jsonl +0 -0
- data/processed/train17.jsonl +0 -0
- data/processed/train18.jsonl +0 -0
- data/processed/train19.jsonl +0 -0
- data/processed/train20.jsonl +0 -0
- data/processed/train21.jsonl +0 -0
- data/processed/train22.jsonl +0 -0
- qg_squad.py +2 -2
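
Note: the file list above shows the processed data sharded into numbered JSONL chunks per split (dev00-dev03, test00-test03, train00-train22). The Python sketch below shows one way such chunks could be grouped back into splits by filename prefix. It is illustrative only and is not the repository's actual loading code; the function name and directory argument are assumptions.

from glob import glob
import os

def group_split_files(data_dir="data/processed"):
    # Map split name -> sorted list of chunk paths, keyed by filename prefix.
    groups = {"train": [], "dev": [], "test": []}
    for path in sorted(glob(os.path.join(data_dir, "*.jsonl"))):
        name = os.path.basename(path)
        for split in groups:
            if name.startswith(split):
                groups[split].append(path)
    return groups

# Example: group_split_files() -> {"train": [23 paths], "dev": [4 paths], "test": [4 paths]}
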
.gitattributes CHANGED
@@ -35,3 +35,34 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.mp3 filter=lfs diff=lfs merge=lfs -text
 *.ogg filter=lfs diff=lfs merge=lfs -text
 *.wav filter=lfs diff=lfs merge=lfs -text
+data/processed/train21.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/dev01.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train03.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train08.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train17.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/dev03.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/test02.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train07.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train15.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train16.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train20.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/test00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train02.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train06.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train12.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train18.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train11.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train19.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train04.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train05.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train09.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train10.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/dev02.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/test01.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train01.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train22.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/dev00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/test03.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train13.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train14.jsonl filter=lfs diff=lfs merge=lfs -text
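
Note: the hunk above registers each processed JSONL file individually for Git LFS; a single pattern line (data/processed/*.jsonl filter=lfs diff=lfs merge=lfs -text) would cover the same files, and the per-file form is roughly what running git lfs track once per file emits. The Python sketch below only illustrates generating such lines; the helper name is an assumption, not part of this commit.

from glob import glob

def lfs_attribute_lines(pattern="data/processed/*.jsonl"):
    # One .gitattributes entry per matched file, marking it as LFS-tracked.
    return [f"{path} filter=lfs diff=lfs merge=lfs -text" for path in sorted(glob(pattern))]

# Appending them to .gitattributes (roughly what per-file git lfs track does):
# with open(".gitattributes", "a", encoding="utf-8") as fh:
#     fh.write("\n".join(lfs_attribute_lines()) + "\n")
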
data/processed/dev00.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/dev01.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/dev02.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/dev03.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/test00.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/test01.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/test02.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/test03.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train00.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train01.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train02.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train03.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train04.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train05.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train06.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train07.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train08.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train09.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train10.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train11.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train12.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train13.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train14.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train15.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train16.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train17.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train18.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train19.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train20.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train21.jsonl CHANGED (diff too large to render; see raw diff)
data/processed/train22.jsonl CHANGED (diff too large to render; see raw diff)
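
Note: the split files above are LFS-tracked JSONL, so their diffs are not rendered here. The sketch below shows a minimal way to read one chunk, assuming each line is a standalone JSON object carrying the fields declared in qg_squad.py further down (answer, paragraph_question, question, sentence, paragraph); the path and function name are illustrative only.

import json

def read_jsonl(path="data/processed/dev00.jsonl"):
    # Each non-empty line is one JSON record.
    records = []
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                records.append(json.loads(line))
    return records

# Example: read_jsonl()[0].get("paragraph_question")
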
qg_squad.py CHANGED
@@ -2,7 +2,7 @@ import json
 import datasets
 
 logger = datasets.logging.get_logger(__name__)
-_VERSION = "
+_VERSION = "5.0.0"
 _NAME = "qg_squad"
 _CITATION = """
 @inproceedings{ushio-etal-2022-generative,
@@ -52,7 +52,7 @@ class QGSquad(datasets.GeneratorBasedBuilder):
             description=_DESCRIPTION,
             features=datasets.Features(
                 {
-                    "answer": datasets.Value("string"),
+                    "answer": datasets.Value("string"), "paragraph_question": datasets.Value("string"),
                     "question": datasets.Value("string"),
                     "sentence": datasets.Value("string"),
                     "paragraph": datasets.Value("string"),
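
Note: this change bumps _VERSION to "5.0.0" and adds a "paragraph_question" column to the feature schema, appended on the same line as "answer". Below is a sketch of the resulting schema with the new key on its own line, showing only the columns visible in this hunk (the file may declare more), plus a load example; the Hub repository ID "lmqg/qg_squad" is an assumption and does not appear in this diff.

import datasets

# Feature schema after the change, restricted to the columns shown in the hunk.
features = datasets.Features(
    {
        "answer": datasets.Value("string"),
        "paragraph_question": datasets.Value("string"),
        "question": datasets.Value("string"),
        "sentence": datasets.Value("string"),
        "paragraph": datasets.Value("string"),
    }
)

# dataset = datasets.load_dataset("lmqg/qg_squad")  # repository ID assumed
# print(dataset["train"][0]["paragraph_question"])
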