Merge branch 'main' of https://huggingface.co/datasets/joelito/MultiLegalPile_Wikipedia_Filtered into main

Files changed:
- MultiLegalPile_Wikipedia_Filtered.py (+121 -0)
- README.md (+209 -0)
- prepare_legal_data.py (+173 -0)
MultiLegalPile_Wikipedia_Filtered.py
ADDED
@@ -0,0 +1,121 @@
"""MultiLegalPile Wikipedia Filtered"""

import json

import datasets
from huggingface_hub.file_download import hf_hub_url

try:
    import lzma as xz
except ImportError:
    import pylzma as xz

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)

_CITATION = """
"""

_DESCRIPTION = """
A filtered version of the MultiLegalPile dataset, together with wikipedia articles.
"""

_URL = "https://huggingface.co/datasets/joelito/MultiLegalPile_Wikipedia_Filtered"

_LANGUAGES = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr",
              "hu", "it", "lt", "lv", "mt", "nl", "pl", "pt", "ro", "sk", "sl", "sv"]

_TYPES = ["caselaw", "contracts", "legislation", "other", "wikipedia"]

_JURISDICTIONS = ["Austria", "Belgium", "Bulgaria", "Croatia", "Czechia", "Denmark", "Estonia", "Finland",
                  "France", "Germany", "Greece", "Hungary", "Ireland", "Italy", "Latvia", "Lithuania", "Luxembourg",
                  "Malta", "Netherlands", "Poland", "Portugal", "Romania", "Slovakia", "Slovenia", "Spain", "Sweden",
                  "EU", "Switzerland", "UK", "US", "Canada", "N/A"]

# IMPORTANT: Increase this once larger datasets are available (en_caselaw has 11 shards at the moment)
_HIGHEST_NUMBER_OF_SHARDS = 11


class MultiLegalPileWikipediaFilteredConfig(datasets.BuilderConfig):
    """BuilderConfig for MultiLegalPileWikipediaFiltered."""

    def __init__(self, name: str, **kwargs):
        """BuilderConfig for MultiLegalPileWikipediaFiltered.

        Args:
            name: combination of language and type joined with "_"
                language: one of bg,cs,da,de,el,en,es,et,fi,fr,ga,hr,hu,it,lt,lv,mt,nl,pl,pt,ro,sk,sl,sv or "all"
                type: one of caselaw,contracts,legislation,other,wikipedia or "all"
            **kwargs: keyword arguments forwarded to super.
        """
        super(MultiLegalPileWikipediaFilteredConfig, self).__init__(**kwargs)
        self.name = name
        self.language = name.split("_")[0]
        self.type = name.split("_")[1]


class MultiLegalPileWikipediaFiltered(datasets.GeneratorBasedBuilder):
    """
    MultiLegalPileWikipediaFiltered:
    A filtered dataset of multilingual legal data and wikipedias in the EU languages
    """
    BUILDER_CONFIG_CLASS = MultiLegalPileWikipediaFilteredConfig

    BUILDER_CONFIGS = [MultiLegalPileWikipediaFilteredConfig(f"{language}_{type}")
                       for type in _TYPES + ["all"]
                       for language in _LANGUAGES + ["all"]]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "language": datasets.Value("string"),  # one of _LANGUAGES
                    "type": datasets.Value("string"),  # one of _TYPES
                    "jurisdiction": datasets.Value("string"),  # one of _JURISDICTIONS
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        def download_url(file_name):
            url = hf_hub_url(repo_id="joelito/MultiLegalPile_Wikipedia_Filtered",
                             filename=f"data/{file_name}.jsonl.xz", repo_type="dataset")
            return dl_manager.download(url)

        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
        types = _TYPES if self.config.type == "all" else [self.config.type]

        split_generators = []
        for split in [datasets.Split.TRAIN, datasets.Split.VALIDATION]:
            filepaths = []
            for language in languages:
                for type in types:
                    for shard in range(_HIGHEST_NUMBER_OF_SHARDS):
                        try:
                            filepaths.append(download_url(f"{language}_{type}_{split}_{shard}"))
                        except Exception:
                            break  # the download failed, so we found the last shard
            split_generators.append(
                datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": filepaths})
            )
        return split_generators

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating over all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("Generating examples from = %s", filepath)
            try:
                with xz.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                    for line in f:
                        if line:
                            example = json.loads(line)
                            if example is not None and isinstance(example, dict):
                                yield id_, example
                                id_ += 1
            except Exception:
                logger.exception("Error while processing file %s", filepath)
README.md
ADDED
@@ -0,0 +1,209 @@
---
annotations_creators:
- other
language_creators:
- found
language:
- bg
- cs
- da
- de
- el
- en
- es
- et
- fi
- fr
- ga
- hr
- hu
- it
- lt
- lv
- mt
- nl
- pl
- pt
- ro
- sk
- sl
- sv
license:
- cc-by-4.0
multilinguality:
- multilingual
paperswithcode_id: null
pretty_name: "MultiLegalPile_Wikipedia_Filtered: A filtered version of the MultiLegalPile dataset, together with wikipedia articles."
size_categories:
- 10M<n<100M
source_datasets:
- original
task_categories:
- fill-mask
---

# Dataset Card for MultiLegalPile_Wikipedia_Filtered: A filtered version of the MultiLegalPile dataset, together with wikipedia articles

## Table of Contents

- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:** [Joel Niklaus](mailto:[email protected])

### Dataset Summary

The Multi_Legal_Pile is a large-scale multilingual legal dataset suited for pretraining language models.
It spans 24 languages and four legal text types; this filtered version additionally includes Wikipedia articles in the same 24 languages.

### Supported Tasks and Leaderboards

The dataset supports the task of masked language modeling (fill-mask).

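To make the fill-mask setup concrete, here is a minimal sketch of feeding the streamed text into a masked-language-modeling collator. It assumes the `transformers` library and PyTorch are installed and uses `xlm-roberta-base` purely as an illustrative tokenizer; neither is part of this repository.

```python
from datasets import load_dataset
from transformers import AutoTokenizer, DataCollatorForLanguageModeling

# stream one configuration and tokenize it on the fly
dataset = load_dataset('joelito/MultiLegalPile_Wikipedia_Filtered', 'de_legislation',
                       split='train', streaming=True)
tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')  # illustrative choice
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)

tokenized = dataset.map(
    lambda ex: tokenizer(ex['text'], truncation=True, max_length=512),
    remove_columns=['language', 'type', 'jurisdiction', 'text'],
)
batch = collator([next(iter(tokenized))])  # one example, roughly 15% of tokens masked
print(batch['input_ids'].shape, batch['labels'].shape)
```
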
### Languages

The following 24 languages are supported:
bg, cs, da, de, el, en, es, et, fi, fr, ga, hr, hu, it, lt, lv, mt, nl, pl, pt, ro, sk, sl, sv

## Dataset Structure

The data files follow the naming scheme {language}_{text_type}_{split}_{shard}.jsonl.xz (e.g., en_caselaw_train_0.jsonl.xz).

text_type is one of the following:

- caselaw
- contracts
- legislation
- other
- wikipedia

Use the dataset like this:

```python
from datasets import load_dataset

config = 'en_contracts'  # {language}_{text_type}
dataset = load_dataset('joelito/MultiLegalPile_Wikipedia_Filtered', config, split='train', streaming=True)
```

'config' is a combination of language and text_type, e.g. 'en_contracts' or 'de_caselaw'.
To load all the languages or all the text_types, use 'all' instead of the language or the text_type (e.g., 'all_legislation' or 'de_all').

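For example, a short sketch of streaming one text type across all languages, using the 'all_legislation' config from the scheme above:

```python
from datasets import load_dataset

# stream the legislation subset for every language; the shards are streamed, not downloaded up front
dataset = load_dataset('joelito/MultiLegalPile_Wikipedia_Filtered', 'all_legislation',
                       split='train', streaming=True)
for example in dataset.take(3):  # peek at the first few documents
    print(example['language'], example['type'], example['jurisdiction'], example['text'][:100])
```
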
### Data Instances

The file format is jsonl.xz, and there is a `train` and a `validation` split available.
Since some configurations are very small or non-existent, they might not contain a train split or might not be present at all.

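Under the hood each shard is plain xz-compressed JSON lines, so a single downloaded file can also be inspected directly. A minimal sketch (the file name below is only an illustrative example):

```python
import json
import lzma

# read one compressed shard line by line; every line is one JSON document
with lzma.open('de_caselaw_train_0.jsonl.xz', 'rt', encoding='utf-8') as f:
    for line in f:
        doc = json.loads(line)
        print(doc['language'], doc['type'], doc['jurisdiction'], len(doc['text']))
        break  # only look at the first document
```
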
The complete dataset consists of five large subsets:

- [Native Multi Legal Pile](https://huggingface.co/datasets/joelito/Multi_Legal_Pile)
- [Eurlex Resources](https://huggingface.co/datasets/joelito/eurlex_resources)
- [MC4 Legal](https://huggingface.co/datasets/joelito/mc4_legal)
- [Pile of Law](https://huggingface.co/datasets/pile-of-law/pile-of-law)
- [EU Wikipedias](https://huggingface.co/datasets/joelito/EU_Wikipedias)

### Data Fields

Each example is a JSON object with the following fields:

- `language`: the two-letter language code (one of the 24 languages listed above)
- `type`: the text type (caselaw, contracts, legislation, other, or wikipedia)
- `jurisdiction`: the jurisdiction the document belongs to ("N/A" for wikipedia articles)
- `text`: the document text

### Data Splits

Each configuration provides a `validation` split (roughly the first 1,000 usable documents) and, where enough data is available, one or more `train` shards; very small configurations may only have a validation file.

## Dataset Creation

This dataset has been created by combining the following datasets:
Native Multi Legal Pile, Eurlex Resources, MC4 Legal, Pile of Law, EU Wikipedias.
It has been filtered to remove short documents (fewer than 64 whitespace-separated tokens) and documents with more than 30% punctuation or numbers (see prepare_legal_data.py for more details).

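The filter corresponds to a predicate along the following lines (a condensed sketch of `is_text_usable` from prepare_legal_data.py, using `string.punctuation` instead of the hand-written character class):

```python
import re
import string

def is_text_usable(text: str) -> bool:
    """Keep only documents that are mostly letters and long enough."""
    if not text:
        return False
    # fraction of characters that are neither punctuation nor digits
    alpha_text = re.sub(rf"[{re.escape(string.punctuation)}\d]", "", text)
    alpha_percent = len(alpha_text) / len(text)
    # drop documents with more than 30% punctuation/digits or fewer than 64 whitespace tokens
    return alpha_percent > 0.7 and len(text.split()) > 64
```

Documents failing this check were dropped before the shards were written.
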
### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

```
TODO add citation
```

### Contributions

Thanks to [@JoelNiklaus](https://github.com/joelniklaus) for adding this dataset.
prepare_legal_data.py
ADDED
@@ -0,0 +1,173 @@
# No chunks, one doc per line

# remove new lines, etc.
# create a corpus of min 200-400 GB ==> ~100B tokens
# max file size: 4GB because of huggingface
# validation set: ~100M tokens ==> 200-400MB

import glob
import json
import multiprocessing

import tqdm
import os
import re
from multiprocessing import Pool

from datasets import load_dataset
from tokenizers import normalizers

_LANGUAGES = ['bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'fi', 'fr', 'ga', 'hr',
              'hu', 'it', 'lt', 'lv', 'mt', 'nl', 'pl', 'pt', 'ro', 'sk', 'sl', 'sv']
_DOMAIN_TYPES = ['legislation', 'caselaw', 'contracts', 'other', 'wikipedia']

custom_normalizer = normalizers.NFKD()

VALIDATION_SIZE = 1_000  # ~1MB per configuration ==> some low-resource configs will only have a validation file

filtered_dir = os.path.join('data', 'filtered')
os.makedirs(filtered_dir, exist_ok=True)


def preprocess_dataset(languages=None, domain_types=None):
    lang_type_datasets = []
    # set defaults if they are not set
    if languages is None:
        languages = _LANGUAGES
    if domain_types is None:
        domain_types = _DOMAIN_TYPES

    for LANG in languages:
        for DOMAIN_TYPE in domain_types:
            try:
                if DOMAIN_TYPE == 'wikipedia':
                    # get from EU_Wikipedias
                    dataset = load_dataset("joelito/EU_Wikipedias", date="20221120", language=LANG,
                                           split='train', streaming=True, use_auth_token=True)
                else:
                    # get from Multi_Legal_Pile
                    dataset = load_dataset("joelito/Multi_Legal_Pile", f'{LANG}_{DOMAIN_TYPE}',
                                           split='train', streaming=True, use_auth_token=True)
                dataset = dataset.shuffle(seed=42, buffer_size=10_000)
                print(f'Found data for `{DOMAIN_TYPE}` in language `{LANG}`.')
            except Exception:
                print(f'There is no data for `{DOMAIN_TYPE}` in language `{LANG}`.')
                continue
            lang_type_datasets.append(dataset)
    return lang_type_datasets


def write_samples(dataset_number):
    dataset, dataset_name = dataset_number
    if len(dataset_name.split('_')) == 1:  # wikipedia configs have no underscore in their name
        language = dataset_name.split('.')[1]
        domain_type = "wikipedia"
        dataset_name = f"{language}_{domain_type}"  # reformat the config name so that we have wikipedia in the name
    else:
        language, domain_type = dataset_name.split('_')
    total_count, temp_count, all_samples, file_number = 0, 0, 0, 0
    out_file = open_file(dataset_name, file_number, "validation")  # we save the first examples to the validation set
    print(f'Processing for dataset {dataset_name} started!')
    # Read each document
    for sample in tqdm.tqdm(dataset):
        try:
            text = normalize_text(sample['text'])
            if "validation" in out_file.name and temp_count > VALIDATION_SIZE:
                # if we are saving to eval, and we have enough samples in the eval set, switch to train
                out_file.close()
                temp_count = 0
                out_file = open_file(dataset_name, file_number, "train")
            # on average approx. 2GB per file, compresses (with xz) to around ~500MB (xz: ~75% compression ratio)
            if "train" in out_file.name and temp_count > 500_000:  # err on the small side of the file size
                # if we are saving to train, and we reached the max size per file, switch to the next file
                out_file.close()
                file_number += 1
                temp_count = 0
                out_file = open_file(dataset_name, file_number, "train")
            # if the text is usable for pretraining, save it
            if is_text_usable(text):
                jurisdiction = sample.get('jurisdiction', "N/A")  # set defaults for wikipedia
                type = sample.get("type", "wikipedia")  # set defaults for wikipedia
                entry = {"language": sample["language"], "type": type, "jurisdiction": jurisdiction, "text": text}
                out_file.write(json.dumps(entry) + '\n')
                total_count += 1
                temp_count += 1
            all_samples += 1
        except Exception:
            continue

    try:
        out_file.close()
    except Exception:
        pass

    print(f'Processing for dataset {dataset_name} finished with {total_count}/{all_samples}!')
    return


def is_text_usable(text):
    # Compute percentage of alphabetical characters in relation to full sequence length
    punctuation = '!\"#$%&\'()*+,\-\./:;<=>?@\[\\\]\^_`{\|}~'
    alpha_text = re.sub(rf'[{punctuation}\d]', '', text)  # remove numbers and punctuation
    alpha_percent = len(alpha_text) / len(text)
    # Compute total chunk length
    text_length = len(text.split())
    # Ignore sequences with more than 30% punctuation/numbers or short sequences (fewer than 64 tokens)
    return alpha_percent > 0.7 and text_length > 64


def normalize_text(text):
    # Normalize the document
    text = custom_normalizer.normalize_str(text)
    # Replace multiple newlines and whitespaces
    return re.sub(r'(\n )+', r'\n ', re.sub(r'( *[\n\r]+ *)+', r'\n ', re.sub(r'[\t ]+', r' ', text)))


def open_file(dataset_name, file_number, split):
    return open(os.path.join(filtered_dir, f'{dataset_name}_{split}_{file_number}.jsonl'), 'w', encoding='utf8')


def clean_and_filter_documents():
    # Load all datasets across languages and types
    lang_type_datasets = preprocess_dataset(languages=None, domain_types=None)
    # also pass in dataset_name
    lang_type_datasets = [(dataset, dataset.config_name) for dataset in lang_type_datasets]
    print(lang_type_datasets)

    # Launch pool to preprocess datasets in parallel
    max_num_processes = min(multiprocessing.cpu_count() - 2, len(lang_type_datasets))
    num_processes = max(max_num_processes, 1)
    print(f'Launching a Pool with up to {num_processes} processes...')
    with Pool(num_processes) as pool:
        pool.map(write_samples, lang_type_datasets)

    # Compress datasets
    print(f"Compressing datasets at {filtered_dir}")
    # Do this at the end because we use multiprocessing above
    for path in glob.glob(os.path.join(filtered_dir, '*.jsonl')):
        print(f"Compressing {path}")
        os.system(f'xz -zkf -T0 {path}')  # -T0 to use multithreading
        print(f"Removing uncompressed file at {path}")
        os.system(f'rm {path}')  # remove uncompressed file to save space

    print("Finished preparing legal data")


if __name__ == '__main__':
    """
    Run with
    export PYTHONPATH=. && python prepare_legal_data.py | tee prepare_legal_data.log
    """
    clean_and_filter_documents()

# Get locally
# def get_file(LANG, DOMAIN_TYPE, split, number):
#     base_folder = "data/mlm_dataset/chunks_512"
#     return f'{base_folder}/{LANG}_{DOMAIN_TYPE}_{split}_{number}.jsonl.xz'

# files = [get_file(LANG, DOMAIN_TYPE, 'train', i) for i in range(1, 5)]
# files = [f for f in files if os.path.exists(f)]  # make sure the file actually exists
# dataset = load_dataset("json", data_files={'train': files}, split='train', streaming=True)

# TODO write dataset cards for chunked, eu wikipedia and filtered dataset