Describe where the data comes from
Browse files
README.md
CHANGED
|
@@ -17,3 +17,52 @@ configs:
|
|
| 17 |
- split: train
|
| 18 |
path: data/train-*
|
| 19 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
- split: train
|
| 18 |
path: data/train-*
|
| 19 |
---
|
| 20 |
+
|
| 21 |
+
This is a simple recipes dataset, obtained by formatting and cleaning [this one](formido/recipes-20k), which I think was made by scraping the food.com website.
|
| 22 |
+
Here's the cleanup script I used to obtain it.
|
| 23 |
+
|
| 24 |
+
```python
|
| 25 |
+
from datasets import load_dataset
|
| 26 |
+
|
| 27 |
+
def clean_recipe(recipe):
    """Turn a stringified Python list of steps into a bulleted text block.

    The source column stores steps as a string like "['step one , x', 'step two']";
    this normalizes spacing/quoting artifacts, removes the list syntax, and
    renders each step as a "- Capitalized step" line.
    """
    # Normalize scrape artifacts: space-before-comma, double quotes,
    # and escaped single quotes.
    cleaned = (
        recipe.replace(" , ", ", ")
        .replace('"', "'")
        .replace("\\'", "'")
    )
    # Peel off the surrounding [' ... '] of the stringified list.
    cleaned = cleaned.strip("\\']").strip("['")
    # Split on the inter-item separator and emit one bullet per step.
    steps = cleaned.split("', '")
    return "\n".join("- " + step.capitalize() for step in steps)
|
| 36 |
+
|
| 37 |
+
def clean_name(name):
    """Capitalize a recipe name and collapse doubled spaces.

    NOTE(review): the original line read `name.replace(" ", " ")` — a no-op,
    almost certainly an HTML-collapsed `"  "` (double space); restored to the
    evident intent of collapsing doubled spaces left by the scrape.
    """
    name = name.capitalize()
    name = name.replace("  ", " ")
    return name
|
| 41 |
+
|
| 42 |
+
def preprocess_function(examples):
    """Batched `datasets.map` callback: sanitize recipe text and names.

    `examples` is a columnar batch with an "output" column (stringified
    recipe steps) and an "input" column (recipe name) — assumed equal
    length, as `map(batched=True)` provides. Returns the cleaned columns
    under the new names "recipes" and "names".
    """
    # Comprehensions instead of the manual append loop (same result,
    # clearer and faster — PERF401).
    return {
        "recipes": [clean_recipe(recipe) for recipe in examples["output"]],
        "names": [clean_name(name) for name in examples["input"]],
    }
|
| 54 |
+
|
| 55 |
+
def split_dataset():
    """Download the source dataset, clean every row, and push it to the Hub."""
    from transformers import set_seed

    # NOTE(review): seeding looks precautionary — no randomness is visible
    # in this script; confirm whether it can be dropped.
    set_seed(42)

    raw = load_dataset("formido/recipes-20k")
    cleaned = raw.map(
        preprocess_function,
        batched=True,
        remove_columns=raw["train"].column_names,
    )
    cleaned.push_to_hub("simple_recipes")
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
if __name__ == "__main__":
    # Run the full clean-and-publish pipeline when executed as a script.
    split_dataset()
|
| 66 |
+
|
| 67 |
+
```
|
| 68 |
+
|