Modalities: Tabular, Text
Formats: parquet
Libraries: Datasets, Dask
LukasHug and ahmad21omar committed
Commit adc0cb6 · verified · Parent: 3d11967

Upload 19 files (#2)

- Upload 19 files (e207b808b534a78ba1d755b4817bc2e6d6018e32)

Co-authored-by: Ahmad Omar <[email protected]>

.gitattributes CHANGED
@@ -1,59 +1,59 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mds filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
- # Video files - compressed
- *.mp4 filter=lfs diff=lfs merge=lfs -text
- *.webm filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mds filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
+ # Video files - compressed
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
+ *.webm filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,412 @@
  ---
- license: cc-by-4.0
+ dataset_info:
+ - config_name: v1-All
+   features:
+   - name: id
+     dtype: int64
+   - name: prompt
+     dtype: string
+   - name: ground-truth rule
+     dtype: string
+   - name: validation program
+     dtype: string
+   - name: symbols
+     dtype: string
+   - name: curriculum level
+     dtype: int64
+   - name: curriculum tier
+     dtype: string
+   - name: rule sampling
+     dtype: string
+   - name: rule complexity
+     dtype: string
+   - name: background sampling
+     dtype: string
+   - name: problem size
+     dtype: int64
+   - name: vocabulary predicates
+     dtype: int64
+   - name: vocabulary car constants
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 902877201
+     num_examples: 18053
+   - name: validation
+     num_bytes: 9130582
+     num_examples: 200
+   - name: test
+     num_bytes: 45234132
+     num_examples: 1000
+   download_size: 194015707
+   dataset_size: 957241915
+ - config_name: v1-Basic
+   features:
+   - name: id
+     dtype: int64
+   - name: prompt
+     dtype: string
+   - name: ground-truth rule
+     dtype: string
+   - name: validation program
+     dtype: string
+   - name: symbols
+     dtype: string
+   - name: curriculum level
+     dtype: int64
+   - name: curriculum tier
+     dtype: string
+   - name: rule sampling
+     dtype: string
+   - name: rule complexity
+     dtype: string
+   - name: background sampling
+     dtype: string
+   - name: problem size
+     dtype: int64
+   - name: vocabulary predicates
+     dtype: int64
+   - name: vocabulary car constants
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 14781833
+     num_examples: 3053
+   - name: validation
+     num_bytes: 205941
+     num_examples: 50
+   - name: test
+     num_bytes: 1025404
+     num_examples: 250
+   download_size: 1913967
+   dataset_size: 16013178
+ - config_name: v1-Easy
+   features:
+   - name: id
+     dtype: int64
+   - name: prompt
+     dtype: string
+   - name: ground-truth rule
+     dtype: string
+   - name: validation program
+     dtype: string
+   - name: symbols
+     dtype: string
+   - name: curriculum level
+     dtype: int64
+   - name: curriculum tier
+     dtype: string
+   - name: rule sampling
+     dtype: string
+   - name: rule complexity
+     dtype: string
+   - name: background sampling
+     dtype: string
+   - name: problem size
+     dtype: int64
+   - name: vocabulary predicates
+     dtype: int64
+   - name: vocabulary car constants
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 53246139
+     num_examples: 5000
+   - name: validation
+     num_bytes: 533441
+     num_examples: 50
+   - name: test
+     num_bytes: 2666285
+     num_examples: 250
+   download_size: 8745354
+   dataset_size: 56445865
+ - config_name: v1-Hard
+   features:
+   - name: id
+     dtype: int64
+   - name: prompt
+     dtype: string
+   - name: ground-truth rule
+     dtype: string
+   - name: validation program
+     dtype: string
+   - name: symbols
+     dtype: string
+   - name: curriculum level
+     dtype: int64
+   - name: curriculum tier
+     dtype: string
+   - name: rule sampling
+     dtype: string
+   - name: rule complexity
+     dtype: string
+   - name: background sampling
+     dtype: string
+   - name: problem size
+     dtype: int64
+   - name: vocabulary predicates
+     dtype: int64
+   - name: vocabulary car constants
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 591818820
+     num_examples: 5000
+   - name: validation
+     num_bytes: 5954736
+     num_examples: 50
+   - name: test
+     num_bytes: 29517961
+     num_examples: 250
+   download_size: 131997522
+   dataset_size: 627291517
+ - config_name: v1-Medium
+   features:
+   - name: id
+     dtype: int64
+   - name: prompt
+     dtype: string
+   - name: ground-truth rule
+     dtype: string
+   - name: validation program
+     dtype: string
+   - name: symbols
+     dtype: string
+   - name: curriculum level
+     dtype: int64
+   - name: curriculum tier
+     dtype: string
+   - name: rule sampling
+     dtype: string
+   - name: rule complexity
+     dtype: string
+   - name: background sampling
+     dtype: string
+   - name: problem size
+     dtype: int64
+   - name: vocabulary predicates
+     dtype: int64
+   - name: vocabulary car constants
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 243030409
+     num_examples: 5000
+   - name: validation
+     num_bytes: 2436464
+     num_examples: 50
+   - name: test
+     num_bytes: 12024482
+     num_examples: 250
+   download_size: 51529927
+   dataset_size: 257491355
+ configs:
+ - config_name: v1-All
+   data_files:
+   - split: train
+     path: v1-All/train-*
+   - split: validation
+     path: v1-All/validation-*
+   - split: test
+     path: v1-All/test-*
+ - config_name: v1-Basic
+   data_files:
+   - split: train
+     path: v1-Basic/train-*
+   - split: validation
+     path: v1-Basic/validation-*
+   - split: test
+     path: v1-Basic/test-*
+ - config_name: v1-Easy
+   data_files:
+   - split: train
+     path: v1-Easy/train-*
+   - split: validation
+     path: v1-Easy/validation-*
+   - split: test
+     path: v1-Easy/test-*
+ - config_name: v1-Hard
+   data_files:
+   - split: train
+     path: v1-Hard/train-*
+   - split: validation
+     path: v1-Hard/validation-*
+   - split: test
+     path: v1-Hard/test-*
+ - config_name: v1-Medium
+   data_files:
+   - split: train
+     path: v1-Medium/train-*
+   - split: validation
+     path: v1-Medium/validation-*
+   - split: test
+     path: v1-Medium/test-*
+ tags:
+ - logic
+ - inductive
+ - reasoning
+ ---
+
+ <div style="display: flex; justify-content: flex-start;"><img src="https://raw.githubusercontent.com/ml-research/ScalableLogicalReasoning/master/images/SLR-Bench2.jpg" alt="Preview" style="width: 40vw; min-width: 300px; max-width: 600px;"> </div>
+
+ ## Dataset Description
+ - **Language(s) (NLP):** Dutch
+ - **Point of Contact:** [Lukas Helff](mailto:[email protected])
+ - **License:** [CC BY](https://creativecommons.org/licenses/by/4.0/)
+
+ # 🧠 SLR-Bench-Dutch: Scalable Logical Reasoning Benchmark (Dutch Edition)
+ [![Eval & Reward Model](https://img.shields.io/badge/%F0%9F%A4%96%20Reward%20Model-HF-blueviolet)](https://huggingface.co/spaces/AIML-TUDA/VerifiableRewardsForScalableLogicalReasoning)
+ [![GitHub](https://img.shields.io/badge/Code-GitHub-blue)](https://github.com/ml-research/ScalableLogicalReasoning)
+ [![arXiv](https://img.shields.io/badge/arXiv-2506.15787-b31b1b.svg)](https://arxiv.org/abs/2506.15787)
+
+
+ ## SLR-Bench Multilingual Versions
+ [![SLR-Bench 🇬🇧](https://img.shields.io/badge/SLR--Bench-English-orange)](https://huggingface.co/datasets/AIML-TUDA/SLR-Bench)
+ [![SLR-Bench 🇩🇪](https://img.shields.io/badge/SLR--Bench-German-red)](https://huggingface.co/datasets/AIML-TUDA/SLR-Bench-German)
+ [![SLR-Bench 🇪🇸](https://img.shields.io/badge/SLR--Bench-Spanish-yellow)](https://huggingface.co/datasets/AIML-TUDA/SLR-Bench-Spanish)
+ [![SLR-Bench 🇫🇷](https://img.shields.io/badge/SLR--Bench-French-blue)](https://huggingface.co/datasets/AIML-TUDA/SLR-Bench-French)
+ [![SLR-Bench 🇵🇹](https://img.shields.io/badge/SLR--Bench-Portuguese-darkred)](https://huggingface.co/datasets/AIML-TUDA/SLR-Bench-Portuguese)
+ [![SLR-Bench 🇮🇹](https://img.shields.io/badge/SLR--Bench-Italian-darkblue)](https://huggingface.co/datasets/AIML-TUDA/SLR-Bench-Italian)
+ [![SLR-Bench 🇳🇱](https://img.shields.io/badge/SLR--Bench-Dutch-darkorange)](https://huggingface.co/datasets/AIML-TUDA/SLR-Bench-Dutch)
+
+ **SLR-Bench-Dutch** is the **Dutch-language counterpart** of the original [**SLR-Bench**](https://huggingface.co/datasets/AIML-TUDA/SLR-Bench) dataset.
+ It follows the same symbolic structure, evaluation framework, and curriculum as the English version but provides all **natural-language task prompts translated into Dutch**.
+
+ This enables systematic evaluation and training of Large Language Models (LLMs) on logical reasoning tasks posed in Dutch, supporting both *multilingual reasoning* and *cross-lingual generalization* research.
+
+ ## Dataset Overview
+ - **Curriculum:** 20 complexity levels, grouped into 4 broad tiers (basic, easy, medium, hard)
+ - **Tasks:** >19,000, each comprising a *natural-language* prompt, an executable *validation program* for automatic evaluation, and a *latent ground-truth rule*.
+ - **Application:** SLR-Bench can be used to evaluate conventional and reasoning LLMs (e.g., GPT-4o, Llama-3, Gemini, DeepSeek-R1) and to train models via curriculum learning.
+
+
+ ## Key Features of SLR
+
+ - 🔨 **Automatic Task Generation:** Synthesize new inductive reasoning tasks with controllable complexity, novel logic rules, and natural language prompts—no need for human annotation.
+ - 🧩 **Programmable & Scalable:** Specify your own logic vocabulary, grammar, rule distributions, and task parameters; supports curriculum-style scaling and out-of-distribution task creation.
+ - 🧠 **Symbolic, Automated Evaluation:** Deterministically verify LLM outputs via the validation program, not MCQA, LLM judge, or exact matching.
+ - 📈 **Curriculum Learning:** Use SLR-Bench, a structured 20-level benchmark, for evaluating and training models across a span of logical challenges.
+
+ ---
+
+ ## Quick Start
+
+ ### Loading the Dataset
+ ```python
+ from datasets import load_dataset
+ # Load SLR-Bench test split
+ ds = load_dataset("AIML-TUDA/SLR-Bench-Dutch", "v1-All", split="test")
+ ```
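+
+ The tier-specific configs (`v1-Basic`, `v1-Easy`, `v1-Medium`, `v1-Hard`) can be loaded the same way. A minimal sketch for pulling a single tier and inspecting one task (column names follow the Dataset Columns table below):
+
+ ```python
+ from datasets import load_dataset
+
+ # Load only the Basic tier (a much smaller download than v1-All)
+ basic = load_dataset("AIML-TUDA/SLR-Bench-Dutch", "v1-Basic", split="train")
+
+ # Inspect one task: Dutch prompt, latent ground-truth rule, and curriculum metadata
+ example = basic[0]
+ print(example["curriculum tier"], example["curriculum level"])
+ print(example["prompt"][:300])        # natural-language task prompt (in Dutch)
+ print(example["ground-truth rule"])   # latent Prolog rule that solves the task
+ ```
+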
+ ### Evaluate using SLR-Bench
+ Requires the [`evaluate`](https://huggingface.co/docs/evaluate/) library and a Prolog interpreter installed on your system (e.g., [SWI-Prolog](https://www.swi-prolog.org/)).
+ Install the required dependencies via:
+
+ ```bash
+ pip install evaluate
+ sudo apt-get install swi-prolog
+ ```
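+
+ If you want a quick sanity check that the interpreter is visible to Python before running the judge, a minimal sketch (assumes you installed SWI-Prolog as above, so the `swipl` binary is on your PATH):
+
+ ```python
+ import shutil
+ import subprocess
+
+ # The symbolic judge needs a system-wide Prolog interpreter; here we check for SWI-Prolog
+ assert shutil.which("swipl") is not None, "SWI-Prolog (swipl) not found on PATH"
+ print(subprocess.run(["swipl", "--version"], capture_output=True, text=True).stdout.strip())
+ ```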
+
+ #### Example Usage
+
+ ```python
+ from evaluate import load
+ symbolic_judge = load("AIML-TUDA/VerifiableRewardsForScalableLogicalReasoning")
+ rules = ds["ground-truth rule"]  # For demo only—use model predictions in practice
+ references = [
+     {
+         "validation_program": p,
+         "evaluation_config": {
+             "positive_predicate": "oost",
+             "negative_predicate": "west"
+         }
+     } for p in ds["validation program"]
+ ]
+
+ results = symbolic_judge.compute(predictions=rules, references=references)
+ print(results)
+ ```
+
+ *Note: For real evaluation, replace `rules` with your model's predicted rules. Here, we use ground-truth rules for demonstration only.*
+
+ Example results:
+ ```python
+ {'accuracy': 1.0,
+  'partial_score': 1.0,
+  'syntax_score': 1.0,
+  'detailed_results': [{'is_correct': True, 'partial_score': 1.0, 'syntax_valid': True, 'error': None, 'exec_time1': 0.014362812042236328},
+                       {'is_correct': True, 'partial_score': 1.0, 'syntax_valid': True, 'error': None, 'exec_time1': 0.012364625930786133}]
+ }
+ ```
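+
+ In a real evaluation the predictions come from your model rather than from the ground-truth column. A minimal sketch of that loop is shown below; `my_llm` is a hypothetical helper standing in for whatever model or API call you use, not part of this dataset or the judge:
+
+ ```python
+ # my_llm(prompt) is a placeholder: it should return one candidate Prolog rule (a string) per task prompt
+ predictions = [my_llm(prompt) for prompt in ds["prompt"]]
+
+ results = symbolic_judge.compute(predictions=predictions, references=references)
+ print(f"accuracy={results['accuracy']:.3f}, partial_score={results['partial_score']:.3f}")
+ ```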
+
+ ---
+
+ ## **Dataset Columns**
+
+ | Column Name | Type | Description |
+ |------------------------------|----------|--------------------------------------------------------------------------------------------------------------------|
+ | **id** | `int64` | Unique identifier for each dataset entry (row). |
+ | **prompt** | `string` | The instruction prompt of the logical reasoning task. |
+ | **ground-truth rule** | `string` | The latent logical rule that solves the given task. |
+ | **validation program** | `string` | The executable logic program used by the symbolic judge to verify candidate model solutions for the task. |
+ | **symbols** | `string` | Symbolic representation of the background knowledge. |
+ | **curriculum level** | `int64` | The specific level (1-20) in the SLR-Bench curriculum that this task belongs to, reflecting difficulty. |
+ | **curriculum tier** | `string` | The broader difficulty tier grouping multiple levels (e.g., "basic", "easy", "medium", "hard"). |
+ | **rule sampling** | `string` | The policy or method used to generate the ground-truth rule (e.g., "uniform", "llm-guided"). |
+ | **rule complexity** | `string` | The length of the logic rule, i.e., the number of predicates used, excluding the has_car predicate. |
+ | **background sampling** | `string` | The policy used to sample background knowledge for the task (e.g., "mirror", "uniform"). |
+ | **problem size** | `int64` | Total number of labeled examples (positive + negative) provided in the task instance. |
+ | **vocabulary predicates** | `int64` | Number of unique predicate symbols available in the vocabulary for constructing rules and background knowledge. |
+ | **vocabulary car constants** | `string` | List of car constant symbols (e.g., "car1", "car2", ...) available in the vocabulary for the task. |
+
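+ The metadata columns make it straightforward to slice the benchmark. A small sketch using pandas, assuming `ds` is the split loaded in the Quick Start (`Dataset.to_pandas()` is part of the `datasets` library):
+
+ ```python
+ # Distribution of tasks over curriculum tiers and problem sizes
+ df = ds.to_pandas()
+ print(df["curriculum tier"].value_counts())
+ print(df.groupby("curriculum tier")["problem size"].describe())
+ ```
+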
+
+ ---
+ ## SLR-Bench Curriculum
+
+ | Stage | Level | #Consts | #Preds | κ (Problem Size) | Bπ (Background) | Rlen (Rule len) | Rsample (Rule Sample) | Comb. Size |
+ | --------- | ----- | ------- | ------ | ---------------- | --------------- | --------------- | --------------------- | ---------------- |
+ | **Basic** | 1 | 1 | 5 | 2 | mirror | 1 | uniform | 10³ |
+ | | 2 | 1 | 5 | 2 | mirror | 1-2 | uniform | 10³ |
+ | | 3 | 1 | 5 | 4 | mirror | 1-2 | uniform | 10⁵ |
+ | | 4 | 2 | 5 | 4 | mirror | 1-2 | uniform | 10¹⁰ |
+ | | 5 | 2 | 5 | 6 | mirror | 1-2 | uniform | 10¹⁶ |
+ | **Easy** | 6 | 2 | 5 | 6 | uniform | 1-2 | uniform/llm | 10¹⁶ |
+ | | 7 | 2 | 6 | 6 | uniform | 1-2 | uniform/llm | 10²⁴ |
+ | | 8 | 2-3 | 6 | 8 | uniform | 1-2 | uniform/llm | 10³² |
+ | | 9 | 2-3 | 6 | 10 | uniform | 2-3 | uniform/llm | 10⁴⁰ |
+ | | 10 | 2-3 | 7 | 12 | uniform | 2-3 | uniform/llm | 10⁵⁵ |
+ | **Medium** | 11 | 2-4 | 7 | 14 | uniform | 2-3 | uniform/llm | 10⁶⁵ |
+ | | 12 | 2-4 | 9 | 16 | uniform | 3-4 | uniform/llm | 10¹²⁰ |
+ | | 13 | 4-6 | 9 | 18 | uniform | 3-4 | uniform/llm | 10²⁷¹ |
+ | | 14 | 4-6 | 9 | 20 | uniform | 4-5 | uniform/llm | 10³⁰⁰ |
+ | | 15 | 4-6 | 9 | 22 | uniform | 4-5 | uniform/llm | 10³³⁰ |
+ | **Hard** | 16 | 5-6 | 10 | 24 | uniform | 4-5 | uniform/llm | 10⁵⁰⁷ |
+ | | 17 | 5-6 | 10 | 26 | uniform | 4-5 | uniform/llm | 10⁵⁴⁹ |
+ | | 18 | 5-6 | 12 | 28 | uniform | 4-5 | uniform/llm | 10⁸⁰⁵ |
+ | | 19 | 5-6 | 12 | 30 | uniform | 5 | uniform/llm | 10⁸⁶¹ |
+ | | 20 | 5-6 | 12 | 32 | uniform | 5 | uniform/llm | 10⁹¹⁹ |
+
+ *SLR-Bench Curriculum: level-wise configurations, detailing the language and task parameters for each difficulty stage. Language complexity is systematically increased by expanding the number of car constants and predicates; task difficulty grows by adapting the problem size, background sampling, rule length, and rule-sampling strategy. The final column reports the approximate combinatorial size of unique tasks available at each level.*
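+
+ For curriculum-style training, one simple approach is to present the levels in ascending order. A minimal sketch using the `datasets` filtering API (illustrative only; the actual training step is left out):
+
+ ```python
+ from datasets import load_dataset
+
+ train = load_dataset("AIML-TUDA/SLR-Bench-Dutch", "v1-All", split="train")
+
+ # Iterate over curriculum levels from easiest to hardest
+ for level in sorted(set(train["curriculum level"])):
+     stage = train.filter(lambda ex, lvl=level: ex["curriculum level"] == lvl)
+     print(f"level {level:2d}: {stage.num_rows} tasks")
+     # ... fine-tune on `stage` here before moving on to the next level
+ ```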
+
+ ---
+
+
+ ## Licensing Information
+
+ SLR-Bench is made available under the [CC BY](https://creativecommons.org/licenses/by/4.0/) license.
+
+
+ ## Citation
+
+ If you use this dataset or framework, please cite:
+
+ ```bibtex
+ @incollection{helff2025slrautomatedsynthesisscalable,
+       title={SLR: Automated Synthesis for Scalable Logical Reasoning},
+       author={Lukas Helff and Ahmad Omar and Felix Friedrich and Antonia Wüst and Hikaru Shindo and Rupert Mitchell and Tim Woydt and Patrick Schramowski and Wolfgang Stammer and Kristian Kersting},
+       year={2025},
+       booktitle={Working Notes of the NeurIPS Workshop on Foundations of Reasoning in Language Models},
+       url={https://arxiv.org/abs/2506.15787},
+ }
+ ```
+
  ---
v1-All/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7b16b2a76cdcec33011121fc87ff1acb84e7a0ebc0692315f16fd143fb32161
+ size 9137521
v1-All/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:053b8a843100ddeb84c4450a43f66c78d53f7cb41f6779a72f181aa7ef4afcfd
+ size 13702818
v1-All/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4b4ba3c78c59ada6a6dddfe7345c433611284d0e7c88f36776a2ea732b297f6
+ size 169316165
v1-All/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3781001fcbb25ed00609b123cd24a7af54b52b3a63aa8cd767051b2715eb9894
+ size 1859203
v1-Basic/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da5fd9c85cd3aa571c744d0aa68f94431b0c6d3c733fd9c1fd2ea8383f4b64e2
+ size 132948
v1-Basic/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c53e67d24cbc940ca4422f625c47244bb3ef54f50a90f280a60c178f2a164a4f
+ size 1728871
v1-Basic/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a9581276d29ebe7f51aaab3d7f8534f51980a6a0385494b9c649ff4b28e66c3
+ size 52148
v1-Easy/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f73d4acff094b6cf97673d56cbc0bf4105de1547c9c56b1d59b2d28d0ade9804
+ size 439068
v1-Easy/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2305cec92e1549bd4f3c37d7e6897e4c4a1d4dac96f2e697ea75df9c665176f3
+ size 8194586
v1-Easy/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9925fda4e8b45bae3264ea033c5f39b6d956646d0d0cc61d258f28a495ddabc0
+ size 111700
v1-Hard/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6848aa1f0f1075a05c7f7945afc786ffab5632e82fa2e9eaa1b20bdd298f68f
+ size 6217725
v1-Hard/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8ccc1df8cd0fdecc005c3ba8c85d7c7c87202a1bc64974c87ae9bc07b28b3fd
+ size 52237224
v1-Hard/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8c082c79298c3fe7d152097fde1cd9b34202f8e30d19d9049c47c326a86d57d
+ size 72282126
v1-Hard/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14b1cadaa3a49cd538f8fbc0c86ab195d38b13c55bca7679fc806fe59b7d81ff
+ size 1260447
v1-Medium/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:121c6ec15fd25b45cf60a773a5c87aa66d7ed426863a25a6138cc67b1218d3be
+ size 2420118
v1-Medium/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2f41953b597a5d5e985e42bced12857780546ccd2859bd7b32e33c0f4fc30ea
+ size 48601407
v1-Medium/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6514fc587d1b994e58d2d95f8f43173f0ad7feff530e27640d09f31020030135
+ size 508402