Update README.md
README.md CHANGED

@@ -41,7 +41,7 @@ import torchaudio
 from datasets import load_dataset
 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 
-test_dataset = load_dataset("common_voice", "fr")
+test_dataset = load_dataset("common_voice", "fr", split="test[:20%]")
 processor = Wav2Vec2Processor.from_pretrained("Nhut/wav2vec2-large-xlsr-french")
 model = Wav2Vec2ForCTC.from_pretrained("Nhut/wav2vec2-large-xlsr-french")
 resampler = torchaudio.transforms.Resample(48_000, 16_000)
@@ -108,11 +108,11 @@ result = test_dataset.map(evaluate, batched=True, batch_size=8)
 print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
 ```
 
-**Test Result**:
+**Test Result**: 29.31 %
 
 ## Training
 
 V1 of the Common Voice `train`, `validation` datasets were used for training.
 
 ## Testing
 
-V6.1 of the Common Voice `Test` dataset were used for training.
+20% of V6.1 of the Common Voice `Test` dataset was used for testing.
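The hunks above show only the edges of the README's usage snippet; the preprocessing and batched inference that produce `pred_strings` (README lines 48-107) are elided from this diff. For context, below is a minimal sketch of the standard XLSR-53 Common Voice evaluation recipe that the visible lines fit into. The `chars_to_ignore_regex` value, the `speech_file_to_array_fn` and `evaluate` helpers, and the use of `datasets.load_metric` are assumptions based on that common recipe, not lines taken from this commit.

```python
import re

import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Same 20% test split and checkpoints as in the diff above.
test_dataset = load_dataset("common_voice", "fr", split="test[:20%]")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("Nhut/wav2vec2-large-xlsr-french")
model = Wav2Vec2ForCTC.from_pretrained("Nhut/wav2vec2-large-xlsr-french")
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Assumed punctuation set stripped from references before scoring (not shown in this diff).
chars_to_ignore_regex = r'[,?.!\-;:"“]'

def speech_file_to_array_fn(batch):
    # Normalize the reference text and resample each 48 kHz clip to the model's 16 kHz.
    batch["sentence"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

def evaluate(batch):
    # Batched greedy (argmax) CTC decoding.
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)

print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```

Restricting the split to `test[:20%]`, as the commit does, keeps the evaluation pass short while still producing the WER estimate reported above (29.31 %).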