from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
import gradio as gr
import torch
import soundfile as sf
import librosa

# load model and processor
model_name = "OthmaneJ/distil-wav2vec2"
processor = Wav2Vec2Processor.from_pretrained(model_name)
model = Wav2Vec2ForCTC.from_pretrained(model_name)

# alternative helper to read in a sound file with soundfile (unused; librosa is used below)
# def map_to_array(file):
#     speech, sample_rate = sf.read(file)
#     return speech, sample_rate


def inference(audio):
    # read in the sound file and resample to the 16 kHz rate the model expects
    speech, _ = librosa.load(audio.name, sr=16_000)
    # tokenize (batch size 1)
    input_values = processor(
        speech, sampling_rate=16_000, return_tensors="pt", padding="longest"
    ).input_values
    # retrieve logits
    logits = model(input_values).logits
    # take argmax and decode
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.batch_decode(predicted_ids)
    return transcription[0]


inputs = gr.inputs.Audio(label="Input Audio", type="file")
outputs = gr.outputs.Textbox(label="Output Text")

title = "Distilled wav2vec 2.0"
description = (
    "Gradio demo for distilled wav2vec 2.0. To use it, simply upload your audio, "
    "or click one of the examples to load it. Read more at the links below. "
    "Currently supports .wav and .flac files."
)
article = "Robust wav2vec 2.0: Analyzing Domain Shift in Self-Supervised Pre-Training | Github Repo"
examples = [["poem.wav"]]

gr.Interface(
    inference,
    inputs,
    outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch()