# Alternative: serve the hosted model directly from the Hugging Face Hub
# import gradio as gr
# gr.load("models/openai/whisper-large-v3").launch()

import gradio as gr
import whisper

# Load the Whisper model once at startup
model = whisper.load_model("large")

def transcribe_audio(mic_audio, file_audio):
    # Gradio passes one argument per input component; use whichever
    # audio source was provided (microphone takes priority)
    audio_path = mic_audio or file_audio
    if audio_path is None:
        return ""
    # Load the audio file
    audio = whisper.load_audio(audio_path)
    # Transcribe the audio using Whisper
    result = model.transcribe(audio)
    return result["text"]

# Create a Gradio interface with both microphone and file upload inputs
interface = gr.Interface(
    fn=transcribe_audio,
    inputs=[
        # Gradio 4.x expects a "sources" list; older releases used source="..."
        gr.Audio(sources=["microphone"], type="filepath", label="Record using Microphone"),
        gr.Audio(sources=["upload"], type="filepath", label="Upload Audio File"),
    ],
    outputs="text",
    live=True,
    description="Speak into your microphone or upload an audio file to see the transcription in real time."
)

# Launch the Gradio app
interface.launch()
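
If two separate inputs are not required, a single gr.Audio component can accept both microphone recordings and uploads. The sketch below is a minimal alternative, assuming Gradio 4.x (where sources takes a list) and the same openai-whisper package; the function name transcribe and the labels are illustrative, and the file path is passed straight to model.transcribe, which loads the audio itself (ffmpeg must be installed).

# Minimal alternative sketch: one Audio component covering both sources
import gradio as gr
import whisper

model = whisper.load_model("large")

def transcribe(audio_path):
    # audio_path is None until the user records or uploads something
    if audio_path is None:
        return ""
    # model.transcribe accepts a file path and handles loading internally
    result = model.transcribe(audio_path)
    return result["text"]

gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources=["microphone", "upload"], type="filepath",
                    label="Record or upload audio"),
    outputs="text",
    description="Speak into your microphone or upload an audio file to get a transcription."
).launch()

Collapsing the inputs this way also keeps the handler to a single parameter, avoiding any mismatch between the number of input components and the function signature.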