import gradio as gr
from transformers import pipeline, WhisperProcessor, WhisperForConditionalGeneration

# Load the processor (tokenizer + feature extractor) from the base checkpoint,
# configured for Lao, and the fine-tuned model weights.
model_name = "openai/whisper-medium"
processor = WhisperProcessor.from_pretrained(model_name, language="lo")
model = WhisperForConditionalGeneration.from_pretrained("LuoYiSULIXAY/whisper-lao-finetuned_laonlp")

# Build the ASR pipeline around the fine-tuned model (device=-1 runs on CPU).
asr = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    device=-1,
)

def transcribe(audio):
    # Force Lao transcription so Whisper neither auto-detects the language nor translates.
    result = asr(audio, generate_kwargs={"language": "lao", "task": "transcribe"})
    return result["text"]

demo = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath", streaming=False),  # correct usage: pass the recording as a file path
    outputs="text",
    title="Whisper Lao",
    description="Realtime demo for Lao speech recognition using a fine-tuned Whisper model.",
)

demo.launch(share=True)