#!/bin/bash
# Run ASR inference (Whisper encoder + Qwen2.5 LLM via a trained speech
# projector) over a question file, then score the answers with WER.
#
# Fail fast: abort on any command error, unset variable, or pipeline failure,
# so get_wer.py never runs against a stale/partial answer file.
set -euo pipefail

# --- English test-clean configuration (active) ---
question_file=examples/asr/test-clean/question.json
answer_file=examples/asr/test-clean/answer_full_data.json
model_path=/data1/speech/anhnmt2/Speech2Speech/LLaMA-Omni/models/llm/Qwen2.5-3B-Instruct
speech_encoder_path=/data1/speech/anhnmt2/Speech2Speech/LLaMA-Omni/models/speech_encoder/whisper-medium
speech_projector_path=/data1/speech/anhnmt2/Speech2Speech/half-streaming-speech-nlp/checkpoints/omni_whisper-medium_Qwen2.5-3B_pretrained-asr-full-data-augment-0.2/checkpoint-56000/speech_projector.bin
prompt_version=qwen

# --- Vietnamese configuration (uncomment to use instead) ---
# question_file=examples/asr/viet-bud/question.json
# answer_file=examples/asr/viet-bud/answer.json
# model_path=/data1/speech/anhnmt2/Speech2Speech/LLaMA-Omni/models/llm/Qwen2.5-1.5B-Instruct
# speech_encoder_path=/data1/speech/anhnmt2/Speech2Speech/LLaMA-Omni/models/speech_encoder/PhoWhisper-medium
# speech_projector_path=/data1/speech/thinhnt/Llama-Omni/half_streaming/checkpoints/qwen1.5B-phowhisper_medium-pretrained_continue/checkpoint-80000/speech_projector.bin

# All expansions quoted (SC2086) so paths containing spaces/globs stay intact.
python3 ../infer_asr.py \
  --model-path "$model_path" \
  --speech-encoder-path "$speech_encoder_path" \
  --speech-projector-path "$speech_projector_path" \
  --question-file "$question_file" \
  --answer-file "$answer_file" \
  --num-chunks 1 \
  --chunk-idx 0 \
  --temperature 0 \
  --conv-mode "$prompt_version" \
  --input_type mel \
  --mel_size 80

# Compute word error rate from the generated answer file.
python3 ../get_wer.py "$answer_file"