#!/bin/bash
# omni_speech/infer/scripts/infer_llm_lora_multiturn.sh
# Multi-turn evaluation input and output files
question_file=examples/multiturn/question.json
answer_file=examples/multiturn/answer.json

# Base LLM and the LoRA-finetuned speech checkpoint built on top of it
model_base=/data1/speech/anhnmt2/Speech2Speech/LLaMA-Omni/models/llm/Qwen2.5-3B-Instruct
model_path=/data1/speech/anhnmt2/Speech2Speech/half-streaming-speech-nlp/checkpoints/omni_whisper-medium_Qwen2.5-3B_pretrained-sft-lora

# Conversation template used at inference time
prompt_version=qwen
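
# Optional sanity check (not part of the original script): the model paths
# above are machine-specific absolute paths, so fail early with a clear
# message if either directory is missing on this machine.
for dir in "$model_base" "$model_path"; do
    if [ ! -d "$dir" ]; then
        echo "Missing model directory: $dir" >&2
        exit 1
    fi
done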
# Run multi-turn LLM inference with greedy decoding (temperature 0),
# 80-bin mel-spectrogram input, and the LoRA adapter merged at load time
python3 infer_llm_multiturn.py \
    --model-path "$model_path" \
    --model-base "$model_base" \
    --question-file "$question_file" \
    --answer-file "$answer_file" \
    --num-chunks 1 \
    --chunk-idx 0 \
    --temperature 0 \
    --conv-mode "$prompt_version" \
    --input_type mel \
    --mel_size 80 \
    --is_lora