#!/usr/bin/env bash
# Select the serving configuration for the eval-agent model server.
#   CONFIG=0 -> fine-tuned checkpoint (default)
#   CONFIG=1 -> base Qwen2.5-3B-Instruct model
set -euo pipefail

# Prompt interactively; `|| true` lets the script proceed when stdin is
# closed (non-interactive use), in which case CONFIG falls back to 0.
read -r -p "Enter config (0 for fine-tuned, 1 for base): " CONFIG || true
export CONFIG="${CONFIG:-0}"

if [[ "${CONFIG}" -eq 0 ]]; then
  export CUDA_VISIBLE_DEVICES=1
  # Fine-tuned checkpoint (alt HF id: ea-dev/eval-agent_qwen2.5-3b_instruct_ckpt471_base)
  export MODEL_NAME="/home/data2/sltian/code/evaluation_agent_dev/LLaMA-Factory/saves/qwen2.5-3b/eval-agent-vbench-base-table"
  export PORT=12333
else
  export CUDA_VISIBLE_DEVICES=2
  export MODEL_NAME="qwen/Qwen2.5-3B-Instruct"
  export PORT=12334
fi

# Same GPU memory budget for both configurations (hoisted out of the branches).
export GPU_MEMORY_UTILIZATION=0.7
# Launch the selected Qwen2.5-3B eval-agent model server via vLLM's
# OpenAI-compatible API endpoint. Expects MODEL_NAME, PORT, and
# GPU_MEMORY_UTILIZATION to be set by the config-selection step above.
# NOTE: message no longer hardcodes "fine-tuned" — this path also serves
# the base model when CONFIG=1.
echo "Starting Qwen2.5-3B eval agent server on 0.0.0.0:${PORT}..."
echo "Model: ${MODEL_NAME}"
echo "GPU Memory Utilization: ${GPU_MEMORY_UTILIZATION}"
# --trust-remote-code is a boolean switch; passing a trailing "true" is
# rejected as an unrecognized argument by argparse store_true handling.
python -m vllm.entrypoints.openai.api_server \
  --model "${MODEL_NAME}" \
  --host 0.0.0.0 \
  --port "${PORT}" \
  --gpu-memory-utilization "${GPU_MEMORY_UTILIZATION}" \
  --trust-remote-code \
  --max-model-len 16384 \
  --served-model-name eval-agent