# syntax=docker/dockerfile:1
# llm_fastapi / Dockerfile
# Maintainer: sreejith8100 (last update: commit abbeb5a, verified)
# Base image with PyTorch 2.4.0 + CUDA 12.1 (explicitly pinned tag for reproducibility)
FROM pytorch/pytorch:2.4.0-cuda12.1-cudnn9-runtime

# System dependencies. --no-install-recommends keeps the image lean; the apt
# lists are removed in the same layer so they never persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
      git \
      wget \
    && rm -rf /var/lib/apt/lists/*

# Non-root user with a stable UID (useful for runtimes that verify runAsNonRoot).
# Pre-create /app as root and hand it to the user: under the classic builder,
# WORKDIR after USER still creates root-owned directories, which would make the
# model download below fail with "permission denied".
RUN useradd -m -u 1000 user \
    && mkdir -p /app \
    && chown -R user:user /app

# Switch to non-root user for everything that follows (pip, model download, runtime).
USER user
WORKDIR /app

# Runtime environment:
#   PATH                — user-level pip installs land in ~/.local/bin
#   HF_HOME             — Hugging Face cache root (TRANSFORMERS_CACHE is deprecated;
#                         both are set so older transformers versions still work)
#   TORCH_CUDA_ARCH_LIST — limit CUDA kernel builds to SM 8.0 (+PTX forward compat)
#   MODEL_DIR           — where the app expects the MiniCPM-V-4 weights
ENV PATH="/home/user/.local/bin:$PATH" \
    HF_HOME=/home/user/.cache/huggingface \
    TRANSFORMERS_CACHE=/home/user/.cache/huggingface \
    TORCH_CUDA_ARCH_LIST="8.0+PTX" \
    MODEL_DIR=/app/models/minicpmv

# Copy the manifest alone first so the dependency layer stays cached until
# requirements.txt itself changes. --chown so the non-root user owns the file.
COPY --chown=user:user requirements.txt .
RUN pip install --no-cache-dir --upgrade pip setuptools wheel \
    && pip install --no-cache-dir -r requirements.txt

# Download MiniCPM-V-4 at build time so the container starts without network access.
# ${MODEL_DIR} is expanded by the shell from the ENV above (single source of truth).
# NOTE(review): local_dir_use_symlinks is deprecated in huggingface_hub >= 0.23
# (real files are written to local_dir by default there); kept for older versions.
RUN python -c "from huggingface_hub import snapshot_download; \
snapshot_download('openbmb/MiniCPM-V-4', local_dir='${MODEL_DIR}', local_dir_use_symlinks=False)"

# Copy app code, owned by the runtime user. Placed after the deps/model layers
# so a source-only change does not re-download anything.
COPY --chown=user:user . .

# Documentation only (does not publish the port): uvicorn listens on 7860.
EXPOSE 7860

# Exec-form CMD: uvicorn runs as PID 1 and receives SIGTERM from `docker stop`.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]