---
# Docker Compose stack for BackgroundFX Pro.
# Profiles: (default) GPU app + redis; "cpu" dev variant; "production" nginx;
# "setup" one-shot model download; "monitoring" prometheus + grafana.
# NOTE(review): `version` is obsolete under Compose v2 (ignored with a warning);
# retained for compatibility with older docker-compose v1 installs.
version: '3.8'

services:
  # Main application with GPU support
  backgroundfx-gpu:
    build:
      context: ..
      dockerfile: docker/Dockerfile
    image: backgroundfx-pro:gpu
    container_name: backgroundfx-gpu
    runtime: nvidia
    environment:
      - NVIDIA_VISIBLE_DEVICES=0
      - CUDA_VISIBLE_DEVICES=0
      - GRADIO_SERVER_NAME=0.0.0.0
      - GRADIO_SERVER_PORT=7860
      - MODEL_CACHE_DIR=/app/models
      - TORCH_HOME=/app/models/.cache
      - LOG_LEVEL=INFO
      - MAX_WORKERS=4
    volumes:
      - model-cache:/app/models
      - uploads:/app/uploads
      - outputs:/app/outputs
      - ./config:/app/config:ro
    ports:
      - "7860:7860"  # Gradio UI
      - "8000:8000"  # REST API
    networks:
      - backgroundfx-net
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:7860/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s  # allow time for model loading before first probe
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
        limits:
          memory: 16G
          cpus: '8'
    restart: unless-stopped

  # CPU-only variant for development/testing
  backgroundfx-cpu:
    build:
      context: ..
      dockerfile: docker/Dockerfile.cpu
    image: backgroundfx-pro:cpu
    container_name: backgroundfx-cpu
    profiles: ["cpu"]
    environment:
      - GRADIO_SERVER_NAME=0.0.0.0
      - GRADIO_SERVER_PORT=7860
      - MODEL_CACHE_DIR=/app/models
      - TORCH_HOME=/app/models/.cache
      - DEVICE=cpu
      - LOG_LEVEL=INFO
    volumes:
      - model-cache:/app/models
      - uploads:/app/uploads
      - outputs:/app/outputs
      - ./config:/app/config:ro
    ports:
      - "7861:7860"  # Different port for CPU version
    networks:
      - backgroundfx-net
    deploy:
      resources:
        limits:
          memory: 8G
          cpus: '4'
    restart: unless-stopped

  # Redis for caching and job queue
  redis:
    image: redis:7-alpine
    container_name: backgroundfx-redis
    command: redis-server --appendonly yes
    volumes:
      - redis-data:/data
    ports:
      - "6379:6379"
    networks:
      - backgroundfx-net
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
    restart: unless-stopped

  # Nginx reverse proxy
  nginx:
    image: nginx:alpine
    container_name: backgroundfx-nginx
    profiles: ["production"]
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/ssl:/etc/nginx/ssl:ro
      - nginx-cache:/var/cache/nginx
    ports:
      - "80:80"
      - "443:443"
    networks:
      - backgroundfx-net
    depends_on:
      - backgroundfx-gpu
    restart: unless-stopped

  # Model downloader service
  model-downloader:
    build:
      context: ..
      dockerfile: docker/Dockerfile.models
    image: backgroundfx-pro:models
    container_name: backgroundfx-models
    profiles: ["setup"]
    environment:
      - MODEL_DIR=/models
    volumes:
      - model-cache:/models
    command: ["python", "download_models.py", "--all"]
    networks:
      - backgroundfx-net

  # Monitoring with Prometheus
  prometheus:
    image: prom/prometheus:latest
    container_name: backgroundfx-prometheus
    profiles: ["monitoring"]
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus-data:/prometheus
    ports:
      - "9090:9090"
    networks:
      - backgroundfx-net
    restart: unless-stopped

  # Grafana for visualization
  grafana:
    image: grafana/grafana:latest
    container_name: backgroundfx-grafana
    profiles: ["monitoring"]
    environment:
      # NOTE(review): default admin credential committed to VCS — override via an
      # env file or secret store before exposing this service.
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_INSTALL_PLUGINS=redis-datasource
    volumes:
      - grafana-data:/var/lib/grafana
      - ./monitoring/grafana:/etc/grafana/provisioning:ro
    ports:
      - "3000:3000"
    networks:
      - backgroundfx-net
    depends_on:
      - prometheus
    restart: unless-stopped

networks:
  backgroundfx-net:
    driver: bridge

volumes:
  model-cache:
    driver: local
  uploads:
    driver: local
  outputs:
    driver: local
  redis-data:
    driver: local
  nginx-cache:
    driver: local
  prometheus-data:
    driver: local
  grafana-data:
    driver: local