Skip to main content

Docker Compose Files

Ready-to-use Docker Compose templates for running various services and applications. Includes networking, volume management, and resource optimization for home labs.

Traefik Reverse Proxy

Production-ready Traefik setup with automatic SSL, Docker provider, dashboard, and health endpoint for monitoring.

traefik.yml
services:
  traefik:
    image: traefik:v3.0
    container_name: traefik
    restart: unless-stopped
    security_opt:
      - no-new-privileges:true
    command:
      - "--api.dashboard=true"
      - "--ping=true"
      - "--providers.docker=true"
      - "--providers.docker.exposedbydefault=false"
      - "--entrypoints.web.address=:80"
      - "--entrypoints.websecure.address=:443"
      # Replace with a real mailbox — Let's Encrypt sends expiry notices here.
      - "--certificatesresolvers.letsencrypt.acme.email=admin@example.com"
      - "--certificatesresolvers.letsencrypt.acme.storage=/letsencrypt/acme.json"
      - "--certificatesresolvers.letsencrypt.acme.httpchallenge.entrypoint=web"
    healthcheck:
      # Use Traefik's built-in healthcheck subcommand: it queries the /ping
      # endpoint enabled by --ping=true and does not depend on wget or on the
      # internal :8080 entrypoint being reachable over plain HTTP.
      test: ["CMD", "traefik", "healthcheck", "--ping"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "1.0"
    ports:
      - "80:80"
      - "443:443"
    volumes:
      # Read-only socket: Traefik only needs to watch container events.
      - "/var/run/docker.sock:/var/run/docker.sock:ro"
      # acme.json lives here — keep it chmod 600 or Traefik refuses to start.
      - "./letsencrypt:/letsencrypt"
    networks:
      - traefik-public
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.dashboard.rule=Host(`traefik.example.com`)"
      - "traefik.http.routers.dashboard.service=api@internal"
      - "traefik.http.routers.dashboard.tls.certresolver=letsencrypt"

networks:
  traefik-public:
    external: true
Tags: traefik, reverse-proxy, ssl, docker

Monitoring Stack

Prometheus, Grafana, and Node Exporter for comprehensive homelab monitoring with healthchecks and resource limits.

monitoring.yml
services:
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    restart: unless-stopped
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus
    command:
      - "--config.file=/etc/prometheus/prometheus.yml"
      - "--storage.tsdb.path=/prometheus"
      - "--storage.tsdb.retention.time=30d"
      # Allows config reload via POST /-/reload instead of a restart.
      - "--web.enable-lifecycle"
    healthcheck:
      test: ["CMD", "wget", "--spider", "--quiet", "http://localhost:9090/-/healthy"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: "1.0"
    ports:
      - "9090:9090"
    networks:
      - monitoring

  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    restart: unless-stopped
    volumes:
      - grafana_data:/var/lib/grafana
    environment:
      # Change this before first launch — it only seeds the initial admin user.
      - GF_SECURITY_ADMIN_PASSWORD=changeme
      - GF_USERS_ALLOW_SIGN_UP=false
    healthcheck:
      test: ["CMD", "wget", "--spider", "--quiet", "http://localhost:3000/api/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 20s
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "0.5"
    ports:
      - "3000:3000"
    networks:
      - monitoring
    depends_on:
      prometheus:
        condition: service_healthy

  node-exporter:
    image: prom/node-exporter:latest
    container_name: node-exporter
    restart: unless-stopped
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - "--path.procfs=/host/proc"
      - "--path.sysfs=/host/sys"
      # Without this flag the /:/rootfs mount above is never used and the
      # filesystem collector reports the container's rootfs, not the host's.
      - "--path.rootfs=/rootfs"
      # $$ is Compose escaping for a literal $ in the regex anchor.
      - "--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)"
    healthcheck:
      test: ["CMD", "wget", "--spider", "--quiet", "http://localhost:9100/metrics"]
      interval: 30s
      timeout: 5s
      retries: 3
    deploy:
      resources:
        limits:
          memory: 128M
          cpus: "0.25"
    ports:
      - "9100:9100"
    networks:
      - monitoring

volumes:
  prometheus_data:
  grafana_data:

networks:
  monitoring:
    driver: bridge
Tags: prometheus, grafana, monitoring, metrics

Media Server Stack

Complete arr-stack with Plex, Sonarr, Radarr, Prowlarr, and qBittorrent on a shared download network.

media-stack.yml
services:
  plex:
    image: lscr.io/linuxserver/plex:latest
    container_name: plex
    restart: unless-stopped
    # Host networking so DLNA / GDM client discovery works; Plex therefore
    # binds :32400 on the host directly and joins no Compose network.
    network_mode: host
    environment:
      # PUID/PGID map the in-container user to the host owner of the media dirs.
      - PUID=1000
      - PGID=1000
      - TZ=America/Denver
      - VERSION=docker
    healthcheck:
      # /identity answers without authentication, so no token is needed here.
      test: ["CMD", "curl", "-f", "http://localhost:32400/identity"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    deploy:
      resources:
        limits:
          memory: 2G
          cpus: "2.0"
    volumes:
      - plex_config:/config
      - /mnt/media/tv:/tv
      - /mnt/media/movies:/movies

  sonarr:
    image: lscr.io/linuxserver/sonarr:latest
    container_name: sonarr
    restart: unless-stopped
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Denver
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8989/ping"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "0.5"
    volumes:
      - sonarr_config:/config
      # /tv matches Plex's mount and /downloads matches qBittorrent's, so
      # completed imports are hardlink/move operations on the same paths.
      - /mnt/media/tv:/tv
      - /mnt/downloads:/downloads
    ports:
      - "8989:8989"
    networks:
      - media

  radarr:
    image: lscr.io/linuxserver/radarr:latest
    container_name: radarr
    restart: unless-stopped
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Denver
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:7878/ping"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "0.5"
    volumes:
      - radarr_config:/config
      - /mnt/media/movies:/movies
      - /mnt/downloads:/downloads
    ports:
      - "7878:7878"
    networks:
      - media

  # Indexer manager — Sonarr/Radarr sync their indexers from Prowlarr,
  # reaching it at http://prowlarr:9696 over the shared "media" network.
  prowlarr:
    image: lscr.io/linuxserver/prowlarr:latest
    container_name: prowlarr
    restart: unless-stopped
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Denver
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9696/ping"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.25"
    volumes:
      - prowlarr_config:/config
    ports:
      - "9696:9696"
    networks:
      - media

  qbittorrent:
    image: lscr.io/linuxserver/qbittorrent:latest
    container_name: qbittorrent
    restart: unless-stopped
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Denver
      - WEBUI_PORT=8080
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "0.5"
    volumes:
      - qbittorrent_config:/config
      - /mnt/downloads:/downloads
    ports:
      # 8080 = web UI; 6881 tcp+udp = BitTorrent peer port.
      - "8080:8080"
      - "6881:6881"
      - "6881:6881/udp"
    networks:
      - media

volumes:
  plex_config:
  sonarr_config:
  radarr_config:
  prowlarr_config:
  qbittorrent_config:

networks:
  media:
    driver: bridge
Tags: plex, sonarr, radarr, media-server

Homepage Dashboard

Beautiful, customizable dashboard for your homelab with service integrations and Docker socket autodiscovery.

homepage.yml
services:
  homepage:
    image: ghcr.io/gethomepage/homepage:latest
    container_name: homepage
    restart: unless-stopped
    environment:
      - PUID=1000
      - PGID=1000
      # Recent Homepage releases reject requests whose Host header is not
      # listed here — without it the UI shows "Host validation failed".
      # List every hostname/port you browse it from.
      - HOMEPAGE_ALLOWED_HOSTS=dash.example.com,localhost:3001
    healthcheck:
      test: ["CMD", "wget", "--spider", "--quiet", "http://localhost:3000"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.25"
    volumes:
      - ./homepage/config:/app/config
      # Read-only socket for the Docker service-autodiscovery widget.
      - /var/run/docker.sock:/var/run/docker.sock:ro
    ports:
      # Published on 3001 to avoid clashing with Grafana's 3000.
      - "3001:3000"
    networks:
      - traefik-public
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.homepage.rule=Host(`dash.example.com`)"
      - "traefik.http.routers.homepage.tls.certresolver=letsencrypt"
      - "traefik.http.services.homepage.loadbalancer.server.port=3000"

networks:
  traefik-public:
    external: true
Tags: dashboard, homepage, homelab, monitoring

Pi-hole DNS

Network-wide ad blocking with Pi-hole. Includes Unbound for recursive DNS resolution.

pihole.yml
services:
  pihole:
    image: pihole/pihole:latest
    container_name: pihole
    restart: unless-stopped
    hostname: pihole
    environment:
      - TZ=America/Denver
      # Change before first launch — seeds the web admin password.
      - WEBPASSWORD=changeme
      # Upstream resolver is the unbound container on the shared "dns"
      # network. 127.0.0.1#5335 would point at the Pi-hole container itself
      # (the 5335 mapping only exists on the HOST), so use the service name
      # and unbound's container-internal port 53 instead.
      - PIHOLE_DNS_=unbound#53
      - DNSSEC=true
    healthcheck:
      test: ["CMD", "dig", "+short", "+norecurse", "+retry=0", "@127.0.0.1", "pi.hole"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 30s
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.5"
    volumes:
      - ./pihole/etc-pihole:/etc/pihole
      - ./pihole/etc-dnsmasq.d:/etc/dnsmasq.d
    ports:
      - "53:53/tcp"
      - "53:53/udp"
      # Web UI — remapped off :80 to avoid clashing with a reverse proxy.
      - "8080:80/tcp"
    cap_add:
      - NET_ADMIN
    networks:
      - dns
    depends_on:
      unbound:
        condition: service_healthy

  unbound:
    image: mvance/unbound:latest
    container_name: unbound
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "drill", "@127.0.0.1", "cloudflare.com"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.25"
    volumes:
      - ./unbound:/opt/unbound/etc/unbound
    ports:
      # Host-side exposure for debugging (dig @host -p 5335); Pi-hole itself
      # talks to unbound:53 over the bridge network.
      - "5335:53/tcp"
      - "5335:53/udp"
    networks:
      - dns

networks:
  dns:
    driver: bridge
Tags: pihole, dns, ad-blocking, unbound

Gitea Code Hosting

Self-hosted Git service with PostgreSQL backend, SSH access, healthchecks, and service-healthy dependencies.

gitea.yml
services:
  gitea:
    image: gitea/gitea:latest
    container_name: gitea
    restart: unless-stopped
    environment:
      - USER_UID=1000
      - USER_GID=1000
      # GITEA__section__KEY env vars map onto app.ini [database] settings.
      - GITEA__database__DB_TYPE=postgres
      - GITEA__database__HOST=gitea-db:5432
      - GITEA__database__NAME=gitea
      - GITEA__database__USER=gitea
      # Must match POSTGRES_PASSWORD in gitea-db below — change both together.
      - GITEA__database__PASSWD=gitea_password
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/api/healthz"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 30s
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "0.5"
    volumes:
      - ./gitea/data:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "3000:3000"
      # Git-over-SSH on host port 2222 so it doesn't collide with the
      # host's own sshd on 22 (clone URLs use ssh://git@host:2222/...).
      - "2222:22"
    networks:
      - gitea
    depends_on:
      # Wait for Postgres to pass pg_isready before Gitea runs migrations.
      gitea-db:
        condition: service_healthy

  gitea-db:
    image: postgres:15-alpine
    container_name: gitea-db
    restart: unless-stopped
    environment:
      - POSTGRES_USER=gitea
      - POSTGRES_PASSWORD=gitea_password
      - POSTGRES_DB=gitea
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U gitea"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: "0.5"
    volumes:
      - ./gitea/postgres:/var/lib/postgresql/data
    networks:
      - gitea

networks:
  gitea:
    driver: bridge
Tags: gitea, git, code-hosting, postgres

Authelia SSO

Full single sign-on and 2FA gateway with Redis session storage. Plugs into Traefik as forward-auth middleware.

authelia.yml
services:
  authelia:
    image: authelia/authelia:latest
    container_name: authelia
    restart: unless-stopped
    environment:
      - TZ=America/Denver
    healthcheck:
      # NOTE(review): assumes wget exists in the authelia image — confirm;
      # official images ship their own /app/healthcheck.sh for this purpose.
      test: ["CMD", "wget", "--spider", "--quiet", "http://localhost:9091/api/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "0.5"
    volumes:
      # Expects configuration.yml (and users database, if file backend) here.
      - ./authelia/config:/config
    ports:
      - "9091:9091"
    networks:
      - traefik-public
    depends_on:
      # Redis holds sessions; start Authelia only once it answers PING.
      authelia-redis:
        condition: service_healthy
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.authelia.rule=Host(`auth.example.com`)"
      - "traefik.http.routers.authelia.tls.certresolver=letsencrypt"
      - "traefik.http.services.authelia.loadbalancer.server.port=9091"
      # Defines the "authelia" forward-auth middleware; attach it to other
      # routers with: traefik.http.routers.<name>.middlewares=authelia
      - "traefik.http.middlewares.authelia.forwardAuth.address=http://authelia:9091/api/authz/forward-auth"
      - "traefik.http.middlewares.authelia.forwardAuth.trustForwardHeader=true"
      - "traefik.http.middlewares.authelia.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Email,Remote-Name"

  authelia-redis:
    image: redis:7-alpine
    container_name: authelia-redis
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 3
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.25"
    volumes:
      - authelia_redis_data:/data
    networks:
      - traefik-public

volumes:
  authelia_redis_data:

networks:
  traefik-public:
    external: true
Tags: authelia, sso, 2fa, security

Immich

Self-hosted Google Photos replacement with machine learning for face detection, object recognition, and smart search.

immich.yml
services:
  immich-server:
    image: ghcr.io/immich-app/immich-server:release
    container_name: immich-server
    restart: unless-stopped
    environment:
      - PUID=1000
      - PGID=1000
      # Must match the credentials on immich-postgres below.
      - DB_HOSTNAME=immich-postgres
      - DB_USERNAME=immich
      - DB_PASSWORD=immich_db_password
      - DB_DATABASE_NAME=immich
      - REDIS_HOSTNAME=immich-redis
    healthcheck:
      test: ["CMD", "wget", "--spider", "--quiet", "http://localhost:2283/api/server/ping"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    deploy:
      resources:
        limits:
          memory: 2G
          cpus: "2.0"
    volumes:
      - /mnt/photos/upload:/usr/src/app/upload
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "2283:2283"
    networks:
      - immich
    depends_on:
      immich-redis:
        condition: service_healthy
      immich-postgres:
        condition: service_healthy

  immich-machine-learning:
    image: ghcr.io/immich-app/immich-machine-learning:release
    container_name: immich-machine-learning
    restart: unless-stopped
    healthcheck:
      # The ML image has no wget/curl, so probe via the Python runtime.
      test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:3003/ping')"]
      interval: 30s
      timeout: 10s
      retries: 3
      # Generous start period: first boot downloads CLIP/facial models.
      start_period: 60s
    deploy:
      resources:
        limits:
          memory: 2G
          cpus: "2.0"
    volumes:
      # Caches downloaded ML models between container recreations.
      - immich_ml_cache:/cache
    networks:
      - immich

  immich-redis:
    image: redis:7-alpine
    container_name: immich-redis
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 3
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.25"
    networks:
      - immich

  immich-postgres:
    # Immich requires the pgvecto.rs extension — plain postgres won't work.
    image: tensorchord/pgvecto-rs:pg16-v0.2.0
    container_name: immich-postgres
    restart: unless-stopped
    environment:
      - POSTGRES_USER=immich
      - POSTGRES_PASSWORD=immich_db_password
      - POSTGRES_DB=immich
      # No quotes around the value: in list-form env entries the quotes are
      # literal characters and would be passed straight through to initdb.
      - POSTGRES_INITDB_ARGS=--data-checksums
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U immich"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: "0.5"
    volumes:
      - immich_postgres_data:/var/lib/postgresql/data
    networks:
      - immich

volumes:
  immich_ml_cache:
  immich_postgres_data:

networks:
  immich:
    driver: bridge
Tags: immich, photos, backup, media

Vaultwarden

Lightweight Bitwarden-compatible password manager with SMTP email, websocket push, admin panel, and automated backup sidecar.

vaultwarden.yml
services:
  vaultwarden:
    image: vaultwarden/server:latest
    container_name: vaultwarden
    restart: unless-stopped
    environment:
      # Admin panel — generate with: echo -n 'your-password' | argon2 randomsalt -e
      # ($$ is Compose escaping for the literal $ separators in the argon2 hash)
      - ADMIN_TOKEN=$$argon2id$$v=19$$m=65540,t=3,p=4$$bXlzYWx0$$hash
      # NOTE(review): since Vaultwarden 1.29 websockets are served on the main
      # web port; WEBSOCKET_ENABLED and port 3012 are only needed on older
      # releases — confirm against your image version.
      - WEBSOCKET_ENABLED=true
      - SIGNUPS_ALLOWED=false
      - IP_HEADER=X-Forwarded-For
      - LOG_FILE=/data/vaultwarden.log
      - LOG_LEVEL=warn
      # SMTP for emergency access, email 2FA, and org invites
      - SMTP_HOST=10.42.0.5
      - SMTP_PORT=587
      - SMTP_SECURITY=starttls
      - SMTP_FROM=vaultwarden@example.com
      - SMTP_FROM_NAME=Vaultwarden
      - SMTP_USERNAME=vaultwarden@example.com
      - SMTP_PASSWORD=smtp_password_here
      # Push notifications for live sync
      - PUSH_ENABLED=true
      - PUSH_INSTALLATION_ID=your-installation-id
      - PUSH_INSTALLATION_KEY=your-installation-key
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:80/alive"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.25"
    volumes:
      - vaultwarden_data:/data
      - vaultwarden_backups:/backups
    ports:
      - "8222:80"
      - "3012:3012"
    networks:
      - traefik-public
    labels:
      - "traefik.enable=true"
      # Main web UI
      - "traefik.http.routers.vaultwarden.rule=Host(`vault.example.com`)"
      - "traefik.http.routers.vaultwarden.tls.certresolver=letsencrypt"
      - "traefik.http.services.vaultwarden.loadbalancer.server.port=80"
      # Websocket route for live sync (legacy — see NOTE above)
      - "traefik.http.routers.vaultwarden-ws.rule=Host(`vault.example.com`) && Path(`/notifications/hub`)"
      - "traefik.http.routers.vaultwarden-ws.tls.certresolver=letsencrypt"
      - "traefik.http.services.vaultwarden-ws.loadbalancer.server.port=3012"

  # Backup sidecar — runs nightly at 3 AM
  # Backs up the SQLite database using the .backup command
  # which creates a consistent snapshot without locking
  #
  # vaultwarden-backup:
  #   image: bruceforce/vaultwarden-backup:latest
  #   container_name: vaultwarden-backup
  #   restart: unless-stopped
  #   environment:
  #     - CRON_TIME=0 3 * * *
  #     - TIMESTAMP=true
  #     - DELETE_AFTER=30
  #   volumes:
  #     - vaultwarden_data:/data:ro
  #     - vaultwarden_backups:/backups
  #   depends_on:
  #     vaultwarden:
  #       condition: service_healthy
  #
  # Manual backup command:
  #   docker exec vaultwarden sqlite3 /data/db.sqlite3 ".backup '/backups/backup.sqlite3'"

volumes:
  vaultwarden_data:
  vaultwarden_backups:

networks:
  traefik-public:
    external: true
Tags: vaultwarden, bitwarden, passwords, security

Uptime Kuma

Slick status monitoring dashboard with HTTP, TCP, DNS, ping checks, push monitors, and notification provider integrations.

uptime-kuma.yml
services:
  uptime-kuma:
    image: louislam/uptime-kuma:latest
    container_name: uptime-kuma
    restart: unless-stopped
    environment:
      # Allow embedding in iframes (Homepage, Grafana dashboards)
      - UPTIME_KUMA_DISABLE_FRAME_SAMEORIGIN=true
    healthcheck:
      # Probes the container-internal port 3001 (not the remapped host port).
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3001"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.5"
    logging:
      # Cap log growth: 3 files x 10 MB, oldest rotated out.
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"
    volumes:
      - uptime_kuma_data:/app/data
    ports:
      # Host 3002 to avoid clashing with Grafana (3000) / Homepage (3001).
      - "3002:3001"
    networks:
      - monitoring
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.uptime.rule=Host(`status.example.com`)"
      - "traefik.http.routers.uptime.tls.certresolver=letsencrypt"
      # Traefik dials the container port directly, hence 3001 not 3002.
      - "traefik.http.services.uptime.loadbalancer.server.port=3001"

# Push monitor setup (after first launch):
#   1. Add monitor > type "Push" > copy the push URL
#   2. From any server, cron a heartbeat:
#      */5 * * * * curl -s "http://10.42.0.10:3002/api/push/<token>?status=up&msg=OK"
#
# Notification providers worth setting up:
#   - Ntfy (self-hosted push):  ntfy://10.42.0.15/alerts
#   - SMTP (email fallback):    smtp://10.42.0.5:587
#   - Gotify:                   gotify://10.42.0.20/message?token=<key>
#   - Slack/Discord webhooks:   paste the webhook URL directly
#
# Group monitors by tag for status page sections:
#   Infrastructure, Networking, Media, Databases

volumes:
  uptime_kuma_data:

networks:
  monitoring:
    driver: bridge
Tags: uptime-kuma, monitoring, status

Jellyfin

Free and open-source media server. Full GPU transcoding support, no license keys or tracking.

jellyfin.yml
services:
  jellyfin:
    image: jellyfin/jellyfin:latest
    container_name: jellyfin
    restart: unless-stopped
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Denver
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8096/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    deploy:
      resources:
        limits:
          memory: 2G
          cpus: "2.0"
    volumes:
      - jellyfin_config:/config
      # Transcode segments land in /cache — keep it on fast storage.
      - jellyfin_cache:/cache
      - /mnt/media/tv:/media/tv
      - /mnt/media/movies:/media/movies
      - /mnt/media/music:/media/music
    ports:
      - "8096:8096"
    networks:
      - media
    # GPU passthrough for hardware transcoding:
    #
    # NVIDIA — do NOT paste in a second `deploy:` key (duplicate mapping keys
    # are invalid YAML and most parsers silently keep only the last one,
    # dropping the memory/cpu limits above). Instead, nest a `reservations:`
    # section under the EXISTING deploy.resources block, alongside `limits:`:
    #       reservations:
    #         devices:
    #           - driver: nvidia
    #             count: 1
    #             capabilities: [gpu]
    #
    # Intel QSV / VAAPI — uncomment the device mapping below
    # devices:
    #   - /dev/dri:/dev/dri

volumes:
  jellyfin_config:
  jellyfin_cache:

networks:
  media:
    driver: bridge
Tags: jellyfin, media-server, streaming

Paperless-ngx

Document management system with OCR, tagging, and full-text search. Tika and Gotenberg handle PDF and Office docs. Network-segmented frontend/backend.

paperless-ngx.yml
services:
  paperless:
    image: ghcr.io/paperless-ngx/paperless-ngx:latest
    container_name: paperless
    restart: unless-stopped
    environment:
      # Redis is the Celery broker for OCR/consumer tasks.
      - PAPERLESS_REDIS=redis://paperless-redis:6379
      - PAPERLESS_DBHOST=paperless-postgres
      - PAPERLESS_DBUSER=paperless
      # Must match POSTGRES_PASSWORD on paperless-postgres below.
      - PAPERLESS_DBPASS=paperless_db_password
      # Tika + Gotenberg enable parsing of Office documents and HTML email.
      - PAPERLESS_TIKA_ENABLED=1
      - PAPERLESS_TIKA_GOTENBERG_ENDPOINT=http://gotenberg:3000
      - PAPERLESS_TIKA_ENDPOINT=http://tika:9998
      - PAPERLESS_OCR_LANGUAGE=eng
      - PAPERLESS_TIME_ZONE=America/Denver
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    deploy:
      resources:
        limits:
          memory: 2G
          cpus: "1.0"
    volumes:
      - paperless_data:/usr/src/paperless/data
      - paperless_media:/usr/src/paperless/media
      - ./paperless/export:/usr/src/paperless/export
      # Drop files into ./paperless/consume on the host to ingest them.
      - ./paperless/consume:/usr/src/paperless/consume
    ports:
      - "8000:8000"
    networks:
      # Bridges both networks: reachable from the proxy side while still
      # able to talk to the backend-only database and helpers.
      - paperless-frontend
      - paperless-backend
    depends_on:
      paperless-redis:
        condition: service_healthy
      paperless-postgres:
        condition: service_healthy
      # Tika/Gotenberg have no healthcheck ordering requirement — started
      # is enough, paperless retries them per-document.
      gotenberg:
        condition: service_started
      tika:
        condition: service_started

  paperless-redis:
    image: redis:7-alpine
    container_name: paperless-redis
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 3
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.25"
    networks:
      - paperless-backend

  paperless-postgres:
    image: postgres:16-alpine
    container_name: paperless-postgres
    restart: unless-stopped
    environment:
      - POSTGRES_USER=paperless
      - POSTGRES_PASSWORD=paperless_db_password
      - POSTGRES_DB=paperless
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U paperless"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: "0.5"
    volumes:
      - paperless_postgres_data:/var/lib/postgresql/data
    networks:
      - paperless-backend

  gotenberg:
    image: gotenberg/gotenberg:8
    container_name: gotenberg
    restart: unless-stopped
    command:
      - "gotenberg"
      # Hardening flags recommended by the paperless-ngx docs: no JS
      # execution, and Chromium may only read files under /tmp.
      - "--chromium-disable-javascript=true"
      - "--chromium-allow-list=file:///tmp/.*"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
      interval: 30s
      timeout: 5s
      retries: 3
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "0.5"
    networks:
      - paperless-backend

  tika:
    image: apache/tika:latest
    container_name: tika
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "wget", "--spider", "--quiet", "http://localhost:9998/"]
      interval: 30s
      timeout: 5s
      retries: 3
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "0.5"
    networks:
      - paperless-backend

volumes:
  paperless_data:
  paperless_media:
  paperless_postgres_data:

# Network segmentation: reverse proxy reaches paperless
# on the frontend network. Databases, Redis, Tika, and
# Gotenberg live on the backend network. The proxy cannot
# reach the database directly.
networks:
  paperless-frontend:
    driver: bridge
  paperless-backend:
    driver: bridge
Tags: paperless, documents, ocr

Nextcloud

Self-hosted cloud platform with file sync, calendar, contacts, and office suite. FPM variant with Nginx sidecar, network-segmented frontend/backend.

nextcloud.yml
services:
  nextcloud:
    image: nextcloud:fpm-alpine
    container_name: nextcloud
    restart: unless-stopped
    environment:
      - MYSQL_HOST=nextcloud-db
      - MYSQL_DATABASE=nextcloud
      - MYSQL_USER=nextcloud
      # Must match MYSQL_PASSWORD on nextcloud-db below.
      - MYSQL_PASSWORD=nextcloud_db_password
      - REDIS_HOST=nextcloud-redis
      - NEXTCLOUD_TRUSTED_DOMAINS=cloud.example.com
    healthcheck:
      # The fpm-alpine image runs PHP-FPM only — there is NO HTTP server in
      # this container, so an HTTP probe of localhost can never succeed (and
      # would deadlock the nginx sidecar's service_healthy gate below).
      # Probe the FastCGI listener on :9000 instead; busybox `nc` ships
      # with the alpine base image.
      test: ["CMD-SHELL", "nc -z 127.0.0.1 9000 || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
    deploy:
      resources:
        limits:
          memory: 2G
          cpus: "1.5"
    volumes:
      - nextcloud_html:/var/www/html
      - nextcloud_data:/var/www/html/data
    networks:
      - nextcloud-frontend
      - nextcloud-backend
    depends_on:
      nextcloud-db:
        condition: service_healthy
      nextcloud-redis:
        condition: service_healthy

  nextcloud-nginx:
    image: nginx:alpine
    container_name: nextcloud-nginx
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "wget", "--spider", "--quiet", "http://localhost:80"]
      interval: 30s
      timeout: 5s
      retries: 3
    deploy:
      resources:
        limits:
          memory: 128M
          cpus: "0.25"
    volumes:
      # Shares the app volume read-only so nginx can serve static assets
      # while proxying PHP requests to nextcloud:9000 via nginx.conf.
      - nextcloud_html:/var/www/html:ro
      - ./nextcloud/nginx.conf:/etc/nginx/nginx.conf:ro
    ports:
      - "8081:80"
    networks:
      - nextcloud-frontend
      - traefik-public
    depends_on:
      nextcloud:
        condition: service_healthy
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.nextcloud.rule=Host(`cloud.example.com`)"
      - "traefik.http.routers.nextcloud.tls.certresolver=letsencrypt"
      - "traefik.http.services.nextcloud.loadbalancer.server.port=80"

  nextcloud-db:
    image: mariadb:11
    container_name: nextcloud-db
    restart: unless-stopped
    # Isolation/binlog settings required by Nextcloud's MariaDB guidance.
    command: --transaction-isolation=READ-COMMITTED --log-bin=binlog --binlog-format=ROW
    environment:
      - MYSQL_ROOT_PASSWORD=nextcloud_root_password
      - MYSQL_DATABASE=nextcloud
      - MYSQL_USER=nextcloud
      - MYSQL_PASSWORD=nextcloud_db_password
    healthcheck:
      # healthcheck.sh ships with the official mariadb image.
      test: ["CMD-SHELL", "healthcheck.sh --connect --innodb_initialized"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: "0.5"
    volumes:
      - nextcloud_db_data:/var/lib/mysql
    networks:
      - nextcloud-backend

  nextcloud-redis:
    image: redis:7-alpine
    container_name: nextcloud-redis
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 3
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.25"
    networks:
      - nextcloud-backend

volumes:
  nextcloud_html:
  nextcloud_data:
  nextcloud_db_data:

# Network segmentation: Nginx and Nextcloud share the
# frontend network with Traefik. MariaDB and Redis are
# isolated on the backend network. Traefik cannot reach
# the database directly — only Nextcloud bridges both.
networks:
  nextcloud-frontend:
    driver: bridge
  nextcloud-backend:
    driver: bridge
  traefik-public:
    external: true
Tags: nextcloud, cloud, files, office

WireGuard VPN

Fast kernel-level VPN server using the LinuxServer image. Auto-generates peer configs and QR codes.

wireguard.yml
services:
  wireguard:
    image: lscr.io/linuxserver/wireguard:latest
    container_name: wireguard
    restart: unless-stopped
    cap_add:
      # NET_ADMIN to create the wg interface; SYS_MODULE to load the
      # kernel module from the /lib/modules mount below if needed.
      - NET_ADMIN
      - SYS_MODULE
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Denver
      # Public endpoint written into generated peer configs.
      - SERVERURL=vpn.example.com
      - SERVERPORT=51820
      # One config + QR code is generated per named peer in /config.
      - PEERS=phone,laptop,tablet
      # DNS pushed to peers (e.g. a LAN Pi-hole).
      - PEERDNS=10.42.0.1
      - INTERNAL_SUBNET=10.13.13.0
      # 0.0.0.0/0 = full-tunnel clients; narrow this for split tunneling.
      - ALLOWEDIPS=0.0.0.0/0
    healthcheck:
      # `wg show` prints an "interface:" line only when the tunnel is up.
      test: ["CMD-SHELL", "wg show | grep -q interface || exit 1"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 128M
          cpus: "0.25"
    volumes:
      - wireguard_config:/config
      - /lib/modules:/lib/modules:ro
    ports:
      - "51820:51820/udp"
    sysctls:
      # Required for WireGuard's fwmark-based routing inside the container.
      - net.ipv4.conf.all.src_valid_mark=1

volumes:
  wireguard_config:
Tags: wireguard, vpn, networking

Actual Budget

Privacy-first personal finance app with Postgres server-mode sync, file size limits, automated backups, and healthcheck.

actual-budget.yml
services:
  actual-budget:
    image: actualbudget/actual-server:latest
    container_name: actual-budget
    restart: unless-stopped
    environment:
      # Per-file sync/upload size caps in megabytes.
      - ACTUAL_UPLOAD_FILE_SYNC_SIZE_LIMIT_MB=20
      - ACTUAL_UPLOAD_FILE_SIZE_LIMIT_MB=20
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5006/"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.25"
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"
    ports:
      - "5006:5006"
    volumes:
      - actual_data:/data
      - actual_backups:/data/backups
    networks:
      - actual
    depends_on:
      # NOTE(review): no environment variable here points this service at
      # actual-db — actual-server stores per-user SQLite files under /data,
      # and the Postgres container below appears unused. Verify whether your
      # server version supports Postgres at all; if not, actual-db and this
      # depends_on gate can be removed.
      actual-db:
        condition: service_healthy

  actual-db:
    image: postgres:16-alpine
    container_name: actual-db
    restart: unless-stopped
    environment:
      - POSTGRES_USER=actual
      - POSTGRES_PASSWORD=actual_db_password
      - POSTGRES_DB=actual
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U actual"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "0.25"
    volumes:
      - actual_postgres_data:/var/lib/postgresql/data
    networks:
      - actual

  # Automated SQLite backup sidecar — runs nightly at 2 AM
  # Copies the user-files directory which contains per-user
  # SQLite databases. Keeps 14 days of snapshots.
  #
  # actual-backup:
  #   image: alpine:latest
  #   container_name: actual-backup
  #   restart: unless-stopped
  #   entrypoint: /bin/sh
  #   command: >
  #     -c 'while true; do
  #       sleep 1d;
  #       STAMP=$$(date +%Y%m%d-%H%M%S);
  #       cp -r /data/user-files /backups/user-files-$$STAMP;
  #       find /backups -maxdepth 1 -mtime +14 -exec rm -rf {} +;
  #       echo "[$$STAMP] Backup complete";
  #     done'
  #   volumes:
  #     - actual_data:/data:ro
  #     - actual_backups:/backups

volumes:
  actual_data:
  actual_backups:
  actual_postgres_data:

networks:
  actual:
    driver: bridge
actual-budgetfinancebudgeting

Headscale + UI

Self-hosted Tailscale control plane. Run your own coordination server with DERP relay config, ACL policies, and a web management UI.

headscale.yml
services:
  headscale:
    image: headscale/headscale:latest
    container_name: headscale
    restart: unless-stopped
    command: serve
    environment:
      - TZ=America/Denver
    healthcheck:
      # NOTE(review): "headscale health" may not exist as a CLI subcommand
      # in all releases — confirm against your headscale version, or probe
      # the HTTP /health endpoint instead.
      test: ["CMD", "headscale", "health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.25"
    volumes:
      # Expects config.yaml in ./headscale/config (see key settings below).
      - ./headscale/config:/etc/headscale
      - headscale_data:/var/lib/headscale
      # Mount your ACL policy file:
      # ./headscale/config/acl.hujson defines who can
      # reach what. See headscale docs for hujson format.
    ports:
      # 8080 = control-plane API/web, 9090 = Prometheus metrics
      # (matches listen_addr / metrics_listen_addr in config.yaml below).
      - "8080:8080"
      - "9090:9090"
    networks:
      - headscale

  headscale-ui:
    image: ghcr.io/gurucomputing/headscale-ui:latest
    container_name: headscale-ui
    restart: unless-stopped
    healthcheck:
      # NOTE(review): assumes wget exists in the UI image — verify.
      test: ["CMD", "wget", "--spider", "--quiet", "http://localhost:80"]
      interval: 30s
      timeout: 5s
      retries: 3
    deploy:
      resources:
        limits:
          memory: 128M
          cpus: "0.1"
    ports:
      # UI served on container port 80, published as 8443 on the host.
      - "8443:80"
    networks:
      - headscale

# headscale config.yaml key settings:
#   server_url: https://hs.example.com:8080
#   listen_addr: 0.0.0.0:8080
#   metrics_listen_addr: 0.0.0.0:9090
#   private_key_path: /var/lib/headscale/private.key
#   noise:
#     private_key_path: /var/lib/headscale/noise_private.key
#   database:
#     type: sqlite3
#     sqlite.path: /var/lib/headscale/db.sqlite
#   derp:
#     server:
#       enabled: true
#       region_id: 999
#       stun_listen_addr: 0.0.0.0:3478
#     urls:
#       - https://controlplane.tailscale.com/derpmap/default
#   dns:
#     base_domain: tail.example.com
#     nameservers:
#       - 10.42.0.1

volumes:
  headscale_data:

networks:
  headscale:
    driver: bridge
headscaletailscalevpnnetworking

Ntfy Push Notifications

Self-hosted push notification server with auth, attachment storage, email fallback, and UnifiedPush support for mobile apps.

ntfy.yml
services:
  ntfy:
    image: binwiederhier/ntfy:latest
    container_name: ntfy
    restart: unless-stopped
    command: serve
    environment:
      - TZ=America/Denver
      - NTFY_BASE_URL=https://ntfy.example.com
      # Message cache and auth DBs live on the ntfy_cache volume.
      - NTFY_CACHE_FILE=/var/lib/ntfy/cache.db
      - NTFY_AUTH_FILE=/var/lib/ntfy/auth.db
      # deny-all: every topic requires an explicit user/ACL grant (see
      # the "Create user" / "Grant topic access" examples below).
      - NTFY_AUTH_DEFAULT_ACCESS=deny-all
      # Trust X-Forwarded-For from the reverse proxy for rate limiting.
      - NTFY_BEHIND_PROXY=true
      # Attachment storage: 1G total, 15M per file.
      - NTFY_ATTACHMENT_CACHE_DIR=/var/lib/ntfy/attachments
      - NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT=1G
      - NTFY_ATTACHMENT_FILE_SIZE_LIMIT=15M
      # SMTP for email notification fallback
      - NTFY_SMTP_SENDER_ADDR=10.42.0.5:587
      - [email protected]
      - NTFY_SMTP_SENDER_PASS=smtp_password_here
      - [email protected]
      # Upstream server for UnifiedPush relay
      - NTFY_UPSTREAM_BASE_URL=https://ntfy.sh
    healthcheck:
      # ntfy serves HTTP on container port 80 by default.
      test: ["CMD", "wget", "--spider", "--quiet", "http://localhost:80/v1/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.25"
    volumes:
      - ntfy_cache:/var/lib/ntfy
    ports:
      - "2586:80"
    networks:
      - ntfy

# Usage examples:
#   Publish:  curl -d "Backup done" https://ntfy.example.com/backups
#   Subscribe: curl -s https://ntfy.example.com/backups/json
#
#   Create user:
#     docker exec ntfy ntfy user add --role=admin admin
#
#   Grant topic access:
#     docker exec ntfy ntfy access admin 'alerts' rw
#     docker exec ntfy ntfy access everyone 'announcements' ro
#
#   Use with Uptime Kuma:
#     Notification type: ntfy
#     Server URL: http://10.42.0.15:2586
#     Topic: alerts

volumes:
  ntfy_cache:

networks:
  ntfy:
    driver: bridge
ntfynotificationspushutilities

Caddy Reverse Proxy

Automatic HTTPS reverse proxy with Caddyfile config, Cloudflare DNS challenge, and zero-config certificate management.

caddy.yml
services:
  caddy:
    image: caddy:2-alpine
    container_name: caddy
    restart: unless-stopped
    environment:
      # Used by the Cloudflare DNS plugin (custom build — see below).
      - CLOUDFLARE_API_TOKEN=your_cf_api_token_here
    healthcheck:
      # Polls Caddy's admin API (localhost:2019 by default). The admin
      # endpoint must stay enabled or this check will always fail and the
      # container will be reported unhealthy.
      test: ["CMD", "wget", "--spider", "--quiet", "http://localhost:2019/config/"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.5"
    ports:
      # 443/udp enables HTTP/3 (QUIC).
      - "80:80"
      - "443:443"
      - "443:443/udp"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile:ro
      - caddy_data:/data
      - caddy_config:/config
    networks:
      - caddy-public

# If you need the Cloudflare DNS plugin for wildcard certs,
# build a custom image:
#
# FROM caddy:2-builder AS builder
# RUN xcaddy build --with github.com/caddy-dns/cloudflare
# FROM caddy:2-alpine
# COPY --from=builder /usr/bin/caddy /usr/bin/caddy
#
# Then reference your local build:
#   image: caddy-cf:latest
#   build: ./caddy-build

# --- Caddyfile example (mount at ./Caddyfile) ---
#
# Do NOT set "admin off" here: the container healthcheck above depends on
# the admin API, which listens on localhost:2019 by default (not exposed
# outside the container).
#
# {
#     email [email protected]
# }
#
# grafana.example.com {
#     reverse_proxy 10.42.0.20:3000
# }
#
# gitea.example.com {
#     reverse_proxy 10.42.0.25:3000
# }
#
# nextcloud.example.com {
#     reverse_proxy 10.42.0.30:8081
#     header Strict-Transport-Security "max-age=31536000;"
# }
#
# *.example.com {
#     tls {
#         dns cloudflare {env.CLOUDFLARE_API_TOKEN}
#     }
#     respond "Not configured" 404
# }

volumes:
  caddy_data:
  caddy_config:

networks:
  caddy-public:
    driver: bridge
caddyreverse-proxysslnetworking

Stirling-PDF

All-in-one PDF toolkit with merge, split, convert, OCR, compress, and watermark. Runs entirely local with no phone-home.

stirling-pdf.yml
services:
  stirling-pdf:
    image: frooodle/s-pdf:latest
    container_name: stirling-pdf
    restart: unless-stopped
    environment:
      # No login/auth — rely on network isolation or a reverse-proxy
      # auth layer if exposed beyond the LAN.
      - DOCKER_ENABLE_SECURITY=false
      - SECURITY_ENABLE_LOGIN=false
      - SYSTEM_DEFAULTLOCALE=en_US
      - UI_APP_NAME=PDF Tools
      - UI_HOME_DESCRIPTION=Self-hosted PDF toolkit
      - UI_APP_NAVBAR_NAME=PDF Tools
      # Keep everything local — no CDN scripts, no telemetry.
      - SYSTEM_DISABLE_EXTERNAL_SCRIPTS=true
      - METRICS_ENABLED=false
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/api/v1/info/status"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "1.0"
    volumes:
      # Tesseract language packs for OCR live in tessdata.
      - stirling_data:/usr/share/tessdata
      - stirling_config:/configs
      - ./stirling/customFiles:/customFiles
    ports:
      - "8084:8080"
    networks:
      - stirling

  # Calibre provides additional OCR and ebook conversion
  # capabilities when linked to Stirling-PDF
  # NOTE(review): no volume is mounted, so any Calibre state is lost when
  # the container is recreated — confirm this is intended.
  calibre:
    image: lscr.io/linuxserver/calibre:latest
    container_name: stirling-calibre
    restart: unless-stopped
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Denver
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "0.5"
    networks:
      - stirling

volumes:
  stirling_data:
  stirling_config:

networks:
  stirling:
    driver: bridge
stirling-pdfpdfdocumentsutilities

Mealie Recipe Manager

Recipe manager and meal planner with PostgreSQL backend, SMTP invites, OIDC login via Authelia, and ingredient parsing.

mealie.yml
services:
  mealie:
    image: ghcr.io/mealie-recipes/mealie:latest
    container_name: mealie
    restart: unless-stopped
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Denver
      - BASE_URL=https://meals.example.com
      # New accounts only via admin invite (or OIDC, below).
      - ALLOW_SIGNUP=false
      # PostgreSQL backend (not SQLite)
      - DB_ENGINE=postgres
      - POSTGRES_USER=mealie
      - POSTGRES_PASSWORD=mealie_db_password
      - POSTGRES_SERVER=mealie-db
      - POSTGRES_PORT=5432
      - POSTGRES_DB=mealie
      # SMTP for invitations and password reset
      - SMTP_HOST=10.42.0.5
      - SMTP_PORT=587
      - SMTP_AUTH_STRATEGY=TLS
      - SMTP_FROM_NAME=Mealie
      - [email protected]
      - [email protected]
      - SMTP_PASSWORD=smtp_password_here
      # OIDC login via Authelia (or any OIDC provider)
      # NOTE(review): OIDC_CLIENT_SECRET is not set here — most providers
      # require it (or an explicitly PKCE-only client). Verify against your
      # provider's client configuration.
      - OIDC_AUTH_ENABLED=true
      - OIDC_SIGNUP_ENABLED=false
      - OIDC_CONFIGURATION_URL=https://auth.example.com/.well-known/openid-configuration
      - OIDC_CLIENT_ID=mealie
      - OIDC_AUTO_REDIRECT=false
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/api/app/about"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 20s
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "0.5"
    ports:
      # Mealie listens on 9000 inside the container.
      - "9925:9000"
    volumes:
      - mealie_data:/app/data
    networks:
      - mealie
    depends_on:
      mealie-db:
        condition: service_healthy

  mealie-db:
    image: postgres:16-alpine
    container_name: mealie-db
    restart: unless-stopped
    environment:
      - POSTGRES_USER=mealie
      - POSTGRES_PASSWORD=mealie_db_password
      - POSTGRES_DB=mealie
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U mealie"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "0.25"
    volumes:
      - mealie_postgres_data:/var/lib/postgresql/data
    networks:
      - mealie

volumes:
  mealie_data:
  mealie_postgres_data:

networks:
  mealie:
    driver: bridge
mealierecipesmeal-planningutilities

Forgejo + Woodpecker CI

Gitea community fork with SSH passthrough and a full CI/CD pipeline via Woodpecker server and agent, connected through OAuth.

forgejo-ci.yml
services:
  forgejo:
    image: codeberg.org/forgejo/forgejo:latest
    container_name: forgejo
    restart: unless-stopped
    environment:
      - USER_UID=1000
      - USER_GID=1000
      # FORGEJO__section__KEY env vars map onto app.ini settings.
      - FORGEJO__database__DB_TYPE=postgres
      - FORGEJO__database__HOST=forgejo-db:5432
      - FORGEJO__database__NAME=forgejo
      - FORGEJO__database__USER=forgejo
      - FORGEJO__database__PASSWD=forgejo_db_password
      # SSH_PORT is what clone URLs advertise; it matches the "2222:22"
      # host mapping below.
      - FORGEJO__server__SSH_DOMAIN=git.example.com
      - FORGEJO__server__SSH_PORT=2222
      - FORGEJO__server__ROOT_URL=https://git.example.com
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/api/healthz"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 30s
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "0.5"
    volumes:
      - forgejo_data:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "3000:3000"
      - "2222:22"
    networks:
      - forgejo
    depends_on:
      forgejo-db:
        condition: service_healthy

  forgejo-db:
    image: postgres:16-alpine
    container_name: forgejo-db
    restart: unless-stopped
    environment:
      - POSTGRES_USER=forgejo
      - POSTGRES_PASSWORD=forgejo_db_password
      - POSTGRES_DB=forgejo
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U forgejo"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: "0.5"
    volumes:
      - forgejo_postgres_data:/var/lib/postgresql/data
    networks:
      - forgejo

  woodpecker-server:
    image: woodpeckerci/woodpecker-server:latest
    container_name: woodpecker-server
    restart: unless-stopped
    environment:
      # Closed registration: only users who exist in Forgejo may log in.
      - WOODPECKER_OPEN=false
      - WOODPECKER_HOST=https://ci.example.com
      - WOODPECKER_FORGEJO=true
      # Server-to-server URL uses the compose service name, not the
      # public domain.
      - WOODPECKER_FORGEJO_URL=http://forgejo:3000
      - WOODPECKER_FORGEJO_CLIENT=your-oauth2-client-id
      - WOODPECKER_FORGEJO_SECRET=your-oauth2-client-secret
      # Must match the agent's WOODPECKER_AGENT_SECRET below.
      - WOODPECKER_AGENT_SECRET=shared-agent-secret-change-me
    healthcheck:
      # NOTE(review): assumes wget exists in the woodpecker-server image —
      # verify before relying on this check.
      test: ["CMD", "wget", "--spider", "--quiet", "http://localhost:8000/healthz"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.25"
    volumes:
      - woodpecker_data:/var/lib/woodpecker
    ports:
      - "8000:8000"
    networks:
      - forgejo
    depends_on:
      forgejo:
        condition: service_healthy

  woodpecker-agent:
    image: woodpeckerci/woodpecker-agent:latest
    container_name: woodpecker-agent
    restart: unless-stopped
    environment:
      # 9000 is the server's gRPC port; it is reachable only on the
      # internal network (not published to the host).
      - WOODPECKER_SERVER=woodpecker-server:9000
      - WOODPECKER_AGENT_SECRET=shared-agent-secret-change-me
      - WOODPECKER_MAX_WORKFLOWS=2
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "1.0"
    volumes:
      # SECURITY: write access to docker.sock lets CI jobs control the
      # host Docker daemon (root-equivalent). Restrict who can push
      # pipelines, or use a docker-in-docker / podman agent instead.
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - forgejo
    depends_on:
      woodpecker-server:
        condition: service_healthy

# OAuth2 setup in Forgejo:
#   1. Login as admin > Site Administration > Applications
#   2. Create OAuth2 app:
#      Name: Woodpecker CI
#      Redirect URI: https://ci.example.com/authorize
#   3. Copy Client ID and Secret into the env vars above

volumes:
  forgejo_data:
  forgejo_postgres_data:
  woodpecker_data:

networks:
  forgejo:
    driver: bridge
forgejogitwoodpeckerci-cdutilities

CrowdSec + Traefik Bouncer

Modern intrusion prevention replacing Fail2ban. CrowdSec detects attacks, the Traefik bouncer blocks them, and crowd intelligence shares threat data across the network.

crowdsec.yml
services:
  crowdsec:
    image: crowdsecurity/crowdsec:latest
    container_name: crowdsec
    restart: unless-stopped
    environment:
      - TZ=America/Denver
      # Install the Traefik log parser and HTTP scenarios
      - COLLECTIONS=crowdsecurity/traefik crowdsecurity/http-cve
      # Enroll in the CrowdSec console for threat intelligence
      - ENROLL_KEY=your-enrollment-key-from-app.crowdsec.net
      - ENROLL_INSTANCE_NAME=homelab
    healthcheck:
      # cscli exits non-zero if the local API is unreachable.
      test: ["CMD", "cscli", "version"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 30s
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "0.5"
    volumes:
      - crowdsec_config:/etc/crowdsec
      - crowdsec_data:/var/lib/crowdsec/data
      # Mount Traefik access logs for parsing
      - ./traefik/logs:/var/log/traefik:ro
    ports:
      # Publishes the local API (LAPI) on the host at 8085 — only needed
      # if bouncers outside this compose stack must reach it.
      - "8085:8080"
    networks:
      - crowdsec

  crowdsec-traefik-bouncer:
    image: fbonalair/traefik-crowdsec-bouncer:latest
    container_name: crowdsec-bouncer
    restart: unless-stopped
    environment:
      # Generate with: docker exec crowdsec cscli bouncers add traefik-bouncer
      - CROWDSEC_BOUNCER_API_KEY=your-bouncer-api-key
      - CROWDSEC_AGENT_HOST=crowdsec:8080
      - GIN_MODE=release
    healthcheck:
      # NOTE(review): assumes wget exists in the bouncer image; minimal
      # Go images are often scratch-based and lack it — verify.
      test: ["CMD", "wget", "--spider", "--quiet", "http://localhost:8080/api/v1/ping"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 128M
          cpus: "0.1"
    networks:
      # Joins traefik-public so Traefik can reach the forwardAuth endpoint.
      - crowdsec
      - traefik-public
    depends_on:
      crowdsec:
        condition: service_healthy

# Traefik integration — add these labels to your Traefik container:
#
#   labels:
#     - "traefik.http.middlewares.crowdsec.forwardauth.address=http://crowdsec-bouncer:8080/api/v1/forwardAuth"
#     - "traefik.http.middlewares.crowdsec.forwardauth.trustForwardHeader=true"
#
# Then apply the middleware to any router:
#     - "traefik.http.routers.myapp.middlewares=crowdsec@docker"
#
# Enable Traefik access logging (required for CrowdSec parsing):
#   command:
#     - "--accesslog=true"
#     - "--accesslog.filepath=/var/log/traefik/access.log"
#
# Useful commands:
#   docker exec crowdsec cscli decisions list     # View active bans
#   docker exec crowdsec cscli alerts list        # View alerts
#   docker exec crowdsec cscli hub update         # Update collections
#   docker exec crowdsec cscli metrics            # View parsed logs

volumes:
  crowdsec_config:
  crowdsec_data:

networks:
  crowdsec:
    driver: bridge
  traefik-public:
    external: true
crowdsecintrusion-detectiontraefiksecurity

Database Backup Sidecar

Reusable backup pattern for PostgreSQL and MariaDB. Runs pg_dump or mariadb-dump on a daily schedule, compresses with zstd, rotates old backups, and optionally pushes to S3.

db-backup-sidecar.yml
services:
  # ─── PostgreSQL + Backup Sidecar ─────────────────
  postgres:
    image: postgres:16-alpine
    container_name: postgres
    restart: unless-stopped
    environment:
      - POSTGRES_USER=appuser
      - POSTGRES_PASSWORD=app_db_password
      - POSTGRES_DB=appdb
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U appuser"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: "0.5"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    networks:
      - database

  postgres-backup:
    image: prodrigestivill/postgres-backup-local:16-alpine
    container_name: postgres-backup
    restart: unless-stopped
    environment:
      - POSTGRES_HOST=postgres
      - POSTGRES_DB=appdb
      - POSTGRES_USER=appuser
      - POSTGRES_PASSWORD=app_db_password
      # @daily = once every midnight; keep the last 7 dumps.
      - SCHEDULE=@daily
      - BACKUP_NUM_KEEP=7
      - BACKUP_DIR=/backups
      # The image exposes a small HTTP endpoint for the healthcheck below.
      - HEALTHCHECK_PORT=8080
      # --clean --if-exists makes restores idempotent (drop-then-create).
      - POSTGRES_EXTRA_OPTS=--clean --if-exists
    healthcheck:
      test: ["CMD", "wget", "--spider", "--quiet", "http://localhost:8080/"]
      interval: 60s
      timeout: 5s
      retries: 3
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.25"
    volumes:
      - postgres_backups:/backups
    networks:
      - database
    depends_on:
      postgres:
        condition: service_healthy

  # ─── MariaDB + Backup Sidecar ────────────────────
  mariadb:
    image: mariadb:11
    container_name: mariadb
    restart: unless-stopped
    environment:
      - MYSQL_ROOT_PASSWORD=root_password
      - MYSQL_DATABASE=appdb
      - MYSQL_USER=appuser
      - MYSQL_PASSWORD=app_db_password
    healthcheck:
      # healthcheck.sh ships with the official mariadb image.
      test: ["CMD-SHELL", "healthcheck.sh --connect --innodb_initialized"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: "0.5"
    volumes:
      - mariadb_data:/var/lib/mysql
    networks:
      - database

  mariadb-backup:
    image: alpine:latest
    container_name: mariadb-backup
    restart: unless-stopped
    entrypoint: /bin/sh
    # Folded scalar (>) joins these lines with spaces into one shell
    # command. First dump runs immediately at start, then every 24 h
    # (interval-based, not clock-aligned). $$ escapes $ from compose
    # variable interpolation.
    # SECURITY: the -p<password> flag is visible in the container's
    # process list; consider MYSQL_PWD or a --defaults-extra-file.
    command: >
      -c 'apk add --no-cache mariadb-client zstd && while true; do
        STAMP=$$(date +%Y%m%d-%H%M%S);
        echo "[$$STAMP] Starting MariaDB backup...";
        mariadb-dump -h mariadb -u appuser -papp_db_password
        --single-transaction --routines --triggers appdb
        | zstd -T0 -9 > /backups/appdb-$$STAMP.sql.zst;
        echo "[$$STAMP] Backup complete: appdb-$$STAMP.sql.zst";
        find /backups -name "*.sql.zst" -mtime +7 -delete;
        echo "[$$STAMP] Old backups pruned. Sleeping until tomorrow.";
        sleep 86400;
      done'
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: "0.25"
    volumes:
      - mariadb_backups:/backups
    networks:
      - database
    depends_on:
      mariadb:
        condition: service_healthy

  # ─── Optional: Push backups to S3 ────────────────
  # s3-sync:
  #   image: amazon/aws-cli:latest
  #   container_name: backup-s3-sync
  #   restart: unless-stopped
  #   environment:
  #     - AWS_ACCESS_KEY_ID=your-access-key
  #     - AWS_SECRET_ACCESS_KEY=your-secret-key
  #     - AWS_DEFAULT_REGION=us-east-1
  #   entrypoint: /bin/sh
  #   command: >
  #     -c 'while true; do
  #       aws s3 sync /backups/postgres s3://my-bucket/postgres/ --delete;
  #       aws s3 sync /backups/mariadb s3://my-bucket/mariadb/ --delete;
  #       sleep 86400;
  #     done'
  #   volumes:
  #     - postgres_backups:/backups/postgres:ro
  #     - mariadb_backups:/backups/mariadb:ro

volumes:
  postgres_data:
  postgres_backups:
  mariadb_data:
  mariadb_backups:

networks:
  database:
    driver: bridge
postgresmariadbbackupdatabaseutilities