docker-stacks/immich/docker-compose.yml

#
# WARNING: Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
#
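# As a rough sketch (not part of the upstream file; verify the asset names
# against the current release), the release compose file, the optional hwaccel
# override files and an example .env could be fetched with:
#
#   wget https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#   wget -O .env https://github.com/immich-app/immich/releases/latest/download/example.env
#   wget https://github.com/immich-app/immich/releases/latest/download/hwaccel.transcoding.yml
#   wget https://github.com/immich-app/immich/releases/latest/download/hwaccel.ml.yml
#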
name: immich
services:
  immich-server:
    container_name: immich_server
    image: 'ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}'
    extends:
      file: 'hwaccel.transcoding.yml'
      service: '${HWACCEL_TRANSCODING:-cpu}' # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
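    # For example (an assumption, pick the value matching your GPU): set
    # HWACCEL_TRANSCODING=quicksync in .env; hwaccel.transcoding.yml must sit
    # next to this compose file for the extends above to resolve.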
    volumes:
      # Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
      - '${UPLOAD_LOCATION:-./upload}:/usr/src/app/upload'
      - '/etc/localtime:/etc/localtime:ro'
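    # A hypothetical .env entry for the upload volume above:
    #   UPLOAD_LOCATION=/mnt/storage/immich-upload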
    env_file:
      - .env
    ports:
      - '${BIND_ADDRESS:-127.0.0.1}:${BIND_PORT:-2283}:2283'
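    # By default the web UI is only reachable from localhost on port 2283. To
    # expose it on all interfaces, a .env entry such as BIND_ADDRESS=0.0.0.0
    # (and optionally BIND_PORT=8080) could be used - hypothetical values.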
    depends_on:
      - redis
      - database
    restart: unless-stopped
    # healthcheck:
    #   disable: false
    logging:
      driver: 'json-file'
      options:
        max-size: '${LOG_MAX_SIZE:-5m}'
        max-file: '${LOG_MAX_FILE:-5}'
  immich-machine-learning:
    container_name: immich_machine_learning
    # For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
    # Example tag: ${IMMICH_VERSION:-release}-cuda
    image: 'ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}'
    extends: # selected via HWACCEL_ML in .env - see https://immich.app/docs/features/ml-hardware-acceleration
      file: 'hwaccel.ml.yml'
      service: '${HWACCEL_ML:-cpu}' # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
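    # For example (an assumption, match it to your hardware): HWACCEL_ML=cuda
    # in .env for NVIDIA GPUs; hwaccel.ml.yml must sit next to this compose file.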
    volumes:
      - '${MODEL_CACHE_LOCATION:-./model-cache}:/cache'
    env_file:
      - .env
    restart: unless-stopped
    # healthcheck:
    #   disable: false
    logging:
      driver: 'json-file'
      options:
        max-size: '${LOG_MAX_SIZE:-5m}'
        max-file: '${LOG_MAX_FILE:-5}'
  redis:
    container_name: immich_redis
    image: 'docker.io/redis:6.2-alpine@sha256:2ba50e1ac3a0ea17b736ce9db2b0a9f6f8b85d4c27d5f5accc6a416d8f42c6d5'
    healthcheck:
      test: redis-cli ping || exit 1
    restart: unless-stopped
    logging:
      driver: 'json-file'
      options:
        max-size: '${LOG_MAX_SIZE:-5m}'
        max-file: '${LOG_MAX_FILE:-5}'
  database:
    container_name: immich_postgres
    image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
    environment:
      POSTGRES_PASSWORD: '${DB_PASSWORD}'
      POSTGRES_USER: '${DB_USERNAME}'
      POSTGRES_DB: '${DB_DATABASE_NAME}'
      POSTGRES_INITDB_ARGS: '--data-checksums'
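    # DB_PASSWORD, DB_USERNAME and DB_DATABASE_NAME have no fallback defaults
    # here and must be set in .env, e.g. (hypothetical values, choose your own
    # password):
    #   DB_USERNAME=postgres
    #   DB_DATABASE_NAME=immich
    #   DB_PASSWORD=change-me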
    volumes:
      - '${DB_DATA_LOCATION:-./db}:/var/lib/postgresql/data'
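    # The healthcheck below first verifies that Postgres accepts connections
    # (pg_isready), then sums checksum_failures across pg_stat_database
    # (checksums are enabled via --data-checksums above) and fails the check
    # if any corruption has been detected.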
    healthcheck:
      test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
      interval: 5m
      #start_interval: 30s
      start_period: 5m
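    # The command below loads the pgvecto-rs extension (vectors.so) used by
    # Immich's vector search, adds its schema to the search path, and applies
    # basic logging/WAL/memory tuning.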
    command:
      [
        'postgres',
        '-c',
        'shared_preload_libraries=vectors.so',
        '-c',
        'search_path="$$user", public, vectors',
        '-c',
        'logging_collector=on',
        '-c',
        'max_wal_size=2GB',
        '-c',
        'shared_buffers=512MB',
        '-c',
        'wal_compression=on',
      ]
    restart: unless-stopped
    logging:
      driver: 'json-file'
      options:
        max-size: '${LOG_MAX_SIZE:-5m}'
        max-file: '${LOG_MAX_FILE:-5}'