x-shared-env: &shared-api-worker-env
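# `x-shared-env` is a YAML anchor: the API-family services below pull in this
# shared environment block via the YAML merge key, so a variable defined once
# here is inherited by api, worker, worker_beat, and plugin_daemon alike.
# A minimal sketch of the mechanism (illustrative, not part of this file):
#
#   x-shared-env: &shared-api-worker-env
#     LOG_LEVEL: ${LOG_LEVEL:-INFO}
#   services:
#     api:
#       environment:
#         <<: *shared-api-worker-env  # expands to LOG_LEVEL here
#         MODE: api                   # service-specific keys are added after the merge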
services:
  # API service
  api:
    image: langgenius/dify-api:1.9.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'api' starts the API server.
      MODE: api
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
      PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
      PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_started
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default
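  # Every `${VAR:-default}` above is resolved from the .env file next to this
  # compose file (or from the shell environment), falling back to the value
  # after `:-` when unset. For example, to double the allowed plugin package
  # size, an .env entry like the following would do (illustrative value):
  #
  #   PLUGIN_MAX_PACKAGE_SIZE=104857600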

  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:1.9.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker' starts the Celery worker for processing the queue.
      MODE: worker
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_started
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default

  # worker_beat service
  # Celery beat for scheduling periodic tasks.
  worker_beat:
    image: langgenius/dify-api:1.9.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'beat' starts the Celery beat for scheduling periodic tasks.
      MODE: beat
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_started
    networks:
      - ssrf_proxy_network
      - default

  # Frontend web application.
  web:
    image: langgenius/dify-web:1.9.0
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
      APP_API_URL: ${APP_API_URL:-}
      SENTRY_DSN: ${WEB_SENTRY_DSN:-}
      NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
      TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
      CSP_WHITELIST: ${CSP_WHITELIST:-}
      ALLOW_EMBED: ${ALLOW_EMBED:-false}
      ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
      MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai}
      TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
      INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
      PM2_INSTANCES: ${PM2_INSTANCES:-2}
      LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100}
      MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10}
      MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10}
      MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99}
      MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50}
      ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true}
      ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true}
      ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true}
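  # When CONSOLE_API_URL and APP_API_URL are left empty, the browser calls the
  # API on the same origin and nginx (below) proxies it to the api service.
  # Set them only when the API is served from a separate domain, e.g. in .env
  # (illustrative values):
  #
  #   CONSOLE_API_URL=https://dify-api.example.com
  #   APP_API_URL=https://dify-api.example.com
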
  # The postgres database.
  db:
    image: postgres:15-alpine
    restart: always
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
      POSTGRES_DB: ${POSTGRES_DB:-dify}
      PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
    command: >
      postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
      -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
      -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
      -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
      -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
    volumes:
      - ./volumes/db/data:/var/lib/postgresql/data
    healthcheck:
      test:
        [
          "CMD",
          "pg_isready",
          "-h",
          "db",
          "-U",
          "${PGUSER:-postgres}",
          "-d",
          "${POSTGRES_DB:-dify}",
        ]
      interval: 1s
      timeout: 3s
      retries: 60
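  # The -c flags above let you tune PostgreSQL without editing this file.
  # On a host with more RAM, .env overrides might look like this (illustrative
  # values, not a recommendation):
  #
  #   POSTGRES_MAX_CONNECTIONS=200
  #   POSTGRES_SHARED_BUFFERS=512MB
  #   POSTGRES_EFFECTIVE_CACHE_SIZE=8192MB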

  # The redis cache.
  redis:
    image: redis:6-alpine
    restart: always
    environment:
      REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
    volumes:
      # Mount the redis data directory to the container.
      - ./volumes/redis/data:/data
    # Set the redis password when starting the redis server.
    command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG",
        ]

  # The DifySandbox
  sandbox:
    image: langgenius/dify-sandbox:0.2.12
    restart: always
    environment:
      # The DifySandbox configurations
      # Make sure to change this key to a strong one for your deployment.
      # You can generate a strong key using `openssl rand -base64 42`.
      API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
      GIN_MODE: ${SANDBOX_GIN_MODE:-release}
      WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
      ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
      HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
      HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
      PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
    volumes:
      - ./volumes/sandbox/dependencies:/dependencies
      - ./volumes/sandbox/conf:/conf
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8194/health"]
    networks:
      - ssrf_proxy_network
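  # A quick way to generate and persist a strong sandbox key (illustrative
  # shell one-liner, run from the directory containing .env):
  #
  #   echo "SANDBOX_API_KEY=$(openssl rand -base64 42)" >> .env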

  # plugin daemon
  plugin_daemon:
    image: langgenius/dify-plugin-daemon:0.3.0-local
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
      SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
      SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
      MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
      DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
      DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
      PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
      FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
      PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
      PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
      PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024}
      PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880}
      PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
      PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
      PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
      PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin}
      PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
      PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
      PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
      S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
      S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false}
      S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
      S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
      AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
      AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-}
      AWS_REGION: ${PLUGIN_AWS_REGION:-}
      AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-}
      AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-}
      TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-}
      TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-}
      TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-}
      ALIYUN_OSS_REGION: ${PLUGIN_ALIYUN_OSS_REGION:-}
      ALIYUN_OSS_ENDPOINT: ${PLUGIN_ALIYUN_OSS_ENDPOINT:-}
      ALIYUN_OSS_ACCESS_KEY_ID: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID:-}
      ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-}
      ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4}
      ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-}
      VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-}
      VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-}
      VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-}
      VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-}
      SENTRY_ENABLED: ${PLUGIN_SENTRY_ENABLED:-false}
      SENTRY_DSN: ${PLUGIN_SENTRY_DSN:-}
    ports:
      - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
    volumes:
      - ./volumes/plugin_daemon:/app/storage
    depends_on:
      db:
        condition: service_healthy
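  # DIFY_INNER_API_KEY here must match INNER_API_KEY_FOR_PLUGIN in the api
  # service; both default to the same PLUGIN_DIFY_INNER_API_KEY variable, so a
  # single .env override keeps the two sides in sync (illustrative placeholder):
  #
  #   PLUGIN_DIFY_INNER_API_KEY=replace-with-a-strong-random-key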

  # ssrf_proxy server
  # for more information, please refer to
  # https://docs.dify.ai/learn-more/faq/install-faq#18-why-is-ssrf-proxy-needed%3F
  ssrf_proxy:
    image: ubuntu/squid:latest
    restart: always
    volumes:
      - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
      - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
    entrypoint:
      [
        "sh",
        "-c",
        "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
      ]
    environment:
      # Please modify the squid environment variables below to fit your network environment.
      HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
      COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
      REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
      SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    networks:
      - ssrf_proxy_network
      - default

  # Certbot service
  # use `docker-compose --profile certbot up` to start the certbot service.
  certbot:
    image: certbot/certbot
    profiles:
      - certbot
    volumes:
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
      - ./volumes/certbot/logs:/var/log/letsencrypt
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live
      - ./certbot/update-cert.template.txt:/update-cert.template.txt
      - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
    environment:
      - CERTBOT_EMAIL=${CERTBOT_EMAIL}
      - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
      - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
    entrypoint: ["/docker-entrypoint.sh"]
    command: ["tail", "-f", "/dev/null"]

  # The nginx reverse proxy.
  # Used for reverse proxying the API service and Web service.
  nginx:
    image: nginx:latest
    restart: always
    volumes:
      - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
      - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
      - ./nginx/https.conf.template:/etc/nginx/https.conf.template
      - ./nginx/conf.d:/etc/nginx/conf.d
      - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - ./nginx/ssl:/etc/ssl # cert dir (legacy)
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
    entrypoint:
      [
        "sh",
        "-c",
        "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
      ]
    environment:
      NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
      NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
      NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
      NGINX_PORT: ${NGINX_PORT:-80}
      # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
      # and modify the env vars below in .env if NGINX_HTTPS_ENABLED is true.
      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M}
      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
      NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
      NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
      NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
      CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
    depends_on:
      - api
      - web
    ports:
      - "${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}"
      - "${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}"

  # The Weaviate vector store.
  weaviate:
    image: semitechnologies/weaviate:1.19.0
    profiles:
      - ""
      - weaviate
    restart: always
    volumes:
      # Mount the Weaviate data directory to the container.
      - ./volumes/weaviate:/var/lib/weaviate
    environment:
      # The Weaviate configurations
      # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
      PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
      QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
      DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
      CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
      AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
      AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
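  # The empty string in `profiles` places weaviate in the default profile, so
  # it starts with a plain `docker compose up -d`; each vector store below
  # only starts when its named profile is explicitly requested.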

  # Qdrant vector store.
  # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
  qdrant:
    image: langgenius/qdrant:v1.7.3
    profiles:
      - qdrant
    restart: always
    volumes:
      - ./volumes/qdrant:/qdrant/storage
    environment:
      QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
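  # Switching stores is a two-step change, e.g. for Qdrant (illustrative):
  #
  #   # in .env
  #   VECTOR_STORE=qdrant
  #   # then start with the matching profile
  #   docker compose --profile qdrant up -d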

  # The Couchbase vector store.
  couchbase-server:
    build: ./couchbase-server
    profiles:
      - couchbase
    restart: always
    environment:
      - CLUSTER_NAME=dify_search
      - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
      - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
      - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
      - COUCHBASE_BUCKET_RAMSIZE=512
      - COUCHBASE_RAM_SIZE=2048
      - COUCHBASE_EVENTING_RAM_SIZE=512
      - COUCHBASE_INDEX_RAM_SIZE=512
      - COUCHBASE_FTS_RAM_SIZE=1024
    hostname: couchbase-server
    container_name: couchbase-server
    working_dir: /opt/couchbase
    stdin_open: true
    tty: true
    entrypoint: [""]
    command: sh -c "/opt/couchbase/init/init-cbserver.sh"
    volumes:
      - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
    healthcheck:
      # ensure the bucket has been created before proceeding
      test:
        [
          "CMD-SHELL",
          "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1",
        ]
      interval: 10s
      retries: 10
      start_period: 30s
      timeout: 10s

  # The pgvector vector database.
  pgvector:
    image: pgvector/pgvector:pg16
    profiles:
      - pgvector
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
      # pg_bigm module for full text search
      PG_BIGM: ${PGVECTOR_PG_BIGM:-false}
      PG_BIGM_VERSION: ${PGVECTOR_PG_BIGM_VERSION:-1.2-20240606}
    volumes:
      - ./volumes/pgvector/data:/var/lib/postgresql/data
      - ./pgvector/docker-entrypoint.sh:/docker-entrypoint.sh
    entrypoint: ["/docker-entrypoint.sh"]
    healthcheck:
      test: ["CMD", "pg_isready"]
      interval: 1s
      timeout: 3s
      retries: 30

  # get image from https://www.vastdata.com.cn/
  vastbase:
    image: vastdata/vastbase-vector
    profiles:
      - vastbase
    restart: always
    environment:
      - VB_DBCOMPATIBILITY=PG
      - VB_DB=dify
      - VB_USERNAME=dify
      - VB_PASSWORD=Difyai123456
    ports:
      - "5434:5432"
    volumes:
      - ./vastbase/lic:/home/vastbase/vastbase/lic
      - ./vastbase/data:/home/vastbase/data
      - ./vastbase/backup:/home/vastbase/backup
      - ./vastbase/backup_log:/home/vastbase/backup_log
    healthcheck:
      test: ["CMD", "pg_isready"]
      interval: 1s
      timeout: 3s
      retries: 30

  # pgvecto-rs vector store
  pgvecto-rs:
    image: tensorchord/pgvecto-rs:pg16-v0.3.0
    profiles:
      - pgvecto-rs
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD", "pg_isready"]
      interval: 1s
      timeout: 3s
      retries: 30

  # Chroma vector database
  chroma:
    image: ghcr.io/chroma-core/chroma:0.5.20
    profiles:
      - chroma
    restart: always
    volumes:
      - ./volumes/chroma:/chroma/chroma
    environment:
      CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
      CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
      IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}

  # OceanBase vector database
  oceanbase:
    image: oceanbase/oceanbase-ce:4.3.5-lts
    container_name: oceanbase
    profiles:
      - oceanbase
    restart: always
    volumes:
      - ./volumes/oceanbase/data:/root/ob
      - ./volumes/oceanbase/conf:/root/.obd/cluster
      - ./volumes/oceanbase/init.d:/root/boot/init.d
    environment:
      OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
      OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
      OB_SERVER_IP: 127.0.0.1
      MODE: mini
      LANG: en_US.UTF-8
    ports:
      - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          'obclient -h127.0.0.1 -P2881 -uroot@test -p$${OB_TENANT_PASSWORD} -e "SELECT 1;"',
        ]
      interval: 10s
      retries: 30
      start_period: 30s
      timeout: 10s

  # Oracle vector database
  oracle:
    image: container-registry.oracle.com/database/free:latest
    profiles:
      - oracle
    restart: always
    volumes:
      - source: oradata
        type: volume
        target: /opt/oracle/oradata
      - ./startupscripts:/opt/oracle/scripts/startup
    environment:
      ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
      ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}

  # Milvus vector database services
  etcd:
    container_name: milvus-etcd
    image: quay.io/coreos/etcd:v3.5.5
    profiles:
      - milvus
    environment:
      ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
      ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
      ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
      ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
    volumes:
      - ./volumes/milvus/etcd:/etcd
    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
    healthcheck:
      test: ["CMD", "etcdctl", "endpoint", "health"]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus

  minio:
    container_name: milvus-minio
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    profiles:
      - milvus
    environment:
      MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
      MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
    volumes:
      - ./volumes/milvus/minio:/minio_data
    command: minio server /minio_data --console-address ":9001"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus

  milvus-standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.5.15
    profiles:
      - milvus
    command: ["milvus", "run", "standalone"]
    environment:
      ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
      MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
      common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
    volumes:
      - ./volumes/milvus/milvus:/var/lib/milvus
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
      interval: 30s
      start_period: 90s
      timeout: 20s
      retries: 3
    depends_on:
      - etcd
      - minio
    ports:
      - 19530:19530
      - 9091:9091
    networks:
      - milvus
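  # Milvus is a three-container stack: etcd stores metadata, minio holds the
  # segment files, and milvus-standalone serves queries on port 19530. All
  # three share the `milvus` profile and network, so one command brings them
  # up together (illustrative):
  #
  #   docker compose --profile milvus up -d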

  # Opensearch vector database
  opensearch:
    container_name: opensearch
    image: opensearchproject/opensearch:latest
    profiles:
      - opensearch
    environment:
      discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
      bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
      OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
      OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
    ulimits:
      memlock:
        soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
        hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
      nofile:
        soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
        hard: ${OPENSEARCH_NOFILE_HARD:-65536}
    volumes:
      - ./volumes/opensearch/data:/usr/share/opensearch/data
    networks:
      - opensearch-net

  opensearch-dashboards:
    container_name: opensearch-dashboards
    image: opensearchproject/opensearch-dashboards:latest
    profiles:
      - opensearch
    environment:
      OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
    volumes:
      - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
    networks:
      - opensearch-net
    depends_on:
      - opensearch

  # opengauss vector database.
  opengauss:
    image: opengauss/opengauss:7.0.0-RC1
    profiles:
      - opengauss
    privileged: true
    restart: always
    environment:
      GS_USERNAME: ${OPENGAUSS_USER:-postgres}
      GS_PASSWORD: ${OPENGAUSS_PASSWORD:-Dify@123}
      GS_PORT: ${OPENGAUSS_PORT:-6600}
      GS_DB: ${OPENGAUSS_DATABASE:-dify}
    volumes:
      - ./volumes/opengauss/data:/var/lib/opengauss/data
    healthcheck:
      test: ["CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1"]
      interval: 10s
      timeout: 10s
      retries: 10
    ports:
      - ${OPENGAUSS_PORT:-6600}:${OPENGAUSS_PORT:-6600}

  # MyScale vector database
  myscale:
    container_name: myscale
    image: myscale/myscaledb:1.6.4
    profiles:
      - myscale
    restart: always
    tty: true
    volumes:
      - ./volumes/myscale/data:/var/lib/clickhouse
      - ./volumes/myscale/log:/var/log/clickhouse-server
      - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
    ports:
      - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}

  # Matrixone vector store.
  matrixone:
    hostname: matrixone
    image: matrixorigin/matrixone:2.1.1
    profiles:
      - matrixone
    restart: always
    volumes:
      - ./volumes/matrixone/data:/mo-data
    ports:
      - ${MATRIXONE_PORT:-6001}:${MATRIXONE_PORT:-6001}

  # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
    container_name: elasticsearch
    profiles:
      - elasticsearch
      - elasticsearch-ja
    restart: always
    volumes:
      - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - dify_es01_data:/usr/share/elasticsearch/data
    environment:
      ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
      VECTOR_STORE: ${VECTOR_STORE:-}
      cluster.name: dify-es-cluster
      node.name: dify-es0
      discovery.type: single-node
      xpack.license.self_generated.type: basic
      xpack.security.enabled: "true"
      xpack.security.enrollment.enabled: "false"
      xpack.security.http.ssl.enabled: "false"
    ports:
      - ${ELASTICSEARCH_PORT:-9200}:9200
    deploy:
      resources:
        limits:
          memory: 2g
    entrypoint: ["sh", "-c", "sh /docker-entrypoint-mount.sh"]
    healthcheck:
      test: ["CMD", "curl", "-s", "http://localhost:9200/_cluster/health?pretty"]
      interval: 30s
      timeout: 10s
      retries: 50

  # https://www.elastic.co/guide/en/kibana/current/docker.html
  # https://www.elastic.co/guide/en/kibana/current/settings.html
  kibana:
    image: docker.elastic.co/kibana/kibana:8.14.3
    container_name: kibana
    profiles:
      - elasticsearch
    depends_on:
      - elasticsearch
    restart: always
    environment:
      XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
      NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
      XPACK_SECURITY_ENABLED: "true"
      XPACK_SECURITY_ENROLLMENT_ENABLED: "false"
      XPACK_SECURITY_HTTP_SSL_ENABLED: "false"
      XPACK_FLEET_ISAIRGAPPED: "true"
      I18N_LOCALE: zh-CN
      SERVER_PORT: "5601"
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    ports:
      - ${KIBANA_PORT:-5601}:5601
    healthcheck:
      test: ["CMD-SHELL", "curl -s http://localhost:5601 >/dev/null || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3

  # The Unstructured ETL service.
  # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
  unstructured:
    image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
    profiles:
      - unstructured
    restart: always
    volumes:
      - ./volumes/unstructured:/app/data
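  # Example of wiring it up (illustrative; the URL path is an assumption,
  # check your .env for the exact variable values):
  #
  #   # in .env
  #   ETL_TYPE=Unstructured
  #   UNSTRUCTURED_API_URL=http://unstructured:8000/general/v0/general
  #   # then
  #   docker compose --profile unstructured up -d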

networks:
  # An internal network shared by sandbox, api, and ssrf_proxy; it cannot reach the outside.
  ssrf_proxy_network:
    driver: bridge
    internal: true
  milvus:
    driver: bridge
  opensearch-net:
    driver: bridge
    internal: true

volumes:
  oradata:
  dify_es01_data: