# docker-compose-template.yaml
# NOTE(review): the original paste carried web-viewer residue here (UI text,
# file size, and a fused run of line numbers); replaced with this comment
# header so the file parses as YAML.
  1. x-shared-env: &shared-api-worker-env
  2. services:
  3. # API service
  4. api:
  5. image: langgenius/dify-api:1.4.3
  6. restart: always
  7. environment:
  8. # Use the shared environment variables.
  9. <<: *shared-api-worker-env
  10. # Startup mode, 'api' starts the API server.
  11. MODE: api
  12. SENTRY_DSN: ${API_SENTRY_DSN:-}
  13. SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
  14. SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
  15. PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
  16. PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
  17. PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
  18. INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
  19. depends_on:
  20. db:
  21. condition: service_healthy
  22. redis:
  23. condition: service_started
  24. volumes:
  25. # Mount the storage directory to the container, for storing user files.
  26. - ./volumes/app/storage:/app/api/storage
  27. networks:
  28. - ssrf_proxy_network
  29. - default
  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:1.4.3
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker' starts the Celery worker for processing the queue.
      MODE: worker
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_started
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default
  # Frontend web application.
  web:
    image: langgenius/dify-web:1.4.3
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
      APP_API_URL: ${APP_API_URL:-}
      SENTRY_DSN: ${WEB_SENTRY_DSN:-}
      NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
      TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
      CSP_WHITELIST: ${CSP_WHITELIST:-}
      ALLOW_EMBED: ${ALLOW_EMBED:-false}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
      MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai}
      TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
      INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
      PM2_INSTANCES: ${PM2_INSTANCES:-2}
      LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100}
      MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10}
      MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10}
      MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99}
      ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true}
      ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true}
      ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true}
  # The postgres database.
  db:
    image: postgres:15-alpine
    restart: always
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
      POSTGRES_DB: ${POSTGRES_DB:-dify}
      PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
    command: >
      postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
      -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
      -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
      -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
      -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
    volumes:
      - ./volumes/db/data:/var/lib/postgresql/data
    healthcheck:
      test: [ 'CMD', 'pg_isready', '-h', 'db', '-U', '${PGUSER:-postgres}', '-d', '${POSTGRES_DB:-dify}' ]
      interval: 1s
      timeout: 3s
      retries: 60
  # The redis cache.
  redis:
    image: redis:6-alpine
    restart: always
    environment:
      REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
    volumes:
      # Mount the redis data directory to the container.
      - ./volumes/redis/data:/data
    # Set the redis password when startup redis server.
    command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
    healthcheck:
      test: [ 'CMD', 'redis-cli', 'ping' ]
  # The DifySandbox
  sandbox:
    image: langgenius/dify-sandbox:0.2.12
    restart: always
    environment:
      # The DifySandbox configurations
      # Make sure you are changing this key for your deployment with a strong key.
      # You can generate a strong key using `openssl rand -base64 42`.
      API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
      GIN_MODE: ${SANDBOX_GIN_MODE:-release}
      WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
      ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
      HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
      HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
      PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
    volumes:
      - ./volumes/sandbox/dependencies:/dependencies
      - ./volumes/sandbox/conf:/conf
    healthcheck:
      test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ]
    networks:
      - ssrf_proxy_network
  # plugin daemon
  plugin_daemon:
    image: langgenius/dify-plugin-daemon:0.1.2-local
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
      SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
      SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
      MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
      DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
      DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
      PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
      FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
      PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
      PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
      PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
      PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
      PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
      PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin}
      PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
      PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
      PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
      S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
      S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-}
      S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
      S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
      AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
      AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-}
      AWS_REGION: ${PLUGIN_AWS_REGION:-}
      AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-}
      AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-}
      TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-}
      TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-}
      TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-}
      ALIYUN_OSS_REGION: ${PLUGIN_ALIYUN_OSS_REGION:-}
      ALIYUN_OSS_ENDPOINT: ${PLUGIN_ALIYUN_OSS_ENDPOINT:-}
      ALIYUN_OSS_ACCESS_KEY_ID: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID:-}
      ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-}
      ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4}
      ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-}
      VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-}
      VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-}
      VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-}
      VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-}
    ports:
      - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
    volumes:
      - ./volumes/plugin_daemon:/app/storage
    depends_on:
      db:
        condition: service_healthy
  # ssrf_proxy server
  # for more information, please refer to
  # https://docs.dify.ai/learn-more/faq/install-faq#18-why-is-ssrf-proxy-needed%3F
  ssrf_proxy:
    image: ubuntu/squid:latest
    restart: always
    volumes:
      - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
      - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
    entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
    environment:
      # pls clearly modify the squid env vars to fit your network environment.
      HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
      COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
      REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
      SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    networks:
      - ssrf_proxy_network
      - default
  # Certbot service
  # use `docker-compose --profile certbot up` to start the certbot service.
  certbot:
    image: certbot/certbot
    profiles:
      - certbot
    volumes:
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
      - ./volumes/certbot/logs:/var/log/letsencrypt
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live
      - ./certbot/update-cert.template.txt:/update-cert.template.txt
      - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
    environment:
      - CERTBOT_EMAIL=${CERTBOT_EMAIL}
      - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
      - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
    entrypoint: [ '/docker-entrypoint.sh' ]
    command: [ 'tail', '-f', '/dev/null' ]
  # The nginx reverse proxy.
  # used for reverse proxying the API service and Web service.
  nginx:
    image: nginx:latest
    restart: always
    volumes:
      - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
      - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
      - ./nginx/https.conf.template:/etc/nginx/https.conf.template
      - ./nginx/conf.d:/etc/nginx/conf.d
      - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - ./nginx/ssl:/etc/ssl # cert dir (legacy)
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
    entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
    environment:
      NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
      NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
      NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
      NGINX_PORT: ${NGINX_PORT:-80}
      # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
      # and modify the env vars below in .env if HTTPS_ENABLED is true.
      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
      NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
      NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
      NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
      CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
    depends_on:
      - api
      - web
    ports:
      - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
      - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'
  # The Weaviate vector store.
  weaviate:
    image: semitechnologies/weaviate:1.19.0
    profiles:
      - ''
      - weaviate
    restart: always
    volumes:
      # Mount the Weaviate data directory to the container.
      - ./volumes/weaviate:/var/lib/weaviate
    environment:
      # The Weaviate configurations
      # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
      PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
      QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
      DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
      CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
      AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
      AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
  # Qdrant vector store.
  # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
  qdrant:
    image: langgenius/qdrant:v1.7.3
    profiles:
      - qdrant
    restart: always
    volumes:
      - ./volumes/qdrant:/qdrant/storage
    environment:
      QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
  # The Couchbase vector store.
  couchbase-server:
    build: ./couchbase-server
    profiles:
      - couchbase
    restart: always
    environment:
      - CLUSTER_NAME=dify_search
      - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
      - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
      - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
      - COUCHBASE_BUCKET_RAMSIZE=512
      - COUCHBASE_RAM_SIZE=2048
      - COUCHBASE_EVENTING_RAM_SIZE=512
      - COUCHBASE_INDEX_RAM_SIZE=512
      - COUCHBASE_FTS_RAM_SIZE=1024
    hostname: couchbase-server
    container_name: couchbase-server
    working_dir: /opt/couchbase
    stdin_open: true
    tty: true
    entrypoint: [ "" ]
    command: sh -c "/opt/couchbase/init/init-cbserver.sh"
    volumes:
      - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
    healthcheck:
      # ensure bucket was created before proceeding
      test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ]
      interval: 10s
      retries: 10
      start_period: 30s
      timeout: 10s
  # The pgvector vector database.
  pgvector:
    image: pgvector/pgvector:pg16
    profiles:
      - pgvector
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
      # pg_bigm module for full text search
      PG_BIGM: ${PGVECTOR_PG_BIGM:-false}
      PG_BIGM_VERSION: ${PGVECTOR_PG_BIGM_VERSION:-1.2-20240606}
    volumes:
      - ./volumes/pgvector/data:/var/lib/postgresql/data
      - ./pgvector/docker-entrypoint.sh:/docker-entrypoint.sh
    entrypoint: [ '/docker-entrypoint.sh' ]
    healthcheck:
      test: [ 'CMD', 'pg_isready' ]
      interval: 1s
      timeout: 3s
      retries: 30
  # get image from https://www.vastdata.com.cn/
  vastbase:
    image: vastdata/vastbase-vector
    profiles:
      - vastbase
    restart: always
    environment:
      - VB_DBCOMPATIBILITY=PG
      - VB_DB=dify
      - VB_USERNAME=dify
      - VB_PASSWORD=Difyai123456
    ports:
      - '5434:5432'
    volumes:
      - ./vastbase/lic:/home/vastbase/vastbase/lic
      - ./vastbase/data:/home/vastbase/data
      - ./vastbase/backup:/home/vastbase/backup
      - ./vastbase/backup_log:/home/vastbase/backup_log
    healthcheck:
      test: [ 'CMD', 'pg_isready' ]
      interval: 1s
      timeout: 3s
      retries: 30
  # pgvecto-rs vector store
  pgvecto-rs:
    image: tensorchord/pgvecto-rs:pg16-v0.3.0
    profiles:
      - pgvecto-rs
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
    healthcheck:
      test: [ 'CMD', 'pg_isready' ]
      interval: 1s
      timeout: 3s
      retries: 30
  # Chroma vector database
  chroma:
    image: ghcr.io/chroma-core/chroma:0.5.20
    profiles:
      - chroma
    restart: always
    volumes:
      - ./volumes/chroma:/chroma/chroma
    environment:
      CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
      CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
      IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
  # OceanBase vector database
  oceanbase:
    image: oceanbase/oceanbase-ce:4.3.5-lts
    container_name: oceanbase
    profiles:
      - oceanbase
    restart: always
    volumes:
      - ./volumes/oceanbase/data:/root/ob
      - ./volumes/oceanbase/conf:/root/.obd/cluster
      - ./volumes/oceanbase/init.d:/root/boot/init.d
    environment:
      OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
      OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
      OB_SERVER_IP: 127.0.0.1
      MODE: mini
    ports:
      - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
    healthcheck:
      test: [ 'CMD-SHELL', 'obclient -h127.0.0.1 -P2881 -uroot@test -p$${OB_TENANT_PASSWORD} -e "SELECT 1;"' ]
      interval: 10s
      retries: 30
      start_period: 30s
      timeout: 10s
  # Oracle vector database
  oracle:
    image: container-registry.oracle.com/database/free:latest
    profiles:
      - oracle
    restart: always
    volumes:
      # Long volume syntax: named volume `oradata` (declared under top-level `volumes:`).
      - source: oradata
        type: volume
        target: /opt/oracle/oradata
      - ./startupscripts:/opt/oracle/scripts/startup
    environment:
      ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
      ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
  # Milvus vector database services
  etcd:
    container_name: milvus-etcd
    image: quay.io/coreos/etcd:v3.5.5
    profiles:
      - milvus
    environment:
      ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
      ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
      ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
      ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
    volumes:
      - ./volumes/milvus/etcd:/etcd
    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
    healthcheck:
      test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus
  minio:
    container_name: milvus-minio
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    profiles:
      - milvus
    environment:
      MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
      MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
    volumes:
      - ./volumes/milvus/minio:/minio_data
    command: minio server /minio_data --console-address ":9001"
    healthcheck:
      test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus
  milvus-standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.5.0-beta
    profiles:
      - milvus
    command: [ 'milvus', 'run', 'standalone' ]
    environment:
      ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
      MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
      common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
    volumes:
      - ./volumes/milvus/milvus:/var/lib/milvus
    healthcheck:
      test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ]
      interval: 30s
      start_period: 90s
      timeout: 20s
      retries: 3
    depends_on:
      - etcd
      - minio
    ports:
      # Quoted to avoid YAML's ambiguous colon-separated scalar parsing.
      - '19530:19530'
      - '9091:9091'
    networks:
      - milvus
  # Opensearch vector database
  opensearch:
    container_name: opensearch
    image: opensearchproject/opensearch:latest
    profiles:
      - opensearch
    environment:
      discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
      bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
      OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
      OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
    ulimits:
      memlock:
        soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
        hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
      nofile:
        soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
        hard: ${OPENSEARCH_NOFILE_HARD:-65536}
    volumes:
      - ./volumes/opensearch/data:/usr/share/opensearch/data
    networks:
      - opensearch-net
  opensearch-dashboards:
    container_name: opensearch-dashboards
    image: opensearchproject/opensearch-dashboards:latest
    profiles:
      - opensearch
    environment:
      OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
    volumes:
      - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
    networks:
      - opensearch-net
    depends_on:
      - opensearch
  # opengauss vector database.
  opengauss:
    image: opengauss/opengauss:7.0.0-RC1
    profiles:
      - opengauss
    privileged: true
    restart: always
    environment:
      GS_USERNAME: ${OPENGAUSS_USER:-postgres}
      GS_PASSWORD: ${OPENGAUSS_PASSWORD:-Dify@123}
      GS_PORT: ${OPENGAUSS_PORT:-6600}
      GS_DB: ${OPENGAUSS_DATABASE:-dify}
    volumes:
      - ./volumes/opengauss/data:/var/lib/opengauss/data
    healthcheck:
      test: [ "CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1" ]
      interval: 10s
      timeout: 10s
      retries: 10
    ports:
      # Quoted to avoid YAML's ambiguous colon-separated scalar parsing.
      - '${OPENGAUSS_PORT:-6600}:${OPENGAUSS_PORT:-6600}'
  # MyScale vector database
  myscale:
    container_name: myscale
    image: myscale/myscaledb:1.6.4
    profiles:
      - myscale
    restart: always
    tty: true
    volumes:
      - ./volumes/myscale/data:/var/lib/clickhouse
      - ./volumes/myscale/log:/var/log/clickhouse-server
      - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
    ports:
      - '${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}'
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
    container_name: elasticsearch
    profiles:
      - elasticsearch
      - elasticsearch-ja
    restart: always
    volumes:
      - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - dify_es01_data:/usr/share/elasticsearch/data
    environment:
      ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
      VECTOR_STORE: ${VECTOR_STORE:-}
      cluster.name: dify-es-cluster
      node.name: dify-es0
      discovery.type: single-node
      xpack.license.self_generated.type: basic
      xpack.security.enabled: 'true'
      xpack.security.enrollment.enabled: 'false'
      xpack.security.http.ssl.enabled: 'false'
    ports:
      # Quoted to avoid YAML's ambiguous colon-separated scalar parsing.
      - '${ELASTICSEARCH_PORT:-9200}:9200'
    deploy:
      resources:
        limits:
          memory: 2g
    entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ]
    healthcheck:
      test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ]
      interval: 30s
      timeout: 10s
      retries: 50
  # https://www.elastic.co/guide/en/kibana/current/docker.html
  # https://www.elastic.co/guide/en/kibana/current/settings.html
  kibana:
    image: docker.elastic.co/kibana/kibana:8.14.3
    container_name: kibana
    profiles:
      - elasticsearch
    depends_on:
      - elasticsearch
    restart: always
    environment:
      XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
      NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
      XPACK_SECURITY_ENABLED: 'true'
      XPACK_SECURITY_ENROLLMENT_ENABLED: 'false'
      XPACK_SECURITY_HTTP_SSL_ENABLED: 'false'
      XPACK_FLEET_ISAIRGAPPED: 'true'
      I18N_LOCALE: zh-CN
      SERVER_PORT: '5601'
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    ports:
      - '${KIBANA_PORT:-5601}:5601'
    healthcheck:
      test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ]
      interval: 30s
      timeout: 10s
      retries: 3
  # unstructured .
  # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
  unstructured:
    image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
    profiles:
      - unstructured
    restart: always
    volumes:
      - ./volumes/unstructured:/app/data

networks:
  # create a network between sandbox, api and ssrf_proxy, and can not access outside.
  ssrf_proxy_network:
    driver: bridge
    internal: true
  milvus:
    driver: bridge
  opensearch-net:
    driver: bridge
    internal: true

volumes:
  oradata:
  dify_es01_data: