
docker-compose-template.yaml
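# Note: this is a compose *template*; the `x-shared-env` extension field below is declared as
# an empty YAML anchor here and is presumably filled with the shared environment variables when
# the final docker-compose.yaml is generated. A minimal sketch of the anchor/merge pattern it
# relies on (the variable name is illustrative only):
#
#   x-shared-env: &shared-api-worker-env
#     LOG_LEVEL: ${LOG_LEVEL:-INFO}
#   services:
#     api:
#       environment:
#         # merge the anchored mapping into this service's environment
#         <<: *shared-api-worker-env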

x-shared-env: &shared-api-worker-env
services:
  # API service
  api:
    image: langgenius/dify-api:0.15.7
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'api' starts the API server.
      MODE: api
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
    depends_on:
      - db
      - redis
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default
  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:0.15.7
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker' starts the Celery worker for processing the queue.
      MODE: worker
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
    depends_on:
      - db
      - redis
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default
  # Frontend web application.
  web:
    image: langgenius/dify-web:0.15.7
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
      APP_API_URL: ${APP_API_URL:-}
      SENTRY_DSN: ${WEB_SENTRY_DSN:-}
      NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
      TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
      CSP_WHITELIST: ${CSP_WHITELIST:-}
      ALLOW_EMBED: ${ALLOW_EMBED:-false}
      TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
      INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
  # The postgres database.
  db:
    image: postgres:15-alpine
    restart: always
    environment:
      PGUSER: ${PGUSER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
      POSTGRES_DB: ${POSTGRES_DB:-dify}
      PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
    command: >
      postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
               -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
               -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
               -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
               -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
    volumes:
      - ./volumes/db/data:/var/lib/postgresql/data
    healthcheck:
      test: [ 'CMD', 'pg_isready' ]
      interval: 1s
      timeout: 3s
      retries: 30
  # The redis cache.
  redis:
    image: redis:6-alpine
    restart: always
    environment:
      REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
    volumes:
      # Mount the redis data directory to the container.
      - ./volumes/redis/data:/data
    # Set the redis password when starting the redis server.
    command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
    healthcheck:
      test: [ 'CMD', 'redis-cli', 'ping' ]
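  # Note: db and redis above ship with the placeholder password "difyai123456"; for anything
  # beyond a local test these should be overridden in .env, e.g. with a value generated by
  # `openssl rand -base64 42` (the same approach suggested below for the sandbox API key).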
  # The DifySandbox
  sandbox:
    image: langgenius/dify-sandbox:0.2.11
    restart: always
    environment:
      # The DifySandbox configurations
      # Make sure you are changing this key for your deployment with a strong key.
      # You can generate a strong key using `openssl rand -base64 42`.
      API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
      GIN_MODE: ${SANDBOX_GIN_MODE:-release}
      WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
      ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
      HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
      HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    volumes:
      - ./volumes/sandbox/dependencies:/dependencies
    healthcheck:
      test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ]
    networks:
      - ssrf_proxy_network
  # ssrf_proxy server
  # for more information, please refer to
  # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
  ssrf_proxy:
    image: ubuntu/squid:latest
    restart: always
    volumes:
      - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
      - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
    entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
    environment:
      # Please modify the squid environment variables below to fit your network environment.
      HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
      COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
      REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
      SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    networks:
      - ssrf_proxy_network
      - default
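  # Note: the entrypoint used by ssrf_proxy above (and by nginx below) copies the mounted
  # script, strips Windows-style \r line endings with sed, marks it executable and then runs
  # it; the doubled `$$` is Compose syntax for a literal `$`, so that expansion is left to the
  # shell inside the container rather than to docker compose.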
  # Certbot service
  # use `docker-compose --profile certbot up` to start the certbot service.
  certbot:
    image: certbot/certbot
    profiles:
      - certbot
    volumes:
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
      - ./volumes/certbot/logs:/var/log/letsencrypt
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live
      - ./certbot/update-cert.template.txt:/update-cert.template.txt
      - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
    environment:
      - CERTBOT_EMAIL=${CERTBOT_EMAIL}
      - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
      - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
    entrypoint: [ '/docker-entrypoint.sh' ]
    command: [ 'tail', '-f', '/dev/null' ]
  # The nginx reverse proxy.
  # used for reverse proxying the API service and Web service.
  nginx:
    image: nginx:latest
    restart: always
    volumes:
      - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
      - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
      - ./nginx/https.conf.template:/etc/nginx/https.conf.template
      - ./nginx/conf.d:/etc/nginx/conf.d
      - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - ./nginx/ssl:/etc/ssl # cert dir (legacy)
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
    entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
    environment:
      NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
      NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
      NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
      NGINX_PORT: ${NGINX_PORT:-80}
      # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
      # and modify the env vars below in .env if HTTPS_ENABLED is true.
      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
      NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
      NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
      NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
      CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
    depends_on:
      - api
      - web
    ports:
      - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
      - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'
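  # Note: every vector store below is gated behind a Compose profile, so only the one you
  # enable is started; weaviate is also listed under an empty ('') profile, which in the Dify
  # setup appears to make it the default store. When switching stores, the VECTOR_STORE
  # variable consumed by the api and worker services would typically be changed in .env to
  # match the chosen profile, e.g. (sketch):
  #   docker compose --profile qdrant up -d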
  # The TiDB vector store.
  # For production use, please refer to https://github.com/pingcap/tidb-docker-compose
  tidb:
    image: pingcap/tidb:v8.4.0
    profiles:
      - tidb
    command:
      - --store=unistore
    restart: always
  # The Weaviate vector store.
  weaviate:
    image: semitechnologies/weaviate:1.19.0
    profiles:
      - ''
      - weaviate
    restart: always
    volumes:
      # Mount the Weaviate data directory to the container.
      - ./volumes/weaviate:/var/lib/weaviate
    environment:
      # The Weaviate configurations
      # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
      PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
      QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
      DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
      CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
      AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
      AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
  # Qdrant vector store.
  # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
  qdrant:
    image: langgenius/qdrant:v1.7.3
    profiles:
      - qdrant
    restart: always
    volumes:
      - ./volumes/qdrant:/qdrant/storage
    environment:
      QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
  # The Couchbase vector store.
  couchbase-server:
    build: ./couchbase-server
    profiles:
      - couchbase
    restart: always
    environment:
      - CLUSTER_NAME=dify_search
      - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
      - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
      - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
      - COUCHBASE_BUCKET_RAMSIZE=512
      - COUCHBASE_RAM_SIZE=2048
      - COUCHBASE_EVENTING_RAM_SIZE=512
      - COUCHBASE_INDEX_RAM_SIZE=512
      - COUCHBASE_FTS_RAM_SIZE=1024
    hostname: couchbase-server
    container_name: couchbase-server
    working_dir: /opt/couchbase
    stdin_open: true
    tty: true
    entrypoint: [ "" ]
    command: sh -c "/opt/couchbase/init/init-cbserver.sh"
    volumes:
      - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
    healthcheck:
      # ensure bucket was created before proceeding
      test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ]
      interval: 10s
      retries: 10
      start_period: 30s
      timeout: 10s
  # The pgvector vector database.
  pgvector:
    image: pgvector/pgvector:pg16
    profiles:
      - pgvector
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvector/data:/var/lib/postgresql/data
    healthcheck:
      test: [ 'CMD', 'pg_isready' ]
      interval: 1s
      timeout: 3s
      retries: 30
  # pgvecto-rs vector store
  pgvecto-rs:
    image: tensorchord/pgvecto-rs:pg16-v0.3.0
    profiles:
      - pgvecto-rs
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
    healthcheck:
      test: [ 'CMD', 'pg_isready' ]
      interval: 1s
      timeout: 3s
      retries: 30
  # Chroma vector database
  chroma:
    image: ghcr.io/chroma-core/chroma:0.5.20
    profiles:
      - chroma
    restart: always
    volumes:
      - ./volumes/chroma:/chroma/chroma
    environment:
      CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
      CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
      IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
  # OceanBase vector database
  oceanbase:
    image: quay.io/oceanbase/oceanbase-ce:4.3.3.0-100000142024101215
    profiles:
      - oceanbase
    restart: always
    volumes:
      - ./volumes/oceanbase/data:/root/ob
      - ./volumes/oceanbase/conf:/root/.obd/cluster
      - ./volumes/oceanbase/init.d:/root/boot/init.d
    environment:
      OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
      OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
      OB_SERVER_IP: '127.0.0.1'
  # Oracle vector database
  oracle:
    image: container-registry.oracle.com/database/free:latest
    profiles:
      - oracle
    restart: always
    volumes:
      - source: oradata
        type: volume
        target: /opt/oracle/oradata
      - ./startupscripts:/opt/oracle/scripts/startup
    environment:
      ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
      ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
  # Milvus vector database services
  etcd:
    container_name: milvus-etcd
    image: quay.io/coreos/etcd:v3.5.5
    profiles:
      - milvus
    environment:
      ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
      ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
      ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
      ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
    volumes:
      - ./volumes/milvus/etcd:/etcd
    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
    healthcheck:
      test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus
  minio:
    container_name: milvus-minio
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    profiles:
      - milvus
    environment:
      MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
      MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
    volumes:
      - ./volumes/milvus/minio:/minio_data
    command: minio server /minio_data --console-address ":9001"
    healthcheck:
      test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus
  milvus-standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.5.0-beta
    profiles:
      - milvus
    command: [ 'milvus', 'run', 'standalone' ]
    environment:
      ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
      MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
      common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
    volumes:
      - ./volumes/milvus/milvus:/var/lib/milvus
    healthcheck:
      test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ]
      interval: 30s
      start_period: 90s
      timeout: 20s
      retries: 3
    depends_on:
      - etcd
      - minio
    ports:
      - 19530:19530
      - 9091:9091
    networks:
      - milvus
  # Opensearch vector database
  opensearch:
    container_name: opensearch
    image: opensearchproject/opensearch:latest
    profiles:
      - opensearch
    environment:
      discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
      bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
      OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
      OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
    ulimits:
      memlock:
        soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
        hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
      nofile:
        soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
        hard: ${OPENSEARCH_NOFILE_HARD:-65536}
    volumes:
      - ./volumes/opensearch/data:/usr/share/opensearch/data
    networks:
      - opensearch-net
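  # Note: bootstrap.memory_lock above only takes effect if the container is allowed to lock
  # memory, which is why the memlock ulimits are raised to unlimited (-1); the host may also
  # need its own memory-lock limits relaxed for this to work.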
  opensearch-dashboards:
    container_name: opensearch-dashboards
    image: opensearchproject/opensearch-dashboards:latest
    profiles:
      - opensearch
    environment:
      OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
    volumes:
      - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
    networks:
      - opensearch-net
    depends_on:
      - opensearch
  # MyScale vector database
  myscale:
    container_name: myscale
    image: myscale/myscaledb:1.6.4
    profiles:
      - myscale
    restart: always
    tty: true
    volumes:
      - ./volumes/myscale/data:/var/lib/clickhouse
      - ./volumes/myscale/log:/var/log/clickhouse-server
      - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
    ports:
      - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
    container_name: elasticsearch
    profiles:
      - elasticsearch
      - elasticsearch-ja
    restart: always
    volumes:
      - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - dify_es01_data:/usr/share/elasticsearch/data
    environment:
      ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
      VECTOR_STORE: ${VECTOR_STORE:-}
      cluster.name: dify-es-cluster
      node.name: dify-es0
      discovery.type: single-node
      xpack.license.self_generated.type: basic
      xpack.security.enabled: 'true'
      xpack.security.enrollment.enabled: 'false'
      xpack.security.http.ssl.enabled: 'false'
    ports:
      - ${ELASTICSEARCH_PORT:-9200}:9200
    deploy:
      resources:
        limits:
          memory: 2g
    entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ]
    healthcheck:
      test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ]
      interval: 30s
      timeout: 10s
      retries: 50
  # https://www.elastic.co/guide/en/kibana/current/docker.html
  # https://www.elastic.co/guide/en/kibana/current/settings.html
  kibana:
    image: docker.elastic.co/kibana/kibana:8.14.3
    container_name: kibana
    profiles:
      - elasticsearch
    depends_on:
      - elasticsearch
    restart: always
    environment:
      XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
      NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
      XPACK_SECURITY_ENABLED: 'true'
      XPACK_SECURITY_ENROLLMENT_ENABLED: 'false'
      XPACK_SECURITY_HTTP_SSL_ENABLED: 'false'
      XPACK_FLEET_ISAIRGAPPED: 'true'
      I18N_LOCALE: zh-CN
      SERVER_PORT: '5601'
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    ports:
      - ${KIBANA_PORT:-5601}:5601
    healthcheck:
      test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ]
      interval: 30s
      timeout: 10s
      retries: 3
  # unstructured
  # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
  unstructured:
    image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
    profiles:
      - unstructured
    restart: always
    volumes:
      - ./volumes/unstructured:/app/data
networks:
  # Create a network between sandbox, api and ssrf_proxy; it is internal and cannot access the outside.
  ssrf_proxy_network:
    driver: bridge
    internal: true
  milvus:
    driver: bridge
  opensearch-net:
    driver: bridge
    internal: true
volumes:
  oradata:
  dify_es01_data:
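# Typical usage (a sketch, assuming this template has been rendered to docker-compose.yaml in
# the Dify docker/ directory and a .env file providing the shared variables sits next to it):
#
#   docker compose up -d                      # core stack: api, worker, web, db, redis,
#                                             # sandbox, ssrf_proxy, nginx (plus the default
#                                             # weaviate vector store)
#   docker compose --profile certbot up -d    # additionally start the certbot service
#   docker compose --profile qdrant up -d     # bring up an alternative vector store profile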