
chore(docker): bump version (#25092)

Signed-off-by: -LAN- <laipz8200@outlook.com>
Tag: tags/1.8.1
-LAN- committed 1 month ago
Commit: c7700ac176
2 files changed, 140 insertions(+), 74 deletions(-)
1. docker/docker-compose-template.yaml (+70, -37)
2. docker/docker-compose.yaml (+70, -37)
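
Besides the image tag bumps from 1.8.0 to 1.8.1 (worker, worker_beat, web), most of the +70/-37 churn in both files is a cosmetic restyle of YAML flow sequences: single quotes become double quotes and long healthcheck/entrypoint arrays are wrapped across several lines. For these values the two spellings parse to the same list; a minimal sketch of the equivalence (the pg_isready healthcheck is borrowed from the diff below and shortened for illustration):

# Old style: single-quoted, single-line flow sequence
healthcheck:
  test: [ 'CMD', 'pg_isready' ]
---
# New style: double-quoted, wrapped flow sequence -- parses to the same command list
healthcheck:
  test:
    [
      "CMD",
      "pg_isready",
    ]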

docker/docker-compose-template.yaml (+70, -37)

 # worker service
 # The Celery worker for processing the queue.
 worker:
-image: langgenius/dify-api:1.8.0
+image: langgenius/dify-api:1.8.1
 restart: always
 environment:
 # Use the shared environment variables.
 # worker_beat service
 # Celery beat for scheduling periodic tasks.
 worker_beat:
-image: langgenius/dify-api:1.8.0
+image: langgenius/dify-api:1.8.1
 restart: always
 environment:
 # Use the shared environment variables.

 # Frontend web application.
 web:
-image: langgenius/dify-web:1.8.0
+image: langgenius/dify-web:1.8.1
 restart: always
 environment:
 CONSOLE_API_URL: ${CONSOLE_API_URL:-}
 volumes:
 - ./volumes/db/data:/var/lib/postgresql/data
 healthcheck:
-test: [ 'CMD', 'pg_isready', '-h', 'db', '-U', '${PGUSER:-postgres}', '-d', '${POSTGRES_DB:-dify}' ]
+test:
+  [
+    "CMD",
+    "pg_isready",
+    "-h",
+    "db",
+    "-U",
+    "${PGUSER:-postgres}",
+    "-d",
+    "${POSTGRES_DB:-dify}",
+  ]
 interval: 1s
 timeout: 3s
 retries: 60
 # Set the redis password when startup redis server.
 command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
 healthcheck:
-test: [ 'CMD-SHELL', 'redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG' ]
+test:
+  [
+    "CMD-SHELL",
+    "redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG",
+  ]

 # The DifySandbox
 sandbox:
 - ./volumes/sandbox/dependencies:/dependencies
 - ./volumes/sandbox/conf:/conf
 healthcheck:
-test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ]
+test: ["CMD", "curl", "-f", "http://localhost:8194/health"]
 networks:
 - ssrf_proxy_network

 volumes:
 - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
 - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
-entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
+entrypoint:
+  [
+    "sh",
+    "-c",
+    "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
+  ]
 environment:
 # pls clearly modify the squid env vars to fit your network environment.
 HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
 - CERTBOT_EMAIL=${CERTBOT_EMAIL}
 - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
 - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
-entrypoint: [ '/docker-entrypoint.sh' ]
-command: [ 'tail', '-f', '/dev/null' ]
+entrypoint: ["/docker-entrypoint.sh"]
+command: ["tail", "-f", "/dev/null"]

 # The nginx reverse proxy.
 # used for reverse proxying the API service and Web service.
 - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
 - ./volumes/certbot/conf:/etc/letsencrypt
 - ./volumes/certbot/www:/var/www/html
-entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
+entrypoint:
+  [
+    "sh",
+    "-c",
+    "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
+  ]
 environment:
 NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
 NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
 - api
 - web
 ports:
-- '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
-- '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'
+- "${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}"
+- "${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}"

 # The Weaviate vector store.
 weaviate:
 image: semitechnologies/weaviate:1.19.0
 profiles:
-- ''
+- ""
 - weaviate
 restart: always
 volumes:
 working_dir: /opt/couchbase
 stdin_open: true
 tty: true
-entrypoint: [ "" ]
+entrypoint: [""]
 command: sh -c "/opt/couchbase/init/init-cbserver.sh"
 volumes:
 - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
 healthcheck:
 # ensure bucket was created before proceeding
-test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ]
+test:
+  [
+    "CMD-SHELL",
+    "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1",
+  ]
 interval: 10s
 retries: 10
 start_period: 30s
 volumes:
 - ./volumes/pgvector/data:/var/lib/postgresql/data
 - ./pgvector/docker-entrypoint.sh:/docker-entrypoint.sh
-entrypoint: [ '/docker-entrypoint.sh' ]
+entrypoint: ["/docker-entrypoint.sh"]
 healthcheck:
-test: [ 'CMD', 'pg_isready' ]
+test: ["CMD", "pg_isready"]
 interval: 1s
 timeout: 3s
 retries: 30
 - VB_USERNAME=dify
 - VB_PASSWORD=Difyai123456
 ports:
-- '5434:5432'
+- "5434:5432"
 volumes:
 - ./vastbase/lic:/home/vastbase/vastbase/lic
 - ./vastbase/data:/home/vastbase/data
 - ./vastbase/backup:/home/vastbase/backup
 - ./vastbase/backup_log:/home/vastbase/backup_log
 healthcheck:
-test: [ 'CMD', 'pg_isready' ]
+test: ["CMD", "pg_isready"]
 interval: 1s
 timeout: 3s
 retries: 30
 volumes:
 - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
 healthcheck:
-test: [ 'CMD', 'pg_isready' ]
+test: ["CMD", "pg_isready"]
 interval: 1s
 timeout: 3s
 retries: 30
 ports:
 - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
 healthcheck:
-test: [ 'CMD-SHELL', 'obclient -h127.0.0.1 -P2881 -uroot@test -p$${OB_TENANT_PASSWORD} -e "SELECT 1;"' ]
+test:
+  [
+    "CMD-SHELL",
+    'obclient -h127.0.0.1 -P2881 -uroot@test -p$${OB_TENANT_PASSWORD} -e "SELECT 1;"',
+  ]
 interval: 10s
 retries: 30
 start_period: 30s
 - ./volumes/milvus/etcd:/etcd
 command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
 healthcheck:
-test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ]
+test: ["CMD", "etcdctl", "endpoint", "health"]
 interval: 30s
 timeout: 20s
 retries: 3
 - ./volumes/milvus/minio:/minio_data
 command: minio server /minio_data --console-address ":9001"
 healthcheck:
-test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ]
+test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
 interval: 30s
 timeout: 20s
 retries: 3
 image: milvusdb/milvus:v2.5.15
 profiles:
 - milvus
-command: [ 'milvus', 'run', 'standalone' ]
+command: ["milvus", "run", "standalone"]
 environment:
 ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
 MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
 volumes:
 - ./volumes/milvus/milvus:/var/lib/milvus
 healthcheck:
-test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ]
+test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
 interval: 30s
 start_period: 90s
 timeout: 20s
 volumes:
 - ./volumes/opengauss/data:/var/lib/opengauss/data
 healthcheck:
-test: [ "CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1" ]
+test: ["CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1"]
 interval: 10s
 timeout: 10s
 retries: 10
 node.name: dify-es0
 discovery.type: single-node
 xpack.license.self_generated.type: basic
-xpack.security.enabled: 'true'
-xpack.security.enrollment.enabled: 'false'
-xpack.security.http.ssl.enabled: 'false'
+xpack.security.enabled: "true"
+xpack.security.enrollment.enabled: "false"
+xpack.security.http.ssl.enabled: "false"
 ports:
 - ${ELASTICSEARCH_PORT:-9200}:9200
 deploy:
 resources:
 limits:
 memory: 2g
-entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ]
+entrypoint: ["sh", "-c", "sh /docker-entrypoint-mount.sh"]
 healthcheck:
-test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ]
+test:
+  ["CMD", "curl", "-s", "http://localhost:9200/_cluster/health?pretty"]
 interval: 30s
 timeout: 10s
 retries: 50
 environment:
 XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
 NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
-XPACK_SECURITY_ENABLED: 'true'
-XPACK_SECURITY_ENROLLMENT_ENABLED: 'false'
-XPACK_SECURITY_HTTP_SSL_ENABLED: 'false'
-XPACK_FLEET_ISAIRGAPPED: 'true'
+XPACK_SECURITY_ENABLED: "true"
+XPACK_SECURITY_ENROLLMENT_ENABLED: "false"
+XPACK_SECURITY_HTTP_SSL_ENABLED: "false"
+XPACK_FLEET_ISAIRGAPPED: "true"
 I18N_LOCALE: zh-CN
-SERVER_PORT: '5601'
+SERVER_PORT: "5601"
 ELASTICSEARCH_HOSTS: http://elasticsearch:9200
 ports:
 - ${KIBANA_PORT:-5601}:5601
 healthcheck:
-test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ]
+test: ["CMD-SHELL", "curl -s http://localhost:5601 >/dev/null || exit 1"]
 interval: 30s
 timeout: 10s
 retries: 3

docker/docker-compose.yaml (+70, -37)

 # worker service
 # The Celery worker for processing the queue.
 worker:
-image: langgenius/dify-api:1.8.0
+image: langgenius/dify-api:1.8.1
 restart: always
 environment:
 # Use the shared environment variables.
 # worker_beat service
 # Celery beat for scheduling periodic tasks.
 worker_beat:
-image: langgenius/dify-api:1.8.0
+image: langgenius/dify-api:1.8.1
 restart: always
 environment:
 # Use the shared environment variables.

 # Frontend web application.
 web:
-image: langgenius/dify-web:1.8.0
+image: langgenius/dify-web:1.8.1
 restart: always
 environment:
 CONSOLE_API_URL: ${CONSOLE_API_URL:-}
 volumes:
 - ./volumes/db/data:/var/lib/postgresql/data
 healthcheck:
-test: [ 'CMD', 'pg_isready', '-h', 'db', '-U', '${PGUSER:-postgres}', '-d', '${POSTGRES_DB:-dify}' ]
+test:
+  [
+    "CMD",
+    "pg_isready",
+    "-h",
+    "db",
+    "-U",
+    "${PGUSER:-postgres}",
+    "-d",
+    "${POSTGRES_DB:-dify}",
+  ]
 interval: 1s
 timeout: 3s
 retries: 60
 # Set the redis password when startup redis server.
 command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
 healthcheck:
-test: [ 'CMD-SHELL', 'redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG' ]
+test:
+  [
+    "CMD-SHELL",
+    "redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG",
+  ]

 # The DifySandbox
 sandbox:
 - ./volumes/sandbox/dependencies:/dependencies
 - ./volumes/sandbox/conf:/conf
 healthcheck:
-test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ]
+test: ["CMD", "curl", "-f", "http://localhost:8194/health"]
 networks:
 - ssrf_proxy_network

 volumes:
 - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
 - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
-entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
+entrypoint:
+  [
+    "sh",
+    "-c",
+    "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
+  ]
 environment:
 # pls clearly modify the squid env vars to fit your network environment.
 HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
 - CERTBOT_EMAIL=${CERTBOT_EMAIL}
 - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
 - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
-entrypoint: [ '/docker-entrypoint.sh' ]
-command: [ 'tail', '-f', '/dev/null' ]
+entrypoint: ["/docker-entrypoint.sh"]
+command: ["tail", "-f", "/dev/null"]

 # The nginx reverse proxy.
 # used for reverse proxying the API service and Web service.
 - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
 - ./volumes/certbot/conf:/etc/letsencrypt
 - ./volumes/certbot/www:/var/www/html
-entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
+entrypoint:
+  [
+    "sh",
+    "-c",
+    "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
+  ]
 environment:
 NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
 NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
 - api
 - web
 ports:
-- '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
-- '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'
+- "${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}"
+- "${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}"

 # The Weaviate vector store.
 weaviate:
 image: semitechnologies/weaviate:1.19.0
 profiles:
-- ''
+- ""
 - weaviate
 restart: always
 volumes:
 working_dir: /opt/couchbase
 stdin_open: true
 tty: true
-entrypoint: [ "" ]
+entrypoint: [""]
 command: sh -c "/opt/couchbase/init/init-cbserver.sh"
 volumes:
 - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
 healthcheck:
 # ensure bucket was created before proceeding
-test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ]
+test:
+  [
+    "CMD-SHELL",
+    "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1",
+  ]
 interval: 10s
 retries: 10
 start_period: 30s
 volumes:
 - ./volumes/pgvector/data:/var/lib/postgresql/data
 - ./pgvector/docker-entrypoint.sh:/docker-entrypoint.sh
-entrypoint: [ '/docker-entrypoint.sh' ]
+entrypoint: ["/docker-entrypoint.sh"]
 healthcheck:
-test: [ 'CMD', 'pg_isready' ]
+test: ["CMD", "pg_isready"]
 interval: 1s
 timeout: 3s
 retries: 30
 - VB_USERNAME=dify
 - VB_PASSWORD=Difyai123456
 ports:
-- '5434:5432'
+- "5434:5432"
 volumes:
 - ./vastbase/lic:/home/vastbase/vastbase/lic
 - ./vastbase/data:/home/vastbase/data
 - ./vastbase/backup:/home/vastbase/backup
 - ./vastbase/backup_log:/home/vastbase/backup_log
 healthcheck:
-test: [ 'CMD', 'pg_isready' ]
+test: ["CMD", "pg_isready"]
 interval: 1s
 timeout: 3s
 retries: 30
 volumes:
 - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
 healthcheck:
-test: [ 'CMD', 'pg_isready' ]
+test: ["CMD", "pg_isready"]
 interval: 1s
 timeout: 3s
 retries: 30
 ports:
 - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
 healthcheck:
-test: [ 'CMD-SHELL', 'obclient -h127.0.0.1 -P2881 -uroot@test -p$${OB_TENANT_PASSWORD} -e "SELECT 1;"' ]
+test:
+  [
+    "CMD-SHELL",
+    'obclient -h127.0.0.1 -P2881 -uroot@test -p$${OB_TENANT_PASSWORD} -e "SELECT 1;"',
+  ]
 interval: 10s
 retries: 30
 start_period: 30s
 - ./volumes/milvus/etcd:/etcd
 command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
 healthcheck:
-test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ]
+test: ["CMD", "etcdctl", "endpoint", "health"]
 interval: 30s
 timeout: 20s
 retries: 3
 - ./volumes/milvus/minio:/minio_data
 command: minio server /minio_data --console-address ":9001"
 healthcheck:
-test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ]
+test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
 interval: 30s
 timeout: 20s
 retries: 3
 image: milvusdb/milvus:v2.5.15
 profiles:
 - milvus
-command: [ 'milvus', 'run', 'standalone' ]
+command: ["milvus", "run", "standalone"]
 environment:
 ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
 MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
 volumes:
 - ./volumes/milvus/milvus:/var/lib/milvus
 healthcheck:
-test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ]
+test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
 interval: 30s
 start_period: 90s
 timeout: 20s
 volumes:
 - ./volumes/opengauss/data:/var/lib/opengauss/data
 healthcheck:
-test: [ "CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1" ]
+test: ["CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1"]
 interval: 10s
 timeout: 10s
 retries: 10
 node.name: dify-es0
 discovery.type: single-node
 xpack.license.self_generated.type: basic
-xpack.security.enabled: 'true'
-xpack.security.enrollment.enabled: 'false'
-xpack.security.http.ssl.enabled: 'false'
+xpack.security.enabled: "true"
+xpack.security.enrollment.enabled: "false"
+xpack.security.http.ssl.enabled: "false"
 ports:
 - ${ELASTICSEARCH_PORT:-9200}:9200
 deploy:
 resources:
 limits:
 memory: 2g
-entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ]
+entrypoint: ["sh", "-c", "sh /docker-entrypoint-mount.sh"]
 healthcheck:
-test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ]
+test:
+  ["CMD", "curl", "-s", "http://localhost:9200/_cluster/health?pretty"]
 interval: 30s
 timeout: 10s
 retries: 50
 environment:
 XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
 NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
-XPACK_SECURITY_ENABLED: 'true'
-XPACK_SECURITY_ENROLLMENT_ENABLED: 'false'
-XPACK_SECURITY_HTTP_SSL_ENABLED: 'false'
-XPACK_FLEET_ISAIRGAPPED: 'true'
+XPACK_SECURITY_ENABLED: "true"
+XPACK_SECURITY_ENROLLMENT_ENABLED: "false"
+XPACK_SECURITY_HTTP_SSL_ENABLED: "false"
+XPACK_FLEET_ISAIRGAPPED: "true"
 I18N_LOCALE: zh-CN
-SERVER_PORT: '5601'
+SERVER_PORT: "5601"
 ELASTICSEARCH_HOSTS: http://elasticsearch:9200
 ports:
 - ${KIBANA_PORT:-5601}:5601
 healthcheck:
-test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ]
+test: ["CMD-SHELL", "curl -s http://localhost:5601 >/dev/null || exit 1"]
 interval: 30s
 timeout: 10s
 retries: 3
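
If you track these upstream compose files but want to keep the image tags pinned locally, Compose's standard override file can carry the pin instead of editing the shipped YAML. A hypothetical docker-compose.override.yaml placed next to docker/docker-compose.yaml might look like the sketch below (only the services touched by this commit are shown; tags are illustrative and adjustable):

# docker-compose.override.yaml -- merged automatically by `docker compose up`
services:
  worker:
    image: langgenius/dify-api:1.8.1
  worker_beat:
    image: langgenius/dify-api:1.8.1
  web:
    image: langgenius/dify-web:1.8.1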
