
Add Full-Text & Hybrid Search Support to Baidu Vector DB and Update SDK, Closes #25982 (#25983)

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
tags/1.9.0
Shili Cao, 1 month ago
parent commit 345ac8333c

api/.env.example (+2, -0)

  BAIDU_VECTOR_DB_DATABASE=dify
  BAIDU_VECTOR_DB_SHARD=1
  BAIDU_VECTOR_DB_REPLICAS=3
+ BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER=DEFAULT_ANALYZER
+ BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE=COARSE_MODE

  # Upstash configuration
  UPSTASH_VECTOR_URL=your-server-url

api/configs/middleware/vdb/baidu_vector_config.py (+10, -0)

description="Number of replicas for the Baidu Vector Database (default is 3)", description="Number of replicas for the Baidu Vector Database (default is 3)",
default=3, default=3,
) )

BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER: str = Field(
description="Analyzer type for inverted index in Baidu Vector Database (default is DEFAULT_ANALYZER)",
default="DEFAULT_ANALYZER",
)

BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE: str = Field(
description="Parser mode for inverted index in Baidu Vector Database (default is COARSE_MODE)",
default="COARSE_MODE",
)
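For reference, a minimal standalone sketch of the two new settings in isolation (the class name here is hypothetical; in the repo these fields are added to the existing Baidu vector config class shown above):

from pydantic import Field
from pydantic_settings import BaseSettings

# Hypothetical container for illustration only; values are read from the
# environment variables of the same name, falling back to the defaults.
class InvertedIndexSettings(BaseSettings):
    BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER: str = Field(
        default="DEFAULT_ANALYZER",
        description="Analyzer type for inverted index (default is DEFAULT_ANALYZER)",
    )
    BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE: str = Field(
        default="COARSE_MODE",
        description="Parser mode for inverted index (default is COARSE_MODE)",
    )

settings = InvertedIndexSettings()
print(settings.BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER)  # DEFAULT_ANALYZER unless overridden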

api/controllers/console/datasets/datasets.py (+2, -2)

  | VectorType.TIDB_VECTOR
  | VectorType.CHROMA
  | VectorType.PGVECTO_RS
- | VectorType.BAIDU
  | VectorType.VIKINGDB
  | VectorType.UPSTASH
  ):
  | VectorType.TENCENT
  | VectorType.MATRIXONE
  | VectorType.CLICKZETTA
+ | VectorType.BAIDU
  ):
      return {
          "retrieval_method": [
  | VectorType.TIDB_VECTOR
  | VectorType.CHROMA
  | VectorType.PGVECTO_RS
- | VectorType.BAIDU
  | VectorType.VIKINGDB
  | VectorType.UPSTASH
  ):
  | VectorType.HUAWEI_CLOUD
  | VectorType.MATRIXONE
  | VectorType.CLICKZETTA
+ | VectorType.BAIDU
  ):
      return {
          "retrieval_method": [

api/core/rag/datasource/vdb/baidu/baidu_vector.py (+128, -55)

  import json
+ import logging
  import time
  import uuid
  from typing import Any
  from pymochow.auth.bce_credentials import BceCredentials  # type: ignore
  from pymochow.configuration import Configuration  # type: ignore
  from pymochow.exception import ServerError  # type: ignore
+ from pymochow.model.database import Database
  from pymochow.model.enum import FieldType, IndexState, IndexType, MetricType, ServerErrCode, TableState  # type: ignore
- from pymochow.model.schema import Field, HNSWParams, Schema, VectorIndex  # type: ignore
- from pymochow.model.table import AnnSearch, HNSWSearchParams, Partition, Row  # type: ignore
+ from pymochow.model.schema import (
+     Field,
+     FilteringIndex,
+     HNSWParams,
+     InvertedIndex,
+     InvertedIndexAnalyzer,
+     InvertedIndexFieldAttribute,
+     InvertedIndexParams,
+     InvertedIndexParseMode,
+     Schema,
+     VectorIndex,
+ )  # type: ignore
+ from pymochow.model.table import AnnSearch, BM25SearchRequest, HNSWSearchParams, Partition, Row  # type: ignore


  from configs import dify_config
+ from core.rag.datasource.vdb.field import Field as VDBField
  from core.rag.datasource.vdb.vector_base import BaseVector
  from core.rag.datasource.vdb.vector_factory import AbstractVectorFactory
  from core.rag.datasource.vdb.vector_type import VectorType
  from extensions.ext_redis import redis_client
  from models.dataset import Dataset

+ logger = logging.getLogger(__name__)



  class BaiduConfig(BaseModel):
      endpoint: str
      api_key: str
      database: str
      index_type: str = "HNSW"
-     metric_type: str = "L2"
+     metric_type: str = "IP"
      shard: int = 1
      replicas: int = 3
+     inverted_index_analyzer: str = "DEFAULT_ANALYZER"
+     inverted_index_parser_mode: str = "COARSE_MODE"


      @model_validator(mode="before")
      @classmethod
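Note the `metric_type` default changing from L2 to IP above: IP scores rank higher-is-better, whereas L2 distances rank lower-is-closer, which matters for `score_threshold` handling downstream. A tiny pure-Python illustration (no SDK involved):

# For normalized vectors, inner product (IP) ranks like cosine similarity,
# while L2 distance ranks in the opposite direction.
def ip(a: list[float], b: list[float]) -> float:
    return sum(x * y for x, y in zip(a, b))

def l2(a: list[float], b: list[float]) -> float:
    return sum((x - y) ** 2 for x, y in zip(a, b)) ** 0.5

q, near, far = [1.0, 0.0], [0.9, 0.1], [0.1, 0.9]
assert ip(q, near) > ip(q, far)  # IP: bigger score = more similar
assert l2(q, near) < l2(q, far)  # L2: smaller distance = more similar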




  class BaiduVector(BaseVector):
-     field_id: str = "id"
-     field_vector: str = "vector"
-     field_text: str = "text"
-     field_metadata: str = "metadata"
-     field_app_id: str = "app_id"
-     field_annotation_id: str = "annotation_id"
-     index_vector: str = "vector_idx"
+     vector_index: str = "vector_idx"
+     filtering_index: str = "filtering_idx"
+     inverted_index: str = "content_inverted_idx"


      def __init__(self, collection_name: str, config: BaiduConfig):
          super().__init__(collection_name)

          self.add_texts(texts, embeddings)


      def add_texts(self, documents: list[Document], embeddings: list[list[float]], **kwargs):
-         texts = [doc.page_content for doc in documents]
-         metadatas = [doc.metadata for doc in documents if doc.metadata is not None]
          total_count = len(documents)
          batch_size = 1000

          for start in range(0, total_count, batch_size):
              end = min(start + batch_size, total_count)
              rows = []
-             assert len(metadatas) == total_count, "metadatas length should be equal to total_count"
              for i in range(start, end, 1):
+                 metadata = documents[i].metadata
                  row = Row(
-                     id=metadatas[i].get("doc_id", str(uuid.uuid4())),
+                     id=metadata.get("doc_id", str(uuid.uuid4())),
+                     page_content=documents[i].page_content,
+                     metadata=metadata,
                      vector=embeddings[i],
-                     text=texts[i],
-                     metadata=json.dumps(metadatas[i]),
-                     app_id=metadatas[i].get("app_id", ""),
-                     annotation_id=metadatas[i].get("annotation_id", ""),
                  )
                  rows.append(row)
              table.upsert(rows=rows)


          # rebuild vector index after upsert finished
-         table.rebuild_index(self.index_vector)
+         table.rebuild_index(self.vector_index)
+         timeout = 3600  # 1 hour timeout
+         start_time = time.time()
          while True:
              time.sleep(1)
-             index = table.describe_index(self.index_vector)
+             index = table.describe_index(self.vector_index)
              if index.state == IndexState.NORMAL:
                  break
+             if time.time() - start_time > timeout:
+                 raise TimeoutError(f"Index rebuild timeout after {timeout} seconds")
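The rebuild loop now carries a deadline instead of polling forever. The pattern in isolation, as a self-contained sketch with the SDK call stubbed out (`is_ready` stands in for `table.describe_index(...).state == IndexState.NORMAL`):

import time

# Poll until ready or until the deadline passes, like the rebuild loop above.
def wait_until(is_ready, timeout: float = 3600.0, interval: float = 1.0) -> None:
    start_time = time.time()
    while True:
        time.sleep(interval)
        if is_ready():
            return
        if time.time() - start_time > timeout:
            raise TimeoutError(f"Index rebuild timeout after {timeout} seconds")

state = iter([False, False, True])
wait_until(lambda: next(state), timeout=10.0, interval=0.01)  # returns on the third poll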


      def text_exists(self, id: str) -> bool:
-         res = self._db.table(self._collection_name).query(primary_key={self.field_id: id})
+         res = self._db.table(self._collection_name).query(primary_key={VDBField.PRIMARY_KEY: id})
          if res and res.code == 0:
              return True
          return False

          if not ids:
              return
          quoted_ids = [f"'{id}'" for id in ids]
-         self._db.table(self._collection_name).delete(filter=f"id IN({', '.join(quoted_ids)})")
+         self._db.table(self._collection_name).delete(filter=f"{VDBField.PRIMARY_KEY} IN({', '.join(quoted_ids)})")


      def delete_by_metadata_field(self, key: str, value: str):
-         self._db.table(self._collection_name).delete(filter=f"{key} = '{value}'")
+         # Escape double quotes in value to prevent injection
+         escaped_value = value.replace('"', '\\"')
+         self._db.table(self._collection_name).delete(filter=f'metadata["{key}"] = "{escaped_value}"')
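The filter now targets the JSON `metadata` field, and the value is quote-escaped so it cannot break out of the string literal in the filter expression. The construction logic in isolation:

# Escape embedded double quotes so the value cannot terminate the string
# literal early inside the filter expression.
def metadata_filter(key: str, value: str) -> str:
    escaped_value = value.replace('"', '\\"')
    return f'metadata["{key}"] = "{escaped_value}"'

assert metadata_filter("document_id", "doc-1") == 'metadata["document_id"] = "doc-1"'
assert metadata_filter("title", 'say "hi"') == 'metadata["title"] = "say \\"hi\\""'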


      def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]:
          query_vector = [float(val) if isinstance(val, np.float64) else val for val in query_vector]
          document_ids_filter = kwargs.get("document_ids_filter")
+         filter = ""
          if document_ids_filter:
              document_ids = ", ".join(f"'{id}'" for id in document_ids_filter)
-             anns = AnnSearch(
-                 vector_field=self.field_vector,
-                 vector_floats=query_vector,
-                 params=HNSWSearchParams(ef=kwargs.get("ef", 10), limit=kwargs.get("top_k", 4)),
-                 filter=f"document_id IN ({document_ids})",
-             )
-         else:
-             anns = AnnSearch(
-                 vector_field=self.field_vector,
-                 vector_floats=query_vector,
-                 params=HNSWSearchParams(ef=kwargs.get("ef", 10), limit=kwargs.get("top_k", 4)),
-             )
+             filter = f'metadata["document_id"] IN({document_ids})'
+         anns = AnnSearch(
+             vector_field=VDBField.VECTOR,
+             vector_floats=query_vector,
+             params=HNSWSearchParams(ef=kwargs.get("ef", 20), limit=kwargs.get("top_k", 4)),
+             filter=filter,
+         )
          res = self._db.table(self._collection_name).search(
              anns=anns,
-             projections=[self.field_id, self.field_text, self.field_metadata],
-             retrieve_vector=True,
+             projections=[VDBField.CONTENT_KEY, VDBField.METADATA_KEY],
+             retrieve_vector=False,
          )
          score_threshold = float(kwargs.get("score_threshold") or 0.0)
          return self._get_search_res(res, score_threshold)
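The refactor collapses the old duplicated if/else into a single `AnnSearch` construction with an optional filter (note `ef` also moves from 10 to 20). A stubbed, runnable sketch of just that refactor (`AnnSearchStub` is a stand-in for pymochow's `AnnSearch`):

from dataclasses import dataclass
from typing import Optional

@dataclass
class AnnSearchStub:  # stand-in for pymochow.model.table.AnnSearch
    vector_field: str
    vector_floats: list[float]
    filter: str = ""

def build_search(query_vector: list[float], document_ids_filter: Optional[list[str]] = None) -> AnnSearchStub:
    # Single construction site: filter stays "" when no document filter is
    # given, mirroring the rewritten search_by_vector above.
    filter = ""
    if document_ids_filter:
        document_ids = ", ".join(f"'{id}'" for id in document_ids_filter)
        filter = f'metadata["document_id"] IN({document_ids})'
    return AnnSearchStub(vector_field="vector", vector_floats=query_vector, filter=filter)

assert build_search([0.1], ["a", "b"]).filter == "metadata[\"document_id\"] IN('a', 'b')"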


      def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
-         # baidu vector database doesn't support bm25 search on current version
-         return []
+         # document ids filter
+         document_ids_filter = kwargs.get("document_ids_filter")
+         filter = ""
+         if document_ids_filter:
+             document_ids = ", ".join(f"'{id}'" for id in document_ids_filter)
+             filter = f'metadata["document_id"] IN({document_ids})'
+
+         request = BM25SearchRequest(
+             index_name=self.inverted_index, search_text=query, limit=kwargs.get("top_k", 4), filter=filter
+         )
+         res = self._db.table(self._collection_name).bm25_search(
+             request=request, projections=[VDBField.CONTENT_KEY, VDBField.METADATA_KEY]
+         )
+         score_threshold = float(kwargs.get("score_threshold") or 0.0)
+         return self._get_search_res(res, score_threshold)
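With `search_by_full_text` now issuing a real `BM25SearchRequest` against the inverted index, Baidu VDB exposes both retrieval paths that hybrid search needs. The fusion itself happens in Dify's higher retrieval layer, not in this file; purely for intuition, a toy weighted-score merge (not code from this PR):

# Illustration only: weighted sum of (already normalized) per-document
# scores from the two search paths; missing entries count as 0.
def hybrid_merge(vector_hits: dict[str, float], bm25_hits: dict[str, float], alpha: float = 0.7) -> list[str]:
    ids = set(vector_hits) | set(bm25_hits)
    scored = {i: alpha * vector_hits.get(i, 0.0) + (1 - alpha) * bm25_hits.get(i, 0.0) for i in ids}
    return sorted(scored, key=scored.get, reverse=True)

assert hybrid_merge({"a": 0.9, "b": 0.2}, {"b": 0.8, "c": 0.6}) == ["a", "b", "c"]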


      def _get_search_res(self, res, score_threshold) -> list[Document]:
          docs = []
          for row in res.rows:
              row_data = row.get("row", {})
-             meta = row_data.get(self.field_metadata)
-             if meta is not None:
-                 meta = json.loads(meta)
              score = row.get("score", 0.0)
+             meta = row_data.get(VDBField.METADATA_KEY, {})
+
+             # Handle both JSON string and dict formats for backward compatibility
+             if isinstance(meta, str):
+                 try:
+                     import json
+
+                     meta = json.loads(meta)
+                 except (json.JSONDecodeError, TypeError):
+                     meta = {}
+             elif not isinstance(meta, dict):
+                 meta = {}
+
              if score >= score_threshold:
                  meta["score"] = score
-                 doc = Document(page_content=row_data.get(self.field_text), metadata=meta)
+                 doc = Document(page_content=row_data.get(VDBField.CONTENT_KEY), metadata=meta)
                  docs.append(doc)

          return docs
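The metadata handling above accepts both the legacy JSON-string format and the new dict format. The coercion rule in isolation:

import json

# Legacy rows stored metadata as a JSON string; new rows store a dict;
# anything else collapses to {}.
def coerce_metadata(meta) -> dict:
    if isinstance(meta, str):
        try:
            return json.loads(meta)
        except (json.JSONDecodeError, TypeError):
            return {}
    return meta if isinstance(meta, dict) else {}

assert coerce_metadata('{"doc_id": "d1"}') == {"doc_id": "d1"}
assert coerce_metadata({"doc_id": "d1"}) == {"doc_id": "d1"}
assert coerce_metadata(None) == {}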


      def delete(self):

          client = MochowClient(config)
          return client


-     def _init_database(self):
+     def _init_database(self) -> Database:
          exists = False
          for db in self._client.list_databases():
              if db.database_name == self._client_config.database:
              self._client.create_database(database_name=self._client_config.database)
          except ServerError as e:
              if e.code == ServerErrCode.DB_ALREADY_EXIST:
-                 pass
+                 return self._client.database(self._client_config.database)
              else:
                  raise
-         return
+         return self._client.database(self._client_config.database)


      def _table_existed(self) -> bool:
          tables = self._db.list_table()
          fields = []
          fields.append(
              Field(
-                 self.field_id,
+                 VDBField.PRIMARY_KEY,
                  FieldType.STRING,
                  primary_key=True,
                  partition_key=True,
                  not_null=True,
              )
          )
-         fields.append(Field(self.field_metadata, FieldType.STRING, not_null=True))
-         fields.append(Field(self.field_app_id, FieldType.STRING))
-         fields.append(Field(self.field_annotation_id, FieldType.STRING))
-         fields.append(Field(self.field_text, FieldType.TEXT, not_null=True))
-         fields.append(Field(self.field_vector, FieldType.FLOAT_VECTOR, not_null=True, dimension=dimension))
+         fields.append(Field(VDBField.CONTENT_KEY, FieldType.TEXT, not_null=False))
+         fields.append(Field(VDBField.METADATA_KEY, FieldType.JSON, not_null=False))
+         fields.append(Field(VDBField.VECTOR, FieldType.FLOAT_VECTOR, not_null=True, dimension=dimension))


          # Construct vector index params
          indexes = []
          indexes.append(
              VectorIndex(
-                 index_name="vector_idx",
+                 index_name=self.vector_index,
                  index_type=index_type,
-                 field="vector",
+                 field=VDBField.VECTOR,
                  metric_type=metric_type,
                  params=HNSWParams(m=16, efconstruction=200),
              )
          )


+         # Filtering index
+         indexes.append(
+             FilteringIndex(
+                 index_name=self.filtering_index,
+                 fields=[VDBField.METADATA_KEY],
+             )
+         )
+
+         # Get analyzer and parse_mode from config
+         analyzer = getattr(
+             InvertedIndexAnalyzer,
+             self._client_config.inverted_index_analyzer,
+             InvertedIndexAnalyzer.DEFAULT_ANALYZER,
+         )
+         parse_mode = getattr(
+             InvertedIndexParseMode,
+             self._client_config.inverted_index_parser_mode,
+             InvertedIndexParseMode.COARSE_MODE,
+         )
+
+         # Inverted index
+         indexes.append(
+             InvertedIndex(
+                 index_name=self.inverted_index,
+                 fields=[VDBField.CONTENT_KEY],
+                 params=InvertedIndexParams(
+                     analyzer=analyzer,
+                     parse_mode=parse_mode,
+                     case_sensitive=True,
+                 ),
+                 field_attributes=[InvertedIndexFieldAttribute.ANALYZED],
+             )
+         )
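The analyzer and parse mode are resolved with `getattr(..., default)`, so an unrecognized config string silently falls back rather than raising. The lookup pattern in isolation (the enum here is a stand-in, not pymochow's real member list):

from enum import Enum

class AnalyzerStub(Enum):  # stand-in; not pymochow's actual members
    DEFAULT_ANALYZER = "default"
    CHINESE_ANALYZER = "chinese"

def pick_analyzer(name: str) -> AnalyzerStub:
    # Unknown names fall back to the default instead of raising AttributeError.
    return getattr(AnalyzerStub, name, AnalyzerStub.DEFAULT_ANALYZER)

assert pick_analyzer("CHINESE_ANALYZER") is AnalyzerStub.CHINESE_ANALYZER
assert pick_analyzer("NOT_A_REAL_ANALYZER") is AnalyzerStub.DEFAULT_ANALYZER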

          # Create table
          self._db.create_table(
              table_name=self._collection_name,
          )

          # Wait for table created
+         timeout = 300  # 5 minutes timeout
+         start_time = time.time()
          while True:
              time.sleep(1)
              table = self._db.describe_table(self._collection_name)
              if table.state == TableState.NORMAL:
                  break
+             if time.time() - start_time > timeout:
+                 raise TimeoutError(f"Table creation timeout after {timeout} seconds")
          redis_client.set(table_exist_cache_key, 1, ex=3600)




              database=dify_config.BAIDU_VECTOR_DB_DATABASE or "",
              shard=dify_config.BAIDU_VECTOR_DB_SHARD,
              replicas=dify_config.BAIDU_VECTOR_DB_REPLICAS,
+             inverted_index_analyzer=dify_config.BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER,
+             inverted_index_parser_mode=dify_config.BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE,
          ),
      )

api/pyproject.toml (+1, -1)

"pgvecto-rs[sqlalchemy]~=0.2.1", "pgvecto-rs[sqlalchemy]~=0.2.1",
"pgvector==0.2.5", "pgvector==0.2.5",
"pymilvus~=2.5.0", "pymilvus~=2.5.0",
"pymochow==1.3.1",
"pymochow==2.2.9",
"pyobvector~=0.2.15", "pyobvector~=0.2.15",
"qdrant-client==1.9.0", "qdrant-client==1.9.0",
"tablestore==6.2.0", "tablestore==6.2.0",

api/tests/integration_tests/vdb/__mock/baiduvectordb.py (+4, -4)

"row": { "row": {
"id": primary_key.get("id"), "id": primary_key.get("id"),
"vector": [0.23432432, 0.8923744, 0.89238432], "vector": [0.23432432, 0.8923744, 0.89238432],
"text": "text",
"metadata": '{"doc_id": "doc_id_001"}',
"page_content": "text",
"metadata": {"doc_id": "doc_id_001"},
}, },
"code": 0, "code": 0,
"msg": "Success", "msg": "Success",
"row": { "row": {
"id": "doc_id_001", "id": "doc_id_001",
"vector": [0.23432432, 0.8923744, 0.89238432], "vector": [0.23432432, 0.8923744, 0.89238432],
"text": "text",
"metadata": '{"doc_id": "doc_id_001"}',
"page_content": "text",
"metadata": {"doc_id": "doc_id_001"},
}, },
"distance": 0.1, "distance": 0.1,
"score": 0.5, "score": 0.5,

api/uv.lock (+5, -5)

  version = 1
- revision = 3
+ revision = 2
  requires-python = ">=3.11, <3.13"
  resolution-markers = [
      "python_full_version >= '3.12.4' and sys_platform == 'linux'",

  { name = "pgvecto-rs", extras = ["sqlalchemy"], specifier = "~=0.2.1" },
  { name = "pgvector", specifier = "==0.2.5" },
  { name = "pymilvus", specifier = "~=2.5.0" },
- { name = "pymochow", specifier = "==1.3.1" },
+ { name = "pymochow", specifier = "==2.2.9" },
  { name = "pyobvector", specifier = "~=0.2.15" },
  { name = "qdrant-client", specifier = "==1.9.0" },
  { name = "tablestore", specifier = "==6.2.0" },

  [[package]]
  name = "pymochow"
- version = "1.3.1"
+ version = "2.2.9"
  source = { registry = "https://pypi.org/simple" }
  dependencies = [
      { name = "future" },
      { name = "orjson" },
      { name = "requests" },
  ]
- sdist = { url = "https://files.pythonhosted.org/packages/cc/da/3027eeeaf7a7db9b0ca761079de4e676a002e1cc2c4260dab0ce812972b8/pymochow-1.3.1.tar.gz", hash = "sha256:1693d10cd0bb7bce45327890a90adafb503155922ccc029acb257699a73a20ba", size = 30800, upload-time = "2024-09-11T12:06:37.88Z" }
+ sdist = { url = "https://files.pythonhosted.org/packages/b5/29/d9b112684ce490057b90bddede3fb6a69cf2787a3fd7736bdce203e77388/pymochow-2.2.9.tar.gz", hash = "sha256:5a28058edc8861deb67524410e786814571ed9fe0700c8c9fc0bc2ad5835b06c", size = 50079, upload-time = "2025-06-05T08:33:19.59Z" }
  wheels = [
-     { url = "https://files.pythonhosted.org/packages/6b/74/4b6227717f6baa37e7288f53e0fd55764939abc4119342eed4924a98f477/pymochow-1.3.1-py3-none-any.whl", hash = "sha256:a7f3b34fd6ea5d1d8413650bb6678365aa148fc396ae945e4ccb4f2365a52327", size = 42697, upload-time = "2024-09-11T12:06:36.114Z" },
+     { url = "https://files.pythonhosted.org/packages/bf/9b/be18f9709dfd8187ff233be5acb253a9f4f1b07f1db0e7b09d84197c28e2/pymochow-2.2.9-py3-none-any.whl", hash = "sha256:639192b97f143d4a22fc163872be12aee19523c46f12e22416e8f289f1354d15", size = 77899, upload-time = "2025-06-05T08:33:17.424Z" },
  ]

  [[package]]

docker/.env.example (+2, -0)

  BAIDU_VECTOR_DB_DATABASE=dify
  BAIDU_VECTOR_DB_SHARD=1
  BAIDU_VECTOR_DB_REPLICAS=3
+ BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER=DEFAULT_ANALYZER
+ BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE=COARSE_MODE

  # VikingDB configurations, only available when VECTOR_STORE is `vikingdb`
  VIKINGDB_ACCESS_KEY=your-ak

docker/docker-compose.yaml (+2, -0)

  BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify}
  BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1}
  BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3}
+ BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER: ${BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER:-DEFAULT_ANALYZER}
+ BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE: ${BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE:-COARSE_MODE}
  VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak}
  VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk}
  VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai}
