
retrieval_service.py 17KB

import concurrent.futures
import logging
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Optional

from flask import Flask, current_app
from sqlalchemy import and_, or_
from sqlalchemy.orm import load_only
from sqlalchemy.sql.expression import false

from configs import dify_config
from core.rag.data_post_processor.data_post_processor import DataPostProcessor
from core.rag.datasource.keyword.keyword_factory import Keyword
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.embedding.retrieval import RetrievalSegments
from core.rag.index_processor.constant.index_type import IndexType
from core.rag.models.document import Document
from core.rag.rerank.rerank_type import RerankMode
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from models.dataset import ChildChunk, Dataset, DocumentSegment
from models.dataset import Document as DatasetDocument
from services.external_knowledge_service import ExternalDatasetService

# Default retrieval settings
default_retrieval_model = {
    "search_method": RetrievalMethod.SEMANTIC_SEARCH.value,
    "reranking_enable": False,
    "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""},
    "top_k": 2,
    "score_threshold_enabled": False,
}


class RetrievalService:
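    # retrieve() fans the query out to the enabled search strategies
    # (keyword, semantic/vector, full-text), waits for the workers to finish,
    # and for hybrid search merges the results through a shared rerank step.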
    @classmethod
    def retrieve(
        cls,
        retrieval_method: str,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float] = 0.0,
        reranking_model: Optional[dict] = None,
        reranking_mode: str = "reranking_model",
        weights: Optional[dict] = None,
        document_ids_filter: Optional[list[str]] = None,
    ):
        if not query:
            return []
        dataset = cls._get_dataset(dataset_id)
        if not dataset:
            return []

        all_documents: list[Document] = []
        exceptions: list[str] = []

        # Run the applicable search strategies concurrently on a thread pool
        with ThreadPoolExecutor(max_workers=dify_config.RETRIEVAL_SERVICE_EXECUTORS) as executor:  # type: ignore
            futures = []
            if retrieval_method == "keyword_search":
                futures.append(
                    executor.submit(
                        cls.keyword_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        all_documents=all_documents,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            if RetrievalMethod.is_support_semantic_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.embedding_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            if RetrievalMethod.is_support_fulltext_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.full_text_index_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            concurrent.futures.wait(futures, timeout=30, return_when=concurrent.futures.ALL_COMPLETED)

        if exceptions:
            raise ValueError(";\n".join(exceptions))

        if retrieval_method == RetrievalMethod.HYBRID_SEARCH.value:
            data_post_processor = DataPostProcessor(
                str(dataset.tenant_id), reranking_mode, reranking_model, weights, False
            )
            all_documents = data_post_processor.invoke(
                query=query,
                documents=all_documents,
                score_threshold=score_threshold,
                top_n=top_k,
            )
        return all_documents
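
    # external_retrieve bypasses the local indexes entirely and asks the
    # dataset's configured external knowledge service for matches.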
    @classmethod
    def external_retrieve(cls, dataset_id: str, query: str, external_retrieval_model: Optional[dict] = None):
        dataset = cls._get_dataset(dataset_id)
        if not dataset:
            return []
        all_documents = ExternalDatasetService.fetch_external_knowledge_retrieval(
            dataset.tenant_id, dataset_id, query, external_retrieval_model or {}
        )
        return all_documents

    @classmethod
    def _get_dataset(cls, dataset_id: str) -> Optional[Dataset]:
        return db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
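
    # The three search workers below run on pool threads, so each pushes its
    # own Flask app context; results and errors are handed back through the
    # shared all_documents / exceptions lists supplied by retrieve().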
    @classmethod
    def keyword_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        all_documents: list,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")

                keyword = Keyword(dataset=dataset)
                documents = keyword.search(
                    cls.escape_query_for_search(query), top_k=top_k, document_ids_filter=document_ids_filter
                )
                all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))
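
    # embedding_search runs a similarity search against the dataset's vector
    # store and, for pure semantic search with a configured reranking model,
    # reranks the hits before handing them back.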
    @classmethod
    def embedding_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")

                start = time.time()
                vector = Vector(dataset=dataset)
                documents = vector.search_by_vector(
                    query,
                    search_type="similarity_score_threshold",
                    top_k=top_k,
                    score_threshold=score_threshold,
                    filter={"group_id": [dataset.id]},
                    document_ids_filter=document_ids_filter,
                )
                logging.debug(f"embedding_search took {time.time() - start:.2f} seconds")

                if documents:
                    if (
                        reranking_model
                        and reranking_model.get("reranking_model_name")
                        and reranking_model.get("reranking_provider_name")
                        and retrieval_method == RetrievalMethod.SEMANTIC_SEARCH.value
                    ):
                        data_post_processor = DataPostProcessor(
                            str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL.value), reranking_model, None, False
                        )
                        all_documents.extend(
                            data_post_processor.invoke(
                                query=query,
                                documents=documents,
                                score_threshold=score_threshold,
                                top_n=len(documents),
                            )
                        )
                    else:
                        all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))
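
    # full_text_index_search mirrors embedding_search but queries the vector
    # store's full-text index with the escaped query string.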
    @classmethod
    def full_text_index_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")

                vector_processor = Vector(dataset=dataset)
                documents = vector_processor.search_by_full_text(
                    cls.escape_query_for_search(query), top_k=top_k, document_ids_filter=document_ids_filter
                )
                if documents:
                    if (
                        reranking_model
                        and reranking_model.get("reranking_model_name")
                        and reranking_model.get("reranking_provider_name")
                        and retrieval_method == RetrievalMethod.FULL_TEXT_SEARCH.value
                    ):
                        data_post_processor = DataPostProcessor(
                            str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL.value), reranking_model, None, False
                        )
                        all_documents.extend(
                            data_post_processor.invoke(
                                query=query,
                                documents=documents,
                                score_threshold=score_threshold,
                                top_n=len(documents),
                            )
                        )
                    else:
                        all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))
    @staticmethod
    def escape_query_for_search(query: str) -> str:
        return query.replace('"', '\\"')
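
    # format_retrieval_documents resolves raw index hits back to database
    # segments in three batched queries (documents, child chunks, segments)
    # instead of issuing one lookup per hit.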
    @classmethod
    def format_retrieval_documents(cls, documents: list[Document]) -> list[RetrievalSegments]:
        """Format retrieval documents with optimized batch processing."""
        if not documents:
            return []

        try:
            start_time = time.time()

            # Collect document IDs, skipping hits that lack one
            document_ids = {doc.metadata.get("document_id") for doc in documents if "document_id" in doc.metadata}
            if not document_ids:
                return []

            # Batch query dataset documents
            dataset_documents = {
                doc.id: doc
                for doc in db.session.query(DatasetDocument)
                .filter(DatasetDocument.id.in_(document_ids))
                .options(load_only(DatasetDocument.id, DatasetDocument.doc_form, DatasetDocument.dataset_id))
                .all()
            }

            records = []
            include_segment_ids = set()
            segment_child_map = {}

            # Precompute doc_forms to avoid redundant checks
            doc_forms = {}
            for doc in documents:
                document_id = doc.metadata.get("document_id")
                dataset_doc = dataset_documents.get(document_id)
                if dataset_doc:
                    doc_forms[document_id] = dataset_doc.doc_form

            # Partition index node IDs by document form
            child_index_node_ids = []
            index_node_ids = []
            for doc in documents:
                document_id = doc.metadata.get("document_id")
                if doc_forms.get(document_id) == IndexType.PARENT_CHILD_INDEX:
                    child_index_node_ids.append(doc.metadata.get("doc_id"))
                else:
                    index_node_ids.append(doc.metadata.get("doc_id"))

            # Batch query ChildChunk
            child_chunks = (
                db.session.query(ChildChunk).filter(ChildChunk.index_node_id.in_(child_index_node_ids)).all()
            )
            child_chunk_map = {chunk.index_node_id: chunk for chunk in child_chunks}
            segment_ids_from_child = [chunk.segment_id for chunk in child_chunks]

            segment_conditions = []
            if index_node_ids:
                segment_conditions.append(DocumentSegment.index_node_id.in_(index_node_ids))
            if segment_ids_from_child:
                segment_conditions.append(DocumentSegment.id.in_(segment_ids_from_child))
            if segment_conditions:
                filter_expr = or_(*segment_conditions)
            else:
                filter_expr = false()

            segment_map = {
                segment.id: segment
                for segment in db.session.query(DocumentSegment)
                .filter(
                    and_(
                        filter_expr,
                        DocumentSegment.enabled == True,
                        DocumentSegment.status == "completed",
                    )
                )
                .options(
                    load_only(
                        DocumentSegment.id,
                        DocumentSegment.content,
                        DocumentSegment.answer,
                        # index_node_id and dataset_id are read by the matching
                        # loop below; loading them up front avoids per-attribute
                        # lazy loads
                        DocumentSegment.index_node_id,
                        DocumentSegment.dataset_id,
                    )
                )
                .all()
            }
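
            # With all lookups preloaded, assemble one record per distinct
            # segment: parent-child hits group their child chunks under the
            # parent segment and track the best score; plain hits map 1:1.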
            for document in documents:
                document_id = document.metadata.get("document_id")
                dataset_document = dataset_documents.get(document_id)
                if not dataset_document:
                    continue

                doc_form = doc_forms.get(document_id)
                if doc_form == IndexType.PARENT_CHILD_INDEX:
                    # Handle parent-child documents using preloaded data
                    child_index_node_id = document.metadata.get("doc_id")
                    if not child_index_node_id:
                        continue
                    child_chunk = child_chunk_map.get(child_index_node_id)
                    if not child_chunk:
                        continue
                    segment = segment_map.get(child_chunk.segment_id)
                    if not segment:
                        continue

                    if segment.id not in include_segment_ids:
                        include_segment_ids.add(segment.id)
                        map_detail = {"max_score": document.metadata.get("score", 0.0), "child_chunks": []}
                        segment_child_map[segment.id] = map_detail
                        records.append({"segment": segment})

                    # Append child chunk details and keep the highest score seen
                    child_chunk_detail = {
                        "id": child_chunk.id,
                        "content": child_chunk.content,
                        "position": child_chunk.position,
                        "score": document.metadata.get("score", 0.0),
                    }
                    segment_child_map[segment.id]["child_chunks"].append(child_chunk_detail)
                    segment_child_map[segment.id]["max_score"] = max(
                        segment_child_map[segment.id]["max_score"], document.metadata.get("score", 0.0)
                    )
                else:
                    # Handle normal documents
                    index_node_id = document.metadata.get("doc_id")
                    if not index_node_id:
                        continue

                    segment = next(
                        (
                            s
                            for s in segment_map.values()
                            if s.index_node_id == index_node_id and s.dataset_id == dataset_document.dataset_id
                        ),
                        None,
                    )
                    if not segment:
                        continue

                    if segment.id not in include_segment_ids:
                        include_segment_ids.add(segment.id)
                        records.append(
                            {
                                "segment": segment,
                                "score": document.metadata.get("score", 0.0),
                            }
                        )

            # Merge child chunk information into the final records
            for record in records:
                segment_id = record["segment"].id
                if segment_id in segment_child_map:
                    record["child_chunks"] = segment_child_map[segment_id]["child_chunks"]
                    record["score"] = segment_child_map[segment_id]["max_score"]

            logging.debug(f"Formatting retrieval documents took {time.time() - start_time:.2f} seconds")
            return [RetrievalSegments(**record) for record in records]
        except Exception:
            # Roll back so a failed read does not leave the session in an
            # unusable transaction state
            db.session.rollback()
            raise
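
# Example usage (a minimal sketch): `retrieve` must run inside the Dify Flask
# application context, and the dataset id, query, and model names below are
# illustrative placeholders rather than values defined in this module.
#
#     documents = RetrievalService.retrieve(
#         retrieval_method=RetrievalMethod.HYBRID_SEARCH.value,
#         dataset_id="<dataset-uuid>",
#         query="How are refunds processed?",
#         top_k=4,
#         score_threshold=0.5,
#         reranking_mode="reranking_model",
#         reranking_model={
#             "reranking_provider_name": "<provider>",
#             "reranking_model_name": "<model>",
#         },
#     )
#     segments = RetrievalService.format_retrieval_documents(documents)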