retrieval_service.py

import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from typing import Optional

from flask import Flask, current_app
from sqlalchemy.orm import load_only

from configs import dify_config
from core.rag.data_post_processor.data_post_processor import DataPostProcessor
from core.rag.datasource.keyword.keyword_factory import Keyword
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.embedding.retrieval import RetrievalSegments
from core.rag.entities.metadata_entities import MetadataCondition
from core.rag.index_processor.constant.index_type import IndexType
from core.rag.models.document import Document
from core.rag.rerank.rerank_type import RerankMode
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from models.dataset import ChildChunk, Dataset, DocumentSegment
from models.dataset import Document as DatasetDocument
from services.external_knowledge_service import ExternalDatasetService

default_retrieval_model = {
    "search_method": RetrievalMethod.SEMANTIC_SEARCH.value,
    "reranking_enable": False,
    "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""},
    "top_k": 2,
    "score_threshold_enabled": False,
}
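
# Fallback retrieval configuration: plain semantic search with a small top_k
# and reranking disabled. Presumably consumed when a dataset has no
# retrieval_model of its own (an assumption; the consuming code lives outside
# this file).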

class RetrievalService:
    @classmethod
    def retrieve(
        cls,
        retrieval_method: str,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float] = 0.0,
        reranking_model: Optional[dict] = None,
        reranking_mode: str = "reranking_model",
        weights: Optional[dict] = None,
        document_ids_filter: Optional[list[str]] = None,
    ):
        if not query:
            return []
        dataset = cls._get_dataset(dataset_id)
        if not dataset:
            return []

        all_documents: list[Document] = []
        exceptions: list[str] = []

        # Run the selected retrieval strategies concurrently in a thread pool.
        with ThreadPoolExecutor(max_workers=dify_config.RETRIEVAL_SERVICE_EXECUTORS) as executor:  # type: ignore
            futures = []
            if retrieval_method == "keyword_search":
                futures.append(
                    executor.submit(
                        cls.keyword_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        all_documents=all_documents,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            if RetrievalMethod.is_support_semantic_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.embedding_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            if RetrievalMethod.is_support_fulltext_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.full_text_index_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            concurrent.futures.wait(futures, timeout=30, return_when=concurrent.futures.ALL_COMPLETED)

        if exceptions:
            raise ValueError(";\n".join(exceptions))

        if retrieval_method == RetrievalMethod.HYBRID_SEARCH.value:
            data_post_processor = DataPostProcessor(
                str(dataset.tenant_id), reranking_mode, reranking_model, weights, False
            )
            all_documents = data_post_processor.invoke(
                query=query,
                documents=all_documents,
                score_threshold=score_threshold,
                top_n=top_k,
            )
        return all_documents
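
    # A minimal usage sketch (hypothetical IDs and model names; assumes an
    # active Flask app and a populated dataset):
    #
    #   docs = RetrievalService.retrieve(
    #       retrieval_method=RetrievalMethod.SEMANTIC_SEARCH.value,
    #       dataset_id="example-dataset-uuid",
    #       query="How do refunds work?",
    #       top_k=5,
    #       score_threshold=0.5,
    #       reranking_model={
    #           "reranking_provider_name": "cohere",
    #           "reranking_model_name": "rerank-english-v3.0",
    #       },
    #   )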

    @classmethod
    def external_retrieve(
        cls,
        dataset_id: str,
        query: str,
        external_retrieval_model: Optional[dict] = None,
        metadata_filtering_conditions: Optional[dict] = None,
    ):
        dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
        if not dataset:
            return []
        metadata_condition = (
            MetadataCondition(**metadata_filtering_conditions) if metadata_filtering_conditions else None
        )
        all_documents = ExternalDatasetService.fetch_external_knowledge_retrieval(
            dataset.tenant_id,
            dataset_id,
            query,
            external_retrieval_model or {},
            metadata_condition=metadata_condition,
        )
        return all_documents
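
    # A minimal usage sketch for an external knowledge base (hypothetical
    # payloads; the exact shapes of external_retrieval_model and
    # MetadataCondition are defined elsewhere):
    #
    #   docs = RetrievalService.external_retrieve(
    #       dataset_id="external-dataset-uuid",
    #       query="pricing tiers",
    #       external_retrieval_model={"top_k": 5},
    #   )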

    @classmethod
    def _get_dataset(cls, dataset_id: str) -> Optional[Dataset]:
        return db.session.query(Dataset).filter(Dataset.id == dataset_id).first()

    @classmethod
    def keyword_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        all_documents: list,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")
                keyword = Keyword(dataset=dataset)
                documents = keyword.search(
                    cls.escape_query_for_search(query), top_k=top_k, document_ids_filter=document_ids_filter
                )
                all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))
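
    # Design note: keyword_search, embedding_search, and full_text_index_search
    # share one pattern. Each runs under flask_app.app_context() because it
    # executes on a pool thread with no request context of its own, and each
    # reports results and errors by mutating the all_documents / exceptions
    # lists passed in from retrieve(), so one failing strategy does not cancel
    # the others.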

    @classmethod
    def embedding_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")
                vector = Vector(dataset=dataset)
                documents = vector.search_by_vector(
                    query,
                    search_type="similarity_score_threshold",
                    top_k=top_k,
                    score_threshold=score_threshold,
                    filter={"group_id": [dataset.id]},
                    document_ids_filter=document_ids_filter,
                )
                if documents:
                    if (
                        reranking_model
                        and reranking_model.get("reranking_model_name")
                        and reranking_model.get("reranking_provider_name")
                        and retrieval_method == RetrievalMethod.SEMANTIC_SEARCH.value
                    ):
                        data_post_processor = DataPostProcessor(
                            str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL.value), reranking_model, None, False
                        )
                        all_documents.extend(
                            data_post_processor.invoke(
                                query=query,
                                documents=documents,
                                score_threshold=score_threshold,
                                top_n=len(documents),
                            )
                        )
                    else:
                        all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))
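
    # The in-method rerank above only applies to pure semantic search. For
    # hybrid search the retrieval_method check fails, the raw matches are
    # collected as-is, and retrieve() reranks the merged result set once at
    # the end.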

    @classmethod
    def full_text_index_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")
                vector_processor = Vector(dataset=dataset)
                documents = vector_processor.search_by_full_text(
                    cls.escape_query_for_search(query), top_k=top_k, document_ids_filter=document_ids_filter
                )
                if documents:
                    if (
                        reranking_model
                        and reranking_model.get("reranking_model_name")
                        and reranking_model.get("reranking_provider_name")
                        and retrieval_method == RetrievalMethod.FULL_TEXT_SEARCH.value
                    ):
                        data_post_processor = DataPostProcessor(
                            str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL.value), reranking_model, None, False
                        )
                        all_documents.extend(
                            data_post_processor.invoke(
                                query=query,
                                documents=documents,
                                score_threshold=score_threshold,
                                top_n=len(documents),
                            )
                        )
                    else:
                        all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @staticmethod
    def escape_query_for_search(query: str) -> str:
        return query.replace('"', '\\"')
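
    # Example: escape_query_for_search('say "hi"') returns 'say \"hi\"',
    # backslash-escaping double quotes before the query reaches the keyword
    # and full-text backends.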

    @classmethod
    def format_retrieval_documents(cls, documents: list[Document]) -> list[RetrievalSegments]:
        """Format retrieval documents with optimized batch processing"""
        if not documents:
            return []

        try:
            # Collect document IDs
            document_ids = {doc.metadata.get("document_id") for doc in documents if "document_id" in doc.metadata}
            if not document_ids:
                return []

            # Batch query dataset documents
            dataset_documents = {
                doc.id: doc
                for doc in db.session.query(DatasetDocument)
                .filter(DatasetDocument.id.in_(document_ids))
                .options(load_only(DatasetDocument.id, DatasetDocument.doc_form, DatasetDocument.dataset_id))
                .all()
            }

            records = []
            include_segment_ids = set()
            segment_child_map = {}

            # Process documents
            for document in documents:
                document_id = document.metadata.get("document_id")
                if document_id not in dataset_documents:
                    continue
                dataset_document = dataset_documents[document_id]
                if not dataset_document:
                    continue

                if dataset_document.doc_form == IndexType.PARENT_CHILD_INDEX:
                    # Handle parent-child documents
                    child_index_node_id = document.metadata.get("doc_id")
                    child_chunk = (
                        db.session.query(ChildChunk).filter(ChildChunk.index_node_id == child_index_node_id).first()
                    )
                    if not child_chunk:
                        continue
                    segment = (
                        db.session.query(DocumentSegment)
                        .filter(
                            DocumentSegment.dataset_id == dataset_document.dataset_id,
                            DocumentSegment.enabled == True,
                            DocumentSegment.status == "completed",
                            DocumentSegment.id == child_chunk.segment_id,
                        )
                        .options(
                            load_only(
                                DocumentSegment.id,
                                DocumentSegment.content,
                                DocumentSegment.answer,
                            )
                        )
                        .first()
                    )
                    if not segment:
                        continue
                    if segment.id not in include_segment_ids:
                        include_segment_ids.add(segment.id)
                        child_chunk_detail = {
                            "id": child_chunk.id,
                            "content": child_chunk.content,
                            "position": child_chunk.position,
                            "score": document.metadata.get("score", 0.0),
                        }
                        map_detail = {
                            "max_score": document.metadata.get("score", 0.0),
                            "child_chunks": [child_chunk_detail],
                        }
                        segment_child_map[segment.id] = map_detail
                        record = {
                            "segment": segment,
                        }
                        records.append(record)
                    else:
                        child_chunk_detail = {
                            "id": child_chunk.id,
                            "content": child_chunk.content,
                            "position": child_chunk.position,
                            "score": document.metadata.get("score", 0.0),
                        }
                        segment_child_map[segment.id]["child_chunks"].append(child_chunk_detail)
                        segment_child_map[segment.id]["max_score"] = max(
                            segment_child_map[segment.id]["max_score"], document.metadata.get("score", 0.0)
                        )
                else:
                    # Handle normal documents
                    index_node_id = document.metadata.get("doc_id")
                    if not index_node_id:
                        continue
                    segment = (
                        db.session.query(DocumentSegment)
                        .filter(
                            DocumentSegment.dataset_id == dataset_document.dataset_id,
                            DocumentSegment.enabled == True,
                            DocumentSegment.status == "completed",
                            DocumentSegment.index_node_id == index_node_id,
                        )
                        .first()
                    )
                    if not segment:
                        continue
                    include_segment_ids.add(segment.id)
                    record = {
                        "segment": segment,
                        "score": document.metadata.get("score"),  # type: ignore
                    }
                    records.append(record)

            # Add child chunks information to records
            for record in records:
                if record["segment"].id in segment_child_map:
                    record["child_chunks"] = segment_child_map[record["segment"].id].get("child_chunks")  # type: ignore
                    record["score"] = segment_child_map[record["segment"].id]["max_score"]

            result = []
            for record in records:
                # Extract segment
                segment = record["segment"]

                # Extract child_chunks, ensuring it's a list or None
                child_chunks = record.get("child_chunks")
                if not isinstance(child_chunks, list):
                    child_chunks = None

                # Extract score, ensuring it's a float or None
                score_value = record.get("score")
                score = (
                    float(score_value)
                    if score_value is not None and isinstance(score_value, int | float | str)
                    else None
                )

                # Create RetrievalSegments object
                retrieval_segment = RetrievalSegments(segment=segment, child_chunks=child_chunks, score=score)
                result.append(retrieval_segment)
            return result
        except Exception as e:
            db.session.rollback()
            raise e