
# retrieval_service.py

import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from typing import Optional

from flask import Flask, current_app
from sqlalchemy import select
from sqlalchemy.orm import Session, load_only

from configs import dify_config
from core.rag.data_post_processor.data_post_processor import DataPostProcessor
from core.rag.datasource.keyword.keyword_factory import Keyword
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.embedding.retrieval import RetrievalSegments
from core.rag.entities.metadata_entities import MetadataCondition
from core.rag.index_processor.constant.index_type import IndexType
from core.rag.models.document import Document
from core.rag.rerank.rerank_type import RerankMode
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from models.dataset import ChildChunk, Dataset, DocumentSegment
from models.dataset import Document as DatasetDocument
from services.external_knowledge_service import ExternalDatasetService

default_retrieval_model = {
    "search_method": RetrievalMethod.SEMANTIC_SEARCH.value,
    "reranking_enable": False,
    "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""},
    "top_k": 4,
    "score_threshold_enabled": False,
}


class RetrievalService:
    @classmethod
    def retrieve(
        cls,
        retrieval_method: str,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float] = 0.0,
        reranking_model: Optional[dict] = None,
        reranking_mode: str = "reranking_model",
        weights: Optional[dict] = None,
        document_ids_filter: Optional[list[str]] = None,
    ):
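        """Retrieve documents from the dataset using the given retrieval method.

        Keyword, semantic, and full-text searches run concurrently in a thread
        pool; hybrid search additionally reranks the merged results.
        """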
        if not query:
            return []
        dataset = cls._get_dataset(dataset_id)
        if not dataset:
            return []

        all_documents: list[Document] = []
        exceptions: list[str] = []

        # Run the individual search strategies concurrently in a thread pool.
        with ThreadPoolExecutor(max_workers=dify_config.RETRIEVAL_SERVICE_EXECUTORS) as executor:  # type: ignore
            futures = []
            if retrieval_method == "keyword_search":
                futures.append(
                    executor.submit(
                        cls.keyword_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        all_documents=all_documents,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            if RetrievalMethod.is_support_semantic_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.embedding_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            if RetrievalMethod.is_support_fulltext_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.full_text_index_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            concurrent.futures.wait(futures, timeout=30, return_when=concurrent.futures.ALL_COMPLETED)

        if exceptions:
            raise ValueError(";\n".join(exceptions))

        if retrieval_method == RetrievalMethod.HYBRID_SEARCH.value:
            data_post_processor = DataPostProcessor(
                str(dataset.tenant_id), reranking_mode, reranking_model, weights, False
            )
            all_documents = data_post_processor.invoke(
                query=query,
                documents=all_documents,
                score_threshold=score_threshold,
                top_n=top_k,
            )
        return all_documents

    @classmethod
    def external_retrieve(
        cls,
        dataset_id: str,
        query: str,
        external_retrieval_model: Optional[dict] = None,
        metadata_filtering_conditions: Optional[dict] = None,
    ):
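        """Retrieve documents from the external knowledge base bound to this dataset."""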
        stmt = select(Dataset).where(Dataset.id == dataset_id)
        dataset = db.session.scalar(stmt)
        if not dataset:
            return []
        metadata_condition = (
            MetadataCondition(**metadata_filtering_conditions) if metadata_filtering_conditions else None
        )
        all_documents = ExternalDatasetService.fetch_external_knowledge_retrieval(
            dataset.tenant_id,
            dataset_id,
            query,
            external_retrieval_model or {},
            metadata_condition=metadata_condition,
        )
        return all_documents

    @classmethod
    def _get_dataset(cls, dataset_id: str) -> Optional[Dataset]:
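        """Fetch a dataset by ID in a short-lived session, or None if it does not exist."""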
        with Session(db.engine) as session:
            return session.query(Dataset).where(Dataset.id == dataset_id).first()

    @classmethod
    def keyword_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        all_documents: list,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
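        """Worker for keyword search: appends hits to all_documents and errors to exceptions."""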
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")

                keyword = Keyword(dataset=dataset)
                documents = keyword.search(
                    cls.escape_query_for_search(query), top_k=top_k, document_ids_filter=document_ids_filter
                )
                all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @classmethod
    def embedding_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
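        """Worker for vector-similarity search, with optional reranking for pure semantic search."""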
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")

                vector = Vector(dataset=dataset)
                documents = vector.search_by_vector(
                    query,
                    search_type="similarity_score_threshold",
                    top_k=top_k,
                    score_threshold=score_threshold,
                    filter={"group_id": [dataset.id]},
                    document_ids_filter=document_ids_filter,
                )
                if documents:
                    if (
                        reranking_model
                        and reranking_model.get("reranking_model_name")
                        and reranking_model.get("reranking_provider_name")
                        and retrieval_method == RetrievalMethod.SEMANTIC_SEARCH.value
                    ):
                        data_post_processor = DataPostProcessor(
                            str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL.value), reranking_model, None, False
                        )
                        all_documents.extend(
                            data_post_processor.invoke(
                                query=query,
                                documents=documents,
                                score_threshold=score_threshold,
                                top_n=len(documents),
                            )
                        )
                    else:
                        all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @classmethod
    def full_text_index_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
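        """Worker for full-text index search, with optional reranking for pure full-text search."""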
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")

                vector_processor = Vector(dataset=dataset)
                documents = vector_processor.search_by_full_text(
                    cls.escape_query_for_search(query), top_k=top_k, document_ids_filter=document_ids_filter
                )
                if documents:
                    if (
                        reranking_model
                        and reranking_model.get("reranking_model_name")
                        and reranking_model.get("reranking_provider_name")
                        and retrieval_method == RetrievalMethod.FULL_TEXT_SEARCH.value
                    ):
                        data_post_processor = DataPostProcessor(
                            str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL.value), reranking_model, None, False
                        )
                        all_documents.extend(
                            data_post_processor.invoke(
                                query=query,
                                documents=documents,
                                score_threshold=score_threshold,
                                top_n=len(documents),
                            )
                        )
                    else:
                        all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @staticmethod
    def escape_query_for_search(query: str) -> str:
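        """Escape double quotes so the query can be embedded safely in search expressions."""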
        return query.replace('"', '\\"')

    @classmethod
    def format_retrieval_documents(cls, documents: list[Document]) -> list[RetrievalSegments]:
        """Format retrieval documents with optimized batch processing."""
        if not documents:
            return []
        try:
            # Collect document IDs
            document_ids = {doc.metadata.get("document_id") for doc in documents if "document_id" in doc.metadata}
            if not document_ids:
                return []

            # Batch query dataset documents
            dataset_documents = {
                doc.id: doc
                for doc in db.session.query(DatasetDocument)
                .where(DatasetDocument.id.in_(document_ids))
                .options(load_only(DatasetDocument.id, DatasetDocument.doc_form, DatasetDocument.dataset_id))
                .all()
            }

            records = []
            include_segment_ids = set()
            segment_child_map = {}

            # Process documents
            for document in documents:
                document_id = document.metadata.get("document_id")
                if document_id not in dataset_documents:
                    continue
                dataset_document = dataset_documents[document_id]
                if not dataset_document:
                    continue
                if dataset_document.doc_form == IndexType.PARENT_CHILD_INDEX:
                    # Handle parent-child documents
                    child_index_node_id = document.metadata.get("doc_id")
                    child_chunk_stmt = select(ChildChunk).where(ChildChunk.index_node_id == child_index_node_id)
                    child_chunk = db.session.scalar(child_chunk_stmt)
                    if not child_chunk:
                        continue
                    segment = (
                        db.session.query(DocumentSegment)
                        .where(
                            DocumentSegment.dataset_id == dataset_document.dataset_id,
                            DocumentSegment.enabled == True,
                            DocumentSegment.status == "completed",
                            DocumentSegment.id == child_chunk.segment_id,
                        )
                        .options(
                            load_only(
                                DocumentSegment.id,
                                DocumentSegment.content,
                                DocumentSegment.answer,
                            )
                        )
                        .first()
                    )
                    if not segment:
                        continue
                    if segment.id not in include_segment_ids:
                        include_segment_ids.add(segment.id)
                        child_chunk_detail = {
                            "id": child_chunk.id,
                            "content": child_chunk.content,
                            "position": child_chunk.position,
                            "score": document.metadata.get("score", 0.0),
                        }
                        map_detail = {
                            "max_score": document.metadata.get("score", 0.0),
                            "child_chunks": [child_chunk_detail],
                        }
                        segment_child_map[segment.id] = map_detail
                        record = {
                            "segment": segment,
                        }
                        records.append(record)
                    else:
                        child_chunk_detail = {
                            "id": child_chunk.id,
                            "content": child_chunk.content,
                            "position": child_chunk.position,
                            "score": document.metadata.get("score", 0.0),
                        }
                        segment_child_map[segment.id]["child_chunks"].append(child_chunk_detail)
                        segment_child_map[segment.id]["max_score"] = max(
                            segment_child_map[segment.id]["max_score"], document.metadata.get("score", 0.0)
                        )
                else:
                    # Handle normal documents
                    index_node_id = document.metadata.get("doc_id")
                    if not index_node_id:
                        continue
                    document_segment_stmt = select(DocumentSegment).where(
                        DocumentSegment.dataset_id == dataset_document.dataset_id,
                        DocumentSegment.enabled == True,
                        DocumentSegment.status == "completed",
                        DocumentSegment.index_node_id == index_node_id,
                    )
                    segment = db.session.scalar(document_segment_stmt)
                    if not segment:
                        continue
                    include_segment_ids.add(segment.id)
                    record = {
                        "segment": segment,
                        "score": document.metadata.get("score"),  # type: ignore
                    }
                    records.append(record)

            # Add child chunks information to records
            for record in records:
                if record["segment"].id in segment_child_map:
                    record["child_chunks"] = segment_child_map[record["segment"].id].get("child_chunks")  # type: ignore
                    record["score"] = segment_child_map[record["segment"].id]["max_score"]

            result = []
            for record in records:
                # Extract segment
                segment = record["segment"]
                # Extract child_chunks, ensuring it's a list or None
                child_chunks = record.get("child_chunks")
                if not isinstance(child_chunks, list):
                    child_chunks = None
                # Extract score, ensuring it's a float or None
                score_value = record.get("score")
                score = (
                    float(score_value)
                    if score_value is not None and isinstance(score_value, int | float | str)
                    else None
                )
                # Create RetrievalSegments object
                retrieval_segment = RetrievalSegments(segment=segment, child_chunks=child_chunks, score=score)
                result.append(retrieval_segment)
            return result
        except Exception as e:
            db.session.rollback()
            raise e
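

# A minimal usage sketch (not part of the service itself): it assumes an active
# Flask application context and a real dataset ID in place of the hypothetical
# "your-dataset-id" placeholder.
#
#     documents = RetrievalService.retrieve(
#         retrieval_method=RetrievalMethod.HYBRID_SEARCH.value,
#         dataset_id="your-dataset-id",
#         query="how is parent-child chunking stored?",
#         top_k=4,
#     )
#     segments = RetrievalService.format_retrieval_documents(documents)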