
rag_pipeline_transform_service.py

import json
import logging
from datetime import UTC, datetime
from pathlib import Path
from uuid import uuid4

import yaml
from flask_login import current_user

from constants import DOCUMENT_EXTENSIONS
from core.plugin.impl.plugin import PluginInstaller
from extensions.ext_database import db
from factories import variable_factory
from models.dataset import Dataset, Document, DocumentPipelineExecutionLog, Pipeline
from models.model import UploadFile
from models.workflow import Workflow, WorkflowType
from services.entities.knowledge_entities.rag_pipeline_entities import KnowledgeConfiguration, RetrievalSetting
from services.plugin.plugin_migration import PluginMigration
from services.plugin.plugin_service import PluginService

logger = logging.getLogger(__name__)
class RagPipelineTransformService:
    def transform_dataset(self, dataset_id: str):
        dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first()
        if not dataset:
            raise ValueError("Dataset not found")
        if dataset.pipeline_id and dataset.runtime_mode == "rag_pipeline":
            return {
                "pipeline_id": dataset.pipeline_id,
                "dataset_id": dataset_id,
                "status": "success",
            }
        if dataset.provider != "vendor":
            raise ValueError("External dataset is not supported")
        datasource_type = dataset.data_source_type
        indexing_technique = dataset.indexing_technique
        if not datasource_type and not indexing_technique:
            return self._transform_to_empty_pipeline(dataset)
        doc_form = dataset.doc_form
        if not doc_form:
            return self._transform_to_empty_pipeline(dataset)
        retrieval_model = dataset.retrieval_model
        pipeline_yaml = self._get_transform_yaml(doc_form, datasource_type, indexing_technique)
        # install any plugin dependencies declared in the pipeline YAML
        self._deal_dependencies(pipeline_yaml, dataset.tenant_id)
        # extract the workflow definition
        workflow_data = pipeline_yaml.get("workflow")
        if not workflow_data:
            raise ValueError("Missing workflow data for rag pipeline")
        graph = workflow_data.get("graph", {})
        nodes = graph.get("nodes", [])
        new_nodes = []
        for node in nodes:
            if (
                node.get("data", {}).get("type") == "datasource"
                and node.get("data", {}).get("provider_type") == "local_file"
            ):
                node = self._deal_file_extensions(node)
            if node.get("data", {}).get("type") == "knowledge-index":
                node = self._deal_knowledge_index(dataset, doc_form, indexing_technique, retrieval_model, node)
            new_nodes.append(node)
        if new_nodes:
            graph["nodes"] = new_nodes
            workflow_data["graph"] = graph
            pipeline_yaml["workflow"] = workflow_data
        # create pipeline
        pipeline = self._create_pipeline(pipeline_yaml)
        # save chunk structure to dataset
        if doc_form == "hierarchical_model":
            dataset.chunk_structure = "hierarchical_model"
        elif doc_form == "text_model":
            dataset.chunk_structure = "text_model"
        else:
            raise ValueError("Unsupported doc form")
        dataset.runtime_mode = "rag_pipeline"
        dataset.pipeline_id = pipeline.id
        # migrate existing document records to the new datasource format
        self._deal_document_data(dataset)
        db.session.commit()
        return {
            "pipeline_id": pipeline.id,
            "dataset_id": dataset_id,
            "status": "success",
        }
    def _get_transform_yaml(self, doc_form: str, datasource_type: str, indexing_technique: str | None):
        pipeline_yaml = {}
        if doc_form == "text_model":
            match datasource_type:
                case "upload_file":
                    if indexing_technique == "high_quality":
                        # get graph from transform.file-general-high-quality.yml
                        with open(f"{Path(__file__).parent}/transform/file-general-high-quality.yml") as f:
                            pipeline_yaml = yaml.safe_load(f)
                    if indexing_technique == "economy":
                        # get graph from transform.file-general-economy.yml
                        with open(f"{Path(__file__).parent}/transform/file-general-economy.yml") as f:
                            pipeline_yaml = yaml.safe_load(f)
                case "notion_import":
                    if indexing_technique == "high_quality":
                        # get graph from transform.notion-general-high-quality.yml
                        with open(f"{Path(__file__).parent}/transform/notion-general-high-quality.yml") as f:
                            pipeline_yaml = yaml.safe_load(f)
                    if indexing_technique == "economy":
                        # get graph from transform.notion-general-economy.yml
                        with open(f"{Path(__file__).parent}/transform/notion-general-economy.yml") as f:
                            pipeline_yaml = yaml.safe_load(f)
                case "website_crawl":
                    if indexing_technique == "high_quality":
                        # get graph from transform.website-crawl-general-high-quality.yml
                        with open(f"{Path(__file__).parent}/transform/website-crawl-general-high-quality.yml") as f:
                            pipeline_yaml = yaml.safe_load(f)
                    if indexing_technique == "economy":
                        # get graph from transform.website-crawl-general-economy.yml
                        with open(f"{Path(__file__).parent}/transform/website-crawl-general-economy.yml") as f:
                            pipeline_yaml = yaml.safe_load(f)
                case _:
                    raise ValueError("Unsupported datasource type")
        elif doc_form == "hierarchical_model":
            match datasource_type:
                case "upload_file":
                    # get graph from transform.file-parentchild.yml
                    with open(f"{Path(__file__).parent}/transform/file-parentchild.yml") as f:
                        pipeline_yaml = yaml.safe_load(f)
                case "notion_import":
                    # get graph from transform.notion-parentchild.yml
                    with open(f"{Path(__file__).parent}/transform/notion-parentchild.yml") as f:
                        pipeline_yaml = yaml.safe_load(f)
                case "website_crawl":
                    # get graph from transform.website-crawl-parentchild.yml
                    with open(f"{Path(__file__).parent}/transform/website-crawl-parentchild.yml") as f:
                        pipeline_yaml = yaml.safe_load(f)
                case _:
                    raise ValueError("Unsupported datasource type")
        else:
            raise ValueError("Unsupported doc form")
        return pipeline_yaml
    def _deal_file_extensions(self, node: dict):
        file_extensions = node.get("data", {}).get("fileExtensions", [])
        if not file_extensions:
            return node
        file_extensions = [file_extension.lower() for file_extension in file_extensions]
        node["data"]["fileExtensions"] = DOCUMENT_EXTENSIONS
        return node
    def _deal_knowledge_index(
        self, dataset: Dataset, doc_form: str, indexing_technique: str | None, retrieval_model: dict, node: dict
    ):
        knowledge_configuration_dict = node.get("data", {})
        knowledge_configuration = KnowledgeConfiguration(**knowledge_configuration_dict)
        if indexing_technique == "high_quality":
            knowledge_configuration.embedding_model = dataset.embedding_model
            knowledge_configuration.embedding_model_provider = dataset.embedding_model_provider
        if retrieval_model:
            retrieval_setting = RetrievalSetting(**retrieval_model)
            if indexing_technique == "economy":
                retrieval_setting.search_method = "keyword_search"
            knowledge_configuration.retrieval_model = retrieval_setting
        else:
            dataset.retrieval_model = knowledge_configuration.retrieval_model.model_dump()
        knowledge_configuration_dict.update(knowledge_configuration.model_dump())
        node["data"] = knowledge_configuration_dict
        return node
    def _create_pipeline(
        self,
        data: dict,
    ) -> Pipeline:
        """Create a new pipeline with its draft and published workflows from the transform YAML."""
        pipeline_data = data.get("rag_pipeline", {})
        # extract and validate the workflow data
        workflow_data = data.get("workflow")
        if not workflow_data or not isinstance(workflow_data, dict):
            raise ValueError("Missing workflow data for rag pipeline")
        environment_variables_list = workflow_data.get("environment_variables", [])
        environment_variables = [
            variable_factory.build_environment_variable_from_mapping(obj) for obj in environment_variables_list
        ]
        conversation_variables_list = workflow_data.get("conversation_variables", [])
        conversation_variables = [
            variable_factory.build_conversation_variable_from_mapping(obj) for obj in conversation_variables_list
        ]
        rag_pipeline_variables_list = workflow_data.get("rag_pipeline_variables", [])
        graph = workflow_data.get("graph", {})
        # create new pipeline
        pipeline = Pipeline()
        pipeline.id = str(uuid4())
        pipeline.tenant_id = current_user.current_tenant_id
        pipeline.name = pipeline_data.get("name", "")
        pipeline.description = pipeline_data.get("description", "")
        pipeline.created_by = current_user.id
        pipeline.updated_by = current_user.id
        pipeline.is_published = True
        pipeline.is_public = True
        db.session.add(pipeline)
        db.session.flush()
        # create draft workflow
        draft_workflow = Workflow(
            tenant_id=pipeline.tenant_id,
            app_id=pipeline.id,
            features="{}",
            type=WorkflowType.RAG_PIPELINE.value,
            version="draft",
            graph=json.dumps(graph),
            created_by=current_user.id,
            environment_variables=environment_variables,
            conversation_variables=conversation_variables,
            rag_pipeline_variables=rag_pipeline_variables_list,
        )
        published_workflow = Workflow(
            tenant_id=pipeline.tenant_id,
            app_id=pipeline.id,
            features="{}",
            type=WorkflowType.RAG_PIPELINE.value,
            version=str(datetime.now(UTC).replace(tzinfo=None)),
            graph=json.dumps(graph),
            created_by=current_user.id,
            environment_variables=environment_variables,
            conversation_variables=conversation_variables,
            rag_pipeline_variables=rag_pipeline_variables_list,
        )
        db.session.add(draft_workflow)
        db.session.add(published_workflow)
        db.session.flush()
        pipeline.workflow_id = published_workflow.id
        db.session.add(pipeline)
        return pipeline
    def _deal_dependencies(self, pipeline_yaml: dict, tenant_id: str):
        installer_manager = PluginInstaller()
        installed_plugins = installer_manager.list_plugins(tenant_id)
        plugin_migration = PluginMigration()
        installed_plugins_ids = [plugin.plugin_id for plugin in installed_plugins]
        dependencies = pipeline_yaml.get("dependencies", [])
        need_install_plugin_unique_identifiers = []
        for dependency in dependencies:
            if dependency.get("type") == "marketplace":
                plugin_unique_identifier = dependency.get("value", {}).get("plugin_unique_identifier")
                plugin_id = plugin_unique_identifier.split(":")[0]
                if plugin_id not in installed_plugins_ids:
                    plugin_unique_identifier = plugin_migration._fetch_plugin_unique_identifier(plugin_id)  # type: ignore
                    if plugin_unique_identifier:
                        need_install_plugin_unique_identifiers.append(plugin_unique_identifier)
        if need_install_plugin_unique_identifiers:
            logger.debug("Installing missing pipeline plugins %s", need_install_plugin_unique_identifiers)
            PluginService.install_from_marketplace_pkg(tenant_id, need_install_plugin_unique_identifiers)
    def _transform_to_empty_pipeline(self, dataset: Dataset):
        pipeline = Pipeline(
            tenant_id=dataset.tenant_id,
            name=dataset.name,
            description=dataset.description,
            created_by=current_user.id,
        )
        db.session.add(pipeline)
        db.session.flush()
        dataset.pipeline_id = pipeline.id
        dataset.runtime_mode = "rag_pipeline"
        dataset.updated_by = current_user.id
        dataset.updated_at = datetime.now(UTC).replace(tzinfo=None)
        db.session.add(dataset)
        db.session.commit()
        return {
            "pipeline_id": pipeline.id,
            "dataset_id": dataset.id,
            "status": "success",
        }
    def _deal_document_data(self, dataset: Dataset):
        file_node_id = "1752479895761"
        notion_node_id = "1752489759475"
        jina_node_id = "1752491761974"
        firecrawl_node_id = "1752565402678"
        documents = db.session.query(Document).where(Document.dataset_id == dataset.id).all()
        for document in documents:
            data_source_info_dict = document.data_source_info_dict
            if not data_source_info_dict:
                continue
            if document.data_source_type == "upload_file":
                document.data_source_type = "local_file"
                file_id = data_source_info_dict.get("upload_file_id")
                if file_id:
                    file = db.session.query(UploadFile).where(UploadFile.id == file_id).first()
                    if file:
                        data_source_info = json.dumps(
                            {
                                "real_file_id": file_id,
                                "name": file.name,
                                "size": file.size,
                                "extension": file.extension,
                                "mime_type": file.mime_type,
                                "url": "",
                                "transfer_method": "local_file",
                            }
                        )
                        document.data_source_info = data_source_info
                        document_pipeline_execution_log = DocumentPipelineExecutionLog(
                            document_id=document.id,
                            pipeline_id=dataset.pipeline_id,
                            datasource_type="local_file",
                            datasource_info=data_source_info,
                            input_data={},
                            created_by=document.created_by,
                            created_at=document.created_at,
                            datasource_node_id=file_node_id,
                        )
                        db.session.add(document)
                        db.session.add(document_pipeline_execution_log)
            elif document.data_source_type == "notion_import":
                document.data_source_type = "online_document"
                data_source_info = json.dumps(
                    {
                        "workspace_id": data_source_info_dict.get("notion_workspace_id"),
                        "page": {
                            "page_id": data_source_info_dict.get("notion_page_id"),
                            "page_name": document.name,
                            "page_icon": data_source_info_dict.get("notion_page_icon"),
                            "type": data_source_info_dict.get("type"),
                            "last_edited_time": data_source_info_dict.get("last_edited_time"),
                            "parent_id": None,
                        },
                    }
                )
                document.data_source_info = data_source_info
                document_pipeline_execution_log = DocumentPipelineExecutionLog(
                    document_id=document.id,
                    pipeline_id=dataset.pipeline_id,
                    datasource_type="online_document",
                    datasource_info=data_source_info,
                    input_data={},
                    created_by=document.created_by,
                    created_at=document.created_at,
                    datasource_node_id=notion_node_id,
                )
                db.session.add(document)
                db.session.add(document_pipeline_execution_log)
            elif document.data_source_type == "website_crawl":
                document.data_source_type = "website_crawl"
                data_source_info = json.dumps(
                    {
                        "source_url": data_source_info_dict.get("url"),
                        "content": "",
                        "title": document.name,
                        "description": "",
                    }
                )
                document.data_source_info = data_source_info
                if data_source_info_dict.get("provider") == "firecrawl":
                    datasource_node_id = firecrawl_node_id
                elif data_source_info_dict.get("provider") == "jinareader":
                    datasource_node_id = jina_node_id
                else:
                    continue
                document_pipeline_execution_log = DocumentPipelineExecutionLog(
                    document_id=document.id,
                    pipeline_id=dataset.pipeline_id,
                    datasource_type="website_crawl",
                    datasource_info=data_source_info,
                    input_data={},
                    created_by=document.created_by,
                    created_at=document.created_at,
                    datasource_node_id=datasource_node_id,
                )
                db.session.add(document)
                db.session.add(document_pipeline_execution_log)
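
For context, here is a minimal sketch of how this service might be invoked. The service reads flask_login.current_user, so it has to run inside an authenticated Flask request; the blueprint, route path, and import path below are assumptions for illustration, not taken from the file above. The returned dict ({"pipeline_id": ..., "dataset_id": ..., "status": "success"}) comes straight from transform_dataset.

# Hypothetical controller wiring -- a sketch only; the blueprint name, route,
# and service import path are assumptions, not part of the source file.
from flask import Blueprint, jsonify
from flask_login import login_required

from services.rag_pipeline.rag_pipeline_transform_service import RagPipelineTransformService  # assumed module path

bp = Blueprint("rag_pipeline_transform", __name__)


@bp.post("/datasets/<string:dataset_id>/transform-to-pipeline")  # hypothetical route
@login_required  # the service relies on flask_login.current_user for tenant and author fields
def transform_dataset_to_pipeline(dataset_id: str):
    # Converts a legacy dataset into a rag-pipeline-backed one and returns the
    # service's result dict: pipeline_id, dataset_id, and status.
    result = RagPipelineTransformService().transform_dataset(dataset_id)
    return jsonify(result)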