
document_service.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from datetime import datetime

from elasticsearch_dsl import Q
from peewee import fn

from api.db import FileType, StatusEnum, TaskStatus
from api.db.db_models import DB, Document, Knowledgebase, Task, Tenant
from api.db.db_utils import bulk_insert_into_db
from api.db.services.common_service import CommonService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.settings import stat_logger
from api.utils import current_timestamp, get_format_time, get_uuid
from rag.nlp import search
from rag.settings import SVR_QUEUE_NAME
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils.minio_conn import MINIO
from rag.utils.redis_conn import REDIS_CONN


class DocumentService(CommonService):
    model = Document

    @classmethod
    @DB.connection_context()
    def get_by_kb_id(cls, kb_id, page_number, items_per_page,
                     orderby, desc, keywords):
        if keywords:
            docs = cls.model.select().where(
                (cls.model.kb_id == kb_id),
                (fn.LOWER(cls.model.name).contains(keywords.lower()))
            )
        else:
            docs = cls.model.select().where(cls.model.kb_id == kb_id)
        count = docs.count()
        if desc:
            docs = docs.order_by(cls.model.getter_by(orderby).desc())
        else:
            docs = docs.order_by(cls.model.getter_by(orderby).asc())
        docs = docs.paginate(page_number, items_per_page)
        return list(docs.dicts()), count

    @classmethod
    @DB.connection_context()
    def list_documents_in_dataset(cls, dataset_id, offset, count, order_by,
                                  descend, keywords):
        if keywords:
            docs = cls.model.select().where(
                (cls.model.kb_id == dataset_id),
                (fn.LOWER(cls.model.name).contains(keywords.lower()))
            )
        else:
            docs = cls.model.select().where(cls.model.kb_id == dataset_id)
        total = docs.count()
        # `descend` is compared as the strings 'True'/'False'; any other
        # value leaves the result unordered.
        if descend == 'True':
            docs = docs.order_by(cls.model.getter_by(order_by).desc())
        if descend == 'False':
            docs = docs.order_by(cls.model.getter_by(order_by).asc())
        docs = list(docs.dicts())
        if offset < 0 or offset > len(docs):
            raise IndexError("Offset is out of the valid range.")
        if count == -1:
            return docs[offset:], total
        return docs[offset:offset + count], total
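
    # `count == -1` returns every document from `offset` onward; e.g. with 10
    # matches, offset=4/count=-1 yields items 4..9, while offset=4/count=3
    # yields items 4..6. The second return value is always the unsliced total.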

    @classmethod
    @DB.connection_context()
    def insert(cls, doc):
        if not cls.save(**doc):
            raise RuntimeError("Database error (Document)!")
        e, doc = cls.get_by_id(doc["id"])
        if not e:
            raise RuntimeError("Database error (Document retrieval)!")
        e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
        if not KnowledgebaseService.update_by_id(
                kb.id, {"doc_num": kb.doc_num + 1}):
            raise RuntimeError("Database error (Knowledgebase)!")
        return doc

    @classmethod
    @DB.connection_context()
    def remove_document(cls, doc, tenant_id):
        # Delete the document's chunks from Elasticsearch, roll back the
        # knowledge base counters, then drop the row itself.
        ELASTICSEARCH.deleteByQuery(
            Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
        cls.clear_chunk_num(doc.id)
        return cls.delete_by_id(doc.id)

    @classmethod
    @DB.connection_context()
    def get_newly_uploaded(cls):
        fields = [
            cls.model.id,
            cls.model.kb_id,
            cls.model.parser_id,
            cls.model.parser_config,
            cls.model.name,
            cls.model.type,
            cls.model.location,
            cls.model.size,
            Knowledgebase.tenant_id,
            Tenant.embd_id,
            Tenant.img2txt_id,
            Tenant.asr_id,
            cls.model.update_time]
        docs = cls.model.select(*fields) \
            .join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id)) \
            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id)) \
            .where(
                cls.model.status == StatusEnum.VALID.value,
                ~(cls.model.type == FileType.VIRTUAL.value),
                cls.model.progress == 0,
                # Timestamps are in milliseconds: only documents updated in
                # the last 10 minutes (600,000 ms) are picked up.
                cls.model.update_time >= current_timestamp() - 1000 * 600,
                cls.model.run == TaskStatus.RUNNING.value) \
            .order_by(cls.model.update_time.asc())
        return list(docs.dicts())

    @classmethod
    @DB.connection_context()
    def get_unfinished_docs(cls):
        fields = [cls.model.id, cls.model.process_begin_at,
                  cls.model.parser_config, cls.model.progress_msg,
                  cls.model.run]
        docs = cls.model.select(*fields) \
            .where(
                cls.model.status == StatusEnum.VALID.value,
                ~(cls.model.type == FileType.VIRTUAL.value),
                cls.model.progress < 1,
                cls.model.progress > 0)
        return list(docs.dicts())

    @classmethod
    @DB.connection_context()
    def increment_chunk_num(cls, doc_id, kb_id, token_num, chunk_num,
                            duration):
        # The column is named `process_duation` in the schema; the
        # misspelling is kept to match the model definition.
        num = cls.model.update(
            token_num=cls.model.token_num + token_num,
            chunk_num=cls.model.chunk_num + chunk_num,
            process_duation=cls.model.process_duation + duration).where(
            cls.model.id == doc_id).execute()
        if num == 0:
            raise LookupError(
                "Document not found which is supposed to be there")
        num = Knowledgebase.update(
            token_num=Knowledgebase.token_num + token_num,
            chunk_num=Knowledgebase.chunk_num + chunk_num).where(
            Knowledgebase.id == kb_id).execute()
        return num

    @classmethod
    @DB.connection_context()
    def decrement_chunk_num(cls, doc_id, kb_id, token_num, chunk_num,
                            duration):
        num = cls.model.update(
            token_num=cls.model.token_num - token_num,
            chunk_num=cls.model.chunk_num - chunk_num,
            process_duation=cls.model.process_duation + duration).where(
            cls.model.id == doc_id).execute()
        if num == 0:
            raise LookupError(
                "Document not found which is supposed to be there")
        num = Knowledgebase.update(
            token_num=Knowledgebase.token_num - token_num,
            chunk_num=Knowledgebase.chunk_num - chunk_num).where(
            Knowledgebase.id == kb_id).execute()
        return num

    @classmethod
    @DB.connection_context()
    def clear_chunk_num(cls, doc_id):
        doc = cls.model.get_by_id(doc_id)
        assert doc, "Can't find document in database."
        num = Knowledgebase.update(
            token_num=Knowledgebase.token_num - doc.token_num,
            chunk_num=Knowledgebase.chunk_num - doc.chunk_num,
            doc_num=Knowledgebase.doc_num - 1).where(
            Knowledgebase.id == doc.kb_id).execute()
        return num
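
    # Bookkeeping invariant for the three methods above: increment_chunk_num
    # and decrement_chunk_num apply the same token/chunk delta to both the
    # document row and its knowledge base, while clear_chunk_num subtracts the
    # document's entire remaining totals (and one from doc_num), keeping the
    # knowledge base counters consistent when a document is removed.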

    @classmethod
    @DB.connection_context()
    def get_tenant_id(cls, doc_id):
        docs = cls.model.select(Knowledgebase.tenant_id).join(
            Knowledgebase, on=(Knowledgebase.id == cls.model.kb_id)).where(
            cls.model.id == doc_id,
            Knowledgebase.status == StatusEnum.VALID.value).dicts()
        if not docs:
            return
        return docs[0]["tenant_id"]

    @classmethod
    @DB.connection_context()
    def get_tenant_id_by_name(cls, name):
        docs = cls.model.select(Knowledgebase.tenant_id).join(
            Knowledgebase, on=(Knowledgebase.id == cls.model.kb_id)).where(
            cls.model.name == name,
            Knowledgebase.status == StatusEnum.VALID.value).dicts()
        if not docs:
            return
        return docs[0]["tenant_id"]

    @classmethod
    @DB.connection_context()
    def get_embd_id(cls, doc_id):
        docs = cls.model.select(Knowledgebase.embd_id).join(
            Knowledgebase, on=(Knowledgebase.id == cls.model.kb_id)).where(
            cls.model.id == doc_id,
            Knowledgebase.status == StatusEnum.VALID.value).dicts()
        if not docs:
            return
        return docs[0]["embd_id"]

    @classmethod
    @DB.connection_context()
    def get_doc_id_by_doc_name(cls, doc_name):
        fields = [cls.model.id]
        doc_id = cls.model.select(*fields) \
            .where(cls.model.name == doc_name).dicts()
        if not doc_id:
            return
        return doc_id[0]["id"]

    @classmethod
    @DB.connection_context()
    def get_thumbnails(cls, docids):
        fields = [cls.model.id, cls.model.thumbnail]
        return list(cls.model.select(
            *fields).where(cls.model.id.in_(docids)).dicts())

    @classmethod
    @DB.connection_context()
    def update_parser_config(cls, id, config):
        e, d = cls.get_by_id(id)
        if not e:
            raise LookupError(f"Document({id}) not found.")

        def dfs_update(old, new):
            # Recursively merge `new` into `old`: nested dicts are merged
            # key by key, everything else is overwritten.
            for k, v in new.items():
                if k not in old:
                    old[k] = v
                    continue
                if isinstance(v, dict):
                    assert isinstance(old[k], dict)
                    dfs_update(old[k], v)
                else:
                    old[k] = v

        dfs_update(d.parser_config, config)
        cls.update_by_id(id, {"parser_config": d.parser_config})
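
    # Example of the deep merge (keys are illustrative): merging
    #   {"chunk_token_num": 256, "raptor": {"use_raptor": True}}
    # into an existing parser_config of
    #   {"chunk_token_num": 128, "raptor": {"use_raptor": False, "threshold": 0.1}}
    # yields
    #   {"chunk_token_num": 256, "raptor": {"use_raptor": True, "threshold": 0.1}}
    # scalars are overwritten while untouched nested keys survive.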

    @classmethod
    @DB.connection_context()
    def get_doc_count(cls, tenant_id):
        docs = cls.model.select(cls.model.id).join(
            Knowledgebase, on=(Knowledgebase.id == cls.model.kb_id)).where(
            Knowledgebase.tenant_id == tenant_id)
        return len(docs)

    @classmethod
    @DB.connection_context()
    def begin2parse(cls, docid):
        # Seed progress with a tiny (< 1%) random value so the document is
        # marked as started the moment its task is dispatched.
        cls.update_by_id(
            docid, {"progress": random.random() * 1 / 100.,
                    "progress_msg": "Task dispatched...",
                    "process_begin_at": get_format_time()})

    @classmethod
    @DB.connection_context()
    def update_progress(cls):
        docs = cls.get_unfinished_docs()
        for d in docs:
            try:
                tsks = Task.query(doc_id=d["id"], order_by=Task.create_time)
                if not tsks:
                    continue
                msg = []
                prg = 0
                finished = True
                bad = 0
                e, doc = DocumentService.get_by_id(d["id"])
                status = doc.run  # TaskStatus.RUNNING.value
                for t in tsks:
                    if 0 <= t.progress < 1:
                        finished = False
                    prg += t.progress if t.progress >= 0 else 0
                    msg.append(t.progress_msg)
                    if t.progress == -1:
                        bad += 1
                prg /= len(tsks)
                if finished and bad:
                    prg = -1
                    status = TaskStatus.FAIL.value
                elif finished:
                    if d["parser_config"].get("raptor", {}).get("use_raptor") \
                            and d["progress_msg"].lower().find(" raptor") < 0:
                        queue_raptor_tasks(d)
                        prg *= 0.98
                        msg.append("------ RAPTOR -------")
                    else:
                        status = TaskStatus.DONE.value

                msg = "\n".join(msg)
                info = {
                    "process_duation": datetime.timestamp(
                        datetime.now()) - d["process_begin_at"].timestamp(),
                    "run": status}
                if prg != 0:
                    info["progress"] = prg
                if msg:
                    info["progress_msg"] = msg
                cls.update_by_id(d["id"], info)
            except Exception as e:
                stat_logger.error("fetch task exception:" + str(e))
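
    # A document's progress is the mean of its tasks' progress, with failed
    # tasks (progress == -1) contributing 0: tasks at 1.0, 0.5 and -1 average
    # to (1.0 + 0.5 + 0) / 3 = 0.5. The document only counts as finished once
    # no task is left in the [0, 1) range; any failure then marks the run
    # FAIL, otherwise it is DONE (or held at 98% of the mean while a follow-up
    # RAPTOR task is queued).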

    @classmethod
    @DB.connection_context()
    def get_kb_doc_count(cls, kb_id):
        return len(cls.model.select(cls.model.id).where(
            cls.model.kb_id == kb_id).dicts())

    @classmethod
    @DB.connection_context()
    def do_cancel(cls, doc_id):
        try:
            _, doc = DocumentService.get_by_id(doc_id)
            return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
        except Exception:
            pass
        return False


def queue_raptor_tasks(doc):
    def new_task():
        return {
            "id": get_uuid(),
            "doc_id": doc["id"],
            "from_page": 0,
            "to_page": -1,
            "progress_msg": "Start to do RAPTOR (Recursive Abstractive "
                            "Processing for Tree-Organized Retrieval)."
        }

    task = new_task()
    bulk_insert_into_db(Task, [task], True)
    task["type"] = "raptor"
    assert REDIS_CONN.queue_product(
        SVR_QUEUE_NAME, message=task
    ), "Can't access Redis. Please check the Redis status."
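

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes a
# configured database connection and an existing knowledge base; the kb_id
# value and keyword below are hypothetical placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # First page of a knowledge base's documents, 15 per page, newest first,
    # filtered by a keyword in the file name.
    docs, total = DocumentService.get_by_kb_id(
        kb_id="kb-id-placeholder", page_number=1, items_per_page=15,
        orderby="create_time", desc=True, keywords="report")
    print(f"{total} matching documents; showing {len(docs)}")

    # Re-aggregate task progress for every unfinished document; the background
    # service is expected to call this periodically.
    DocumentService.update_progress()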