You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

document_service.py 9.8KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
  16. import random
  17. from datetime import datetime
  18. from elasticsearch_dsl import Q
  19. from peewee import fn
  20. from api.settings import stat_logger
  21. from api.utils import current_timestamp, get_format_time
  22. from rag.utils.es_conn import ELASTICSEARCH
  23. from rag.utils.minio_conn import MINIO
  24. from rag.nlp import search
  25. from api.db import FileType, TaskStatus
  26. from api.db.db_models import DB, Knowledgebase, Tenant, Task
  27. from api.db.db_models import Document
  28. from api.db.services.common_service import CommonService
  29. from api.db.services.knowledgebase_service import KnowledgebaseService
  30. from api.db import StatusEnum
  31. class DocumentService(CommonService):
  32. model = Document
  33. @classmethod
  34. @DB.connection_context()
  35. def get_by_kb_id(cls, kb_id, page_number, items_per_page,
  36. orderby, desc, keywords):
  37. if keywords:
  38. docs = cls.model.select().where(
  39. (cls.model.kb_id == kb_id),
  40. (fn.LOWER(cls.model.name).contains(keywords.lower()))
  41. )
  42. else:
  43. docs = cls.model.select().where(cls.model.kb_id == kb_id)
  44. count = docs.count()
  45. if desc:
  46. docs = docs.order_by(cls.model.getter_by(orderby).desc())
  47. else:
  48. docs = docs.order_by(cls.model.getter_by(orderby).asc())
  49. docs = docs.paginate(page_number, items_per_page)
  50. return list(docs.dicts()), count
  51. @classmethod
  52. @DB.connection_context()
  53. def insert(cls, doc):
  54. if not cls.save(**doc):
  55. raise RuntimeError("Database error (Document)!")
  56. e, doc = cls.get_by_id(doc["id"])
  57. if not e:
  58. raise RuntimeError("Database error (Document retrieval)!")
  59. e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
  60. if not KnowledgebaseService.update_by_id(
  61. kb.id, {"doc_num": kb.doc_num + 1}):
  62. raise RuntimeError("Database error (Knowledgebase)!")
  63. return doc
  64. @classmethod
  65. @DB.connection_context()
  66. def delete(cls, doc):
  67. e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
  68. if not KnowledgebaseService.update_by_id(
  69. kb.id, {"doc_num": max(0, kb.doc_num - 1)}):
  70. raise RuntimeError("Database error (Knowledgebase)!")
  71. return cls.delete_by_id(doc.id)
  72. @classmethod
  73. @DB.connection_context()
  74. def remove_document(cls, doc, tenant_id):
  75. ELASTICSEARCH.deleteByQuery(
  76. Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
  77. cls.increment_chunk_num(
  78. doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1, 0)
  79. if not cls.delete(doc):
  80. raise RuntimeError("Database error (Document removal)!")
  81. MINIO.rm(doc.kb_id, doc.location)
  82. return cls.delete_by_id(doc.id)
  83. @classmethod
  84. @DB.connection_context()
  85. def get_newly_uploaded(cls):
  86. fields = [
  87. cls.model.id,
  88. cls.model.kb_id,
  89. cls.model.parser_id,
  90. cls.model.parser_config,
  91. cls.model.name,
  92. cls.model.type,
  93. cls.model.location,
  94. cls.model.size,
  95. Knowledgebase.tenant_id,
  96. Tenant.embd_id,
  97. Tenant.img2txt_id,
  98. Tenant.asr_id,
  99. cls.model.update_time]
  100. docs = cls.model.select(*fields) \
  101. .join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id)) \
  102. .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))\
  103. .where(
  104. cls.model.status == StatusEnum.VALID.value,
  105. ~(cls.model.type == FileType.VIRTUAL.value),
  106. cls.model.progress == 0,
  107. cls.model.update_time >= current_timestamp() - 1000 * 600,
  108. cls.model.run == TaskStatus.RUNNING.value)\
  109. .order_by(cls.model.update_time.asc())
  110. return list(docs.dicts())
  111. @classmethod
  112. @DB.connection_context()
  113. def get_unfinished_docs(cls):
  114. fields = [cls.model.id, cls.model.process_begin_at]
  115. docs = cls.model.select(*fields) \
  116. .where(
  117. cls.model.status == StatusEnum.VALID.value,
  118. ~(cls.model.type == FileType.VIRTUAL.value),
  119. cls.model.progress < 1,
  120. cls.model.progress > 0)
  121. return list(docs.dicts())
  122. @classmethod
  123. @DB.connection_context()
  124. def increment_chunk_num(cls, doc_id, kb_id, token_num, chunk_num, duation):
  125. num = cls.model.update(token_num=cls.model.token_num + token_num,
  126. chunk_num=cls.model.chunk_num + chunk_num,
  127. process_duation=cls.model.process_duation + duation).where(
  128. cls.model.id == doc_id).execute()
  129. if num == 0:
  130. raise LookupError(
  131. "Document not found which is supposed to be there")
  132. num = Knowledgebase.update(
  133. token_num=Knowledgebase.token_num +
  134. token_num,
  135. chunk_num=Knowledgebase.chunk_num +
  136. chunk_num).where(
  137. Knowledgebase.id == kb_id).execute()
  138. return num
  139. @classmethod
  140. @DB.connection_context()
  141. def clear_chunk_num(cls, doc_id):
  142. doc = cls.model.get_by_id(doc_id)
  143. assert doc, "Can't fine document in database."
  144. num = Knowledgebase.update(
  145. token_num=Knowledgebase.token_num -
  146. doc.token_num,
  147. chunk_num=Knowledgebase.chunk_num -
  148. doc.chunk_num,
  149. doc_num=Knowledgebase.doc_num-1
  150. ).where(
  151. Knowledgebase.id == doc.kb_id).execute()
  152. return num
  153. @classmethod
  154. @DB.connection_context()
  155. def get_tenant_id(cls, doc_id):
  156. docs = cls.model.select(
  157. Knowledgebase.tenant_id).join(
  158. Knowledgebase, on=(
  159. Knowledgebase.id == cls.model.kb_id)).where(
  160. cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
  161. docs = docs.dicts()
  162. if not docs:
  163. return
  164. return docs[0]["tenant_id"]
  165. @classmethod
  166. @DB.connection_context()
  167. def get_thumbnails(cls, docids):
  168. fields = [cls.model.id, cls.model.thumbnail]
  169. return list(cls.model.select(
  170. *fields).where(cls.model.id.in_(docids)).dicts())
  171. @classmethod
  172. @DB.connection_context()
  173. def update_parser_config(cls, id, config):
  174. e, d = cls.get_by_id(id)
  175. if not e:
  176. raise LookupError(f"Document({id}) not found.")
  177. def dfs_update(old, new):
  178. for k, v in new.items():
  179. if k not in old:
  180. old[k] = v
  181. continue
  182. if isinstance(v, dict):
  183. assert isinstance(old[k], dict)
  184. dfs_update(old[k], v)
  185. else:
  186. old[k] = v
  187. dfs_update(d.parser_config, config)
  188. cls.update_by_id(id, {"parser_config": d.parser_config})
  189. @classmethod
  190. @DB.connection_context()
  191. def get_doc_count(cls, tenant_id):
  192. docs = cls.model.select(cls.model.id).join(Knowledgebase,
  193. on=(Knowledgebase.id == cls.model.kb_id)).where(
  194. Knowledgebase.tenant_id == tenant_id)
  195. return len(docs)
  196. @classmethod
  197. @DB.connection_context()
  198. def begin2parse(cls, docid):
  199. cls.update_by_id(
  200. docid, {"progress": random.random() * 1 / 100.,
  201. "progress_msg": "Task dispatched...",
  202. "process_begin_at": get_format_time()
  203. })
  204. @classmethod
  205. @DB.connection_context()
  206. def update_progress(cls):
  207. docs = cls.get_unfinished_docs()
  208. for d in docs:
  209. try:
  210. tsks = Task.query(doc_id=d["id"], order_by=Task.create_time)
  211. if not tsks:
  212. continue
  213. msg = []
  214. prg = 0
  215. finished = True
  216. bad = 0
  217. status = TaskStatus.RUNNING.value
  218. for t in tsks:
  219. if 0 <= t.progress < 1:
  220. finished = False
  221. prg += t.progress if t.progress >= 0 else 0
  222. msg.append(t.progress_msg)
  223. if t.progress == -1:
  224. bad += 1
  225. prg /= len(tsks)
  226. if finished and bad:
  227. prg = -1
  228. status = TaskStatus.FAIL.value
  229. elif finished:
  230. status = TaskStatus.DONE.value
  231. msg = "\n".join(msg)
  232. info = {
  233. "process_duation": datetime.timestamp(
  234. datetime.now()) -
  235. d["process_begin_at"].timestamp(),
  236. "run": status}
  237. if prg != 0:
  238. info["progress"] = prg
  239. if msg:
  240. info["progress_msg"] = msg
  241. cls.update_by_id(d["id"], info)
  242. except Exception as e:
  243. stat_logger.error("fetch task exception:" + str(e))