#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from peewee import Expression
from elasticsearch_dsl import Q

from api.utils import current_timestamp
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils.minio_conn import MINIO
from rag.nlp import search
from api.db import FileType, TaskStatus
from api.db import StatusEnum
from api.db.db_models import DB, Knowledgebase, Tenant
from api.db.db_models import Document
from api.db.services.common_service import CommonService
from api.db.services.knowledgebase_service import KnowledgebaseService


class DocumentService(CommonService):
    model = Document
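
    # List documents in a knowledgebase, optionally filtered by a keyword
    # match on the document name, with ordering and pagination applied.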
    @classmethod
    @DB.connection_context()
    def get_by_kb_id(cls, kb_id, page_number, items_per_page,
                     orderby, desc, keywords):
        if keywords:
            docs = cls.model.select().where(
                cls.model.kb_id == kb_id,
                cls.model.name.like(f"%%{keywords}%%"))
        else:
            docs = cls.model.select().where(cls.model.kb_id == kb_id)

        count = docs.count()
        if desc:
            docs = docs.order_by(cls.model.getter_by(orderby).desc())
        else:
            docs = docs.order_by(cls.model.getter_by(orderby).asc())

        docs = docs.paginate(page_number, items_per_page)

        return list(docs.dicts()), count
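
    # Persist a new document row and increment doc_num on its knowledgebase.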
    @classmethod
    @DB.connection_context()
    def insert(cls, doc):
        if not cls.save(**doc):
            raise RuntimeError("Database error (Document)!")
        e, doc = cls.get_by_id(doc["id"])
        if not e:
            raise RuntimeError("Database error (Document retrieval)!")
        e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
        if not KnowledgebaseService.update_by_id(
                kb.id, {"doc_num": kb.doc_num + 1}):
            raise RuntimeError("Database error (Knowledgebase)!")
        return doc
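
    # Delete a document row and decrement doc_num on its knowledgebase.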
    @classmethod
    @DB.connection_context()
    def delete(cls, doc):
        e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
        if not KnowledgebaseService.update_by_id(
                kb.id, {"doc_num": kb.doc_num - 1}):
            raise RuntimeError("Database error (Knowledgebase)!")
        return cls.delete_by_id(doc.id)
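
    # Fully remove a document: drop its chunks from Elasticsearch, roll back
    # the token/chunk counters, delete the DB row and remove the stored file
    # from MinIO.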
    @classmethod
    @DB.connection_context()
    def remove_document(cls, doc, tenant_id):
        ELASTICSEARCH.deleteByQuery(
            Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
        cls.increment_chunk_num(
            doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1, 0)
        if not cls.delete(doc):
            raise RuntimeError("Database error (Document removal)!")
        MINIO.rm(doc.kb_id, doc.location)
        return cls.delete_by_id(doc.id)
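
    # Documents waiting to be parsed: valid, non-virtual, not yet started
    # (progress == 0), marked RUNNING and updated within the last 600,000 ms
    # (roughly ten minutes, assuming millisecond timestamps).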
    @classmethod
    @DB.connection_context()
    def get_newly_uploaded(cls, tm):
        fields = [
            cls.model.id,
            cls.model.kb_id,
            cls.model.parser_id,
            cls.model.parser_config,
            cls.model.name,
            cls.model.type,
            cls.model.location,
            cls.model.size,
            Knowledgebase.tenant_id,
            Tenant.embd_id,
            Tenant.img2txt_id,
            Tenant.asr_id,
            cls.model.update_time]
        docs = cls.model.select(*fields) \
            .join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id)) \
            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id)) \
            .where(
                cls.model.status == StatusEnum.VALID.value,
                ~(cls.model.type == FileType.VIRTUAL.value),
                cls.model.progress == 0,
                cls.model.update_time >= current_timestamp() - 1000 * 600,
                cls.model.run == TaskStatus.RUNNING.value) \
            .order_by(cls.model.update_time.asc())
        return list(docs.dicts())
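
    # Documents whose parsing has started but not finished (0 < progress < 1).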
    @classmethod
    @DB.connection_context()
    def get_unfinished_docs(cls):
        fields = [cls.model.id, cls.model.process_begin_at]
        docs = cls.model.select(*fields) \
            .where(
                cls.model.status == StatusEnum.VALID.value,
                ~(cls.model.type == FileType.VIRTUAL.value),
                cls.model.progress < 1,
                cls.model.progress > 0)
        return list(docs.dicts())
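
    # Adjust token/chunk counters and processing duration on a document, then
    # mirror the token/chunk deltas onto its knowledgebase; callers pass
    # negative deltas when chunks are removed.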
    @classmethod
    @DB.connection_context()
    def increment_chunk_num(cls, doc_id, kb_id, token_num, chunk_num, duation):
        num = cls.model.update(
            token_num=cls.model.token_num + token_num,
            chunk_num=cls.model.chunk_num + chunk_num,
            process_duation=cls.model.process_duation + duation).where(
            cls.model.id == doc_id).execute()
        if num == 0:
            raise LookupError(
                "Document not found which is supposed to be there")
        num = Knowledgebase.update(
            token_num=Knowledgebase.token_num + token_num,
            chunk_num=Knowledgebase.chunk_num + chunk_num).where(
            Knowledgebase.id == kb_id).execute()
        return num
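
    # Resolve the tenant that owns a document via its knowledgebase.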
    @classmethod
    @DB.connection_context()
    def get_tenant_id(cls, doc_id):
        docs = cls.model.select(
            Knowledgebase.tenant_id).join(
            Knowledgebase, on=(
                Knowledgebase.id == cls.model.kb_id)).where(
            cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
        docs = docs.dicts()
        if not docs:
            return
        return docs[0]["tenant_id"]
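
    # Thumbnails for a batch of document ids.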
    @classmethod
    @DB.connection_context()
    def get_thumbnails(cls, docids):
        fields = [cls.model.id, cls.model.thumbnail]
        return list(cls.model.select(
            *fields).where(cls.model.id.in_(docids)).dicts())
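
    # Recursively merge a partial parser config into the stored one (incoming
    # keys win) and write the result back on the document.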
    @classmethod
    @DB.connection_context()
    def update_parser_config(cls, id, config):
        e, d = cls.get_by_id(id)
        if not e:
            raise LookupError(f"Document({id}) not found.")

        def dfs_update(old, new):
            for k, v in new.items():
                if k not in old:
                    old[k] = v
                    continue
                if isinstance(v, dict):
                    assert isinstance(old[k], dict)
                    dfs_update(old[k], v)
                else:
                    old[k] = v

        dfs_update(d.parser_config, config)
        cls.update_by_id(id, {"parser_config": d.parser_config})
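
    # Count all documents belonging to a tenant's knowledgebases.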
    @classmethod
    @DB.connection_context()
    def get_doc_count(cls, tenant_id):
        docs = cls.model.select(cls.model.id).join(
            Knowledgebase, on=(Knowledgebase.id == cls.model.kb_id)).where(
            Knowledgebase.tenant_id == tenant_id)
        return len(docs)