
dialog_service.py 12KB

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from copy import deepcopy

from api.db import LLMType
from api.db.db_models import Dialog, Conversation
from api.db.services.common_service import CommonService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
from api.settings import chat_logger, retrievaler
from rag.app.resume import forbidden_select_fields4resume
from rag.nlp.search import index_name
from rag.utils import rmSpace, num_tokens_from_string, encoder


class DialogService(CommonService):
    model = Dialog


class ConversationService(CommonService):
    model = Conversation


def message_fit_in(msg, max_length=4000):
    """Trim a chat history so its total token count fits within max_length."""
    def count():
        nonlocal msg
        tks_cnts = []
        for m in msg:
            tks_cnts.append(
                {"role": m["role"], "count": num_tokens_from_string(m["content"])})
        total = 0
        for m in tks_cnts:
            total += m["count"]
        return total

    c = count()
    if c < max_length:
        return c, msg

    # Too long: keep only the system messages plus the latest message.
    msg_ = [m for m in msg[:-1] if m["role"] == "system"]
    msg_.append(msg[-1])
    msg = msg_
    c = count()
    if c < max_length:
        return c, msg

    # Still too long: truncate whichever side dominates the token budget.
    ll = num_tokens_from_string(msg_[0]["content"])
    l = num_tokens_from_string(msg_[-1]["content"])
    if ll / (ll + l) > 0.8:
        m = msg_[0]["content"]
        m = encoder.decode(encoder.encode(m)[:max_length - l])
        msg[0]["content"] = m
        return max_length, msg

    m = msg_[1]["content"]
    m = encoder.decode(encoder.encode(m)[:max_length - l])
    msg[1]["content"] = m
    return max_length, msg


def chat(dialog, messages, stream=True, **kwargs):
    assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
    llm = LLMService.query(llm_name=dialog.llm_id)
    if not llm:
        llm = TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=dialog.llm_id)
        if not llm:
            raise LookupError("LLM(%s) not found" % dialog.llm_id)
        max_tokens = 1024
    else:
        max_tokens = llm[0].max_tokens

    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    if len(embd_nms) != 1:
        yield {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
        return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}

    questions = [m["content"] for m in messages if m["role"] == "user"]
    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embd_nms[0])
    chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    prompt_config = dialog.prompt_config
    field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
    # try to use sql if field mapping is good to go
    if field_map:
        chat_logger.info("Use SQL to retrieval:{}".format(questions[-1]))
        ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True))
        if ans:
            yield ans
            return

    for p in prompt_config["parameters"]:
        if p["key"] == "knowledge":
            continue
        if p["key"] not in kwargs and not p["optional"]:
            raise KeyError("Miss parameter: " + p["key"])
        if p["key"] not in kwargs:
            prompt_config["system"] = prompt_config["system"].replace(
                "{%s}" % p["key"], " ")

    # Repeat the latest question to give it more weight in retrieval.
    for _ in range(len(questions) // 2):
        questions.append(questions[-1])
    if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
        kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
    else:
        rerank_mdl = None
        if dialog.rerank_id:
            rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)
        kbinfos = retrievaler.retrieval(" ".join(questions), embd_mdl, dialog.tenant_id, dialog.kb_ids, 1, dialog.top_n,
                                        dialog.similarity_threshold,
                                        dialog.vector_similarity_weight,
                                        doc_ids=kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None,
                                        top=1024, aggs=False, rerank_mdl=rerank_mdl)
    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
    chat_logger.info(
        "{}->{}".format(" ".join(questions), "\n->".join(knowledges)))

    if not knowledges and prompt_config.get("empty_response"):
        yield {"answer": prompt_config["empty_response"], "reference": kbinfos}
        return {"answer": prompt_config["empty_response"], "reference": kbinfos}

    kwargs["knowledge"] = "\n".join(knowledges)
    gen_conf = dialog.llm_setting

    msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
    msg.extend([{"role": m["role"], "content": m["content"]}
                for m in messages if m["role"] != "system"])
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
    assert len(msg) >= 2, f"message_fit_in has bug: {msg}"

    if "max_tokens" in gen_conf:
        gen_conf["max_tokens"] = min(
            gen_conf["max_tokens"],
            max_tokens - used_token_count)

    def decorate_answer(answer):
        # Insert citations into the answer and attach the source references.
        nonlocal prompt_config, knowledges, kwargs, kbinfos
        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            answer, idx = retrievaler.insert_citations(answer,
                                                       [ck["content_ltks"]
                                                        for ck in kbinfos["chunks"]],
                                                       [ck["vector"]
                                                        for ck in kbinfos["chunks"]],
                                                       embd_mdl,
                                                       tkweight=1 - dialog.vector_similarity_weight,
                                                       vtweight=dialog.vector_similarity_weight)
            idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
            recall_docs = [
                d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
            if not recall_docs:
                recall_docs = kbinfos["doc_aggs"]
            kbinfos["doc_aggs"] = recall_docs

        refs = deepcopy(kbinfos)
        for c in refs["chunks"]:
            if c.get("vector"):
                del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        return {"answer": answer, "reference": refs}

    if stream:
        answer = ""
        for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], gen_conf):
            answer = ans
            yield {"answer": answer, "reference": {}}
        yield decorate_answer(answer)
    else:
        answer = chat_mdl.chat(
            msg[0]["content"], msg[1:], gen_conf)
        chat_logger.info("User: {}|Assistant: {}".format(
            msg[-1]["content"], answer))
        yield decorate_answer(answer)


def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    # quota: whether to append citation markers (##N$$) to each result row.
    # System prompt (Chinese): "You are a DBA. Based on the field structure of the
    # table below and the user's list of questions, write the SQL for the last question."
    sys_prompt = "你是一个DBA。你需要针对以下表的字段结构,根据用户的问题列表,写出最后一个问题对应的SQL。"
    # User prompt (Chinese): table name, field descriptions, the questions, and an
    # instruction to return only the SQL, with no extra explanation.
    user_prompt = """
表名:{};
数据库表字段说明如下:
{}
问题如下:
{}
请写出SQL, 且只要SQL,不要有其他说明及文字。
""".format(
        index_name(tenant_id),
        "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
        question
    )
    tried_times = 0

    def get_table():
        nonlocal sys_prompt, user_prompt, question, tried_times
        sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {
            "temperature": 0.06})
        print(user_prompt, sql)
        chat_logger.info(f"“{question}”==>{user_prompt} get SQL: {sql}")
        # Normalize the generated SQL: single line, lower case, strip anything
        # before the first SELECT and anything after a terminator or code fence.
        sql = re.sub(r"[\r\n]+", " ", sql.lower())
        sql = re.sub(r".*select ", "select ", sql.lower())
        sql = re.sub(r" +", " ", sql)
        sql = re.sub(r"([;；]|```).*", "", sql)
        if sql[:len("select ")] != "select ":
            return None, None
        if not re.search(r"((sum|avg|max|min)\(|group by )", sql.lower()):
            if sql[:len("select *")] != "select *":
                sql = "select doc_id,docnm_kwd," + sql[6:]
            else:
                flds = []
                for k in field_map.keys():
                    if k in forbidden_select_fields4resume:
                        continue
                    if len(flds) > 11:
                        break
                    flds.append(k)
                sql = "select doc_id,docnm_kwd," + ",".join(flds) + sql[8:]

        print(f"“{question}” get SQL(refined): {sql}")
        chat_logger.info(f"“{question}” get SQL(refined): {sql}")
        tried_times += 1
        return retrievaler.sql_retrieval(sql, format="json"), sql

    tbl, sql = get_table()
    if tbl is None:
        return None
    if tbl.get("error") and tried_times <= 2:
        # Retry prompt (Chinese): repeat the schema and questions, show the wrong
        # SQL and the backend error, and ask for a corrected SQL-only answer.
        user_prompt = """
表名:{};
数据库表字段说明如下:
{}
问题如下:
{}
你上一次给出的错误SQL如下:
{}
后台报错如下:
{}
请纠正SQL中的错误再写一遍,且只要SQL,不要有其他说明及文字。
""".format(
            index_name(tenant_id),
            "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
            question, sql, tbl["error"]
        )
        tbl, sql = get_table()
        chat_logger.info("TRY it again: {}".format(sql))

    chat_logger.info("GET table: {}".format(tbl))
    print(tbl)
    if tbl.get("error") or len(tbl["rows"]) == 0:
        return None

    docid_idx = set([ii for ii, c in enumerate(
        tbl["columns"]) if c["name"] == "doc_id"])
    docnm_idx = set([ii for ii, c in enumerate(
        tbl["columns"]) if c["name"] == "docnm_kwd"])
    clmn_idx = [ii for ii in range(
        len(tbl["columns"])) if ii not in (docid_idx | docnm_idx)]

    # compose markdown table
    clmns = "|" + "|".join([re.sub(r"(/.*|（[^（）]+）)", "", field_map.get(tbl["columns"][i]["name"],
                                                                       tbl["columns"][i]["name"])) for i in clmn_idx]) + ("|Source|" if docid_idx and docnm_idx else "|")
    line = "|" + "|".join(["------" for _ in range(len(clmn_idx))]) + \
           ("|------|" if docid_idx and docnm_idx else "")
    rows = ["|" +
            "|".join([rmSpace(str(r[i])) for i in clmn_idx]).replace("None", " ") +
            "|" for r in tbl["rows"]]
    if quota:
        # Append per-row citation markers so the UI can link rows back to documents.
        rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
    else:
        rows = "\n".join(rows)
    rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)

    if not docid_idx or not docnm_idx:
        chat_logger.warning("SQL missing field: " + sql)
        return {
            "answer": "\n".join([clmns, line, rows]),
            "reference": {"chunks": [], "doc_aggs": []}
        }

    docid_idx = list(docid_idx)[0]
    docnm_idx = list(docnm_idx)[0]
    doc_aggs = {}
    for r in tbl["rows"]:
        if r[docid_idx] not in doc_aggs:
            doc_aggs[r[docid_idx]] = {"doc_name": r[docnm_idx], "count": 0}
        doc_aggs[r[docid_idx]]["count"] += 1
    return {
        "answer": "\n".join([clmns, line, rows]),
        "reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[docnm_idx]} for r in tbl["rows"]],
                      "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in doc_aggs.items()]}
    }
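
For reference, a minimal sketch of how the chat generator above might be consumed, e.g. from an API handler. The dialog id and the question are placeholders, and DialogService.get_by_id is assumed to behave like the other CommonService lookups (returning a found-flag and the record); this is an illustration, not part of the module.

# Usage sketch (assumptions: an existing dialog id; DialogService.get_by_id
# returns (found, record) like other CommonService helpers).
from api.db.services.dialog_service import DialogService, chat

found, dialog = DialogService.get_by_id("<dialog_id>")  # placeholder id
messages = [{"role": "user", "content": "How many candidates know Python?"}]

# chat() is a generator. With stream=True it yields growing partial answers
# (with an empty "reference") and finally a decorated answer that carries the
# "reference" payload (chunks and doc_aggs used for citations).
final = None
for delta in chat(dialog, messages, stream=True):
    final = delta
    print(delta["answer"])

if final:
    print(final["reference"])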