Nevar pievienot vairāk kā 25 tēmas Tēmai ir jāsākas ar burtu vai ciparu, tā var saturēt domu zīmes ('-') un var būt līdz 35 simboliem gara.

dialog_service.py 16KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385
  1. #
  2. # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. import os
  17. import json
  18. import re
  19. from copy import deepcopy
  20. from api.db import LLMType, ParserType
  21. from api.db.db_models import Dialog, Conversation
  22. from api.db.services.common_service import CommonService
  23. from api.db.services.knowledgebase_service import KnowledgebaseService
  24. from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
  25. from api.settings import chat_logger, retrievaler, kg_retrievaler
  26. from rag.app.resume import forbidden_select_fields4resume
  27. from rag.nlp import keyword_extraction
  28. from rag.nlp.search import index_name
  29. from rag.utils import rmSpace, num_tokens_from_string, encoder
  30. from api.utils.file_utils import get_project_base_directory
class DialogService(CommonService):
    # Thin CRUD service: binds CommonService's generic query/save/update
    # operations to the Dialog ORM model.
    model = Dialog
class ConversationService(CommonService):
    # Thin CRUD service: binds CommonService's generic query/save/update
    # operations to the Conversation ORM model.
    model = Conversation
  35. def message_fit_in(msg, max_length=4000):
  36. def count():
  37. nonlocal msg
  38. tks_cnts = []
  39. for m in msg:
  40. tks_cnts.append(
  41. {"role": m["role"], "count": num_tokens_from_string(m["content"])})
  42. total = 0
  43. for m in tks_cnts:
  44. total += m["count"]
  45. return total
  46. c = count()
  47. if c < max_length:
  48. return c, msg
  49. msg_ = [m for m in msg[:-1] if m["role"] == "system"]
  50. msg_.append(msg[-1])
  51. msg = msg_
  52. c = count()
  53. if c < max_length:
  54. return c, msg
  55. ll = num_tokens_from_string(msg_[0]["content"])
  56. l = num_tokens_from_string(msg_[-1]["content"])
  57. if ll / (ll + l) > 0.8:
  58. m = msg_[0]["content"]
  59. m = encoder.decode(encoder.encode(m)[:max_length - l])
  60. msg[0]["content"] = m
  61. return max_length, msg
  62. m = msg_[1]["content"]
  63. m = encoder.decode(encoder.encode(m)[:max_length - l])
  64. msg[1]["content"] = m
  65. return max_length, msg
  66. def llm_id2llm_type(llm_id):
  67. fnm = os.path.join(get_project_base_directory(), "conf")
  68. llm_factories = json.load(open(os.path.join(fnm, "llm_factories.json"), "r"))
  69. for llm_factory in llm_factories["factory_llm_infos"]:
  70. for llm in llm_factory["llm"]:
  71. if llm_id == llm["llm_name"]:
  72. return llm["model_type"].strip(",")[-1]
def chat(dialog, messages, stream=True, **kwargs):
    """Answer the latest user message of `dialog` using RAG.

    Generator yielding dicts of the form {"answer": ..., "reference": ...}.
    When `stream` is True, partial answers are yielded as they arrive,
    followed by a final decorated answer with citations; otherwise a single
    decorated answer is yielded.

    Raises:
        LookupError: the dialog's LLM is not registered for the tenant.
        KeyError: a required prompt parameter is missing from kwargs.
    """
    assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
    # Resolve the LLM record to learn its context window; fall back to the
    # tenant-scoped table and to an 8192-token default for tenant-only rows.
    llm = LLMService.query(llm_name=dialog.llm_id)
    if not llm:
        llm = TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=dialog.llm_id)
        if not llm:
            raise LookupError("LLM(%s) not found" % dialog.llm_id)
        max_tokens = 8192
    else:
        max_tokens = llm[0].max_tokens

    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    # All knowledge bases must share one embedding model, otherwise their
    # vectors live in different spaces and similarities are meaningless.
    if len(embd_nms) != 1:
        yield {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
        return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}

    # Use the knowledge-graph retriever only when *every* KB is KG-parsed.
    is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
    retr = retrievaler if not is_kg else kg_retrievaler

    questions = [m["content"] for m in messages if m["role"] == "user"]
    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embd_nms[0])
    if llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    prompt_config = dialog.prompt_config
    field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
    # try to use sql if field mapping is good to go
    if field_map:
        chat_logger.info("Use SQL to retrieval:{}".format(questions[-1]))
        ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True))
        if ans:
            yield ans
            return

    # Substitute optional prompt parameters; missing required ones are fatal.
    for p in prompt_config["parameters"]:
        if p["key"] == "knowledge":
            continue
        if p["key"] not in kwargs and not p["optional"]:
            raise KeyError("Miss parameter: " + p["key"])
        if p["key"] not in kwargs:
            prompt_config["system"] = prompt_config["system"].replace(
                "{%s}" % p["key"], " ")

    rerank_mdl = None
    if dialog.rerank_id:
        rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)

    # Duplicate the latest question so it carries more weight in retrieval.
    for _ in range(len(questions) // 2):
        questions.append(questions[-1])

    if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
        # Prompt does not take retrieved knowledge: skip retrieval entirely.
        kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
    else:
        if prompt_config.get("keyword", False):
            questions[-1] += keyword_extraction(chat_mdl, questions[-1])
        kbinfos = retr.retrieval(" ".join(questions), embd_mdl, dialog.tenant_id, dialog.kb_ids, 1, dialog.top_n,
                                 dialog.similarity_threshold,
                                 dialog.vector_similarity_weight,
                                 doc_ids=kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None,
                                 top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl)
    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]

    # self-rag: when enabled and the retrieved chunks look irrelevant,
    # rewrite the question once and retrieve again.
    if dialog.prompt_config.get("self_rag") and not relevant(dialog.tenant_id, dialog.llm_id, questions[-1], knowledges):
        questions[-1] = rewrite(dialog.tenant_id, dialog.llm_id, questions[-1])
        kbinfos = retr.retrieval(" ".join(questions), embd_mdl, dialog.tenant_id, dialog.kb_ids, 1, dialog.top_n,
                                 dialog.similarity_threshold,
                                 dialog.vector_similarity_weight,
                                 doc_ids=kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None,
                                 top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl)
        knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]

    chat_logger.info(
        "{}->{}".format(" ".join(questions), "\n->".join(knowledges)))

    if not knowledges and prompt_config.get("empty_response"):
        yield {"answer": prompt_config["empty_response"], "reference": kbinfos}
        return {"answer": prompt_config["empty_response"], "reference": kbinfos}

    kwargs["knowledge"] = "\n".join(knowledges)
    gen_conf = dialog.llm_setting
    msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
    msg.extend([{"role": m["role"], "content": m["content"]}
                for m in messages if m["role"] != "system"])
    # Keep ~3% of the window free for the model's bookkeeping tokens.
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
    assert len(msg) >= 2, f"message_fit_in has bug: {msg}"

    if "max_tokens" in gen_conf:
        # Cap generation so prompt + completion stay inside the context window.
        gen_conf["max_tokens"] = min(
            gen_conf["max_tokens"],
            max_tokens - used_token_count)

    def decorate_answer(answer):
        # Insert chunk citations into the answer and build the reference
        # payload (chunks with vectors stripped, plus per-doc aggregates).
        nonlocal prompt_config, knowledges, kwargs, kbinfos
        refs = []
        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            answer, idx = retr.insert_citations(answer,
                                                [ck["content_ltks"]
                                                 for ck in kbinfos["chunks"]],
                                                [ck["vector"]
                                                 for ck in kbinfos["chunks"]],
                                                embd_mdl,
                                                tkweight=1 - dialog.vector_similarity_weight,
                                                vtweight=dialog.vector_similarity_weight)
            idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
            recall_docs = [
                d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
            if not recall_docs: recall_docs = kbinfos["doc_aggs"]
            kbinfos["doc_aggs"] = recall_docs
            refs = deepcopy(kbinfos)
            for c in refs["chunks"]:
                # Vectors are large and meaningless to the client; drop them.
                if c.get("vector"):
                    del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        return {"answer": answer, "reference": refs}

    if stream:
        answer = ""
        for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], gen_conf):
            answer = ans
            yield {"answer": answer, "reference": {}}
        yield decorate_answer(answer)
    else:
        answer = chat_mdl.chat(
            msg[0]["content"], msg[1:], gen_conf)
        chat_logger.info("User: {}|Assistant: {}".format(
            msg[-1]["content"], answer))
        yield decorate_answer(answer)
  190. def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
  191. sys_prompt = "你是一个DBA。你需要这对以下表的字段结构,根据用户的问题列表,写出最后一个问题对应的SQL。"
  192. user_promt = """
  193. 表名:{};
  194. 数据库表字段说明如下:
  195. {}
  196. 问题如下:
  197. {}
  198. 请写出SQL, 且只要SQL,不要有其他说明及文字。
  199. """.format(
  200. index_name(tenant_id),
  201. "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
  202. question
  203. )
  204. tried_times = 0
  205. def get_table():
  206. nonlocal sys_prompt, user_promt, question, tried_times
  207. sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_promt}], {
  208. "temperature": 0.06})
  209. print(user_promt, sql)
  210. chat_logger.info(f"“{question}”==>{user_promt} get SQL: {sql}")
  211. sql = re.sub(r"[\r\n]+", " ", sql.lower())
  212. sql = re.sub(r".*select ", "select ", sql.lower())
  213. sql = re.sub(r" +", " ", sql)
  214. sql = re.sub(r"([;;]|```).*", "", sql)
  215. if sql[:len("select ")] != "select ":
  216. return None, None
  217. if not re.search(r"((sum|avg|max|min)\(|group by )", sql.lower()):
  218. if sql[:len("select *")] != "select *":
  219. sql = "select doc_id,docnm_kwd," + sql[6:]
  220. else:
  221. flds = []
  222. for k in field_map.keys():
  223. if k in forbidden_select_fields4resume:
  224. continue
  225. if len(flds) > 11:
  226. break
  227. flds.append(k)
  228. sql = "select doc_id,docnm_kwd," + ",".join(flds) + sql[8:]
  229. print(f"“{question}” get SQL(refined): {sql}")
  230. chat_logger.info(f"“{question}” get SQL(refined): {sql}")
  231. tried_times += 1
  232. return retrievaler.sql_retrieval(sql, format="json"), sql
  233. tbl, sql = get_table()
  234. if tbl is None:
  235. return None
  236. if tbl.get("error") and tried_times <= 2:
  237. user_promt = """
  238. 表名:{};
  239. 数据库表字段说明如下:
  240. {}
  241. 问题如下:
  242. {}
  243. 你上一次给出的错误SQL如下:
  244. {}
  245. 后台报错如下:
  246. {}
  247. 请纠正SQL中的错误再写一遍,且只要SQL,不要有其他说明及文字。
  248. """.format(
  249. index_name(tenant_id),
  250. "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
  251. question, sql, tbl["error"]
  252. )
  253. tbl, sql = get_table()
  254. chat_logger.info("TRY it again: {}".format(sql))
  255. chat_logger.info("GET table: {}".format(tbl))
  256. print(tbl)
  257. if tbl.get("error") or len(tbl["rows"]) == 0:
  258. return None
  259. docid_idx = set([ii for ii, c in enumerate(
  260. tbl["columns"]) if c["name"] == "doc_id"])
  261. docnm_idx = set([ii for ii, c in enumerate(
  262. tbl["columns"]) if c["name"] == "docnm_kwd"])
  263. clmn_idx = [ii for ii in range(
  264. len(tbl["columns"])) if ii not in (docid_idx | docnm_idx)]
  265. # compose markdown table
  266. clmns = "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"],
  267. tbl["columns"][i]["name"])) for i in
  268. clmn_idx]) + ("|Source|" if docid_idx and docid_idx else "|")
  269. line = "|" + "|".join(["------" for _ in range(len(clmn_idx))]) + \
  270. ("|------|" if docid_idx and docid_idx else "")
  271. rows = ["|" +
  272. "|".join([rmSpace(str(r[i])) for i in clmn_idx]).replace("None", " ") +
  273. "|" for r in tbl["rows"]]
  274. if quota:
  275. rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
  276. else:
  277. rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
  278. rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)
  279. if not docid_idx or not docnm_idx:
  280. chat_logger.warning("SQL missing field: " + sql)
  281. return {
  282. "answer": "\n".join([clmns, line, rows]),
  283. "reference": {"chunks": [], "doc_aggs": []}
  284. }
  285. docid_idx = list(docid_idx)[0]
  286. docnm_idx = list(docnm_idx)[0]
  287. doc_aggs = {}
  288. for r in tbl["rows"]:
  289. if r[docid_idx] not in doc_aggs:
  290. doc_aggs[r[docid_idx]] = {"doc_name": r[docnm_idx], "count": 0}
  291. doc_aggs[r[docid_idx]]["count"] += 1
  292. return {
  293. "answer": "\n".join([clmns, line, rows]),
  294. "reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[docnm_idx]} for r in tbl["rows"]],
  295. "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in
  296. doc_aggs.items()]}
  297. }
  298. def relevant(tenant_id, llm_id, question, contents: list):
  299. if llm_id2llm_type(llm_id) == "image2text":
  300. chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
  301. else:
  302. chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
  303. prompt = """
  304. You are a grader assessing relevance of a retrieved document to a user question.
  305. It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
  306. If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
  307. Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question.
  308. No other words needed except 'yes' or 'no'.
  309. """
  310. if not contents:return False
  311. contents = "Documents: \n" + " - ".join(contents)
  312. contents = f"Question: {question}\n" + contents
  313. if num_tokens_from_string(contents) >= chat_mdl.max_length - 4:
  314. contents = encoder.decode(encoder.encode(contents)[:chat_mdl.max_length - 4])
  315. ans = chat_mdl.chat(prompt, [{"role": "user", "content": contents}], {"temperature": 0.01})
  316. if ans.lower().find("yes") >= 0: return True
  317. return False
  318. def rewrite(tenant_id, llm_id, question):
  319. if llm_id2llm_type(llm_id) == "image2text":
  320. chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
  321. else:
  322. chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
  323. prompt = """
  324. You are an expert at query expansion to generate a paraphrasing of a question.
  325. I can't retrieval relevant information from the knowledge base by using user's question directly.
  326. You need to expand or paraphrase user's question by multiple ways such as using synonyms words/phrase,
  327. writing the abbreviation in its entirety, adding some extra descriptions or explanations,
  328. changing the way of expression, translating the original question into another language (English/Chinese), etc.
  329. And return 5 versions of question and one is from translation.
  330. Just list the question. No other words are needed.
  331. """
  332. ans = chat_mdl.chat(prompt, [{"role": "user", "content": question}], {"temperature": 0.8})
  333. return ans