
dialog_service.py 27KB

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import binascii
import logging
import re
import time
from copy import deepcopy
from datetime import datetime
from functools import partial
from timeit import default_timer as timer

from langfuse import Langfuse

from agentic_reasoning import DeepResearcher
from api import settings
from api.db import LLMType, ParserType, StatusEnum
from api.db.db_models import DB, Dialog
from api.db.services.common_service import CommonService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.langfuse_service import TenantLangfuseService
from api.db.services.llm_service import LLMBundle, TenantLLMService
from api.utils import current_timestamp, datetime_format
from rag.app.resume import forbidden_select_fields4resume
from rag.app.tag import label_question
from rag.nlp.search import index_name
from rag.prompts import chunks_format, citation_prompt, cross_languages, full_question, kb_prompt, keyword_extraction, llm_id2llm_type, message_fit_in
from rag.utils import num_tokens_from_string, rmSpace
from rag.utils.tavily_conn import Tavily
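
# Service layer for RAGFlow dialogs: CRUD helpers for Dialog rows plus the
# retrieval-augmented chat pipeline (retrieval, citation repair, optional
# TTS and Langfuse tracing) used by the chat APIs.
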
class DialogService(CommonService):
    model = Dialog

    @classmethod
    def save(cls, **kwargs):
        """Save a new record to database.

        This method creates a new record in the database with the provided field values,
        forcing an insert operation rather than an update.

        Args:
            **kwargs: Record field values as keyword arguments.

        Returns:
            Model instance: The created record object.
        """
        sample_obj = cls.model(**kwargs).save(force_insert=True)
        return sample_obj

    @classmethod
    def update_many_by_id(cls, data_list):
        """Update multiple records by their IDs.

        This method updates multiple records in the database, identified by their IDs.
        It automatically updates the update_time and update_date fields for each record.

        Args:
            data_list (list): List of dictionaries containing record data to update.
                Each dictionary must include an 'id' field.
        """
        with DB.atomic():
            for data in data_list:
                data["update_time"] = current_timestamp()
                data["update_date"] = datetime_format(datetime.now())
                cls.model.update(data).where(cls.model.id == data["id"]).execute()

    @classmethod
    @DB.connection_context()
    def get_list(cls, tenant_id, page_number, items_per_page, orderby, desc, id, name):
        chats = cls.model.select()
        if id:
            chats = chats.where(cls.model.id == id)
        if name:
            chats = chats.where(cls.model.name == name)
        chats = chats.where((cls.model.tenant_id == tenant_id) & (cls.model.status == StatusEnum.VALID.value))
        if desc:
            chats = chats.order_by(cls.model.getter_by(orderby).desc())
        else:
            chats = chats.order_by(cls.model.getter_by(orderby).asc())
        chats = chats.paginate(page_number, items_per_page)
        return list(chats.dicts())
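
# Minimal usage sketch for get_list (hypothetical tenant id; peewee's paginate()
# is 1-based, so page_number=1 returns the first page):
#   chats = DialogService.get_list("tenant-0", page_number=1, items_per_page=20,
#                                  orderby="create_time", desc=True, id=None, name=None)
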
def chat_solo(dialog, messages, stream=True):
    if llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    prompt_config = dialog.prompt_config
    tts_mdl = None
    if prompt_config.get("tts"):
        tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
    msg = [{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])} for m in messages if m["role"] != "system"]
    if stream:
        last_ans = ""
        delta_ans = ""
        for ans in chat_mdl.chat_streamly(prompt_config.get("system", ""), msg, dialog.llm_setting):
            answer = ans
            delta_ans = ans[len(last_ans) :]
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
            delta_ans = ""
        if delta_ans:
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
    else:
        answer = chat_mdl.chat(prompt_config.get("system", ""), msg, dialog.llm_setting)
        user_content = msg[-1].get("content", "[content not available]")
        logging.debug("User: {}|Assistant: {}".format(user_content, answer))
        yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, answer), "prompt": "", "created_at": time.time()}
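
# Note on streaming: chat_solo() above and chat() below buffer deltas until they
# reach roughly 16 tokens before yielding, so TTS and the client are not flooded
# with tiny fragments. chat() is the knowledge-base-backed variant: it retrieves
# chunks from the dialog's KBs and appends per-stage timings to the final prompt.
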
def chat(dialog, messages, stream=True, **kwargs):
    assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
    if not dialog.kb_ids:
        for ans in chat_solo(dialog, messages, stream):
            yield ans
        return

    chat_start_ts = timer()

    if llm_id2llm_type(dialog.llm_id) == "image2text":
        llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
    max_tokens = llm_model_config.get("max_tokens", 8192)

    check_llm_ts = timer()

    langfuse_tracer = None
    langfuse_keys = TenantLangfuseService.filter_by_tenant(tenant_id=dialog.tenant_id)
    if langfuse_keys:
        langfuse = Langfuse(public_key=langfuse_keys.public_key, secret_key=langfuse_keys.secret_key, host=langfuse_keys.host)
        if langfuse.auth_check():
            langfuse_tracer = langfuse
            langfuse.trace = langfuse_tracer.trace(name=f"{dialog.name}-{llm_model_config['llm_name']}")

    check_langfuse_tracer_ts = timer()
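
    # If the tenant has Langfuse credentials, the trace created above groups the
    # generation recorded later in decorate_answer(); auth_check() guards against
    # stale or revoked keys.
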
    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    embedding_list = list(set([kb.embd_id for kb in kbs]))
    if len(embedding_list) != 1:
        yield {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
        return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}

    embedding_model_name = embedding_list[0]

    retriever = settings.retrievaler

    questions = [m["content"] for m in messages if m["role"] == "user"][-3:]
    attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
    if "doc_ids" in messages[-1]:
        attachments = messages[-1]["doc_ids"]

    create_retriever_ts = timer()

    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embedding_model_name)
    if not embd_mdl:
        raise LookupError("Embedding model(%s) not found" % embedding_model_name)

    bind_embedding_ts = timer()

    if llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
    toolcall_session, tools = kwargs.get("toolcall_session"), kwargs.get("tools")
    if toolcall_session and tools:
        chat_mdl.bind_tools(toolcall_session, tools)

    bind_llm_ts = timer()

    prompt_config = dialog.prompt_config
    field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
    tts_mdl = None
    if prompt_config.get("tts"):
        tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
    # try to use sql if field mapping is good to go
    if field_map:
        logging.debug("Use SQL to retrieve: {}".format(questions[-1]))
        ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True))
        if ans:
            yield ans
            return

    for p in prompt_config["parameters"]:
        if p["key"] == "knowledge":
            continue
        if p["key"] not in kwargs and not p["optional"]:
            raise KeyError("Missing parameter: " + p["key"])
        if p["key"] not in kwargs:
            prompt_config["system"] = prompt_config["system"].replace("{%s}" % p["key"], " ")

    if len(questions) > 1 and prompt_config.get("refine_multiturn"):
        questions = [full_question(dialog.tenant_id, dialog.llm_id, messages)]
    else:
        questions = questions[-1:]

    if prompt_config.get("cross_languages"):
        questions = [cross_languages(dialog.tenant_id, dialog.llm_id, questions[0], prompt_config["cross_languages"])]

    refine_question_ts = timer()
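
    # With refine_multiturn enabled, the recent turns are condensed by
    # full_question() into one standalone query so follow-ups such as
    # "what about 2023?" still retrieve well; cross_languages() then rewrites
    # the query into the configured target languages before retrieval.
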
    rerank_mdl = None
    if dialog.rerank_id:
        rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)

    bind_reranker_ts = timer()
    generate_keyword_ts = bind_reranker_ts
    thought = ""
    kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}

    if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
        knowledges = []
    else:
        if prompt_config.get("keyword", False):
            questions[-1] += keyword_extraction(chat_mdl, questions[-1])
            generate_keyword_ts = timer()

        tenant_ids = list(set([kb.tenant_id for kb in kbs]))

        knowledges = []
        if prompt_config.get("reasoning", False):
            reasoner = DeepResearcher(
                chat_mdl,
                prompt_config,
                partial(retriever.retrieval, embd_mdl=embd_mdl, tenant_ids=tenant_ids, kb_ids=dialog.kb_ids, page=1, page_size=dialog.top_n, similarity_threshold=0.2, vector_similarity_weight=0.3),
            )

            for think in reasoner.thinking(kbinfos, " ".join(questions)):
                if isinstance(think, str):
                    thought = think
                    knowledges = [t for t in think.split("\n") if t]
                elif stream:
                    yield think
        else:
            kbinfos = retriever.retrieval(
                " ".join(questions),
                embd_mdl,
                tenant_ids,
                dialog.kb_ids,
                1,
                dialog.top_n,
                dialog.similarity_threshold,
                dialog.vector_similarity_weight,
                doc_ids=attachments,
                top=dialog.top_k,
                aggs=False,
                rerank_mdl=rerank_mdl,
                rank_feature=label_question(" ".join(questions), kbs),
            )
            if prompt_config.get("tavily_api_key"):
                tav = Tavily(prompt_config["tavily_api_key"])
                tav_res = tav.retrieve_chunks(" ".join(questions))
                kbinfos["chunks"].extend(tav_res["chunks"])
                kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
            if prompt_config.get("use_kg"):
                ck = settings.kg_retrievaler.retrieval(" ".join(questions), tenant_ids, dialog.kb_ids, embd_mdl, LLMBundle(dialog.tenant_id, LLMType.CHAT))
                if ck["content_with_weight"]:
                    kbinfos["chunks"].insert(0, ck)

            knowledges = kb_prompt(kbinfos, max_tokens)

    logging.debug("{}->{}".format(" ".join(questions), "\n->".join(knowledges)))

    retrieval_ts = timer()

    if not knowledges and prompt_config.get("empty_response"):
        empty_res = prompt_config["empty_response"]
        yield {"answer": empty_res, "reference": kbinfos, "prompt": "\n\n### Query:\n%s" % " ".join(questions), "audio_binary": tts(tts_mdl, empty_res)}
        return {"answer": prompt_config["empty_response"], "reference": kbinfos}

    kwargs["knowledge"] = "\n------\n" + "\n\n------\n\n".join(knowledges)
    gen_conf = dialog.llm_setting

    msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
    prompt4citation = ""
    if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
        prompt4citation = citation_prompt()
    msg.extend([{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])} for m in messages if m["role"] != "system"])
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.95))
    assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
    prompt = msg[0]["content"]

    if "max_tokens" in gen_conf:
        gen_conf["max_tokens"] = min(gen_conf["max_tokens"], max_tokens - used_token_count)
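
    # Token budget: message_fit_in() trims the assembled messages to 95% of the
    # model window, and the generation cap shrinks to whatever remains; e.g. with
    # an 8192-token window and 6000 tokens used, max_tokens drops to at most 2192.
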
    def repair_bad_citation_formats(answer: str, kbinfos: dict, idx: set):
        max_index = len(kbinfos["chunks"])

        def safe_add(i):
            if 0 <= i < max_index:
                idx.add(i)
                return True
            return False

        def find_and_replace(pattern, group_index=1, repl=lambda i: f"##{i}$$", flags=0):
            nonlocal answer
            for match in re.finditer(pattern, answer, flags=flags):
                try:
                    i = int(match.group(group_index))
                    if safe_add(i):
                        answer = answer.replace(match.group(0), repl(i))
                except Exception:
                    continue

        find_and_replace(r"\(\s*ID:\s*(\d+)\s*\)")  # (ID: 12)
        find_and_replace(r"ID[: ]+(\d+)")  # ID: 12, ID 12
        find_and_replace(r"\$\$(\d+)\$\$")  # $$12$$
        find_and_replace(r"\$\[(\d+)\]\$")  # $[12]$
        find_and_replace(r"\$\$(\d+)\${2,}")  # $$12$$$$
        find_and_replace(r"\$(\d+)\$")  # $12$
        find_and_replace(r"(#{2,})(\d+)(\${2,})", group_index=2)  # 2+ # and 2+ $
        find_and_replace(r"(#{2,})(\d+)(#{1,})", group_index=2)  # 2+ # and 1+ #
        find_and_replace(r"##(\d+)#{2,}")  # ##12###
        find_and_replace(r"【(\d+)】")  # 【12】
        find_and_replace(r"ref\s*(\d+)", flags=re.IGNORECASE)  # ref12, ref 12, REF 12

        return answer, idx
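
    # Example of the repair above: with at least one retrieved chunk, "see (ID: 0)"
    # becomes "see ##0$$", while an out-of-range index such as "(ID: 99)" with only
    # a handful of chunks is left untouched, because safe_add() rejects it.
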
    def decorate_answer(answer):
        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts, questions, langfuse_tracer

        refs = []
        ans = answer.split("</think>")
        think = ""
        if len(ans) == 2:
            think = ans[0] + "</think>"
            answer = ans[1]

        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            answer = re.sub(r"##[ij]\$\$", "", answer, flags=re.DOTALL)
            idx = set([])
            if not re.search(r"##[0-9]+\$\$", answer):
                answer, idx = retriever.insert_citations(
                    answer,
                    [ck["content_ltks"] for ck in kbinfos["chunks"]],
                    [ck["vector"] for ck in kbinfos["chunks"]],
                    embd_mdl,
                    tkweight=1 - dialog.vector_similarity_weight,
                    vtweight=dialog.vector_similarity_weight,
                )
            else:
                for match in re.finditer(r"##([0-9]+)\$\$", answer):
                    i = int(match.group(1))
                    if i < len(kbinfos["chunks"]):
                        idx.add(i)

            answer, idx = repair_bad_citation_formats(answer, kbinfos, idx)

            idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
            recall_docs = [d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
            if not recall_docs:
                recall_docs = kbinfos["doc_aggs"]
            kbinfos["doc_aggs"] = recall_docs

            refs = deepcopy(kbinfos)
            for c in refs["chunks"]:
                if c.get("vector"):
                    del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"

        finish_chat_ts = timer()

        total_time_cost = (finish_chat_ts - chat_start_ts) * 1000
        check_llm_time_cost = (check_llm_ts - chat_start_ts) * 1000
        check_langfuse_tracer_cost = (check_langfuse_tracer_ts - check_llm_ts) * 1000
        create_retriever_time_cost = (create_retriever_ts - check_langfuse_tracer_ts) * 1000
        bind_embedding_time_cost = (bind_embedding_ts - create_retriever_ts) * 1000
        bind_llm_time_cost = (bind_llm_ts - bind_embedding_ts) * 1000
        refine_question_time_cost = (refine_question_ts - bind_llm_ts) * 1000
        bind_reranker_time_cost = (bind_reranker_ts - refine_question_ts) * 1000
        generate_keyword_time_cost = (generate_keyword_ts - bind_reranker_ts) * 1000
        retrieval_time_cost = (retrieval_ts - generate_keyword_ts) * 1000
        generate_result_time_cost = (finish_chat_ts - retrieval_ts) * 1000

        tk_num = num_tokens_from_string(think + answer)
        prompt += "\n\n### Query:\n%s" % " ".join(questions)
        prompt = (
            f"{prompt}\n\n"
            "## Time elapsed:\n"
            f"  - Total: {total_time_cost:.1f}ms\n"
            f"  - Check LLM: {check_llm_time_cost:.1f}ms\n"
            f"  - Check Langfuse tracer: {check_langfuse_tracer_cost:.1f}ms\n"
            f"  - Create retriever: {create_retriever_time_cost:.1f}ms\n"
            f"  - Bind embedding: {bind_embedding_time_cost:.1f}ms\n"
            f"  - Bind LLM: {bind_llm_time_cost:.1f}ms\n"
            f"  - Multi-turn optimization: {refine_question_time_cost:.1f}ms\n"
            f"  - Bind reranker: {bind_reranker_time_cost:.1f}ms\n"
            f"  - Generate keyword: {generate_keyword_time_cost:.1f}ms\n"
            f"  - Retrieval: {retrieval_time_cost:.1f}ms\n"
            f"  - Generate answer: {generate_result_time_cost:.1f}ms\n\n"
            "## Token usage:\n"
            f"  - Generated tokens(approximately): {tk_num}\n"
            f"  - Token speed: {int(tk_num / (generate_result_time_cost / 1000.0))}/s"
        )

        langfuse_output = "\n" + re.sub(r"^.*?(### Query:.*)", r"\1", prompt, flags=re.DOTALL)
        langfuse_output = {"time_elapsed:": re.sub(r"\n", "  \n", langfuse_output), "created_at": time.time()}

        # End the Langfuse generation only when tracing is on and a generation was started.
        if langfuse_tracer and "langfuse_generation" in locals():
            langfuse_generation.end(output=langfuse_output)

        return {"answer": think + answer, "reference": refs, "prompt": re.sub(r"\n", "  \n", prompt), "created_at": time.time()}

    if langfuse_tracer:
        langfuse_generation = langfuse_tracer.trace.generation(name="chat", model=llm_model_config["llm_name"], input={"prompt": prompt, "prompt4citation": prompt4citation, "messages": msg})

    if stream:
        last_ans = ""
        answer = ""
        for ans in chat_mdl.chat_streamly(prompt + prompt4citation, msg[1:], gen_conf):
            if thought:
                ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
            answer = ans
            delta_ans = ans[len(last_ans) :]
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": thought + answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        delta_ans = answer[len(last_ans) :]
        if delta_ans:
            yield {"answer": thought + answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        yield decorate_answer(thought + answer)
    else:
        answer = chat_mdl.chat(prompt + prompt4citation, msg[1:], gen_conf)
        user_content = msg[-1].get("content", "[content not available]")
        logging.debug("User: {}|Assistant: {}".format(user_content, answer))
        res = decorate_answer(answer)
        res["audio_binary"] = tts(tts_mdl, answer)
        yield res
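
# Minimal driver sketch (assumes `dialog` is a persisted Dialog row fetched via
# DialogService; chat() is a generator that streams partial answers and yields
# the decorated final answer last):
#   for delta in chat(dialog, [{"role": "user", "content": "Summarize doc X."}]):
#       print(delta["answer"])
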
def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    sys_prompt = "You are a Database Administrator. You need to check the fields of the following tables based on the user's list of questions and write the SQL corresponding to the last question."
    user_prompt = """
Table name: {};
Table of database fields are as follows:
{}
Question are as follows:
{}
Please write the SQL, only SQL, without any other explanations or text.
""".format(index_name(tenant_id), "\n".join([f"{k}: {v}" for k, v in field_map.items()]), question)
    tried_times = 0

    def get_table():
        nonlocal sys_prompt, user_prompt, question, tried_times
        sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {"temperature": 0.06})
        sql = re.sub(r"^.*</think>", "", sql, flags=re.DOTALL)
        logging.debug(f"{question} ==> {user_prompt} get SQL: {sql}")
        sql = re.sub(r"[\r\n]+", " ", sql.lower())
        sql = re.sub(r".*select ", "select ", sql.lower())
        sql = re.sub(r" +", " ", sql)
        sql = re.sub(r"([;；]|```).*", "", sql)
        if sql[: len("select ")] != "select ":
            return None, None
        if not re.search(r"((sum|avg|max|min)\(|group by )", sql.lower()):
            if sql[: len("select *")] != "select *":
                sql = "select doc_id,docnm_kwd," + sql[6:]
            else:
                flds = []
                for k in field_map.keys():
                    if k in forbidden_select_fields4resume:
                        continue
                    if len(flds) > 11:
                        break
                    flds.append(k)
                sql = "select doc_id,docnm_kwd," + ",".join(flds) + sql[8:]

        logging.debug(f"{question} get SQL(refined): {sql}")
        tried_times += 1
        return settings.retrievaler.sql_retrieval(sql, format="json"), sql

    tbl, sql = get_table()
    if tbl is None:
        return None
    if tbl.get("error") and tried_times <= 2:
        user_prompt = """
Table name: {};
Table of database fields are as follows:
{}
Question are as follows:
{}
Please write the SQL, only SQL, without any other explanations or text.
The SQL error you provided last time is as follows:
{}
Error issued by database as follows:
{}
Please correct the error and write SQL again, only SQL, without any other explanations or text.
""".format(index_name(tenant_id), "\n".join([f"{k}: {v}" for k, v in field_map.items()]), question, sql, tbl["error"])
        tbl, sql = get_table()
        logging.debug("TRY it again: {}".format(sql))

    logging.debug("GET table: {}".format(tbl))
    if tbl.get("error") or len(tbl["rows"]) == 0:
        return None

    docid_idx = set([ii for ii, c in enumerate(tbl["columns"]) if c["name"] == "doc_id"])
    doc_name_idx = set([ii for ii, c in enumerate(tbl["columns"]) if c["name"] == "docnm_kwd"])
    column_idx = [ii for ii in range(len(tbl["columns"])) if ii not in (docid_idx | doc_name_idx)]

    # compose Markdown table
    columns = (
        "|" + "|".join([re.sub(r"(/.*|（[^（）]+）)", "", field_map.get(tbl["columns"][i]["name"], tbl["columns"][i]["name"])) for i in column_idx]) + ("|Source|" if docid_idx and doc_name_idx else "|")
    )
    line = "|" + "|".join(["------" for _ in range(len(column_idx))]) + ("|------|" if docid_idx and doc_name_idx else "")
    rows = ["|" + "|".join([rmSpace(str(r[i])) for i in column_idx]).replace("None", " ") + "|" for r in tbl["rows"]]
    rows = [r for r in rows if re.sub(r"[ |]+", "", r)]
    if quota:
        # Append a per-row citation marker only when quoting is enabled.
        rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
    else:
        rows = "\n".join(rows)
    rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)

    if not docid_idx or not doc_name_idx:
        logging.warning("SQL missing field: " + sql)
        return {"answer": "\n".join([columns, line, rows]), "reference": {"chunks": [], "doc_aggs": []}, "prompt": sys_prompt}

    docid_idx = list(docid_idx)[0]
    doc_name_idx = list(doc_name_idx)[0]
    doc_aggs = {}
    for r in tbl["rows"]:
        if r[docid_idx] not in doc_aggs:
            doc_aggs[r[docid_idx]] = {"doc_name": r[doc_name_idx], "count": 0}
        doc_aggs[r[docid_idx]]["count"] += 1
    return {
        "answer": "\n".join([columns, line, rows]),
        "reference": {
            "chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[doc_name_idx]} for r in tbl["rows"]],
            "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in doc_aggs.items()],
        },
        "prompt": sys_prompt,
    }
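
# Shape of the composed Markdown answer (illustrative values):
#   |Name|Title|Source|
#   |------|------|------|
#   |Alice|Engineer| ##0$$ |
# The trailing ##N$$ markers are row-level citations, emitted only when quota=True.
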
def tts(tts_mdl, text):
    if not tts_mdl or not text:
        return
    data = b""
    for chunk in tts_mdl.tts(text):
        data += chunk
    return binascii.hexlify(data).decode("utf-8")
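
# The synthesized audio is hex-encoded so the binary can travel inside the JSON
# payloads yielded by chat()/chat_solo(); a client would reverse it with
# binascii.unhexlify() (equivalently bytes.fromhex()) before playback.
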
def ask(question, kb_ids, tenant_id):
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    embedding_list = list(set([kb.embd_id for kb in kbs]))

    is_knowledge_graph = all([kb.parser_id == ParserType.KG for kb in kbs])
    retriever = settings.retrievaler if not is_knowledge_graph else settings.kg_retrievaler

    embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embedding_list[0])
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    max_tokens = chat_mdl.max_length
    tenant_ids = list(set([kb.tenant_id for kb in kbs]))
    kbinfos = retriever.retrieval(question, embd_mdl, tenant_ids, kb_ids, 1, 12, 0.1, 0.3, aggs=False, rank_feature=label_question(question, kbs))
    knowledges = kb_prompt(kbinfos, max_tokens)
    prompt = """
Role: You're a smart assistant. Your name is Miss R.
Task: Summarize the information from knowledge bases and answer user's question.
Requirements and restriction:
- DO NOT make things up, especially for numbers.
- If the information from knowledge is irrelevant with user's question, JUST SAY: Sorry, no relevant information provided.
- Answer with markdown format text.
- Answer in language of user's question.
- DO NOT make things up, especially for numbers.

### Information from knowledge bases
%s

The above is information from knowledge bases.
""" % "\n".join(knowledges)
    msg = [{"role": "user", "content": question}]

    def decorate_answer(answer):
        nonlocal knowledges, kbinfos, prompt
        answer, idx = retriever.insert_citations(answer, [ck["content_ltks"] for ck in kbinfos["chunks"]], [ck["vector"] for ck in kbinfos["chunks"]], embd_mdl, tkweight=0.7, vtweight=0.3)
        idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
        recall_docs = [d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
        if not recall_docs:
            recall_docs = kbinfos["doc_aggs"]
        kbinfos["doc_aggs"] = recall_docs
        refs = deepcopy(kbinfos)
        for c in refs["chunks"]:
            if c.get("vector"):
                del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        refs["chunks"] = chunks_format(refs)
        return {"answer": answer, "reference": refs}

    answer = ""
    for ans in chat_mdl.chat_streamly(prompt, msg, {"temperature": 0.1}):
        answer = ans
        yield {"answer": answer, "reference": {}}
    yield decorate_answer(answer)
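
# Minimal usage sketch (hypothetical IDs): ask() streams the growing answer and
# finishes with a decorated dict that carries citations and references:
#   for part in ask("What does chapter 3 cover?", ["kb-id-1"], "tenant-0"):
#       latest = part  # the final item also carries "reference"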