Nevar pievienot vairāk kā 25 tēmas Tēmai ir jāsākas ar burtu vai ciparu, tā var saturēt domu zīmes ('-') un var būt līdz 35 simboliem gara.

dialog_service.py 46KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093
  1. #
  2. # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. import logging
  17. import binascii
  18. import os
  19. import json
  20. import time
  21. import json_repair
  22. import re
  23. from collections import defaultdict
  24. from copy import deepcopy
  25. from timeit import default_timer as timer
  26. import datetime
  27. from datetime import timedelta
  28. from api.db import LLMType, ParserType, StatusEnum
  29. from api.db.db_models import Dialog, DB
  30. from api.db.services.common_service import CommonService
  31. from api.db.services.document_service import DocumentService
  32. from api.db.services.knowledgebase_service import KnowledgebaseService
  33. from api.db.services.llm_service import TenantLLMService, LLMBundle
  34. from api import settings
  35. from graphrag.utils import get_tags_from_cache, set_tags_to_cache
  36. from rag.app.resume import forbidden_select_fields4resume
  37. from rag.nlp import extract_between
  38. from rag.nlp.search import index_name
  39. from rag.settings import TAG_FLD
  40. from rag.utils import rmSpace, num_tokens_from_string, encoder
  41. from api.utils.file_utils import get_project_base_directory
  42. from rag.utils.tavily_conn import Tavily
class DialogService(CommonService):
    """Service layer for `Dialog` records (chat-assistant configurations)."""

    # Peewee model this CommonService operates on.
    model = Dialog

    @classmethod
    @DB.connection_context()
    def get_list(cls, tenant_id,
                 page_number, items_per_page, orderby, desc, id, name):
        """Return a page of the tenant's valid dialogs as plain dicts.

        Args:
            tenant_id: owner tenant; only this tenant's dialogs are returned.
            page_number, items_per_page: pagination (peewee `paginate`).
            orderby: column name to sort by (resolved via `getter_by`).
            desc: truthy for descending order.
            id, name: optional exact-match filters; skipped when falsy.
                NOTE(review): `id` shadows the builtin, kept for caller
                compatibility.

        Returns:
            list[dict]: matching rows.
        """
        chats = cls.model.select()
        # Optional exact-match filters.
        if id:
            chats = chats.where(cls.model.id == id)
        if name:
            chats = chats.where(cls.model.name == name)
        # Restrict to this tenant's non-deleted (VALID) dialogs.
        chats = chats.where(
            (cls.model.tenant_id == tenant_id)
            & (cls.model.status == StatusEnum.VALID.value)
        )
        if desc:
            chats = chats.order_by(cls.model.getter_by(orderby).desc())
        else:
            chats = chats.order_by(cls.model.getter_by(orderby).asc())

        chats = chats.paginate(page_number, items_per_page)

        return list(chats.dicts())
  64. def message_fit_in(msg, max_length=4000):
  65. def count():
  66. nonlocal msg
  67. tks_cnts = []
  68. for m in msg:
  69. tks_cnts.append(
  70. {"role": m["role"], "count": num_tokens_from_string(m["content"])})
  71. total = 0
  72. for m in tks_cnts:
  73. total += m["count"]
  74. return total
  75. c = count()
  76. if c < max_length:
  77. return c, msg
  78. msg_ = [m for m in msg[:-1] if m["role"] == "system"]
  79. if len(msg) > 1:
  80. msg_.append(msg[-1])
  81. msg = msg_
  82. c = count()
  83. if c < max_length:
  84. return c, msg
  85. ll = num_tokens_from_string(msg_[0]["content"])
  86. ll2 = num_tokens_from_string(msg_[-1]["content"])
  87. if ll / (ll + ll2) > 0.8:
  88. m = msg_[0]["content"]
  89. m = encoder.decode(encoder.encode(m)[:max_length - ll2])
  90. msg[0]["content"] = m
  91. return max_length, msg
  92. m = msg_[1]["content"]
  93. m = encoder.decode(encoder.encode(m)[:max_length - ll2])
  94. msg[1]["content"] = m
  95. return max_length, msg
  96. def llm_id2llm_type(llm_id):
  97. llm_id, _ = TenantLLMService.split_model_name_and_factory(llm_id)
  98. fnm = os.path.join(get_project_base_directory(), "conf")
  99. llm_factories = json.load(open(os.path.join(fnm, "llm_factories.json"), "r"))
  100. for llm_factory in llm_factories["factory_llm_infos"]:
  101. for llm in llm_factory["llm"]:
  102. if llm_id == llm["llm_name"]:
  103. return llm["model_type"].strip(",")[-1]
  104. def kb_prompt(kbinfos, max_tokens):
  105. knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
  106. used_token_count = 0
  107. chunks_num = 0
  108. for i, c in enumerate(knowledges):
  109. used_token_count += num_tokens_from_string(c)
  110. chunks_num += 1
  111. if max_tokens * 0.97 < used_token_count:
  112. knowledges = knowledges[:i]
  113. logging.warning(f"Not all the retrieval into prompt: {i+1}/{len(knowledges)}")
  114. break
  115. docs = DocumentService.get_by_ids([ck["doc_id"] for ck in kbinfos["chunks"][:chunks_num]])
  116. docs = {d.id: d.meta_fields for d in docs}
  117. doc2chunks = defaultdict(lambda: {"chunks": [], "meta": []})
  118. for ck in kbinfos["chunks"][:chunks_num]:
  119. doc2chunks[ck["docnm_kwd"]]["chunks"].append((f"URL: {ck['url']}\n" if "url" in ck else "") + ck["content_with_weight"])
  120. doc2chunks[ck["docnm_kwd"]]["meta"] = docs.get(ck["doc_id"], {})
  121. knowledges = []
  122. for nm, cks_meta in doc2chunks.items():
  123. txt = f"Document: {nm} \n"
  124. for k, v in cks_meta["meta"].items():
  125. txt += f"{k}: {v}\n"
  126. txt += "Relevant fragments as following:\n"
  127. for i, chunk in enumerate(cks_meta["chunks"], 1):
  128. txt += f"{i}. {chunk}\n"
  129. knowledges.append(txt)
  130. return knowledges
def label_question(question, kbs):
    """Compute tag-ranking features for `question` from the KBs' tag sources.

    Collects `tag_kb_ids` from every knowledge base's parser_config; when any
    exist, loads the tag vocabulary (from cache, else from the retriever, then
    caches it) and scores the question against it.

    Returns:
        The tag/weight structure produced by `tag_query`, or None when no
        knowledge base declares tag sources.
    """
    tags = None
    tag_kb_ids = []
    for kb in kbs:
        if kb.parser_config.get("tag_kb_ids"):
            tag_kb_ids.extend(kb.parser_config["tag_kb_ids"])
    if tag_kb_ids:
        all_tags = get_tags_from_cache(tag_kb_ids)
        if not all_tags:
            # NOTE(review): `kb` here is the loop variable leaking out of the
            # for-loop above, i.e. the LAST knowledge base — presumably all
            # kbs share a tenant for this lookup; confirm against callers.
            all_tags = settings.retrievaler.all_tags_in_portion(kb.tenant_id, tag_kb_ids)
            set_tags_to_cache(all_tags, tag_kb_ids)
        else:
            # Cache stores the tag map as a JSON string.
            all_tags = json.loads(all_tags)
        tag_kbs = KnowledgebaseService.get_by_ids(tag_kb_ids)
        tags = settings.retrievaler.tag_query(question,
                                              list(set([kb.tenant_id for kb in tag_kbs])),
                                              tag_kb_ids,
                                              all_tags,
                                              # topn_tags also read from the last kb of the loop.
                                              kb.parser_config.get("topn_tags", 3)
                                              )
    return tags
  152. def chat_solo(dialog, messages, stream=True):
  153. if llm_id2llm_type(dialog.llm_id) == "image2text":
  154. chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
  155. else:
  156. chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
  157. prompt_config = dialog.prompt_config
  158. tts_mdl = None
  159. if prompt_config.get("tts"):
  160. tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
  161. msg = [{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])}
  162. for m in messages if m["role"] != "system"]
  163. if stream:
  164. last_ans = ""
  165. for ans in chat_mdl.chat_streamly(prompt_config.get("system", ""), msg, dialog.llm_setting):
  166. answer = ans
  167. delta_ans = ans[len(last_ans):]
  168. if num_tokens_from_string(delta_ans) < 16:
  169. continue
  170. last_ans = answer
  171. yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt":"", "created_at": time.time()}
  172. else:
  173. answer = chat_mdl.chat(prompt_config.get("system", ""), msg, dialog.llm_setting)
  174. user_content = msg[-1].get("content", "[content not available]")
  175. logging.debug("User: {}|Assistant: {}".format(user_content, answer))
  176. yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, answer), "prompt": "", "created_at": time.time()}
def chat(dialog, messages, stream=True, **kwargs):
    """Answer the latest user message using the dialog's knowledge bases.

    Full RAG pipeline: model binding, optional SQL answering, optional
    multi-turn question refinement, retrieval (vector / Tavily web /
    knowledge-graph / reasoning), prompt assembly, generation, and citation
    decoration. Timing checkpoints are collected throughout and appended to
    the returned prompt for diagnostics.

    Yields:
        dicts with at least "answer" and "reference"; streaming mode yields
        incremental answers followed by one final decorated answer.
    """
    assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
    # No knowledge bases configured: degrade to a plain LLM chat.
    if not dialog.kb_ids:
        for ans in chat_solo(dialog, messages, stream):
            yield ans
        return

    chat_start_ts = timer()

    if llm_id2llm_type(dialog.llm_id) == "image2text":
        llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    max_tokens = llm_model_config.get("max_tokens", 8192)

    check_llm_ts = timer()

    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    # All selected KBs must share one embedding model or retrieval is invalid.
    embedding_list = list(set([kb.embd_id for kb in kbs]))
    if len(embedding_list) != 1:
        yield {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
        # Generator return value (StopIteration.value); not seen by `for` consumers.
        return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}

    embedding_model_name = embedding_list[0]

    retriever = settings.retrievaler

    # Last three user turns are the retrieval context.
    questions = [m["content"] for m in messages if m["role"] == "user"][-3:]
    attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
    if "doc_ids" in messages[-1]:
        attachments = messages[-1]["doc_ids"]

    create_retriever_ts = timer()

    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embedding_model_name)
    if not embd_mdl:
        raise LookupError("Embedding model(%s) not found" % embedding_model_name)

    bind_embedding_ts = timer()

    if llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    bind_llm_ts = timer()

    prompt_config = dialog.prompt_config
    field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
    tts_mdl = None
    if prompt_config.get("tts"):
        tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
    # try to use sql if field mapping is good to go
    if field_map:
        logging.debug("Use SQL to retrieval:{}".format(questions[-1]))
        ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True))
        if ans:
            yield ans
            return

    # Validate prompt parameters; blank out optional ones that were not given.
    for p in prompt_config["parameters"]:
        if p["key"] == "knowledge":
            continue
        if p["key"] not in kwargs and not p["optional"]:
            raise KeyError("Miss parameter: " + p["key"])
        if p["key"] not in kwargs:
            prompt_config["system"] = prompt_config["system"].replace(
                "{%s}" % p["key"], " ")

    # Multi-turn: optionally rewrite the history into one standalone question.
    if len(questions) > 1 and prompt_config.get("refine_multiturn"):
        questions = [full_question(dialog.tenant_id, dialog.llm_id, messages)]
    else:
        questions = questions[-1:]

    refine_question_ts = timer()

    rerank_mdl = None
    if dialog.rerank_id:
        rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)

    bind_reranker_ts = timer()
    generate_keyword_ts = bind_reranker_ts
    thought = ""
    kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}

    # Only retrieve when the prompt template actually consumes {knowledge}.
    if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
        knowledges = []
    else:
        if prompt_config.get("keyword", False):
            # Augment the query with LLM-extracted keywords.
            questions[-1] += keyword_extraction(chat_mdl, questions[-1])
            generate_keyword_ts = timer()

        tenant_ids = list(set([kb.tenant_id for kb in kbs]))

        knowledges = []
        if prompt_config.get("reasoning", False):
            # NOTE(review): `reasoning` is not defined/imported in this view —
            # presumably provided elsewhere in the module; confirm.
            # It yields either intermediate dicts (forwarded to the caller)
            # or a final thought string that doubles as the knowledge.
            for think in reasoning(kbinfos, " ".join(questions), chat_mdl, embd_mdl, tenant_ids, dialog.kb_ids, prompt_config, MAX_SEARCH_LIMIT=3):
                if isinstance(think, str):
                    thought = think
                    knowledges = [t for t in think.split("\n") if t]
                else:
                    yield think
        else:
            kbinfos = retriever.retrieval(" ".join(questions), embd_mdl, tenant_ids, dialog.kb_ids, 1, dialog.top_n,
                                          dialog.similarity_threshold,
                                          dialog.vector_similarity_weight,
                                          doc_ids=attachments,
                                          top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl,
                                          rank_feature=label_question(" ".join(questions), kbs)
                                          )
            # Optionally blend in web search results from Tavily.
            if prompt_config.get("tavily_api_key"):
                tav = Tavily(prompt_config["tavily_api_key"])
                tav_res = tav.retrieve_chunks(" ".join(questions))
                kbinfos["chunks"].extend(tav_res["chunks"])
                kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
            # Optionally prepend a knowledge-graph retrieval result.
            if prompt_config.get("use_kg"):
                ck = settings.kg_retrievaler.retrieval(" ".join(questions),
                                                       tenant_ids,
                                                       dialog.kb_ids,
                                                       embd_mdl,
                                                       LLMBundle(dialog.tenant_id, LLMType.CHAT))
                if ck["content_with_weight"]:
                    kbinfos["chunks"].insert(0, ck)

            knowledges = kb_prompt(kbinfos, max_tokens)

    logging.debug(
        "{}->{}".format(" ".join(questions), "\n->".join(knowledges)))

    retrieval_ts = timer()

    # Nothing retrieved: emit the configured canned response, if any.
    if not knowledges and prompt_config.get("empty_response"):
        empty_res = prompt_config["empty_response"]
        yield {"answer": empty_res, "reference": kbinfos, "audio_binary": tts(tts_mdl, empty_res)}
        return {"answer": prompt_config["empty_response"], "reference": kbinfos}

    kwargs["knowledge"] = "\n------\n" + "\n\n------\n\n".join(knowledges)
    gen_conf = dialog.llm_setting

    msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
    # History with citation markers stripped; system turns excluded.
    msg.extend([{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])}
                for m in messages if m["role"] != "system"])
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
    assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
    prompt = msg[0]["content"]
    prompt += "\n\n### Query:\n%s" % " ".join(questions)

    # Cap generation so prompt + completion stays inside the context window.
    if "max_tokens" in gen_conf:
        gen_conf["max_tokens"] = min(
            gen_conf["max_tokens"],
            max_tokens - used_token_count)

    def decorate_answer(answer):
        """Attach citations, references, and timing diagnostics to the final answer."""
        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts
        refs = []
        # Split off an optional <think>...</think> reasoning prefix.
        ans = answer.split("</think>")
        think = ""
        if len(ans) == 2:
            think = ans[0] + "</think>"
            answer = ans[1]

        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            answer, idx = retriever.insert_citations(answer,
                                                     [ck["content_ltks"]
                                                      for ck in kbinfos["chunks"]],
                                                     [ck["vector"]
                                                      for ck in kbinfos["chunks"]],
                                                     embd_mdl,
                                                     tkweight=1 - dialog.vector_similarity_weight,
                                                     vtweight=dialog.vector_similarity_weight)
            # Keep only the documents actually cited; fall back to all.
            idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
            recall_docs = [
                d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
            if not recall_docs:
                recall_docs = kbinfos["doc_aggs"]
            kbinfos["doc_aggs"] = recall_docs

            refs = deepcopy(kbinfos)
            # Vectors are bulky and useless to the client; drop them.
            for c in refs["chunks"]:
                if c.get("vector"):
                    del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
        finish_chat_ts = timer()

        # Stage timings (ms) appended to the prompt for diagnostics.
        total_time_cost = (finish_chat_ts - chat_start_ts) * 1000
        check_llm_time_cost = (check_llm_ts - chat_start_ts) * 1000
        create_retriever_time_cost = (create_retriever_ts - check_llm_ts) * 1000
        bind_embedding_time_cost = (bind_embedding_ts - create_retriever_ts) * 1000
        bind_llm_time_cost = (bind_llm_ts - bind_embedding_ts) * 1000
        refine_question_time_cost = (refine_question_ts - bind_llm_ts) * 1000
        bind_reranker_time_cost = (bind_reranker_ts - refine_question_ts) * 1000
        generate_keyword_time_cost = (generate_keyword_ts - bind_reranker_ts) * 1000
        retrieval_time_cost = (retrieval_ts - generate_keyword_ts) * 1000
        generate_result_time_cost = (finish_chat_ts - retrieval_ts) * 1000

        prompt = f"{prompt}\n\n - Total: {total_time_cost:.1f}ms\n - Check LLM: {check_llm_time_cost:.1f}ms\n - Create retriever: {create_retriever_time_cost:.1f}ms\n - Bind embedding: {bind_embedding_time_cost:.1f}ms\n - Bind LLM: {bind_llm_time_cost:.1f}ms\n - Tune question: {refine_question_time_cost:.1f}ms\n - Bind reranker: {bind_reranker_time_cost:.1f}ms\n - Generate keyword: {generate_keyword_time_cost:.1f}ms\n - Retrieval: {retrieval_time_cost:.1f}ms\n - Generate answer: {generate_result_time_cost:.1f}ms"
        return {"answer": think+answer, "reference": refs, "prompt": re.sub(r"\n", " \n", prompt), "created_at": time.time()}

    if stream:
        last_ans = ""
        answer = ""
        for ans in chat_mdl.chat_streamly(prompt, msg[1:], gen_conf):
            if thought:
                # Reasoning already surfaced separately; strip duplicates.
                ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
            answer = ans
            delta_ans = ans[len(last_ans):]
            # Buffer tiny fragments; emit once >= 16 tokens accumulated.
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": thought+answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        # Flush whatever remained below the 16-token threshold.
        delta_ans = answer[len(last_ans):]
        if delta_ans:
            yield {"answer": thought+answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        yield decorate_answer(thought+answer)
    else:
        answer = chat_mdl.chat(prompt, msg[1:], gen_conf)
        user_content = msg[-1].get("content", "[content not available]")
        logging.debug("User: {}|Assistant: {}".format(user_content, answer))
        res = decorate_answer(answer)
        res["audio_binary"] = tts(tts_mdl, answer)
        yield res
  365. def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
  366. sys_prompt = "You are a Database Administrator. You need to check the fields of the following tables based on the user's list of questions and write the SQL corresponding to the last question."
  367. user_prompt = """
  368. Table name: {};
  369. Table of database fields are as follows:
  370. {}
  371. Question are as follows:
  372. {}
  373. Please write the SQL, only SQL, without any other explanations or text.
  374. """.format(
  375. index_name(tenant_id),
  376. "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
  377. question
  378. )
  379. tried_times = 0
  380. def get_table():
  381. nonlocal sys_prompt, user_prompt, question, tried_times
  382. sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {
  383. "temperature": 0.06})
  384. logging.debug(f"{question} ==> {user_prompt} get SQL: {sql}")
  385. sql = re.sub(r"[\r\n]+", " ", sql.lower())
  386. sql = re.sub(r".*select ", "select ", sql.lower())
  387. sql = re.sub(r" +", " ", sql)
  388. sql = re.sub(r"([;;]|```).*", "", sql)
  389. if sql[:len("select ")] != "select ":
  390. return None, None
  391. if not re.search(r"((sum|avg|max|min)\(|group by )", sql.lower()):
  392. if sql[:len("select *")] != "select *":
  393. sql = "select doc_id,docnm_kwd," + sql[6:]
  394. else:
  395. flds = []
  396. for k in field_map.keys():
  397. if k in forbidden_select_fields4resume:
  398. continue
  399. if len(flds) > 11:
  400. break
  401. flds.append(k)
  402. sql = "select doc_id,docnm_kwd," + ",".join(flds) + sql[8:]
  403. logging.debug(f"{question} get SQL(refined): {sql}")
  404. tried_times += 1
  405. return settings.retrievaler.sql_retrieval(sql, format="json"), sql
  406. tbl, sql = get_table()
  407. if tbl is None:
  408. return None
  409. if tbl.get("error") and tried_times <= 2:
  410. user_prompt = """
  411. Table name: {};
  412. Table of database fields are as follows:
  413. {}
  414. Question are as follows:
  415. {}
  416. Please write the SQL, only SQL, without any other explanations or text.
  417. The SQL error you provided last time is as follows:
  418. {}
  419. Error issued by database as follows:
  420. {}
  421. Please correct the error and write SQL again, only SQL, without any other explanations or text.
  422. """.format(
  423. index_name(tenant_id),
  424. "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
  425. question, sql, tbl["error"]
  426. )
  427. tbl, sql = get_table()
  428. logging.debug("TRY it again: {}".format(sql))
  429. logging.debug("GET table: {}".format(tbl))
  430. if tbl.get("error") or len(tbl["rows"]) == 0:
  431. return None
  432. docid_idx = set([ii for ii, c in enumerate(
  433. tbl["columns"]) if c["name"] == "doc_id"])
  434. doc_name_idx = set([ii for ii, c in enumerate(
  435. tbl["columns"]) if c["name"] == "docnm_kwd"])
  436. column_idx = [ii for ii in range(
  437. len(tbl["columns"])) if ii not in (docid_idx | doc_name_idx)]
  438. # compose Markdown table
  439. columns = "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"],
  440. tbl["columns"][i]["name"])) for i in
  441. column_idx]) + ("|Source|" if docid_idx and docid_idx else "|")
  442. line = "|" + "|".join(["------" for _ in range(len(column_idx))]) + \
  443. ("|------|" if docid_idx and docid_idx else "")
  444. rows = ["|" +
  445. "|".join([rmSpace(str(r[i])) for i in column_idx]).replace("None", " ") +
  446. "|" for r in tbl["rows"]]
  447. rows = [r for r in rows if re.sub(r"[ |]+", "", r)]
  448. if quota:
  449. rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
  450. else:
  451. rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
  452. rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)
  453. if not docid_idx or not doc_name_idx:
  454. logging.warning("SQL missing field: " + sql)
  455. return {
  456. "answer": "\n".join([columns, line, rows]),
  457. "reference": {"chunks": [], "doc_aggs": []},
  458. "prompt": sys_prompt
  459. }
  460. docid_idx = list(docid_idx)[0]
  461. doc_name_idx = list(doc_name_idx)[0]
  462. doc_aggs = {}
  463. for r in tbl["rows"]:
  464. if r[docid_idx] not in doc_aggs:
  465. doc_aggs[r[docid_idx]] = {"doc_name": r[doc_name_idx], "count": 0}
  466. doc_aggs[r[docid_idx]]["count"] += 1
  467. return {
  468. "answer": "\n".join([columns, line, rows]),
  469. "reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[doc_name_idx]} for r in tbl["rows"]],
  470. "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in
  471. doc_aggs.items()]},
  472. "prompt": sys_prompt
  473. }
  474. def relevant(tenant_id, llm_id, question, contents: list):
  475. if llm_id2llm_type(llm_id) == "image2text":
  476. chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
  477. else:
  478. chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
  479. prompt = """
  480. You are a grader assessing relevance of a retrieved document to a user question.
  481. It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
  482. If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
  483. Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question.
  484. No other words needed except 'yes' or 'no'.
  485. """
  486. if not contents:
  487. return False
  488. contents = "Documents: \n" + " - ".join(contents)
  489. contents = f"Question: {question}\n" + contents
  490. if num_tokens_from_string(contents) >= chat_mdl.max_length - 4:
  491. contents = encoder.decode(encoder.encode(contents)[:chat_mdl.max_length - 4])
  492. ans = chat_mdl.chat(prompt, [{"role": "user", "content": contents}], {"temperature": 0.01})
  493. if ans.lower().find("yes") >= 0:
  494. return True
  495. return False
  496. def rewrite(tenant_id, llm_id, question):
  497. if llm_id2llm_type(llm_id) == "image2text":
  498. chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
  499. else:
  500. chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
  501. prompt = """
  502. You are an expert at query expansion to generate a paraphrasing of a question.
  503. I can't retrieval relevant information from the knowledge base by using user's question directly.
  504. You need to expand or paraphrase user's question by multiple ways such as using synonyms words/phrase,
  505. writing the abbreviation in its entirety, adding some extra descriptions or explanations,
  506. changing the way of expression, translating the original question into another language (English/Chinese), etc.
  507. And return 5 versions of question and one is from translation.
  508. Just list the question. No other words are needed.
  509. """
  510. ans = chat_mdl.chat(prompt, [{"role": "user", "content": question}], {"temperature": 0.8})
  511. return ans
  512. def keyword_extraction(chat_mdl, content, topn=3):
  513. prompt = f"""
  514. Role: You're a text analyzer.
  515. Task: extract the most important keywords/phrases of a given piece of text content.
  516. Requirements:
  517. - Summarize the text content, and give top {topn} important keywords/phrases.
  518. - The keywords MUST be in language of the given piece of text content.
  519. - The keywords are delimited by ENGLISH COMMA.
  520. - Keywords ONLY in output.
  521. ### Text Content
  522. {content}
  523. """
  524. msg = [
  525. {"role": "system", "content": prompt},
  526. {"role": "user", "content": "Output: "}
  527. ]
  528. _, msg = message_fit_in(msg, chat_mdl.max_length)
  529. kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
  530. if isinstance(kwd, tuple):
  531. kwd = kwd[0]
  532. kwd = re.sub(r"<think>.*</think>", "", kwd, flags=re.DOTALL)
  533. if kwd.find("**ERROR**") >= 0:
  534. return ""
  535. return kwd
  536. def question_proposal(chat_mdl, content, topn=3):
  537. prompt = f"""
  538. Role: You're a text analyzer.
  539. Task: propose {topn} questions about a given piece of text content.
  540. Requirements:
  541. - Understand and summarize the text content, and propose top {topn} important questions.
  542. - The questions SHOULD NOT have overlapping meanings.
  543. - The questions SHOULD cover the main content of the text as much as possible.
  544. - The questions MUST be in language of the given piece of text content.
  545. - One question per line.
  546. - Question ONLY in output.
  547. ### Text Content
  548. {content}
  549. """
  550. msg = [
  551. {"role": "system", "content": prompt},
  552. {"role": "user", "content": "Output: "}
  553. ]
  554. _, msg = message_fit_in(msg, chat_mdl.max_length)
  555. kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
  556. if isinstance(kwd, tuple):
  557. kwd = kwd[0]
  558. kwd = re.sub(r"<think>.*</think>", "", kwd, flags=re.DOTALL)
  559. if kwd.find("**ERROR**") >= 0:
  560. return ""
  561. return kwd
def full_question(tenant_id, llm_id, messages):
    """Rewrite the conversation's latest question into a standalone one.

    Feeds the user/assistant history to the LLM with few-shot examples and
    asks for a self-contained question; relative dates are resolved against
    today's date. Falls back to the raw last message when the model errors.

    Returns:
        str: the refined standalone question.
    """
    if llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    # Flatten the history as "ROLE: content" lines; other roles are dropped.
    conv = []
    for m in messages:
        if m["role"] not in ["user", "assistant"]:
            continue
        conv.append("{}: {}".format(m["role"].upper(), m["content"]))
    conv = "\n".join(conv)
    # Anchor dates so the model can resolve "yesterday"/"tomorrow".
    today = datetime.date.today().isoformat()
    yesterday = (datetime.date.today() - timedelta(days=1)).isoformat()
    tomorrow = (datetime.date.today() + timedelta(days=1)).isoformat()
    prompt = f"""
Role: A helpful assistant
Task and steps:
1. Generate a full user question that would follow the conversation.
2. If the user's question involves relative date, you need to convert it into absolute date based on the current date, which is {today}. For example: 'yesterday' would be converted to {yesterday}.
Requirements & Restrictions:
- Text generated MUST be in the same language of the original user's question.
- If the user's latest question is completely, don't do anything, just return the original question.
- DON'T generate anything except a refined question.
######################
-Examples-
######################
# Example 1
## Conversation
USER: What is the name of Donald Trump's father?
ASSISTANT: Fred Trump.
USER: And his mother?
###############
Output: What's the name of Donald Trump's mother?
------------
# Example 2
## Conversation
USER: What is the name of Donald Trump's father?
ASSISTANT: Fred Trump.
USER: And his mother?
ASSISTANT: Mary Trump.
User: What's her full name?
###############
Output: What's the full name of Donald Trump's mother Mary Trump?
------------
# Example 3
## Conversation
USER: What's the weather today in London?
ASSISTANT: Cloudy.
USER: What's about tomorrow in Rochester?
###############
Output: What's the weather in Rochester on {tomorrow}?
######################
# Real Data
## Conversation
{conv}
###############
"""
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": "Output: "}], {"temperature": 0.2})
    # Strip any <think>...</think> reasoning prefix from the answer.
    ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
    return ans if ans.find("**ERROR**") < 0 else messages[-1]["content"]
  622. def tts(tts_mdl, text):
  623. if not tts_mdl or not text:
  624. return
  625. bin = b""
  626. for chunk in tts_mdl.tts(text):
  627. bin += chunk
  628. return binascii.hexlify(bin).decode("utf-8")
def ask(question, kb_ids, tenant_id):
    """Stream an answer to *question* grounded in the given knowledge bases.

    Generator: yields ``{"answer": <partial text>, "reference": {}}`` while the
    LLM streams, then one final ``{"answer": ..., "reference": <citations>}``
    produced by ``decorate_answer``.

    NOTE(review): assumes *kb_ids* resolves to at least one knowledge base —
    ``embedding_list[0]`` raises IndexError otherwise; confirm callers validate.
    """
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    embedding_list = list(set([kb.embd_id for kb in kbs]))
    # Use the knowledge-graph retriever only when EVERY KB was parsed as KG.
    is_knowledge_graph = all([kb.parser_id == ParserType.KG for kb in kbs])
    retriever = settings.retrievaler if not is_knowledge_graph else settings.kg_retrievaler
    embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embedding_list[0])
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    max_tokens = chat_mdl.max_length
    tenant_ids = list(set([kb.tenant_id for kb in kbs]))
    # Positional args: page=1, page_size=12, similarity_threshold=0.1,
    # vector_similarity_weight=0.3; aggregations disabled.
    kbinfos = retriever.retrieval(question, embd_mdl, tenant_ids, kb_ids,
                                  1, 12, 0.1, 0.3, aggs=False,
                                  rank_feature=label_question(question, kbs)
                                  )
    # Trim/format retrieved chunks so the prompt fits the model's context window.
    knowledges = kb_prompt(kbinfos, max_tokens)
    # NOTE(review): the "DO NOT make things up" requirement is duplicated in
    # this prompt — presumably intentional emphasis; confirm.
    prompt = """
Role: You're a smart assistant. Your name is Miss R.
Task: Summarize the information from knowledge bases and answer user's question.
Requirements and restriction:
- DO NOT make things up, especially for numbers.
- If the information from knowledge is irrelevant with user's question, JUST SAY: Sorry, no relevant information provided.
- Answer with markdown format text.
- Answer in language of user's question.
- DO NOT make things up, especially for numbers.
### Information from knowledge bases
%s
The above is information from knowledge bases.
""" % "\n".join(knowledges)
    msg = [{"role": "user", "content": question}]

    def decorate_answer(answer):
        # Attach citation markers to the final answer and build the reference
        # payload from the retrieved chunks/doc aggregates.
        nonlocal knowledges, kbinfos, prompt
        answer, idx = retriever.insert_citations(answer,
                                                 [ck["content_ltks"]
                                                  for ck in kbinfos["chunks"]],
                                                 [ck["vector"]
                                                  for ck in kbinfos["chunks"]],
                                                 embd_mdl,
                                                 tkweight=0.7,
                                                 vtweight=0.3)
        # Keep only the documents actually cited; fall back to all docs when
        # no citation was inserted.
        idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
        recall_docs = [
            d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
        if not recall_docs:
            recall_docs = kbinfos["doc_aggs"]
        kbinfos["doc_aggs"] = recall_docs
        refs = deepcopy(kbinfos)
        # Embedding vectors are large and not needed by the client.
        for c in refs["chunks"]:
            if c.get("vector"):
                del c["vector"]
        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        return {"answer": answer, "reference": refs}

    answer = ""
    # Stream partial answers; each yielded item carries the text so far.
    for ans in chat_mdl.chat_streamly(prompt, msg, {"temperature": 0.1}):
        answer = ans
        yield {"answer": answer, "reference": {}}
    yield decorate_answer(answer)
  685. def content_tagging(chat_mdl, content, all_tags, examples, topn=3):
  686. prompt = f"""
  687. Role: You're a text analyzer.
  688. Task: Tag (put on some labels) to a given piece of text content based on the examples and the entire tag set.
  689. Steps::
  690. - Comprehend the tag/label set.
  691. - Comprehend examples which all consist of both text content and assigned tags with relevance score in format of JSON.
  692. - Summarize the text content, and tag it with top {topn} most relevant tags from the set of tag/label and the corresponding relevance score.
  693. Requirements
  694. - The tags MUST be from the tag set.
  695. - The output MUST be in JSON format only, the key is tag and the value is its relevance score.
  696. - The relevance score must be range from 1 to 10.
  697. - Keywords ONLY in output.
  698. # TAG SET
  699. {", ".join(all_tags)}
  700. """
  701. for i, ex in enumerate(examples):
  702. prompt += """
  703. # Examples {}
  704. ### Text Content
  705. {}
  706. Output:
  707. {}
  708. """.format(i, ex["content"], json.dumps(ex[TAG_FLD], indent=2, ensure_ascii=False))
  709. prompt += f"""
  710. # Real Data
  711. ### Text Content
  712. {content}
  713. """
  714. msg = [
  715. {"role": "system", "content": prompt},
  716. {"role": "user", "content": "Output: "}
  717. ]
  718. _, msg = message_fit_in(msg, chat_mdl.max_length)
  719. kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.5})
  720. if isinstance(kwd, tuple):
  721. kwd = kwd[0]
  722. kwd = re.sub(r"<think>.*</think>", "", kwd, flags=re.DOTALL)
  723. if kwd.find("**ERROR**") >= 0:
  724. raise Exception(kwd)
  725. try:
  726. return json_repair.loads(kwd)
  727. except json_repair.JSONDecodeError:
  728. try:
  729. result = kwd.replace(prompt[:-1], '').replace('user', '').replace('model', '').strip()
  730. result = '{' + result.split('{')[1].split('}')[0] + '}'
  731. return json_repair.loads(result)
  732. except Exception as e:
  733. logging.exception(f"JSON parsing error: {result} -> {e}")
  734. raise e
  735. def reasoning(chunk_info: dict, question: str, chat_mdl: LLMBundle, embd_mdl: LLMBundle,
  736. tenant_ids: list[str], kb_ids: list[str], prompt_config, MAX_SEARCH_LIMIT: int = 3,
  737. top_n: int = 5, similarity_threshold: float = 0.4, vector_similarity_weight: float = 0.3):
  738. BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
  739. END_SEARCH_QUERY = "<|end_search_query|>"
  740. BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
  741. END_SEARCH_RESULT = "<|end_search_result|>"
  742. def rm_query_tags(line):
  743. pattern = re.escape(BEGIN_SEARCH_QUERY) + r"(.*?)" + re.escape(END_SEARCH_QUERY)
  744. return re.sub(pattern, "", line)
  745. def rm_result_tags(line):
  746. pattern = re.escape(BEGIN_SEARCH_RESULT) + r"(.*?)" + re.escape(END_SEARCH_RESULT)
  747. return re.sub(pattern, "", line)
  748. reason_prompt = (
  749. "You are a reasoning assistant with the ability to perform dataset searches to help "
  750. "you answer the user's question accurately. You have special tools:\n\n"
  751. f"- To perform a search: write {BEGIN_SEARCH_QUERY} your query here {END_SEARCH_QUERY}.\n"
  752. f"Then, the system will search and analyze relevant content, then provide you with helpful information in the format {BEGIN_SEARCH_RESULT} ...search results... {END_SEARCH_RESULT}.\n\n"
  753. f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n"
  754. "Once you have all the information you need, continue your reasoning.\n\n"
  755. "-- Example 1 --\n" ########################################
  756. "Question: \"Are both the directors of Jaws and Casino Royale from the same country?\"\n"
  757. "Assistant:\n"
  758. f" {BEGIN_SEARCH_QUERY}Who is the director of Jaws?{END_SEARCH_QUERY}\n\n"
  759. "User:\n"
  760. f" {BEGIN_SEARCH_RESULT}\nThe director of Jaws is Steven Spielberg...\n{END_SEARCH_RESULT}\n\n"
  761. "Continues reasoning with the new information.\n"
  762. "Assistant:\n"
  763. f" {BEGIN_SEARCH_QUERY}Where is Steven Spielberg from?{END_SEARCH_QUERY}\n\n"
  764. "User:\n"
  765. f" {BEGIN_SEARCH_RESULT}\nSteven Allan Spielberg is an American filmmaker...\n{END_SEARCH_RESULT}\n\n"
  766. "Continues reasoning with the new information...\n\n"
  767. "Assistant:\n"
  768. f" {BEGIN_SEARCH_QUERY}Who is the director of Casino Royale?{END_SEARCH_QUERY}\n\n"
  769. "User:\n"
  770. f" {BEGIN_SEARCH_RESULT}\nCasino Royale is a 2006 spy film directed by Martin Campbell...\n{END_SEARCH_RESULT}\n\n"
  771. "Continues reasoning with the new information...\n\n"
  772. "Assistant:\n"
  773. f" {BEGIN_SEARCH_QUERY}Where is Martin Campbell from?{END_SEARCH_QUERY}\n\n"
  774. "User:\n"
  775. f" {BEGIN_SEARCH_RESULT}\nMartin Campbell (born 24 October 1943) is a New Zealand film and television director...\n{END_SEARCH_RESULT}\n\n"
  776. "Continues reasoning with the new information...\n\n"
  777. "Assistant:\nIt's enough to answer the question\n"
  778. "-- Example 2 --\n" #########################################
  779. "Question: \"When was the founder of craigslist born?\"\n"
  780. "Assistant:\n"
  781. f" {BEGIN_SEARCH_QUERY}Who was the founder of craigslist?{END_SEARCH_QUERY}\n\n"
  782. "User:\n"
  783. f" {BEGIN_SEARCH_RESULT}\nCraigslist was founded by Craig Newmark...\n{END_SEARCH_RESULT}\n\n"
  784. "Continues reasoning with the new information.\n"
  785. "Assistant:\n"
  786. f" {BEGIN_SEARCH_QUERY} When was Craig Newmark born?{END_SEARCH_QUERY}\n\n"
  787. "User:\n"
  788. f" {BEGIN_SEARCH_RESULT}\nCraig Newmark was born on December 6, 1952...\n{END_SEARCH_RESULT}\n\n"
  789. "Continues reasoning with the new information...\n\n"
  790. "Assistant:\nIt's enough to answer the question\n"
  791. "**Remember**:\n"
  792. f"- You have a dataset to search, so you just provide a proper search query.\n"
  793. f"- Use {BEGIN_SEARCH_QUERY} to request a dataset search and end with {END_SEARCH_QUERY}.\n"
  794. "- The language of query MUST be as the same as 'Question' or 'search result'.\n"
  795. "- When done searching, continue your reasoning.\n\n"
  796. 'Please answer the following question. You should think step by step to solve it.\n\n'
  797. )
  798. relevant_extraction_prompt = """**Task Instruction:**
  799. You are tasked with reading and analyzing web pages based on the following inputs: **Previous Reasoning Steps**, **Current Search Query**, and **Searched Web Pages**. Your objective is to extract relevant and helpful information for **Current Search Query** from the **Searched Web Pages** and seamlessly integrate this information into the **Previous Reasoning Steps** to continue reasoning for the original question.
  800. **Guidelines:**
  801. 1. **Analyze the Searched Web Pages:**
  802. - Carefully review the content of each searched web page.
  803. - Identify factual information that is relevant to the **Current Search Query** and can aid in the reasoning process for the original question.
  804. 2. **Extract Relevant Information:**
  805. - Select the information from the Searched Web Pages that directly contributes to advancing the **Previous Reasoning Steps**.
  806. - Ensure that the extracted information is accurate and relevant.
  807. 3. **Output Format:**
  808. - **If the web pages provide helpful information for current search query:** Present the information beginning with `**Final Information**` as shown below.
  809. - The language of query **MUST BE** as the same as 'Search Query' or 'Web Pages'.\n"
  810. **Final Information**
  811. [Helpful information]
  812. - **If the web pages do not provide any helpful information for current search query:** Output the following text.
  813. **Final Information**
  814. No helpful information found.
  815. **Inputs:**
  816. - **Previous Reasoning Steps:**
  817. {prev_reasoning}
  818. - **Current Search Query:**
  819. {search_query}
  820. - **Searched Web Pages:**
  821. {document}
  822. """
  823. executed_search_queries = []
  824. msg_hisotry = [{"role": "user", "content": f'Question:\"{question}\"\n'}]
  825. all_reasoning_steps = []
  826. think = "<think>"
  827. for ii in range(MAX_SEARCH_LIMIT + 1):
  828. if ii == MAX_SEARCH_LIMIT - 1:
  829. summary_think = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
  830. yield {"answer": think + summary_think + "</think>", "reference": {}, "audio_binary": None}
  831. all_reasoning_steps.append(summary_think)
  832. msg_hisotry.append({"role": "assistant", "content": summary_think})
  833. break
  834. query_think = ""
  835. if msg_hisotry[-1]["role"] != "user":
  836. msg_hisotry.append({"role": "user", "content": "Continues reasoning with the new information.\n"})
  837. else:
  838. msg_hisotry[-1]["content"] += "\n\nContinues reasoning with the new information.\n"
  839. for ans in chat_mdl.chat_streamly(reason_prompt, msg_hisotry, {"temperature": 0.7}):
  840. ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
  841. if not ans:
  842. continue
  843. query_think = ans
  844. yield {"answer": think + rm_query_tags(query_think) + "</think>", "reference": {}, "audio_binary": None}
  845. think += rm_query_tags(query_think)
  846. all_reasoning_steps.append(query_think)
  847. queries = extract_between(query_think, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY)
  848. if not queries:
  849. if ii > 0:
  850. break
  851. queries = [question]
  852. for search_query in queries:
  853. logging.info(f"[THINK]Query: {ii}. {search_query}")
  854. msg_hisotry.append({"role": "assistant", "content": search_query})
  855. think += f"\n\n> {ii+1}. {search_query}\n\n"
  856. yield {"answer": think + "</think>", "reference": {}, "audio_binary": None}
  857. summary_think = ""
  858. # The search query has been searched in previous steps.
  859. if search_query in executed_search_queries:
  860. summary_think = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
  861. yield {"answer": think + summary_think + "</think>", "reference": {}, "audio_binary": None}
  862. all_reasoning_steps.append(summary_think)
  863. msg_hisotry.append({"role": "user", "content": summary_think})
  864. think += summary_think
  865. continue
  866. truncated_prev_reasoning = ""
  867. for i, step in enumerate(all_reasoning_steps):
  868. truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n"
  869. prev_steps = truncated_prev_reasoning.split('\n\n')
  870. if len(prev_steps) <= 5:
  871. truncated_prev_reasoning = '\n\n'.join(prev_steps)
  872. else:
  873. truncated_prev_reasoning = ''
  874. for i, step in enumerate(prev_steps):
  875. if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
  876. truncated_prev_reasoning += step + '\n\n'
  877. else:
  878. if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
  879. truncated_prev_reasoning += '...\n\n'
  880. truncated_prev_reasoning = truncated_prev_reasoning.strip('\n')
  881. # Retrieval procedure:
  882. # 1. KB search
  883. # 2. Web search (optional)
  884. # 3. KG search (optional)
  885. kbinfos = settings.retrievaler.retrieval(search_query, embd_mdl, tenant_ids, kb_ids, 1, top_n,
  886. similarity_threshold,
  887. vector_similarity_weight
  888. )
  889. if prompt_config.get("tavily_api_key", "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"):
  890. tav = Tavily(prompt_config["tavily_api_key"])
  891. tav_res = tav.retrieve_chunks(" ".join(search_query))
  892. kbinfos["chunks"].extend(tav_res["chunks"])
  893. kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
  894. if prompt_config.get("use_kg"):
  895. ck = settings.kg_retrievaler.retrieval(search_query,
  896. tenant_ids,
  897. kb_ids,
  898. embd_mdl,
  899. chat_mdl)
  900. if ck["content_with_weight"]:
  901. kbinfos["chunks"].insert(0, ck)
  902. # Merge chunk info for citations
  903. if not chunk_info["chunks"]:
  904. for k in chunk_info.keys():
  905. chunk_info[k] = kbinfos[k]
  906. else:
  907. cids = [c["chunk_id"] for c in chunk_info["chunks"]]
  908. for c in kbinfos["chunks"]:
  909. if c["chunk_id"] in cids:
  910. continue
  911. chunk_info["chunks"].append(c)
  912. dids = [d["doc_id"] for d in chunk_info["doc_aggs"]]
  913. for d in kbinfos["doc_aggs"]:
  914. if d["doc_id"] in dids:
  915. continue
  916. chunk_info["doc_aggs"].append(d)
  917. think += "\n\n"
  918. for ans in chat_mdl.chat_streamly(
  919. relevant_extraction_prompt.format(
  920. prev_reasoning=truncated_prev_reasoning,
  921. search_query=search_query,
  922. document="\n".join(kb_prompt(kbinfos, 4096))
  923. ),
  924. [{"role": "user",
  925. "content": f'Now you should analyze each web page and find helpful information based on the current search query "{search_query}" and previous reasoning steps.'}],
  926. {"temperature": 0.7}):
  927. ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
  928. if not ans:
  929. continue
  930. summary_think = ans
  931. yield {"answer": think + rm_result_tags(summary_think) + "</think>", "reference": {}, "audio_binary": None}
  932. all_reasoning_steps.append(summary_think)
  933. msg_hisotry.append(
  934. {"role": "user", "content": f"\n\n{BEGIN_SEARCH_RESULT}{summary_think}{END_SEARCH_RESULT}\n\n"})
  935. think += rm_result_tags(summary_think)
  936. logging.info(f"[THINK]Summary: {ii}. {summary_think}")
  937. yield think + "</think>"