您最多选择25个主题 主题必须以字母或数字开头,可以包含连字符 (-),并且长度不得超过35个字符

dialog_service.py 31KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733
  1. #
  2. # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. import binascii
  17. import logging
  18. import re
  19. import time
  20. from copy import deepcopy
  21. from datetime import datetime
  22. from functools import partial
  23. from timeit import default_timer as timer
  24. from langfuse import Langfuse
  25. from peewee import fn
  26. from agentic_reasoning import DeepResearcher
  27. from api import settings
  28. from api.db import LLMType, ParserType, StatusEnum
  29. from api.db.db_models import DB, Dialog
  30. from api.db.services.common_service import CommonService
  31. from api.db.services.document_service import DocumentService
  32. from api.db.services.knowledgebase_service import KnowledgebaseService
  33. from api.db.services.langfuse_service import TenantLangfuseService
  34. from api.db.services.llm_service import LLMBundle
  35. from api.db.services.tenant_llm_service import TenantLLMService
  36. from api.utils import current_timestamp, datetime_format
  37. from rag.app.resume import forbidden_select_fields4resume
  38. from rag.app.tag import label_question
  39. from rag.nlp.search import index_name
  40. from rag.prompts import chunks_format, citation_prompt, cross_languages, full_question, kb_prompt, keyword_extraction, message_fit_in
  41. from rag.prompts.prompts import gen_meta_filter
  42. from rag.utils import num_tokens_from_string, rmSpace
  43. from rag.utils.tavily_conn import Tavily
class DialogService(CommonService):
    """CRUD/listing service for ``Dialog`` rows (chat-assistant configurations).

    Inherits generic persistence helpers from ``CommonService``; ``model``
    pins the peewee model this service operates on.
    """

    # peewee model backing this service
    model = Dialog

    @classmethod
    def save(cls, **kwargs):
        """Insert a new record into the database.

        Forces an INSERT (``force_insert=True``) rather than letting peewee
        decide between insert and update.

        Args:
            **kwargs: Record field values as keyword arguments.

        Returns:
            The value returned by peewee's ``Model.save`` (the number of
            rows modified), not the model instance itself.
        """
        sample_obj = cls.model(**kwargs).save(force_insert=True)
        return sample_obj

    @classmethod
    def update_many_by_id(cls, data_list):
        """Update multiple records by their IDs inside one transaction.

        Refreshes ``update_time``/``update_date`` on each payload dict
        (mutating the caller's dicts) before issuing the UPDATE.

        Args:
            data_list (list): List of dicts of field values to update.
                Each dict must include an ``id`` field.
        """
        with DB.atomic():
            for data in data_list:
                data["update_time"] = current_timestamp()
                data["update_date"] = datetime_format(datetime.now())
                cls.model.update(data).where(cls.model.id == data["id"]).execute()

    @classmethod
    @DB.connection_context()
    def get_list(cls, tenant_id, page_number, items_per_page, orderby, desc, id, name):
        """Return one page of a tenant's valid dialogs as dicts.

        Args:
            tenant_id: Owner tenant; only VALID-status rows are returned.
            page_number / items_per_page: Pagination (peewee ``paginate``).
            orderby: Field name to sort by (resolved via ``getter_by``).
            desc: Truthy for descending order.
            id / name: Optional exact-match filters.
        """
        chats = cls.model.select()
        if id:
            chats = chats.where(cls.model.id == id)
        if name:
            chats = chats.where(cls.model.name == name)
        chats = chats.where((cls.model.tenant_id == tenant_id) & (cls.model.status == StatusEnum.VALID.value))
        if desc:
            chats = chats.order_by(cls.model.getter_by(orderby).desc())
        else:
            chats = chats.order_by(cls.model.getter_by(orderby).asc())
        chats = chats.paginate(page_number, items_per_page)
        return list(chats.dicts())

    @classmethod
    @DB.connection_context()
    def get_by_tenant_ids(cls, joined_tenant_ids, user_id, page_number, items_per_page, orderby, desc, keywords, parser_id=None):
        """List dialogs visible to a user across joined tenants, with owner info.

        Joins ``User`` on the owning tenant to expose ``nickname`` and the
        owner's avatar (aliased ``tenant_avatar``). Filters to VALID rows
        owned by any of ``joined_tenant_ids`` or by ``user_id`` directly;
        optional case-insensitive name search via ``keywords`` and an exact
        ``parser_id`` filter.

        Returns:
            tuple: (list of row dicts, total match count before pagination).
        """
        # deferred import to avoid a circular dependency at module load time
        from api.db.db_models import User

        fields = [
            cls.model.id,
            cls.model.tenant_id,
            cls.model.name,
            cls.model.description,
            cls.model.language,
            cls.model.llm_id,
            cls.model.llm_setting,
            cls.model.prompt_type,
            cls.model.prompt_config,
            cls.model.similarity_threshold,
            cls.model.vector_similarity_weight,
            cls.model.top_n,
            cls.model.top_k,
            cls.model.do_refer,
            cls.model.rerank_id,
            cls.model.kb_ids,
            cls.model.icon,
            cls.model.status,
            User.nickname,
            User.avatar.alias("tenant_avatar"),
            cls.model.update_time,
            cls.model.create_time,
        ]
        if keywords:
            dialogs = (
                cls.model.select(*fields)
                .join(User, on=(cls.model.tenant_id == User.id))
                .where(
                    (cls.model.tenant_id.in_(joined_tenant_ids) | (cls.model.tenant_id == user_id)) & (cls.model.status == StatusEnum.VALID.value),
                    (fn.LOWER(cls.model.name).contains(keywords.lower())),
                )
            )
        else:
            dialogs = (
                cls.model.select(*fields)
                .join(User, on=(cls.model.tenant_id == User.id))
                .where(
                    (cls.model.tenant_id.in_(joined_tenant_ids) | (cls.model.tenant_id == user_id)) & (cls.model.status == StatusEnum.VALID.value),
                )
            )
        if parser_id:
            dialogs = dialogs.where(cls.model.parser_id == parser_id)
        if desc:
            dialogs = dialogs.order_by(cls.model.getter_by(orderby).desc())
        else:
            dialogs = dialogs.order_by(cls.model.getter_by(orderby).asc())
        # count before pagination so callers get the full match total
        count = dialogs.count()
        if page_number and items_per_page:
            dialogs = dialogs.paginate(page_number, items_per_page)
        return list(dialogs.dicts()), count
def chat_solo(dialog, messages, stream=True):
    """Chat without retrieval: forward the conversation straight to the LLM.

    Used by ``chat`` when the dialog has no knowledge bases and no Tavily
    key. Yields dicts with ``answer``/``reference``/``audio_binary``/
    ``prompt``/``created_at``; when streaming, deltas are batched to about
    16 tokens so TTS and network chunks are not too small.
    """
    # pick the right bundle: vision-capable model vs plain chat model
    if TenantLLMService.llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    prompt_config = dialog.prompt_config
    tts_mdl = None
    if prompt_config.get("tts"):
        tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
    # strip citation markers (##N$$) from history and drop system messages;
    # the system prompt is passed separately below
    msg = [{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])} for m in messages if m["role"] != "system"]
    if stream:
        last_ans = ""
        delta_ans = ""
        for ans in chat_mdl.chat_streamly(prompt_config.get("system", ""), msg, dialog.llm_setting):
            # `ans` is the cumulative answer so far; compute the new suffix
            answer = ans
            delta_ans = ans[len(last_ans) :]
            # batch tiny deltas; only emit once ~16 tokens have accumulated
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
            delta_ans = ""
        # flush whatever remained below the 16-token threshold
        if delta_ans:
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
    else:
        answer = chat_mdl.chat(prompt_config.get("system", ""), msg, dialog.llm_setting)
        user_content = msg[-1].get("content", "[content not available]")
        logging.debug("User: {}|Assistant: {}".format(user_content, answer))
        yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, answer), "prompt": "", "created_at": time.time()}
  170. def get_models(dialog):
  171. embd_mdl, chat_mdl, rerank_mdl, tts_mdl = None, None, None, None
  172. kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
  173. embedding_list = list(set([kb.embd_id for kb in kbs]))
  174. if len(embedding_list) > 1:
  175. raise Exception("**ERROR**: Knowledge bases use different embedding models.")
  176. if embedding_list:
  177. embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embedding_list[0])
  178. if not embd_mdl:
  179. raise LookupError("Embedding model(%s) not found" % embedding_list[0])
  180. if TenantLLMService.llm_id2llm_type(dialog.llm_id) == "image2text":
  181. chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
  182. else:
  183. chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
  184. if dialog.rerank_id:
  185. rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)
  186. if dialog.prompt_config.get("tts"):
  187. tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
  188. return kbs, embd_mdl, rerank_mdl, chat_mdl, tts_mdl
  189. BAD_CITATION_PATTERNS = [
  190. re.compile(r"\(\s*ID\s*[: ]*\s*(\d+)\s*\)"), # (ID: 12)
  191. re.compile(r"\[\s*ID\s*[: ]*\s*(\d+)\s*\]"), # [ID: 12]
  192. re.compile(r"【\s*ID\s*[: ]*\s*(\d+)\s*】"), # 【ID: 12】
  193. re.compile(r"ref\s*(\d+)", flags=re.IGNORECASE), # ref12、REF 12
  194. ]
  195. def repair_bad_citation_formats(answer: str, kbinfos: dict, idx: set):
  196. max_index = len(kbinfos["chunks"])
  197. def safe_add(i):
  198. if 0 <= i < max_index:
  199. idx.add(i)
  200. return True
  201. return False
  202. def find_and_replace(pattern, group_index=1, repl=lambda i: f"ID:{i}", flags=0):
  203. nonlocal answer
  204. def replacement(match):
  205. try:
  206. i = int(match.group(group_index))
  207. if safe_add(i):
  208. return f"[{repl(i)}]"
  209. except Exception:
  210. pass
  211. return match.group(0)
  212. answer = re.sub(pattern, replacement, answer, flags=flags)
  213. for pattern in BAD_CITATION_PATTERNS:
  214. find_and_replace(pattern)
  215. return answer, idx
  216. def meta_filter(metas: dict, filters: list[dict]):
  217. doc_ids = []
  218. def filter_out(v2docs, operator, value):
  219. nonlocal doc_ids
  220. for input,docids in v2docs.items():
  221. try:
  222. input = float(input)
  223. value = float(value)
  224. except Exception:
  225. input = str(input)
  226. value = str(value)
  227. for conds in [
  228. (operator == "contains", str(value).lower() in str(input).lower()),
  229. (operator == "not contains", str(value).lower() not in str(input).lower()),
  230. (operator == "start with", str(input).lower().startswith(str(value).lower())),
  231. (operator == "end with", str(input).lower().endswith(str(value).lower())),
  232. (operator == "empty", not input),
  233. (operator == "not empty", input),
  234. (operator == "=", input == value),
  235. (operator == "≠", input != value),
  236. (operator == ">", input > value),
  237. (operator == "<", input < value),
  238. (operator == "≥", input >= value),
  239. (operator == "≤", input <= value),
  240. ]:
  241. try:
  242. if all(conds):
  243. doc_ids.extend(docids)
  244. except Exception:
  245. pass
  246. for k, v2docs in metas.items():
  247. for f in filters:
  248. if k != f["key"]:
  249. continue
  250. filter_out(v2docs, f["op"], f["value"])
  251. return doc_ids
def chat(dialog, messages, stream=True, **kwargs):
    """Run one retrieval-augmented chat turn for *dialog* and yield answers.

    Pipeline: optional SQL answering (when a field map exists), question
    refinement (multi-turn fusion, cross-language, metadata filters,
    keyword extraction), chunk retrieval (vector/Tavily/knowledge-graph or
    DeepResearcher reasoning), then LLM generation with citation insertion
    and timing/Langfuse instrumentation.

    Yields dicts with at least ``answer`` and ``reference``; when streaming,
    intermediate partial answers are yielded before the final decorated one.
    """
    assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
    # no KBs and no web search configured -> plain LLM chat, no retrieval
    if not dialog.kb_ids and not dialog.prompt_config.get("tavily_api_key"):
        for ans in chat_solo(dialog, messages, stream):
            yield ans
        return

    chat_start_ts = timer()

    if TenantLLMService.llm_id2llm_type(dialog.llm_id) == "image2text":
        llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    max_tokens = llm_model_config.get("max_tokens", 8192)

    check_llm_ts = timer()

    # enable Langfuse tracing only when the tenant has working credentials
    langfuse_tracer = None
    trace_context = {}
    langfuse_keys = TenantLangfuseService.filter_by_tenant(tenant_id=dialog.tenant_id)
    if langfuse_keys:
        langfuse = Langfuse(public_key=langfuse_keys.public_key, secret_key=langfuse_keys.secret_key, host=langfuse_keys.host)
        if langfuse.auth_check():
            langfuse_tracer = langfuse
            trace_id = langfuse_tracer.create_trace_id()
            trace_context = {"trace_id": trace_id}

    check_langfuse_tracer_ts = timer()

    kbs, embd_mdl, rerank_mdl, chat_mdl, tts_mdl = get_models(dialog)
    toolcall_session, tools = kwargs.get("toolcall_session"), kwargs.get("tools")
    if toolcall_session and tools:
        chat_mdl.bind_tools(toolcall_session, tools)
    bind_models_ts = timer()

    retriever = settings.retrievaler

    # last three user turns; doc_ids may arrive via kwargs or the last message
    questions = [m["content"] for m in messages if m["role"] == "user"][-3:]
    attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else []
    if "doc_ids" in messages[-1]:
        attachments = messages[-1]["doc_ids"]

    prompt_config = dialog.prompt_config
    field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
    # try to use sql if field mapping is good to go
    if field_map:
        logging.debug("Use SQL to retrieval:{}".format(questions[-1]))
        ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True))
        if ans:
            yield ans
            return

    # validate prompt parameters; blank out optional ones that weren't supplied
    for p in prompt_config["parameters"]:
        if p["key"] == "knowledge":
            continue
        if p["key"] not in kwargs and not p["optional"]:
            raise KeyError("Miss parameter: " + p["key"])
        if p["key"] not in kwargs:
            prompt_config["system"] = prompt_config["system"].replace("{%s}" % p["key"], " ")

    # fuse multi-turn context into a single standalone question if configured
    if len(questions) > 1 and prompt_config.get("refine_multiturn"):
        questions = [full_question(dialog.tenant_id, dialog.llm_id, messages)]
    else:
        questions = questions[-1:]

    if prompt_config.get("cross_languages"):
        questions = [cross_languages(dialog.tenant_id, dialog.llm_id, questions[0], prompt_config["cross_languages"])]

    # metadata filtering narrows retrieval to matching documents;
    # an empty result resets attachments to None (meaning: no doc restriction)
    if dialog.meta_data_filter:
        metas = DocumentService.get_meta_by_kbs(dialog.kb_ids)
        if dialog.meta_data_filter.get("method") == "auto":
            filters = gen_meta_filter(chat_mdl, metas, questions[-1])
            attachments.extend(meta_filter(metas, filters))
            if not attachments:
                attachments = None
        elif dialog.meta_data_filter.get("method") == "manual":
            attachments.extend(meta_filter(metas, dialog.meta_data_filter["manual"]))
            if not attachments:
                attachments = None

    if prompt_config.get("keyword", False):
        questions[-1] += keyword_extraction(chat_mdl, questions[-1])

    refine_question_ts = timer()

    thought = ""
    kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
    knowledges = []

    # retrieve only when the prompt actually consumes a {knowledge} slot
    if attachments is not None and "knowledge" in [p["key"] for p in prompt_config["parameters"]]:
        tenant_ids = list(set([kb.tenant_id for kb in kbs]))
        knowledges = []
        if prompt_config.get("reasoning", False):
            # agentic multi-step retrieval; string yields are the final thought,
            # other yields are streamed through as-is
            reasoner = DeepResearcher(
                chat_mdl,
                prompt_config,
                partial(retriever.retrieval, embd_mdl=embd_mdl, tenant_ids=tenant_ids, kb_ids=dialog.kb_ids, page=1, page_size=dialog.top_n, similarity_threshold=0.2, vector_similarity_weight=0.3, doc_ids=attachments),
            )

            for think in reasoner.thinking(kbinfos, " ".join(questions)):
                if isinstance(think, str):
                    thought = think
                    knowledges = [t for t in think.split("\n") if t]
                elif stream:
                    yield think
        else:
            if embd_mdl:
                kbinfos = retriever.retrieval(
                    " ".join(questions),
                    embd_mdl,
                    tenant_ids,
                    dialog.kb_ids,
                    1,
                    dialog.top_n,
                    dialog.similarity_threshold,
                    dialog.vector_similarity_weight,
                    doc_ids=attachments,
                    top=dialog.top_k,
                    aggs=False,
                    rerank_mdl=rerank_mdl,
                    rank_feature=label_question(" ".join(questions), kbs),
                )
                # optional web-search augmentation
                if prompt_config.get("tavily_api_key"):
                    tav = Tavily(prompt_config["tavily_api_key"])
                    tav_res = tav.retrieve_chunks(" ".join(questions))
                    kbinfos["chunks"].extend(tav_res["chunks"])
                    kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
                # optional knowledge-graph chunk, prepended so it ranks first
                if prompt_config.get("use_kg"):
                    ck = settings.kg_retrievaler.retrieval(" ".join(questions), tenant_ids, dialog.kb_ids, embd_mdl, LLMBundle(dialog.tenant_id, LLMType.CHAT))
                    if ck["content_with_weight"]:
                        kbinfos["chunks"].insert(0, ck)

        knowledges = kb_prompt(kbinfos, max_tokens)

    logging.debug("{}->{}".format(" ".join(questions), "\n->".join(knowledges)))
    retrieval_ts = timer()
    # nothing retrieved and a canned empty-response is configured: short-circuit
    if not knowledges and prompt_config.get("empty_response"):
        empty_res = prompt_config["empty_response"]
        yield {"answer": empty_res, "reference": kbinfos, "prompt": "\n\n### Query:\n%s" % " ".join(questions), "audio_binary": tts(tts_mdl, empty_res)}
        return {"answer": prompt_config["empty_response"], "reference": kbinfos}

    kwargs["knowledge"] = "\n------\n" + "\n\n------\n\n".join(knowledges)
    gen_conf = dialog.llm_setting

    msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
    prompt4citation = ""
    if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
        prompt4citation = citation_prompt()
    # strip old citation markers from history; system turns are skipped
    msg.extend([{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])} for m in messages if m["role"] != "system"])
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.95))
    assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
    prompt = msg[0]["content"]

    if "max_tokens" in gen_conf:
        gen_conf["max_tokens"] = min(gen_conf["max_tokens"], max_tokens - used_token_count)

    def decorate_answer(answer):
        """Post-process the raw LLM answer: citations, references, timings."""
        nonlocal embd_mdl, prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts, questions, langfuse_tracer

        refs = []
        # split off a leading <think>...</think> reasoning block, if any
        ans = answer.split("</think>")
        think = ""
        if len(ans) == 2:
            think = ans[0] + "</think>"
            answer = ans[1]

        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            idx = set([])
            # if the model produced no [ID:n] markers itself, insert them by
            # similarity; otherwise just collect the indices it cited
            if embd_mdl and not re.search(r"\[ID:([0-9]+)\]", answer):
                answer, idx = retriever.insert_citations(
                    answer,
                    [ck["content_ltks"] for ck in kbinfos["chunks"]],
                    [ck["vector"] for ck in kbinfos["chunks"]],
                    embd_mdl,
                    tkweight=1 - dialog.vector_similarity_weight,
                    vtweight=dialog.vector_similarity_weight,
                )
            else:
                for match in re.finditer(r"\[ID:([0-9]+)\]", answer):
                    i = int(match.group(1))
                    if i < len(kbinfos["chunks"]):
                        idx.add(i)

            answer, idx = repair_bad_citation_formats(answer, kbinfos, idx)

            # map cited chunk indices to doc ids; fall back to all docs if none
            idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
            recall_docs = [d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
            if not recall_docs:
                recall_docs = kbinfos["doc_aggs"]
            kbinfos["doc_aggs"] = recall_docs

            refs = deepcopy(kbinfos)
            # vectors are heavy and not needed by the client
            for c in refs["chunks"]:
                if c.get("vector"):
                    del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"

        finish_chat_ts = timer()

        total_time_cost = (finish_chat_ts - chat_start_ts) * 1000
        check_llm_time_cost = (check_llm_ts - chat_start_ts) * 1000
        check_langfuse_tracer_cost = (check_langfuse_tracer_ts - check_llm_ts) * 1000
        bind_embedding_time_cost = (bind_models_ts - check_langfuse_tracer_ts) * 1000
        refine_question_time_cost = (refine_question_ts - bind_models_ts) * 1000
        retrieval_time_cost = (retrieval_ts - refine_question_ts) * 1000
        generate_result_time_cost = (finish_chat_ts - retrieval_ts) * 1000

        tk_num = num_tokens_from_string(think + answer)
        prompt += "\n\n### Query:\n%s" % " ".join(questions)
        prompt = (
            f"{prompt}\n\n"
            "## Time elapsed:\n"
            f" - Total: {total_time_cost:.1f}ms\n"
            f" - Check LLM: {check_llm_time_cost:.1f}ms\n"
            f" - Check Langfuse tracer: {check_langfuse_tracer_cost:.1f}ms\n"
            f" - Bind models: {bind_embedding_time_cost:.1f}ms\n"
            f" - Query refinement(LLM): {refine_question_time_cost:.1f}ms\n"
            f" - Retrieval: {retrieval_time_cost:.1f}ms\n"
            f" - Generate answer: {generate_result_time_cost:.1f}ms\n\n"
            "## Token usage:\n"
            f" - Generated tokens(approximately): {tk_num}\n"
            f" - Token speed: {int(tk_num / (generate_result_time_cost / 1000.0))}/s"
        )

        # Add a condition check to call the end method only if langfuse_tracer exists
        if langfuse_tracer and "langfuse_generation" in locals():
            langfuse_output = "\n" + re.sub(r"^.*?(### Query:.*)", r"\1", prompt, flags=re.DOTALL)
            langfuse_output = {"time_elapsed:": re.sub(r"\n", " \n", langfuse_output), "created_at": time.time()}
            langfuse_generation.update(output=langfuse_output)
            langfuse_generation.end()

        return {"answer": think + answer, "reference": refs, "prompt": re.sub(r"\n", " \n", prompt), "created_at": time.time()}

    if langfuse_tracer:
        langfuse_generation = langfuse_tracer.start_generation(
            trace_context=trace_context, name="chat", model=llm_model_config["llm_name"], input={"prompt": prompt, "prompt4citation": prompt4citation, "messages": msg}
        )

    if stream:
        last_ans = ""
        answer = ""
        for ans in chat_mdl.chat_streamly(prompt + prompt4citation, msg[1:], gen_conf):
            if thought:
                # the DeepResearcher thought is prepended separately; strip any
                # model-echoed reasoning block from the cumulative answer
                ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
            answer = ans
            delta_ans = ans[len(last_ans) :]
            # batch tiny deltas (~16 tokens) before emitting
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": thought + answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        delta_ans = answer[len(last_ans) :]
        if delta_ans:
            yield {"answer": thought + answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        yield decorate_answer(thought + answer)
    else:
        answer = chat_mdl.chat(prompt + prompt4citation, msg[1:], gen_conf)
        user_content = msg[-1].get("content", "[content not available]")
        logging.debug("User: {}|Assistant: {}".format(user_content, answer))
        res = decorate_answer(answer)
        res["audio_binary"] = tts(tts_mdl, answer)
        yield res
  478. def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
  479. sys_prompt = "You are a Database Administrator. You need to check the fields of the following tables based on the user's list of questions and write the SQL corresponding to the last question."
  480. user_prompt = """
  481. Table name: {};
  482. Table of database fields are as follows:
  483. {}
  484. Question are as follows:
  485. {}
  486. Please write the SQL, only SQL, without any other explanations or text.
  487. """.format(index_name(tenant_id), "\n".join([f"{k}: {v}" for k, v in field_map.items()]), question)
  488. tried_times = 0
  489. def get_table():
  490. nonlocal sys_prompt, user_prompt, question, tried_times
  491. sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {"temperature": 0.06})
  492. sql = re.sub(r"^.*</think>", "", sql, flags=re.DOTALL)
  493. logging.debug(f"{question} ==> {user_prompt} get SQL: {sql}")
  494. sql = re.sub(r"[\r\n]+", " ", sql.lower())
  495. sql = re.sub(r".*select ", "select ", sql.lower())
  496. sql = re.sub(r" +", " ", sql)
  497. sql = re.sub(r"([;;]|```).*", "", sql)
  498. if sql[: len("select ")] != "select ":
  499. return None, None
  500. if not re.search(r"((sum|avg|max|min)\(|group by )", sql.lower()):
  501. if sql[: len("select *")] != "select *":
  502. sql = "select doc_id,docnm_kwd," + sql[6:]
  503. else:
  504. flds = []
  505. for k in field_map.keys():
  506. if k in forbidden_select_fields4resume:
  507. continue
  508. if len(flds) > 11:
  509. break
  510. flds.append(k)
  511. sql = "select doc_id,docnm_kwd," + ",".join(flds) + sql[8:]
  512. logging.debug(f"{question} get SQL(refined): {sql}")
  513. tried_times += 1
  514. return settings.retrievaler.sql_retrieval(sql, format="json"), sql
  515. tbl, sql = get_table()
  516. if tbl is None:
  517. return None
  518. if tbl.get("error") and tried_times <= 2:
  519. user_prompt = """
  520. Table name: {};
  521. Table of database fields are as follows:
  522. {}
  523. Question are as follows:
  524. {}
  525. Please write the SQL, only SQL, without any other explanations or text.
  526. The SQL error you provided last time is as follows:
  527. {}
  528. Error issued by database as follows:
  529. {}
  530. Please correct the error and write SQL again, only SQL, without any other explanations or text.
  531. """.format(index_name(tenant_id), "\n".join([f"{k}: {v}" for k, v in field_map.items()]), question, sql, tbl["error"])
  532. tbl, sql = get_table()
  533. logging.debug("TRY it again: {}".format(sql))
  534. logging.debug("GET table: {}".format(tbl))
  535. if tbl.get("error") or len(tbl["rows"]) == 0:
  536. return None
  537. docid_idx = set([ii for ii, c in enumerate(tbl["columns"]) if c["name"] == "doc_id"])
  538. doc_name_idx = set([ii for ii, c in enumerate(tbl["columns"]) if c["name"] == "docnm_kwd"])
  539. column_idx = [ii for ii in range(len(tbl["columns"])) if ii not in (docid_idx | doc_name_idx)]
  540. # compose Markdown table
  541. columns = (
  542. "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"], tbl["columns"][i]["name"])) for i in column_idx]) + ("|Source|" if docid_idx and docid_idx else "|")
  543. )
  544. line = "|" + "|".join(["------" for _ in range(len(column_idx))]) + ("|------|" if docid_idx and docid_idx else "")
  545. rows = ["|" + "|".join([rmSpace(str(r[i])) for i in column_idx]).replace("None", " ") + "|" for r in tbl["rows"]]
  546. rows = [r for r in rows if re.sub(r"[ |]+", "", r)]
  547. if quota:
  548. rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
  549. else:
  550. rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
  551. rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)
  552. if not docid_idx or not doc_name_idx:
  553. logging.warning("SQL missing field: " + sql)
  554. return {"answer": "\n".join([columns, line, rows]), "reference": {"chunks": [], "doc_aggs": []}, "prompt": sys_prompt}
  555. docid_idx = list(docid_idx)[0]
  556. doc_name_idx = list(doc_name_idx)[0]
  557. doc_aggs = {}
  558. for r in tbl["rows"]:
  559. if r[docid_idx] not in doc_aggs:
  560. doc_aggs[r[docid_idx]] = {"doc_name": r[doc_name_idx], "count": 0}
  561. doc_aggs[r[docid_idx]]["count"] += 1
  562. return {
  563. "answer": "\n".join([columns, line, rows]),
  564. "reference": {
  565. "chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[doc_name_idx]} for r in tbl["rows"]],
  566. "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in doc_aggs.items()],
  567. },
  568. "prompt": sys_prompt,
  569. }
  570. def tts(tts_mdl, text):
  571. if not tts_mdl or not text:
  572. return
  573. bin = b""
  574. for chunk in tts_mdl.tts(text):
  575. bin += chunk
  576. return binascii.hexlify(bin).decode("utf-8")
def ask(question, kb_ids, tenant_id, chat_llm_name=None):
    """One-shot retrieval-augmented Q&A over *kb_ids*, streamed.

    Retrieves chunks for *question*, asks the chat model to summarize them,
    streams partial answers, and finally yields a decorated answer with
    inserted citations and cleaned references.

    Yields:
        dict: ``{"answer", "reference"}`` — reference is {} for partials.
    """
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    embedding_list = list(set([kb.embd_id for kb in kbs]))

    # use the knowledge-graph retriever only when EVERY kb is KG-parsed
    is_knowledge_graph = all([kb.parser_id == ParserType.KG for kb in kbs])
    retriever = settings.retrievaler if not is_knowledge_graph else settings.kg_retrievaler

    embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embedding_list[0])
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, chat_llm_name)
    max_tokens = chat_mdl.max_length
    tenant_ids = list(set([kb.tenant_id for kb in kbs]))
    # fixed retrieval knobs: page 1, top 12 chunks, 0.1 similarity threshold,
    # 0.3 vector weight
    kbinfos = retriever.retrieval(question, embd_mdl, tenant_ids, kb_ids, 1, 12, 0.1, 0.3, aggs=False, rank_feature=label_question(question, kbs))
    knowledges = kb_prompt(kbinfos, max_tokens)
    prompt = """
Role: You're a smart assistant. Your name is Miss R.
Task: Summarize the information from knowledge bases and answer user's question.
Requirements and restriction:
- DO NOT make things up, especially for numbers.
- If the information from knowledge is irrelevant with user's question, JUST SAY: Sorry, no relevant information provided.
- Answer with markdown format text.
- Answer in language of user's question.
- DO NOT make things up, especially for numbers.
### Information from knowledge bases
%s
The above is information from knowledge bases.
""" % "\n".join(knowledges)
    msg = [{"role": "user", "content": question}]

    def decorate_answer(answer):
        """Insert citations, prune uncited docs and strip vectors from refs."""
        nonlocal knowledges, kbinfos, prompt
        answer, idx = retriever.insert_citations(answer, [ck["content_ltks"] for ck in kbinfos["chunks"]], [ck["vector"] for ck in kbinfos["chunks"]], embd_mdl, tkweight=0.7, vtweight=0.3)
        # map cited chunk indices to doc ids; fall back to all docs if none cited
        idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
        recall_docs = [d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
        if not recall_docs:
            recall_docs = kbinfos["doc_aggs"]
        kbinfos["doc_aggs"] = recall_docs
        refs = deepcopy(kbinfos)
        # vectors are heavy and not needed by the client
        for c in refs["chunks"]:
            if c.get("vector"):
                del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        refs["chunks"] = chunks_format(refs)
        return {"answer": answer, "reference": refs}

    answer = ""
    for ans in chat_mdl.chat_streamly(prompt, msg, {"temperature": 0.1}):
        answer = ans
        yield {"answer": answer, "reference": {}}
    yield decorate_answer(answer)