Vous ne pouvez pas sélectionner plus de 25 sujets Les noms de sujets doivent commencer par une lettre ou un nombre, peuvent contenir des tirets ('-') et peuvent comporter jusqu'à 35 caractères.

dialog_service.py 28KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
  1. #
  2. # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. import logging
  17. import binascii
  18. import os
  19. import json
  20. import re
  21. from collections import defaultdict
  22. from copy import deepcopy
  23. from timeit import default_timer as timer
  24. import datetime
  25. from datetime import timedelta
  26. from api.db import LLMType, ParserType, StatusEnum
  27. from api.db.db_models import Dialog, DB
  28. from api.db.services.common_service import CommonService
  29. from api.db.services.knowledgebase_service import KnowledgebaseService
  30. from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
  31. from api import settings
  32. from rag.app.resume import forbidden_select_fields4resume
  33. from rag.nlp.search import index_name
  34. from rag.utils import rmSpace, num_tokens_from_string, encoder
  35. from api.utils.file_utils import get_project_base_directory
  36. class DialogService(CommonService):
  37. model = Dialog
  38. @classmethod
  39. @DB.connection_context()
  40. def get_list(cls, tenant_id,
  41. page_number, items_per_page, orderby, desc, id, name):
  42. chats = cls.model.select()
  43. if id:
  44. chats = chats.where(cls.model.id == id)
  45. if name:
  46. chats = chats.where(cls.model.name == name)
  47. chats = chats.where(
  48. (cls.model.tenant_id == tenant_id)
  49. & (cls.model.status == StatusEnum.VALID.value)
  50. )
  51. if desc:
  52. chats = chats.order_by(cls.model.getter_by(orderby).desc())
  53. else:
  54. chats = chats.order_by(cls.model.getter_by(orderby).asc())
  55. chats = chats.paginate(page_number, items_per_page)
  56. return list(chats.dicts())
  57. def message_fit_in(msg, max_length=4000):
  58. def count():
  59. nonlocal msg
  60. tks_cnts = []
  61. for m in msg:
  62. tks_cnts.append(
  63. {"role": m["role"], "count": num_tokens_from_string(m["content"])})
  64. total = 0
  65. for m in tks_cnts:
  66. total += m["count"]
  67. return total
  68. c = count()
  69. if c < max_length:
  70. return c, msg
  71. msg_ = [m for m in msg[:-1] if m["role"] == "system"]
  72. if len(msg) > 1:
  73. msg_.append(msg[-1])
  74. msg = msg_
  75. c = count()
  76. if c < max_length:
  77. return c, msg
  78. ll = num_tokens_from_string(msg_[0]["content"])
  79. ll2 = num_tokens_from_string(msg_[-1]["content"])
  80. if ll / (ll + ll2) > 0.8:
  81. m = msg_[0]["content"]
  82. m = encoder.decode(encoder.encode(m)[:max_length - ll2])
  83. msg[0]["content"] = m
  84. return max_length, msg
  85. m = msg_[1]["content"]
  86. m = encoder.decode(encoder.encode(m)[:max_length - ll2])
  87. msg[1]["content"] = m
  88. return max_length, msg
  89. def llm_id2llm_type(llm_id):
  90. llm_id, _ = TenantLLMService.split_model_name_and_factory(llm_id)
  91. fnm = os.path.join(get_project_base_directory(), "conf")
  92. llm_factories = json.load(open(os.path.join(fnm, "llm_factories.json"), "r"))
  93. for llm_factory in llm_factories["factory_llm_infos"]:
  94. for llm in llm_factory["llm"]:
  95. if llm_id == llm["llm_name"]:
  96. return llm["model_type"].strip(",")[-1]
  97. def kb_prompt(kbinfos, max_tokens):
  98. knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
  99. used_token_count = 0
  100. chunks_num = 0
  101. for i, c in enumerate(knowledges):
  102. used_token_count += num_tokens_from_string(c)
  103. chunks_num += 1
  104. if max_tokens * 0.97 < used_token_count:
  105. knowledges = knowledges[:i]
  106. break
  107. doc2chunks = defaultdict(list)
  108. for i, ck in enumerate(kbinfos["chunks"]):
  109. if i >= chunks_num:
  110. break
  111. doc2chunks[ck["docnm_kwd"]].append(ck["content_with_weight"])
  112. knowledges = []
  113. for nm, chunks in doc2chunks.items():
  114. txt = f"Document: {nm} \nContains the following relevant fragments:\n"
  115. for i, chunk in enumerate(chunks, 1):
  116. txt += f"{i}. {chunk}\n"
  117. knowledges.append(txt)
  118. return knowledges
def chat(dialog, messages, stream=True, **kwargs):
    """Answer the latest user message of a conversation with RAG.

    Generator. When ``stream`` is True it yields incremental
    {"answer", "reference", "audio_binary"} dicts followed by a final
    decorated answer; otherwise it yields a single decorated answer. The
    decorated answer carries citation references and a per-stage timing
    report appended to its "prompt" field.

    Args:
        dialog: Dialog record (llm_id, kb_ids, prompt_config, llm_setting,
            rerank_id, similarity/top-k settings, ...).
        messages: OpenAI-style message list; the last item MUST be from
            "user" (asserted below).
        stream: stream partial answers when True.
        **kwargs: prompt-template parameters; may include "doc_ids"
            (comma-separated str) to restrict retrieval to given documents.
    """
    assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
    chat_start_ts = timer()
    # Get llm model name and model provider name
    llm_id, model_provider = TenantLLMService.split_model_name_and_factory(dialog.llm_id)
    # Get llm model instance by model and provide name
    llm = LLMService.query(llm_name=llm_id) if not model_provider else LLMService.query(llm_name=llm_id, fid=model_provider)
    if not llm:
        # Model name is provided by tenant, but not system built-in
        llm = TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id) if not model_provider else \
            TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id, llm_factory=model_provider)
        if not llm:
            raise LookupError("LLM(%s) not found" % dialog.llm_id)
        # Tenant-registered model: its real context size is unknown here.
        max_tokens = 8192
    else:
        max_tokens = llm[0].max_tokens
    check_llm_ts = timer()

    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    embedding_list = list(set([kb.embd_id for kb in kbs]))
    # All selected knowledge bases must share one embedding model, otherwise
    # their vectors are not comparable.
    if len(embedding_list) != 1:
        yield {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
        return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}

    embedding_model_name = embedding_list[0]

    # Use the knowledge-graph retriever only when EVERY kb was KG-parsed.
    is_knowledge_graph = all([kb.parser_id == ParserType.KG for kb in kbs])
    retriever = settings.retrievaler if not is_knowledge_graph else settings.kg_retrievaler

    # Use at most the last 3 user turns as the retrieval query.
    questions = [m["content"] for m in messages if m["role"] == "user"][-3:]
    attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
    if "doc_ids" in messages[-1]:
        # doc_ids attached to messages override/extend the kwargs list.
        attachments = messages[-1]["doc_ids"]
        for m in messages[:-1]:
            if "doc_ids" in m:
                attachments.extend(m["doc_ids"])

    create_retriever_ts = timer()
    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embedding_model_name)
    if not embd_mdl:
        raise LookupError("Embedding model(%s) not found" % embedding_model_name)
    bind_embedding_ts = timer()
    if llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
    bind_llm_ts = timer()

    prompt_config = dialog.prompt_config
    field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
    tts_mdl = None
    if prompt_config.get("tts"):
        tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
    # try to use sql if field mapping is good to go
    if field_map:
        logging.debug("Use SQL to retrieval:{}".format(questions[-1]))
        ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True))
        if ans:
            yield ans
            return

    # Validate prompt parameters: required ones must be supplied; missing
    # optional placeholders are blanked out of the system prompt.
    for p in prompt_config["parameters"]:
        if p["key"] == "knowledge":
            continue
        if p["key"] not in kwargs and not p["optional"]:
            raise KeyError("Miss parameter: " + p["key"])
        if p["key"] not in kwargs:
            prompt_config["system"] = prompt_config["system"].replace(
                "{%s}" % p["key"], " ")

    if len(questions) > 1 and prompt_config.get("refine_multiturn"):
        # Condense the multi-turn history into one standalone question.
        questions = [full_question(dialog.tenant_id, dialog.llm_id, messages)]
    else:
        questions = questions[-1:]
    refine_question_ts = timer()

    rerank_mdl = None
    if dialog.rerank_id:
        rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)
    bind_reranker_ts = timer()
    generate_keyword_ts = bind_reranker_ts

    if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
        # Prompt template does not consume retrieved knowledge: skip retrieval.
        kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
    else:
        if prompt_config.get("keyword", False):
            # Enrich the query with LLM-extracted keywords before retrieval.
            questions[-1] += keyword_extraction(chat_mdl, questions[-1])
            generate_keyword_ts = timer()
        tenant_ids = list(set([kb.tenant_id for kb in kbs]))
        kbinfos = retriever.retrieval(" ".join(questions), embd_mdl, tenant_ids, dialog.kb_ids, 1, dialog.top_n,
                                      dialog.similarity_threshold,
                                      dialog.vector_similarity_weight,
                                      doc_ids=attachments,
                                      top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl)
    retrieval_ts = timer()

    knowledges = kb_prompt(kbinfos, max_tokens)
    logging.debug(
        "{}->{}".format(" ".join(questions), "\n->".join(knowledges)))

    if not knowledges and prompt_config.get("empty_response"):
        # Nothing retrieved: reply with the configured fallback answer.
        empty_res = prompt_config["empty_response"]
        yield {"answer": empty_res, "reference": kbinfos, "audio_binary": tts(tts_mdl, empty_res)}
        return {"answer": prompt_config["empty_response"], "reference": kbinfos}

    kwargs["knowledge"] = "\n\n------\n\n".join(knowledges)
    gen_conf = dialog.llm_setting

    # Strip citation markers (##N$$) from prior turns before re-sending them.
    msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
    msg.extend([{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])}
                for m in messages if m["role"] != "system"])
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
    assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
    prompt = msg[0]["content"]
    prompt += "\n\n### Query:\n%s" % " ".join(questions)

    if "max_tokens" in gen_conf:
        # Cap generation so prompt + completion stay inside the window.
        gen_conf["max_tokens"] = min(
            gen_conf["max_tokens"],
            max_tokens - used_token_count)

    def decorate_answer(answer):
        """Insert citations, build the reference payload and timing report."""
        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts
        finish_chat_ts = timer()

        refs = []
        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            answer, idx = retriever.insert_citations(answer,
                                                     [ck["content_ltks"]
                                                      for ck in kbinfos["chunks"]],
                                                     [ck["vector"]
                                                      for ck in kbinfos["chunks"]],
                                                     embd_mdl,
                                                     tkweight=1 - dialog.vector_similarity_weight,
                                                     vtweight=dialog.vector_similarity_weight)
            idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
            # Keep only the cited docs in the aggregation; if nothing was
            # cited, keep them all.
            recall_docs = [
                d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
            if not recall_docs:
                recall_docs = kbinfos["doc_aggs"]
            kbinfos["doc_aggs"] = recall_docs
            refs = deepcopy(kbinfos)
            for c in refs["chunks"]:
                if c.get("vector"):
                    # Vectors are bulky and useless to the client.
                    del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
        # NOTE(review): finish_chat_ts is re-read here so insert_citations
        # time is counted into "Generate answer".
        finish_chat_ts = timer()

        # Per-stage latency breakdown (ms), appended to the prompt for debugging.
        total_time_cost = (finish_chat_ts - chat_start_ts) * 1000
        check_llm_time_cost = (check_llm_ts - chat_start_ts) * 1000
        create_retriever_time_cost = (create_retriever_ts - check_llm_ts) * 1000
        bind_embedding_time_cost = (bind_embedding_ts - create_retriever_ts) * 1000
        bind_llm_time_cost = (bind_llm_ts - bind_embedding_ts) * 1000
        refine_question_time_cost = (refine_question_ts - bind_llm_ts) * 1000
        bind_reranker_time_cost = (bind_reranker_ts - refine_question_ts) * 1000
        generate_keyword_time_cost = (generate_keyword_ts - bind_reranker_ts) * 1000
        retrieval_time_cost = (retrieval_ts - generate_keyword_ts) * 1000
        generate_result_time_cost = (finish_chat_ts - retrieval_ts) * 1000

        prompt = f"{prompt}\n\n - Total: {total_time_cost:.1f}ms\n - Check LLM: {check_llm_time_cost:.1f}ms\n - Create retriever: {create_retriever_time_cost:.1f}ms\n - Bind embedding: {bind_embedding_time_cost:.1f}ms\n - Bind LLM: {bind_llm_time_cost:.1f}ms\n - Tune question: {refine_question_time_cost:.1f}ms\n - Bind reranker: {bind_reranker_time_cost:.1f}ms\n - Generate keyword: {generate_keyword_time_cost:.1f}ms\n - Retrieval: {retrieval_time_cost:.1f}ms\n - Generate answer: {generate_result_time_cost:.1f}ms"
        return {"answer": answer, "reference": refs, "prompt": prompt}

    if stream:
        last_ans = ""
        answer = ""
        for ans in chat_mdl.chat_streamly(prompt, msg[1:], gen_conf):
            answer = ans
            delta_ans = ans[len(last_ans):]
            # Batch tiny deltas (< 16 tokens) to avoid chatty streaming.
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        # Flush whatever remained below the 16-token threshold.
        delta_ans = answer[len(last_ans):]
        if delta_ans:
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        yield decorate_answer(answer)
    else:
        answer = chat_mdl.chat(prompt, msg[1:], gen_conf)
        logging.debug("User: {}|Assistant: {}".format(
            msg[-1]["content"], answer))
        res = decorate_answer(answer)
        res["audio_binary"] = tts(tts_mdl, answer)
        yield res
  283. def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
  284. sys_prompt = "You are a Database Administrator. You need to check the fields of the following tables based on the user's list of questions and write the SQL corresponding to the last question."
  285. user_prompt = """
  286. Table name: {};
  287. Table of database fields are as follows:
  288. {}
  289. Question are as follows:
  290. {}
  291. Please write the SQL, only SQL, without any other explanations or text.
  292. """.format(
  293. index_name(tenant_id),
  294. "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
  295. question
  296. )
  297. tried_times = 0
  298. def get_table():
  299. nonlocal sys_prompt, user_prompt, question, tried_times
  300. sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {
  301. "temperature": 0.06})
  302. logging.debug(f"{question} ==> {user_prompt} get SQL: {sql}")
  303. sql = re.sub(r"[\r\n]+", " ", sql.lower())
  304. sql = re.sub(r".*select ", "select ", sql.lower())
  305. sql = re.sub(r" +", " ", sql)
  306. sql = re.sub(r"([;;]|```).*", "", sql)
  307. if sql[:len("select ")] != "select ":
  308. return None, None
  309. if not re.search(r"((sum|avg|max|min)\(|group by )", sql.lower()):
  310. if sql[:len("select *")] != "select *":
  311. sql = "select doc_id,docnm_kwd," + sql[6:]
  312. else:
  313. flds = []
  314. for k in field_map.keys():
  315. if k in forbidden_select_fields4resume:
  316. continue
  317. if len(flds) > 11:
  318. break
  319. flds.append(k)
  320. sql = "select doc_id,docnm_kwd," + ",".join(flds) + sql[8:]
  321. logging.debug(f"{question} get SQL(refined): {sql}")
  322. tried_times += 1
  323. return settings.retrievaler.sql_retrieval(sql, format="json"), sql
  324. tbl, sql = get_table()
  325. if tbl is None:
  326. return None
  327. if tbl.get("error") and tried_times <= 2:
  328. user_prompt = """
  329. Table name: {};
  330. Table of database fields are as follows:
  331. {}
  332. Question are as follows:
  333. {}
  334. Please write the SQL, only SQL, without any other explanations or text.
  335. The SQL error you provided last time is as follows:
  336. {}
  337. Error issued by database as follows:
  338. {}
  339. Please correct the error and write SQL again, only SQL, without any other explanations or text.
  340. """.format(
  341. index_name(tenant_id),
  342. "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
  343. question, sql, tbl["error"]
  344. )
  345. tbl, sql = get_table()
  346. logging.debug("TRY it again: {}".format(sql))
  347. logging.debug("GET table: {}".format(tbl))
  348. if tbl.get("error") or len(tbl["rows"]) == 0:
  349. return None
  350. docid_idx = set([ii for ii, c in enumerate(
  351. tbl["columns"]) if c["name"] == "doc_id"])
  352. doc_name_idx = set([ii for ii, c in enumerate(
  353. tbl["columns"]) if c["name"] == "docnm_kwd"])
  354. column_idx = [ii for ii in range(
  355. len(tbl["columns"])) if ii not in (docid_idx | doc_name_idx)]
  356. # compose Markdown table
  357. columns = "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"],
  358. tbl["columns"][i]["name"])) for i in
  359. column_idx]) + ("|Source|" if docid_idx and docid_idx else "|")
  360. line = "|" + "|".join(["------" for _ in range(len(column_idx))]) + \
  361. ("|------|" if docid_idx and docid_idx else "")
  362. rows = ["|" +
  363. "|".join([rmSpace(str(r[i])) for i in column_idx]).replace("None", " ") +
  364. "|" for r in tbl["rows"]]
  365. rows = [r for r in rows if re.sub(r"[ |]+", "", r)]
  366. if quota:
  367. rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
  368. else:
  369. rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
  370. rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)
  371. if not docid_idx or not doc_name_idx:
  372. logging.warning("SQL missing field: " + sql)
  373. return {
  374. "answer": "\n".join([columns, line, rows]),
  375. "reference": {"chunks": [], "doc_aggs": []},
  376. "prompt": sys_prompt
  377. }
  378. docid_idx = list(docid_idx)[0]
  379. doc_name_idx = list(doc_name_idx)[0]
  380. doc_aggs = {}
  381. for r in tbl["rows"]:
  382. if r[docid_idx] not in doc_aggs:
  383. doc_aggs[r[docid_idx]] = {"doc_name": r[doc_name_idx], "count": 0}
  384. doc_aggs[r[docid_idx]]["count"] += 1
  385. return {
  386. "answer": "\n".join([columns, line, rows]),
  387. "reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[doc_name_idx]} for r in tbl["rows"]],
  388. "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in
  389. doc_aggs.items()]},
  390. "prompt": sys_prompt
  391. }
  392. def relevant(tenant_id, llm_id, question, contents: list):
  393. if llm_id2llm_type(llm_id) == "image2text":
  394. chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
  395. else:
  396. chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
  397. prompt = """
  398. You are a grader assessing relevance of a retrieved document to a user question.
  399. It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
  400. If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
  401. Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question.
  402. No other words needed except 'yes' or 'no'.
  403. """
  404. if not contents:
  405. return False
  406. contents = "Documents: \n" + " - ".join(contents)
  407. contents = f"Question: {question}\n" + contents
  408. if num_tokens_from_string(contents) >= chat_mdl.max_length - 4:
  409. contents = encoder.decode(encoder.encode(contents)[:chat_mdl.max_length - 4])
  410. ans = chat_mdl.chat(prompt, [{"role": "user", "content": contents}], {"temperature": 0.01})
  411. if ans.lower().find("yes") >= 0:
  412. return True
  413. return False
  414. def rewrite(tenant_id, llm_id, question):
  415. if llm_id2llm_type(llm_id) == "image2text":
  416. chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
  417. else:
  418. chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
  419. prompt = """
  420. You are an expert at query expansion to generate a paraphrasing of a question.
  421. I can't retrieval relevant information from the knowledge base by using user's question directly.
  422. You need to expand or paraphrase user's question by multiple ways such as using synonyms words/phrase,
  423. writing the abbreviation in its entirety, adding some extra descriptions or explanations,
  424. changing the way of expression, translating the original question into another language (English/Chinese), etc.
  425. And return 5 versions of question and one is from translation.
  426. Just list the question. No other words are needed.
  427. """
  428. ans = chat_mdl.chat(prompt, [{"role": "user", "content": question}], {"temperature": 0.8})
  429. return ans
  430. def keyword_extraction(chat_mdl, content, topn=3):
  431. prompt = f"""
  432. Role: You're a text analyzer.
  433. Task: extract the most important keywords/phrases of a given piece of text content.
  434. Requirements:
  435. - Summarize the text content, and give top {topn} important keywords/phrases.
  436. - The keywords MUST be in language of the given piece of text content.
  437. - The keywords are delimited by ENGLISH COMMA.
  438. - Keywords ONLY in output.
  439. ### Text Content
  440. {content}
  441. """
  442. msg = [
  443. {"role": "system", "content": prompt},
  444. {"role": "user", "content": "Output: "}
  445. ]
  446. _, msg = message_fit_in(msg, chat_mdl.max_length)
  447. kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
  448. if isinstance(kwd, tuple):
  449. kwd = kwd[0]
  450. if kwd.find("**ERROR**") >= 0:
  451. return ""
  452. return kwd
  453. def question_proposal(chat_mdl, content, topn=3):
  454. prompt = f"""
  455. Role: You're a text analyzer.
  456. Task: propose {topn} questions about a given piece of text content.
  457. Requirements:
  458. - Understand and summarize the text content, and propose top {topn} important questions.
  459. - The questions SHOULD NOT have overlapping meanings.
  460. - The questions SHOULD cover the main content of the text as much as possible.
  461. - The questions MUST be in language of the given piece of text content.
  462. - One question per line.
  463. - Question ONLY in output.
  464. ### Text Content
  465. {content}
  466. """
  467. msg = [
  468. {"role": "system", "content": prompt},
  469. {"role": "user", "content": "Output: "}
  470. ]
  471. _, msg = message_fit_in(msg, chat_mdl.max_length)
  472. kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
  473. if isinstance(kwd, tuple):
  474. kwd = kwd[0]
  475. if kwd.find("**ERROR**") >= 0:
  476. return ""
  477. return kwd
def full_question(tenant_id, llm_id, messages):
    """Condense a multi-turn conversation into one standalone question.

    Sends the USER/ASSISTANT transcript to the chat model with few-shot
    examples, asking it to produce a single self-contained question and to
    convert relative dates ("yesterday", "tomorrow") into absolute ISO dates
    anchored to today.

    Falls back to the last message's content verbatim when the model reply
    contains "**ERROR**".
    """
    # image2text-capable models must be bound through their own LLMType.
    if llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    # Flatten the history into a "ROLE: content" transcript, skipping
    # system/tool messages.
    conv = []
    for m in messages:
        if m["role"] not in ["user", "assistant"]:
            continue
        conv.append("{}: {}".format(m["role"].upper(), m["content"]))
    conv = "\n".join(conv)
    # Anchor dates so the model can resolve relative-date references.
    today = datetime.date.today().isoformat()
    yesterday = (datetime.date.today() - timedelta(days=1)).isoformat()
    tomorrow = (datetime.date.today() + timedelta(days=1)).isoformat()
    prompt = f"""
Role: A helpful assistant
Task and steps:
1. Generate a full user question that would follow the conversation.
2. If the user's question involves relative date, you need to convert it into absolute date based on the current date, which is {today}. For example: 'yesterday' would be converted to {yesterday}.
Requirements & Restrictions:
- Text generated MUST be in the same language of the original user's question.
- If the user's latest question is completely, don't do anything, just return the original question.
- DON'T generate anything except a refined question.
######################
-Examples-
######################
# Example 1
## Conversation
USER: What is the name of Donald Trump's father?
ASSISTANT: Fred Trump.
USER: And his mother?
###############
Output: What's the name of Donald Trump's mother?
------------
# Example 2
## Conversation
USER: What is the name of Donald Trump's father?
ASSISTANT: Fred Trump.
USER: And his mother?
ASSISTANT: Mary Trump.
User: What's her full name?
###############
Output: What's the full name of Donald Trump's mother Mary Trump?
------------
# Example 3
## Conversation
USER: What's the weather today in London?
ASSISTANT: Cloudy.
USER: What's about tomorrow in Rochester?
###############
Output: What's the weather in Rochester on {tomorrow}?
######################
# Real Data
## Conversation
{conv}
###############
"""
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": "Output: "}], {"temperature": 0.2})
    return ans if ans.find("**ERROR**") < 0 else messages[-1]["content"]
  537. def tts(tts_mdl, text):
  538. if not tts_mdl or not text:
  539. return
  540. bin = b""
  541. for chunk in tts_mdl.tts(text):
  542. bin += chunk
  543. return binascii.hexlify(bin).decode("utf-8")
def ask(question, kb_ids, tenant_id):
    """One-shot streaming RAG answer over ``kb_ids`` (no Dialog config).

    Generator. Yields {"answer": partial_text, "reference": {}} while the
    model streams, then a final dict whose "reference" carries the cited
    chunks/documents (with citations inserted into the answer).

    Uses fixed retrieval settings (page 1, top 12, similarity 0.1, vector
    weight 0.3) and the tenant's default chat model.
    """
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    embedding_list = list(set([kb.embd_id for kb in kbs]))

    # Use the knowledge-graph retriever only when EVERY kb was KG-parsed.
    is_knowledge_graph = all([kb.parser_id == ParserType.KG for kb in kbs])
    retriever = settings.retrievaler if not is_knowledge_graph else settings.kg_retrievaler

    # NOTE(review): unlike chat(), no check that all kbs share one embedding
    # model — the first id found is used; confirm callers guarantee this.
    embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embedding_list[0])
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    max_tokens = chat_mdl.max_length
    tenant_ids = list(set([kb.tenant_id for kb in kbs]))
    kbinfos = retriever.retrieval(question, embd_mdl, tenant_ids, kb_ids, 1, 12, 0.1, 0.3, aggs=False)
    knowledges = kb_prompt(kbinfos, max_tokens)
    prompt = """
Role: You're a smart assistant. Your name is Miss R.
Task: Summarize the information from knowledge bases and answer user's question.
Requirements and restriction:
- DO NOT make things up, especially for numbers.
- If the information from knowledge is irrelevant with user's question, JUST SAY: Sorry, no relevant information provided.
- Answer with markdown format text.
- Answer in language of user's question.
- DO NOT make things up, especially for numbers.
### Information from knowledge bases
%s
The above is information from knowledge bases.
""" % "\n".join(knowledges)
    msg = [{"role": "user", "content": question}]

    def decorate_answer(answer):
        """Insert citations into the final answer and build the reference payload."""
        nonlocal knowledges, kbinfos, prompt
        answer, idx = retriever.insert_citations(answer,
                                                 [ck["content_ltks"]
                                                  for ck in kbinfos["chunks"]],
                                                 [ck["vector"]
                                                  for ck in kbinfos["chunks"]],
                                                 embd_mdl,
                                                 tkweight=0.7,
                                                 vtweight=0.3)
        idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
        # Keep only cited docs in the aggregation; fall back to all of them.
        recall_docs = [
            d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
        if not recall_docs:
            recall_docs = kbinfos["doc_aggs"]
        kbinfos["doc_aggs"] = recall_docs
        refs = deepcopy(kbinfos)
        for c in refs["chunks"]:
            if c.get("vector"):
                # Vectors are bulky and useless to the client.
                del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        return {"answer": answer, "reference": refs}

    answer = ""
    for ans in chat_mdl.chat_streamly(prompt, msg, {"temperature": 0.1}):
        answer = ans
        yield {"answer": answer, "reference": {}}
    yield decorate_answer(answer)