Du kan inte välja fler än 25 ämnen Ämnen måste starta med en bokstav eller siffra, kan innehålla bindestreck ('-') och vara max 35 tecken långa.

dialog_service.py 25KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647
  1. #
  2. # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. import binascii
  17. import os
  18. import json
  19. import re
  20. from copy import deepcopy
  21. from timeit import default_timer as timer
  22. from api.db import LLMType, ParserType,StatusEnum
  23. from api.db.db_models import Dialog, Conversation,DB
  24. from api.db.services.common_service import CommonService
  25. from api.db.services.knowledgebase_service import KnowledgebaseService
  26. from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
  27. from api.settings import retrievaler, kg_retrievaler
  28. from rag.app.resume import forbidden_select_fields4resume
  29. from rag.nlp.search import index_name
  30. from rag.utils import rmSpace, num_tokens_from_string, encoder
  31. from api.utils.file_utils import get_project_base_directory
  32. from api.utils.log_utils import logger
  33. class DialogService(CommonService):
  34. model = Dialog
  35. @classmethod
  36. @DB.connection_context()
  37. def get_list(cls, tenant_id,
  38. page_number, items_per_page, orderby, desc, id , name):
  39. chats = cls.model.select()
  40. if id:
  41. chats = chats.where(cls.model.id == id)
  42. if name:
  43. chats = chats.where(cls.model.name == name)
  44. chats = chats.where(
  45. (cls.model.tenant_id == tenant_id)
  46. & (cls.model.status == StatusEnum.VALID.value)
  47. )
  48. if desc:
  49. chats = chats.order_by(cls.model.getter_by(orderby).desc())
  50. else:
  51. chats = chats.order_by(cls.model.getter_by(orderby).asc())
  52. chats = chats.paginate(page_number, items_per_page)
  53. return list(chats.dicts())
  54. class ConversationService(CommonService):
  55. model = Conversation
  56. @classmethod
  57. @DB.connection_context()
  58. def get_list(cls,dialog_id,page_number, items_per_page, orderby, desc, id , name):
  59. sessions = cls.model.select().where(cls.model.dialog_id ==dialog_id)
  60. if id:
  61. sessions = sessions.where(cls.model.id == id)
  62. if name:
  63. sessions = sessions.where(cls.model.name == name)
  64. if desc:
  65. sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
  66. else:
  67. sessions = sessions.order_by(cls.model.getter_by(orderby).asc())
  68. sessions = sessions.paginate(page_number, items_per_page)
  69. return list(sessions.dicts())
  70. def message_fit_in(msg, max_length=4000):
  71. def count():
  72. nonlocal msg
  73. tks_cnts = []
  74. for m in msg:
  75. tks_cnts.append(
  76. {"role": m["role"], "count": num_tokens_from_string(m["content"])})
  77. total = 0
  78. for m in tks_cnts:
  79. total += m["count"]
  80. return total
  81. c = count()
  82. if c < max_length:
  83. return c, msg
  84. msg_ = [m for m in msg[:-1] if m["role"] == "system"]
  85. msg_.append(msg[-1])
  86. msg = msg_
  87. c = count()
  88. if c < max_length:
  89. return c, msg
  90. ll = num_tokens_from_string(msg_[0]["content"])
  91. l = num_tokens_from_string(msg_[-1]["content"])
  92. if ll / (ll + l) > 0.8:
  93. m = msg_[0]["content"]
  94. m = encoder.decode(encoder.encode(m)[:max_length - l])
  95. msg[0]["content"] = m
  96. return max_length, msg
  97. m = msg_[1]["content"]
  98. m = encoder.decode(encoder.encode(m)[:max_length - l])
  99. msg[1]["content"] = m
  100. return max_length, msg
  101. def llm_id2llm_type(llm_id):
  102. llm_id = llm_id.split("@")[0]
  103. fnm = os.path.join(get_project_base_directory(), "conf")
  104. llm_factories = json.load(open(os.path.join(fnm, "llm_factories.json"), "r"))
  105. for llm_factory in llm_factories["factory_llm_infos"]:
  106. for llm in llm_factory["llm"]:
  107. if llm_id == llm["llm_name"]:
  108. return llm["model_type"].strip(",")[-1]
def chat(dialog, messages, stream=True, **kwargs):
    """Generator answering the last user message of *messages* for *dialog*.

    Yields dicts of the shape {"answer", "reference", ...} — partial
    answers while streaming, then one final decorated result containing
    citations and the prompt used.  `stream=False` yields exactly one
    result.  Optional kwargs: "doc_ids" (comma-separated attachment ids),
    "quote" (enable citation insertion), plus any prompt parameters
    declared in dialog.prompt_config["parameters"].
    """
    assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
    st = timer()
    # dialog.llm_id may be "model@factory"; split off the factory id.
    tmp = dialog.llm_id.split("@")
    fid = None
    llm_id = tmp[0]
    if len(tmp) > 1: fid = tmp[1]
    llm = LLMService.query(llm_name=llm_id) if not fid else LLMService.query(llm_name=llm_id, fid=fid)
    if not llm:
        # Fall back to the tenant's own model registrations.
        llm = TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id) if not fid else \
            TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id, llm_factory=fid)
        if not llm:
            raise LookupError("LLM(%s) not found" % dialog.llm_id)
        # Tenant-registered model: no max_tokens record, use a default.
        max_tokens = 8192
    else:
        max_tokens = llm[0].max_tokens
    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    if len(embd_nms) != 1:
        # Mixed embedding models cannot be searched together; bail out.
        yield {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
        return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}

    # Use the knowledge-graph retriever only if ALL KBs are KG-parsed.
    is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
    retr = retrievaler if not is_kg else kg_retrievaler

    # Last three user turns are candidate queries.
    questions = [m["content"] for m in messages if m["role"] == "user"][-3:]
    attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
    if "doc_ids" in messages[-1]:
        attachments = messages[-1]["doc_ids"]
        # Collect attachments referenced in earlier turns as well.
        for m in messages[:-1]:
            if "doc_ids" in m:
                attachments.extend(m["doc_ids"])

    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embd_nms[0])
    if not embd_mdl:
        raise LookupError("Embedding model(%s) not found" % embd_nms[0])

    if llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    prompt_config = dialog.prompt_config
    field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
    tts_mdl = None
    if prompt_config.get("tts"):
        tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
    # try to use sql if field mapping is good to go
    if field_map:
        logger.info("Use SQL to retrieval:{}".format(questions[-1]))
        ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True))
        if ans:
            yield ans
            return

    # Validate prompt parameters; blank out optional ones not supplied.
    for p in prompt_config["parameters"]:
        if p["key"] == "knowledge":
            continue
        if p["key"] not in kwargs and not p["optional"]:
            raise KeyError("Miss parameter: " + p["key"])
        if p["key"] not in kwargs:
            prompt_config["system"] = prompt_config["system"].replace(
                "{%s}" % p["key"], " ")

    if len(questions) > 1 and prompt_config.get("refine_multiturn"):
        # Collapse the multi-turn history into one standalone question.
        questions = [full_question(dialog.tenant_id, dialog.llm_id, messages)]
    else:
        questions = questions[-1:]
    refineQ_tm = timer()
    keyword_tm = timer()

    rerank_mdl = None
    if dialog.rerank_id:
        rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)

    # Duplicate the last question to weight it in retrieval scoring
    # (no-op for a single question since 1 // 2 == 0).
    for _ in range(len(questions) // 2):
        questions.append(questions[-1])

    if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
        # Prompt does not consume retrieved knowledge; skip retrieval.
        kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
    else:
        if prompt_config.get("keyword", False):
            # Augment the query with LLM-extracted keywords.
            questions[-1] += keyword_extraction(chat_mdl, questions[-1])
            keyword_tm = timer()

        tenant_ids = list(set([kb.tenant_id for kb in kbs]))
        kbinfos = retr.retrieval(" ".join(questions), embd_mdl, tenant_ids, dialog.kb_ids, 1, dialog.top_n,
                                 dialog.similarity_threshold,
                                 dialog.vector_similarity_weight,
                                 doc_ids=attachments,
                                 top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl)
    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
    logger.info(
        "{}->{}".format(" ".join(questions), "\n->".join(knowledges)))
    retrieval_tm = timer()

    if not knowledges and prompt_config.get("empty_response"):
        # Nothing retrieved: answer with the configured canned response.
        empty_res = prompt_config["empty_response"]
        yield {"answer": empty_res, "reference": kbinfos, "audio_binary": tts(tts_mdl, empty_res)}
        return {"answer": prompt_config["empty_response"], "reference": kbinfos}

    kwargs["knowledge"] = "\n\n------\n\n".join(knowledges)
    gen_conf = dialog.llm_setting

    msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
    # Strip citation markers (##N$$) from previous turns before resending.
    msg.extend([{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])}
                for m in messages if m["role"] != "system"])
    # Keep 3% of the window as headroom for the model's bookkeeping.
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
    assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
    prompt = msg[0]["content"]
    prompt += "\n\n### Query:\n%s" % " ".join(questions)

    if "max_tokens" in gen_conf:
        # Never ask for more completion tokens than the window allows.
        gen_conf["max_tokens"] = min(
            gen_conf["max_tokens"],
            max_tokens - used_token_count)

    def decorate_answer(answer):
        """Attach citations, references and timing info to the final answer."""
        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_tm
        refs = []
        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            answer, idx = retr.insert_citations(answer,
                                                [ck["content_ltks"]
                                                 for ck in kbinfos["chunks"]],
                                                [ck["vector"]
                                                 for ck in kbinfos["chunks"]],
                                                embd_mdl,
                                                tkweight=1 - dialog.vector_similarity_weight,
                                                vtweight=dialog.vector_similarity_weight)
            # Keep only documents actually cited; fall back to all.
            idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
            recall_docs = [
                d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
            if not recall_docs: recall_docs = kbinfos["doc_aggs"]
            kbinfos["doc_aggs"] = recall_docs

            refs = deepcopy(kbinfos)
            # Vectors are large and useless to the client; drop them.
            for c in refs["chunks"]:
                if c.get("vector"):
                    del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        done_tm = timer()
        prompt += "\n\n### Elapsed\n - Refine Question: %.1f ms\n - Keywords: %.1f ms\n - Retrieval: %.1f ms\n - LLM: %.1f ms" % (
            (refineQ_tm - st) * 1000, (keyword_tm - refineQ_tm) * 1000, (retrieval_tm - keyword_tm) * 1000,
            (done_tm - retrieval_tm) * 1000)
        return {"answer": answer, "reference": refs, "prompt": prompt}

    if stream:
        last_ans = ""
        answer = ""
        for ans in chat_mdl.chat_streamly(prompt, msg[1:], gen_conf):
            answer = ans
            delta_ans = ans[len(last_ans):]
            # Batch tiny deltas (< 16 tokens) to reduce yield/TTS churn.
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        # Flush whatever remained below the 16-token threshold.
        delta_ans = answer[len(last_ans):]
        if delta_ans:
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        yield decorate_answer(answer)
    else:
        answer = chat_mdl.chat(prompt, msg[1:], gen_conf)
        logger.info("User: {}|Assistant: {}".format(
            msg[-1]["content"], answer))
        res = decorate_answer(answer)
        res["audio_binary"] = tts(tts_mdl, answer)
        yield res
  259. def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
  260. sys_prompt = "你是一个DBA。你需要这对以下表的字段结构,根据用户的问题列表,写出最后一个问题对应的SQL。"
  261. user_promt = """
  262. 表名:{};
  263. 数据库表字段说明如下:
  264. {}
  265. 问题如下:
  266. {}
  267. 请写出SQL, 且只要SQL,不要有其他说明及文字。
  268. """.format(
  269. index_name(tenant_id),
  270. "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
  271. question
  272. )
  273. tried_times = 0
  274. def get_table():
  275. nonlocal sys_prompt, user_promt, question, tried_times
  276. sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_promt}], {
  277. "temperature": 0.06})
  278. logger.info(f"{question} ==> {user_promt} get SQL: {sql}")
  279. sql = re.sub(r"[\r\n]+", " ", sql.lower())
  280. sql = re.sub(r".*select ", "select ", sql.lower())
  281. sql = re.sub(r" +", " ", sql)
  282. sql = re.sub(r"([;;]|```).*", "", sql)
  283. if sql[:len("select ")] != "select ":
  284. return None, None
  285. if not re.search(r"((sum|avg|max|min)\(|group by )", sql.lower()):
  286. if sql[:len("select *")] != "select *":
  287. sql = "select doc_id,docnm_kwd," + sql[6:]
  288. else:
  289. flds = []
  290. for k in field_map.keys():
  291. if k in forbidden_select_fields4resume:
  292. continue
  293. if len(flds) > 11:
  294. break
  295. flds.append(k)
  296. sql = "select doc_id,docnm_kwd," + ",".join(flds) + sql[8:]
  297. logger.info(f"{question} get SQL(refined): {sql}")
  298. tried_times += 1
  299. return retrievaler.sql_retrieval(sql, format="json"), sql
  300. tbl, sql = get_table()
  301. if tbl is None:
  302. return None
  303. if tbl.get("error") and tried_times <= 2:
  304. user_promt = """
  305. 表名:{};
  306. 数据库表字段说明如下:
  307. {}
  308. 问题如下:
  309. {}
  310. 你上一次给出的错误SQL如下:
  311. {}
  312. 后台报错如下:
  313. {}
  314. 请纠正SQL中的错误再写一遍,且只要SQL,不要有其他说明及文字。
  315. """.format(
  316. index_name(tenant_id),
  317. "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
  318. question, sql, tbl["error"]
  319. )
  320. tbl, sql = get_table()
  321. logger.info("TRY it again: {}".format(sql))
  322. logger.info("GET table: {}".format(tbl))
  323. if tbl.get("error") or len(tbl["rows"]) == 0:
  324. return None
  325. docid_idx = set([ii for ii, c in enumerate(
  326. tbl["columns"]) if c["name"] == "doc_id"])
  327. docnm_idx = set([ii for ii, c in enumerate(
  328. tbl["columns"]) if c["name"] == "docnm_kwd"])
  329. clmn_idx = [ii for ii in range(
  330. len(tbl["columns"])) if ii not in (docid_idx | docnm_idx)]
  331. # compose markdown table
  332. clmns = "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"],
  333. tbl["columns"][i]["name"])) for i in
  334. clmn_idx]) + ("|Source|" if docid_idx and docid_idx else "|")
  335. line = "|" + "|".join(["------" for _ in range(len(clmn_idx))]) + \
  336. ("|------|" if docid_idx and docid_idx else "")
  337. rows = ["|" +
  338. "|".join([rmSpace(str(r[i])) for i in clmn_idx]).replace("None", " ") +
  339. "|" for r in tbl["rows"]]
  340. rows = [r for r in rows if re.sub(r"[ |]+", "", r)]
  341. if quota:
  342. rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
  343. else:
  344. rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
  345. rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)
  346. if not docid_idx or not docnm_idx:
  347. logger.warning("SQL missing field: " + sql)
  348. return {
  349. "answer": "\n".join([clmns, line, rows]),
  350. "reference": {"chunks": [], "doc_aggs": []},
  351. "prompt": sys_prompt
  352. }
  353. docid_idx = list(docid_idx)[0]
  354. docnm_idx = list(docnm_idx)[0]
  355. doc_aggs = {}
  356. for r in tbl["rows"]:
  357. if r[docid_idx] not in doc_aggs:
  358. doc_aggs[r[docid_idx]] = {"doc_name": r[docnm_idx], "count": 0}
  359. doc_aggs[r[docid_idx]]["count"] += 1
  360. return {
  361. "answer": "\n".join([clmns, line, rows]),
  362. "reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[docnm_idx]} for r in tbl["rows"]],
  363. "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in
  364. doc_aggs.items()]},
  365. "prompt": sys_prompt
  366. }
  367. def relevant(tenant_id, llm_id, question, contents: list):
  368. if llm_id2llm_type(llm_id) == "image2text":
  369. chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
  370. else:
  371. chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
  372. prompt = """
  373. You are a grader assessing relevance of a retrieved document to a user question.
  374. It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
  375. If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
  376. Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question.
  377. No other words needed except 'yes' or 'no'.
  378. """
  379. if not contents:return False
  380. contents = "Documents: \n" + " - ".join(contents)
  381. contents = f"Question: {question}\n" + contents
  382. if num_tokens_from_string(contents) >= chat_mdl.max_length - 4:
  383. contents = encoder.decode(encoder.encode(contents)[:chat_mdl.max_length - 4])
  384. ans = chat_mdl.chat(prompt, [{"role": "user", "content": contents}], {"temperature": 0.01})
  385. if ans.lower().find("yes") >= 0: return True
  386. return False
  387. def rewrite(tenant_id, llm_id, question):
  388. if llm_id2llm_type(llm_id) == "image2text":
  389. chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
  390. else:
  391. chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
  392. prompt = """
  393. You are an expert at query expansion to generate a paraphrasing of a question.
  394. I can't retrieval relevant information from the knowledge base by using user's question directly.
  395. You need to expand or paraphrase user's question by multiple ways such as using synonyms words/phrase,
  396. writing the abbreviation in its entirety, adding some extra descriptions or explanations,
  397. changing the way of expression, translating the original question into another language (English/Chinese), etc.
  398. And return 5 versions of question and one is from translation.
  399. Just list the question. No other words are needed.
  400. """
  401. ans = chat_mdl.chat(prompt, [{"role": "user", "content": question}], {"temperature": 0.8})
  402. return ans
  403. def keyword_extraction(chat_mdl, content, topn=3):
  404. prompt = f"""
  405. Role: You're a text analyzer.
  406. Task: extract the most important keywords/phrases of a given piece of text content.
  407. Requirements:
  408. - Summarize the text content, and give top {topn} important keywords/phrases.
  409. - The keywords MUST be in language of the given piece of text content.
  410. - The keywords are delimited by ENGLISH COMMA.
  411. - Keywords ONLY in output.
  412. ### Text Content
  413. {content}
  414. """
  415. msg = [
  416. {"role": "system", "content": prompt},
  417. {"role": "user", "content": "Output: "}
  418. ]
  419. _, msg = message_fit_in(msg, chat_mdl.max_length)
  420. kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
  421. if isinstance(kwd, tuple): kwd = kwd[0]
  422. if kwd.find("**ERROR**") >=0: return ""
  423. return kwd
  424. def question_proposal(chat_mdl, content, topn=3):
  425. prompt = f"""
  426. Role: You're a text analyzer.
  427. Task: propose {topn} questions about a given piece of text content.
  428. Requirements:
  429. - Understand and summarize the text content, and propose top {topn} important questions.
  430. - The questions SHOULD NOT have overlapping meanings.
  431. - The questions SHOULD cover the main content of the text as much as possible.
  432. - The questions MUST be in language of the given piece of text content.
  433. - One question per line.
  434. - Question ONLY in output.
  435. ### Text Content
  436. {content}
  437. """
  438. msg = [
  439. {"role": "system", "content": prompt},
  440. {"role": "user", "content": "Output: "}
  441. ]
  442. _, msg = message_fit_in(msg, chat_mdl.max_length)
  443. kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
  444. if isinstance(kwd, tuple): kwd = kwd[0]
  445. if kwd.find("**ERROR**") >= 0: return ""
  446. return kwd
  447. def full_question(tenant_id, llm_id, messages):
  448. if llm_id2llm_type(llm_id) == "image2text":
  449. chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
  450. else:
  451. chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
  452. conv = []
  453. for m in messages:
  454. if m["role"] not in ["user", "assistant"]: continue
  455. conv.append("{}: {}".format(m["role"].upper(), m["content"]))
  456. conv = "\n".join(conv)
  457. prompt = f"""
  458. Role: A helpful assistant
  459. Task: Generate a full user question that would follow the conversation.
  460. Requirements & Restrictions:
  461. - Text generated MUST be in the same language of the original user's question.
  462. - If the user's latest question is completely, don't do anything, just return the original question.
  463. - DON'T generate anything except a refined question.
  464. ######################
  465. -Examples-
  466. ######################
  467. # Example 1
  468. ## Conversation
  469. USER: What is the name of Donald Trump's father?
  470. ASSISTANT: Fred Trump.
  471. USER: And his mother?
  472. ###############
  473. Output: What's the name of Donald Trump's mother?
  474. ------------
  475. # Example 2
  476. ## Conversation
  477. USER: What is the name of Donald Trump's father?
  478. ASSISTANT: Fred Trump.
  479. USER: And his mother?
  480. ASSISTANT: Mary Trump.
  481. User: What's her full name?
  482. ###############
  483. Output: What's the full name of Donald Trump's mother Mary Trump?
  484. ######################
  485. # Real Data
  486. ## Conversation
  487. {conv}
  488. ###############
  489. """
  490. ans = chat_mdl.chat(prompt, [{"role": "user", "content": "Output: "}], {"temperature": 0.2})
  491. return ans if ans.find("**ERROR**") < 0 else messages[-1]["content"]
  492. def tts(tts_mdl, text):
  493. if not tts_mdl or not text: return
  494. bin = b""
  495. for chunk in tts_mdl.tts(text):
  496. bin += chunk
  497. return binascii.hexlify(bin).decode("utf-8")
def ask(question, kb_ids, tenant_id):
    """Generator: answer *question* from the given knowledge bases.

    Streams partial {"answer", "reference"} dicts, then yields one final
    dict with citations inserted and reference chunks attached.  Uses the
    tenant's default chat model and the KBs' embedding model.
    """
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))

    # Use the knowledge-graph retriever only if ALL KBs are KG-parsed.
    is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
    retr = retrievaler if not is_kg else kg_retrievaler

    embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embd_nms[0])
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    max_tokens = chat_mdl.max_length

    kbinfos = retr.retrieval(question, embd_mdl, tenant_id, kb_ids, 1, 12, 0.1, 0.3, aggs=False)
    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]

    # Keep only as many chunks as fit in ~97% of the context window.
    used_token_count = 0
    for i, c in enumerate(knowledges):
        used_token_count += num_tokens_from_string(c)
        if max_tokens * 0.97 < used_token_count:
            knowledges = knowledges[:i]
            break

    prompt = """
Role: You're a smart assistant. Your name is Miss R.
Task: Summarize the information from knowledge bases and answer user's question.
Requirements and restriction:
- DO NOT make things up, especially for numbers.
- If the information from knowledge is irrelevant with user's question, JUST SAY: Sorry, no relevant information provided.
- Answer with markdown format text.
- Answer in language of user's question.
- DO NOT make things up, especially for numbers.
### Information from knowledge bases
%s
The above is information from knowledge bases.
"""%"\n".join(knowledges)
    msg = [{"role": "user", "content": question}]

    def decorate_answer(answer):
        """Insert citations into the final answer and build the reference payload."""
        nonlocal knowledges, kbinfos, prompt
        answer, idx = retr.insert_citations(answer,
                                            [ck["content_ltks"]
                                             for ck in kbinfos["chunks"]],
                                            [ck["vector"]
                                             for ck in kbinfos["chunks"]],
                                            embd_mdl,
                                            tkweight=0.7,
                                            vtweight=0.3)
        # Keep only documents actually cited; fall back to all.
        idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
        recall_docs = [
            d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
        if not recall_docs: recall_docs = kbinfos["doc_aggs"]
        kbinfos["doc_aggs"] = recall_docs
        refs = deepcopy(kbinfos)
        # Vectors are large and useless to the client; drop them.
        for c in refs["chunks"]:
            if c.get("vector"):
                del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        return {"answer": answer, "reference": refs}

    answer = ""
    for ans in chat_mdl.chat_streamly(prompt, msg, {"temperature": 0.1}):
        answer = ans
        yield {"answer": answer, "reference": {}}
    yield decorate_answer(answer)