
conversation_app.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import re
import traceback
from copy import deepcopy

from flask import request, Response
from flask_login import login_required, current_user

from api import settings
from api.db import LLMType
from api.db.services.conversation_service import ConversationService
from api.db.services.user_service import UserTenantService
from api.db.services.dialog_service import DialogService, chat, ask
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle, TenantService, TenantLLMService
from api.utils.api_utils import get_json_result
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from graphrag.mind_map_extractor import MindMapExtractor
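

# POST /set
# Create a new conversation or update an existing one. The request body
# carries "is_new" and "conversation_id"; when updating, the remaining fields
# are written to the existing record, and when creating, "dialog_id" (plus an
# optional "name") is used to seed the new conversation with the dialog's
# prologue as its first assistant message.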
@manager.route('/set', methods=['POST'])  # noqa: F821
@login_required
def set_conversation():
    req = request.json
    conv_id = req.get("conversation_id")
    is_new = req.get("is_new")
    del req["is_new"]
    if not is_new:
        del req["conversation_id"]
        try:
            if not ConversationService.update_by_id(conv_id, req):
                return get_data_error_result(message="Conversation not found!")
            e, conv = ConversationService.get_by_id(conv_id)
            if not e:
                return get_data_error_result(
                    message="Failed to update the conversation!")
            conv = conv.to_dict()
            return get_json_result(data=conv)
        except Exception as e:
            return server_error_response(e)

    try:
        e, dia = DialogService.get_by_id(req["dialog_id"])
        if not e:
            return get_data_error_result(message="Dialog not found")
        conv = {
            "id": conv_id,
            "dialog_id": req["dialog_id"],
            "name": req.get("name", "New conversation"),
            "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
        }
        ConversationService.save(**conv)
        e, conv = ConversationService.get_by_id(conv["id"])
        if not e:
            return get_data_error_result(message="Failed to create a new conversation!")
        conv = conv.to_dict()
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)
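

# GET /get
# Fetch a single conversation by the "conversation_id" query parameter. The
# caller must belong to a tenant that owns the parent dialog.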
@manager.route('/get', methods=['GET'])  # noqa: F821
@login_required
def get():
    conv_id = request.args["conversation_id"]
    try:
        e, conv = ConversationService.get_by_id(conv_id)
        if not e:
            return get_data_error_result(message="Conversation not found!")
        tenants = UserTenantService.query(user_id=current_user.id)
        for tenant in tenants:
            if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                break
        else:
            return get_json_result(
                data=False, message='Only the owner of the conversation is authorized for this operation.',
                code=settings.RetCode.OPERATING_ERROR)
        conv = conv.to_dict()
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)
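

# POST /rm
# Delete every conversation listed in "conversation_ids", applying the same
# tenant-ownership check as GET /get before each deletion.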
@manager.route('/rm', methods=['POST'])  # noqa: F821
@login_required
def rm():
    conv_ids = request.json["conversation_ids"]
    try:
        for cid in conv_ids:
            exist, conv = ConversationService.get_by_id(cid)
            if not exist:
                return get_data_error_result(message="Conversation not found!")
            tenants = UserTenantService.query(user_id=current_user.id)
            for tenant in tenants:
                if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                    break
            else:
                return get_json_result(
                    data=False, message='Only the owner of the conversation is authorized for this operation.',
                    code=settings.RetCode.OPERATING_ERROR)
            ConversationService.delete_by_id(cid)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
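

# GET /list
# List all conversations of the dialog given by the "dialog_id" query
# parameter, newest first. Only the dialog owner may list them.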
@manager.route('/list', methods=['GET'])  # noqa: F821
@login_required
def list_conversation():
    dialog_id = request.args["dialog_id"]
    try:
        if not DialogService.query(tenant_id=current_user.id, id=dialog_id):
            return get_json_result(
                data=False, message='Only the owner of the dialog is authorized for this operation.',
                code=settings.RetCode.OPERATING_ERROR)
        convs = ConversationService.query(
            dialog_id=dialog_id,
            order_by=ConversationService.model.create_time,
            reverse=True)
        convs = [d.to_dict() for d in convs]
        return get_json_result(data=convs)
    except Exception as e:
        return server_error_response(e)
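

# POST /completion
# Run the dialog's chat pipeline over the submitted "messages" for the given
# "conversation_id". System messages and any leading assistant messages are
# dropped before the history is handed to chat(). With "stream" left at its
# default of True, answers are pushed as server-sent events; each frame looks
# roughly like
#     data:{"code": 0, "message": "", "data": {"answer": "...", "reference": ..., "id": "<message_id>"}}
# and the stream is closed with a final frame whose "data" is true. With
# "stream": false, a single JSON result carrying the last answer is returned.
# The updated message list and references are persisted on the conversation.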
@manager.route('/completion', methods=['POST'])  # noqa: F821
@login_required
@validate_request("conversation_id", "messages")
def completion():
    req = request.json
    msg = []
    for m in req["messages"]:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)
    message_id = msg[-1].get("id")
    try:
        e, conv = ConversationService.get_by_id(req["conversation_id"])
        if not e:
            return get_data_error_result(message="Conversation not found!")
        conv.message = deepcopy(req["messages"])
        e, dia = DialogService.get_by_id(conv.dialog_id)
        if not e:
            return get_data_error_result(message="Dialog not found!")
        del req["conversation_id"]
        del req["messages"]

        if not conv.reference:
            conv.reference = []
        conv.message.append({"role": "assistant", "content": "", "id": message_id})
        conv.reference.append({"chunks": [], "doc_aggs": []})

        def fillin_conv(ans):
            nonlocal conv, message_id
            if not conv.reference:
                conv.reference.append(ans["reference"])
            else:
                conv.reference[-1] = ans["reference"]
            conv.message[-1] = {"role": "assistant", "content": ans["answer"],
                                "id": message_id, "prompt": ans.get("prompt", "")}
            ans["id"] = message_id

        def stream():
            nonlocal dia, msg, req, conv
            try:
                for ans in chat(dia, msg, True, **req):
                    fillin_conv(ans)
                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
                ConversationService.update_by_id(conv.id, conv.to_dict())
            except Exception as e:
                traceback.print_exc()
                yield "data:" + json.dumps({"code": 500, "message": str(e),
                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                           ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

        if req.get("stream", True):
            resp = Response(stream(), mimetype="text/event-stream")
            resp.headers.add_header("Cache-control", "no-cache")
            resp.headers.add_header("Connection", "keep-alive")
            resp.headers.add_header("X-Accel-Buffering", "no")
            resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
            return resp
        else:
            answer = None
            for ans in chat(dia, msg, **req):
                answer = ans
                fillin_conv(ans)
                ConversationService.update_by_id(conv.id, conv.to_dict())
                break
            return get_json_result(data=answer)
    except Exception as e:
        return server_error_response(e)
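

# POST /tts
# Convert "text" to speech with the tenant's default TTS model. The text is
# split on sentence punctuation and the resulting audio chunks are streamed
# back as audio/mpeg; errors are emitted inline as a JSON payload.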
@manager.route('/tts', methods=['POST'])  # noqa: F821
@login_required
def tts():
    req = request.json
    text = req["text"]
    tenants = TenantService.get_info_by(current_user.id)
    if not tenants:
        return get_data_error_result(message="Tenant not found!")
    tts_id = tenants[0]["tts_id"]
    if not tts_id:
        return get_data_error_result(message="No default TTS model is set")
    tts_mdl = LLMBundle(tenants[0]["tenant_id"], LLMType.TTS, tts_id)

    def stream_audio():
        try:
            for txt in re.split(r"[，。/《》？；：！\n\r:;]+", text):
                for chunk in tts_mdl.tts(txt):
                    yield chunk
        except Exception as e:
            yield ("data:" + json.dumps({"code": 500, "message": str(e),
                                         "data": {"answer": "**ERROR**: " + str(e)}},
                                        ensure_ascii=False)).encode('utf-8')

    resp = Response(stream_audio(), mimetype="audio/mpeg")
    resp.headers.add_header("Cache-Control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")
    return resp
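

# POST /delete_msg
# Remove the user message identified by "message_id" together with the
# assistant reply that follows it (the pair shares one id), and drop the
# matching entry from the conversation's reference list.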
@manager.route('/delete_msg', methods=['POST'])  # noqa: F821
@login_required
@validate_request("conversation_id", "message_id")
def delete_msg():
    req = request.json
    e, conv = ConversationService.get_by_id(req["conversation_id"])
    if not e:
        return get_data_error_result(message="Conversation not found!")

    conv = conv.to_dict()
    for i, msg in enumerate(conv["message"]):
        if req["message_id"] != msg.get("id", ""):
            continue
        assert conv["message"][i + 1]["id"] == req["message_id"]
        conv["message"].pop(i)
        conv["message"].pop(i)
        conv["reference"].pop(max(0, i // 2 - 1))
        break

    ConversationService.update_by_id(conv["id"], conv)
    return get_json_result(data=conv)
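

# POST /thumbup
# Record feedback on an assistant message: "set" marks thumbs up or down, and
# for a thumbs-down an optional "feedback" string is stored with the message.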
@manager.route('/thumbup', methods=['POST'])  # noqa: F821
@login_required
@validate_request("conversation_id", "message_id")
def thumbup():
    req = request.json
    e, conv = ConversationService.get_by_id(req["conversation_id"])
    if not e:
        return get_data_error_result(message="Conversation not found!")
    up_down = req.get("set")
    feedback = req.get("feedback", "")
    conv = conv.to_dict()
    for i, msg in enumerate(conv["message"]):
        if req["message_id"] == msg.get("id", "") and msg.get("role", "") == "assistant":
            if up_down:
                msg["thumbup"] = True
                if "feedback" in msg:
                    del msg["feedback"]
            else:
                msg["thumbup"] = False
                if feedback:
                    msg["feedback"] = feedback
            break

    ConversationService.update_by_id(conv["id"], conv)
    return get_json_result(data=conv)
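

# POST /ask
# Answer "question" against the knowledge bases in "kb_ids" without creating
# or updating a conversation. Results are streamed as server-sent events in
# the same frame format used by POST /completion.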
@manager.route('/ask', methods=['POST'])  # noqa: F821
@login_required
@validate_request("question", "kb_ids")
def ask_about():
    req = request.json
    uid = current_user.id

    def stream():
        nonlocal req, uid
        try:
            for ans in ask(req["question"], req["kb_ids"], uid):
                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
        except Exception as e:
            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

    resp = Response(stream(), mimetype="text/event-stream")
    resp.headers.add_header("Cache-control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")
    resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
    return resp
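

# POST /mindmap
# Retrieve top-ranked chunks for "question" from the first knowledge base in
# "kb_ids" and have MindMapExtractor build a mind map from their contents;
# errors reported by the extractor are surfaced as a server error.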
@manager.route('/mindmap', methods=['POST'])  # noqa: F821
@login_required
@validate_request("question", "kb_ids")
def mindmap():
    req = request.json
    kb_ids = req["kb_ids"]
    e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
    if not e:
        return get_data_error_result(message="Knowledgebase not found!")

    embd_mdl = TenantLLMService.model_instance(
        kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
    chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
    ranks = settings.retrievaler.retrieval(req["question"], embd_mdl, kb.tenant_id, kb_ids, 1, 12,
                                           0.3, 0.3, aggs=False)
    mindmap = MindMapExtractor(chat_mdl)
    mind_map = mindmap([c["content_with_weight"] for c in ranks["chunks"]]).output
    if "error" in mind_map:
        return server_error_response(Exception(mind_map["error"]))
    return get_json_result(data=mind_map)
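

# POST /related_questions
# Ask the tenant's chat model for 5-10 search terms related to "question" and
# return them as a plain list, keeping only the numbered lines of the reply.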
@manager.route('/related_questions', methods=['POST'])  # noqa: F821
@login_required
@validate_request("question")
def related_questions():
    req = request.json
    question = req["question"]
    chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
    prompt = """
Objective: To generate search terms related to the user's search keywords, helping users find more valuable information.
Instructions:
- Based on the keywords provided by the user, generate 5-10 related search terms.
- Each search term should be directly or indirectly related to the keyword, guiding the user to find more valuable information.
- Use common, general terms as much as possible, avoiding obscure words or technical jargon.
- Keep the term length between 2-4 words, concise and clear.
- DO NOT translate; use the language of the original keywords.
### Example:
Keywords: Chinese football
Related search terms:
1. Current status of Chinese football
2. Reform of Chinese football
3. Youth training of Chinese football
4. Chinese football in the Asian Cup
5. Chinese football in the World Cup
Reason:
- When searching, users often use only one or two keywords, making it difficult to fully express their information needs.
- Generating related search terms can help users dig deeper into relevant information and improve search efficiency.
- At the same time, related terms can also help search engines better understand user needs and return more accurate search results.
"""
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": f"""
Keywords: {question}
Related search terms:
"""}], {"temperature": 0.9})
    return get_json_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])