conversation_app.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import re
from copy import deepcopy

from api.db.services.user_service import UserTenantService
from flask import request, Response
from flask_login import login_required, current_user

from api.db import LLMType
from api.db.services.dialog_service import DialogService, ConversationService, chat, ask
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle, TenantService, TenantLLMService
from api.settings import RetCode, retrievaler
from api.utils import get_uuid
from api.utils.api_utils import get_json_result
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from graphrag.mind_map_extractor import MindMapExtractor

# NOTE: `manager` is intentionally not imported here; the application loader is
# expected to inject this module's Flask blueprint under that name before the
# routes below are registered.

@manager.route('/set', methods=['POST'])
@login_required
def set_conversation():
    req = request.json
    conv_id = req.get("conversation_id")
    if conv_id:
        # Update an existing conversation with the remaining request fields.
        del req["conversation_id"]
        try:
            if not ConversationService.update_by_id(conv_id, req):
                return get_data_error_result(retmsg="Conversation not found!")
            e, conv = ConversationService.get_by_id(conv_id)
            if not e:
                return get_data_error_result(
                    retmsg="Failed to update the conversation!")
            conv = conv.to_dict()
            return get_json_result(data=conv)
        except Exception as e:
            return server_error_response(e)

    # No conversation_id given: create a new conversation under the dialog,
    # seeded with the dialog's prologue as the first assistant message.
    try:
        e, dia = DialogService.get_by_id(req["dialog_id"])
        if not e:
            return get_data_error_result(retmsg="Dialog not found")
        conv = {
            "id": get_uuid(),
            "dialog_id": req["dialog_id"],
            "name": req.get("name", "New conversation"),
            "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
        }
        ConversationService.save(**conv)
        e, conv = ConversationService.get_by_id(conv["id"])
        if not e:
            return get_data_error_result(retmsg="Failed to create the conversation!")
        conv = conv.to_dict()
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)

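# Illustrative /set payloads (field names taken from the handler above; the
# ids are made up):
#   Create: {"dialog_id": "<dialog uuid>", "name": "New conversation"}
#   Update: {"conversation_id": "<conversation uuid>", "name": "Renamed chat"}
# Both calls return the full conversation record under "data".
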
@manager.route('/get', methods=['GET'])
@login_required
def get():
    conv_id = request.args["conversation_id"]
    try:
        e, conv = ConversationService.get_by_id(conv_id)
        if not e:
            return get_data_error_result(retmsg="Conversation not found!")
        # The conversation is readable only if its dialog belongs to one of the
        # current user's tenants.
        tenants = UserTenantService.query(user_id=current_user.id)
        for tenant in tenants:
            if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                break
        else:
            return get_json_result(
                data=False, retmsg='Only the owner of the conversation is authorized for this operation.',
                retcode=RetCode.OPERATING_ERROR)
        conv = conv.to_dict()
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)

@manager.route('/rm', methods=['POST'])
@login_required
def rm():
    conv_ids = request.json["conversation_ids"]
    try:
        for cid in conv_ids:
            exist, conv = ConversationService.get_by_id(cid)
            if not exist:
                return get_data_error_result(retmsg="Conversation not found!")
            tenants = UserTenantService.query(user_id=current_user.id)
            for tenant in tenants:
                if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                    break
            else:
                return get_json_result(
                    data=False, retmsg='Only the owner of the conversation is authorized for this operation.',
                    retcode=RetCode.OPERATING_ERROR)
            ConversationService.delete_by_id(cid)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

@manager.route('/list', methods=['GET'])
@login_required
def list_conversation():
    dialog_id = request.args["dialog_id"]
    try:
        if not DialogService.query(tenant_id=current_user.id, id=dialog_id):
            return get_json_result(
                data=False, retmsg='Only the owner of the dialog is authorized for this operation.',
                retcode=RetCode.OPERATING_ERROR)
        # Newest conversations first.
        convs = ConversationService.query(
            dialog_id=dialog_id,
            order_by=ConversationService.model.create_time,
            reverse=True)
        convs = [d.to_dict() for d in convs]
        return get_json_result(data=convs)
    except Exception as e:
        return server_error_response(e)

@manager.route('/completion', methods=['POST'])
@login_required
@validate_request("conversation_id", "messages")
def completion():
    req = request.json
    # req = {"conversation_id": "9aaaca4c11d311efa461fa163e197198", "messages": [
    #     {"role": "user", "content": "Is it available in Shanghai?"}
    # ]}
    # Drop system messages and any leading assistant message (the prologue);
    # only the remaining turns are sent to the chat model.
    msg = []
    for m in req["messages"]:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)
    message_id = msg[-1].get("id")
    try:
        e, conv = ConversationService.get_by_id(req["conversation_id"])
        if not e:
            return get_data_error_result(retmsg="Conversation not found!")
        conv.message = deepcopy(req["messages"])
        e, dia = DialogService.get_by_id(conv.dialog_id)
        if not e:
            return get_data_error_result(retmsg="Dialog not found!")
        del req["conversation_id"]
        del req["messages"]

        if not conv.reference:
            conv.reference = []
        conv.message.append({"role": "assistant", "content": "", "id": message_id})
        conv.reference.append({"chunks": [], "doc_aggs": []})

        def fillin_conv(ans):
            # Write the latest (partial) answer and its references back onto the
            # in-memory conversation so it can be persisted after streaming.
            nonlocal conv, message_id
            if not conv.reference:
                conv.reference.append(ans["reference"])
            else:
                conv.reference[-1] = ans["reference"]
            conv.message[-1] = {"role": "assistant", "content": ans["answer"],
                                "id": message_id, "prompt": ans.get("prompt", "")}
            ans["id"] = message_id

        def stream():
            nonlocal dia, msg, req, conv
            try:
                for ans in chat(dia, msg, True, **req):
                    fillin_conv(ans)
                    yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
                ConversationService.update_by_id(conv.id, conv.to_dict())
            except Exception as e:
                yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                           ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"

        if req.get("stream", True):
            resp = Response(stream(), mimetype="text/event-stream")
            resp.headers.add_header("Cache-control", "no-cache")
            resp.headers.add_header("Connection", "keep-alive")
            resp.headers.add_header("X-Accel-Buffering", "no")
            resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
            return resp
        else:
            answer = None
            for ans in chat(dia, msg, **req):
                # Non-streaming mode: take the first complete answer and persist it.
                answer = ans
                fillin_conv(ans)
                ConversationService.update_by_id(conv.id, conv.to_dict())
                break
            return get_json_result(data=answer)
    except Exception as e:
        return server_error_response(e)

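# Illustrative shape of the /completion SSE stream (values are made up; the
# keys mirror what stream() above emits):
#   data:{"retcode": 0, "retmsg": "", "data": {"answer": "partial text...", "reference": {...}, "id": "<message id>"}}
#   ...
#   data:{"retcode": 0, "retmsg": "", "data": true}
# With {"stream": false} in the request body, a single JSON result is returned instead.
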
@manager.route('/tts', methods=['POST'])
@login_required
def tts():
    req = request.json
    text = req["text"]
    tenants = TenantService.get_by_user_id(current_user.id)
    if not tenants:
        return get_data_error_result(retmsg="Tenant not found!")
    tts_id = tenants[0]["tts_id"]
    if not tts_id:
        return get_data_error_result(retmsg="No default TTS model is set")
    tts_mdl = LLMBundle(tenants[0]["tenant_id"], LLMType.TTS, tts_id)

    def stream_audio():
        try:
            for chunk in tts_mdl.tts(text):
                yield chunk
        except Exception as e:
            yield ("data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                         "data": {"answer": "**ERROR**: " + str(e)}},
                                        ensure_ascii=False)).encode('utf-8')

    resp = Response(stream_audio(), mimetype="audio/mpeg")
    resp.headers.add_header("Cache-Control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")
    return resp

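# Illustrative /tts request (the only field read above is "text"):
#   {"text": "Hello, how can I help you today?"}
# The response body is a streamed audio/mpeg payload; if synthesis fails
# mid-stream, an error JSON (as built in stream_audio above) is emitted as
# UTF-8 bytes inside the same stream.
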
@manager.route('/delete_msg', methods=['POST'])
@login_required
@validate_request("conversation_id", "message_id")
def delete_msg():
    req = request.json
    e, conv = ConversationService.get_by_id(req["conversation_id"])
    if not e:
        return get_data_error_result(retmsg="Conversation not found!")
    conv = conv.to_dict()
    for i, msg in enumerate(conv["message"]):
        if req["message_id"] != msg.get("id", ""):
            continue
        # Messages are stored as user/assistant pairs that share the same id,
        # so remove both turns plus the reference entry attached to the answer.
        assert conv["message"][i + 1]["id"] == req["message_id"]
        conv["message"].pop(i)
        conv["message"].pop(i)
        conv["reference"].pop(max(0, i // 2 - 1))
        break
    ConversationService.update_by_id(conv["id"], conv)
    return get_json_result(data=conv)

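# A hypothetical before/after for /delete_msg (ids are made up), matching the
# paired removal above:
#   before:  [prologue, {user, id=m1}, {assistant, id=m1}, {user, id=m2}, {assistant, id=m2}]
#   request: {"conversation_id": "...", "message_id": "m1"}
#   after:   [prologue, {user, id=m2}, {assistant, id=m2}]
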
@manager.route('/thumbup', methods=['POST'])
@login_required
@validate_request("conversation_id", "message_id")
def thumbup():
    req = request.json
    e, conv = ConversationService.get_by_id(req["conversation_id"])
    if not e:
        return get_data_error_result(retmsg="Conversation not found!")
    up_down = req.get("set")
    feedback = req.get("feedback", "")
    conv = conv.to_dict()
    for i, msg in enumerate(conv["message"]):
        if req["message_id"] == msg.get("id", "") and msg.get("role", "") == "assistant":
            if up_down:
                msg["thumbup"] = True
                if "feedback" in msg:
                    del msg["feedback"]
            else:
                msg["thumbup"] = False
                if feedback:
                    msg["feedback"] = feedback
            break
    ConversationService.update_by_id(conv["id"], conv)
    return get_json_result(data=conv)

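# Illustrative /thumbup payloads (fields as read by the handler above):
#   Upvote:   {"conversation_id": "...", "message_id": "...", "set": true}
#   Downvote: {"conversation_id": "...", "message_id": "...", "set": false, "feedback": "wrong citation"}
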
@manager.route('/ask', methods=['POST'])
@login_required
@validate_request("question", "kb_ids")
def ask_about():
    req = request.json
    uid = current_user.id

    def stream():
        nonlocal req, uid
        try:
            for ans in ask(req["question"], req["kb_ids"], uid):
                yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
        except Exception as e:
            yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"

    resp = Response(stream(), mimetype="text/event-stream")
    resp.headers.add_header("Cache-control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")
    resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
    return resp

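# Illustrative /ask request (keys as validated above; kb ids are made up):
#   {"question": "What is the refund policy?", "kb_ids": ["kb-uuid-1", "kb-uuid-2"]}
# The response uses the same SSE envelope as /completion and ends with
# data:{"retcode": 0, "retmsg": "", "data": true}.
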
@manager.route('/mindmap', methods=['POST'])
@login_required
@validate_request("question", "kb_ids")
def mindmap():
    req = request.json
    kb_ids = req["kb_ids"]
    e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
    if not e:
        return get_data_error_result(retmsg="Knowledgebase not found!")
    embd_mdl = TenantLLMService.model_instance(
        kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
    chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
    # Retrieve the top-ranked chunks for the question, then build a mind map
    # from their contents.
    ranks = retrievaler.retrieval(req["question"], embd_mdl, kb.tenant_id, kb_ids, 1, 12,
                                  0.3, 0.3, aggs=False)
    mindmap = MindMapExtractor(chat_mdl)
    mind_map = mindmap([c["content_with_weight"] for c in ranks["chunks"]]).output
    return get_json_result(data=mind_map)

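# Illustrative /mindmap request (keys as validated above):
#   {"question": "Summarize the onboarding guide", "kb_ids": ["kb-uuid-1"]}
# The positional retrieval arguments (1, 12, 0.3, 0.3) presumably correspond to
# page, page size, similarity threshold and vector-similarity weight; verify
# against retrievaler.retrieval's signature before tuning them.
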
@manager.route('/related_questions', methods=['POST'])
@login_required
@validate_request("question")
def related_questions():
    req = request.json
    question = req["question"]
    chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
    prompt = """
Objective: To generate search terms related to the user's search keywords, helping users find more valuable information.
Instructions:
- Based on the keywords provided by the user, generate 5-10 related search terms.
- Each search term should be directly or indirectly related to the keyword, guiding the user to find more valuable information.
- Use common, general terms as much as possible, avoiding obscure words or technical jargon.
- Keep the term length between 2-4 words, concise and clear.
- DO NOT translate, use the language of the original keywords.
### Example:
Keywords: Chinese football
Related search terms:
1. Current status of Chinese football
2. Reform of Chinese football
3. Youth training of Chinese football
4. Chinese football in the Asian Cup
5. Chinese football in the World Cup
Reason:
- When searching, users often only use one or two keywords, making it difficult to fully express their information needs.
- Generating related search terms can help users dig deeper into relevant information and improve search efficiency.
- At the same time, related terms can also help search engines better understand user needs and return more accurate search results.
"""
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": f"""
Keywords: {question}
Related search terms:
"""}], {"temperature": 0.9})
    # Keep only the numbered lines of the model output and strip the numbering.
    return get_json_result(data=[re.sub(r"^[0-9]+\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]+\. ", a)])
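
# An illustrative round trip for /related_questions (the model output is made
# up; the parsing matches the numbered-line regex above):
#   request:        {"question": "Chinese football"}
#   raw LLM answer: "1. Current status of Chinese football\n2. Reform of Chinese football\n..."
#   response data:  ["Current status of Chinese football", "Reform of Chinese football", ...]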