conversation_app.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import re
import traceback
from copy import deepcopy

from api.db.services.user_service import UserTenantService
from flask import request, Response
from flask_login import login_required, current_user
from api.db import LLMType
from api.db.services.dialog_service import DialogService, ConversationService, chat, ask
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle, TenantService, TenantLLMService
from api import settings
from api.utils.api_utils import get_json_result
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from graphrag.mind_map_extractor import MindMapExtractor
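

# Conversation endpoints of the chat API: /set, /get, /rm, /list, /completion,
# /tts, /delete_msg, /thumbup, /ask, /mindmap and /related_questions.
# NOTE: `manager` (the Flask blueprint these routes attach to) is not imported
# in this file; it is presumably injected into the module namespace by the
# surrounding API app loader.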


@manager.route('/set', methods=['POST'])
@login_required
def set_conversation():
    """Create a new conversation or update an existing one, depending on `is_new`."""
    req = request.json
    conv_id = req.get("conversation_id")
    is_new = req.get("is_new")
    del req["is_new"]

    if not is_new:
        del req["conversation_id"]
        try:
            if not ConversationService.update_by_id(conv_id, req):
                return get_data_error_result(message="Conversation not found!")
            e, conv = ConversationService.get_by_id(conv_id)
            if not e:
                return get_data_error_result(
                    message="Failed to update the conversation!")
            conv = conv.to_dict()
            return get_json_result(data=conv)
        except Exception as e:
            return server_error_response(e)

    # New conversation: seed it with the dialog's prologue as the first assistant message.
    try:
        e, dia = DialogService.get_by_id(req["dialog_id"])
        if not e:
            return get_data_error_result(message="Dialog not found")
        conv = {
            "id": conv_id,
            "dialog_id": req["dialog_id"],
            "name": req.get("name", "New conversation"),
            "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
        }
        ConversationService.save(**conv)
        e, conv = ConversationService.get_by_id(conv["id"])
        if not e:
            return get_data_error_result(message="Failed to create a new conversation!")
        conv = conv.to_dict()
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)


@manager.route('/get', methods=['GET'])
@login_required
def get():
    """Return a conversation by id if the current user owns its parent dialog."""
    conv_id = request.args["conversation_id"]
    try:
        e, conv = ConversationService.get_by_id(conv_id)
        if not e:
            return get_data_error_result(message="Conversation not found!")
        tenants = UserTenantService.query(user_id=current_user.id)
        for tenant in tenants:
            if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                break
        else:
            return get_json_result(
                data=False, message='Only the conversation owner is authorized for this operation.',
                code=settings.RetCode.OPERATING_ERROR)
        conv = conv.to_dict()
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@login_required
def rm():
    """Delete the given conversations after checking ownership of each one."""
    conv_ids = request.json["conversation_ids"]
    try:
        for cid in conv_ids:
            exist, conv = ConversationService.get_by_id(cid)
            if not exist:
                return get_data_error_result(message="Conversation not found!")
            tenants = UserTenantService.query(user_id=current_user.id)
            for tenant in tenants:
                if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                    break
            else:
                return get_json_result(
                    data=False, message='Only the conversation owner is authorized for this operation.',
                    code=settings.RetCode.OPERATING_ERROR)
            ConversationService.delete_by_id(cid)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/list', methods=['GET'])
@login_required
def list_conversation():
    """List all conversations of a dialog, newest first."""
    dialog_id = request.args["dialog_id"]
    try:
        if not DialogService.query(tenant_id=current_user.id, id=dialog_id):
            return get_json_result(
                data=False, message='Only the dialog owner is authorized for this operation.',
                code=settings.RetCode.OPERATING_ERROR)
        convs = ConversationService.query(
            dialog_id=dialog_id,
            order_by=ConversationService.model.create_time,
            reverse=True)
        convs = [d.to_dict() for d in convs]
        return get_json_result(data=convs)
    except Exception as e:
        return server_error_response(e)
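

# The streaming endpoints below (/completion, /ask) reply with Server-Sent Events:
# each frame is "data:" + a JSON envelope + "\n\n". Frames with "code": 0 carry the
# incrementally updated answer and its retrieval reference, a "code": 500 frame
# carries an error message, and a final frame with "data": true marks the end of
# the stream.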


@manager.route('/completion', methods=['POST'])
@login_required
@validate_request("conversation_id", "messages")
def completion():
    """Run a chat completion for a conversation, streamed as SSE unless `stream` is false."""
    req = request.json
    # Drop system messages and any assistant message that precedes the first user turn.
    msg = []
    for m in req["messages"]:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)
    message_id = msg[-1].get("id")
    try:
        e, conv = ConversationService.get_by_id(req["conversation_id"])
        if not e:
            return get_data_error_result(message="Conversation not found!")
        conv.message = deepcopy(req["messages"])
        e, dia = DialogService.get_by_id(conv.dialog_id)
        if not e:
            return get_data_error_result(message="Dialog not found!")
        del req["conversation_id"]
        del req["messages"]

        # Reserve a slot for the upcoming assistant answer and its retrieval reference.
        if not conv.reference:
            conv.reference = []
        conv.message.append({"role": "assistant", "content": "", "id": message_id})
        conv.reference.append({"chunks": [], "doc_aggs": []})

        def fillin_conv(ans):
            nonlocal conv, message_id
            if not conv.reference:
                conv.reference.append(ans["reference"])
            else:
                conv.reference[-1] = ans["reference"]
            conv.message[-1] = {"role": "assistant", "content": ans["answer"],
                                "id": message_id, "prompt": ans.get("prompt", "")}
            ans["id"] = message_id

        def stream():
            nonlocal dia, msg, req, conv
            try:
                for ans in chat(dia, msg, True, **req):
                    fillin_conv(ans)
                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
                ConversationService.update_by_id(conv.id, conv.to_dict())
            except Exception as e:
                traceback.print_exc()
                yield "data:" + json.dumps({"code": 500, "message": str(e),
                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                           ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

        if req.get("stream", True):
            resp = Response(stream(), mimetype="text/event-stream")
            resp.headers.add_header("Cache-Control", "no-cache")
            resp.headers.add_header("Connection", "keep-alive")
            resp.headers.add_header("X-Accel-Buffering", "no")
            resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
            return resp

        # Non-streaming mode: take the first (complete) answer and persist it.
        answer = None
        for ans in chat(dia, msg, **req):
            answer = ans
            fillin_conv(ans)
            ConversationService.update_by_id(conv.id, conv.to_dict())
            break
        return get_json_result(data=answer)
    except Exception as e:
        return server_error_response(e)


@manager.route('/tts', methods=['POST'])
@login_required
def tts():
    """Stream synthesized speech (audio/mpeg) for the given text with the tenant's TTS model."""
    req = request.json
    text = req["text"]
    tenants = TenantService.get_info_by(current_user.id)
    if not tenants:
        return get_data_error_result(message="Tenant not found!")
    tts_id = tenants[0]["tts_id"]
    if not tts_id:
        return get_data_error_result(message="No default TTS model is set")
    tts_mdl = LLMBundle(tenants[0]["tenant_id"], LLMType.TTS, tts_id)

    def stream_audio():
        try:
            # Split the text on sentence-level punctuation and synthesize piece by piece.
            for txt in re.split(r"[,。/《》?;:!\n\r:;]+", text):
                for chunk in tts_mdl.tts(txt):
                    yield chunk
        except Exception as e:
            yield ("data:" + json.dumps({"code": 500, "message": str(e),
                                         "data": {"answer": "**ERROR**: " + str(e)}},
                                        ensure_ascii=False)).encode('utf-8')

    resp = Response(stream_audio(), mimetype="audio/mpeg")
    resp.headers.add_header("Cache-Control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")
    return resp
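

# Message ids are shared by a user message and the assistant reply that follows it:
# /delete_msg uses this to remove the pair (plus the retrieval reference recorded for
# that turn), and /thumbup uses it to locate the assistant half to annotate.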


@manager.route('/delete_msg', methods=['POST'])
@login_required
@validate_request("conversation_id", "message_id")
def delete_msg():
    """Remove a user message and its paired assistant reply from a conversation."""
    req = request.json
    e, conv = ConversationService.get_by_id(req["conversation_id"])
    if not e:
        return get_data_error_result(message="Conversation not found!")

    conv = conv.to_dict()
    for i, msg in enumerate(conv["message"]):
        if req["message_id"] != msg.get("id", ""):
            continue
        # The assistant reply right after the matched user message shares the same id;
        # remove both messages and the reference entry recorded for that turn.
        assert conv["message"][i + 1]["id"] == req["message_id"]
        conv["message"].pop(i)
        conv["message"].pop(i)
        conv["reference"].pop(max(0, i // 2 - 1))
        break

    ConversationService.update_by_id(conv["id"], conv)
    return get_json_result(data=conv)


@manager.route('/thumbup', methods=['POST'])
@login_required
@validate_request("conversation_id", "message_id")
def thumbup():
    """Record a thumbs-up/down (and optional feedback) on an assistant message."""
    req = request.json
    e, conv = ConversationService.get_by_id(req["conversation_id"])
    if not e:
        return get_data_error_result(message="Conversation not found!")
    up_down = req.get("set")
    feedback = req.get("feedback", "")
    conv = conv.to_dict()
    for i, msg in enumerate(conv["message"]):
        if req["message_id"] == msg.get("id", "") and msg.get("role", "") == "assistant":
            if up_down:
                msg["thumbup"] = True
                if "feedback" in msg:
                    del msg["feedback"]
            else:
                msg["thumbup"] = False
                if feedback:
                    msg["feedback"] = feedback
            break

    ConversationService.update_by_id(conv["id"], conv)
    return get_json_result(data=conv)


@manager.route('/ask', methods=['POST'])
@login_required
@validate_request("question", "kb_ids")
def ask_about():
    """Answer a question against the given knowledge bases, streamed as SSE."""
    req = request.json
    uid = current_user.id

    def stream():
        nonlocal req, uid
        try:
            for ans in ask(req["question"], req["kb_ids"], uid):
                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
        except Exception as e:
            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

    resp = Response(stream(), mimetype="text/event-stream")
    resp.headers.add_header("Cache-Control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")
    resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
    return resp


@manager.route('/mindmap', methods=['POST'])
@login_required
@validate_request("question", "kb_ids")
def mindmap():
    """Build a mind map from chunks retrieved for the question in the given knowledge bases."""
    req = request.json
    kb_ids = req["kb_ids"]
    e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
    if not e:
        return get_data_error_result(message="Knowledgebase not found!")

    embd_mdl = TenantLLMService.model_instance(
        kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
    chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
    ranks = settings.retrievaler.retrieval(req["question"], embd_mdl, kb.tenant_id, kb_ids, 1, 12,
                                           0.3, 0.3, aggs=False)
    extractor = MindMapExtractor(chat_mdl)
    mind_map = extractor([c["content_with_weight"] for c in ranks["chunks"]]).output
    if "error" in mind_map:
        return server_error_response(Exception(mind_map["error"]))
    return get_json_result(data=mind_map)


@manager.route('/related_questions', methods=['POST'])
@login_required
@validate_request("question")
def related_questions():
    """Generate related search terms for a question via the chat model."""
    req = request.json
    question = req["question"]
    chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
    prompt = """
Objective: To generate search terms related to the user's search keywords, helping users find more valuable information.
Instructions:
- Based on the keywords provided by the user, generate 5-10 related search terms.
- Each search term should be directly or indirectly related to the keyword, guiding the user to find more valuable information.
- Use common, general terms as much as possible, avoiding obscure words or technical jargon.
- Keep the term length between 2-4 words, concise and clear.
- DO NOT translate, use the language of the original keywords.
### Example:
Keywords: Chinese football
Related search terms:
1. Current status of Chinese football
2. Reform of Chinese football
3. Youth training of Chinese football
4. Chinese football in the Asian Cup
5. Chinese football in the World Cup
Reason:
- When searching, users often only use one or two keywords, making it difficult to fully express their information needs.
- Generating related search terms can help users dig deeper into relevant information and improve search efficiency.
- At the same time, related terms can also help search engines better understand user needs and return more accurate search results.
"""
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": f"""
Keywords: {question}
Related search terms:
"""}], {"temperature": 0.9})
    # Keep only the numbered lines and strip the leading "N. " prefix.
    return get_json_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])