#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import re
import time

import tiktoken
from flask import Response, jsonify, request

from agent.canvas import Canvas
from api import settings
from api.db import LLMType, StatusEnum
from api.db.db_models import APIToken
from api.db.services.api_service import API4ConversationService
from api.db.services.canvas_service import UserCanvasService, completionOpenAI
from api.db.services.canvas_service import completion as agent_completion
from api.db.services.conversation_service import ConversationService, iframe_completion
from api.db.services.conversation_service import completion as rag_completion
from api.db.services.dialog_service import DialogService, ask, chat, gen_mindmap
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.db.services.search_service import SearchService
from api.db.services.user_service import UserTenantService
from api.utils import get_uuid
from api.utils.api_utils import check_duplicate_ids, get_data_openai, get_error_data_result, get_json_result, get_result, server_error_response, token_required, validate_request
from rag.app.tag import label_question
from rag.prompts import chunks_format
from rag.prompts.prompt_template import load_prompt
from rag.prompts.prompts import cross_languages, keyword_extraction


@manager.route("/chats/<chat_id>/sessions", methods=["POST"])  # noqa: F821
@token_required
def create(tenant_id, chat_id):
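    """Create a new session under the given chat assistant and return the created session."""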
    req = request.json
    req["dialog_id"] = chat_id
    dia = DialogService.query(tenant_id=tenant_id, id=req["dialog_id"], status=StatusEnum.VALID.value)
    if not dia:
        return get_error_data_result(message="You do not own the assistant.")
    conv = {
        "id": get_uuid(),
        "dialog_id": req["dialog_id"],
        "name": req.get("name", "New session"),
        "message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}],
        "user_id": req.get("user_id", ""),
        "reference": [{}],
    }
    if not conv.get("name"):
        return get_error_data_result(message="`name` cannot be empty.")
    ConversationService.save(**conv)
    e, conv = ConversationService.get_by_id(conv["id"])
    if not e:
        return get_error_data_result(message="Failed to create a session!")
    conv = conv.to_dict()
    conv["messages"] = conv.pop("message")
    conv["chat_id"] = conv.pop("dialog_id")
    del conv["reference"]
    return get_result(data=conv)


@manager.route("/agents/<agent_id>/sessions", methods=["POST"])  # noqa: F821
@token_required
def create_agent_session(tenant_id, agent_id):
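    """Create a new session for the given agent and return it with the agent's prologue message."""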
    user_id = request.args.get("user_id", tenant_id)
    e, cvs = UserCanvasService.get_by_id(agent_id)
    if not e:
        return get_error_data_result("Agent not found.")
    if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
        return get_error_data_result("You cannot access the agent.")
    if not isinstance(cvs.dsl, str):
        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)

    session_id = get_uuid()
    canvas = Canvas(cvs.dsl, tenant_id, agent_id)
    canvas.reset()
    conv = {
        "id": session_id,
        "dialog_id": cvs.id,
        "user_id": user_id,
        "message": [],
        "source": "agent",
        "dsl": cvs.dsl,
    }
    API4ConversationService.save(**conv)

    cvs.dsl = json.loads(str(canvas))
    conv = {"id": session_id, "dialog_id": cvs.id, "user_id": user_id, "message": [{"role": "assistant", "content": canvas.get_prologue()}], "source": "agent", "dsl": cvs.dsl}
    conv["agent_id"] = conv.pop("dialog_id")
    return get_result(data=conv)


@manager.route("/chats/<chat_id>/sessions/<session_id>", methods=["PUT"])  # noqa: F821
@token_required
def update(tenant_id, chat_id, session_id):
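    """Update mutable fields (e.g. `name`) of an existing chat session."""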
    req = request.json
    req["dialog_id"] = chat_id
    conv_id = session_id
    conv = ConversationService.query(id=conv_id, dialog_id=chat_id)
    if not conv:
        return get_error_data_result(message="Session does not exist")
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message="You do not own the session")
    if "message" in req or "messages" in req:
        return get_error_data_result(message="`message` cannot be changed")
    if "reference" in req:
        return get_error_data_result(message="`reference` cannot be changed")
    if "name" in req and not req.get("name"):
        return get_error_data_result(message="`name` cannot be empty.")
    if not ConversationService.update_by_id(conv_id, req):
        return get_error_data_result(message="Session update error")
    return get_result()


@manager.route("/chats/<chat_id>/completions", methods=["POST"])  # noqa: F821
@token_required
def chat_completion(tenant_id, chat_id):
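    """Answer a question within a chat session; streamed as SSE by default, or returned as a single result."""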
    req = request.json
    if not req:
        req = {"question": ""}
    if not req.get("session_id"):
        req["question"] = ""
    if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
        return get_error_data_result(f"You don't own the chat {chat_id}")
    if req.get("session_id"):
        if not ConversationService.query(id=req["session_id"], dialog_id=chat_id):
            return get_error_data_result(f"You don't own the session {req['session_id']}")

    if req.get("stream", True):
        resp = Response(rag_completion(tenant_id, chat_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp
    else:
        answer = None
        for ans in rag_completion(tenant_id, chat_id, **req):
            answer = ans
            break
        return get_result(data=answer)


@manager.route("/chats_openai/<chat_id>/chat/completions", methods=["POST"])  # noqa: F821
@validate_request("model", "messages")  # noqa: F821
@token_required
def chat_completion_openai_like(tenant_id, chat_id):
    """
    OpenAI-like chat completion API that simulates the behavior of OpenAI's completions endpoint.

    This function allows users to interact with a model and receive responses based on a series of historical messages.
    If `stream` is set to True (the default), the response is streamed in chunks, mimicking the OpenAI-style API.
    Set `stream` to False explicitly to receive the response as a single complete answer.

    Reference:
        - If `stream` is True, the final answer and reference information will appear in the **last chunk** of the stream.
        - If `stream` is False, the reference will be included in `choices[0].message.reference`.

    Example usage:

        curl -X POST https://ragflow_address.com/api/v1/chats_openai/<chat_id>/chat/completions \
            -H "Content-Type: application/json" \
            -H "Authorization: Bearer $RAGFLOW_API_KEY" \
            -d '{
                "model": "model",
                "messages": [{"role": "user", "content": "Say this is a test!"}],
                "stream": true
            }'

    Alternatively, you can use Python's `OpenAI` client:

        from openai import OpenAI

        model = "model"
        client = OpenAI(api_key="ragflow-api-key", base_url=f"http://ragflow_address/api/v1/chats_openai/<chat_id>")

        stream = True
        reference = True

        completion = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Who are you?"},
                {"role": "assistant", "content": "I am an AI assistant named..."},
                {"role": "user", "content": "Can you tell me how to install neovim"},
            ],
            stream=stream,
            extra_body={"reference": reference},
        )

        if stream:
            for chunk in completion:
                print(chunk)
                if reference and chunk.choices[0].finish_reason == "stop":
                    print(f"Reference:\n{chunk.choices[0].delta.reference}")
                    print(f"Final content:\n{chunk.choices[0].delta.final_content}")
        else:
            print(completion.choices[0].message.content)
            if reference:
                print(completion.choices[0].message.reference)
    """
    req = request.get_json()
    need_reference = bool(req.get("reference", False))

    messages = req.get("messages", [])
    # To prevent empty [] input
    if len(messages) < 1:
        return get_error_data_result("You have to provide messages.")
    if messages[-1]["role"] != "user":
        return get_error_data_result("The last content of this conversation is not from the user.")

    prompt = messages[-1]["content"]
    # Treat context tokens as reasoning tokens
    context_token_used = sum(len(message["content"]) for message in messages)

    dia = DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value)
    if not dia:
        return get_error_data_result(f"You don't own the chat {chat_id}")
    dia = dia[0]

    # Filter out system messages and leading assistant messages
    msg = []
    for m in messages:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)

    # tools = get_tools()
    # toolcall_session = SimpleFunctionCallServer()
    tools = None
    toolcall_session = None

    if req.get("stream", True):
        # The value for the usage field on all chunks except for the last one will be null.
        # The usage field on the last chunk contains token usage statistics for the entire request.
        # The choices field on the last chunk will always be an empty array [].
        def streamed_response_generator(chat_id, dia, msg):
            token_used = 0
            answer_cache = ""
            reasoning_cache = ""
            last_ans = {}
            response = {
                "id": f"chatcmpl-{chat_id}",
                "choices": [
                    {
                        "delta": {
                            "content": "",
                            "role": "assistant",
                            "function_call": None,
                            "tool_calls": None,
                            "reasoning_content": "",
                        },
                        "finish_reason": None,
                        "index": 0,
                        "logprobs": None,
                    }
                ],
                "created": int(time.time()),
                "model": "model",
                "object": "chat.completion.chunk",
                "system_fingerprint": "",
                "usage": None,
            }

            try:
                for ans in chat(dia, msg, True, toolcall_session=toolcall_session, tools=tools, quote=need_reference):
                    last_ans = ans
                    answer = ans["answer"]

                    reasoning_match = re.search(r"<think>(.*?)</think>", answer, flags=re.DOTALL)
                    if reasoning_match:
                        reasoning_part = reasoning_match.group(1)
                        content_part = answer[reasoning_match.end():]
                    else:
                        reasoning_part = ""
                        content_part = answer

                    reasoning_incremental = ""
                    if reasoning_part:
                        if reasoning_part.startswith(reasoning_cache):
                            reasoning_incremental = reasoning_part.replace(reasoning_cache, "", 1)
                        else:
                            reasoning_incremental = reasoning_part
                        reasoning_cache = reasoning_part

                    content_incremental = ""
                    if content_part:
                        if content_part.startswith(answer_cache):
                            content_incremental = content_part.replace(answer_cache, "", 1)
                        else:
                            content_incremental = content_part
                        answer_cache = content_part

                    token_used += len(reasoning_incremental) + len(content_incremental)

                    if not any([reasoning_incremental, content_incremental]):
                        continue

                    if reasoning_incremental:
                        response["choices"][0]["delta"]["reasoning_content"] = reasoning_incremental
                    else:
                        response["choices"][0]["delta"]["reasoning_content"] = None

                    if content_incremental:
                        response["choices"][0]["delta"]["content"] = content_incremental
                    else:
                        response["choices"][0]["delta"]["content"] = None

                    yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
            except Exception as e:
                response["choices"][0]["delta"]["content"] = "**ERROR**: " + str(e)
                yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"

            # The last chunk
            response["choices"][0]["delta"]["content"] = None
            response["choices"][0]["delta"]["reasoning_content"] = None
            response["choices"][0]["finish_reason"] = "stop"
            response["usage"] = {"prompt_tokens": len(prompt), "completion_tokens": token_used, "total_tokens": len(prompt) + token_used}
            if need_reference:
                response["choices"][0]["delta"]["reference"] = chunks_format(last_ans.get("reference", []))
                response["choices"][0]["delta"]["final_content"] = last_ans.get("answer", "")
            yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
            yield "data:[DONE]\n\n"

        resp = Response(streamed_response_generator(chat_id, dia, msg), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp
    else:
        answer = None
        for ans in chat(dia, msg, False, toolcall_session=toolcall_session, tools=tools, quote=need_reference):
            # focus on answer content only
            answer = ans
            break
        content = answer["answer"]

        response = {
            "id": f"chatcmpl-{chat_id}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": req.get("model", ""),
            "usage": {
                "prompt_tokens": len(prompt),
                "completion_tokens": len(content),
                "total_tokens": len(prompt) + len(content),
                "completion_tokens_details": {
                    "reasoning_tokens": context_token_used,
                    "accepted_prediction_tokens": len(content),
                    "rejected_prediction_tokens": 0,  # 0 for simplicity
                },
            },
            "choices": [
                {
                    "message": {
                        "role": "assistant",
                        "content": content,
                    },
                    "logprobs": None,
                    "finish_reason": "stop",
                    "index": 0,
                }
            ],
        }
        if need_reference:
            response["choices"][0]["message"]["reference"] = chunks_format(answer.get("reference", []))
        return jsonify(response)


@manager.route("/agents_openai/<agent_id>/chat/completions", methods=["POST"])  # noqa: F821
@validate_request("model", "messages")  # noqa: F821
@token_required
def agents_completion_openai_compatibility(tenant_id, agent_id):
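    """OpenAI-compatible completion endpoint backed by an agent; supports streaming and non-streaming modes."""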
    req = request.json
    tiktokenenc = tiktoken.get_encoding("cl100k_base")
    messages = req.get("messages", [])
    if not messages:
        return get_error_data_result("You must provide at least one message.")
    if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
        return get_error_data_result(f"You don't own the agent {agent_id}")

    filtered_messages = [m for m in messages if m["role"] in ["user", "assistant"]]
    prompt_tokens = sum(len(tiktokenenc.encode(m["content"])) for m in filtered_messages)
    if not filtered_messages:
        return jsonify(
            get_data_openai(
                id=agent_id,
                content="No valid messages found (user or assistant).",
                finish_reason="stop",
                model=req.get("model", ""),
                completion_tokens=len(tiktokenenc.encode("No valid messages found (user or assistant).")),
                prompt_tokens=prompt_tokens,
            )
        )

    question = next((m["content"] for m in reversed(messages) if m["role"] == "user"), "")
    stream = req.pop("stream", False)

    if stream:
        resp = Response(
            completionOpenAI(
                tenant_id,
                agent_id,
                question,
                session_id=req.get("id", req.get("metadata", {}).get("id", "")),
                stream=True,
                **req,
            ),
            mimetype="text/event-stream",
        )
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp
    else:
        # For non-streaming, just return the response directly
        response = next(
            completionOpenAI(
                tenant_id,
                agent_id,
                question,
                session_id=req.get("id", req.get("metadata", {}).get("id", "")),
                stream=False,
                **req,
            )
        )
        return jsonify(response)


@manager.route("/agents/<agent_id>/completions", methods=["POST"])  # noqa: F821
@token_required
def agent_completions(tenant_id, agent_id):
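    """Run an agent completion; streamed as SSE by default, or returned as a single result."""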
    req = request.json
    ans = {}

    if req.get("stream", True):

        def generate():
            for answer in agent_completion(tenant_id=tenant_id, agent_id=agent_id, **req):
                if isinstance(answer, str):
                    try:
                        ans = json.loads(answer[5:])  # remove "data:"
                    except Exception:
                        continue
                    if ans.get("event") != "message":
                        continue
                    yield answer
            yield "data:[DONE]\n\n"

        resp = Response(generate(), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in agent_completion(tenant_id=tenant_id, agent_id=agent_id, **req):
        try:
            ans = json.loads(answer[5:])  # remove "data:"
        except Exception as e:
            return get_result(data=f"**ERROR**: {str(e)}")
        return get_result(data=ans)


@manager.route("/chats/<chat_id>/sessions", methods=["GET"])  # noqa: F821
@token_required
def list_session(tenant_id, chat_id):
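    """List sessions of a chat assistant, with paging, ordering, and optional filtering by id, name, or user_id."""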
    if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message=f"You don't own the assistant {chat_id}.")
    id = request.args.get("id")
    name = request.args.get("name")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "create_time")
    user_id = request.args.get("user_id")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
        desc = False
    else:
        desc = True
    convs = ConversationService.get_list(chat_id, page_number, items_per_page, orderby, desc, id, name, user_id)
    if not convs:
        return get_result(data=[])

    for conv in convs:
        conv["messages"] = conv.pop("message")
        infos = conv["messages"]
        for info in infos:
            if "prompt" in info:
                info.pop("prompt")
        conv["chat_id"] = conv.pop("dialog_id")
        ref_messages = conv["reference"]
        if ref_messages:
            messages = conv["messages"]
            message_num = 0
            ref_num = 0
            while message_num < len(messages) and ref_num < len(ref_messages):
                if messages[message_num]["role"] != "user":
                    chunk_list = []
                    if "chunks" in ref_messages[ref_num]:
                        chunks = ref_messages[ref_num]["chunks"]
                        for chunk in chunks:
                            new_chunk = {
                                "id": chunk.get("chunk_id", chunk.get("id")),
                                "content": chunk.get("content_with_weight", chunk.get("content")),
                                "document_id": chunk.get("doc_id", chunk.get("document_id")),
                                "document_name": chunk.get("docnm_kwd", chunk.get("document_name")),
                                "dataset_id": chunk.get("kb_id", chunk.get("dataset_id")),
                                "image_id": chunk.get("image_id", chunk.get("img_id")),
                                "positions": chunk.get("positions", chunk.get("position_int")),
                            }
                            chunk_list.append(new_chunk)
                    messages[message_num]["reference"] = chunk_list
                    ref_num += 1
                message_num += 1
        del conv["reference"]
    return get_result(data=convs)


@manager.route("/agents/<agent_id>/sessions", methods=["GET"])  # noqa: F821
@token_required
def list_agent_session(tenant_id, agent_id):
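    """List sessions of an agent, with paging, ordering, and optional filtering; `dsl` controls whether the canvas DSL is included."""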
    if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
        return get_error_data_result(message=f"You don't own the agent {agent_id}.")
    id = request.args.get("id")
    user_id = request.args.get("user_id")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "update_time")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
        desc = False
    else:
        desc = True
    # dsl defaults to True in all cases except for False and false
    include_dsl = request.args.get("dsl") != "False" and request.args.get("dsl") != "false"
    total, convs = API4ConversationService.get_list(agent_id, tenant_id, page_number, items_per_page, orderby, desc, id, user_id, include_dsl)
    if not convs:
        return get_result(data=[])

    for conv in convs:
        conv["messages"] = conv.pop("message")
        infos = conv["messages"]
        for info in infos:
            if "prompt" in info:
                info.pop("prompt")
        conv["agent_id"] = conv.pop("dialog_id")
        # Fix for session listing endpoint
        if conv["reference"]:
            messages = conv["messages"]
            message_num = 0
            chunk_num = 0
            # Ensure reference is a list type to prevent KeyError
            if not isinstance(conv["reference"], list):
                conv["reference"] = []
            while message_num < len(messages):
                if message_num != 0 and messages[message_num]["role"] != "user":
                    chunk_list = []
                    # Add boundary and type checks to prevent KeyError
                    if (chunk_num < len(conv["reference"]) and
                            conv["reference"][chunk_num] is not None and
                            isinstance(conv["reference"][chunk_num], dict) and
                            "chunks" in conv["reference"][chunk_num]):
                        chunks = conv["reference"][chunk_num]["chunks"]
                        for chunk in chunks:
                            new_chunk = {
                                "id": chunk.get("chunk_id", chunk.get("id")),
                                "content": chunk.get("content_with_weight", chunk.get("content")),
                                "document_id": chunk.get("doc_id", chunk.get("document_id")),
                                "document_name": chunk.get("docnm_kwd", chunk.get("document_name")),
                                "dataset_id": chunk.get("kb_id", chunk.get("dataset_id")),
                                "image_id": chunk.get("image_id", chunk.get("img_id")),
                                "positions": chunk.get("positions", chunk.get("position_int")),
                            }
                            chunk_list.append(new_chunk)
                        chunk_num += 1
                    messages[message_num]["reference"] = chunk_list
                message_num += 1
        del conv["reference"]
    return get_result(data=convs)


@manager.route("/chats/<chat_id>/sessions", methods=["DELETE"])  # noqa: F821
@token_required
def delete(tenant_id, chat_id):
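    """Delete one or more sessions of a chat assistant; deletes all sessions when no ids are given."""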
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message="You don't own the chat")
    errors = []
    success_count = 0
    req = request.json
    convs = ConversationService.query(dialog_id=chat_id)
    if not req:
        ids = None
    else:
        ids = req.get("ids")

    if not ids:
        conv_list = []
        for conv in convs:
            conv_list.append(conv.id)
    else:
        conv_list = ids

    unique_conv_ids, duplicate_messages = check_duplicate_ids(conv_list, "session")
    conv_list = unique_conv_ids

    for id in conv_list:
        conv = ConversationService.query(id=id, dialog_id=chat_id)
        if not conv:
            errors.append(f"The chat doesn't own the session {id}")
            continue
        ConversationService.delete_by_id(id)
        success_count += 1

    if errors:
        if success_count > 0:
            return get_result(data={"success_count": success_count, "errors": errors}, message=f"Partially deleted {success_count} sessions with {len(errors)} errors")
        else:
            return get_error_data_result(message="; ".join(errors))

    if duplicate_messages:
        if success_count > 0:
            return get_result(message=f"Partially deleted {success_count} sessions with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages})
        else:
            return get_error_data_result(message=";".join(duplicate_messages))

    return get_result()


@manager.route("/agents/<agent_id>/sessions", methods=["DELETE"])  # noqa: F821
@token_required
def delete_agent_session(tenant_id, agent_id):
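    """Delete one or more sessions of an agent; deletes all sessions when no ids are given."""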
    errors = []
    success_count = 0
    req = request.json
    cvs = UserCanvasService.query(user_id=tenant_id, id=agent_id)
    if not cvs:
        return get_error_data_result(f"You don't own the agent {agent_id}")

    convs = API4ConversationService.query(dialog_id=agent_id)
    if not convs:
        return get_error_data_result(f"Agent {agent_id} has no sessions")

    if not req:
        ids = None
    else:
        ids = req.get("ids")

    if not ids:
        conv_list = []
        for conv in convs:
            conv_list.append(conv.id)
    else:
        conv_list = ids

    unique_conv_ids, duplicate_messages = check_duplicate_ids(conv_list, "session")
    conv_list = unique_conv_ids

    for session_id in conv_list:
        conv = API4ConversationService.query(id=session_id, dialog_id=agent_id)
        if not conv:
            errors.append(f"The agent doesn't own the session {session_id}")
            continue
        API4ConversationService.delete_by_id(session_id)
        success_count += 1

    if errors:
        if success_count > 0:
            return get_result(data={"success_count": success_count, "errors": errors}, message=f"Partially deleted {success_count} sessions with {len(errors)} errors")
        else:
            return get_error_data_result(message="; ".join(errors))

    if duplicate_messages:
        if success_count > 0:
            return get_result(message=f"Partially deleted {success_count} sessions with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages})
        else:
            return get_error_data_result(message=";".join(duplicate_messages))

    return get_result()


@manager.route("/sessions/ask", methods=["POST"])  # noqa: F821
@token_required
def ask_about(tenant_id):
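    """Ask a question against the given datasets and stream the answer as SSE."""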
    req = request.json
    if not req.get("question"):
        return get_error_data_result("`question` is required.")
    if not req.get("dataset_ids"):
        return get_error_data_result("`dataset_ids` is required.")
    if not isinstance(req.get("dataset_ids"), list):
        return get_error_data_result("`dataset_ids` should be a list.")
    req["kb_ids"] = req.pop("dataset_ids")
    for kb_id in req["kb_ids"]:
        if not KnowledgebaseService.accessible(kb_id, tenant_id):
            return get_error_data_result(f"You don't own the dataset {kb_id}.")
        kbs = KnowledgebaseService.query(id=kb_id)
        kb = kbs[0]
        if kb.chunk_num == 0:
            return get_error_data_result(f"The dataset {kb_id} doesn't have any parsed files.")
    uid = tenant_id

    def stream():
        nonlocal req, uid
        try:
            for ans in ask(req["question"], req["kb_ids"], uid):
                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
        except Exception as e:
            yield "data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e), "reference": []}}, ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

    resp = Response(stream(), mimetype="text/event-stream")
    resp.headers.add_header("Cache-control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")
    resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
    return resp


@manager.route("/sessions/related_questions", methods=["POST"])  # noqa: F821
@token_required
def related_questions(tenant_id):
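    """Generate related search terms for the given question using the tenant's chat model."""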
    req = request.json
    if not req.get("question"):
        return get_error_data_result("`question` is required.")
    question = req["question"]
    industry = req.get("industry", "")
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    prompt = """
Objective: To generate search terms related to the user's search keywords, helping users find more valuable information.
Instructions:
- Based on the keywords provided by the user, generate 5-10 related search terms.
- Each search term should be directly or indirectly related to the keyword, guiding the user to find more valuable information.
- Use common, general terms as much as possible, avoiding obscure words or technical jargon.
- Keep the term length between 2-4 words, concise and clear.
- DO NOT translate, use the language of the original keywords.
"""
    if industry:
        prompt += f" - Ensure all search terms are relevant to the industry: {industry}.\n"
    prompt += """
### Example:
Keywords: Chinese football
Related search terms:
1. Current status of Chinese football
2. Reform of Chinese football
3. Youth training of Chinese football
4. Chinese football in the Asian Cup
5. Chinese football in the World Cup
Reason:
- When searching, users often only use one or two keywords, making it difficult to fully express their information needs.
- Generating related search terms can help users dig deeper into relevant information and improve search efficiency.
- At the same time, related terms can also help search engines better understand user needs and return more accurate search results.
"""
    ans = chat_mdl.chat(
        prompt,
        [
            {
                "role": "user",
                "content": f"""
Keywords: {question}
Related search terms:
""",
            }
        ],
        {"temperature": 0.9},
    )
    return get_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])


@manager.route("/chatbots/<dialog_id>/completions", methods=["POST"])  # noqa: F821
def chatbot_completions(dialog_id):
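    """Completion endpoint for embedded chatbots, authenticated with a beta API token."""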
    req = request.json

    token = request.headers.get("Authorization").split()
    if len(token) != 2:
        return get_error_data_result(message="Authorization is not valid!")
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message="Authentication error: API key is invalid!")

    if "quote" not in req:
        req["quote"] = False

    if req.get("stream", True):
        resp = Response(iframe_completion(dialog_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in iframe_completion(dialog_id, **req):
        return get_result(data=answer)


@manager.route("/chatbots/<dialog_id>/info", methods=["GET"])  # noqa: F821
def chatbots_inputs(dialog_id):
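    """Return the title, avatar, and prologue of a dialog for the embedded chatbot."""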
    token = request.headers.get("Authorization").split()
    if len(token) != 2:
        return get_error_data_result(message="Authorization is not valid!")
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message="Authentication error: API key is invalid!")

    e, dialog = DialogService.get_by_id(dialog_id)
    if not e:
        return get_error_data_result(f"Can't find dialog by ID: {dialog_id}")

    return get_result(
        data={
            "title": dialog.name,
            "avatar": dialog.icon,
            "prologue": dialog.prompt_config.get("prologue", ""),
        }
    )


@manager.route("/agentbots/<agent_id>/completions", methods=["POST"])  # noqa: F821
def agent_bot_completions(agent_id):
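    """Completion endpoint for embedded agent bots, authenticated with a beta API token."""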
    req = request.json

    token = request.headers.get("Authorization").split()
    if len(token) != 2:
        return get_error_data_result(message="Authorization is not valid!")
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message="Authentication error: API key is invalid!")

    if req.get("stream", True):
        resp = Response(agent_completion(objs[0].tenant_id, agent_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in agent_completion(objs[0].tenant_id, agent_id, **req):
        return get_result(data=answer)


@manager.route("/agentbots/<agent_id>/inputs", methods=["GET"])  # noqa: F821
def begin_inputs(agent_id):
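    """Return the title, avatar, prologue, and the `begin` component's input form of an agent."""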
    token = request.headers.get("Authorization").split()
    if len(token) != 2:
        return get_error_data_result(message="Authorization is not valid!")
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message="Authentication error: API key is invalid!")

    e, cvs = UserCanvasService.get_by_id(agent_id)
    if not e:
        return get_error_data_result(f"Can't find agent by ID: {agent_id}")

    canvas = Canvas(json.dumps(cvs.dsl), objs[0].tenant_id)
    return get_result(
        data={
            "title": cvs.title,
            "avatar": cvs.avatar,
            "inputs": canvas.get_component_input_form("begin"),
            "prologue": canvas.get_prologue(),
        }
    )


@manager.route("/searchbots/ask", methods=["POST"])  # noqa: F821
@validate_request("question", "kb_ids")
def ask_about_embedded():
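    """Ask a question for embedded search bots and stream the answer as SSE."""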
    token = request.headers.get("Authorization").split()
    if len(token) != 2:
        return get_error_data_result(message="Authorization is not valid!")
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message="Authentication error: API key is invalid!")

    req = request.json
    uid = objs[0].tenant_id
    search_id = req.get("search_id", "")
    search_config = {}
    if search_id:
        if search_app := SearchService.get_detail(search_id):
            search_config = search_app.get("search_config", {})

    def stream():
        nonlocal req, uid
        try:
            for ans in ask(req["question"], req["kb_ids"], uid, search_config):
                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
        except Exception as e:
            yield "data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e), "reference": []}}, ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

    resp = Response(stream(), mimetype="text/event-stream")
    resp.headers.add_header("Cache-control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")
    resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
    return resp


@manager.route("/searchbots/retrieval_test", methods=["POST"])  # noqa: F821
@validate_request("kb_id", "question")
def retrieval_test_embedded():
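    """Run a retrieval test against the given knowledge bases for embedded search bots."""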
    token = request.headers.get("Authorization").split()
    if len(token) != 2:
        return get_error_data_result(message="Authorization is not valid!")
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message="Authentication error: API key is invalid!")

    req = request.json
    page = int(req.get("page", 1))
    size = int(req.get("size", 30))
    question = req["question"]
    kb_ids = req["kb_id"]
    if isinstance(kb_ids, str):
        kb_ids = [kb_ids]
    doc_ids = req.get("doc_ids", [])
    similarity_threshold = float(req.get("similarity_threshold", 0.0))
    vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
    use_kg = req.get("use_kg", False)
    top = int(req.get("top_k", 1024))
    langs = req.get("cross_languages", [])
    tenant_ids = []

    tenant_id = objs[0].tenant_id
    if not tenant_id:
        return get_error_data_result(message="permission denied.")

    try:
        tenants = UserTenantService.query(user_id=tenant_id)
        for kb_id in kb_ids:
            for tenant in tenants:
                if KnowledgebaseService.query(tenant_id=tenant.tenant_id, id=kb_id):
                    tenant_ids.append(tenant.tenant_id)
                    break
            else:
                return get_json_result(
                    data=False,
                    message="Only the owner of the knowledgebase is authorized for this operation.",
                    code=settings.RetCode.OPERATING_ERROR,
                )

        e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
        if not e:
            return get_error_data_result(message="Knowledgebase not found!")

        if langs:
            question = cross_languages(kb.tenant_id, None, question, langs)

        embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)

        rerank_mdl = None
        if req.get("rerank_id"):
            rerank_mdl = LLMBundle(kb.tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"])

        if req.get("keyword", False):
            chat_mdl = LLMBundle(kb.tenant_id, LLMType.CHAT)
            question += keyword_extraction(chat_mdl, question)

        labels = label_question(question, [kb])
        ranks = settings.retrievaler.retrieval(
            question, embd_mdl, tenant_ids, kb_ids, page, size,
            similarity_threshold, vector_similarity_weight, top,
            doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"),
            rank_feature=labels,
        )
        if use_kg:
            ck = settings.kg_retrievaler.retrieval(
                question,
                tenant_ids,
                kb_ids,
                embd_mdl,
                LLMBundle(kb.tenant_id, LLMType.CHAT),
            )
            if ck["content_with_weight"]:
                ranks["chunks"].insert(0, ck)

        for c in ranks["chunks"]:
            c.pop("vector", None)
        ranks["labels"] = labels

        return get_json_result(data=ranks)
    except Exception as e:
        if str(e).find("not_found") > 0:
            return get_json_result(data=False, message="No chunk found! Check the chunk status please!", code=settings.RetCode.DATA_ERROR)
        return server_error_response(e)


@manager.route("/searchbots/related_questions", methods=["POST"])  # noqa: F821
@validate_request("question")
def related_questions_embedded():
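    """Generate related search terms for embedded search bots using the configured chat model."""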
    token = request.headers.get("Authorization").split()
    if len(token) != 2:
        return get_error_data_result(message="Authorization is not valid!")
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message="Authentication error: API key is invalid!")

    req = request.json
    tenant_id = objs[0].tenant_id
    if not tenant_id:
        return get_error_data_result(message="permission denied.")

    search_id = req.get("search_id", "")
    search_config = {}
    if search_id:
        if search_app := SearchService.get_detail(search_id):
            search_config = search_app.get("search_config", {})

    question = req["question"]
    chat_id = search_config.get("chat_id", "")
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, chat_id)
    gen_conf = search_config.get("llm_setting", {"temperature": 0.9})
    prompt = load_prompt("related_question")
    ans = chat_mdl.chat(
        prompt,
        [
            {
                "role": "user",
                "content": f"""
Keywords: {question}
Related search terms:
""",
            }
        ],
        gen_conf,
    )
    return get_json_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])


@manager.route("/searchbots/detail", methods=["GET"])  # noqa: F821
def detail_share_embedded():
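    """Return the detail of a shared Search App for embedded search bots."""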
    token = request.headers.get("Authorization").split()
    if len(token) != 2:
        return get_error_data_result(message="Authorization is not valid!")
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message="Authentication error: API key is invalid!")

    search_id = request.args["search_id"]
    tenant_id = objs[0].tenant_id
    if not tenant_id:
        return get_error_data_result(message="permission denied.")

    try:
        tenants = UserTenantService.query(user_id=tenant_id)
        for tenant in tenants:
            if SearchService.query(tenant_id=tenant.tenant_id, id=search_id):
                break
        else:
            return get_json_result(data=False, message="Has no permission for this operation.", code=settings.RetCode.OPERATING_ERROR)

        search = SearchService.get_detail(search_id)
        if not search:
            return get_error_data_result(message="Can't find this Search App!")
        return get_json_result(data=search)
    except Exception as e:
        return server_error_response(e)


@manager.route("/searchbots/mindmap", methods=["POST"])  # noqa: F821
@validate_request("question", "kb_ids")
def mindmap():
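    """Generate a mind map for the given question and knowledge bases."""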
    token = request.headers.get("Authorization").split()
    if len(token) != 2:
        return get_error_data_result(message="Authorization is not valid!")
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message="Authentication error: API key is invalid!")

    tenant_id = objs[0].tenant_id
    req = request.json
    search_id = req.get("search_id", "")
    search_app = SearchService.get_detail(search_id) if search_id else {}

    mind_map = gen_mindmap(req["question"], req["kb_ids"], tenant_id, search_app.get("search_config", {}))
    if "error" in mind_map:
        return server_error_response(Exception(mind_map["error"]))
    return get_json_result(data=mind_map)