
# session.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import re
import time

import tiktoken
from flask import Response, jsonify, request

from agent.canvas import Canvas
from api.db import LLMType, StatusEnum
from api.db.db_models import APIToken
from api.db.services.api_service import API4ConversationService
from api.db.services.canvas_service import UserCanvasService, completionOpenAI
from api.db.services.canvas_service import completion as agent_completion
from api.db.services.conversation_service import ConversationService, iframe_completion
from api.db.services.conversation_service import completion as rag_completion
from api.db.services.dialog_service import DialogService, ask, chat
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.utils import get_uuid
from api.utils.api_utils import check_duplicate_ids, get_data_openai, get_error_data_result, get_result, token_required, validate_request
from rag.prompts import chunks_format

@manager.route("/chats/<chat_id>/sessions", methods=["POST"])  # noqa: F821
@token_required
def create(tenant_id, chat_id):
    req = request.json
    req["dialog_id"] = chat_id
    dia = DialogService.query(tenant_id=tenant_id, id=req["dialog_id"], status=StatusEnum.VALID.value)
    if not dia:
        return get_error_data_result(message="You do not own the assistant.")
    conv = {
        "id": get_uuid(),
        "dialog_id": req["dialog_id"],
        "name": req.get("name", "New session"),
        "message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}],
        "user_id": req.get("user_id", ""),
        "reference": [{}],
    }
    if not conv.get("name"):
        return get_error_data_result(message="`name` cannot be empty.")
    ConversationService.save(**conv)
    e, conv = ConversationService.get_by_id(conv["id"])
    if not e:
        return get_error_data_result(message="Failed to create a session!")
    conv = conv.to_dict()
    conv["messages"] = conv.pop("message")
    conv["chat_id"] = conv.pop("dialog_id")
    del conv["reference"]
    return get_result(data=conv)

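# A minimal usage sketch: creating a session for a chat assistant. The host, chat_id,
# and API key are placeholders, and the /api/v1 prefix is assumed from the curl example
# in the chat_completion_openai_like() docstring further down.
#
#   curl -X POST "http://<ragflow_host>/api/v1/chats/<chat_id>/sessions" \
#        -H "Content-Type: application/json" \
#        -H "Authorization: Bearer <RAGFLOW_API_KEY>" \
#        -d '{"name": "New session"}'
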
@manager.route("/agents/<agent_id>/sessions", methods=["POST"])  # noqa: F821
@token_required
def create_agent_session(tenant_id, agent_id):
    req = request.json
    if not request.is_json:
        req = request.form
    files = request.files
    user_id = request.args.get("user_id", "")

    e, cvs = UserCanvasService.get_by_id(agent_id)
    if not e:
        return get_error_data_result("Agent not found.")
    if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
        return get_error_data_result("You cannot access the agent.")

    if not isinstance(cvs.dsl, str):
        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)

    canvas = Canvas(cvs.dsl, tenant_id)
    canvas.reset()
    query = canvas.get_preset_param()
    if query:
        for ele in query:
            if not ele["optional"]:
                if ele["type"] == "file":
                    if files is None or not files.get(ele["key"]):
                        return get_error_data_result(f"`{ele['key']}` with type `{ele['type']}` is required")
                    upload_file = files.get(ele["key"])
                    file_content = FileService.parse_docs([upload_file], user_id)
                    file_name = upload_file.filename
                    ele["value"] = file_name + "\n" + file_content
                else:
                    if req is None or not req.get(ele["key"]):
                        return get_error_data_result(f"`{ele['key']}` with type `{ele['type']}` is required")
                    ele["value"] = req[ele["key"]]
            else:
                if ele["type"] == "file":
                    if files is not None and files.get(ele["key"]):
                        upload_file = files.get(ele["key"])
                        file_content = FileService.parse_docs([upload_file], user_id)
                        file_name = upload_file.filename
                        ele["value"] = file_name + "\n" + file_content
                    else:
                        if "value" in ele:
                            ele.pop("value")
                else:
                    if req is not None and req.get(ele["key"]):
                        ele["value"] = req[ele["key"]]
                    else:
                        if "value" in ele:
                            ele.pop("value")

    for ans in canvas.run(stream=False):
        pass

    cvs.dsl = json.loads(str(canvas))
    conv = {"id": get_uuid(), "dialog_id": cvs.id, "user_id": user_id, "message": [{"role": "assistant", "content": canvas.get_prologue()}], "source": "agent", "dsl": cvs.dsl}
    API4ConversationService.save(**conv)
    conv["agent_id"] = conv.pop("dialog_id")
    return get_result(data=conv)

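# A minimal usage sketch, assuming the same /api/v1 prefix (hypothetical host, agent_id,
# and key). If the agent's `begin` component defines required non-file inputs, supply
# them as JSON fields; file inputs go in a multipart form instead.
#
#   curl -X POST "http://<ragflow_host>/api/v1/agents/<agent_id>/sessions?user_id=<user_id>" \
#        -H "Content-Type: application/json" \
#        -H "Authorization: Bearer <RAGFLOW_API_KEY>" \
#        -d '{}'
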
@manager.route("/chats/<chat_id>/sessions/<session_id>", methods=["PUT"])  # noqa: F821
@token_required
def update(tenant_id, chat_id, session_id):
    req = request.json
    req["dialog_id"] = chat_id
    conv_id = session_id
    conv = ConversationService.query(id=conv_id, dialog_id=chat_id)
    if not conv:
        return get_error_data_result(message="Session does not exist")
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message="You do not own the session")
    if "message" in req or "messages" in req:
        return get_error_data_result(message="`message` cannot be changed")
    if "reference" in req:
        return get_error_data_result(message="`reference` cannot be changed")
    if "name" in req and not req.get("name"):
        return get_error_data_result(message="`name` cannot be empty.")
    if not ConversationService.update_by_id(conv_id, req):
        return get_error_data_result(message="Failed to update the session")
    return get_result()

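# A minimal usage sketch: renaming a session. Only fields other than `message`/`messages`
# and `reference` can be updated; placeholders as above.
#
#   curl -X PUT "http://<ragflow_host>/api/v1/chats/<chat_id>/sessions/<session_id>" \
#        -H "Content-Type: application/json" \
#        -H "Authorization: Bearer <RAGFLOW_API_KEY>" \
#        -d '{"name": "Renamed session"}'
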
@manager.route("/chats/<chat_id>/completions", methods=["POST"])  # noqa: F821
@token_required
def chat_completion(tenant_id, chat_id):
    req = request.json
    if not req:
        req = {"question": ""}
    if not req.get("session_id"):
        req["question"] = ""
    if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
        return get_error_data_result(f"You don't own the chat {chat_id}")
    if req.get("session_id"):
        if not ConversationService.query(id=req["session_id"], dialog_id=chat_id):
            return get_error_data_result(f"You don't own the session {req['session_id']}")
    if req.get("stream", True):
        resp = Response(rag_completion(tenant_id, chat_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp
    else:
        answer = None
        for ans in rag_completion(tenant_id, chat_id, **req):
            answer = ans
            break
        return get_result(data=answer)

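# A minimal usage sketch: asking a question in an existing session. With "stream": true
# (the default) the endpoint emits Server-Sent Events; with "stream": false it returns a
# single JSON result. The question text and placeholders are illustrative only.
#
#   curl -X POST "http://<ragflow_host>/api/v1/chats/<chat_id>/completions" \
#        -H "Content-Type: application/json" \
#        -H "Authorization: Bearer <RAGFLOW_API_KEY>" \
#        -d '{"question": "What is RAGFlow?", "session_id": "<session_id>", "stream": false}'
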
@manager.route("/chats_openai/<chat_id>/chat/completions", methods=["POST"])  # noqa: F821
@validate_request("model", "messages")  # noqa: F821
@token_required
def chat_completion_openai_like(tenant_id, chat_id):
    """
    OpenAI-like chat completion API that simulates the behavior of OpenAI's completions endpoint.

    This function allows users to interact with a model and receive responses based on a series of historical messages.
    If `stream` is set to True (the default), the response is streamed in chunks, mimicking the OpenAI-style API.
    If `stream` is explicitly set to False, the response is returned as a single complete answer.

    Reference:
        - If `stream` is True, the final answer and reference information appear in the **last chunk** of the stream.
        - If `stream` is False, the reference is included in `choices[0].message.reference`.

    Example usage:

        curl -X POST https://ragflow_address.com/api/v1/chats_openai/<chat_id>/chat/completions \
            -H "Content-Type: application/json" \
            -H "Authorization: Bearer $RAGFLOW_API_KEY" \
            -d '{
                "model": "model",
                "messages": [{"role": "user", "content": "Say this is a test!"}],
                "stream": true
            }'

    Alternatively, you can use Python's `OpenAI` client:

        from openai import OpenAI

        model = "model"
        client = OpenAI(api_key="ragflow-api-key", base_url=f"http://ragflow_address/api/v1/chats_openai/<chat_id>")
        stream = True
        reference = True

        completion = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Who are you?"},
                {"role": "assistant", "content": "I am an AI assistant named..."},
                {"role": "user", "content": "Can you tell me how to install neovim"},
            ],
            stream=stream,
            extra_body={"reference": reference},
        )

        if stream:
            for chunk in completion:
                print(chunk)
                if reference and chunk.choices[0].finish_reason == "stop":
                    print(f"Reference:\n{chunk.choices[0].delta.reference}")
                    print(f"Final content:\n{chunk.choices[0].delta.final_content}")
        else:
            print(completion.choices[0].message.content)
            if reference:
                print(completion.choices[0].message.reference)
    """
    req = request.get_json()

    need_reference = bool(req.get("reference", False))

    messages = req.get("messages", [])
    # To prevent empty [] input
    if len(messages) < 1:
        return get_error_data_result("You have to provide messages.")
    if messages[-1]["role"] != "user":
        return get_error_data_result("The last message of this conversation is not from the user.")

    prompt = messages[-1]["content"]
    # Treat context tokens as reasoning tokens
    context_token_used = sum(len(message["content"]) for message in messages)

    dia = DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value)
    if not dia:
        return get_error_data_result(f"You don't own the chat {chat_id}")
    dia = dia[0]

    # Filter out system messages and any assistant messages preceding the first user message
    msg = []
    for m in messages:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)

    # tools = get_tools()
    # toolcall_session = SimpleFunctionCallServer()
    tools = None
    toolcall_session = None

    if req.get("stream", True):
        # The value for the usage field on all chunks except for the last one will be null.
        # The usage field on the last chunk contains token usage statistics for the entire request.
        # The choices field on the last chunk will always be an empty array [].
        def streamed_response_generator(chat_id, dia, msg):
            token_used = 0
            answer_cache = ""
            reasoning_cache = ""
            last_ans = {}
            response = {
                "id": f"chatcmpl-{chat_id}",
                "choices": [
                    {
                        "delta": {
                            "content": "",
                            "role": "assistant",
                            "function_call": None,
                            "tool_calls": None,
                            "reasoning_content": "",
                        },
                        "finish_reason": None,
                        "index": 0,
                        "logprobs": None,
                    }
                ],
                "created": int(time.time()),
                "model": "model",
                "object": "chat.completion.chunk",
                "system_fingerprint": "",
                "usage": None,
            }

            try:
                for ans in chat(dia, msg, True, toolcall_session=toolcall_session, tools=tools, quote=need_reference):
                    last_ans = ans
                    answer = ans["answer"]

                    reasoning_match = re.search(r"<think>(.*?)</think>", answer, flags=re.DOTALL)
                    if reasoning_match:
                        reasoning_part = reasoning_match.group(1)
                        content_part = answer[reasoning_match.end() :]
                    else:
                        reasoning_part = ""
                        content_part = answer

                    reasoning_incremental = ""
                    if reasoning_part:
                        if reasoning_part.startswith(reasoning_cache):
                            reasoning_incremental = reasoning_part.replace(reasoning_cache, "", 1)
                        else:
                            reasoning_incremental = reasoning_part
                        reasoning_cache = reasoning_part

                    content_incremental = ""
                    if content_part:
                        if content_part.startswith(answer_cache):
                            content_incremental = content_part.replace(answer_cache, "", 1)
                        else:
                            content_incremental = content_part
                        answer_cache = content_part

                    token_used += len(reasoning_incremental) + len(content_incremental)

                    if not any([reasoning_incremental, content_incremental]):
                        continue

                    if reasoning_incremental:
                        response["choices"][0]["delta"]["reasoning_content"] = reasoning_incremental
                    else:
                        response["choices"][0]["delta"]["reasoning_content"] = None

                    if content_incremental:
                        response["choices"][0]["delta"]["content"] = content_incremental
                    else:
                        response["choices"][0]["delta"]["content"] = None

                    yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
            except Exception as e:
                response["choices"][0]["delta"]["content"] = "**ERROR**: " + str(e)
                yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"

            # The last chunk
            response["choices"][0]["delta"]["content"] = None
            response["choices"][0]["delta"]["reasoning_content"] = None
            response["choices"][0]["finish_reason"] = "stop"
            response["usage"] = {"prompt_tokens": len(prompt), "completion_tokens": token_used, "total_tokens": len(prompt) + token_used}
            if need_reference:
                response["choices"][0]["delta"]["reference"] = chunks_format(last_ans.get("reference", []))
                response["choices"][0]["delta"]["final_content"] = last_ans.get("answer", "")
            yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
            yield "data:[DONE]\n\n"

        resp = Response(streamed_response_generator(chat_id, dia, msg), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp
    else:
        answer = None
        for ans in chat(dia, msg, False, toolcall_session=toolcall_session, tools=tools, quote=need_reference):
            # focus answer content only
            answer = ans
            break
        content = answer["answer"]

        response = {
            "id": f"chatcmpl-{chat_id}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": req.get("model", ""),
            "usage": {
                "prompt_tokens": len(prompt),
                "completion_tokens": len(content),
                "total_tokens": len(prompt) + len(content),
                "completion_tokens_details": {
                    "reasoning_tokens": context_token_used,
                    "accepted_prediction_tokens": len(content),
                    "rejected_prediction_tokens": 0,  # 0 for simplicity
                },
            },
            "choices": [
                {
                    "message": {
                        "role": "assistant",
                        "content": content,
                    },
                    "logprobs": None,
                    "finish_reason": "stop",
                    "index": 0,
                }
            ],
        }
        if need_reference:
            response["choices"][0]["message"]["reference"] = chunks_format(answer.get("reference", []))
        return jsonify(response)

@manager.route("/agents_openai/<agent_id>/chat/completions", methods=["POST"])  # noqa: F821
@validate_request("model", "messages")  # noqa: F821
@token_required
def agents_completion_openai_compatibility(tenant_id, agent_id):
    req = request.json
    tiktokenenc = tiktoken.get_encoding("cl100k_base")
    messages = req.get("messages", [])
    if not messages:
        return get_error_data_result("You must provide at least one message.")
    if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
        return get_error_data_result(f"You don't own the agent {agent_id}")

    filtered_messages = [m for m in messages if m["role"] in ["user", "assistant"]]
    prompt_tokens = sum(len(tiktokenenc.encode(m["content"])) for m in filtered_messages)
    if not filtered_messages:
        return jsonify(
            get_data_openai(
                id=agent_id,
                content="No valid messages found (user or assistant).",
                finish_reason="stop",
                model=req.get("model", ""),
                completion_tokens=len(tiktokenenc.encode("No valid messages found (user or assistant).")),
                prompt_tokens=prompt_tokens,
            )
        )

    question = next((m["content"] for m in reversed(messages) if m["role"] == "user"), "")

    stream = req.pop("stream", False)
    if stream:
        resp = Response(
            completionOpenAI(
                tenant_id,
                agent_id,
                question,
                session_id=req.get("id", req.get("metadata", {}).get("id", "")),
                stream=True,
                **req,
            ),
            mimetype="text/event-stream",
        )
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp
    else:
        # For non-streaming, just return the response directly
        response = next(
            completionOpenAI(
                tenant_id,
                agent_id,
                question,
                session_id=req.get("id", req.get("metadata", {}).get("id", "")),
                stream=False,
                **req,
            )
        )
        return jsonify(response)

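# A minimal usage sketch with the official `openai` Python client, mirroring the
# chats_openai docstring above; host, agent_id, and key are placeholders, and the
# response is assumed to follow the OpenAI chat-completion shape produced by
# completionOpenAI()/get_data_openai().
#
#   from openai import OpenAI
#
#   client = OpenAI(api_key="<RAGFLOW_API_KEY>",
#                   base_url="http://<ragflow_host>/api/v1/agents_openai/<agent_id>")
#   completion = client.chat.completions.create(
#       model="model",
#       messages=[{"role": "user", "content": "Hello"}],
#       stream=False,
#   )
#   print(completion.choices[0].message.content)
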
@manager.route("/agents/<agent_id>/completions", methods=["POST"])  # noqa: F821
@token_required
def agent_completions(tenant_id, agent_id):
    req = request.json
    ans = {}
    if req.get("stream", True):

        def generate():
            for answer in agent_completion(tenant_id=tenant_id, agent_id=agent_id, **req):
                if isinstance(answer, str):
                    try:
                        ans = json.loads(answer[5:])  # remove "data:"
                    except Exception:
                        continue
                    if ans.get("event") != "message":
                        continue
                yield answer
            yield "data:[DONE]\n\n"

        resp = Response(generate(), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in agent_completion(tenant_id=tenant_id, agent_id=agent_id, **req):
        try:
            ans = json.loads(answer[5:])  # remove "data:"
        except Exception as e:
            return get_result(data=f"**ERROR**: {str(e)}")
        return get_result(data=ans)

@manager.route("/chats/<chat_id>/sessions", methods=["GET"])  # noqa: F821
@token_required
def list_session(tenant_id, chat_id):
    if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message=f"You don't own the assistant {chat_id}.")
    id = request.args.get("id")
    name = request.args.get("name")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "create_time")
    user_id = request.args.get("user_id")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
        desc = False
    else:
        desc = True
    convs = ConversationService.get_list(chat_id, page_number, items_per_page, orderby, desc, id, name, user_id)
    if not convs:
        return get_result(data=[])
    for conv in convs:
        conv["messages"] = conv.pop("message")
        infos = conv["messages"]
        for info in infos:
            if "prompt" in info:
                info.pop("prompt")
        conv["chat_id"] = conv.pop("dialog_id")
        ref_messages = conv["reference"]
        if ref_messages:
            messages = conv["messages"]
            message_num = 0
            ref_num = 0
            while message_num < len(messages) and ref_num < len(ref_messages):
                if messages[message_num]["role"] != "user":
                    chunk_list = []
                    if "chunks" in ref_messages[ref_num]:
                        chunks = ref_messages[ref_num]["chunks"]
                        for chunk in chunks:
                            new_chunk = {
                                "id": chunk.get("chunk_id", chunk.get("id")),
                                "content": chunk.get("content_with_weight", chunk.get("content")),
                                "document_id": chunk.get("doc_id", chunk.get("document_id")),
                                "document_name": chunk.get("docnm_kwd", chunk.get("document_name")),
                                "dataset_id": chunk.get("kb_id", chunk.get("dataset_id")),
                                "image_id": chunk.get("image_id", chunk.get("img_id")),
                                "positions": chunk.get("positions", chunk.get("position_int")),
                            }
                            chunk_list.append(new_chunk)
                    messages[message_num]["reference"] = chunk_list
                    ref_num += 1
                message_num += 1
        del conv["reference"]
    return get_result(data=convs)

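# A minimal usage sketch: listing the sessions of a chat assistant. All query parameters
# (page, page_size, orderby, desc, id, name, user_id) are optional; placeholders as above.
#
#   curl -X GET "http://<ragflow_host>/api/v1/chats/<chat_id>/sessions?page=1&page_size=30&orderby=create_time&desc=true" \
#        -H "Authorization: Bearer <RAGFLOW_API_KEY>"
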
@manager.route("/agents/<agent_id>/sessions", methods=["GET"])  # noqa: F821
@token_required
def list_agent_session(tenant_id, agent_id):
    if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
        return get_error_data_result(message=f"You don't own the agent {agent_id}.")
    id = request.args.get("id")
    user_id = request.args.get("user_id")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "update_time")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
        desc = False
    else:
        desc = True
    # dsl defaults to True in all cases except for False and false
    include_dsl = request.args.get("dsl") != "False" and request.args.get("dsl") != "false"
    total, convs = API4ConversationService.get_list(agent_id, tenant_id, page_number, items_per_page, orderby, desc, id, user_id, include_dsl)
    if not convs:
        return get_result(data=[])
    for conv in convs:
        conv["messages"] = conv.pop("message")
        infos = conv["messages"]
        for info in infos:
            if "prompt" in info:
                info.pop("prompt")
        conv["agent_id"] = conv.pop("dialog_id")
        if conv["reference"]:
            messages = conv["messages"]
            message_num = 0
            chunk_num = 0
            while message_num < len(messages):
                if message_num != 0 and messages[message_num]["role"] != "user":
                    chunk_list = []
                    if "chunks" in conv["reference"][chunk_num]:
                        chunks = conv["reference"][chunk_num]["chunks"]
                        for chunk in chunks:
                            new_chunk = {
                                "id": chunk.get("chunk_id", chunk.get("id")),
                                "content": chunk.get("content_with_weight", chunk.get("content")),
                                "document_id": chunk.get("doc_id", chunk.get("document_id")),
                                "document_name": chunk.get("docnm_kwd", chunk.get("document_name")),
                                "dataset_id": chunk.get("kb_id", chunk.get("dataset_id")),
                                "image_id": chunk.get("image_id", chunk.get("img_id")),
                                "positions": chunk.get("positions", chunk.get("position_int")),
                            }
                            chunk_list.append(new_chunk)
                        chunk_num += 1
                    messages[message_num]["reference"] = chunk_list
                message_num += 1
        del conv["reference"]
    return get_result(data=convs)

@manager.route("/chats/<chat_id>/sessions", methods=["DELETE"])  # noqa: F821
@token_required
def delete(tenant_id, chat_id):
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message="You don't own the chat")
    errors = []
    success_count = 0
    req = request.json
    convs = ConversationService.query(dialog_id=chat_id)
    if not req:
        ids = None
    else:
        ids = req.get("ids")

    if not ids:
        conv_list = []
        for conv in convs:
            conv_list.append(conv.id)
    else:
        conv_list = ids

    unique_conv_ids, duplicate_messages = check_duplicate_ids(conv_list, "session")
    conv_list = unique_conv_ids

    for id in conv_list:
        conv = ConversationService.query(id=id, dialog_id=chat_id)
        if not conv:
            errors.append(f"The chat doesn't own the session {id}")
            continue
        ConversationService.delete_by_id(id)
        success_count += 1

    if errors:
        if success_count > 0:
            return get_result(data={"success_count": success_count, "errors": errors}, message=f"Partially deleted {success_count} sessions with {len(errors)} errors")
        else:
            return get_error_data_result(message="; ".join(errors))

    if duplicate_messages:
        if success_count > 0:
            return get_result(message=f"Partially deleted {success_count} sessions with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages})
        else:
            return get_error_data_result(message=";".join(duplicate_messages))

    return get_result()

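# A minimal usage sketch: deleting sessions by id. Sending no body (or omitting "ids")
# deletes every session of the chat; placeholders as above.
#
#   curl -X DELETE "http://<ragflow_host>/api/v1/chats/<chat_id>/sessions" \
#        -H "Content-Type: application/json" \
#        -H "Authorization: Bearer <RAGFLOW_API_KEY>" \
#        -d '{"ids": ["<session_id_1>", "<session_id_2>"]}'
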
@manager.route("/agents/<agent_id>/sessions", methods=["DELETE"])  # noqa: F821
@token_required
def delete_agent_session(tenant_id, agent_id):
    errors = []
    success_count = 0
    req = request.json
    cvs = UserCanvasService.query(user_id=tenant_id, id=agent_id)
    if not cvs:
        return get_error_data_result(f"You don't own the agent {agent_id}")

    convs = API4ConversationService.query(dialog_id=agent_id)
    if not convs:
        return get_error_data_result(f"Agent {agent_id} has no sessions")

    if not req:
        ids = None
    else:
        ids = req.get("ids")

    if not ids:
        conv_list = []
        for conv in convs:
            conv_list.append(conv.id)
    else:
        conv_list = ids

    unique_conv_ids, duplicate_messages = check_duplicate_ids(conv_list, "session")
    conv_list = unique_conv_ids

    for session_id in conv_list:
        conv = API4ConversationService.query(id=session_id, dialog_id=agent_id)
        if not conv:
            errors.append(f"The agent doesn't own the session {session_id}")
            continue
        API4ConversationService.delete_by_id(session_id)
        success_count += 1

    if errors:
        if success_count > 0:
            return get_result(data={"success_count": success_count, "errors": errors}, message=f"Partially deleted {success_count} sessions with {len(errors)} errors")
        else:
            return get_error_data_result(message="; ".join(errors))

    if duplicate_messages:
        if success_count > 0:
            return get_result(message=f"Partially deleted {success_count} sessions with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages})
        else:
            return get_error_data_result(message=";".join(duplicate_messages))

    return get_result()

@manager.route("/sessions/ask", methods=["POST"])  # noqa: F821
@token_required
def ask_about(tenant_id):
    req = request.json
    if not req.get("question"):
        return get_error_data_result("`question` is required.")
    if not req.get("dataset_ids"):
        return get_error_data_result("`dataset_ids` is required.")
    if not isinstance(req.get("dataset_ids"), list):
        return get_error_data_result("`dataset_ids` should be a list.")
    req["kb_ids"] = req.pop("dataset_ids")
    for kb_id in req["kb_ids"]:
        if not KnowledgebaseService.accessible(kb_id, tenant_id):
            return get_error_data_result(f"You don't own the dataset {kb_id}.")
        kbs = KnowledgebaseService.query(id=kb_id)
        kb = kbs[0]
        if kb.chunk_num == 0:
            return get_error_data_result(f"The dataset {kb_id} has no parsed documents")
    uid = tenant_id

    def stream():
        nonlocal req, uid
        try:
            for ans in ask(req["question"], req["kb_ids"], uid):
                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
        except Exception as e:
            yield "data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e), "reference": []}}, ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

    resp = Response(stream(), mimetype="text/event-stream")
    resp.headers.add_header("Cache-control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")
    resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
    return resp

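# A minimal usage sketch: /sessions/ask always streams Server-Sent Events; the question
# text is illustrative and placeholders are as above.
#
#   curl -X POST "http://<ragflow_host>/api/v1/sessions/ask" \
#        -H "Content-Type: application/json" \
#        -H "Authorization: Bearer <RAGFLOW_API_KEY>" \
#        -d '{"question": "What is RAGFlow?", "dataset_ids": ["<dataset_id>"]}'
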
@manager.route("/sessions/related_questions", methods=["POST"])  # noqa: F821
@token_required
def related_questions(tenant_id):
    req = request.json
    if not req.get("question"):
        return get_error_data_result("`question` is required.")
    question = req["question"]
    industry = req.get("industry", "")
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    prompt = """
Objective: To generate search terms related to the user's search keywords, helping users find more valuable information.
Instructions:
- Based on the keywords provided by the user, generate 5-10 related search terms.
- Each search term should be directly or indirectly related to the keyword, guiding the user to find more valuable information.
- Use common, general terms as much as possible, avoiding obscure words or technical jargon.
- Keep the term length between 2-4 words, concise and clear.
- DO NOT translate, use the language of the original keywords.
"""
    if industry:
        prompt += f" - Ensure all search terms are relevant to the industry: {industry}.\n"
    prompt += """
### Example:
Keywords: Chinese football
Related search terms:
1. Current status of Chinese football
2. Reform of Chinese football
3. Youth training of Chinese football
4. Chinese football in the Asian Cup
5. Chinese football in the World Cup
Reason:
- When searching, users often only use one or two keywords, making it difficult to fully express their information needs.
- Generating related search terms can help users dig deeper into relevant information and improve search efficiency.
- At the same time, related terms can also help search engines better understand user needs and return more accurate search results.
"""
    ans = chat_mdl.chat(
        prompt,
        [
            {
                "role": "user",
                "content": f"""
Keywords: {question}
Related search terms:
""",
            }
        ],
        {"temperature": 0.9},
    )
    return get_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])

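# A minimal usage sketch: generating related search terms. `industry` is optional and the
# example values are illustrative; placeholders as above.
#
#   curl -X POST "http://<ragflow_host>/api/v1/sessions/related_questions" \
#        -H "Content-Type: application/json" \
#        -H "Authorization: Bearer <RAGFLOW_API_KEY>" \
#        -d '{"question": "Chinese football", "industry": "sports"}'
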
@manager.route("/chatbots/<dialog_id>/completions", methods=["POST"])  # noqa: F821
def chatbot_completions(dialog_id):
    req = request.json

    token = request.headers.get("Authorization").split()
    if len(token) != 2:
        return get_error_data_result(message="Authorization is not valid!")
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message="Authentication error: API key is invalid!")

    if "quote" not in req:
        req["quote"] = False

    if req.get("stream", True):
        resp = Response(iframe_completion(dialog_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in iframe_completion(dialog_id, **req):
        return get_result(data=answer)

@manager.route("/agentbots/<agent_id>/completions", methods=["POST"])  # noqa: F821
def agent_bot_completions(agent_id):
    req = request.json

    token = request.headers.get("Authorization").split()
    if len(token) != 2:
        return get_error_data_result(message="Authorization is not valid!")
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message="Authentication error: API key is invalid!")

    if req.get("stream", True):
        resp = Response(agent_completion(objs[0].tenant_id, agent_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in agent_completion(objs[0].tenant_id, agent_id, **req):
        return get_result(data=answer)

@manager.route("/agentbots/<agent_id>/inputs", methods=["GET"])  # noqa: F821
def begin_inputs(agent_id):
    token = request.headers.get("Authorization").split()
    if len(token) != 2:
        return get_error_data_result(message="Authorization is not valid!")
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message="Authentication error: API key is invalid!")

    e, cvs = UserCanvasService.get_by_id(agent_id)
    if not e:
        return get_error_data_result(f"Can't find agent by ID: {agent_id}")

    canvas = Canvas(json.dumps(cvs.dsl), objs[0].tenant_id)
    return get_result(
        data={
            "title": cvs.title,
            "avatar": cvs.avatar,
            "inputs": canvas.get_component_input_form("begin"),
            "prologue": canvas.get_prologue(),
        }
    )

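# A minimal usage sketch: fetching the agent's `begin` inputs and prologue. Like the other
# /chatbots and /agentbots routes, this authenticates with an APIToken beta token rather
# than the regular API key; placeholders as above.
#
#   curl -X GET "http://<ragflow_host>/api/v1/agentbots/<agent_id>/inputs" \
#        -H "Authorization: Bearer <BETA_TOKEN>"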