#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import re
from copy import deepcopy
from functools import partial
from typing import Any, Generator

import json_repair

from agent.component.base import ComponentBase, ComponentParamBase
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from api.db.services.tenant_llm_service import TenantLLMService
from api.utils.api_utils import timeout
from rag.prompts import message_fit_in, citation_prompt
from rag.prompts.prompts import tool_call_summary
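

# Parameter object for the LLM component. Sampling values default to 0,
# which in practice means "leave the provider default": gen_conf() below
# only forwards a value when it is positive and its *Enabled flag is set.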
class LLMParam(ComponentParamBase):
    """
    Define the LLM component parameters.
    """

    def __init__(self):
        super().__init__()
        self.llm_id = ""
        self.sys_prompt = ""
        self.prompts = [{"role": "user", "content": "{sys.query}"}]
        self.max_tokens = 0
        self.temperature = 0
        self.top_p = 0
        self.presence_penalty = 0
        self.frequency_penalty = 0
        self.output_structure = None
        self.cite = True
        self.visual_files_var = None
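
    # Validate the configuration before the component runs: sampling
    # parameters must be well-formed numbers, and the model id, system
    # prompt, and user prompts must be non-empty.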
    def check(self):
        self.check_decimal_float(float(self.temperature), "[Agent] Temperature")
        self.check_decimal_float(float(self.presence_penalty), "[Agent] Presence penalty")
        self.check_decimal_float(float(self.frequency_penalty), "[Agent] Frequency penalty")
        self.check_nonnegative_number(int(self.max_tokens), "[Agent] Max tokens")
        self.check_decimal_float(float(self.top_p), "[Agent] Top P")
        self.check_empty(self.llm_id, "[Agent] LLM")
        self.check_empty(self.sys_prompt, "[Agent] System prompt")
        self.check_empty(self.prompts, "[Agent] User prompt")
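
    # Assemble the generation config passed to the model. Each knob is
    # forwarded only when it is positive AND its matching *Enabled flag
    # is set; get_attr reads the flag defensively in case the attribute
    # is absent.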
    def gen_conf(self):
        conf = {}

        def get_attr(nm):
            try:
                return getattr(self, nm)
            except Exception:
                pass

        if int(self.max_tokens) > 0 and get_attr("maxTokensEnabled"):
            conf["max_tokens"] = int(self.max_tokens)
        if float(self.temperature) > 0 and get_attr("temperatureEnabled"):
            conf["temperature"] = float(self.temperature)
        if float(self.top_p) > 0 and get_attr("topPEnabled"):
            conf["top_p"] = float(self.top_p)
        if float(self.presence_penalty) > 0 and get_attr("presencePenaltyEnabled"):
            conf["presence_penalty"] = float(self.presence_penalty)
        if float(self.frequency_penalty) > 0 and get_attr("frequencyPenaltyEnabled"):
            conf["frequency_penalty"] = float(self.frequency_penalty)
        return conf
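

# The LLM component itself: it resolves prompt variables from the canvas,
# calls the chat model (optionally with image attachments), and writes the
# result to the "content" or "structured_content" output.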
class LLM(ComponentBase):
    component_name = "LLM"

    def __init__(self, canvas, id, param: ComponentParamBase):
        super().__init__(canvas, id, param)
        self.chat_mdl = LLMBundle(self._canvas.get_tenant_id(), TenantLLMService.llm_id2llm_type(self._param.llm_id),
                                  self._param.llm_id, max_retries=self._param.max_retries,
                                  retry_interval=self._param.delay_after_error
                                  )
        self.imgs = []
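
    # Expose every {placeholder} referenced by the prompts as a simple
    # one-line input field for the front end.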
    def get_input_form(self) -> dict[str, dict]:
        res = {}
        for k, v in self.get_input_elements().items():
            res[k] = {
                "type": "line",
                "name": v["name"]
            }
        return res

    def get_input_elements(self) -> dict[str, Any]:
        res = self.get_input_elements_from_text(self._param.sys_prompt)
        for prompt in self._param.prompts:
            d = self.get_input_elements_from_text(prompt["content"])
            res.update(d)
        return res

    def set_debug_inputs(self, inputs: dict[str, dict]):
        self._param.debug_inputs = inputs

    def add2system_prompt(self, txt):
        self._param.sys_prompt += txt
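
    # Build the (system prompt, message list) pair for one turn: collect
    # visual attachments, substitute {placeholders} with canvas values
    # (JSON-encoding non-string values), splice the configured prompts
    # onto the conversation history, and append the citation prompt when
    # reference chunks are available.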
    def _prepare_prompt_variables(self):
        if self._param.visual_files_var:
            self.imgs = self._canvas.get_variable_value(self._param.visual_files_var)
            if not self.imgs:
                self.imgs = []
            self.imgs = [img for img in self.imgs if img.startswith("data:image/")]
        if self.imgs and TenantLLMService.llm_id2llm_type(self._param.llm_id) == LLMType.CHAT.value:
            self.chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.IMAGE2TEXT.value,
                                      self._param.llm_id, max_retries=self._param.max_retries,
                                      retry_interval=self._param.delay_after_error
                                      )

        args = {}
        vars = self.get_input_elements() if not self._param.debug_inputs else self._param.debug_inputs
        prompt = self._param.sys_prompt
        for k, o in vars.items():
            args[k] = o["value"]
            if not isinstance(args[k], str):
                try:
                    args[k] = json.dumps(args[k], ensure_ascii=False)
                except Exception:
                    args[k] = str(args[k])
            self.set_input_value(k, args[k])

        msg = self._canvas.get_history(self._param.message_history_window_size)[:-1]
        msg.extend(deepcopy(self._param.prompts))
        prompt = self.string_format(prompt, args)
        for m in msg:
            m["content"] = self.string_format(m["content"], args)
        if self._param.cite and self._canvas.get_reference()["chunks"]:
            prompt += citation_prompt()
        return prompt, msg
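
    # One-shot (non-streaming) completion; msg[0] carries the system prompt.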
    def _generate(self, msg: list[dict], **kwargs) -> str:
        if not self.imgs:
            return self.chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf(), **kwargs)
        return self.chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf(), images=self.imgs, **kwargs)
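
    # Streaming completion. chat_streamly yields the accumulated text so
    # far, so delta() slices out only the new suffix; "<think>"/"</think>"
    # reasoning markers are re-emitted as standalone tokens and stripped
    # from the surrounding text.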
    def _generate_streamly(self, msg: list[dict], **kwargs) -> Generator[str, None, None]:
        ans = ""
        last_idx = 0
        endswith_think = False

        def delta(txt):
            nonlocal ans, last_idx, endswith_think
            delta_ans = txt[last_idx:]
            ans = txt

            if delta_ans.find("<think>") == 0:
                last_idx += len("<think>")
                return "<think>"
            elif delta_ans.find("<think>") > 0:
                think_pos = delta_ans.find("<think>")
                delta_ans = txt[last_idx:last_idx + think_pos]
                last_idx += think_pos
                return delta_ans
            elif delta_ans.endswith("</think>"):
                endswith_think = True
            elif endswith_think:
                endswith_think = False
                return "</think>"

            last_idx = len(ans)
            if ans.endswith("</think>"):
                last_idx -= len("</think>")
            return re.sub(r"(<think>|</think>)", "", delta_ans)

        if not self.imgs:
            for txt in self.chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf(), **kwargs):
                yield delta(txt)
        else:
            for txt in self.chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf(), images=self.imgs, **kwargs):
                yield delta(txt)
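
    # Entry point. Three paths: (1) structured output, retrying until the
    # answer parses as JSON; (2) hand a streaming generator downstream when
    # a Message component follows and no exception-goto is configured;
    # (3) plain completion with retries, falling back to the exception
    # default value on persistent errors.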
    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10 * 60)))
    def _invoke(self, **kwargs):
        def clean_formatted_answer(ans: str) -> str:
            ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
            ans = re.sub(r"^.*```json", "", ans, flags=re.DOTALL)
            return re.sub(r"```\n*$", "", ans, flags=re.DOTALL)

        prompt, msg = self._prepare_prompt_variables()
        error = ""

        if self._param.output_structure:
            prompt += "\nThe output MUST follow this JSON format:\n" + json.dumps(self._param.output_structure, ensure_ascii=False, indent=2)
            prompt += "\nRedundant information is FORBIDDEN."
            for _ in range(self._param.max_retries + 1):
                _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
                error = ""
                ans = self._generate(msg)
                msg.pop(0)
                if ans.find("**ERROR**") >= 0:
                    logging.error(f"LLM response error: {ans}")
                    error = ans
                    continue
                try:
                    self.set_output("structured_content", json_repair.loads(clean_formatted_answer(ans)))
                    return
                except Exception:
                    msg.append({"role": "user", "content": "The answer cannot be parsed as JSON."})
                    error = "The answer cannot be parsed as JSON."
            if error:
                self.set_output("_ERROR", error)
            return

        downstreams = self._canvas.get_component(self._id)["downstream"] if self._canvas.get_component(self._id) else []
        ex = self.exception_handler()
        if any([self._canvas.get_component_obj(cid).component_name.lower() == "message" for cid in downstreams]) and not self._param.output_structure and not (ex and ex["goto"]):
            self.set_output("content", partial(self._stream_output, prompt, msg))
            return

        for _ in range(self._param.max_retries + 1):
            _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
            error = ""
            ans = self._generate(msg)
            msg.pop(0)
            if ans.find("**ERROR**") >= 0:
                logging.error(f"LLM response error: {ans}")
                error = ans
                continue
            self.set_output("content", ans)
            break

        if error:
            if self.get_exception_default_value():
                self.set_output("content", self.get_exception_default_value())
            else:
                self.set_output("_ERROR", error)
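
    # Generator handed to a downstream Message component: re-yields model
    # deltas, accumulates the full answer into the "content" output, and
    # substitutes the exception default value if the model reports an error.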
    def _stream_output(self, prompt, msg):
        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
        answer = ""
        for ans in self._generate_streamly(msg):
            if ans.find("**ERROR**") >= 0:
                if self.get_exception_default_value():
                    self.set_output("content", self.get_exception_default_value())
                    yield self.get_exception_default_value()
                else:
                    self.set_output("_ERROR", ans)
                return
            yield ans
            answer += ans
        self.set_output("content", answer)
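
    # Summarize a tool call with the chat model and persist it in the
    # canvas memory for later turns.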
    def add_memory(self, user: str, assist: str, func_name: str, params: dict, results: str):
        summ = tool_call_summary(self.chat_mdl, func_name, params, results)
        logging.info(f"[MEMORY]: {summ}")
        self._canvas.add_memory(user, assist, summ)
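
    # Short "working on it" status message, echoing the user's query with
    # escape characters stripped.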
    def thoughts(self) -> str:
        _, msg = self._prepare_prompt_variables()
        return "⌛Give me a moment—starting from: \n\n" + re.sub(r"(User's query:|[\\]+)", '', msg[-1]['content'], flags=re.DOTALL) + "\n\nI’ll figure out our best next move."