You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

generate.py 12KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276
  1. #
  2. # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. import json
  17. import re
  18. from functools import partial
  19. from typing import Any
  20. import pandas as pd
  21. from api.db import LLMType
  22. from api.db.services.conversation_service import structure_answer
  23. from api.db.services.llm_service import LLMBundle
  24. from api import settings
  25. from agent.component.base import ComponentBase, ComponentParamBase
  26. from plugin import GlobalPluginManager
  27. from plugin.llm_tool_plugin import llm_tool_metadata_to_openai_tool
  28. from rag.llm.chat_model import ToolCallSession
  29. from rag.prompts import message_fit_in
  30. class LLMToolPluginCallSession(ToolCallSession):
  31. def tool_call(self, name: str, arguments: dict[str, Any]) -> str:
  32. tool = GlobalPluginManager.get_llm_tool_by_name(name)
  33. if tool is None:
  34. raise ValueError(f"LLM tool {name} does not exist")
  35. return tool().invoke(**arguments)
  36. class GenerateParam(ComponentParamBase):
  37. """
  38. Define the Generate component parameters.
  39. """
  40. def __init__(self):
  41. super().__init__()
  42. self.llm_id = ""
  43. self.prompt = ""
  44. self.max_tokens = 0
  45. self.temperature = 0
  46. self.top_p = 0
  47. self.presence_penalty = 0
  48. self.frequency_penalty = 0
  49. self.cite = True
  50. self.parameters = []
  51. self.llm_enabled_tools = []
  52. def check(self):
  53. self.check_decimal_float(self.temperature, "[Generate] Temperature")
  54. self.check_decimal_float(self.presence_penalty, "[Generate] Presence penalty")
  55. self.check_decimal_float(self.frequency_penalty, "[Generate] Frequency penalty")
  56. self.check_nonnegative_number(self.max_tokens, "[Generate] Max tokens")
  57. self.check_decimal_float(self.top_p, "[Generate] Top P")
  58. self.check_empty(self.llm_id, "[Generate] LLM")
  59. # self.check_defined_type(self.parameters, "Parameters", ["list"])
  60. def gen_conf(self):
  61. conf = {}
  62. if self.max_tokens > 0:
  63. conf["max_tokens"] = self.max_tokens
  64. if self.temperature > 0:
  65. conf["temperature"] = self.temperature
  66. if self.top_p > 0:
  67. conf["top_p"] = self.top_p
  68. if self.presence_penalty > 0:
  69. conf["presence_penalty"] = self.presence_penalty
  70. if self.frequency_penalty > 0:
  71. conf["frequency_penalty"] = self.frequency_penalty
  72. return conf
class Generate(ComponentBase):
    """Agent-canvas component that fills a prompt template with upstream
    component outputs and queries a chat LLM, optionally streaming the answer
    and inserting citations from retrieval results."""

    component_name = "Generate"

    def get_dependent_components(self):
        """Return ids of upstream components referenced in the prompt,
        excluding 'answer' and 'begin' components."""
        inputs = self.get_input_elements()
        # inputs[0] is the synthetic "user" entry added by get_input_elements.
        cpnts = set([i["key"] for i in inputs[1:] if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
        return list(cpnts)

    def set_cite(self, retrieval_res, answer):
        """Insert citation markers into *answer* using the retrieval chunks and
        return a structured response dict with a 'reference' section.

        retrieval_res: pandas DataFrame — assumes a 'chunks' column whose first
        cell is a JSON list of chunk dicts with 'content_ltks', 'vector',
        'doc_id' and 'docnm_kwd' keys (TODO confirm against retrieval component).
        """
        if "empty_response" in retrieval_res.columns:
            retrieval_res["empty_response"].fillna("", inplace=True)
        chunks = json.loads(retrieval_res["chunks"][0])
        # Citation matching mixes token overlap (0.7) and embedding similarity (0.3).
        answer, idx = settings.retrievaler.insert_citations(answer,
                                                            [ck["content_ltks"] for ck in chunks],
                                                            [ck["vector"] for ck in chunks],
                                                            LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
                                                                      self._canvas.get_embedding_model()), tkweight=0.7,
                                                            vtweight=0.3)
        # Collect each cited document once, preserving citation order.
        doc_ids = set([])
        recall_docs = []
        for i in idx:
            did = chunks[int(i)]["doc_id"]
            if did in doc_ids:
                continue
            doc_ids.add(did)
            recall_docs.append({"doc_id": did, "doc_name": chunks[int(i)]["docnm_kwd"]})
        # Strip heavy fields before returning chunks to the client.
        for c in chunks:
            del c["vector"]
            del c["content_ltks"]
        reference = {
            "chunks": chunks,
            "doc_aggs": recall_docs
        }
        # Surface a configuration hint when the model reports an API-key error.
        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
        res = {"content": answer, "reference": reference}
        res = structure_answer(None, res, "", "")
        return res

    def get_input_elements(self):
        """Parse the prompt template for {component_id} / {begin@param}
        placeholders and return [{'key', 'name'}, ...], always starting with a
        synthetic 'user' entry."""
        key_set = set([])
        res = [{"key": "user", "name": "Input your question here:"}]
        for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.prompt, flags=re.IGNORECASE):
            cpn_id = r.group(1)
            if cpn_id in key_set:
                continue
            if cpn_id.lower().find("begin@") == 0:
                # "begin@<param>" placeholders resolve against the Begin
                # component's declared query parameters.
                cpn_id, key = cpn_id.split("@")
                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                    if p["key"] != key:
                        continue
                    res.append({"key": r.group(1), "name": p["name"]})
                    key_set.add(r.group(1))
                continue
            cpn_nm = self._canvas.get_component_name(cpn_id)
            if not cpn_nm:
                # Unknown component id — silently skipped.
                continue
            res.append({"key": cpn_id, "name": cpn_nm})
            key_set.add(cpn_id)
        return res

    def _run(self, history, **kwargs):
        """Resolve prompt placeholders from upstream outputs, then either
        return a streaming partial (when downstream is a single Answer
        component and stream=True) or a DataFrame with the chat answer."""
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        if len(self._param.llm_enabled_tools) > 0:
            # Expose the enabled plugin tools to the model as OpenAI-style tools.
            tools = GlobalPluginManager.get_llm_tools_by_names(self._param.llm_enabled_tools)
            chat_mdl.bind_tools(
                LLMToolPluginCallSession(),
                [llm_tool_metadata_to_openai_tool(t.get_metadata()) for t in tools]
            )

        prompt = self._param.prompt

        retrieval_res = []
        self._param.inputs = []
        # Fill kwargs with the content of every placeholder (skip the "user" entry).
        for para in self.get_input_elements()[1:]:
            if para["key"].lower().find("begin@") == 0:
                cpn_id, key = para["key"].split("@")
                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                    if p["key"] == key:
                        kwargs[para["key"]] = p.get("value", "")
                        self._param.inputs.append(
                            {"component_id": para["key"], "content": kwargs[para["key"]]})
                        break
                else:
                    # for/else: no matching Begin parameter was found.
                    assert False, f"Can't find parameter '{key}' for {cpn_id}"
                continue

            component_id = para["key"]
            cpn = self._canvas.get_component(component_id)["obj"]
            if cpn.component_name.lower() == "answer":
                # Answer components contribute the latest history turn instead
                # of a computed output.
                hist = self._canvas.get_history(1)
                if hist:
                    hist = hist[0]["content"]
                else:
                    hist = ""
                kwargs[para["key"]] = hist
                continue

            _, out = cpn.output(allow_partial=False)
            if "content" not in out.columns:
                kwargs[para["key"]] = ""
            else:
                if cpn.component_name.lower() == "retrieval":
                    # Keep retrieval frames separately for citation/empty-response handling.
                    retrieval_res.append(out)
                kwargs[para["key"]] = "  - " + "\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
            self._param.inputs.append({"component_id": para["key"], "content": kwargs[para["key"]]})

        if retrieval_res:
            retrieval_res = pd.concat(retrieval_res, ignore_index=True)
        else:
            retrieval_res = pd.DataFrame([])

        # Substitute each placeholder; backslashes in values are flattened to
        # spaces so they cannot act as regex replacement escapes.
        for n, v in kwargs.items():
            prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)

        if not self._param.inputs and prompt.find("{input}") >= 0:
            # Fallback: no explicit placeholders — feed the default input into {input}.
            retrieval_res = self.get_input()
            input = (" - " + "\n - ".join(
                [c for c in retrieval_res["content"] if isinstance(c, str)])) if "content" in retrieval_res else ""
            # NOTE(review): re.escape() on a *replacement* string inserts
            # backslashes that re.sub interprets as replacement escapes; this
            # looks wrong (and `input` shadows the builtin) — verify upstream.
            prompt = re.sub(r"\{input\}", re.escape(input), prompt)

        downstreams = self._canvas.get_component(self._id)["downstream"]
        if kwargs.get("stream") and len(downstreams) == 1 and self._canvas.get_component(downstreams[0])[
                "obj"].component_name.lower() == "answer":
            # Defer to the streaming generator; the Answer component consumes it.
            return partial(self.stream_output, chat_mdl, prompt, retrieval_res)

        if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
            # Retrieval found nothing: answer with the configured empty response.
            empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
            return pd.DataFrame([res])

        msg = self._canvas.get_history(self._param.message_history_window_size)
        if len(msg) < 1:
            msg.append({"role": "user", "content": "Output: "})
        # Trim history so prompt + messages fit within 97% of the model context.
        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
        if len(msg) < 2:
            msg.append({"role": "user", "content": "Output: "})
        ans = chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf())
        # Strip any chain-of-thought emitted before a </think> marker.
        ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
        self._canvas.set_component_infor(self._id, {"prompt":msg[0]["content"],"messages": msg[1:],"conf": self._param.gen_conf()})

        if self._param.cite and "chunks" in retrieval_res.columns:
            res = self.set_cite(retrieval_res, ans)
            return pd.DataFrame([res])

        return Generate.be_output(ans)

    def stream_output(self, chat_mdl, prompt, retrieval_res):
        """Generator variant of _run's tail: yields partial answers as
        {'content', 'reference'} dicts and records the final output."""
        res = None
        if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
            empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
            yield res
            self.set_output(res)
            return

        msg = self._canvas.get_history(self._param.message_history_window_size)
        # The first message must not be an assistant turn for the chat API.
        if msg and msg[0]['role'] == 'assistant':
            msg.pop(0)
        if len(msg) < 1:
            msg.append({"role": "user", "content": "Output: "})
        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
        if len(msg) < 2:
            msg.append({"role": "user", "content": "Output: "})
        answer = ""
        # Each streamed chunk carries the full answer so far (presumably
        # cumulative — the last yield holds the complete text; verify against
        # chat_streamly's contract).
        for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf()):
            res = {"content": ans, "reference": []}
            answer = ans
            yield res

        if self._param.cite and "chunks" in retrieval_res.columns:
            # Re-yield the final answer with citations inserted.
            res = self.set_cite(retrieval_res, answer)
            yield res

        self._canvas.set_component_infor(self._id, {"prompt":msg[0]["content"],"messages": msg[1:],"conf": self._param.gen_conf()})
        self.set_output(Generate.be_output(res))

    def debug(self, **kwargs):
        """Single-shot debug run: substitute debug inputs into the prompt and
        return the raw model answer as a one-row DataFrame."""
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        prompt = self._param.prompt
        for para in self._param.debug_inputs:
            kwargs[para["key"]] = para.get("value", "")
        for n, v in kwargs.items():
            prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)
        u = kwargs.get("user")
        ans = chat_mdl.chat(prompt, [{"role": "user", "content": u if u else "Output: "}], self._param.gen_conf())
        return pd.DataFrame([ans])