You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

generate.py 9.3KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213
  1. #
  2. # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. import re
  17. from functools import partial
  18. import pandas as pd
  19. from api.db import LLMType
  20. from api.db.services.dialog_service import message_fit_in
  21. from api.db.services.llm_service import LLMBundle
  22. from api import settings
  23. from agent.component.base import ComponentBase, ComponentParamBase
  24. class GenerateParam(ComponentParamBase):
  25. """
  26. Define the Generate component parameters.
  27. """
  28. def __init__(self):
  29. super().__init__()
  30. self.llm_id = ""
  31. self.prompt = ""
  32. self.max_tokens = 0
  33. self.temperature = 0
  34. self.top_p = 0
  35. self.presence_penalty = 0
  36. self.frequency_penalty = 0
  37. self.cite = True
  38. self.parameters = []
  39. def check(self):
  40. self.check_decimal_float(self.temperature, "[Generate] Temperature")
  41. self.check_decimal_float(self.presence_penalty, "[Generate] Presence penalty")
  42. self.check_decimal_float(self.frequency_penalty, "[Generate] Frequency penalty")
  43. self.check_nonnegative_number(self.max_tokens, "[Generate] Max tokens")
  44. self.check_decimal_float(self.top_p, "[Generate] Top P")
  45. self.check_empty(self.llm_id, "[Generate] LLM")
  46. # self.check_defined_type(self.parameters, "Parameters", ["list"])
  47. def gen_conf(self):
  48. conf = {}
  49. if self.max_tokens > 0:
  50. conf["max_tokens"] = self.max_tokens
  51. if self.temperature > 0:
  52. conf["temperature"] = self.temperature
  53. if self.top_p > 0:
  54. conf["top_p"] = self.top_p
  55. if self.presence_penalty > 0:
  56. conf["presence_penalty"] = self.presence_penalty
  57. if self.frequency_penalty > 0:
  58. conf["frequency_penalty"] = self.frequency_penalty
  59. return conf
  60. class Generate(ComponentBase):
  61. component_name = "Generate"
  62. def get_dependent_components(self):
  63. cpnts = set([para["component_id"].split("@")[0] for para in self._param.parameters \
  64. if para.get("component_id") \
  65. and para["component_id"].lower().find("answer") < 0 \
  66. and para["component_id"].lower().find("begin") < 0])
  67. return list(cpnts)
  68. def set_cite(self, retrieval_res, answer):
  69. retrieval_res = retrieval_res.dropna(subset=["vector", "content_ltks"]).reset_index(drop=True)
  70. if "empty_response" in retrieval_res.columns:
  71. retrieval_res["empty_response"].fillna("", inplace=True)
  72. answer, idx = settings.retrievaler.insert_citations(answer,
  73. [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
  74. [ck["vector"] for _, ck in retrieval_res.iterrows()],
  75. LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
  76. self._canvas.get_embedding_model()), tkweight=0.7,
  77. vtweight=0.3)
  78. doc_ids = set([])
  79. recall_docs = []
  80. for i in idx:
  81. did = retrieval_res.loc[int(i), "doc_id"]
  82. if did in doc_ids:
  83. continue
  84. doc_ids.add(did)
  85. recall_docs.append({"doc_id": did, "doc_name": retrieval_res.loc[int(i), "docnm_kwd"]})
  86. del retrieval_res["vector"]
  87. del retrieval_res["content_ltks"]
  88. reference = {
  89. "chunks": [ck.to_dict() for _, ck in retrieval_res.iterrows()],
  90. "doc_aggs": recall_docs
  91. }
  92. if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
  93. answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
  94. res = {"content": answer, "reference": reference}
  95. return res
  96. def _run(self, history, **kwargs):
  97. chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
  98. prompt = self._param.prompt
  99. retrieval_res = []
  100. self._param.inputs = []
  101. for para in self._param.parameters:
  102. if not para.get("component_id"):
  103. continue
  104. component_id = para["component_id"].split("@")[0]
  105. if para["component_id"].lower().find("@") >= 0:
  106. cpn_id, key = para["component_id"].split("@")
  107. for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
  108. if p["key"] == key:
  109. kwargs[para["key"]] = p.get("value", "")
  110. self._param.inputs.append(
  111. {"component_id": para["component_id"], "content": kwargs[para["key"]]})
  112. break
  113. else:
  114. assert False, f"Can't find parameter '{key}' for {cpn_id}"
  115. continue
  116. cpn = self._canvas.get_component(component_id)["obj"]
  117. if cpn.component_name.lower() == "answer":
  118. hist = self._canvas.get_history(1)
  119. if hist:
  120. hist = hist[0]["content"]
  121. else:
  122. hist = ""
  123. kwargs[para["key"]] = hist
  124. continue
  125. _, out = cpn.output(allow_partial=False)
  126. if "content" not in out.columns:
  127. kwargs[para["key"]] = ""
  128. else:
  129. if cpn.component_name.lower() == "retrieval":
  130. retrieval_res.append(out)
  131. kwargs[para["key"]] = " - "+"\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
  132. self._param.inputs.append({"component_id": para["component_id"], "content": kwargs[para["key"]]})
  133. if retrieval_res:
  134. retrieval_res = pd.concat(retrieval_res, ignore_index=True)
  135. else:
  136. retrieval_res = pd.DataFrame([])
  137. for n, v in kwargs.items():
  138. prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)
  139. if not self._param.inputs and prompt.find("{input}") >= 0:
  140. retrieval_res = self.get_input()
  141. input = (" - " + "\n - ".join(
  142. [c for c in retrieval_res["content"] if isinstance(c, str)])) if "content" in retrieval_res else ""
  143. prompt = re.sub(r"\{input\}", re.escape(input), prompt)
  144. downstreams = self._canvas.get_component(self._id)["downstream"]
  145. if kwargs.get("stream") and len(downstreams) == 1 and self._canvas.get_component(downstreams[0])[
  146. "obj"].component_name.lower() == "answer":
  147. return partial(self.stream_output, chat_mdl, prompt, retrieval_res)
  148. if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
  149. res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
  150. retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
  151. return pd.DataFrame([res])
  152. msg = self._canvas.get_history(self._param.message_history_window_size)
  153. if len(msg) < 1:
  154. msg.append({"role": "user", "content": ""})
  155. _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
  156. if len(msg) < 2:
  157. msg.append({"role": "user", "content": ""})
  158. ans = chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf())
  159. if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
  160. res = self.set_cite(retrieval_res, ans)
  161. return pd.DataFrame([res])
  162. return Generate.be_output(ans)
  163. def stream_output(self, chat_mdl, prompt, retrieval_res):
  164. res = None
  165. if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
  166. res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
  167. retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
  168. yield res
  169. self.set_output(res)
  170. return
  171. msg = self._canvas.get_history(self._param.message_history_window_size)
  172. if len(msg) < 1:
  173. msg.append({"role": "user", "content": ""})
  174. _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
  175. if len(msg) < 2:
  176. msg.append({"role": "user", "content": ""})
  177. answer = ""
  178. for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf()):
  179. res = {"content": ans, "reference": []}
  180. answer = ans
  181. yield res
  182. if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
  183. res = self.set_cite(retrieval_res, answer)
  184. yield res
  185. self.set_output(res)