
generate.py 6.3KB

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from functools import partial

import pandas as pd

from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from api.settings import retrievaler
from graph.component.base import ComponentBase, ComponentParamBase


class GenerateParam(ComponentParamBase):
    """
    Define the Generate component parameters.
    """

    def __init__(self):
        super().__init__()
        self.llm_id = ""
        self.prompt = ""
        self.max_tokens = 256
        self.temperature = 0.1
        self.top_p = 0.3
        self.presence_penalty = 0.4
        self.frequency_penalty = 0.7
        self.cite = True
        # Referenced by Generate._run() and stream_output() but missing from
        # the original listing; 12 is an assumed default history window size.
        self.message_history_window_size = 12
        # self.parameters = []
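
    # Fail fast on bad configuration; the check_* helpers are provided by the
    # param base class and raise when a value is out of range or empty.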
    def check(self):
        self.check_decimal_float(self.temperature, "Temperature")
        self.check_decimal_float(self.presence_penalty, "Presence penalty")
        self.check_decimal_float(self.frequency_penalty, "Frequency penalty")
        self.check_positive_number(self.max_tokens, "Max tokens")
        self.check_decimal_float(self.top_p, "Top P")
        self.check_empty(self.llm_id, "LLM")
        # self.check_defined_type(self.parameters, "Parameters", ["list"])
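
    # Bundle the sampling parameters into the dict shape that
    # LLMBundle.chat()/chat_streamly() expect as their generation config.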
    def gen_conf(self):
        return {
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "presence_penalty": self.presence_penalty,
            "frequency_penalty": self.frequency_penalty,
        }


class Generate(ComponentBase):
    component_name = "Generate"
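
    # Fill the prompt template with the retrieved chunks (and any other
    # kwargs), then either stream the answer or return it in one shot.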
    def _run(self, history, **kwargs):
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        prompt = self._param.prompt

        retrieval_res = self.get_input()
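        # Join the retrieved chunk texts into one bulleted string and substitute
        # it (plus any other kwargs) into the {placeholders} of the template.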
        input = "\n- ".join(retrieval_res["content"])
        kwargs["input"] = input
        for n, v in kwargs.items():
            # prompt = re.sub(r"\{%s\}" % n, re.escape(str(v)), prompt)
            prompt = re.sub(r"\{%s\}" % n, str(v), prompt)
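
        # In streaming mode, hand back a generator and let the caller drive it.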
        if kwargs.get("stream"):
            return partial(self.stream_output, chat_mdl, prompt, retrieval_res)

        if "empty_response" in retrieval_res.columns:
            return Generate.be_output(input)

        ans = chat_mdl.chat(prompt, self._canvas.get_history(self._param.message_history_window_size),
                            self._param.gen_conf())
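
        # Post-process the answer: insert " ##N$$" citation markers by matching
        # answer spans against chunk tokens (weight 0.7) and embeddings (0.3).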
        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
            ans, idx = retrievaler.insert_citations(ans,
                                                    [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
                                                    [ck["vector"] for _, ck in retrieval_res.iterrows()],
                                                    LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
                                                              self._canvas.get_embedding_model()),
                                                    tkweight=0.7,
                                                    vtweight=0.3)
            del retrieval_res["vector"]
            retrieval_res = retrieval_res.to_dict("records")
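            # Split the answer at each " ##N$$" marker so every cited chunk
            # becomes its own row, with any uncited tail text appended last.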
            df = []
            for i in idx:
                df.append(retrieval_res[int(i)])
                r = re.search(r"^((.|[\r\n])*? ##%s\$\$)" % str(i), ans)
                assert r, f"{i} => {ans}"
                df[-1]["content"] = r.group(1)
                ans = re.sub(r"^((.|[\r\n])*? ##%s\$\$)" % str(i), "", ans)
            if ans:
                df.append({"content": ans})
            return pd.DataFrame(df)

        return Generate.be_output(ans)
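
    # Generator used in streaming mode: yields partial answers as they arrive,
    # then a final message that carries the citation references.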
    def stream_output(self, chat_mdl, prompt, retrieval_res):
        res = None
        if "empty_response" in retrieval_res.columns and "\n- ".join(retrieval_res["content"]):
            res = {"content": "\n- ".join(retrieval_res["content"]), "reference": []}
            yield res
            self.set_output(res)
            return

        answer = ""
        for ans in chat_mdl.chat_streamly(prompt, self._canvas.get_history(self._param.message_history_window_size),
                                          self._param.gen_conf()):
            res = {"content": ans, "reference": []}
            answer = ans
            yield res
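
        # Same citation pass as _run(), but also aggregate the distinct source
        # documents so the client can render a reference list.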
        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
            answer, idx = retrievaler.insert_citations(answer,
                                                       [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
                                                       [ck["vector"] for _, ck in retrieval_res.iterrows()],
                                                       LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
                                                                 self._canvas.get_embedding_model()),
                                                       tkweight=0.7,
                                                       vtweight=0.3)
            doc_ids = set([])
            recall_docs = []
            for i in idx:
                did = retrieval_res.loc[int(i), "doc_id"]
                if did in doc_ids:
                    continue
                doc_ids.add(did)
                recall_docs.append({"doc_id": did, "doc_name": retrieval_res.loc[int(i), "docnm_kwd"]})

            del retrieval_res["vector"]
            del retrieval_res["content_ltks"]
            reference = {
                "chunks": [ck.to_dict() for _, ck in retrieval_res.iterrows()],
                "doc_aggs": recall_docs
            }
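
            # Heuristic: surface a friendlier hint when the provider appears to
            # have rejected the API key.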
            if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
                answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
            res = {"content": answer, "reference": reference}
            yield res

        self.set_output(res)