
llm_app.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request
from flask_login import login_required, current_user

from api.db import StatusEnum, LLMType
from api.db.db_models import TenantLLM
from api.db.services.llm_service import LLMFactoriesService, TenantLLMService, LLMService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request, get_json_result
from rag.llm import EmbeddingModel, ChatModel
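
# NOTE: `manager` is never defined or imported in this file. The assumption is
# that the app loader injects a Flask blueprint named `manager` into each
# route module before these decorators run, as with the project's other apps.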


@manager.route('/factories', methods=['GET'])
@login_required
def factories():
    try:
        fac = LLMFactoriesService.get_all()
        # Factories served locally (Youdao, FastEmbed) take no API key, so
        # they are hidden here; /list marks them always available instead.
        return get_json_result(data=[f.to_dict() for f in fac
                                     if f.name not in ["Youdao", "FastEmbed"]])
    except Exception as e:
        return server_error_response(e)


@manager.route('/set_api_key', methods=['POST'])
@login_required
@validate_request("llm_factory", "api_key")
def set_api_key():
    req = request.json
    # Verify that the API key works by probing the factory's models: every
    # embedding model is tested, and chat models are tested until one of
    # them responds successfully.
    chat_passed = False
    factory = req["llm_factory"]
    msg = ""
    for llm in LLMService.query(fid=factory):
        if llm.model_type == LLMType.EMBEDDING.value:
            mdl = EmbeddingModel[factory](
                req["api_key"], llm.llm_name, base_url=req.get("base_url"))
            try:
                arr, tc = mdl.encode(["Test if the api key is available"])
                if len(arr[0]) == 0 or tc == 0:
                    raise Exception("Fail")
            except Exception as e:
                msg += f"\nFail to access embedding model({llm.llm_name}) using this api key." + str(e)
        elif not chat_passed and llm.model_type == LLMType.CHAT.value:
            mdl = ChatModel[factory](
                req["api_key"], llm.llm_name, base_url=req.get("base_url"))
            try:
                m, tc = mdl.chat(
                    None, [{"role": "user", "content": "Hello! How are you doing!"}],
                    {"temperature": 0.9})
                if not tc:
                    raise Exception(m)
                chat_passed = True
            except Exception as e:
                msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(e)

    if msg:
        return get_data_error_result(retmsg=msg)

    llm_config = {
        "api_key": req["api_key"],
        "api_base": req.get("base_url", "")
    }
    for n in ["model_type", "llm_name"]:
        if n in req:
            llm_config[n] = req[n]

    # Update the tenant's existing records for this factory; if there are
    # none yet, create one record per model the factory provides.
    if not TenantLLMService.filter_update(
            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory], llm_config):
        for llm in LLMService.query(fid=factory):
            TenantLLMService.save(
                tenant_id=current_user.id,
                llm_factory=factory,
                llm_name=llm.llm_name,
                model_type=llm.model_type,
                api_key=req["api_key"],
                api_base=req.get("base_url", "")
            )
    return get_json_result(data=True)
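

# Example request body for /set_api_key (illustrative values):
#   {"llm_factory": "OpenAI", "api_key": "sk-...", "base_url": ""}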


@manager.route('/add_llm', methods=['POST'])
@login_required
@validate_request("llm_factory", "llm_name", "model_type")
def add_llm():
    req = request.json
    llm = {
        "tenant_id": current_user.id,
        "llm_factory": req["llm_factory"],
        "model_type": req["model_type"],
        "llm_name": req["llm_name"],
        "api_base": req.get("api_base", ""),
        # Manually added models carry no real key; store a placeholder.
        "api_key": "xxxxxxxxxxxxxxx"
    }
    factory = req["llm_factory"]
    # Probe the model once before saving it.
    msg = ""
    if llm["model_type"] == LLMType.EMBEDDING.value:
        mdl = EmbeddingModel[factory](
            key=None, model_name=llm["llm_name"], base_url=llm["api_base"])
        try:
            arr, tc = mdl.encode(["Test if the api key is available"])
            if len(arr[0]) == 0 or tc == 0:
                raise Exception("Fail")
        except Exception as e:
            msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
    elif llm["model_type"] == LLMType.CHAT.value:
        mdl = ChatModel[factory](
            key=None, model_name=llm["llm_name"], base_url=llm["api_base"])
        try:
            m, tc = mdl.chat(
                None, [{"role": "user", "content": "Hello! How are you doing!"}],
                {"temperature": 0.9})
            if not tc:
                raise Exception(m)
        except Exception as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
    else:
        # TODO: check other types of models
        pass

    if msg:
        return get_data_error_result(retmsg=msg)

    # Update the record if this tenant already has the model; otherwise insert it.
    if not TenantLLMService.filter_update(
            [TenantLLM.tenant_id == current_user.id,
             TenantLLM.llm_factory == factory,
             TenantLLM.llm_name == llm["llm_name"]], llm):
        TenantLLMService.save(**llm)
    return get_json_result(data=True)
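

# Example request body for /add_llm (illustrative values; this endpoint is
# geared toward self-hosted models, hence key=None in the probes above):
#   {"llm_factory": "Ollama", "llm_name": "llama3", "model_type": "chat",
#    "api_base": "http://localhost:11434"}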


@manager.route('/delete_llm', methods=['POST'])
@login_required
@validate_request("llm_factory", "llm_name")
def delete_llm():
    req = request.json
    TenantLLMService.filter_delete(
        [TenantLLM.tenant_id == current_user.id,
         TenantLLM.llm_factory == req["llm_factory"],
         TenantLLM.llm_name == req["llm_name"]])
    return get_json_result(data=True)
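

# Example request body for /delete_llm (illustrative values):
#   {"llm_factory": "OpenAI", "llm_name": "gpt-4"}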


@manager.route('/my_llms', methods=['GET'])
@login_required
def my_llms():
    try:
        # Group the tenant's configured models by factory.
        res = {}
        for o in TenantLLMService.get_my_llms(current_user.id):
            if o["llm_factory"] not in res:
                res[o["llm_factory"]] = {
                    "tags": o["tags"],
                    "llm": []
                }
            res[o["llm_factory"]]["llm"].append({
                "type": o["model_type"],
                "name": o["llm_name"],
                "used_token": o["used_tokens"]
            })
        return get_json_result(data=res)
    except Exception as e:
        return server_error_response(e)
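

# Example response data for /my_llms (shape only; values illustrative):
#   {"OpenAI": {"tags": "LLM,TEXT EMBEDDING",
#               "llm": [{"type": "chat", "name": "gpt-4", "used_token": 1024}]}}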


@manager.route('/list', methods=['GET'])
@login_required
def list_app():
    model_type = request.args.get("model_type")
    try:
        objs = TenantLLMService.query(tenant_id=current_user.id)
        # Factories for which this tenant has configured an API key.
        facts = set(o.to_dict()["llm_factory"] for o in objs if o.api_key)
        llms = [m.to_dict()
                for m in LLMService.get_all() if m.status == StatusEnum.VALID.value]
        for m in llms:
            # Available if the factory has a key, or the model runs locally.
            m["available"] = (m["fid"] in facts
                              or m["llm_name"].lower() == "flag-embedding"
                              or m["fid"] in ["Youdao", "FastEmbed"])

        # Append tenant-specific models missing from the global list.
        llm_set = set(m["llm_name"] for m in llms)
        for o in objs:
            if not o.api_key:
                continue
            if o.llm_name in llm_set:
                continue
            llms.append({"llm_name": o.llm_name, "model_type": o.model_type,
                         "fid": o.llm_factory, "available": True})

        # Group by factory, optionally filtering by model type.
        res = {}
        for m in llms:
            if model_type and m["model_type"].find(model_type) < 0:
                continue
            if m["fid"] not in res:
                res[m["fid"]] = []
            res[m["fid"]].append(m)
        return get_json_result(data=res)
    except Exception as e:
        return server_error_response(e)
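

# Example: GET /list?model_type=embedding returns only embedding models,
# grouped by factory (shape illustrative, fields as built above):
#   {"OpenAI": [{"llm_name": "text-embedding-ada-002",
#                "model_type": "embedding", "fid": "OpenAI",
#                "available": true, ...}]}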