Browse Source

Add api for sessions and add max_tokens for tenant_llm (#3472)

### What problem does this PR solve?

Add api for sessions and add max_tokens for tenant_llm

### Type of change

- [x] New Feature (non-breaking change which adds functionality)

---------

Co-authored-by: liuhua <10215101452@stu.ecun.edu.cn>
tags/v0.14.0
liuhua 11 months ago
parent
commit
d42362deb6
No account linked to committer's email address

+ 33
- 28
api/apps/llm_app.py View File

mdl = ChatModel[factory]( mdl = ChatModel[factory](
req["api_key"], llm.llm_name, base_url=req.get("base_url")) req["api_key"], llm.llm_name, base_url=req.get("base_url"))
try: try:
m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}],
{"temperature": 0.9,'max_tokens':50})
if m.find("**ERROR**") >=0:
m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}],
{"temperature": 0.9, 'max_tokens': 50})
if m.find("**ERROR**") >= 0:
raise Exception(m) raise Exception(m)
chat_passed = True chat_passed = True
except Exception as e: except Exception as e:
llm_config[n] = req[n] llm_config[n] = req[n]


for llm in LLMService.query(fid=factory): for llm in LLMService.query(fid=factory):
llm_config["max_tokens"]=llm.max_tokens
if not TenantLLMService.filter_update( if not TenantLLMService.filter_update(
[TenantLLM.tenant_id == current_user.id, [TenantLLM.tenant_id == current_user.id,
TenantLLM.llm_factory == factory, TenantLLM.llm_factory == factory,
llm_name=llm.llm_name, llm_name=llm.llm_name,
model_type=llm.model_type, model_type=llm.model_type,
api_key=llm_config["api_key"], api_key=llm_config["api_key"],
api_base=llm_config["api_base"]
api_base=llm_config["api_base"],
max_tokens=llm_config["max_tokens"]
) )


return get_json_result(data=True) return get_json_result(data=True)
api_key = apikey_json(["bedrock_ak", "bedrock_sk", "bedrock_region"]) api_key = apikey_json(["bedrock_ak", "bedrock_sk", "bedrock_region"])


elif factory == "LocalAI": elif factory == "LocalAI":
llm_name = req["llm_name"]+"___LocalAI"
llm_name = req["llm_name"] + "___LocalAI"
api_key = "xxxxxxxxxxxxxxx" api_key = "xxxxxxxxxxxxxxx"
elif factory == "HuggingFace": elif factory == "HuggingFace":
llm_name = req["llm_name"]+"___HuggingFace"
llm_name = req["llm_name"] + "___HuggingFace"
api_key = "xxxxxxxxxxxxxxx" api_key = "xxxxxxxxxxxxxxx"


elif factory == "OpenAI-API-Compatible": elif factory == "OpenAI-API-Compatible":
llm_name = req["llm_name"]+"___OpenAI-API"
api_key = req.get("api_key","xxxxxxxxxxxxxxx")
llm_name = req["llm_name"] + "___OpenAI-API"
api_key = req.get("api_key", "xxxxxxxxxxxxxxx")


elif factory =="XunFei Spark":
elif factory == "XunFei Spark":
llm_name = req["llm_name"] llm_name = req["llm_name"]
if req["model_type"] == "chat": if req["model_type"] == "chat":
api_key = req.get("spark_api_password", "xxxxxxxxxxxxxxx") api_key = req.get("spark_api_password", "xxxxxxxxxxxxxxx")
elif req["model_type"] == "tts": elif req["model_type"] == "tts":
api_key = apikey_json(["spark_app_id", "spark_api_secret","spark_api_key"])
api_key = apikey_json(["spark_app_id", "spark_api_secret", "spark_api_key"])


elif factory == "BaiduYiyan": elif factory == "BaiduYiyan":
llm_name = req["llm_name"] llm_name = req["llm_name"]
"model_type": req["model_type"], "model_type": req["model_type"],
"llm_name": llm_name, "llm_name": llm_name,
"api_base": req.get("api_base", ""), "api_base": req.get("api_base", ""),
"api_key": api_key
"api_key": api_key,
"max_tokens": req.get("max_tokens")
} }


msg = "" msg = ""
if llm["model_type"] == LLMType.EMBEDDING.value: if llm["model_type"] == LLMType.EMBEDDING.value:
mdl = EmbeddingModel[factory]( mdl = EmbeddingModel[factory](
key=llm['api_key'], key=llm['api_key'],
model_name=llm["llm_name"],
model_name=llm["llm_name"],
base_url=llm["api_base"]) base_url=llm["api_base"])
try: try:
arr, tc = mdl.encode(["Test if the api key is available"]) arr, tc = mdl.encode(["Test if the api key is available"])
) )
try: try:
m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], { m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
"temperature": 0.9})
"temperature": 0.9})
if not tc: if not tc:
raise Exception(m) raise Exception(m)
except Exception as e: except Exception as e:
e) e)
elif llm["model_type"] == LLMType.RERANK: elif llm["model_type"] == LLMType.RERANK:
mdl = RerankModel[factory]( mdl = RerankModel[factory](
key=llm["api_key"],
model_name=llm["llm_name"],
key=llm["api_key"],
model_name=llm["llm_name"],
base_url=llm["api_base"] base_url=llm["api_base"]
) )
try: try:
e) e)
elif llm["model_type"] == LLMType.IMAGE2TEXT.value: elif llm["model_type"] == LLMType.IMAGE2TEXT.value:
mdl = CvModel[factory]( mdl = CvModel[factory](
key=llm["api_key"],
model_name=llm["llm_name"],
key=llm["api_key"],
model_name=llm["llm_name"],
base_url=llm["api_base"] base_url=llm["api_base"]
) )
try: try:
return get_data_error_result(message=msg) return get_data_error_result(message=msg)


if not TenantLLMService.filter_update( if not TenantLLMService.filter_update(
[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory, TenantLLM.llm_name == llm["llm_name"]], llm):
[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory,
TenantLLM.llm_name == llm["llm_name"]], llm):
TenantLLMService.save(**llm) TenantLLMService.save(**llm)


return get_json_result(data=True) return get_json_result(data=True)
def delete_llm(): def delete_llm():
req = request.json req = request.json
TenantLLMService.filter_delete( TenantLLMService.filter_delete(
[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"], TenantLLM.llm_name == req["llm_name"]])
[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"],
TenantLLM.llm_name == req["llm_name"]])
return get_json_result(data=True) return get_json_result(data=True)




def delete_factory(): def delete_factory():
req = request.json req = request.json
TenantLLMService.filter_delete( TenantLLMService.filter_delete(
[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"]])
[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"]])
return get_json_result(data=True) return get_json_result(data=True)




@manager.route('/list', methods=['GET']) @manager.route('/list', methods=['GET'])
@login_required @login_required
def list_app(): def list_app():
self_deploied = ["Youdao","FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio"]
weighted = ["Youdao","FastEmbed", "BAAI"] if settings.LIGHTEN != 0 else []
self_deploied = ["Youdao", "FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio"]
weighted = ["Youdao", "FastEmbed", "BAAI"] if settings.LIGHTEN != 0 else []
model_type = request.args.get("model_type") model_type = request.args.get("model_type")
try: try:
objs = TenantLLMService.query(tenant_id=current_user.id) objs = TenantLLMService.query(tenant_id=current_user.id)
for m in llms: for m in llms:
m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in self_deploied m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in self_deploied


llm_set = set([m["llm_name"]+"@"+m["fid"] for m in llms])
llm_set = set([m["llm_name"] + "@" + m["fid"] for m in llms])
for o in objs: for o in objs:
if not o.api_key:continue
if o.llm_name+"@"+o.llm_factory in llm_set:continue
if not o.api_key: continue
if o.llm_name + "@" + o.llm_factory in llm_set: continue
llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True}) llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})


res = {} res = {}
for m in llms: for m in llms:
if model_type and m["model_type"].find(model_type)<0:
if model_type and m["model_type"].find(model_type) < 0:
continue continue
if m["fid"] not in res: if m["fid"] not in res:
res[m["fid"]] = [] res[m["fid"]] = []


return get_json_result(data=res) return get_json_result(data=res)
except Exception as e: except Exception as e:
return server_error_response(e)
return server_error_response(e)

+ 84
- 5
api/apps/sdk/session.py View File

# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
# #
import re
import json import json
from functools import partial from functools import partial
from uuid import uuid4 from uuid import uuid4
from api.db import LLMType
from flask import request, Response from flask import request, Response
from api.db.services.dialog_service import ask
from agent.canvas import Canvas from agent.canvas import Canvas
from api.db import StatusEnum from api.db import StatusEnum
from api.db.db_models import API4Conversation from api.db.db_models import API4Conversation
from api.db.services.api_service import API4ConversationService from api.db.services.api_service import API4ConversationService
from api.db.services.canvas_service import UserCanvasService from api.db.services.canvas_service import UserCanvasService
from api.db.services.dialog_service import DialogService, ConversationService, chat from api.db.services.dialog_service import DialogService, ConversationService, chat
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils import get_uuid from api.utils import get_uuid
from api.utils.api_utils import get_error_data_result from api.utils.api_utils import get_error_data_result
from api.utils.api_utils import get_result, token_required from api.utils.api_utils import get_result, token_required
from api.db.services.llm_service import LLMBundle




@manager.route('/chats/<chat_id>/sessions', methods=['POST']) @manager.route('/chats/<chat_id>/sessions', methods=['POST'])
yield "data:" + json.dumps({"code": 500, "message": str(e), yield "data:" + json.dumps({"code": 500, "message": str(e),
"data": {"answer": "**ERROR**: " + str(e), "reference": []}}, "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
ensure_ascii=False) + "\n\n" ensure_ascii=False) + "\n\n"
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
yield "data:" + json.dumps({"code": 0, "data": True}, ensure_ascii=False) + "\n\n"


resp = Response(sse(), mimetype="text/event-stream") resp = Response(sse(), mimetype="text/event-stream")
resp.headers.add_header("Cache-control", "no-cache") resp.headers.add_header("Cache-control", "no-cache")


@manager.route('/chats/<chat_id>/sessions', methods=['GET']) @manager.route('/chats/<chat_id>/sessions', methods=['GET'])
@token_required @token_required
def list(chat_id,tenant_id):
def list_session(chat_id,tenant_id):
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value): if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
return get_error_data_result(message=f"You don't own the assistant {chat_id}.") return get_error_data_result(message=f"You don't own the assistant {chat_id}.")
id = request.args.get("id") id = request.args.get("id")
if not conv: if not conv:
return get_error_data_result(message="The chat doesn't own the session") return get_error_data_result(message="The chat doesn't own the session")
ConversationService.delete_by_id(id) ConversationService.delete_by_id(id)
return get_result()
return get_result()

@manager.route('/sessions/ask', methods=['POST'])
@token_required
def ask_about(tenant_id):
    """Stream an answer to a question grounded in the caller's datasets (SSE).

    Expects a JSON body with:
      - question (str): the user's question; required.
      - dataset_ids (list[str]): knowledge-base ids the tenant owns; required.

    Returns a text/event-stream Response whose events wrap each chunk produced
    by ``ask`` as ``{"code": 0, "data": ...}``, followed by a final
    ``{"code": 0, "data": true}`` terminator event.
    """
    req = request.json
    if not req.get("question"):
        return get_error_data_result("`question` is required.")
    if not req.get("dataset_ids"):
        return get_error_data_result("`dataset_ids` is required.")
    if not isinstance(req.get("dataset_ids"), list):
        return get_error_data_result("`dataset_ids` should be a list.")
    # Downstream services speak in terms of kb_ids; rename once up front.
    req["kb_ids"] = req.pop("dataset_ids")
    for kb_id in req["kb_ids"]:
        if not KnowledgebaseService.accessible(kb_id, tenant_id):
            return get_error_data_result(f"You don't own the dataset {kb_id}.")
        kbs = KnowledgebaseService.query(id=kb_id)
        if not kbs:
            # accessible() passed but the record is gone (e.g. deleted in a
            # race); fail with a clean error instead of an IndexError below.
            return get_error_data_result(f"You don't own the dataset {kb_id}.")
        kb = kbs[0]
        if kb.chunk_num == 0:
            return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
    uid = tenant_id

    def stream():
        # `req` and `uid` are only read here, so plain closure access is
        # sufficient — no `nonlocal` declaration needed.
        try:
            for ans in ask(req["question"], req["kb_ids"], uid):
                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
        except Exception as e:
            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

    resp = Response(stream(), mimetype="text/event-stream")
    resp.headers.add_header("Cache-control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")
    resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
    return resp


@manager.route('/sessions/related_questions', methods=['POST'])
@token_required
def related_questions(tenant_id):
    """Generate 5-10 related search terms for the user's question via the chat LLM.

    Expects a JSON body with `question` (str). Returns the numbered suggestions
    parsed out of the model's reply as a plain list of strings.
    """
    req = request.json
    if not req.get("question"):
        return get_error_data_result("`question` is required.")
    question = req["question"]
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    prompt = """
Objective: To generate search terms related to the user's search keywords, helping users find more valuable information.
Instructions:
 - Based on the keywords provided by the user, generate 5-10 related search terms.
 - Each search term should be directly or indirectly related to the keyword, guiding the user to find more valuable information.
 - Use common, general terms as much as possible, avoiding obscure words or technical jargon.
 - Keep the term length between 2-4 words, concise and clear.
 - DO NOT translate, use the language of the original keywords.

### Example:
Keywords: Chinese football
Related search terms:
1. Current status of Chinese football
2. Reform of Chinese football
3. Youth training of Chinese football
4. Chinese football in the Asian Cup
5. Chinese football in the World Cup

Reason:
 - When searching, users often only use one or two keywords, making it difficult to fully express their information needs.
 - Generating related search terms can help users dig deeper into relevant information and improve search efficiency.
 - At the same time, related terms can also help search engines better understand user needs and return more accurate search results.

"""
    # NOTE(review): this assumes LLMBundle.chat returns the answer text
    # directly (not an (answer, token_count) tuple like the raw ChatModel.chat
    # used elsewhere) — confirm against LLMBundle before relying on it.
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": f"""
Keywords: {question}
Related search terms:
    """}], {"temperature": 0.9})
    # Accept one- or more-digit list numbering ("1. " through "10. " and up);
    # the previous pattern `^[0-9]\. ` silently dropped items numbered >= 10
    # even though the prompt asks for up to 10 terms.
    return get_result(data=[re.sub(r"^[0-9]+\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]+\. ", a)])

+ 24
- 10
api/db/db_models.py View File

import inspect import inspect
import os import os
import sys import sys
import typing
import operator import operator
from enum import Enum from enum import Enum
from functools import wraps from functools import wraps
Field, Model, Metadata Field, Model, Metadata
) )
from playhouse.pool import PooledMySQLDatabase, PooledPostgresqlDatabase from playhouse.pool import PooledMySQLDatabase, PooledPostgresqlDatabase


from api.db import SerializedType, ParserType from api.db import SerializedType, ParserType
from api import settings from api import settings
from api import utils from api import utils



def singleton(cls, *args, **kw): def singleton(cls, *args, **kw):
instances = {} instances = {}


f"the serialized type {self._serialized_type} is not supported") f"the serialized type {self._serialized_type} is not supported")




def is_continuous_field(cls: type) -> bool:
def is_continuous_field(cls: typing.Type) -> bool:
if cls in CONTINUOUS_FIELD_TYPE: if cls in CONTINUOUS_FIELD_TYPE:
return True return True
for p in cls.__bases__: for p in cls.__bases__:
if p in CONTINUOUS_FIELD_TYPE: if p in CONTINUOUS_FIELD_TYPE:
return True return True
elif p is not Field and p is not object:
elif p != Field and p != object:
if is_continuous_field(p): if is_continuous_field(p):
return True return True
else: else:
def to_dict(self): def to_dict(self):
return self.__dict__['__data__'] return self.__dict__['__data__']


def to_human_model_dict(self, only_primary_with: list | None = None):
def to_human_model_dict(self, only_primary_with: list = None):
model_dict = self.__dict__['__data__'] model_dict = self.__dict__['__data__']


if not only_primary_with: if not only_primary_with:
super(JsonSerializedField, self).__init__(serialized_type=SerializedType.JSON, object_hook=object_hook, super(JsonSerializedField, self).__init__(serialized_type=SerializedType.JSON, object_hook=object_hook,
object_pairs_hook=object_pairs_hook, **kwargs) object_pairs_hook=object_pairs_hook, **kwargs)



class PooledDatabase(Enum): class PooledDatabase(Enum):
MYSQL = PooledMySQLDatabase MYSQL = PooledMySQLDatabase
POSTGRES = PooledPostgresqlDatabase POSTGRES = PooledPostgresqlDatabase
self.database_connection = PooledDatabase[settings.DATABASE_TYPE.upper()].value(db_name, **database_config) self.database_connection = PooledDatabase[settings.DATABASE_TYPE.upper()].value(db_name, **database_config)
logging.info('init database on cluster mode successfully') logging.info('init database on cluster mode successfully')



class PostgresDatabaseLock: class PostgresDatabaseLock:
def __init__(self, lock_name, timeout=10, db=None): def __init__(self, lock_name, timeout=10, db=None):
self.lock_name = lock_name self.lock_name = lock_name


return magic return magic



class MysqlDatabaseLock: class MysqlDatabaseLock:
def __init__(self, lock_name, timeout=10, db=None): def __init__(self, lock_name, timeout=10, db=None):
self.lock_name = lock_name self.lock_name = lock_name
index=True) index=True)
api_key = CharField(max_length=1024, null=True, help_text="API KEY", index=True) api_key = CharField(max_length=1024, null=True, help_text="API KEY", index=True)
api_base = CharField(max_length=255, null=True, help_text="API Base") api_base = CharField(max_length=255, null=True, help_text="API Base")
max_tokens = IntegerField(default=8192, index=True)
used_tokens = IntegerField(default=0, index=True) used_tokens = IntegerField(default=0, index=True)


def __str__(self): def __str__(self):
default="simple", default="simple",
help_text="simple|advanced", help_text="simple|advanced",
index=True) index=True)
prompt_config = JSONField(null=False, default={"system": "", "prologue": "Hi! I'm your assistant, what can I do for you?",
"parameters": [], "empty_response": "Sorry! No relevant content was found in the knowledge base!"})
prompt_config = JSONField(null=False,
default={"system": "", "prologue": "Hi! I'm your assistant, what can I do for you?",
"parameters": [],
"empty_response": "Sorry! No relevant content was found in the knowledge base!"})


similarity_threshold = FloatField(default=0.2) similarity_threshold = FloatField(default=0.2)
vector_similarity_weight = FloatField(default=0.3) vector_similarity_weight = FloatField(default=0.3)
null=False, null=False,
default="1", default="1",
help_text="it needs to insert reference index into answer or not") help_text="it needs to insert reference index into answer or not")
rerank_id = CharField( rerank_id = CharField(
max_length=128, max_length=128,
null=False, null=False,
pass pass
try: try:
migrate( migrate(
migrator.add_column("tenant","tts_id",
CharField(max_length=256,null=True,help_text="default tts model ID",index=True))
migrator.add_column("tenant", "tts_id",
CharField(max_length=256, null=True, help_text="default tts model ID", index=True))
) )
except Exception: except Exception:
pass pass
) )
except Exception: except Exception:
pass pass

try:
migrate(
migrator.add_column("tenant_llm","max_tokens",IntegerField(default=8192,index=True))
)
except Exception:
pass

+ 1
- 1
rag/llm/embedding_model.py View File

def __init__(self, key, model_name, base_url="https://api.together.xyz/v1"): def __init__(self, key, model_name, base_url="https://api.together.xyz/v1"):
if not base_url: if not base_url:
base_url = "https://api.together.xyz/v1" base_url = "https://api.together.xyz/v1"
super().__init__(key, model_name, base_url)
super().__init__(key, model_name, base_url=base_url)




class PerfXCloudEmbed(OpenAIEmbed): class PerfXCloudEmbed(OpenAIEmbed):

+ 1
- 0
web/src/interfaces/request/llm.ts View File

model_type: string; model_type: string;
api_base?: string; // chat|embedding|speech2text|image2text api_base?: string; // chat|embedding|speech2text|image2text
api_key: string; api_key: string;
max_tokens: number;
} }


export interface IDeleteLlmRequestBody { export interface IDeleteLlmRequestBody {

+ 8
- 0
web/src/locales/en.ts View File

maxTokensMessage: 'Max Tokens is required', maxTokensMessage: 'Max Tokens is required',
maxTokensTip: maxTokensTip:
'This sets the maximum length of the model’s output, measured in the number of tokens (words or pieces of words).', 'This sets the maximum length of the model’s output, measured in the number of tokens (words or pieces of words).',
maxTokensInvalidMessage: 'Please enter a valid number for Max Tokens.',
maxTokensMinMessage: 'Max Tokens cannot be less than 0.',
quote: 'Show Quote', quote: 'Show Quote',
quoteTip: 'Should the source of the original text be displayed?', quoteTip: 'Should the source of the original text be displayed?',
selfRag: 'Self-RAG', selfRag: 'Self-RAG',
setting: { setting: {
profile: 'Profile', profile: 'Profile',
profileDescription: 'Update your photo and personal details here.', profileDescription: 'Update your photo and personal details here.',
maxTokens: 'Max Tokens',
maxTokensMessage: 'Max Tokens is required',
maxTokensTip:
'This sets the maximum length of the model’s output, measured in the number of tokens (words or pieces of words).',
maxTokensInvalidMessage: 'Please enter a valid number for Max Tokens.',
maxTokensMinMessage: 'Max Tokens cannot be less than 0.',
password: 'Password', password: 'Password',
passwordDescription: passwordDescription:
'Please enter your current password to change your password.', 'Please enter your current password to change your password.',

+ 8
- 0
web/src/locales/es.ts View File

maxTokensMessage: 'El máximo de tokens es obligatorio', maxTokensMessage: 'El máximo de tokens es obligatorio',
maxTokensTip: maxTokensTip:
'Esto establece la longitud máxima de la salida del modelo, medida en el número de tokens (palabras o piezas de palabras).', 'Esto establece la longitud máxima de la salida del modelo, medida en el número de tokens (palabras o piezas de palabras).',
maxTokensInvalidMessage: 'Por favor, ingresa un número válido para Max Tokens.',
maxTokensMinMessage: 'Max Tokens no puede ser menor que 0.',
quote: 'Mostrar cita', quote: 'Mostrar cita',
quoteTip: '¿Debe mostrarse la fuente del texto original?', quoteTip: '¿Debe mostrarse la fuente del texto original?',
selfRag: 'Self-RAG', selfRag: 'Self-RAG',
setting: { setting: {
profile: 'Perfil', profile: 'Perfil',
profileDescription: 'Actualiza tu foto y tus datos personales aquí.', profileDescription: 'Actualiza tu foto y tus datos personales aquí.',
maxTokens: 'Máximo de tokens',
maxTokensMessage: 'El máximo de tokens es obligatorio',
maxTokensTip:
'Esto establece la longitud máxima de la salida del modelo, medida en el número de tokens (palabras o piezas de palabras).',
maxTokensInvalidMessage: 'Por favor, ingresa un número válido para Max Tokens.',
maxTokensMinMessage: 'Max Tokens no puede ser menor que 0.',
password: 'Contraseña', password: 'Contraseña',
passwordDescription: passwordDescription:
'Por favor ingresa tu contraseña actual para cambiarla.', 'Por favor ingresa tu contraseña actual para cambiarla.',

+ 8
- 0
web/src/locales/id.ts View File

maxTokensMessage: 'Token Maksimum diperlukan', maxTokensMessage: 'Token Maksimum diperlukan',
maxTokensTip: maxTokensTip:
'Ini menetapkan panjang maksimum keluaran model, diukur dalam jumlah token (kata atau potongan kata).', 'Ini menetapkan panjang maksimum keluaran model, diukur dalam jumlah token (kata atau potongan kata).',
maxTokensInvalidMessage: 'Silakan masukkan angka yang valid untuk Max Tokens.',
maxTokensMinMessage: 'Max Tokens tidak boleh kurang dari 0.',
quote: 'Tampilkan Kutipan', quote: 'Tampilkan Kutipan',
quoteTip: 'Haruskah sumber teks asli ditampilkan?', quoteTip: 'Haruskah sumber teks asli ditampilkan?',
selfRag: 'Self-RAG', selfRag: 'Self-RAG',
setting: { setting: {
profile: 'Profil', profile: 'Profil',
profileDescription: 'Perbarui foto dan detail pribadi Anda di sini.', profileDescription: 'Perbarui foto dan detail pribadi Anda di sini.',
maxTokens: 'Token Maksimum',
maxTokensMessage: 'Token Maksimum diperlukan',
maxTokensTip:
'Ini menetapkan panjang maksimum keluaran model, diukur dalam jumlah token (kata atau potongan kata).',
maxTokensInvalidMessage: 'Silakan masukkan angka yang valid untuk Max Tokens.',
maxTokensMinMessage: 'Max Tokens tidak boleh kurang dari 0.',
password: 'Kata Sandi', password: 'Kata Sandi',
passwordDescription: passwordDescription:
'Silakan masukkan kata sandi Anda saat ini untuk mengubah kata sandi Anda.', 'Silakan masukkan kata sandi Anda saat ini untuk mengubah kata sandi Anda.',

+ 8
- 0
web/src/locales/zh-traditional.ts View File

maxTokensMessage: '最大token數是必填項', maxTokensMessage: '最大token數是必填項',
maxTokensTip: maxTokensTip:
'這設置了模型輸出的最大長度,以標記(單詞或單詞片段)的數量來衡量。', '這設置了模型輸出的最大長度,以標記(單詞或單詞片段)的數量來衡量。',
maxTokensInvalidMessage: '請輸入有效的最大標記數。',
maxTokensMinMessage: '最大標記數不能小於 0。',
quote: '顯示引文', quote: '顯示引文',
quoteTip: '是否應該顯示原文出處?', quoteTip: '是否應該顯示原文出處?',
selfRag: 'Self-RAG', selfRag: 'Self-RAG',
setting: { setting: {
profile: '概述', profile: '概述',
profileDescription: '在此更新您的照片和個人詳細信息。', profileDescription: '在此更新您的照片和個人詳細信息。',
maxTokens: '最大token數',
maxTokensMessage: '最大token數是必填項',
maxTokensTip:
'這設置了模型輸出的最大長度,以標記(單詞或單詞片段)的數量來衡量。',
maxTokensInvalidMessage: '請輸入有效的最大標記數。',
maxTokensMinMessage: '最大標記數不能小於 0。',
password: '密碼', password: '密碼',
passwordDescription: '請輸入您當前的密碼以更改您的密碼。', passwordDescription: '請輸入您當前的密碼以更改您的密碼。',
model: '模型提供商', model: '模型提供商',

+ 8
- 0
web/src/locales/zh.ts View File

maxTokensMessage: '最大token数是必填项', maxTokensMessage: '最大token数是必填项',
maxTokensTip: maxTokensTip:
'这设置了模型输出的最大长度,以标记(单词或单词片段)的数量来衡量。', '这设置了模型输出的最大长度,以标记(单词或单词片段)的数量来衡量。',
maxTokensInvalidMessage: '请输入有效的最大令牌数。',
maxTokensMinMessage: '最大令牌数不能小于 0。',
quote: '显示引文', quote: '显示引文',
quoteTip: '是否应该显示原文出处?', quoteTip: '是否应该显示原文出处?',
selfRag: 'Self-RAG', selfRag: 'Self-RAG',
setting: { setting: {
profile: '概要', profile: '概要',
profileDescription: '在此更新您的照片和个人详细信息。', profileDescription: '在此更新您的照片和个人详细信息。',
maxTokens: '最大token数',
maxTokensMessage: '最大token数是必填项',
maxTokensTip:
'这设置了模型输出的最大长度,以标记(单词或单词片段)的数量来衡量。',
maxTokensInvalidMessage: '请输入有效的最大令牌数。',
maxTokensMinMessage: '最大令牌数不能小于 0。',
password: '密码', password: '密码',
passwordDescription: '请输入您当前的密码以更改您的密码。', passwordDescription: '请输入您当前的密码以更改您的密码。',
model: '模型提供商', model: '模型提供商',

+ 2
- 1
web/src/pages/user-setting/setting-model/Tencent-modal/index.tsx View File

import { useTranslate } from '@/hooks/common-hooks'; import { useTranslate } from '@/hooks/common-hooks';
import { IModalProps } from '@/interfaces/common'; import { IModalProps } from '@/interfaces/common';
import { IAddLlmRequestBody } from '@/interfaces/request/llm'; import { IAddLlmRequestBody } from '@/interfaces/request/llm';
import { Flex, Form, Input, Modal, Select, Space } from 'antd';
import { Flex, Form, Input, Modal, Select, Space, InputNumber } from 'antd';
import omit from 'lodash/omit'; import omit from 'lodash/omit';


type FieldType = IAddLlmRequestBody & { type FieldType = IAddLlmRequestBody & {
...omit(values), ...omit(values),
model_type: modelType, model_type: modelType,
llm_factory: llmFactory, llm_factory: llmFactory,
max_tokens:16000,
}; };
console.info(data); console.info(data);



+ 27
- 1
web/src/pages/user-setting/setting-model/azure-openai-modal/index.tsx View File

import { useTranslate } from '@/hooks/common-hooks'; import { useTranslate } from '@/hooks/common-hooks';
import { IModalProps } from '@/interfaces/common'; import { IModalProps } from '@/interfaces/common';
import { IAddLlmRequestBody } from '@/interfaces/request/llm'; import { IAddLlmRequestBody } from '@/interfaces/request/llm';
import { Form, Input, Modal, Select, Switch } from 'antd';
import { Form, Input, Modal, Select, Switch, InputNumber } from 'antd';
import omit from 'lodash/omit'; import omit from 'lodash/omit';


type FieldType = IAddLlmRequestBody & { type FieldType = IAddLlmRequestBody & {
...omit(values, ['vision']), ...omit(values, ['vision']),
model_type: modelType, model_type: modelType,
llm_factory: llmFactory, llm_factory: llmFactory,
max_tokens:values.max_tokens,
}; };
console.info(data); console.info(data);


> >
<Input placeholder={t('apiVersionMessage')} /> <Input placeholder={t('apiVersionMessage')} />
</Form.Item> </Form.Item>
<Form.Item<FieldType>
label={t('maxTokens')}
name="max_tokens"
rules={[
{ required: true, message: t('maxTokensMessage') },
{
type: 'number',
message: t('maxTokensInvalidMessage'),
},
({ getFieldValue }) => ({
validator(_, value) {
if (value < 0) {
return Promise.reject(new Error(t('maxTokensMinMessage')));
}
return Promise.resolve();
},
}),
]}
>
<InputNumber
placeholder={t('maxTokensTip')}
style={{ width: '100%' }}
/>
</Form.Item>

<Form.Item noStyle dependencies={['model_type']}> <Form.Item noStyle dependencies={['model_type']}>
{({ getFieldValue }) => {({ getFieldValue }) =>
getFieldValue('model_type') === 'chat' && ( getFieldValue('model_type') === 'chat' && (

+ 27
- 1
web/src/pages/user-setting/setting-model/bedrock-modal/index.tsx View File

import { useTranslate } from '@/hooks/common-hooks'; import { useTranslate } from '@/hooks/common-hooks';
import { IModalProps } from '@/interfaces/common'; import { IModalProps } from '@/interfaces/common';
import { IAddLlmRequestBody } from '@/interfaces/request/llm'; import { IAddLlmRequestBody } from '@/interfaces/request/llm';
import { Flex, Form, Input, Modal, Select, Space } from 'antd';
import { Flex, Form, Input, Modal, Select, Space, InputNumber } from 'antd';
import { useMemo } from 'react'; import { useMemo } from 'react';
import { BedrockRegionList } from '../constant'; import { BedrockRegionList } from '../constant';


const data = { const data = {
...values, ...values,
llm_factory: llmFactory, llm_factory: llmFactory,
max_tokens:values.max_tokens,
}; };


onOk?.(data); onOk?.(data);
allowClear allowClear
></Select> ></Select>
</Form.Item> </Form.Item>
<Form.Item<FieldType>
label={t('maxTokens')}
name="max_tokens"
rules={[
{ required: true, message: t('maxTokensMessage') },
{
type: 'number',
message: t('maxTokensInvalidMessage'),
},
({ getFieldValue }) => ({
validator(_, value) {
if (value < 0) {
return Promise.reject(new Error(t('maxTokensMinMessage')));
}
return Promise.resolve();
},
}),
]}
>
<InputNumber
placeholder={t('maxTokensTip')}
style={{ width: '100%' }}
/>
</Form.Item>

</Form> </Form>
</Modal> </Modal>
); );

+ 27
- 1
web/src/pages/user-setting/setting-model/fish-audio-modal/index.tsx View File

import { useTranslate } from '@/hooks/common-hooks'; import { useTranslate } from '@/hooks/common-hooks';
import { IModalProps } from '@/interfaces/common'; import { IModalProps } from '@/interfaces/common';
import { IAddLlmRequestBody } from '@/interfaces/request/llm'; import { IAddLlmRequestBody } from '@/interfaces/request/llm';
import { Flex, Form, Input, Modal, Select, Space } from 'antd';
import { Flex, Form, Input, Modal, Select, Space, InputNumber } from 'antd';
import omit from 'lodash/omit'; import omit from 'lodash/omit';


type FieldType = IAddLlmRequestBody & { type FieldType = IAddLlmRequestBody & {
...omit(values), ...omit(values),
model_type: modelType, model_type: modelType,
llm_factory: llmFactory, llm_factory: llmFactory,
max_tokens:values.max_tokens,
}; };
console.info(data); console.info(data);


> >
<Input placeholder={t('FishAudioRefIDMessage')} /> <Input placeholder={t('FishAudioRefIDMessage')} />
</Form.Item> </Form.Item>
<Form.Item<FieldType>
label={t('maxTokens')}
name="max_tokens"
rules={[
{ required: true, message: t('maxTokensMessage') },
{
type: 'number',
message: t('maxTokensInvalidMessage'),
},
({ getFieldValue }) => ({
validator(_, value) {
if (value < 0) {
return Promise.reject(new Error(t('maxTokensMinMessage')));
}
return Promise.resolve();
},
}),
]}
>
<InputNumber
placeholder={t('maxTokensTip')}
style={{ width: '100%' }}
/>
</Form.Item>

</Form> </Form>
</Modal> </Modal>
); );

+ 27
- 1
web/src/pages/user-setting/setting-model/google-modal/index.tsx View File

import { useTranslate } from '@/hooks/common-hooks'; import { useTranslate } from '@/hooks/common-hooks';
import { IModalProps } from '@/interfaces/common'; import { IModalProps } from '@/interfaces/common';
import { IAddLlmRequestBody } from '@/interfaces/request/llm'; import { IAddLlmRequestBody } from '@/interfaces/request/llm';
import { Form, Input, Modal, Select } from 'antd';
import { Form, Input, Modal, Select, InputNumber } from 'antd';


type FieldType = IAddLlmRequestBody & { type FieldType = IAddLlmRequestBody & {
google_project_id: string; google_project_id: string;
const data = { const data = {
...values, ...values,
llm_factory: llmFactory, llm_factory: llmFactory,
max_tokens:values.max_tokens,
}; };


onOk?.(data); onOk?.(data);
> >
<Input placeholder={t('GoogleServiceAccountKeyMessage')} /> <Input placeholder={t('GoogleServiceAccountKeyMessage')} />
</Form.Item> </Form.Item>
<Form.Item<FieldType>
label={t('maxTokens')}
name="max_tokens"
rules={[
{ required: true, message: t('maxTokensMessage') },
{
type: 'number',
message: t('maxTokensInvalidMessage'),
},
({ getFieldValue }) => ({
validator(_, value) {
if (value < 0) {
return Promise.reject(new Error(t('maxTokensMinMessage')));
}
return Promise.resolve();
},
}),
]}
>
<InputNumber
placeholder={t('maxTokensTip')}
style={{ width: '100%' }}
/>
</Form.Item>

</Form> </Form>
</Modal> </Modal>
); );

+ 1
- 1
web/src/pages/user-setting/setting-model/hunyuan-modal/index.tsx View File

import { useTranslate } from '@/hooks/common-hooks'; import { useTranslate } from '@/hooks/common-hooks';
import { IModalProps } from '@/interfaces/common'; import { IModalProps } from '@/interfaces/common';
import { IAddLlmRequestBody } from '@/interfaces/request/llm'; import { IAddLlmRequestBody } from '@/interfaces/request/llm';
import { Form, Input, Modal, Select } from 'antd';
import { Form, Input, Modal, Select} from 'antd';
import omit from 'lodash/omit'; import omit from 'lodash/omit';


type FieldType = IAddLlmRequestBody & { type FieldType = IAddLlmRequestBody & {

+ 1
- 1
web/src/pages/user-setting/setting-model/index.tsx View File

hideModal={hideTencentCloudAddingModal} hideModal={hideTencentCloudAddingModal}
onOk={onTencentCloudAddingOk} onOk={onTencentCloudAddingOk}
loading={TencentCloudAddingLoading} loading={TencentCloudAddingLoading}
llmFactory={'Tencent TencentCloud'}
llmFactory={'Tencent Cloud'}
></TencentCloudModal> ></TencentCloudModal>
<SparkModal <SparkModal
visible={SparkAddingVisible} visible={SparkAddingVisible}

+ 27
- 1
web/src/pages/user-setting/setting-model/ollama-modal/index.tsx View File

import { useTranslate } from '@/hooks/common-hooks'; import { useTranslate } from '@/hooks/common-hooks';
import { IModalProps } from '@/interfaces/common'; import { IModalProps } from '@/interfaces/common';
import { IAddLlmRequestBody } from '@/interfaces/request/llm'; import { IAddLlmRequestBody } from '@/interfaces/request/llm';
import { Flex, Form, Input, Modal, Select, Space, Switch } from 'antd';
import { Flex, Form, Input, Modal, Select, Space, Switch, InputNumber } from 'antd';
import omit from 'lodash/omit'; import omit from 'lodash/omit';


type FieldType = IAddLlmRequestBody & { vision: boolean }; type FieldType = IAddLlmRequestBody & { vision: boolean };
...omit(values, ['vision']), ...omit(values, ['vision']),
model_type: modelType, model_type: modelType,
llm_factory: llmFactory, llm_factory: llmFactory,
max_tokens:values.max_tokens,
}; };
console.info(data); console.info(data);


> >
<Input placeholder={t('apiKeyMessage')} /> <Input placeholder={t('apiKeyMessage')} />
</Form.Item> </Form.Item>
<Form.Item<FieldType>
label={t('maxTokens')}
name="max_tokens"
rules={[
{ required: true, message: t('maxTokensMessage') },
{
type: 'number',
message: t('maxTokensInvalidMessage'),
},
({ getFieldValue }) => ({
validator(_, value) {
if (value < 0) {
return Promise.reject(new Error(t('maxTokensMinMessage')));
}
return Promise.resolve();
},
}),
]}
>
<InputNumber
placeholder={t('maxTokensTip')}
style={{ width: '100%' }}
/>
</Form.Item>

<Form.Item noStyle dependencies={['model_type']}> <Form.Item noStyle dependencies={['model_type']}>
{({ getFieldValue }) => {({ getFieldValue }) =>
getFieldValue('model_type') === 'chat' && ( getFieldValue('model_type') === 'chat' && (

+ 27
- 1
web/src/pages/user-setting/setting-model/spark-modal/index.tsx View File

import { useTranslate } from '@/hooks/common-hooks'; import { useTranslate } from '@/hooks/common-hooks';
import { IModalProps } from '@/interfaces/common'; import { IModalProps } from '@/interfaces/common';
import { IAddLlmRequestBody } from '@/interfaces/request/llm'; import { IAddLlmRequestBody } from '@/interfaces/request/llm';
import { Form, Input, Modal, Select } from 'antd';
import { Form, Input, Modal, Select, InputNumber } from 'antd';
import omit from 'lodash/omit'; import omit from 'lodash/omit';


type FieldType = IAddLlmRequestBody & { type FieldType = IAddLlmRequestBody & {
...omit(values, ['vision']), ...omit(values, ['vision']),
model_type: modelType, model_type: modelType,
llm_factory: llmFactory, llm_factory: llmFactory,
max_tokens:values.max_tokens,
}; };
console.info(data); console.info(data);


) )
} }
</Form.Item> </Form.Item>
<Form.Item<FieldType>
label={t('maxTokens')}
name="max_tokens"
rules={[
{ required: true, message: t('maxTokensMessage') },
{
type: 'number',
message: t('maxTokensInvalidMessage'),
},
({ getFieldValue }) => ({
validator(_, value) {
if (value < 0) {
return Promise.reject(new Error(t('maxTokensMinMessage')));
}
return Promise.resolve();
},
}),
]}
>
<InputNumber
placeholder={t('maxTokensTip')}
style={{ width: '100%' }}
/>
</Form.Item>

</Form> </Form>
</Modal> </Modal>
); );

+ 26
- 13
web/src/pages/user-setting/setting-model/volcengine-modal/index.tsx View File

import { useTranslate } from '@/hooks/common-hooks'; import { useTranslate } from '@/hooks/common-hooks';
import { IModalProps } from '@/interfaces/common'; import { IModalProps } from '@/interfaces/common';
import { IAddLlmRequestBody } from '@/interfaces/request/llm'; import { IAddLlmRequestBody } from '@/interfaces/request/llm';
import { Flex, Form, Input, Modal, Select, Space, Switch } from 'antd';
import { Flex, Form, Input, Modal, Select, Space, Switch, InputNumber } from 'antd';
import omit from 'lodash/omit'; import omit from 'lodash/omit';


type FieldType = IAddLlmRequestBody & { type FieldType = IAddLlmRequestBody & {
...omit(values, ['vision']), ...omit(values, ['vision']),
model_type: modelType, model_type: modelType,
llm_factory: llmFactory, llm_factory: llmFactory,
max_tokens:values.max_tokens,
}; };
console.info(data); console.info(data);


> >
<Input placeholder={t('ArkApiKeyMessage')} /> <Input placeholder={t('ArkApiKeyMessage')} />
</Form.Item> </Form.Item>
<Form.Item noStyle dependencies={['model_type']}>
{({ getFieldValue }) =>
getFieldValue('model_type') === 'chat' && (
<Form.Item
label={t('vision')}
valuePropName="checked"
name={'vision'}
>
<Switch />
</Form.Item>
)
}
<Form.Item<FieldType>
label={t('maxTokens')}
name="max_tokens"
rules={[
{ required: true, message: t('maxTokensMessage') },
{
type: 'number',
message: t('maxTokensInvalidMessage'),
},
({ getFieldValue }) => ({
validator(_, value) {
if (value < 0) {
return Promise.reject(new Error(t('maxTokensMinMessage')));
}
return Promise.resolve();
},
}),
]}
>
<InputNumber
placeholder={t('maxTokensTip')}
style={{ width: '100%' }}
/>
</Form.Item> </Form.Item>

</Form> </Form>
</Modal> </Modal>
); );

+ 26
- 1
web/src/pages/user-setting/setting-model/yiyan-modal/index.tsx View File

import { useTranslate } from '@/hooks/common-hooks'; import { useTranslate } from '@/hooks/common-hooks';
import { IModalProps } from '@/interfaces/common'; import { IModalProps } from '@/interfaces/common';
import { IAddLlmRequestBody } from '@/interfaces/request/llm'; import { IAddLlmRequestBody } from '@/interfaces/request/llm';
import { Form, Input, Modal, Select } from 'antd';
import { Form, Input, Modal, Select, InputNumber } from 'antd';
import omit from 'lodash/omit'; import omit from 'lodash/omit';


type FieldType = IAddLlmRequestBody & { type FieldType = IAddLlmRequestBody & {
...omit(values, ['vision']), ...omit(values, ['vision']),
model_type: modelType, model_type: modelType,
llm_factory: llmFactory, llm_factory: llmFactory,
max_tokens:values.max_tokens,
}; };
console.info(data); console.info(data);


> >
<Input placeholder={t('yiyanSKMessage')} /> <Input placeholder={t('yiyanSKMessage')} />
</Form.Item> </Form.Item>
<Form.Item<FieldType>
label={t('maxTokens')}
name="max_tokens"
rules={[
{ required: true, message: t('maxTokensMessage') },
{
type: 'number',
message: t('maxTokensInvalidMessage'),
},
({ getFieldValue }) => ({
validator(_, value) {
if (value < 0) {
return Promise.reject(new Error(t('maxTokensMinMessage')));
}
return Promise.resolve();
},
}),
]}
>
<InputNumber
placeholder={t('maxTokensTip')}
style={{ width: '100%' }}
/>
</Form.Item>
</Form> </Form>
</Modal> </Modal>
); );

Loading…
Cancel
Save