Przeglądaj źródła

feat: set default memory messages limit to infinite (#5002)

tags/0.6.11
takatost 1 rok temu
rodzic
commit
3929d289e0
No account linked to committer's email address

+ 11
- 4
api/core/memory/token_buffer_memory.py Wyświetl plik

from typing import Optional

from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
from core.file.message_file_parser import MessageFileParser
from core.model_manager import ModelInstance
self.model_instance = model_instance


def get_history_prompt_messages(self, max_token_limit: int = 2000,
message_limit: int = 10) -> list[PromptMessage]:
message_limit: Optional[int] = None) -> list[PromptMessage]:
""" """
Get history prompt messages. Get history prompt messages.
:param max_token_limit: max token limit :param max_token_limit: max token limit
app_record = self.conversation.app app_record = self.conversation.app


# fetch limited messages, and return reversed
messages = db.session.query(Message).filter(
query = db.session.query(Message).filter(
Message.conversation_id == self.conversation.id,
Message.answer != ''
).order_by(Message.created_at.desc()).limit(message_limit).all()
).order_by(Message.created_at.desc())

if message_limit and message_limit > 0:
messages = query.limit(message_limit).all()
else:
messages = query.all()


messages = list(reversed(messages))
message_file_parser = MessageFileParser(
def get_history_prompt_text(self, human_prefix: str = "Human",
                            ai_prefix: str = "Assistant",
                            max_token_limit: int = 2000,
message_limit: int = 10) -> str:
message_limit: Optional[int] = None) -> str:
""" """
Get history prompt text. Get history prompt text.
:param human_prefix: human prefix :param human_prefix: human prefix

+ 1
- 1
api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml Wyświetl plik

- stream-tool-call
model_properties:
mode: chat
context_size: 4096
context_size: 16385
parameter_rules:
- name: temperature
use_template: temperature

+ 3
- 3
api/core/prompt/prompt_transform.py Wyświetl plik

max_token_limit=max_token_limit,
message_limit=memory_config.window.size
if (memory_config.window.enabled
and memory_config.window.size is not None
and memory_config.window.size > 0)
else 10
and memory_config.window.size is not None
and memory_config.window.size > 0)
else None
)

Ładowanie…
Anuluj
Zapisz