Browse Source

feat: set default memory messages limit to infinite (#5002)

tags/0.6.11
takatost 1 year ago
parent
commit
3929d289e0
No account linked to committer's email address

+ 11
- 4
api/core/memory/token_buffer_memory.py View File

@@ -1,3 +1,5 @@
+from typing import Optional
+
 from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
 from core.file.message_file_parser import MessageFileParser
 from core.model_manager import ModelInstance
@@ -19,7 +21,7 @@ class TokenBufferMemory:
         self.model_instance = model_instance
 
     def get_history_prompt_messages(self, max_token_limit: int = 2000,
-                                    message_limit: int = 10) -> list[PromptMessage]:
+                                    message_limit: Optional[int] = None) -> list[PromptMessage]:
"""
Get history prompt messages.
:param max_token_limit: max token limit
@@ -28,10 +30,15 @@ class TokenBufferMemory:
         app_record = self.conversation.app
 
         # fetch limited messages, and return reversed
-        messages = db.session.query(Message).filter(
+        query = db.session.query(Message).filter(
             Message.conversation_id == self.conversation.id,
             Message.answer != ''
-        ).order_by(Message.created_at.desc()).limit(message_limit).all()
+        ).order_by(Message.created_at.desc())
+
+        if message_limit and message_limit > 0:
+            messages = query.limit(message_limit).all()
+        else:
+            messages = query.all()
 
         messages = list(reversed(messages))
message_file_parser = MessageFileParser(
@@ -93,7 +100,7 @@ class TokenBufferMemory:
     def get_history_prompt_text(self, human_prefix: str = "Human",
                                 ai_prefix: str = "Assistant",
                                 max_token_limit: int = 2000,
-                                message_limit: int = 10) -> str:
+                                message_limit: Optional[int] = None) -> str:
"""
Get history prompt text.
:param human_prefix: human prefix

+ 1
- 1
api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml View File

@@ -9,7 +9,7 @@ features:
 - stream-tool-call
 model_properties:
   mode: chat
-  context_size: 4096
+  context_size: 16385
parameter_rules:
- name: temperature
  use_template: temperature

+ 3
- 3
api/core/prompt/prompt_transform.py View File

@@ -77,7 +77,7 @@ class PromptTransform:
                 max_token_limit=max_token_limit,
                 message_limit=memory_config.window.size
                 if (memory_config.window.enabled
-                    and memory_config.window.size is not None
-                    and memory_config.window.size > 0)
-                else 10
+                    and memory_config.window.size is not None
+                    and memory_config.window.size > 0)
+                else None
             )

Loading…
Cancel
Save