
fix: respect resolution settings for vision for basic chatbot, text generator, and parameter extractor node (#16041)

tags/1.1.0
kurokobo, 7 months ago
parent
commit
86d3fff666
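Before this change, the image detail (resolution) level configured for vision uploads was not forwarded from the app configuration into the prompt builders, so the configured level was ignored. The commit threads an image_detail_config / vision_detail argument through the app runners, the prompt transforms, and the parameter extractor node. A minimal, self-contained sketch of that threading pattern follows; the types and helpers below are illustrative stand-ins, not the real Dify classes:

# Illustrative stand-ins only; the real types are ImagePromptMessageContent.DETAIL,
# file_manager.to_prompt_message_content(), and the prompt transform classes.
from enum import Enum
from typing import Optional


class Detail(Enum):
    LOW = "low"
    HIGH = "high"


def to_image_content(file: str, image_detail_config: Optional[Detail] = None) -> dict:
    # stand-in for file_manager.to_prompt_message_content(): the resolved detail now rides along
    return {"type": "image", "file": file, "detail": (image_detail_config or Detail.LOW).value}


def organize_prompt_messages(files: list[str], image_detail_config: Optional[Detail] = None) -> list[dict]:
    # before the fix the inner call omitted image_detail_config, so the setting was lost here
    return [to_image_content(f, image_detail_config=image_detail_config) for f in files]


print(organize_prompt_messages(["photo.png"], image_detail_config=Detail.HIGH))
# [{'type': 'image', 'file': 'photo.png', 'detail': 'high'}]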

+ 8
- 1
api/core/app/apps/base_app_runner.py

from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
- from core.model_runtime.entities.message_entities import AssistantPromptMessage, PromptMessage
+ from core.model_runtime.entities.message_entities import (
+     AssistantPromptMessage,
+     ImagePromptMessageContent,
+     PromptMessage,
+ )
from core.model_runtime.entities.model_entities import ModelPropertyKey
from core.model_runtime.errors.invoke import InvokeBadRequestError
from core.moderation.input_moderation import InputModeration

query: Optional[str] = None,
context: Optional[str] = None,
memory: Optional[TokenBufferMemory] = None,
+ image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
) -> tuple[list[PromptMessage], Optional[list[str]]]:
"""
Organize prompt messages

context=context,
memory=memory,
model_config=model_config,
+ image_detail_config=image_detail_config,
)
else:
memory_config = MemoryConfig(window=MemoryConfig.WindowConfig(enabled=False))

memory_config=memory_config,
memory=memory,
model_config=model_config,
+ image_detail_config=image_detail_config,
)
stop = model_config.stop



+ 13
- 0
api/core/app/apps/chat/app_runner.py

from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
+ from core.model_runtime.entities.message_entities import ImagePromptMessageContent
from core.moderation.base import ModerationError
from core.rag.retrieval.dataset_retrieval import DatasetRetrieval
from extensions.ext_database import db

query = application_generate_entity.query
files = application_generate_entity.files

+ image_detail_config = (
+     application_generate_entity.file_upload_config.image_config.detail
+     if (
+         application_generate_entity.file_upload_config
+         and application_generate_entity.file_upload_config.image_config
+     )
+     else None
+ )
+ image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW

# Pre-calculate the number of tokens of the prompt messages,
# and return the rest number of tokens by model context token size limit and max token size limit.
# If the rest number of tokens is not enough, raise exception.
files=files,
query=query,
memory=memory,
+ image_detail_config=image_detail_config,
)

# moderation
query=query,
context=context,
memory=memory,
+ image_detail_config=image_detail_config,
)

# check hosting moderation
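The runner resolves the detail level defensively: both file_upload_config and image_config may be absent, and an unset detail falls back to LOW. A standalone sketch of that resolution logic, using hypothetical dataclasses in place of the real config entities:

from dataclasses import dataclass
from enum import Enum
from typing import Optional


class Detail(Enum):  # stand-in for ImagePromptMessageContent.DETAIL
    LOW = "low"
    HIGH = "high"


@dataclass
class ImageConfig:  # hypothetical stand-in for the app's image upload config
    detail: Optional[Detail] = None


@dataclass
class FileUploadConfig:  # hypothetical stand-in for file_upload_config
    image_config: Optional[ImageConfig] = None


def resolve_detail(file_upload_config: Optional[FileUploadConfig]) -> Detail:
    # mirrors the conditional expression added to the chat and completion runners
    detail = (
        file_upload_config.image_config.detail
        if file_upload_config and file_upload_config.image_config
        else None
    )
    return detail or Detail.LOW


assert resolve_detail(None) is Detail.LOW
assert resolve_detail(FileUploadConfig(ImageConfig(Detail.HIGH))) is Detail.HIGH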

+ 13
- 0
api/core/app/apps/completion/app_runner.py

)
from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.model_manager import ModelInstance
+ from core.model_runtime.entities.message_entities import ImagePromptMessageContent
from core.moderation.base import ModerationError
from core.rag.retrieval.dataset_retrieval import DatasetRetrieval
from extensions.ext_database import db

query = application_generate_entity.query
files = application_generate_entity.files

+ image_detail_config = (
+     application_generate_entity.file_upload_config.image_config.detail
+     if (
+         application_generate_entity.file_upload_config
+         and application_generate_entity.file_upload_config.image_config
+     )
+     else None
+ )
+ image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW

# Pre-calculate the number of tokens of the prompt messages,
# and return the rest number of tokens by model context token size limit and max token size limit.
# If the rest number of tokens is not enough, raise exception.
inputs=inputs,
files=files,
query=query,
+ image_detail_config=image_detail_config,
)

# moderation
files=files,
query=query,
context=context,
+ image_detail_config=image_detail_config,
)

# check hosting moderation

+ 20
- 5
api/core/prompt/advanced_prompt_transform.py

memory_config: Optional[MemoryConfig],
memory: Optional[TokenBufferMemory],
model_config: ModelConfigWithCredentialsEntity,
+ image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
) -> list[PromptMessage]:
prompt_messages = []

memory_config=memory_config,
memory=memory,
model_config=model_config,
+ image_detail_config=image_detail_config,
)
elif isinstance(prompt_template, list) and all(isinstance(item, ChatModelMessage) for item in prompt_template):
prompt_messages = self._get_chat_model_prompt_messages(
memory_config=memory_config,
memory=memory,
model_config=model_config,
+ image_detail_config=image_detail_config,
)

return prompt_messages

memory_config: Optional[MemoryConfig],
memory: Optional[TokenBufferMemory],
model_config: ModelConfigWithCredentialsEntity,
+ image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
) -> list[PromptMessage]:
"""
Get completion model prompt messages.

prompt_message_contents: list[PromptMessageContent] = []
prompt_message_contents.append(TextPromptMessageContent(data=prompt))
for file in files:
- prompt_message_contents.append(file_manager.to_prompt_message_content(file))
+ prompt_message_contents.append(
+     file_manager.to_prompt_message_content(file, image_detail_config=image_detail_config)
+ )

prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
else:

memory_config: Optional[MemoryConfig],
memory: Optional[TokenBufferMemory],
model_config: ModelConfigWithCredentialsEntity,
+ image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
) -> list[PromptMessage]:
"""
Get chat model prompt messages.

prompt_message_contents: list[PromptMessageContent] = []
prompt_message_contents.append(TextPromptMessageContent(data=query))
for file in files:
- prompt_message_contents.append(file_manager.to_prompt_message_content(file))
+ prompt_message_contents.append(
+     file_manager.to_prompt_message_content(file, image_detail_config=image_detail_config)
+ )
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
else:
prompt_messages.append(UserPromptMessage(content=query))

# get last user message content and add files
prompt_message_contents = [TextPromptMessageContent(data=cast(str, last_message.content))]
for file in files:
- prompt_message_contents.append(file_manager.to_prompt_message_content(file))
+ prompt_message_contents.append(
+     file_manager.to_prompt_message_content(file, image_detail_config=image_detail_config)
+ )

last_message.content = prompt_message_contents
else:
prompt_message_contents = [TextPromptMessageContent(data="")] # not for query
for file in files:
- prompt_message_contents.append(file_manager.to_prompt_message_content(file))
+ prompt_message_contents.append(
+     file_manager.to_prompt_message_content(file, image_detail_config=image_detail_config)
+ )

prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
else:
prompt_message_contents = [TextPromptMessageContent(data=query)]
for file in files:
- prompt_message_contents.append(file_manager.to_prompt_message_content(file))
+ prompt_message_contents.append(
+     file_manager.to_prompt_message_content(file, image_detail_config=image_detail_config)
+ )

prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
elif query:

+ 18
- 5
api/core/prompt/simple_prompt_transform.py

from core.file import file_manager
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_runtime.entities.message_entities import (
+ ImagePromptMessageContent,
PromptMessage,
PromptMessageContent,
SystemPromptMessage,

context: Optional[str],
memory: Optional[TokenBufferMemory],
model_config: ModelConfigWithCredentialsEntity,
+ image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
) -> tuple[list[PromptMessage], Optional[list[str]]]:
inputs = {key: str(value) for key, value in inputs.items()}

context=context,
memory=memory,
model_config=model_config,
+ image_detail_config=image_detail_config,
)
else:
prompt_messages, stops = self._get_completion_model_prompt_messages(
context=context,
memory=memory,
model_config=model_config,
+ image_detail_config=image_detail_config,
)

return prompt_messages, stops

files: Sequence["File"],
memory: Optional[TokenBufferMemory],
model_config: ModelConfigWithCredentialsEntity,
+ image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
) -> tuple[list[PromptMessage], Optional[list[str]]]:
prompt_messages: list[PromptMessage] = []

)

if query:
- prompt_messages.append(self.get_last_user_message(query, files))
+ prompt_messages.append(self.get_last_user_message(query, files, image_detail_config))
else:
- prompt_messages.append(self.get_last_user_message(prompt, files))
+ prompt_messages.append(self.get_last_user_message(prompt, files, image_detail_config))

return prompt_messages, None

files: Sequence["File"],
memory: Optional[TokenBufferMemory],
model_config: ModelConfigWithCredentialsEntity,
+ image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
) -> tuple[list[PromptMessage], Optional[list[str]]]:
# get prompt
prompt, prompt_rules = self.get_prompt_str_and_rules(

if stops is not None and len(stops) == 0:
stops = None

- return [self.get_last_user_message(prompt, files)], stops
+ return [self.get_last_user_message(prompt, files, image_detail_config)], stops

- def get_last_user_message(self, prompt: str, files: Sequence["File"]) -> UserPromptMessage:
+ def get_last_user_message(
+     self,
+     prompt: str,
+     files: Sequence["File"],
+     image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
+ ) -> UserPromptMessage:
if files:
prompt_message_contents: list[PromptMessageContent] = []
prompt_message_contents.append(TextPromptMessageContent(data=prompt))
for file in files:
- prompt_message_contents.append(file_manager.to_prompt_message_content(file))
+ prompt_message_contents.append(
+     file_manager.to_prompt_message_content(file, image_detail_config=image_detail_config)
+ )

prompt_message = UserPromptMessage(content=prompt_message_contents)
else:
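The new parameter on get_last_user_message is keyword-optional and defaults to None, so existing two-argument callers keep working while vision-aware callers can pass the detail through. A toy stand-in showing that call shape:

from enum import Enum
from typing import Optional, Sequence


class Detail(Enum):
    LOW = "low"
    HIGH = "high"


def get_last_user_message(prompt: str, files: Sequence[str], image_detail_config: Optional[Detail] = None) -> dict:
    # toy stand-in, not the real SimplePromptTransform method
    if not files:
        return {"role": "user", "content": prompt}
    parts: list[dict] = [{"type": "text", "data": prompt}]
    for f in files:
        parts.append({"type": "image", "file": f, "detail": (image_detail_config or Detail.LOW).value})
    return {"role": "user", "content": parts}


print(get_last_user_message("caption this", ["photo.jpg"]))                # old call shape still valid
print(get_last_user_message("caption this", ["photo.jpg"], Detail.HIGH))   # new, detail-aware call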

+ 12
- 0
api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py

from core.file import File
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
+ from core.model_runtime.entities import ImagePromptMessageContent
from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,

model_config=model_config,
memory=memory,
files=files,
+ vision_detail=node_data.vision.configs.detail,
)
else:
# use prompt engineering
model_config=model_config,
memory=memory,
files=files,
+ vision_detail=node_data.vision.configs.detail,
)

prompt_message_tools = []

model_config: ModelConfigWithCredentialsEntity,
memory: Optional[TokenBufferMemory],
files: Sequence[File],
+ vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
) -> tuple[list[PromptMessage], list[PromptMessageTool]]:
"""
Generate function call prompt.

memory_config=node_data.memory,
memory=None,
model_config=model_config,
+ image_detail_config=vision_detail,
)

# find last user message

model_config: ModelConfigWithCredentialsEntity,
memory: Optional[TokenBufferMemory],
files: Sequence[File],
+ vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
) -> list[PromptMessage]:
"""
Generate prompt engineering prompt.

model_config=model_config,
memory=memory,
files=files,
+ vision_detail=vision_detail,
)
elif model_mode == ModelMode.CHAT:
return self._generate_prompt_engineering_chat_prompt(
model_config=model_config,
memory=memory,
files=files,
+ vision_detail=vision_detail,
)
else:
raise InvalidModelModeError(f"Invalid model mode: {model_mode}")

model_config: ModelConfigWithCredentialsEntity,
memory: Optional[TokenBufferMemory],
files: Sequence[File],
+ vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
) -> list[PromptMessage]:
"""
Generate completion prompt.

memory_config=node_data.memory,
memory=memory,
model_config=model_config,
+ image_detail_config=vision_detail,
)

return prompt_messages

model_config: ModelConfigWithCredentialsEntity,
memory: Optional[TokenBufferMemory],
files: Sequence[File],
+ vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
) -> list[PromptMessage]:
"""
Generate chat prompt.

memory_config=node_data.memory,
memory=None,
model_config=model_config,
+ image_detail_config=vision_detail,
)

# find last user message
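In the parameter extractor node, the detail level comes from the node's own vision settings (node_data.vision.configs.detail in the diff) rather than from the app-level upload config, and the same vision_detail is forwarded on both the function-calling and prompt-engineering paths. A small sketch of reading that nested setting, with hypothetical stand-in dataclasses:

from dataclasses import dataclass
from enum import Enum
from typing import Optional


class Detail(Enum):  # stand-in for ImagePromptMessageContent.DETAIL
    LOW = "low"
    HIGH = "high"


@dataclass
class VisionConfigs:  # hypothetical stand-in for node_data.vision.configs
    detail: Detail = Detail.HIGH


@dataclass
class Vision:  # hypothetical stand-in for node_data.vision
    enabled: bool = True
    configs: Optional[VisionConfigs] = None


def pick_vision_detail(vision: Vision) -> Optional[Detail]:
    # the node passes its configured detail down as vision_detail;
    # returning None here would let downstream code fall back to LOW
    return vision.configs.detail if vision.enabled and vision.configs else None


print(pick_vision_detail(Vision(configs=VisionConfigs(Detail.HIGH))))  # Detail.HIGH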
