
feat: re-add prompt messages to result and chunks in llm (#17883)

Signed-off-by: -LAN- <laipz8200@outlook.com>
tags/1.3.0
-LAN- 6 months ago
parent
commit 8e6f6d64a4

api/core/model_manager.py  +1 -1

    )

    def get_llm_num_tokens(
-       self, prompt_messages: list[PromptMessage], tools: Optional[list[PromptMessageTool]] = None
+       self, prompt_messages: Sequence[PromptMessage], tools: Optional[Sequence[PromptMessageTool]] = None
    ) -> int:
        """
        Get number of tokens for llm
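Widening prompt_messages (and tools) from list to Sequence lets callers of get_llm_num_tokens pass any read-only sequence, such as a tuple, without converting first. A minimal sketch of the effect, using a hypothetical word-count stand-in rather than the real token counter:

from collections.abc import Sequence
from typing import Optional


class PromptMessage:
    """Hypothetical stand-in for the runtime's PromptMessage entity."""

    def __init__(self, content: str) -> None:
        self.content = content


def get_llm_num_tokens(
    prompt_messages: Sequence[PromptMessage],
    tools: Optional[Sequence[object]] = None,
) -> int:
    # Sequence accepts lists, tuples, and other read-only sequences alike.
    return sum(len(message.content.split()) for message in prompt_messages)


print(get_llm_num_tokens([PromptMessage("count these tokens")]))              # lists still work
print(get_llm_num_tokens((PromptMessage("so"), PromptMessage("do tuples"))))  # tuples now type-check too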

api/core/model_runtime/callbacks/base_callback.py  +2 -2

        chunk: LLMResultChunk,
        model: str,
        credentials: dict,
-       prompt_messages: list[PromptMessage],
+       prompt_messages: Sequence[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[Sequence[str]] = None,

        result: LLMResult,
        model: str,
        credentials: dict,
-       prompt_messages: list[PromptMessage],
+       prompt_messages: Sequence[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[Sequence[str]] = None,
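Both callback hooks touched above (the chunk-level one and the result-level one) now declare prompt_messages as Sequence[PromptMessage], so implementations should treat it as read-only. A minimal sketch of a custom callback written against the widened chunk hook; the method name on_new_chunk and the stand-in types are assumptions, only the parameter list mirrors the diff:

from collections.abc import Sequence
from typing import Optional


class PromptMessage: ...          # stand-ins for the real runtime entities
class PromptMessageTool: ...
class LLMResultChunk: ...


class CountingCallback:
    """Sketch: counts prompt messages seen per chunk, never mutates them."""

    def on_new_chunk(
        self,
        chunk: LLMResultChunk,
        model: str,
        credentials: dict,
        prompt_messages: Sequence[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[Sequence[str]] = None,
    ) -> None:
        # A Sequence may be a tuple, so only read from it here.
        print(f"{model}: chunk received with {len(prompt_messages)} prompt messages")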

api/core/model_runtime/callbacks/logging_callback.py  +2 -2

        chunk: LLMResultChunk,
        model: str,
        credentials: dict,
-       prompt_messages: list[PromptMessage],
+       prompt_messages: Sequence[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[Sequence[str]] = None,

        result: LLMResult,
        model: str,
        credentials: dict,
-       prompt_messages: list[PromptMessage],
+       prompt_messages: Sequence[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[Sequence[str]] = None,

api/core/model_runtime/entities/llm_entities.py  +4 -3

+   from collections.abc import Sequence
    from decimal import Decimal
    from enum import StrEnum
    from typing import Optional

-   from pydantic import BaseModel
+   from pydantic import BaseModel, Field

    from core.model_runtime.entities.message_entities import AssistantPromptMessage, PromptMessage
    from core.model_runtime.entities.model_entities import ModelUsage, PriceInfo

    id: Optional[str] = None
    model: str
-   prompt_messages: list[PromptMessage]
+   prompt_messages: Sequence[PromptMessage] = Field(default_factory=list)
    message: AssistantPromptMessage
    usage: LLMUsage
    system_fingerprint: Optional[str] = None
    """

    model: str
-   prompt_messages: list[PromptMessage]
+   prompt_messages: Sequence[PromptMessage] = Field(default_factory=list)
    system_fingerprint: Optional[str] = None
    delta: LLMResultChunkDelta
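Giving prompt_messages a Field(default_factory=list) default means LLMResult and LLMResultChunk validate even when the field is missing from an incoming payload, which is exactly the case once the plugin daemon stops sending it. A minimal sketch of the pattern with a cut-down model; only the prompt_messages line mirrors the diff, the other fields and names are illustrative:

from collections.abc import Sequence

from pydantic import BaseModel, Field


class PromptMessage(BaseModel):
    content: str


class ChunkSketch(BaseModel):
    model: str
    # Same shape as the changed field: optional, defaults to an empty list.
    prompt_messages: Sequence[PromptMessage] = Field(default_factory=list)


# Payloads without prompt_messages (as the plugin daemon now sends) still validate.
chunk = ChunkSketch.model_validate({"model": "example-model"})
print(chunk.prompt_messages)  # []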



api/core/model_runtime/model_providers/__base/large_language_model.py  +15 -7

        stream: bool = True,
        user: Optional[str] = None,
        callbacks: Optional[list[Callback]] = None,
-   ) -> Union[LLMResult, Generator]:
+   ) -> Union[LLMResult, Generator[LLMResultChunk, None, None]]:
        """
        Invoke large language model

            user=user,
            callbacks=callbacks,
        )

-       return result
+       # Following https://github.com/langgenius/dify/issues/17799,
+       # we removed the prompt_messages from the chunk on the plugin daemon side.
+       # To ensure compatibility, we add the prompt_messages back here.
+       result.prompt_messages = prompt_messages
+       return result
+   raise NotImplementedError("unsupported invoke result type", type(result))

    def _invoke_result_generator(
        self,
        model: str,
        result: Generator,
        credentials: dict,
-       prompt_messages: list[PromptMessage],
+       prompt_messages: Sequence[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[Sequence[str]] = None,
        stream: bool = True,
        user: Optional[str] = None,
        callbacks: Optional[list[Callback]] = None,
-   ) -> Generator:
+   ) -> Generator[LLMResultChunk, None, None]:
        """
        Invoke result generator

        try:
            for chunk in result:
+               # Following https://github.com/langgenius/dify/issues/17799,
+               # we removed the prompt_messages from the chunk on the plugin daemon side.
+               # To ensure compatibility, we add the prompt_messages back here.
+               chunk.prompt_messages = prompt_messages
                yield chunk

                self._trigger_new_chunk_callbacks(

        chunk: LLMResultChunk,
        model: str,
        credentials: dict,
-       prompt_messages: list[PromptMessage],
+       prompt_messages: Sequence[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[Sequence[str]] = None,

        model: str,
        result: LLMResult,
        credentials: dict,
-       prompt_messages: list[PromptMessage],
+       prompt_messages: Sequence[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[Sequence[str]] = None,
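Taken together, these changes restore the old contract: downstream code (callbacks, logging, tracing) can keep reading prompt_messages off every result and chunk even though the plugin daemon no longer sends them, because the base class re-attaches the caller's copy before returning or yielding. A minimal sketch of that re-attach pattern with simplified stand-in types; the names here are illustrative, not the repository's:

from collections.abc import Generator, Sequence
from dataclasses import dataclass, field


@dataclass
class PromptMessage:
    content: str


@dataclass
class ChunkSketch:
    text: str
    # Arrives empty from the daemon side and is re-filled before reaching callers.
    prompt_messages: Sequence[PromptMessage] = field(default_factory=list)


def reattach_prompt_messages(
    chunks: Generator[ChunkSketch, None, None],
    prompt_messages: Sequence[PromptMessage],
) -> Generator[ChunkSketch, None, None]:
    for chunk in chunks:
        # Mirrors the loop in the diff: restore the prompt messages for compatibility.
        chunk.prompt_messages = prompt_messages
        yield chunk


def daemon_stream() -> Generator[ChunkSketch, None, None]:
    yield ChunkSketch(text="Hel")
    yield ChunkSketch(text="lo")


prompts = [PromptMessage(content="Say hello")]
for chunk in reattach_prompt_messages(daemon_stream(), prompts):
    print(chunk.text, len(chunk.prompt_messages))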
