
🐛 Fix(Gemini LLM): Support Gemini 0.2.x plugin on agent app (#20794)

Co-authored-by: QuantumGhost <obelisk.reg+git@gmail.com>
tags/1.4.3
Takuya Ono, 4 months ago
Commit af83120832

api/core/app/apps/base_app_runner.py (+17, -4)

@@ -1,3 +1,4 @@
+import logging
 import time
 from collections.abc import Generator, Mapping, Sequence
 from typing import TYPE_CHECKING, Any, Optional, Union
@@ -33,6 +34,8 @@ from models.model import App, AppMode, Message, MessageAnnotation
 if TYPE_CHECKING:
     from core.file.models import File
 
+_logger = logging.getLogger(__name__)
+
 
 class AppRunner:
     def get_pre_calculate_rest_tokens(
@@ -298,7 +301,7 @@ class AppRunner:
         )
 
     def _handle_invoke_result_stream(
-        self, invoke_result: Generator, queue_manager: AppQueueManager, agent: bool
+        self, invoke_result: Generator[LLMResultChunk, None, None], queue_manager: AppQueueManager, agent: bool
     ) -> None:
         """
        Handle invoke result
@@ -317,18 +320,28 @@
             else:
                 queue_manager.publish(QueueAgentMessageEvent(chunk=result), PublishFrom.APPLICATION_MANAGER)
 
-            text += result.delta.message.content
+            message = result.delta.message
+            if isinstance(message.content, str):
+                text += message.content
+            elif isinstance(message.content, list):
+                for content in message.content:
+                    if not isinstance(content, str):
+                        # TODO(QuantumGhost): Add multimodal output support for easy ui.
+                        _logger.warning("received multimodal output, type=%s", type(content))
+                        text += content.data
+                    else:
+                        text += content  # fallback to str
 
             if not model:
                 model = result.model
 
             if not prompt_messages:
-                prompt_messages = result.prompt_messages
+                prompt_messages = list(result.prompt_messages)
 
             if result.delta.usage:
                 usage = result.delta.usage
 
-        if not usage:
+        if usage is None:
             usage = LLMUsage.empty_usage()
 
         llm_result = LLMResult(
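For orientation, here is a minimal, self-contained sketch of the normalization this hunk adds. It assumes only what the diff shows: a non-str content item exposes its text via a .data attribute. The TextContent class and normalize_content function below are hypothetical illustrations, not part of the Dify codebase.

from dataclasses import dataclass
from typing import Union

@dataclass
class TextContent:
    # Hypothetical stand-in for a plugin content object; the diff only
    # relies on the `.data` attribute carrying the text.
    data: str

def normalize_content(content: Union[str, list]) -> str:
    # Collapse str-or-list message content into plain text, mirroring
    # the branch added to _handle_invoke_result_stream above.
    if isinstance(content, str):
        return content
    text = ""
    for item in content:
        if isinstance(item, str):
            text += item       # plain string chunk
        else:
            text += item.data  # object chunk: take its text payload
    return text

# Gemini 0.2.x plugin chunks may arrive as a list rather than a str:
assert normalize_content([TextContent(data="Hello, "), "world"]) == "Hello, world"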

api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py (+18, -0)

@@ -48,6 +48,7 @@ from core.model_manager import ModelInstance
 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
 from core.model_runtime.entities.message_entities import (
     AssistantPromptMessage,
+    TextPromptMessageContent,
 )
 from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
 from core.ops.entities.trace_entity import TraceTaskName
@@ -309,6 +310,23 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline):
                 delta_text = chunk.delta.message.content
                 if delta_text is None:
                     continue
+                if isinstance(chunk.delta.message.content, list):
+                    delta_text = ""
+                    for content in chunk.delta.message.content:
+                        logger.debug(
+                            "Content type %s in LLM chunk delta message content: %r", type(content), content
+                        )
+                        if isinstance(content, TextPromptMessageContent):
+                            delta_text += content.data
+                        elif isinstance(content, str):
+                            delta_text += content  # fallback to str
+                        else:
+                            logger.warning(
+                                "Unsupported content type %s in LLM chunk delta message content: %r",
+                                type(content),
+                                content,
+                            )
+                            continue
 
                 if not self._task_state.llm_result.prompt_messages:
                     self._task_state.llm_result.prompt_messages = chunk.prompt_messages
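A similar sketch for the pipeline side, assuming (as the diff does) that TextPromptMessageContent carries its text in .data; the ImageContent class and extract_delta_text function are hypothetical names used only for this example.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class TextPromptMessageContent:
    # Stand-in with the one attribute the diff uses.
    def __init__(self, data: str):
        self.data = data

class ImageContent:
    # Hypothetical non-text content; the pipeline skips it with a warning.
    pass

def extract_delta_text(content) -> str:
    # Mirror of the list-handling branch added above: concatenate text
    # parts, tolerate plain strings, warn on and skip anything else.
    if not isinstance(content, list):
        return content or ""
    delta_text = ""
    for item in content:
        if isinstance(item, TextPromptMessageContent):
            delta_text += item.data
        elif isinstance(item, str):
            delta_text += item  # fallback to str
        else:
            logger.warning("Unsupported content type %s: %r", type(item), item)
    return delta_text

print(extract_delta_text([TextPromptMessageContent("Hi"), " there", ImageContent()]))
# -> "Hi there" (the ImageContent item is logged and skipped)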
