
chore: apply ruff rules on tests and app.py (#3605)

tags/0.6.4
Bowen Liang 1 year ago
commit d9b821cecc
92 changed files with 379 additions and 177 deletions
  1. api/app.py (+6, -6)
  2. api/controllers/__init__.py (+0, -1)
  3. api/controllers/console/__init__.py (+40, -7)
  4. api/controllers/console/app/app.py (+4, -5)
  5. api/controllers/files/__init__.py (+1, -1)
  6. api/controllers/inner_api/__init__.py (+1, -0)
  7. api/controllers/service_api/__init__.py (+1, -1)
  8. api/controllers/web/__init__.py (+1, -1)
  9. api/controllers/web/app.py (+1, -1)
  10. api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__init__.py (+12, -3)
  11. api/events/event_handlers/__init__.py (+1, -1)
  12. api/libs/__init__.py (+0, -1)
  13. api/pyproject.toml (+14, -3)
  14. api/services/__init__.py (+0, -1)
  15. api/services/errors/__init__.py (+0, -1)
  16. api/tests/integration_tests/model_runtime/__mock/anthropic.py (+18, -8)
  17. api/tests/integration_tests/model_runtime/__mock/google.py (+6, -6)
  18. api/tests/integration_tests/model_runtime/__mock/huggingface.py (+1, -1)
  19. api/tests/integration_tests/model_runtime/__mock/huggingface_chat.py (+10, -4)
  20. api/tests/integration_tests/model_runtime/__mock/openai.py (+5, -2)
  21. api/tests/integration_tests/model_runtime/__mock/openai_chat.py (+31, -18)
  22. api/tests/integration_tests/model_runtime/__mock/openai_completion.py (+7, -4)
  23. api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py (+5, -4)
  24. api/tests/integration_tests/model_runtime/__mock/openai_moderation.py (+5, -4)
  25. api/tests/integration_tests/model_runtime/__mock/openai_remote.py (+2, -3)
  26. api/tests/integration_tests/model_runtime/__mock/openai_speech2text.py (+4, -3)
  27. api/tests/integration_tests/model_runtime/__mock/xinference.py (+12, -7)
  28. api/tests/integration_tests/model_runtime/anthropic/test_llm.py (+2, -1)
  29. api/tests/integration_tests/model_runtime/anthropic/test_provider.py (+1, -0)
  30. api/tests/integration_tests/model_runtime/azure_openai/test_llm.py (+10, -4)
  31. api/tests/integration_tests/model_runtime/azure_openai/test_text_embedding.py (+1, -0)
  32. api/tests/integration_tests/model_runtime/baichuan/test_llm.py (+2, -1)
  33. api/tests/integration_tests/model_runtime/baichuan/test_provider.py (+1, -0)
  34. api/tests/integration_tests/model_runtime/baichuan/test_text_embedding.py (+1, -0)
  35. api/tests/integration_tests/model_runtime/bedrock/test_llm.py (+2, -1)
  36. api/tests/integration_tests/model_runtime/bedrock/test_provider.py (+1, -0)
  37. api/tests/integration_tests/model_runtime/chatglm/test_llm.py (+9, -4)
  38. api/tests/integration_tests/model_runtime/chatglm/test_provider.py (+1, -0)
  39. api/tests/integration_tests/model_runtime/cohere/test_llm.py (+2, -1)
  40. api/tests/integration_tests/model_runtime/cohere/test_provider.py (+1, -0)
  41. api/tests/integration_tests/model_runtime/cohere/test_rerank.py (+1, -0)
  42. api/tests/integration_tests/model_runtime/cohere/test_text_embedding.py (+1, -0)
  43. api/tests/integration_tests/model_runtime/google/test_llm.py (+9, -4)
  44. api/tests/integration_tests/model_runtime/google/test_provider.py (+1, -0)
  45. api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py (+2, -1)
  46. api/tests/integration_tests/model_runtime/huggingface_hub/test_text_embedding.py (+4, -2)
  47. api/tests/integration_tests/model_runtime/jina/test_provider.py (+1, -0)
  48. api/tests/integration_tests/model_runtime/jina/test_text_embedding.py (+1, -0)
  49. api/tests/integration_tests/model_runtime/localai/test_llm.py (+9, -4)
  50. api/tests/integration_tests/model_runtime/minimax/test_embedding.py (+1, -0)
  51. api/tests/integration_tests/model_runtime/minimax/test_llm.py (+2, -1)
  52. api/tests/integration_tests/model_runtime/minimax/test_provider.py (+1, -0)
  53. api/tests/integration_tests/model_runtime/ollama/test_llm.py (+9, -4)
  54. api/tests/integration_tests/model_runtime/ollama/test_text_embedding.py (+1, -0)
  55. api/tests/integration_tests/model_runtime/openai/test_llm.py (+10, -4)
  56. api/tests/integration_tests/model_runtime/openai/test_moderation.py (+1, -0)
  57. api/tests/integration_tests/model_runtime/openai/test_provider.py (+1, -0)
  58. api/tests/integration_tests/model_runtime/openai/test_speech2text.py (+1, -0)
  59. api/tests/integration_tests/model_runtime/openai/test_text_embedding.py (+1, -0)
  60. api/tests/integration_tests/model_runtime/openai_api_compatible/test_llm.py (+8, -3)
  61. api/tests/integration_tests/model_runtime/openai_api_compatible/test_text_embedding.py (+4, -2)
  62. api/tests/integration_tests/model_runtime/openllm/test_embedding.py (+1, -0)
  63. api/tests/integration_tests/model_runtime/openllm/test_llm.py (+2, -1)
  64. api/tests/integration_tests/model_runtime/openrouter/test_llm.py (+8, -3)
  65. api/tests/integration_tests/model_runtime/replicate/test_llm.py (+2, -1)
  66. api/tests/integration_tests/model_runtime/replicate/test_text_embedding.py (+1, -0)
  67. api/tests/integration_tests/model_runtime/spark/test_llm.py (+2, -1)
  68. api/tests/integration_tests/model_runtime/spark/test_provider.py (+1, -0)
  69. api/tests/integration_tests/model_runtime/togetherai/test_llm.py (+8, -3)
  70. api/tests/integration_tests/model_runtime/tongyi/test_llm.py (+2, -1)
  71. api/tests/integration_tests/model_runtime/tongyi/test_provider.py (+1, -0)
  72. api/tests/integration_tests/model_runtime/wenxin/test_llm.py (+2, -1)
  73. api/tests/integration_tests/model_runtime/wenxin/test_provider.py (+1, -0)
  74. api/tests/integration_tests/model_runtime/xinference/test_embeddings.py (+1, -0)
  75. api/tests/integration_tests/model_runtime/xinference/test_llm.py (+9, -4)
  76. api/tests/integration_tests/model_runtime/xinference/test_rerank.py (+1, -0)
  77. api/tests/integration_tests/model_runtime/zhipuai/test_llm.py (+8, -3)
  78. api/tests/integration_tests/model_runtime/zhipuai/test_provider.py (+1, -0)
  79. api/tests/integration_tests/model_runtime/zhipuai/test_text_embedding.py (+1, -0)
  80. api/tests/integration_tests/tools/test_all_provider.py (+1, -0)
  81. api/tests/integration_tests/utils/test_module_import_helper.py (+1, -1)
  82. api/tests/integration_tests/workflow/nodes/__mock/code_executor.py (+3, -2)
  83. api/tests/integration_tests/workflow/nodes/__mock/http.py (+6, -6)
  84. api/tests/integration_tests/workflow/nodes/test_code.py (+3, -3)
  85. api/tests/integration_tests/workflow/nodes/test_http.py (+1, -1)
  86. api/tests/integration_tests/workflow/nodes/test_llm.py (+2, -2)
  87. api/tests/integration_tests/workflow/nodes/test_template_transform.py (+1, -0)
  88. api/tests/integration_tests/workflow/nodes/test_tool.py (+1, -1)
  89. api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py (+4, -4)
  90. api/tests/unit_tests/core/prompt/test_prompt_transform.py (+1, -1)
  91. api/tests/unit_tests/core/prompt/test_simple_prompt_transform.py (+1, -1)
  92. api/tests/unit_tests/services/workflow/test_workflow_converter.py (+11, -3)
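
Most of the per-file hunks below are mechanical autofixes of the kind ruff's isort and pyupgrade rules produce (e.g. I001 import sorting, UP006/UP035 builtin generics and collections.abc imports, UP004 useless object inheritance). A condensed before/after sketch of these patterns, using an illustrative class name rather than code taken from the diff:

# Before (the style ruff flags):
#   from typing import Generator, List
#
#   class MockClientClass(object):
#       def embed(self, texts: List[str]) -> Generator[str, None, None]:
#           ...
#
# After the kind of autofix applied throughout this commit:
from collections.abc import Generator  # UP035: Generator now comes from collections.abc


class MockClientClass:  # UP004: drop the redundant `object` base class
    def embed(self, texts: list[str]) -> Generator[str, None, None]:  # UP006: builtin list[...]
        # Toy behavior so the sketch runs on its own; the real mocks return SDK objects.
        yield from (t.upper() for t in texts)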

api/app.py (+6, -6)

 from flask import Flask, Response, request
 from flask_cors import CORS

 from werkzeug.exceptions import Unauthorized

 from commands import register_commands
 from config import CloudEditionConfig, Config

+# DO NOT REMOVE BELOW
+from events import event_handlers
 from extensions import (
     ext_celery,
     ext_code_based_extension,
 from extensions.ext_database import db
 from extensions.ext_login import login_manager
 from libs.passport import PassportService
-from services.account_service import AccountService
-
-# DO NOT REMOVE BELOW
-from events import event_handlers
 from models import account, dataset, model, source, task, tool, tools, web
+from services.account_service import AccountService
+
 # DO NOT REMOVE ABOVE


 def register_blueprints(app):
     from controllers.console import bp as console_app_bp
     from controllers.files import bp as files_bp
+    from controllers.inner_api import bp as inner_api_bp
     from controllers.service_api import bp as service_api_bp
     from controllers.web import bp as web_bp
-    from controllers.inner_api import bp as inner_api_bp

     CORS(service_api_bp,
          allow_headers=['Content-Type', 'Authorization', 'X-App-Code'],

api/controllers/__init__.py (+0, -1)

-# -*- coding:utf-8 -*-

api/controllers/console/__init__.py (+40, -7)

 from flask import Blueprint

 from libs.external_api import ExternalApi

 bp = Blueprint('console', __name__, url_prefix='/console/api')
 api = ExternalApi(bp)

 # Import other controllers
-from . import admin, apikey, extension, feature, setup, version, ping
+from . import admin, apikey, extension, feature, ping, setup, version

 # Import app controllers
-from .app import (advanced_prompt_template, annotation, app, audio, completion, conversation, generator, message,
-                  model_config, site, statistic, workflow, workflow_run, workflow_app_log, workflow_statistic, agent)
+from .app import (
+    advanced_prompt_template,
+    agent,
+    annotation,
+    app,
+    audio,
+    completion,
+    conversation,
+    generator,
+    message,
+    model_config,
+    site,
+    statistic,
+    workflow,
+    workflow_app_log,
+    workflow_run,
+    workflow_statistic,
+)

 # Import auth controllers
 from .auth import activate, data_source_oauth, login, oauth

 # Import billing controllers
 from .billing import billing

 # Import datasets controllers
 from .datasets import data_source, datasets, datasets_document, datasets_segments, file, hit_testing

+# Import enterprise controllers
+from .enterprise import enterprise_sso
+
 # Import explore controllers
-from .explore import (audio, completion, conversation, installed_app, message, parameter, recommended_app,
-                      saved_message, workflow)
+from .explore import (
+    audio,
+    completion,
+    conversation,
+    installed_app,
+    message,
+    parameter,
+    recommended_app,
+    saved_message,
+    workflow,
+)

 # Import workspace controllers
 from .workspace import account, members, model_providers, models, tool_providers, workspace
-# Import enterprise controllers
-from .enterprise import enterprise_sso

api/controllers/console/app/app.py (+4, -5)

 from flask_login import current_user
 from flask_restful import Resource, inputs, marshal_with, reqparse
-from werkzeug.exceptions import Forbidden, BadRequest
+from werkzeug.exceptions import BadRequest, Forbidden

 from controllers.console import api
 from controllers.console.app.wraps import get_app_model
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
 from core.agent.entities import AgentToolEntity
+from core.tools.tool_manager import ToolManager
+from core.tools.utils.configuration import ToolParameterConfigurationManager
 from extensions.ext_database import db
 from fields.app_fields import (
     app_detail_fields,
     app_pagination_fields,
 )
 from libs.login import login_required
+from models.model import App, AppMode, AppModelConfig
 from services.app_service import AppService
-from models.model import App, AppModelConfig, AppMode
-from core.tools.utils.configuration import ToolParameterConfigurationManager
-from core.tools.tool_manager import ToolManager

 ALLOW_CREATE_APP_MODES = ['chat', 'agent-chat', 'advanced-chat', 'workflow', 'completion']

api/controllers/files/__init__.py (+1, -1)

-# -*- coding:utf-8 -*-
 from flask import Blueprint

 from libs.external_api import ExternalApi

 bp = Blueprint('files', __name__)

api/controllers/inner_api/__init__.py (+1, -0)

 from flask import Blueprint
+
 from libs.external_api import ExternalApi

 bp = Blueprint('inner_api', __name__, url_prefix='/inner/api')

api/controllers/service_api/__init__.py (+1, -1)

-# -*- coding:utf-8 -*-
 from flask import Blueprint

 from libs.external_api import ExternalApi

 bp = Blueprint('service_api', __name__, url_prefix='/v1')

api/controllers/web/__init__.py (+1, -1)

-# -*- coding:utf-8 -*-
 from flask import Blueprint

 from libs.external_api import ExternalApi

 bp = Blueprint('web', __name__, url_prefix='/api')

api/controllers/web/app.py (+1, -1)

 from controllers.web.error import AppUnavailableError
 from controllers.web.wraps import WebApiResource
 from extensions.ext_database import db
-from models.model import App, AppModelConfig, AppMode
+from models.model import App, AppMode, AppModelConfig
 from models.tools import ApiToolProvider
 from services.app_service import AppService



api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__init__.py (+12, -3)

 from .__version__ import __version__
 from ._client import ZhipuAI
-from .core._errors import (APIAuthenticationError, APIInternalError, APIReachLimitError, APIRequestFailedError,
-                           APIResponseError, APIResponseValidationError, APIServerFlowExceedError, APIStatusError,
-                           APITimeoutError, ZhipuAIError)
+from .core._errors import (
+    APIAuthenticationError,
+    APIInternalError,
+    APIReachLimitError,
+    APIRequestFailedError,
+    APIResponseError,
+    APIResponseValidationError,
+    APIServerFlowExceedError,
+    APIStatusError,
+    APITimeoutError,
+    ZhipuAIError,
+)

api/events/event_handlers/__init__.py (+1, -1)

 from .deduct_quota_when_messaeg_created import handle
 from .delete_installed_app_when_app_deleted import handle
 from .update_app_dataset_join_when_app_model_config_updated import handle
-from .update_provider_last_used_at_when_messaeg_created import handle
 from .update_app_dataset_join_when_app_published_workflow_updated import handle
+from .update_provider_last_used_at_when_messaeg_created import handle

api/libs/__init__.py (+0, -1)

-# -*- coding:utf-8 -*-

api/pyproject.toml (+14, -3)

 [tool.ruff]
 exclude = [
-    "app.py",
-    "__init__.py",
-    "tests/",
 ]
 line-length = 120

     "UP032", # f-string
 ]

+[tool.ruff.lint.per-file-ignores]
+"app.py" = [
+    "F401", # unused-import
+    "F811", # redefined-while-unused
+]
+"__init__.py" = [
+    "F401", # unused-import
+    "F811", # redefined-while-unused
+]
+"tests/*" = [
+    "F401", # unused-import
+    "F811", # redefined-while-unused
+]

 [tool.pytest_env]
 OPENAI_API_KEY = "sk-IamNotARealKeyJustForMockTestKawaiiiiiiiiii"
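
The new per-file-ignores block replaces the blanket exclude entries: instead of skipping app.py, __init__.py, and the tests entirely, ruff now lints them but tolerates F401 (unused-import) and F811 (redefined-while-unused), which these files trigger on purpose, for example side-effect imports and re-exports in package __init__.py modules, or the repeated `handle` name in api/events/event_handlers/__init__.py. A minimal, self-contained sketch of code that trips exactly these two rules (illustrative only, not taken from the repository):

import json  # imported but never used below -> ruff reports F401 (unused-import)


def handle(event):
    # first binding of the name
    return f'created: {event}'


def handle(event):  # rebinding a name that was never used -> ruff reports F811 (redefined-while-unused)
    return f'deleted: {event}'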

api/services/__init__.py (+0, -1)

-# -*- coding:utf-8 -*-
 import services.errors

api/services/errors/__init__.py (+0, -1)

-# -*- coding:utf-8 -*-
 __all__ = [
     'base', 'conversation', 'message', 'index', 'app_model_config', 'account', 'document', 'dataset',
     'app', 'completion', 'audio', 'file'

api/tests/integration_tests/model_runtime/__mock/anthropic.py (+18, -8)

 import os
+from collections.abc import Iterable
 from time import sleep
-from typing import Any, Literal, Union, Iterable
-
-from anthropic.resources import Messages
-from anthropic.types.message_delta_event import Delta
+from typing import Any, Literal, Union

 import anthropic
 import pytest
 from _pytest.monkeypatch import MonkeyPatch
 from anthropic import Anthropic, Stream
-from anthropic.types import MessageParam, Message, MessageStreamEvent, \
-    ContentBlock, MessageStartEvent, Usage, TextDelta, MessageDeltaEvent, MessageStopEvent, ContentBlockDeltaEvent, \
-    MessageDeltaUsage
+from anthropic.resources import Messages
+from anthropic.types import (
+    ContentBlock,
+    ContentBlockDeltaEvent,
+    Message,
+    MessageDeltaEvent,
+    MessageDeltaUsage,
+    MessageParam,
+    MessageStartEvent,
+    MessageStopEvent,
+    MessageStreamEvent,
+    TextDelta,
+    Usage,
+)
+from anthropic.types.message_delta_event import Delta

 MOCK = os.getenv('MOCK_SWITCH', 'false') == 'true'


-class MockAnthropicClass(object):
+class MockAnthropicClass:
     @staticmethod
     def mocked_anthropic_chat_create_sync(model: str) -> Message:
         return Message(

api/tests/integration_tests/model_runtime/__mock/google.py (+6, -6)

-from typing import Generator, List
+from collections.abc import Generator

 import google.generativeai.types.content_types as content_types
 import google.generativeai.types.generation_types as generation_config_types
 import pytest
 from _pytest.monkeypatch import MonkeyPatch
 from google.ai import generativelanguage as glm
+from google.ai.generativelanguage_v1beta.types import content as gag_content
 from google.generativeai import GenerativeModel
 from google.generativeai.client import _ClientManager, configure
 from google.generativeai.types import GenerateContentResponse
 from google.generativeai.types.generation_types import BaseGenerateContentResponse
-from google.ai.generativelanguage_v1beta.types import content as gag_content

 current_api_key = ''


-class MockGoogleResponseClass(object):
+class MockGoogleResponseClass:
     _done = False

     def __iter__(self):
         chunks=[]
             )


-class MockGoogleResponseCandidateClass(object):
+class MockGoogleResponseCandidateClass:
     finish_reason = 'stop'

     @property
         ]
     )


-class MockGoogleClass(object):
+class MockGoogleClass:
     @staticmethod
     def generate_content_sync() -> GenerateContentResponse:
         return GenerateContentResponse(
         return 'it\'s google!'

     @property
-    def generative_response_candidates(self) -> List[MockGoogleResponseCandidateClass]:
+    def generative_response_candidates(self) -> list[MockGoogleResponseCandidateClass]:
         return [MockGoogleResponseCandidateClass()]

     def make_client(self: _ClientManager, name: str):

api/tests/integration_tests/model_runtime/__mock/huggingface.py (+1, -1)

 import os
-from typing import Any, Dict, List

 import pytest
 from _pytest.monkeypatch import MonkeyPatch
 from huggingface_hub import InferenceClient

 from tests.integration_tests.model_runtime.__mock.huggingface_chat import MockHuggingfaceChatClass

 MOCK = os.getenv('MOCK_SWITCH', 'false').lower() == 'true'

api/tests/integration_tests/model_runtime/__mock/huggingface_chat.py (+10, -4)

 import re
-from typing import Any, Generator, List, Literal, Optional, Union
+from collections.abc import Generator
+from typing import Any, Literal, Optional, Union

 from _pytest.monkeypatch import MonkeyPatch
 from huggingface_hub import InferenceClient
-from huggingface_hub.inference._text_generation import (Details, StreamDetails, TextGenerationResponse,
-                                                         TextGenerationStreamResponse, Token)
+from huggingface_hub.inference._text_generation import (
+    Details,
+    StreamDetails,
+    TextGenerationResponse,
+    TextGenerationStreamResponse,
+    Token,
+)
 from huggingface_hub.utils import BadRequestError


-class MockHuggingfaceChatClass(object):
+class MockHuggingfaceChatClass:
     @staticmethod
     def generate_create_sync(model: str) -> TextGenerationResponse:
         response = TextGenerationResponse(

api/tests/integration_tests/model_runtime/__mock/openai.py (+5, -2)

 import os
-from typing import Callable, List, Literal
+from collections.abc import Callable
+from typing import Literal

 import pytest

 # import monkeypatch
 from _pytest.monkeypatch import MonkeyPatch
 from openai.resources.audio.transcriptions import Transcriptions
 from openai.resources.embeddings import Embeddings
 from openai.resources.models import Models
 from openai.resources.moderations import Moderations

 from tests.integration_tests.model_runtime.__mock.openai_chat import MockChatClass
 from tests.integration_tests.model_runtime.__mock.openai_completion import MockCompletionsClass
 from tests.integration_tests.model_runtime.__mock.openai_embeddings import MockEmbeddingsClass
 from tests.integration_tests.model_runtime.__mock.openai_speech2text import MockSpeech2TextClass


-def mock_openai(monkeypatch: MonkeyPatch, methods: List[Literal["completion", "chat", "remote", "moderation", "speech2text", "text_embedding"]]) -> Callable[[], None]:
+def mock_openai(monkeypatch: MonkeyPatch, methods: list[Literal["completion", "chat", "remote", "moderation", "speech2text", "text_embedding"]]) -> Callable[[], None]:
     """
     mock openai module



api/tests/integration_tests/model_runtime/__mock/openai_chat.py (+31, -18)

 import re
+from collections.abc import Generator
 from json import dumps, loads
 from time import sleep, time

 # import monkeypatch
-from typing import Any, Generator, List, Literal, Optional, Union
+from typing import Any, Literal, Optional, Union

 import openai.types.chat.completion_create_params as completion_create_params
-from core.model_runtime.errors.invoke import InvokeAuthorizationError
 from openai import AzureOpenAI, OpenAI
 from openai._types import NOT_GIVEN, NotGiven
 from openai.resources.chat.completions import Completions
 from openai.types import Completion as CompletionMessage
-from openai.types.chat import (ChatCompletion, ChatCompletionChunk, ChatCompletionMessageParam,
-                               ChatCompletionMessageToolCall, ChatCompletionToolChoiceOptionParam,
-                               ChatCompletionToolParam)
+from openai.types.chat import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ChatCompletionMessageParam,
+    ChatCompletionMessageToolCall,
+    ChatCompletionToolChoiceOptionParam,
+    ChatCompletionToolParam,
+)
 from openai.types.chat.chat_completion import ChatCompletion as _ChatCompletion
 from openai.types.chat.chat_completion import Choice as _ChatCompletionChoice
-from openai.types.chat.chat_completion_chunk import (Choice, ChoiceDelta, ChoiceDeltaFunctionCall, ChoiceDeltaToolCall,
-                                                     ChoiceDeltaToolCallFunction)
+from openai.types.chat.chat_completion_chunk import (
+    Choice,
+    ChoiceDelta,
+    ChoiceDeltaFunctionCall,
+    ChoiceDeltaToolCall,
+    ChoiceDeltaToolCallFunction,
+)
 from openai.types.chat.chat_completion_message import ChatCompletionMessage, FunctionCall
 from openai.types.chat.chat_completion_message_tool_call import Function
 from openai.types.completion_usage import CompletionUsage

+from core.model_runtime.errors.invoke import InvokeAuthorizationError


-class MockChatClass(object):
+class MockChatClass:
     @staticmethod
     def generate_function_call(
-        functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN,
     ) -> Optional[FunctionCall]:
         if not functions or len(functions) == 0:
             return None

     @staticmethod
     def generate_tool_calls(
-        tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
-    ) -> Optional[List[ChatCompletionMessageToolCall]]:
+        tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+    ) -> Optional[list[ChatCompletionMessageToolCall]]:
         list_tool_calls = []
         if not tools or len(tools) == 0:
             return None

     @staticmethod
     def mocked_openai_chat_create_sync(
         model: str,
-        functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN,
-        tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
     ) -> CompletionMessage:
         tool_calls = []
         function_call = MockChatClass.generate_function_call(functions=functions)

     @staticmethod
     def mocked_openai_chat_create_stream(
         model: str,
-        functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN,
-        tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
     ) -> Generator[ChatCompletionChunk, None, None]:
         tool_calls = []
         function_call = MockChatClass.generate_function_call(functions=functions)
         )

     def chat_create(self: Completions, *,
-        messages: List[ChatCompletionMessageParam],
+        messages: list[ChatCompletionMessageParam],
         model: Union[str,Literal[
             "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613",
             "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613",
             "gpt-3.5-turbo-1106", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301",
             "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613"],
         ],
-        functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
-        tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
         **kwargs: Any,
     ):
         openai_models = [

api/tests/integration_tests/model_runtime/__mock/openai_completion.py (+7, -4)

 import re
+from collections.abc import Generator
 from time import sleep, time

 # import monkeypatch
-from typing import Any, Generator, List, Literal, Optional, Union
+from typing import Any, Literal, Optional, Union

-from core.model_runtime.errors.invoke import InvokeAuthorizationError
 from openai import AzureOpenAI, BadRequestError, OpenAI
 from openai._types import NOT_GIVEN, NotGiven
 from openai.resources.completions import Completions
 from openai.types.completion import CompletionChoice
 from openai.types.completion_usage import CompletionUsage

+from core.model_runtime.errors.invoke import InvokeAuthorizationError


-class MockCompletionsClass(object):
+class MockCompletionsClass:
     @staticmethod
     def mocked_openai_completion_create_sync(
         model: str
             "code-davinci-002", "text-curie-001", "text-babbage-001",
             "text-ada-001"],
         ],
-        prompt: Union[str, List[str], List[int], List[List[int]], None],
+        prompt: Union[str, list[str], list[int], list[list[int]], None],
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         **kwargs: Any
     ):

api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py (+5, -4)

 import re
-from typing import Any, List, Literal, Union
+from typing import Any, Literal, Union

-from core.model_runtime.errors.invoke import InvokeAuthorizationError
 from openai import OpenAI
 from openai._types import NOT_GIVEN, NotGiven
 from openai.resources.embeddings import Embeddings
 from openai.types.create_embedding_response import CreateEmbeddingResponse, Usage
 from openai.types.embedding import Embedding

+from core.model_runtime.errors.invoke import InvokeAuthorizationError


-class MockEmbeddingsClass(object):
+class MockEmbeddingsClass:
     def create_embeddings(
         self: Embeddings, *,
-        input: Union[str, List[str], List[int], List[List[int]]],
+        input: Union[str, list[str], list[int], list[list[int]]],
         model: Union[str, Literal["text-embedding-ada-002"]],
         encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
         **kwargs: Any

api/tests/integration_tests/model_runtime/__mock/openai_moderation.py (+5, -4)

 import re
-from typing import Any, List, Literal, Union
+from typing import Any, Literal, Union

-from core.model_runtime.errors.invoke import InvokeAuthorizationError
 from openai._types import NOT_GIVEN, NotGiven
 from openai.resources.moderations import Moderations
 from openai.types import ModerationCreateResponse
 from openai.types.moderation import Categories, CategoryScores, Moderation

+from core.model_runtime.errors.invoke import InvokeAuthorizationError


-class MockModerationClass(object):
+class MockModerationClass:
     def moderation_create(self: Moderations,*,
-        input: Union[str, List[str]],
+        input: Union[str, list[str]],
         model: Union[str, Literal["text-moderation-latest", "text-moderation-stable"]] | NotGiven = NOT_GIVEN,
         **kwargs: Any
     ) -> ModerationCreateResponse:

api/tests/integration_tests/model_runtime/__mock/openai_remote.py (+2, -3)

 from time import time
-from typing import List

 from openai.resources.models import Models
 from openai.types.model import Model


-class MockModelClass(object):
+class MockModelClass:
    """
    mock class for openai.models.Models
    """
     def list(
         self,
         **kwargs,
-    ) -> List[Model]:
+    ) -> list[Model]:
         return [
             Model(
                 id='ft:gpt-3.5-turbo-0613:personal::8GYJLPDQ',

api/tests/integration_tests/model_runtime/__mock/openai_speech2text.py (+4, -3)

 import re
-from typing import Any, List, Literal, Union
+from typing import Any, Literal, Union

-from core.model_runtime.errors.invoke import InvokeAuthorizationError
 from openai._types import NOT_GIVEN, FileTypes, NotGiven
 from openai.resources.audio.transcriptions import Transcriptions
 from openai.types.audio.transcription import Transcription

+from core.model_runtime.errors.invoke import InvokeAuthorizationError


-class MockSpeech2TextClass(object):
+class MockSpeech2TextClass:
     def speech2text_create(self: Transcriptions,
                            *,
                            file: FileTypes,

api/tests/integration_tests/model_runtime/__mock/xinference.py (+12, -7)

 import os
 import re
-from typing import List, Union
+from typing import Union

 import pytest
 from _pytest.monkeypatch import MonkeyPatch
 from requests import Response
 from requests.exceptions import ConnectionError
 from requests.sessions import Session
-from xinference_client.client.restful.restful_client import (Client, RESTfulChatglmCppChatModelHandle,
-                                                              RESTfulChatModelHandle, RESTfulEmbeddingModelHandle,
-                                                              RESTfulGenerateModelHandle, RESTfulRerankModelHandle)
+from xinference_client.client.restful.restful_client import (
+    Client,
+    RESTfulChatglmCppChatModelHandle,
+    RESTfulChatModelHandle,
+    RESTfulEmbeddingModelHandle,
+    RESTfulGenerateModelHandle,
+    RESTfulRerankModelHandle,
+)
 from xinference_client.types import Embedding, EmbeddingData, EmbeddingUsage


-class MockXinferenceClass(object):
+class MockXinferenceClass:
     def get_chat_model(self: Client, model_uid: str) -> Union[RESTfulChatglmCppChatModelHandle, RESTfulGenerateModelHandle, RESTfulChatModelHandle]:
         if not re.match(r'https?:\/\/[^\s\/$.?#].[^\s]*$', self.base_url):
             raise RuntimeError('404 Not Found')

     def _check_cluster_authenticated(self):
         self._cluster_authed = True

-    def rerank(self: RESTfulRerankModelHandle, documents: List[str], query: str, top_n: int) -> dict:
+    def rerank(self: RESTfulRerankModelHandle, documents: list[str], query: str, top_n: int) -> dict:
         # check if self._model_uid is a valid uuid
         if not re.match(r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', self._model_uid) and \
             self._model_uid != 'rerank':

     def create_embedding(
         self: RESTfulGenerateModelHandle,
-        input: Union[str, List[str]],
+        input: Union[str, list[str]],
         **kwargs
     ) -> dict:
         # check if self._model_uid is a valid uuid

api/tests/integration_tests/model_runtime/anthropic/test_llm.py (+2, -1)

 import os
-from typing import Generator
+from collections.abc import Generator
+
 import pytest

 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
 from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
 from core.model_runtime.errors.validate import CredentialsValidateFailedError

api/tests/integration_tests/model_runtime/anthropic/test_provider.py (+1, -0)

 import os
+
 import pytest

 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.anthropic.anthropic import AnthropicProvider
 from tests.integration_tests.model_runtime.__mock.anthropic import setup_anthropic_mock

api/tests/integration_tests/model_runtime/azure_openai/test_llm.py (+10, -4)

 import os
-from typing import Generator
+from collections.abc import Generator
+
 import pytest

 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
-from core.model_runtime.entities.message_entities import (AssistantPromptMessage, ImagePromptMessageContent,
-                                                           PromptMessageTool, SystemPromptMessage,
-                                                           TextPromptMessageContent, UserPromptMessage)
+from core.model_runtime.entities.message_entities import (
+    AssistantPromptMessage,
+    ImagePromptMessageContent,
+    PromptMessageTool,
+    SystemPromptMessage,
+    TextPromptMessageContent,
+    UserPromptMessage,
+)
 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.azure_openai.llm.llm import AzureOpenAILargeLanguageModel
 from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

api/tests/integration_tests/model_runtime/azure_openai/test_text_embedding.py (+1, -0)

 import os
+
 import pytest

 from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.azure_openai.text_embedding.text_embedding import AzureOpenAITextEmbeddingModel

api/tests/integration_tests/model_runtime/baichuan/test_llm.py (+2, -1)

 import os
+from collections.abc import Generator
 from time import sleep
-from typing import Generator
+
 import pytest

 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
 from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
 from core.model_runtime.entities.model_entities import AIModelEntity

api/tests/integration_tests/model_runtime/baichuan/test_provider.py (+1, -0)

 import os
+
 import pytest

 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.baichuan.baichuan import BaichuanProvider


api/tests/integration_tests/model_runtime/baichuan/test_text_embedding.py (+1, -0)

 import os
+
 import pytest

 from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.baichuan.text_embedding.text_embedding import BaichuanTextEmbeddingModel

api/tests/integration_tests/model_runtime/bedrock/test_llm.py (+2, -1)

 import os
-from typing import Generator
+from collections.abc import Generator
+
 import pytest

 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
 from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
 from core.model_runtime.errors.validate import CredentialsValidateFailedError

api/tests/integration_tests/model_runtime/bedrock/test_provider.py (+1, -0)

 import os
+
 import pytest

 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.bedrock.bedrock import BedrockProvider



api/tests/integration_tests/model_runtime/chatglm/test_llm.py (+9, -4)

 import os
-from typing import Generator
+from collections.abc import Generator
+
 import pytest

 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
-from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
-                                                           SystemPromptMessage, TextPromptMessageContent,
-                                                           UserPromptMessage)
+from core.model_runtime.entities.message_entities import (
+    AssistantPromptMessage,
+    PromptMessageTool,
+    SystemPromptMessage,
+    TextPromptMessageContent,
+    UserPromptMessage,
+)
 from core.model_runtime.entities.model_entities import AIModelEntity
 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.chatglm.llm.llm import ChatGLMLargeLanguageModel

api/tests/integration_tests/model_runtime/chatglm/test_provider.py (+1, -0)

 import os
+
 import pytest

 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.chatglm.chatglm import ChatGLMProvider
 from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

api/tests/integration_tests/model_runtime/cohere/test_llm.py (+2, -1)

 import os
-from typing import Generator
+from collections.abc import Generator
+
 import pytest

 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
 from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
 from core.model_runtime.errors.validate import CredentialsValidateFailedError

api/tests/integration_tests/model_runtime/cohere/test_provider.py (+1, -0)

 import os
+
 import pytest

 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.cohere.cohere import CohereProvider



api/tests/integration_tests/model_runtime/cohere/test_rerank.py (+1, -0)

 import os
+
 import pytest

 from core.model_runtime.entities.rerank_entities import RerankResult
 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.cohere.rerank.rerank import CohereRerankModel

api/tests/integration_tests/model_runtime/cohere/test_text_embedding.py (+1, -0)

 import os
+
 import pytest

 from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.cohere.text_embedding.text_embedding import CohereTextEmbeddingModel

api/tests/integration_tests/model_runtime/google/test_llm.py (+9, -4)

 import os
-from typing import Generator
+from collections.abc import Generator
+
 import pytest

 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
-from core.model_runtime.entities.message_entities import (AssistantPromptMessage, ImagePromptMessageContent,
-                                                           SystemPromptMessage, TextPromptMessageContent,
-                                                           UserPromptMessage)
+from core.model_runtime.entities.message_entities import (
+    AssistantPromptMessage,
+    ImagePromptMessageContent,
+    SystemPromptMessage,
+    TextPromptMessageContent,
+    UserPromptMessage,
+)
 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.google.llm.llm import GoogleLargeLanguageModel
 from tests.integration_tests.model_runtime.__mock.google import setup_google_mock

api/tests/integration_tests/model_runtime/google/test_provider.py (+1, -0)

 import os
+
 import pytest

 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.google.google import GoogleProvider
 from tests.integration_tests.model_runtime.__mock.google import setup_google_mock

api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py (+2, -1)

 import os
-from typing import Generator
+from collections.abc import Generator
+
 import pytest

 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
 from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
 from core.model_runtime.errors.validate import CredentialsValidateFailedError

api/tests/integration_tests/model_runtime/huggingface_hub/test_text_embedding.py (+4, -2)

 import os
+
 import pytest

 from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
 from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.huggingface_hub.text_embedding.text_embedding import \
-    HuggingfaceHubTextEmbeddingModel
+from core.model_runtime.model_providers.huggingface_hub.text_embedding.text_embedding import (
+    HuggingfaceHubTextEmbeddingModel,
+)


 def test_hosted_inference_api_validate_credentials():

api/tests/integration_tests/model_runtime/jina/test_provider.py (+1, -0)

 import os
+
 import pytest

 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.jina.jina import JinaProvider



api/tests/integration_tests/model_runtime/jina/test_text_embedding.py (+1, -0)

 import os
+
 import pytest

 from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.jina.text_embedding.text_embedding import JinaTextEmbeddingModel

+ 9
- 4
api/tests/integration_tests/model_runtime/localai/test_llm.py

import os
from typing import Generator
from collections.abc import Generator


import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
SystemPromptMessage, TextPromptMessageContent,
UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.entities.model_entities import ParameterRule
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.localai.llm.llm import LocalAILarguageModel
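
The other recurring rewrite unwraps backslash continuations and paren-aligned import lists into a parenthesized block with one name per line and a trailing comma, the layout ruff's isort-compatible rules (I001) produce here. A short, self-contained sketch of that target style using only the standard library (the module and names below are illustrative, not taken from this diff):

    from collections import (
        Counter,
        OrderedDict,
        UserDict,  # trailing comma: adding another name later is a one-line diff
    )

    print(Counter('ruff'), OrderedDict(a=1), UserDict({'b': 2}))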

+ 1
- 0
api/tests/integration_tests/model_runtime/minimax/test_embedding.py

import os


import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.minimax.text_embedding.text_embedding import MinimaxTextEmbeddingModel

+ 2
- 1
api/tests/integration_tests/model_runtime/minimax/test_llm.py

import os
from collections.abc import Generator
from time import sleep
from typing import Generator


import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
from core.model_runtime.entities.model_entities import AIModelEntity

+ 1
- 0
api/tests/integration_tests/model_runtime/minimax/test_provider.py

import os


import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.minimax.minimax import MinimaxProvider



+ 9
- 4
api/tests/integration_tests/model_runtime/ollama/test_llm.py

import os
from typing import Generator
from collections.abc import Generator


import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, ImagePromptMessageContent,
SystemPromptMessage, TextPromptMessageContent,
UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
ImagePromptMessageContent,
SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.ollama.llm.llm import OllamaLargeLanguageModel



+ 1
- 0
api/tests/integration_tests/model_runtime/ollama/test_text_embedding.py

import os


import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.ollama.text_embedding.text_embedding import OllamaEmbeddingModel

+ 10
- 4
api/tests/integration_tests/model_runtime/openai/test_llm.py

import os
from typing import Generator
from collections.abc import Generator


import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, ImagePromptMessageContent,
PromptMessageTool, SystemPromptMessage,
TextPromptMessageContent, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
ImagePromptMessageContent,
PromptMessageTool,
SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.entities.model_entities import AIModelEntity, ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel

+ 1
- 0
api/tests/integration_tests/model_runtime/openai/test_moderation.py

import os


import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.moderation.moderation import OpenAIModerationModel
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

+ 1
- 0
api/tests/integration_tests/model_runtime/openai/test_provider.py

import os


import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.openai import OpenAIProvider
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

+ 1
- 0
api/tests/integration_tests/model_runtime/openai/test_speech2text.py

import os


import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.speech2text.speech2text import OpenAISpeech2TextModel
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

+ 1
- 0
api/tests/integration_tests/model_runtime/openai/test_text_embedding.py

import os


import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.text_embedding.text_embedding import OpenAITextEmbeddingModel

+ 8
- 3
api/tests/integration_tests/model_runtime/openai_api_compatible/test_llm.py

import os
from typing import Generator
from collections.abc import Generator


import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
SystemPromptMessage, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage,
UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel



+ 4
- 2
api/tests/integration_tests/model_runtime/openai_api_compatible/test_text_embedding.py

import os


import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai_api_compatible.text_embedding.text_embedding import \
OAICompatEmbeddingModel
from core.model_runtime.model_providers.openai_api_compatible.text_embedding.text_embedding import (
OAICompatEmbeddingModel,
)


"""
Using OpenAI's API as testing endpoint

+ 1
- 0
api/tests/integration_tests/model_runtime/openllm/test_embedding.py

import os


import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openllm.text_embedding.text_embedding import OpenLLMTextEmbeddingModel

+ 2
- 1
api/tests/integration_tests/model_runtime/openllm/test_llm.py

import os
from typing import Generator
from collections.abc import Generator


import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

+ 8
- 3
api/tests/integration_tests/model_runtime/openrouter/test_llm.py

import os
from typing import Generator
from collections.abc import Generator


import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
SystemPromptMessage, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage,
UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openrouter.llm.llm import OpenRouterLargeLanguageModel



+ 2
- 1
api/tests/integration_tests/model_runtime/replicate/test_llm.py

import os
from typing import Generator
from collections.abc import Generator


import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

+ 1
- 0
api/tests/integration_tests/model_runtime/replicate/test_text_embedding.py

import os


import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.replicate.text_embedding.text_embedding import ReplicateEmbeddingModel

+ 2
- 1
api/tests/integration_tests/model_runtime/spark/test_llm.py

import os
from typing import Generator
from collections.abc import Generator


import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

+ 1
- 0
api/tests/integration_tests/model_runtime/spark/test_provider.py

import os


import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.spark.spark import SparkProvider



+ 8
- 3
api/tests/integration_tests/model_runtime/togetherai/test_llm.py

import os
from typing import Generator
from collections.abc import Generator


import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
SystemPromptMessage, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage,
UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.togetherai.llm.llm import TogetherAILargeLanguageModel



+ 2
- 1
api/tests/integration_tests/model_runtime/tongyi/test_llm.py

import os
from typing import Generator
from collections.abc import Generator


import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

+ 1
- 0
api/tests/integration_tests/model_runtime/tongyi/test_provider.py

import os


import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.tongyi.tongyi import TongyiProvider



+ 2
- 1
api/tests/integration_tests/model_runtime/wenxin/test_llm.py

import os
from collections.abc import Generator
from time import sleep
from typing import Generator


import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.entities.model_entities import AIModelEntity

+ 1
- 0
api/tests/integration_tests/model_runtime/wenxin/test_provider.py

import os


import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.wenxin.wenxin import WenxinProvider



+ 1
- 0
api/tests/integration_tests/model_runtime/xinference/test_embeddings.py

import os


import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.xinference.text_embedding.text_embedding import XinferenceTextEmbeddingModel

+ 9
- 4
api/tests/integration_tests/model_runtime/xinference/test_llm.py

import os
from typing import Generator
from collections.abc import Generator


import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
SystemPromptMessage, TextPromptMessageContent,
UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.entities.model_entities import AIModelEntity
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.xinference.llm.llm import XinferenceAILargeLanguageModel

+ 1
- 0
api/tests/integration_tests/model_runtime/xinference/test_rerank.py

import os


import pytest

from core.model_runtime.entities.rerank_entities import RerankResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.xinference.rerank.rerank import XinferenceRerankModel

+ 8
- 3
api/tests/integration_tests/model_runtime/zhipuai/test_llm.py

import os
from typing import Generator
from collections.abc import Generator


import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
SystemPromptMessage, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage,
UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.zhipuai.llm.llm import ZhipuAILargeLanguageModel



+ 1
- 0
api/tests/integration_tests/model_runtime/zhipuai/test_provider.py

import os


import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.zhipuai.zhipuai import ZhipuaiProvider



+ 1
- 0
api/tests/integration_tests/model_runtime/zhipuai/test_text_embedding.py

import os


import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.zhipuai.text_embedding.text_embedding import ZhipuAITextEmbeddingModel

+ 1
- 0
api/tests/integration_tests/tools/test_all_provider.py

import pytest

from core.tools.tool_manager import ToolManager


provider_generator = ToolManager.list_builtin_providers()

+ 1
- 1
api/tests/integration_tests/utils/test_module_import_helper.py

import os


from core.utils.module_import_helper import load_single_subclass_from_source, import_module_from_source
from core.utils.module_import_helper import import_module_from_source, load_single_subclass_from_source
from tests.integration_tests.utils.parent_class import ParentClass





+ 3
- 2
api/tests/integration_tests/workflow/nodes/__mock/code_executor.py

import os
import pytest

from typing import Literal

import pytest
from _pytest.monkeypatch import MonkeyPatch

from core.helper.code_executor.code_executor import CodeExecutor


MOCK = os.getenv('MOCK_SWITCH', 'false') == 'true'
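
Both mock modules in this part of the diff gate their patching behind the MOCK_SWITCH environment variable, as the context line above shows. A hypothetical sketch of how such a switch is commonly wired into a pytest fixture with MonkeyPatch; the fixture name and patch target below are illustrative and not the ones used in this repository:

    import os

    import pytest
    from _pytest.monkeypatch import MonkeyPatch

    MOCK = os.getenv('MOCK_SWITCH', 'false') == 'true'


    @pytest.fixture
    def setup_fake_clock_mock(monkeypatch: MonkeyPatch):
        # Illustrative only: patch time.time while the switch is on; pytest undoes the patch at teardown.
        if MOCK:
            monkeypatch.setattr('time.time', lambda: 0.0)
        yield MOCK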

+ 6
- 6
api/tests/integration_tests/workflow/nodes/__mock/http.py

import os
from json import dumps
from typing import Literal

import httpx._api as httpx
import pytest
import requests.api as requests
import httpx._api as httpx
from requests import Response as RequestsResponse
from _pytest.monkeypatch import MonkeyPatch
from httpx import Request as HttpxRequest
from requests import Response as RequestsResponse
from yarl import URL


from typing import Literal
from _pytest.monkeypatch import MonkeyPatch
from json import dumps

MOCK = os.getenv('MOCK_SWITCH', 'false') == 'true'


class MockedHttp:
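
The reshuffle in this mock module is ruff's import grouping: plain imports precede from-imports within each group, names are alphabetized, and the standard-library group is separated from third-party packages by a blank line. A small sketch of that layout built only from well-known modules (httpx and requests stand in for whatever clients a module happens to wrap):

    # Standard library: plain imports first, then from-imports, each alphabetized.
    import os
    from json import dumps
    from typing import Literal

    # Third-party packages follow after a blank line, ordered the same way.
    import httpx
    import requests

    print(dumps({'mock': os.getenv('MOCK_SWITCH', 'false')}), Literal, httpx.__name__, requests.__name__)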

+ 3
- 3
api/tests/integration_tests/workflow/nodes/test_code.py

from os import getenv

import pytest
from core.app.entities.app_invoke_entities import InvokeFrom


from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.nodes.code.code_node import CodeNode
from models.workflow import WorkflowNodeExecutionStatus
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock


from os import getenv

CODE_MAX_STRING_LENGTH = int(getenv('CODE_MAX_STRING_LENGTH', '10000'))


@pytest.mark.parametrize('setup_code_executor_mock', [['none']], indirect=True)

+ 1
- 1
api/tests/integration_tests/workflow/nodes/test_http.py

import pytest

from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.nodes.http_request.http_request_node import HttpRequestNode

from tests.integration_tests.workflow.nodes.__mock.http import setup_http_mock


BASIC_NODE_DATA = {

+ 2
- 2
api/tests/integration_tests/workflow/nodes/test_llm.py

import pytest


from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderModelBundle, ProviderConfiguration
from core.entities.provider_entities import SystemConfiguration, CustomConfiguration, CustomProviderConfiguration
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers import ModelProviderFactory

+ 1
- 0
api/tests/integration_tests/workflow/nodes/test_template_transform.py

from models.workflow import WorkflowNodeExecutionStatus
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock



@pytest.mark.parametrize('setup_code_executor_mock', [['none']], indirect=True)
def test_execute_code(setup_code_executor_mock):
code = '''{{args2}}'''

+ 1
- 1
api/tests/integration_tests/workflow/nodes/test_tool.py

from core.app.entities.app_invoke_entities import InvokeFrom

from core.workflow.entities.variable_pool import VariablePool
from core.workflow.nodes.tool.tool_node import ToolNode
from models.workflow import WorkflowNodeExecutionStatus



def test_tool_variable_invoke():
pool = VariablePool(system_variables={}, user_inputs={})
pool.append_variable(node_id='1', variable_key_list=['123', 'args1'], value='1+1')

+ 4
- 4
api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py



import pytest


from core.app.app_config.entities import ModelConfigEntity, FileExtraConfig
from core.file.file_obj import FileVar, FileType, FileTransferMethod
from core.app.app_config.entities import FileExtraConfig, ModelConfigEntity
from core.file.file_obj import FileTransferMethod, FileType, FileVar
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_runtime.entities.message_entities import UserPromptMessage, AssistantPromptMessage, PromptMessageRole
from core.model_runtime.entities.message_entities import AssistantPromptMessage, PromptMessageRole, UserPromptMessage
from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig, ChatModelMessage
from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig
from core.prompt.utils.prompt_template_parser import PromptTemplateParser
from models.model import Conversation



+ 1
- 1
api/tests/unit_tests/core/prompt/test_prompt_transform.py

from core.app.app_config.entities import ModelConfigEntity
from core.entities.provider_configuration import ProviderModelBundle
from core.model_runtime.entities.message_entities import UserPromptMessage
from core.model_runtime.entities.model_entities import ModelPropertyKey, AIModelEntity, ParameterRule
from core.model_runtime.entities.model_entities import AIModelEntity, ModelPropertyKey, ParameterRule
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.prompt.prompt_transform import PromptTransform



+ 1
- 1
api/tests/unit_tests/core/prompt/test_simple_prompt_transform.py



from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_runtime.entities.message_entities import UserPromptMessage, AssistantPromptMessage
from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
from core.prompt.simple_prompt_transform import SimplePromptTransform
from models.model import AppMode, Conversation



+ 11
- 3
api/tests/unit_tests/services/workflow/test_workflow_converter.py



import pytest


from core.app.app_config.entities import VariableEntity, ExternalDataVariableEntity, DatasetEntity, \
DatasetRetrieveConfigEntity, ModelConfigEntity, PromptTemplateEntity, AdvancedChatPromptTemplateEntity, \
AdvancedChatMessageEntity, AdvancedCompletionPromptTemplateEntity
from core.app.app_config.entities import (
AdvancedChatMessageEntity,
AdvancedChatPromptTemplateEntity,
AdvancedCompletionPromptTemplateEntity,
DatasetEntity,
DatasetRetrieveConfigEntity,
ExternalDataVariableEntity,
ModelConfigEntity,
PromptTemplateEntity,
VariableEntity,
)
from core.helper import encrypter
from core.model_runtime.entities.llm_entities import LLMMode
from core.model_runtime.entities.message_entities import PromptMessageRole
