ソースを参照

refactor

tags/1.0.0-beta.1
Yeuoly 9ヶ月前
コミット
e4c4490175

+ 1
- 0
.github/workflows/build-push.yml ファイルの表示

@@ -5,6 +5,7 @@ on:
branches:
- "main"
- "deploy/dev"
- "plugins/beta"
release:
types: [published]


+ 1
- 0
.github/workflows/db-migration-test.yml ファイルの表示

@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- main
- plugins/beta
paths:
- api/migrations/**
- .github/workflows/db-migration-test.yml

+ 1
- 1
.github/workflows/style.yml ファイルの表示

@@ -4,6 +4,7 @@ on:
pull_request:
branches:
- main
- plugins/beta

concurrency:
group: style-${{ github.head_ref || github.run_id }}
@@ -82,7 +83,6 @@ jobs:
if: steps.changed-files.outputs.any_changed == 'true'
run: yarn run lint


superlinter:
name: SuperLinter
runs-on: ubuntu-latest

+ 1
- 1
api/tests/unit_tests/core/helper/test_marketplace.py ファイルの表示

@@ -2,6 +2,6 @@ from core.helper.marketplace import download_plugin_pkg


def test_download_plugin_pkg():
pkg = download_plugin_pkg("yeuoly/google:0.0.1@4ff79ee644987e5b744d9c5b7a735d459fe66f26b28724326a7834d7e459e708")
pkg = download_plugin_pkg("langgenius/bing:0.0.1@e58735424d2104f208c2bd683c5142e0332045b425927067acf432b26f3d970b")
assert pkg is not None
assert len(pkg) > 0

+ 52
- 52
api/tests/unit_tests/core/prompt/test_prompt_transform.py ファイルの表示

@@ -1,52 +1,52 @@
from unittest.mock import MagicMock
from core.app.app_config.entities import ModelConfigEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.model_runtime.entities.message_entities import UserPromptMessage
from core.model_runtime.entities.model_entities import AIModelEntity, ModelPropertyKey, ParameterRule
from core.model_runtime.entities.provider_entities import ProviderEntity
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.prompt.prompt_transform import PromptTransform
def test__calculate_rest_token():
model_schema_mock = MagicMock(spec=AIModelEntity)
parameter_rule_mock = MagicMock(spec=ParameterRule)
parameter_rule_mock.name = "max_tokens"
model_schema_mock.parameter_rules = [parameter_rule_mock]
model_schema_mock.model_properties = {ModelPropertyKey.CONTEXT_SIZE: 62}
large_language_model_mock = MagicMock(spec=LargeLanguageModel)
large_language_model_mock.get_num_tokens.return_value = 6
provider_mock = MagicMock(spec=ProviderEntity)
provider_mock.provider = "openai"
provider_configuration_mock = MagicMock(spec=ProviderConfiguration)
provider_configuration_mock.provider = provider_mock
provider_configuration_mock.model_settings = None
provider_model_bundle_mock = MagicMock(spec=ProviderModelBundle)
provider_model_bundle_mock.model_type_instance = large_language_model_mock
provider_model_bundle_mock.configuration = provider_configuration_mock
model_config_mock = MagicMock(spec=ModelConfigEntity)
model_config_mock.model = "gpt-4"
model_config_mock.credentials = {}
model_config_mock.parameters = {"max_tokens": 50}
model_config_mock.model_schema = model_schema_mock
model_config_mock.provider_model_bundle = provider_model_bundle_mock
prompt_transform = PromptTransform()
prompt_messages = [UserPromptMessage(content="Hello, how are you?")]
rest_tokens = prompt_transform._calculate_rest_token(prompt_messages, model_config_mock)
# Validate based on the mock configuration and expected logic
expected_rest_tokens = (
model_schema_mock.model_properties[ModelPropertyKey.CONTEXT_SIZE]
- model_config_mock.parameters["max_tokens"]
- large_language_model_mock.get_num_tokens.return_value
)
assert rest_tokens == expected_rest_tokens
assert rest_tokens == 6
# from unittest.mock import MagicMock
# from core.app.app_config.entities import ModelConfigEntity
# from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
# from core.model_runtime.entities.message_entities import UserPromptMessage
# from core.model_runtime.entities.model_entities import AIModelEntity, ModelPropertyKey, ParameterRule
# from core.model_runtime.entities.provider_entities import ProviderEntity
# from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
# from core.prompt.prompt_transform import PromptTransform
# def test__calculate_rest_token():
# model_schema_mock = MagicMock(spec=AIModelEntity)
# parameter_rule_mock = MagicMock(spec=ParameterRule)
# parameter_rule_mock.name = "max_tokens"
# model_schema_mock.parameter_rules = [parameter_rule_mock]
# model_schema_mock.model_properties = {ModelPropertyKey.CONTEXT_SIZE: 62}
# large_language_model_mock = MagicMock(spec=LargeLanguageModel)
# large_language_model_mock.get_num_tokens.return_value = 6
# provider_mock = MagicMock(spec=ProviderEntity)
# provider_mock.provider = "openai"
# provider_configuration_mock = MagicMock(spec=ProviderConfiguration)
# provider_configuration_mock.provider = provider_mock
# provider_configuration_mock.model_settings = None
# provider_model_bundle_mock = MagicMock(spec=ProviderModelBundle)
# provider_model_bundle_mock.model_type_instance = large_language_model_mock
# provider_model_bundle_mock.configuration = provider_configuration_mock
# model_config_mock = MagicMock(spec=ModelConfigEntity)
# model_config_mock.model = "gpt-4"
# model_config_mock.credentials = {}
# model_config_mock.parameters = {"max_tokens": 50}
# model_config_mock.model_schema = model_schema_mock
# model_config_mock.provider_model_bundle = provider_model_bundle_mock
# prompt_transform = PromptTransform()
# prompt_messages = [UserPromptMessage(content="Hello, how are you?")]
# rest_tokens = prompt_transform._calculate_rest_token(prompt_messages, model_config_mock)
# # Validate based on the mock configuration and expected logic
# expected_rest_tokens = (
# model_schema_mock.model_properties[ModelPropertyKey.CONTEXT_SIZE]
# - model_config_mock.parameters["max_tokens"]
# - large_language_model_mock.get_num_tokens.return_value
# )
# assert rest_tokens == expected_rest_tokens
# assert rest_tokens == 6

+ 190
- 186
api/tests/unit_tests/core/test_provider_manager.py ファイルの表示

@@ -1,186 +1,190 @@
from core.entities.provider_entities import ModelSettings
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from core.provider_manager import ProviderManager
from models.provider import LoadBalancingModelConfig, ProviderModelSetting


def test__to_model_settings(mocker):
# Get all provider entities
model_provider_factory = ModelProviderFactory("test_tenant")
provider_entities = model_provider_factory.get_providers()

provider_entity = None
for provider in provider_entities:
if provider.provider == "openai":
provider_entity = provider

# Mocking the inputs
provider_model_settings = [
ProviderModelSetting(
id="id",
tenant_id="tenant_id",
provider_name="openai",
model_name="gpt-4",
model_type="text-generation",
enabled=True,
load_balancing_enabled=True,
)
]
load_balancing_model_configs = [
LoadBalancingModelConfig(
id="id1",
tenant_id="tenant_id",
provider_name="openai",
model_name="gpt-4",
model_type="text-generation",
name="__inherit__",
encrypted_config=None,
enabled=True,
),
LoadBalancingModelConfig(
id="id2",
tenant_id="tenant_id",
provider_name="openai",
model_name="gpt-4",
model_type="text-generation",
name="first",
encrypted_config='{"openai_api_key": "fake_key"}',
enabled=True,
),
]

mocker.patch(
"core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
)

provider_manager = ProviderManager()

# Running the method
result = provider_manager._to_model_settings(provider_entity, provider_model_settings, load_balancing_model_configs)

# Asserting that the result is as expected
assert len(result) == 1
assert isinstance(result[0], ModelSettings)
assert result[0].model == "gpt-4"
assert result[0].model_type == ModelType.LLM
assert result[0].enabled is True
assert len(result[0].load_balancing_configs) == 2
assert result[0].load_balancing_configs[0].name == "__inherit__"
assert result[0].load_balancing_configs[1].name == "first"


def test__to_model_settings_only_one_lb(mocker):
# Get all provider entities
model_provider_factory = ModelProviderFactory("test_tenant")
provider_entities = model_provider_factory.get_providers()

provider_entity = None
for provider in provider_entities:
if provider.provider == "openai":
provider_entity = provider

# Mocking the inputs
provider_model_settings = [
ProviderModelSetting(
id="id",
tenant_id="tenant_id",
provider_name="openai",
model_name="gpt-4",
model_type="text-generation",
enabled=True,
load_balancing_enabled=True,
)
]
load_balancing_model_configs = [
LoadBalancingModelConfig(
id="id1",
tenant_id="tenant_id",
provider_name="openai",
model_name="gpt-4",
model_type="text-generation",
name="__inherit__",
encrypted_config=None,
enabled=True,
)
]

mocker.patch(
"core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
)

provider_manager = ProviderManager()

# Running the method
result = provider_manager._to_model_settings(provider_entity, provider_model_settings, load_balancing_model_configs)

# Asserting that the result is as expected
assert len(result) == 1
assert isinstance(result[0], ModelSettings)
assert result[0].model == "gpt-4"
assert result[0].model_type == ModelType.LLM
assert result[0].enabled is True
assert len(result[0].load_balancing_configs) == 0


def test__to_model_settings_lb_disabled(mocker):
# Get all provider entities
model_provider_factory = ModelProviderFactory("test_tenant")
provider_entities = model_provider_factory.get_providers()

provider_entity = None
for provider in provider_entities:
if provider.provider == "openai":
provider_entity = provider

# Mocking the inputs
provider_model_settings = [
ProviderModelSetting(
id="id",
tenant_id="tenant_id",
provider_name="openai",
model_name="gpt-4",
model_type="text-generation",
enabled=True,
load_balancing_enabled=False,
)
]
load_balancing_model_configs = [
LoadBalancingModelConfig(
id="id1",
tenant_id="tenant_id",
provider_name="openai",
model_name="gpt-4",
model_type="text-generation",
name="__inherit__",
encrypted_config=None,
enabled=True,
),
LoadBalancingModelConfig(
id="id2",
tenant_id="tenant_id",
provider_name="openai",
model_name="gpt-4",
model_type="text-generation",
name="first",
encrypted_config='{"openai_api_key": "fake_key"}',
enabled=True,
),
]

mocker.patch(
"core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
)

provider_manager = ProviderManager()

# Running the method
result = provider_manager._to_model_settings(provider_entity, provider_model_settings, load_balancing_model_configs)

# Asserting that the result is as expected
assert len(result) == 1
assert isinstance(result[0], ModelSettings)
assert result[0].model == "gpt-4"
assert result[0].model_type == ModelType.LLM
assert result[0].enabled is True
assert len(result[0].load_balancing_configs) == 0
# from core.entities.provider_entities import ModelSettings
# from core.model_runtime.entities.model_entities import ModelType
# from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
# from core.provider_manager import ProviderManager
# from models.provider import LoadBalancingModelConfig, ProviderModelSetting


# def test__to_model_settings(mocker):
# # Get all provider entities
# model_provider_factory = ModelProviderFactory("test_tenant")
# provider_entities = model_provider_factory.get_providers()

# provider_entity = None
# for provider in provider_entities:
# if provider.provider == "openai":
# provider_entity = provider

# # Mocking the inputs
# provider_model_settings = [
# ProviderModelSetting(
# id="id",
# tenant_id="tenant_id",
# provider_name="openai",
# model_name="gpt-4",
# model_type="text-generation",
# enabled=True,
# load_balancing_enabled=True,
# )
# ]
# load_balancing_model_configs = [
# LoadBalancingModelConfig(
# id="id1",
# tenant_id="tenant_id",
# provider_name="openai",
# model_name="gpt-4",
# model_type="text-generation",
# name="__inherit__",
# encrypted_config=None,
# enabled=True,
# ),
# LoadBalancingModelConfig(
# id="id2",
# tenant_id="tenant_id",
# provider_name="openai",
# model_name="gpt-4",
# model_type="text-generation",
# name="first",
# encrypted_config='{"openai_api_key": "fake_key"}',
# enabled=True,
# ),
# ]

# mocker.patch(
# "core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
# )

# provider_manager = ProviderManager()

# # Running the method
# result = provider_manager._to_model_settings(provider_entity,
# provider_model_settings, load_balancing_model_configs)

# # Asserting that the result is as expected
# assert len(result) == 1
# assert isinstance(result[0], ModelSettings)
# assert result[0].model == "gpt-4"
# assert result[0].model_type == ModelType.LLM
# assert result[0].enabled is True
# assert len(result[0].load_balancing_configs) == 2
# assert result[0].load_balancing_configs[0].name == "__inherit__"
# assert result[0].load_balancing_configs[1].name == "first"


# def test__to_model_settings_only_one_lb(mocker):
# # Get all provider entities
# model_provider_factory = ModelProviderFactory("test_tenant")
# provider_entities = model_provider_factory.get_providers()

# provider_entity = None
# for provider in provider_entities:
# if provider.provider == "openai":
# provider_entity = provider

# # Mocking the inputs
# provider_model_settings = [
# ProviderModelSetting(
# id="id",
# tenant_id="tenant_id",
# provider_name="openai",
# model_name="gpt-4",
# model_type="text-generation",
# enabled=True,
# load_balancing_enabled=True,
# )
# ]
# load_balancing_model_configs = [
# LoadBalancingModelConfig(
# id="id1",
# tenant_id="tenant_id",
# provider_name="openai",
# model_name="gpt-4",
# model_type="text-generation",
# name="__inherit__",
# encrypted_config=None,
# enabled=True,
# )
# ]

# mocker.patch(
# "core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
# )

# provider_manager = ProviderManager()

# # Running the method
# result = provider_manager._to_model_settings(
# provider_entity, provider_model_settings, load_balancing_model_configs)

# # Asserting that the result is as expected
# assert len(result) == 1
# assert isinstance(result[0], ModelSettings)
# assert result[0].model == "gpt-4"
# assert result[0].model_type == ModelType.LLM
# assert result[0].enabled is True
# assert len(result[0].load_balancing_configs) == 0


# def test__to_model_settings_lb_disabled(mocker):
# # Get all provider entities
# model_provider_factory = ModelProviderFactory("test_tenant")
# provider_entities = model_provider_factory.get_providers()

# provider_entity = None
# for provider in provider_entities:
# if provider.provider == "openai":
# provider_entity = provider

# # Mocking the inputs
# provider_model_settings = [
# ProviderModelSetting(
# id="id",
# tenant_id="tenant_id",
# provider_name="openai",
# model_name="gpt-4",
# model_type="text-generation",
# enabled=True,
# load_balancing_enabled=False,
# )
# ]
# load_balancing_model_configs = [
# LoadBalancingModelConfig(
# id="id1",
# tenant_id="tenant_id",
# provider_name="openai",
# model_name="gpt-4",
# model_type="text-generation",
# name="__inherit__",
# encrypted_config=None,
# enabled=True,
# ),
# LoadBalancingModelConfig(
# id="id2",
# tenant_id="tenant_id",
# provider_name="openai",
# model_name="gpt-4",
# model_type="text-generation",
# name="first",
# encrypted_config='{"openai_api_key": "fake_key"}',
# enabled=True,
# ),
# ]

# mocker.patch(
# "core.helper.model_provider_cache.ProviderCredentialsCache.get",
# return_value={"openai_api_key": "fake_key"}
# )

# provider_manager = ProviderManager()

# # Running the method
# result = provider_manager._to_model_settings(provider_entity,
# provider_model_settings, load_balancing_model_configs)

# # Asserting that the result is as expected
# assert len(result) == 1
# assert isinstance(result[0], ModelSettings)
# assert result[0].model == "gpt-4"
# assert result[0].model_type == ModelType.LLM
# assert result[0].enabled is True
# assert len(result[0].load_balancing_configs) == 0

読み込み中…
キャンセル
保存