Browse Source

chore: removing unused imports in tests (#9049)

tags/0.9.2
Bowen Liang 1 year ago
parent
commit
7c0b159a81
No account linked to committer's email address
30 changed files with 21 additions and 52 deletions
  1. 6
    6
      api/app.py
  2. 1
    1
      api/poetry.lock
  3. 7
    4
      api/pyproject.toml
  4. 1
    1
      api/tests/integration_tests/model_runtime/__mock/anthropic.py
  5. 1
    1
      api/tests/integration_tests/model_runtime/__mock/nomic_embeddings.py
  6. 1
    3
      api/tests/integration_tests/model_runtime/__mock/openai_chat.py
  7. 0
    1
      api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py
  8. 0
    1
      api/tests/integration_tests/model_runtime/__mock/openai_remote.py
  9. 0
    1
      api/tests/integration_tests/model_runtime/__mock/xinference.py
  10. 0
    3
      api/tests/integration_tests/model_runtime/azure_ai_studio/test_llm.py
  11. 0
    1
      api/tests/integration_tests/model_runtime/chatglm/test_llm.py
  12. 1
    2
      api/tests/integration_tests/model_runtime/huggingface_tei/test_rerank.py
  13. 0
    2
      api/tests/integration_tests/model_runtime/localai/test_llm.py
  14. 0
    1
      api/tests/integration_tests/model_runtime/nomic/test_provider.py
  15. 0
    1
      api/tests/integration_tests/model_runtime/novita/test_llm.py
  16. 0
    1
      api/tests/integration_tests/model_runtime/oci/test_llm.py
  17. 0
    1
      api/tests/integration_tests/model_runtime/openai/test_llm.py
  18. 0
    1
      api/tests/integration_tests/model_runtime/openrouter/test_llm.py
  19. 0
    2
      api/tests/integration_tests/model_runtime/sagemaker/test_provider.py
  20. 0
    2
      api/tests/integration_tests/model_runtime/sagemaker/test_text_embedding.py
  21. 1
    3
      api/tests/integration_tests/model_runtime/stepfun/test_llm.py
  22. 0
    1
      api/tests/integration_tests/model_runtime/togetherai/test_llm.py
  23. 1
    2
      api/tests/integration_tests/model_runtime/upstage/test_llm.py
  24. 0
    2
      api/tests/integration_tests/model_runtime/xinference/test_llm.py
  25. 0
    1
      api/tests/integration_tests/vdb/pgvector/test_pgvector.py
  26. 0
    1
      api/tests/integration_tests/vdb/test_vector_store.py
  27. 1
    1
      api/tests/integration_tests/workflow/nodes/__mock/code_executor.py
  28. 0
    1
      api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py
  29. 0
    1
      api/tests/unit_tests/core/app/segments/test_variables.py
  30. 0
    3
      api/tests/unit_tests/core/rag/extractor/firecrawl/test_firecrawl.py

+ 6
- 6
api/app.py View File

from configs import dify_config


# DO NOT REMOVE BELOW
from events import event_handlers
from events import event_handlers # noqa: F401
from extensions import ( from extensions import (
ext_celery, ext_celery,
ext_code_based_extension, ext_code_based_extension,
from libs.passport import PassportService from libs.passport import PassportService


# TODO: Find a way to avoid importing models here
from models import account, dataset, model, source, task, tool, tools, web
from models import account, dataset, model, source, task, tool, tools, web # noqa: F401
from services.account_service import AccountService from services.account_service import AccountService


# DO NOT REMOVE ABOVE
decoded = PassportService().verify(auth_token)
user_id = decoded.get("user_id")


account = AccountService.load_logged_in_account(account_id=user_id, token=auth_token)
if account:
contexts.tenant_id.set(account.current_tenant_id)
return account
logged_in_account = AccountService.load_logged_in_account(account_id=user_id, token=auth_token)
if logged_in_account:
contexts.tenant_id.set(logged_in_account.current_tenant_id)
return logged_in_account




@login_manager.unauthorized_handler @login_manager.unauthorized_handler

+ 1
- 1
api/poetry.lock View File

[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<3.13"
content-hash = "dbcbcd7addb7c35c67f074fce00f6e25ec1e225498e5b9de3ef2fcd6af749bf6"
content-hash = "d29d0c4ce384ef94fe65f0a2a145898bd1a117d4fd59c217d15bbb8993f4ce4e"

+ 7
- 4
api/pyproject.toml View File



[tool.ruff.lint.per-file-ignores] [tool.ruff.lint.per-file-ignores]
"app.py" = [ "app.py" = [
"F401", # unused-import
"F811", # redefined-while-unused
] ]
"__init__.py" = [ "__init__.py" = [
"F401", # unused-import "F401", # unused-import
"N803", # invalid-argument-name "N803", # invalid-argument-name
] ]
"tests/*" = [ "tests/*" = [
"F401", # unused-import
"F811", # redefined-while-unused "F811", # redefined-while-unused
] ]


[tool.ruff.lint.pyflakes]
allowed-unused-imports=[
"_pytest.monkeypatch",
"tests.integration_tests",
]

[tool.ruff.format] [tool.ruff.format]
exclude = [ exclude = [
] ]


[tool.poetry.group.lint.dependencies] [tool.poetry.group.lint.dependencies]
dotenv-linter = "~0.5.0" dotenv-linter = "~0.5.0"
ruff = "~0.6.8"
ruff = "~0.6.9"

+ 1
- 1
api/tests/integration_tests/model_runtime/__mock/anthropic.py View File

import anthropic import anthropic
import pytest import pytest
from _pytest.monkeypatch import MonkeyPatch from _pytest.monkeypatch import MonkeyPatch
from anthropic import Anthropic, Stream
from anthropic import Stream
from anthropic.resources import Messages from anthropic.resources import Messages
from anthropic.types import ( from anthropic.types import (
ContentBlock, ContentBlock,

+ 1
- 1
api/tests/integration_tests/model_runtime/__mock/nomic_embeddings.py View File

import os import os
from collections.abc import Callable from collections.abc import Callable
from typing import Any, Literal, Union
from typing import Any, Literal


import pytest import pytest



+ 1
- 3
api/tests/integration_tests/model_runtime/__mock/openai_chat.py View File

import re import re
from collections.abc import Generator from collections.abc import Generator
from json import dumps, loads
from json import dumps
from time import time from time import time


# import monkeypatch # import monkeypatch
from openai.resources.chat.completions import Completions from openai.resources.chat.completions import Completions
from openai.types import Completion as CompletionMessage from openai.types import Completion as CompletionMessage
from openai.types.chat import ( from openai.types.chat import (
ChatCompletion,
ChatCompletionChunk, ChatCompletionChunk,
ChatCompletionMessageParam, ChatCompletionMessageParam,
ChatCompletionMessageToolCall, ChatCompletionMessageToolCall,
ChatCompletionToolChoiceOptionParam,
ChatCompletionToolParam, ChatCompletionToolParam,
completion_create_params, completion_create_params,
) )

+ 0
- 1
api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py View File

import re import re
from typing import Any, Literal, Union from typing import Any, Literal, Union


from openai import OpenAI
from openai._types import NOT_GIVEN, NotGiven from openai._types import NOT_GIVEN, NotGiven
from openai.resources.embeddings import Embeddings from openai.resources.embeddings import Embeddings
from openai.types.create_embedding_response import CreateEmbeddingResponse, Usage from openai.types.create_embedding_response import CreateEmbeddingResponse, Usage

+ 0
- 1
api/tests/integration_tests/model_runtime/__mock/openai_remote.py View File

from time import time from time import time


from openai.resources.models import Models
from openai.types.model import Model from openai.types.model import Model





+ 0
- 1
api/tests/integration_tests/model_runtime/__mock/xinference.py View File

import pytest import pytest
from _pytest.monkeypatch import MonkeyPatch from _pytest.monkeypatch import MonkeyPatch
from requests import Response from requests import Response
from requests.exceptions import ConnectionError
from requests.sessions import Session from requests.sessions import Session
from xinference_client.client.restful.restful_client import ( from xinference_client.client.restful.restful_client import (
Client, Client,

+ 0
- 3
api/tests/integration_tests/model_runtime/azure_ai_studio/test_llm.py View File

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import ( from core.model_runtime.entities.message_entities import (
AssistantPromptMessage, AssistantPromptMessage,
ImagePromptMessageContent,
PromptMessageTool,
SystemPromptMessage, SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage, UserPromptMessage,
) )
from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.errors.validate import CredentialsValidateFailedError

+ 0
- 1
api/tests/integration_tests/model_runtime/chatglm/test_llm.py View File

AssistantPromptMessage, AssistantPromptMessage,
PromptMessageTool, PromptMessageTool,
SystemPromptMessage, SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage, UserPromptMessage,
) )
from core.model_runtime.entities.model_entities import AIModelEntity from core.model_runtime.entities.model_entities import AIModelEntity

+ 1
- 2
api/tests/integration_tests/model_runtime/huggingface_tei/test_rerank.py View File



import pytest import pytest


from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult
from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.entities.rerank_entities import RerankResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.huggingface_tei.rerank.rerank import ( from core.model_runtime.model_providers.huggingface_tei.rerank.rerank import (
HuggingfaceTeiRerankModel, HuggingfaceTeiRerankModel,

+ 0
- 2
api/tests/integration_tests/model_runtime/localai/test_llm.py View File

AssistantPromptMessage, AssistantPromptMessage,
PromptMessageTool, PromptMessageTool,
SystemPromptMessage, SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage, UserPromptMessage,
) )
from core.model_runtime.entities.model_entities import ParameterRule
from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.localai.llm.llm import LocalAILanguageModel from core.model_runtime.model_providers.localai.llm.llm import LocalAILanguageModel



+ 0
- 1
api/tests/integration_tests/model_runtime/nomic/test_provider.py View File



from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.nomic.nomic import NomicAtlasProvider from core.model_runtime.model_providers.nomic.nomic import NomicAtlasProvider
from core.model_runtime.model_providers.nomic.text_embedding.text_embedding import NomicTextEmbeddingModel
from tests.integration_tests.model_runtime.__mock.nomic_embeddings import setup_nomic_mock from tests.integration_tests.model_runtime.__mock.nomic_embeddings import setup_nomic_mock





+ 0
- 1
api/tests/integration_tests/model_runtime/novita/test_llm.py View File

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import ( from core.model_runtime.entities.message_entities import (
AssistantPromptMessage, AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage, SystemPromptMessage,
UserPromptMessage, UserPromptMessage,
) )

+ 0
- 1
api/tests/integration_tests/model_runtime/oci/test_llm.py View File

AssistantPromptMessage, AssistantPromptMessage,
PromptMessageTool, PromptMessageTool,
SystemPromptMessage, SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage, UserPromptMessage,
) )
from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.errors.validate import CredentialsValidateFailedError

+ 0
- 1
api/tests/integration_tests/model_runtime/openai/test_llm.py View File

) )
from core.model_runtime.entities.model_entities import AIModelEntity, ModelType from core.model_runtime.entities.model_entities import AIModelEntity, ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.model_providers.openai.llm.llm import OpenAILargeLanguageModel from core.model_runtime.model_providers.openai.llm.llm import OpenAILargeLanguageModel


"""FOR MOCK FIXTURES, DO NOT REMOVE""" """FOR MOCK FIXTURES, DO NOT REMOVE"""

+ 0
- 1
api/tests/integration_tests/model_runtime/openrouter/test_llm.py View File

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import ( from core.model_runtime.entities.message_entities import (
AssistantPromptMessage, AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage, SystemPromptMessage,
UserPromptMessage, UserPromptMessage,
) )

+ 0
- 2
api/tests/integration_tests/model_runtime/sagemaker/test_provider.py View File

import os

import pytest import pytest


from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.errors.validate import CredentialsValidateFailedError

+ 0
- 2
api/tests/integration_tests/model_runtime/sagemaker/test_text_embedding.py View File

import os

import pytest import pytest


from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult

+ 1
- 3
api/tests/integration_tests/model_runtime/stepfun/test_llm.py View File

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import ( from core.model_runtime.entities.message_entities import (
AssistantPromptMessage, AssistantPromptMessage,
ImagePromptMessageContent,
PromptMessageTool, PromptMessageTool,
SystemPromptMessage, SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage, UserPromptMessage,
) )
from core.model_runtime.entities.model_entities import AIModelEntity, ModelType
from core.model_runtime.entities.model_entities import AIModelEntity
from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.stepfun.llm.llm import StepfunLargeLanguageModel from core.model_runtime.model_providers.stepfun.llm.llm import StepfunLargeLanguageModel



+ 0
- 1
api/tests/integration_tests/model_runtime/togetherai/test_llm.py View File

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import ( from core.model_runtime.entities.message_entities import (
AssistantPromptMessage, AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage, SystemPromptMessage,
UserPromptMessage, UserPromptMessage,
) )

+ 1
- 2
api/tests/integration_tests/model_runtime/upstage/test_llm.py View File

SystemPromptMessage, SystemPromptMessage,
UserPromptMessage, UserPromptMessage,
) )
from core.model_runtime.entities.model_entities import AIModelEntity, ModelType
from core.model_runtime.entities.model_entities import AIModelEntity
from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.model_providers.upstage.llm.llm import UpstageLargeLanguageModel from core.model_runtime.model_providers.upstage.llm.llm import UpstageLargeLanguageModel


"""FOR MOCK FIXTURES, DO NOT REMOVE""" """FOR MOCK FIXTURES, DO NOT REMOVE"""

+ 0
- 2
api/tests/integration_tests/model_runtime/xinference/test_llm.py View File

AssistantPromptMessage, AssistantPromptMessage,
PromptMessageTool, PromptMessageTool,
SystemPromptMessage, SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage, UserPromptMessage,
) )
from core.model_runtime.entities.model_entities import AIModelEntity
from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.xinference.llm.llm import XinferenceAILargeLanguageModel from core.model_runtime.model_providers.xinference.llm.llm import XinferenceAILargeLanguageModel



+ 0
- 1
api/tests/integration_tests/vdb/pgvector/test_pgvector.py View File

from core.rag.datasource.vdb.pgvector.pgvector import PGVector, PGVectorConfig from core.rag.datasource.vdb.pgvector.pgvector import PGVector, PGVectorConfig
from core.rag.models.document import Document
from tests.integration_tests.vdb.test_vector_store import ( from tests.integration_tests.vdb.test_vector_store import (
AbstractVectorTest, AbstractVectorTest,
get_example_text, get_example_text,

+ 0
- 1
api/tests/integration_tests/vdb/test_vector_store.py View File

import random
import uuid import uuid
from unittest.mock import MagicMock from unittest.mock import MagicMock



+ 1
- 1
api/tests/integration_tests/workflow/nodes/__mock/code_executor.py View File

import os import os
from typing import Literal, Optional
from typing import Literal


import pytest import pytest
from _pytest.monkeypatch import MonkeyPatch from _pytest.monkeypatch import MonkeyPatch

+ 0
- 1
api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py View File

import json
import os import os
import time import time
import uuid import uuid

+ 0
- 1
api/tests/unit_tests/core/app/segments/test_variables.py View File

from pydantic import ValidationError from pydantic import ValidationError


from core.app.segments import ( from core.app.segments import (
ArrayAnyVariable,
FloatVariable, FloatVariable,
IntegerVariable, IntegerVariable,
ObjectVariable, ObjectVariable,

+ 0
- 3
api/tests/unit_tests/core/rag/extractor/firecrawl/test_firecrawl.py View File

import os import os
from unittest import mock


from core.rag.extractor.firecrawl.firecrawl_app import FirecrawlApp from core.rag.extractor.firecrawl.firecrawl_app import FirecrawlApp
from core.rag.extractor.firecrawl.firecrawl_web_extractor import FirecrawlWebExtractor
from core.rag.models.document import Document
from tests.unit_tests.core.rag.extractor.test_notion_extractor import _mock_response from tests.unit_tests.core.rag.extractor.test_notion_extractor import _mock_response





Loading…
Cancel
Save