Kaynağa Gözat

Merge branch 'feat/queue-based-graph-engine' into feat/rag-2

# Conflicts:
#	api/core/app/apps/advanced_chat/generate_task_pipeline.py
#	api/pyproject.toml
#	api/uv.lock
#	docker/docker-compose-template.yaml
#	docker/docker-compose.yaml
#	web/package.json
tags/2.0.0-beta.1
jyong 2 ay önce
ebeveyn
işleme
b9394d542c
87 değiştirilmiş dosya ile 1933 ekleme ve 396 silme
  1. 13
    3
      .github/workflows/translate-i18n-base-on-english.yml
  2. 5
    0
      .github/workflows/web-tests.yml
  3. 1
    1
      Makefile
  4. 1
    0
      api/.env.example
  5. 1
    1
      api/controllers/console/app/conversation.py
  6. 22
    7
      api/controllers/console/auth/oauth_server.py
  7. 1
    3
      api/core/app/apps/advanced_chat/generate_task_pipeline.py
  8. 3
    2
      api/core/app/task_pipeline/message_cycle_manager.py
  9. 8
    8
      api/core/entities/provider_configuration.py
  10. 4
    0
      api/core/mcp/client/streamable_client.py
  11. 1
    1
      api/core/mcp/server/streamable_http.py
  12. 1
    1
      api/core/model_runtime/README.md
  13. 1
    1
      api/core/model_runtime/README_CN.md
  14. 7
    5
      api/core/plugin/backwards_invocation/app.py
  15. 33
    23
      api/core/provider_manager.py
  16. 2
    2
      api/core/rag/datasource/vdb/myscale/myscale_vector.py
  17. 8
    2
      api/core/rag/datasource/vdb/tablestore/tablestore_vector.py
  18. 0
    7
      api/core/rag/datasource/vdb/weaviate/weaviate_vector.py
  19. 126
    19
      api/core/workflow/entities/graph_runtime_state.py
  20. 12
    1
      api/core/workflow/graph/__init__.py
  21. 59
    0
      api/core/workflow/graph/graph_runtime_state_protocol.py
  22. 76
    0
      api/core/workflow/graph/read_only_state_wrapper.py
  23. 4
    4
      api/core/workflow/graph_engine/event_management/event_handlers.py
  24. 3
    3
      api/core/workflow/graph_engine/event_management/event_manager.py
  25. 9
    6
      api/core/workflow/graph_engine/graph_engine.py
  26. 2
    2
      api/core/workflow/graph_engine/layers/__init__.py
  27. 8
    8
      api/core/workflow/graph_engine/layers/base.py
  28. 2
    2
      api/core/workflow/graph_engine/layers/debug_logging.py
  29. 2
    2
      api/core/workflow/graph_engine/layers/execution_limits.py
  30. 2
    2
      api/core/workflow/graph_engine/orchestration/execution_coordinator.py
  31. 16
    14
      api/core/workflow/graph_engine/worker.py
  32. 2
    2
      api/core/workflow/graph_engine/worker_management/__init__.py
  33. 126
    4
      api/core/workflow/graph_engine/worker_management/worker_pool.py
  34. 1
    1
      api/core/workflow/nodes/agent/agent_node.py
  35. 0
    3
      api/core/workflow/nodes/base/entities.py
  36. 36
    23
      api/core/workflow/nodes/code/code_node.py
  37. 6
    17
      api/core/workflow/nodes/http_request/executor.py
  38. 4
    10
      api/core/workflow/nodes/list_operator/node.py
  39. 5
    11
      api/core/workflow/nodes/llm/node.py
  40. 4
    4
      api/core/workflow/nodes/loop/loop_node.py
  41. 0
    2
      api/core/workflow/nodes/parameter_extractor/entities.py
  42. 14
    19
      api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py
  43. 1
    1
      api/core/workflow/nodes/tool/tool_node.py
  44. 0
    5
      api/core/workflow/nodes/variable_assigner/v1/node.py
  45. 0
    2
      api/core/workflow/nodes/variable_assigner/v2/helpers.py
  46. 0
    2
      api/core/workflow/nodes/variable_assigner/v2/node.py
  47. 2
    1
      api/extensions/storage/clickzetta_volume/file_lifecycle.py
  48. 0
    1
      api/pyrightconfig.json
  49. 1
    1
      api/services/recommended_app_service.py
  50. 0
    0
      api/tests/test_containers_integration_tests/services/workflow/__init__.py
  51. 553
    0
      api/tests/test_containers_integration_tests/services/workflow/test_workflow_converter.py
  52. 64
    1
      api/tests/unit_tests/core/mcp/server/test_streamable_http.py
  53. 111
    0
      api/tests/unit_tests/core/workflow/entities/test_graph_runtime_state.py
  54. 4
    4
      api/tests/unit_tests/core/workflow/graph_engine/test_graph_engine.py
  55. 12
    5
      api/tests/unit_tests/core/workflow/graph_engine/test_table_runner.py
  56. 8
    1
      dev/basedpyright-check
  57. 1
    1
      web/app/components/app/configuration/config-var/config-modal/type-select.tsx
  58. 2
    2
      web/app/components/header/account-setting/model-provider-page/model-modal/index.tsx
  59. 2
    2
      web/app/components/header/nav/nav-selector/index.tsx
  60. 41
    2
      web/app/components/workflow/hooks/use-nodes-interactions.ts
  61. 2
    1
      web/app/components/workflow/operator/export-image.tsx
  62. 7
    0
      web/app/components/workflow/utils/node.ts
  63. 2
    0
      web/global.d.ts
  64. 120
    0
      web/i18n-config/check-i18n-sync.js
  65. 135
    0
      web/i18n-config/generate-i18n-types.js
  66. 10
    10
      web/i18n/id-ID/app-annotation.ts
  67. 13
    13
      web/i18n/id-ID/app-api.ts
  68. 4
    4
      web/i18n/id-ID/app-debug.ts
  69. 8
    8
      web/i18n/id-ID/app-log.ts
  70. 2
    2
      web/i18n/id-ID/app-overview.ts
  71. 8
    8
      web/i18n/id-ID/app.ts
  72. 18
    18
      web/i18n/id-ID/common.ts
  73. 9
    9
      web/i18n/id-ID/custom.ts
  74. 10
    10
      web/i18n/id-ID/dataset-creation.ts
  75. 2
    2
      web/i18n/id-ID/dataset-hit-testing.ts
  76. 1
    1
      web/i18n/id-ID/dataset-settings.ts
  77. 2
    2
      web/i18n/id-ID/dataset.ts
  78. 1
    1
      web/i18n/id-ID/education.ts
  79. 4
    4
      web/i18n/id-ID/explore.ts
  80. 13
    13
      web/i18n/id-ID/login.ts
  81. 5
    5
      web/i18n/id-ID/oauth.ts
  82. 5
    5
      web/i18n/id-ID/plugin.ts
  83. 8
    8
      web/i18n/id-ID/time.ts
  84. 13
    13
      web/i18n/id-ID/workflow.ts
  85. 1
    1
      web/i18n/tr-TR/common.ts
  86. 1
    1
      web/i18n/uk-UA/common.ts
  87. 96
    0
      web/types/i18n.d.ts

+ 13
- 3
.github/workflows/translate-i18n-base-on-english.yml Dosyayı Görüntüle

working-directory: ./web working-directory: ./web
run: pnpm run auto-gen-i18n ${{ env.FILE_ARGS }} run: pnpm run auto-gen-i18n ${{ env.FILE_ARGS }}


- name: Generate i18n type definitions
if: env.FILES_CHANGED == 'true'
working-directory: ./web
run: pnpm run gen:i18n-types

- name: Create Pull Request - name: Create Pull Request
if: env.FILES_CHANGED == 'true' if: env.FILES_CHANGED == 'true'
uses: peter-evans/create-pull-request@v6 uses: peter-evans/create-pull-request@v6
with: with:
token: ${{ secrets.GITHUB_TOKEN }} token: ${{ secrets.GITHUB_TOKEN }}
commit-message: Update i18n files based on en-US changes
title: 'chore: translate i18n files'
body: This PR was automatically created to update i18n files based on changes in en-US locale.
commit-message: Update i18n files and type definitions based on en-US changes
title: 'chore: translate i18n files and update type definitions'
body: |
This PR was automatically created to update i18n files and TypeScript type definitions based on changes in en-US locale.
**Changes included:**
- Updated translation files for all locales
- Regenerated TypeScript type definitions for type safety
branch: chore/automated-i18n-updates branch: chore/automated-i18n-updates

+ 5
- 0
.github/workflows/web-tests.yml Dosyayı Görüntüle

working-directory: ./web working-directory: ./web
run: pnpm install --frozen-lockfile run: pnpm install --frozen-lockfile


- name: Check i18n types synchronization
if: steps.changed-files.outputs.any_changed == 'true'
working-directory: ./web
run: pnpm run check:i18n-types

- name: Run tests - name: Run tests
if: steps.changed-files.outputs.any_changed == 'true' if: steps.changed-files.outputs.any_changed == 'true'
working-directory: ./web working-directory: ./web

+ 1
- 1
Makefile Dosyayı Görüntüle

prepare-api: prepare-api:
@echo "🔧 Setting up API environment..." @echo "🔧 Setting up API environment..."
@cp -n api/.env.example api/.env 2>/dev/null || echo "API .env already exists" @cp -n api/.env.example api/.env 2>/dev/null || echo "API .env already exists"
@cd api && uv sync --dev --extra all
@cd api && uv sync --dev
@cd api && uv run flask db upgrade @cd api && uv run flask db upgrade
@echo "✅ API environment prepared (not started)" @echo "✅ API environment prepared (not started)"



+ 1
- 0
api/.env.example Dosyayı Görüntüle

DB_HOST=localhost DB_HOST=localhost
DB_PORT=5432 DB_PORT=5432
DB_DATABASE=dify DB_DATABASE=dify
SQLALCHEMY_POOL_PRE_PING=true


# Storage configuration # Storage configuration
# use for store upload files, private keys... # use for store upload files, private keys...

+ 1
- 1
api/controllers/console/app/conversation.py Dosyayı Görüntüle

@setup_required @setup_required
@login_required @login_required
@account_initialization_required @account_initialization_required
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
@get_app_model(mode=AppMode.COMPLETION)
def delete(self, app_model, conversation_id): def delete(self, app_model, conversation_id):
if not current_user.is_editor: if not current_user.is_editor:
raise Forbidden() raise Forbidden()

+ 22
- 7
api/controllers/console/auth/oauth_server.py Dosyayı Görüntüle

from typing import cast from typing import cast


import flask_login import flask_login
from flask import request
from flask import jsonify, request
from flask_restx import Resource, reqparse from flask_restx import Resource, reqparse
from werkzeug.exceptions import BadRequest, NotFound from werkzeug.exceptions import BadRequest, NotFound




authorization_header = request.headers.get("Authorization") authorization_header = request.headers.get("Authorization")
if not authorization_header: if not authorization_header:
raise BadRequest("Authorization header is required")
response = jsonify({"error": "Authorization header is required"})
response.status_code = 401
response.headers["WWW-Authenticate"] = "Bearer"
return response


parts = authorization_header.strip().split(" ")
parts = authorization_header.strip().split(None, 1)
if len(parts) != 2: if len(parts) != 2:
raise BadRequest("Invalid Authorization header format")
response = jsonify({"error": "Invalid Authorization header format"})
response.status_code = 401
response.headers["WWW-Authenticate"] = "Bearer"
return response


token_type = parts[0].strip() token_type = parts[0].strip()
if token_type.lower() != "bearer": if token_type.lower() != "bearer":
raise BadRequest("token_type is invalid")
response = jsonify({"error": "token_type is invalid"})
response.status_code = 401
response.headers["WWW-Authenticate"] = "Bearer"
return response


access_token = parts[1].strip() access_token = parts[1].strip()
if not access_token: if not access_token:
raise BadRequest("access_token is required")
response = jsonify({"error": "access_token is required"})
response.status_code = 401
response.headers["WWW-Authenticate"] = "Bearer"
return response


account = OAuthServerService.validate_oauth_access_token(oauth_provider_app.client_id, access_token) account = OAuthServerService.validate_oauth_access_token(oauth_provider_app.client_id, access_token)
if not account: if not account:
raise BadRequest("access_token or client_id is invalid")
response = jsonify({"error": "access_token or client_id is invalid"})
response.status_code = 401
response.headers["WWW-Authenticate"] = "Bearer"
return response


kwargs["account"] = account kwargs["account"] = account



+ 1
- 3
api/core/app/apps/advanced_chat/generate_task_pipeline.py Dosyayı Görüntüle

err = self._base_task_pipeline._handle_error(event=event, session=session, message_id=self._message_id) err = self._base_task_pipeline._handle_error(event=event, session=session, message_id=self._message_id)
yield self._base_task_pipeline._error_to_stream_response(err) yield self._base_task_pipeline._error_to_stream_response(err)


def _handle_workflow_started_event(
self, event: QueueWorkflowStartedEvent, **kwargs
) -> Generator[StreamResponse, None, None]:
def _handle_workflow_started_event(self, *args, **kwargs) -> Generator[StreamResponse, None, None]:
"""Handle workflow started events.""" """Handle workflow started events."""
with self._database_session() as session: with self._database_session() as session:
workflow_execution = self._workflow_cycle_manager.handle_workflow_run_start() workflow_execution = self._workflow_cycle_manager.handle_workflow_run_start()

+ 3
- 2
api/core/app/task_pipeline/message_cycle_manager.py Dosyayı Görüntüle



# generate conversation name # generate conversation name
try: try:
name = LLMGenerator.generate_conversation_name(app_model.tenant_id, query)
name = LLMGenerator.generate_conversation_name(
app_model.tenant_id, query, conversation_id, conversation.app_id
)
conversation.name = name conversation.name = name
except Exception: except Exception:
if dify_config.DEBUG: if dify_config.DEBUG:
logger.exception("generate conversation name failed, conversation_id: %s", conversation_id) logger.exception("generate conversation name failed, conversation_id: %s", conversation_id)
pass


db.session.merge(conversation) db.session.merge(conversation)
db.session.commit() db.session.commit()

+ 8
- 8
api/core/entities/provider_configuration.py Dosyayı Görüntüle

:return: :return:
""" """
with Session(db.engine) as session: with Session(db.engine) as session:
if credential_name and self._check_provider_credential_name_exists(
credential_name=credential_name, session=session
):
raise ValueError(f"Credential with name '{credential_name}' already exists.")
if credential_name:
if self._check_provider_credential_name_exists(credential_name=credential_name, session=session):
raise ValueError(f"Credential with name '{credential_name}' already exists.")
else: else:
credential_name = self._generate_provider_credential_name(session) credential_name = self._generate_provider_credential_name(session)


:return: :return:
""" """
with Session(db.engine) as session: with Session(db.engine) as session:
if credential_name and self._check_custom_model_credential_name_exists(
model=model, model_type=model_type, credential_name=credential_name, session=session
):
raise ValueError(f"Model credential with name '{credential_name}' already exists for {model}.")
if credential_name:
if self._check_custom_model_credential_name_exists(
model=model, model_type=model_type, credential_name=credential_name, session=session
):
raise ValueError(f"Model credential with name '{credential_name}' already exists for {model}.")
else: else:
credential_name = self._generate_custom_model_credential_name( credential_name = self._generate_custom_model_credential_name(
model=model, model_type=model_type, session=session model=model, model_type=model_type, session=session

+ 4
- 0
api/core/mcp/client/streamable_client.py Dosyayı Görüntüle

logger.debug("Received 202 Accepted") logger.debug("Received 202 Accepted")
return return


if response.status_code == 204:
logger.debug("Received 204 No Content")
return

if response.status_code == 404: if response.status_code == 404:
if isinstance(message.root, JSONRPCRequest): if isinstance(message.root, JSONRPCRequest):
self._send_session_terminated_error( self._send_session_terminated_error(

+ 1
- 1
api/core/mcp/server/streamable_http.py Dosyayı Görüntüle

parameters[item.variable]["type"] = "string" parameters[item.variable]["type"] = "string"
parameters[item.variable]["enum"] = item.options parameters[item.variable]["enum"] = item.options
elif item.type == VariableEntityType.NUMBER: elif item.type == VariableEntityType.NUMBER:
parameters[item.variable]["type"] = "float"
parameters[item.variable]["type"] = "number"
return parameters, required return parameters, required

+ 1
- 1
api/core/model_runtime/README.md Dosyayı Görüntüle



## Features ## Features


- Supports capability invocation for 5 types of models
- Supports capability invocation for 6 types of models


- `LLM` - LLM text completion, dialogue, pre-computed tokens capability - `LLM` - LLM text completion, dialogue, pre-computed tokens capability
- `Text Embedding Model` - Text Embedding, pre-computed tokens capability - `Text Embedding Model` - Text Embedding, pre-computed tokens capability

+ 1
- 1
api/core/model_runtime/README_CN.md Dosyayı Görüntüle



## 功能介绍 ## 功能介绍


- 支持 5 种模型类型的能力调用
- 支持 6 种模型类型的能力调用


- `LLM` - LLM 文本补全、对话,预计算 tokens 能力 - `LLM` - LLM 文本补全、对话,预计算 tokens 能力
- `Text Embedding Model` - 文本 Embedding,预计算 tokens 能力 - `Text Embedding Model` - 文本 Embedding,预计算 tokens 能力

+ 7
- 5
api/core/plugin/backwards_invocation/app.py Dosyayı Görüntüle

from typing import Optional, Union from typing import Optional, Union


from sqlalchemy import select from sqlalchemy import select
from sqlalchemy.orm import Session


from controllers.service_api.wraps import create_or_update_end_user_for_user_id from controllers.service_api.wraps import create_or_update_end_user_for_user_id
from core.app.app_config.common.parameters_mapping import get_parameters_from_feature_dict from core.app.app_config.common.parameters_mapping import get_parameters_from_feature_dict
""" """
get the user by user id get the user by user id
""" """
stmt = select(EndUser).where(EndUser.id == user_id)
user = db.session.scalar(stmt)
if not user:
stmt = select(Account).where(Account.id == user_id)
user = db.session.scalar(stmt)
with Session(db.engine, expire_on_commit=False) as session:
stmt = select(EndUser).where(EndUser.id == user_id)
user = session.scalar(stmt)
if not user:
stmt = select(Account).where(Account.id == user_id)
user = session.scalar(stmt)


if not user: if not user:
raise ValueError("user not found") raise ValueError("user not found")

+ 33
- 23
api/core/provider_manager.py Dosyayı Görüntüle

tenant_id tenant_id
) )


# Get All provider model credentials
provider_name_to_provider_model_credentials_dict = self._get_all_provider_model_credentials(tenant_id)

provider_configurations = ProviderConfigurations(tenant_id=tenant_id) provider_configurations = ProviderConfigurations(tenant_id=tenant_id)


# Construct ProviderConfiguration objects for each provider # Construct ProviderConfiguration objects for each provider
provider_model_records.extend( provider_model_records.extend(
provider_name_to_provider_model_records_dict.get(provider_id_entity.provider_name, []) provider_name_to_provider_model_records_dict.get(provider_id_entity.provider_name, [])
) )
provider_model_credentials = provider_name_to_provider_model_credentials_dict.get(
provider_entity.provider, []
)
provider_id_entity = ModelProviderID(provider_name)
if provider_id_entity.is_langgenius():
provider_model_credentials.extend(
provider_name_to_provider_model_credentials_dict.get(provider_id_entity.provider_name, [])
)


# Convert to custom configuration # Convert to custom configuration
custom_configuration = self._to_custom_configuration( custom_configuration = self._to_custom_configuration(
tenant_id, provider_entity, provider_records, provider_model_records
tenant_id, provider_entity, provider_records, provider_model_records, provider_model_credentials
) )


# Convert to system configuration # Convert to system configuration
) )
return provider_name_to_provider_model_settings_dict return provider_name_to_provider_model_settings_dict


@staticmethod
def _get_all_provider_model_credentials(tenant_id: str) -> dict[str, list[ProviderModelCredential]]:
"""
Get All provider model credentials of the workspace.

:param tenant_id: workspace id
:return:
"""
provider_name_to_provider_model_credentials_dict = defaultdict(list)
with Session(db.engine, expire_on_commit=False) as session:
stmt = select(ProviderModelCredential).where(ProviderModelCredential.tenant_id == tenant_id)
provider_model_credentials = session.scalars(stmt)
for provider_model_credential in provider_model_credentials:
provider_name_to_provider_model_credentials_dict[provider_model_credential.provider_name].append(
provider_model_credential
)
return provider_name_to_provider_model_credentials_dict

@staticmethod @staticmethod
def _get_all_provider_load_balancing_configs(tenant_id: str) -> dict[str, list[LoadBalancingModelConfig]]: def _get_all_provider_load_balancing_configs(tenant_id: str) -> dict[str, list[LoadBalancingModelConfig]]:
""" """
for credential in available_credentials for credential in available_credentials
] ]


@staticmethod
def get_credentials_from_provider_model(tenant_id: str, provider_name: str) -> Sequence[ProviderModelCredential]:
"""
Get all the credentials records from ProviderModelCredential by provider_name

:param tenant_id: workspace id
:param provider_name: provider name

"""
with Session(db.engine, expire_on_commit=False) as session:
stmt = select(ProviderModelCredential).where(
ProviderModelCredential.tenant_id == tenant_id, ProviderModelCredential.provider_name == provider_name
)

all_credentials = session.scalars(stmt).all()
return all_credentials

@staticmethod @staticmethod
def _init_trial_provider_records( def _init_trial_provider_records(
tenant_id: str, provider_name_to_provider_records_dict: dict[str, list[Provider]] tenant_id: str, provider_name_to_provider_records_dict: dict[str, list[Provider]]
provider_entity: ProviderEntity, provider_entity: ProviderEntity,
provider_records: list[Provider], provider_records: list[Provider],
provider_model_records: list[ProviderModel], provider_model_records: list[ProviderModel],
provider_model_credentials: list[ProviderModelCredential],
) -> CustomConfiguration: ) -> CustomConfiguration:
""" """
Convert to custom configuration. Convert to custom configuration.
tenant_id, provider_entity, provider_records tenant_id, provider_entity, provider_records
) )


# Get all model credentials once
all_model_credentials = self.get_credentials_from_provider_model(tenant_id, provider_entity.provider)

# Get custom models which have not been added to the model list yet # Get custom models which have not been added to the model list yet
unadded_models = self._get_can_added_models(provider_model_records, all_model_credentials)
unadded_models = self._get_can_added_models(provider_model_records, provider_model_credentials)


# Get custom model configurations # Get custom model configurations
custom_model_configurations = self._get_custom_model_configurations( custom_model_configurations = self._get_custom_model_configurations(
tenant_id, provider_entity, provider_model_records, unadded_models, all_model_credentials
tenant_id, provider_entity, provider_model_records, unadded_models, provider_model_credentials
) )


can_added_models = [ can_added_models = [

+ 2
- 2
api/core/rag/datasource/vdb/myscale/myscale_vector.py Dosyayı Görüntüle

) )
for r in self._client.query(sql).named_results() for r in self._client.query(sql).named_results()
] ]
except Exception as e:
logger.exception("\033[91m\033[1m%s\033[0m \033[95m%s\033[0m", type(e), str(e)) # noqa:TRY401
except Exception:
logger.exception("Vector search operation failed")
return [] return []


def delete(self) -> None: def delete(self) -> None:

+ 8
- 2
api/core/rag/datasource/vdb/tablestore/tablestore_vector.py Dosyayı Görüntüle

import json import json
import logging import logging
import math import math
from collections.abc import Iterable
from typing import Any, Optional from typing import Any, Optional


import tablestore # type: ignore import tablestore # type: ignore
return uuids return uuids


def text_exists(self, id: str) -> bool: def text_exists(self, id: str) -> bool:
_, return_row, _ = self._tablestore_client.get_row(
result = self._tablestore_client.get_row(
table_name=self._table_name, primary_key=[("id", id)], columns_to_get=["id"] table_name=self._table_name, primary_key=[("id", id)], columns_to_get=["id"]
) )
assert isinstance(result, tuple | list)
# Unpack the tuple result
_, return_row, _ = result


return return_row is not None return return_row is not None




def _create_search_index_if_not_exist(self, dimension: int) -> None: def _create_search_index_if_not_exist(self, dimension: int) -> None:
search_index_list = self._tablestore_client.list_search_index(table_name=self._table_name) search_index_list = self._tablestore_client.list_search_index(table_name=self._table_name)
assert isinstance(search_index_list, Iterable)
if self._index_name in [t[1] for t in search_index_list]: if self._index_name in [t[1] for t in search_index_list]:
logger.info("Tablestore system index[%s] already exists", self._index_name) logger.info("Tablestore system index[%s] already exists", self._index_name)
return None return None


def _delete_table_if_exist(self): def _delete_table_if_exist(self):
search_index_list = self._tablestore_client.list_search_index(table_name=self._table_name) search_index_list = self._tablestore_client.list_search_index(table_name=self._table_name)
assert isinstance(search_index_list, Iterable)
for resp_tuple in search_index_list: for resp_tuple in search_index_list:
self._tablestore_client.delete_search_index(resp_tuple[0], resp_tuple[1]) self._tablestore_client.delete_search_index(resp_tuple[0], resp_tuple[1])
logger.info("Tablestore delete index[%s] successfully.", self._index_name) logger.info("Tablestore delete index[%s] successfully.", self._index_name)
) )


if search_response is not None: if search_response is not None:
rows.extend([row[0][0][1] for row in search_response.rows])
rows.extend([row[0][0][1] for row in list(search_response.rows)])


if search_response is None or search_response.next_token == b"": if search_response is None or search_response.next_token == b"":
break break

+ 0
- 7
api/core/rag/datasource/vdb/weaviate/weaviate_vector.py Dosyayı Görüntüle



weaviate.connect.connection.has_grpc = False # ty: ignore [unresolved-attribute] weaviate.connect.connection.has_grpc = False # ty: ignore [unresolved-attribute]


# Fix to minimize the performance impact of the deprecation check in weaviate-client 3.24.0,
# by changing the connection timeout to pypi.org from 1 second to 0.001 seconds.
# TODO: This can be removed once weaviate-client is updated to 3.26.7 or higher,
# which does not contain the deprecation check.
if hasattr(weaviate.connect.connection, "PYPI_TIMEOUT"): # ty: ignore [unresolved-attribute]
weaviate.connect.connection.PYPI_TIMEOUT = 0.001 # ty: ignore [unresolved-attribute]

try: try:
client = weaviate.Client( client = weaviate.Client(
url=config.endpoint, auth_client_secret=auth_config, timeout_config=(5, 60), startup_period=None url=config.endpoint, auth_client_secret=auth_config, timeout_config=(5, 60), startup_period=None

+ 126
- 19
api/core/workflow/entities/graph_runtime_state.py Dosyayı Görüntüle

from copy import deepcopy
from typing import Any from typing import Any


from pydantic import BaseModel, Field
from pydantic import BaseModel, PrivateAttr


from core.model_runtime.entities.llm_entities import LLMUsage from core.model_runtime.entities.llm_entities import LLMUsage






class GraphRuntimeState(BaseModel): class GraphRuntimeState(BaseModel):
variable_pool: VariablePool = Field(..., description="variable pool")
"""variable pool"""

start_at: float = Field(..., description="start time")
"""start time"""
total_tokens: int = 0
"""total tokens"""
llm_usage: LLMUsage = LLMUsage.empty_usage()
"""llm usage info"""

# The `outputs` field stores the final output values generated by executing workflows or chatflows.
#
# Note: Since the type of this field is `dict[str, Any]`, its values may not remain consistent
# after a serialization and deserialization round trip.
outputs: dict[str, Any] = Field(default_factory=dict)

node_run_steps: int = 0
"""node run steps"""
# Private attributes to prevent direct modification
_variable_pool: VariablePool = PrivateAttr()
_start_at: float = PrivateAttr()
_total_tokens: int = PrivateAttr(default=0)
_llm_usage: LLMUsage = PrivateAttr(default_factory=LLMUsage.empty_usage)
_outputs: dict[str, Any] = PrivateAttr(default_factory=dict)
_node_run_steps: int = PrivateAttr(default=0)

def __init__(
self,
variable_pool: VariablePool,
start_at: float,
total_tokens: int = 0,
llm_usage: LLMUsage | None = None,
outputs: dict[str, Any] | None = None,
node_run_steps: int = 0,
**kwargs,
):
"""Initialize the GraphRuntimeState with validation."""
super().__init__(**kwargs)

# Initialize private attributes with validation
self._variable_pool = variable_pool

self._start_at = start_at

if total_tokens < 0:
raise ValueError("total_tokens must be non-negative")
self._total_tokens = total_tokens

if llm_usage is None:
llm_usage = LLMUsage.empty_usage()
self._llm_usage = llm_usage

if outputs is None:
outputs = {}
self._outputs = deepcopy(outputs)

if node_run_steps < 0:
raise ValueError("node_run_steps must be non-negative")
self._node_run_steps = node_run_steps

@property
def variable_pool(self) -> VariablePool:
"""Get the variable pool."""
return self._variable_pool

@property
def start_at(self) -> float:
"""Get the start time."""
return self._start_at

@start_at.setter
def start_at(self, value: float) -> None:
"""Set the start time."""
self._start_at = value

@property
def total_tokens(self) -> int:
"""Get the total tokens count."""
return self._total_tokens

@total_tokens.setter
def total_tokens(self, value: int):
"""Set the total tokens count."""
if value < 0:
raise ValueError("total_tokens must be non-negative")
self._total_tokens = value

@property
def llm_usage(self) -> LLMUsage:
"""Get the LLM usage info."""
# Return a copy to prevent external modification
return self._llm_usage.model_copy()

@llm_usage.setter
def llm_usage(self, value: LLMUsage):
"""Set the LLM usage info."""
self._llm_usage = value.model_copy()

@property
def outputs(self) -> dict[str, Any]:
"""Get a copy of the outputs dictionary."""
return deepcopy(self._outputs)

@outputs.setter
def outputs(self, value: dict[str, Any]) -> None:
"""Set the outputs dictionary."""
self._outputs = deepcopy(value)

def set_output(self, key: str, value: Any) -> None:
"""Set a single output value."""
self._outputs[key] = deepcopy(value)

def get_output(self, key: str, default: Any = None) -> Any:
"""Get a single output value."""
return deepcopy(self._outputs.get(key, default))

def update_outputs(self, updates: dict[str, Any]) -> None:
"""Update multiple output values."""
for key, value in updates.items():
self._outputs[key] = deepcopy(value)

@property
def node_run_steps(self) -> int:
"""Get the node run steps count."""
return self._node_run_steps

@node_run_steps.setter
def node_run_steps(self, value: int) -> None:
"""Set the node run steps count."""
if value < 0:
raise ValueError("node_run_steps must be non-negative")
self._node_run_steps = value

def increment_node_run_steps(self) -> None:
"""Increment the node run steps by 1."""
self._node_run_steps += 1

def add_tokens(self, tokens: int) -> None:
"""Add tokens to the total count."""
if tokens < 0:
raise ValueError("tokens must be non-negative")
self._total_tokens += tokens

+ 12
- 1
api/core/workflow/graph/__init__.py Dosyayı Görüntüle

from .edge import Edge from .edge import Edge
from .graph import Graph, NodeFactory from .graph import Graph, NodeFactory
from .graph_runtime_state_protocol import ReadOnlyGraphRuntimeState, ReadOnlyVariablePool
from .graph_template import GraphTemplate from .graph_template import GraphTemplate
from .read_only_state_wrapper import ReadOnlyGraphRuntimeStateWrapper, ReadOnlyVariablePoolWrapper


__all__ = ["Edge", "Graph", "GraphTemplate", "NodeFactory"]
__all__ = [
"Edge",
"Graph",
"GraphTemplate",
"NodeFactory",
"ReadOnlyGraphRuntimeState",
"ReadOnlyGraphRuntimeStateWrapper",
"ReadOnlyVariablePool",
"ReadOnlyVariablePoolWrapper",
]

+ 59
- 0
api/core/workflow/graph/graph_runtime_state_protocol.py Dosyayı Görüntüle

from typing import Any, Protocol

from core.model_runtime.entities.llm_entities import LLMUsage


class ReadOnlyVariablePool(Protocol):
    """Structural (duck-typed) read-only interface for a VariablePool.

    Implementations are expected to hand out values that callers may
    inspect but not use to mutate the underlying pool (the concrete
    wrapper returns deep copies — confirm per implementation).
    """

    def get(self, node_id: str, variable_key: str) -> Any:
        """Return the value stored for ``variable_key`` under ``node_id``.

        Read-only access; presumably returns ``None`` when the variable
        is absent — verify against the concrete implementation.
        """
        ...

    def get_all_by_node(self, node_id: str) -> dict[str, Any]:
        """Return a mapping of every variable key to its value for ``node_id``."""
        ...
...


class ReadOnlyGraphRuntimeState(Protocol):
    """
    Read-only view of GraphRuntimeState for layers.

    This protocol defines a read-only interface that prevents layers from
    modifying the graph runtime state while still allowing observation.
    All methods return defensive copies to ensure immutability.

    Being a ``typing.Protocol``, any object with matching attributes
    satisfies it structurally; no explicit inheritance is required.
    """

    @property
    def variable_pool(self) -> ReadOnlyVariablePool:
        """Get read-only access to the variable pool."""
        ...

    @property
    def start_at(self) -> float:
        """Get the start time (read-only)."""
        ...

    @property
    def total_tokens(self) -> int:
        """Get the total tokens count (read-only)."""
        ...

    @property
    def llm_usage(self) -> LLMUsage:
        """Get a copy of LLM usage info (read-only)."""
        ...

    @property
    def outputs(self) -> dict[str, Any]:
        """Get a defensive copy of outputs (read-only)."""
        ...

    @property
    def node_run_steps(self) -> int:
        """Get the node run steps count (read-only)."""
        ...

    def get_output(self, key: str, default: Any = None) -> Any:
        """Get a single output value (returns a copy)."""
        ...

+ 76
- 0
api/core/workflow/graph/read_only_state_wrapper.py Dosyayı Görüntüle

from copy import deepcopy
from typing import Any

from core.model_runtime.entities.llm_entities import LLMUsage
from core.workflow.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.entities.variable_pool import VariablePool


class ReadOnlyVariablePoolWrapper:
    """Read-only facade over a ``VariablePool``.

    Every value handed out is a deep copy, so callers can inspect but
    never mutate the wrapped pool's contents.
    """

    def __init__(self, variable_pool: VariablePool):
        self._variable_pool = variable_pool

    def get(self, node_id: str, variable_key: str) -> Any:
        """Return a defensive copy of one variable, or ``None`` when unset."""
        raw = self._variable_pool.get(node_id, variable_key)
        if raw is None:
            return None
        return deepcopy(raw)

    def get_all_by_node(self, node_id: str) -> dict[str, Any]:
        """Return defensive copies of every variable recorded for ``node_id``."""
        pool = self._variable_pool.variable_dictionary
        if node_id not in pool:
            return {}
        # FIXME(-LAN-): Handle the actual Variable object structure
        return {key: deepcopy(getattr(var, "value", var)) for key, var in pool[node_id].items()}


class ReadOnlyGraphRuntimeStateWrapper:
    """Read-only facade over a ``GraphRuntimeState``.

    Layers receive this wrapper instead of the mutable state object, so
    they can observe execution without altering it. Values that callers
    could mutate (outputs, LLM usage) are returned as defensive copies.
    """

    def __init__(self, state: GraphRuntimeState):
        self._state = state
        self._variable_pool_wrapper = ReadOnlyVariablePoolWrapper(state.variable_pool)

    @property
    def variable_pool(self) -> ReadOnlyVariablePoolWrapper:
        """Read-only view of the underlying variable pool."""
        return self._variable_pool_wrapper

    @property
    def start_at(self) -> float:
        """Start time of the wrapped state (read-only)."""
        return self._state.start_at

    @property
    def total_tokens(self) -> int:
        """Total token count accumulated so far (read-only)."""
        return self._state.total_tokens

    @property
    def llm_usage(self) -> LLMUsage:
        """Copy of the LLM usage info; mutations do not propagate back."""
        return self._state.llm_usage.model_copy()

    @property
    def outputs(self) -> dict[str, Any]:
        """Deep copy of the outputs mapping (read-only)."""
        return deepcopy(self._state.outputs)

    @property
    def node_run_steps(self) -> int:
        """Node run step count of the wrapped state (read-only)."""
        return self._state.node_run_steps

    def get_output(self, key: str, default: Any = None) -> Any:
        """Return a copy of one output value, or ``default`` when absent.

        Delegates to ``GraphRuntimeState.get_output``, which already
        returns a defensive copy.
        """
        return self._state.get_output(key, default)

+ 4
- 4
api/core/workflow/graph_engine/event_management/event_handlers.py Dosyayı Görüntüle

# in runtime state, rather than allowing nodes to directly access runtime state. # in runtime state, rather than allowing nodes to directly access runtime state.
for key, value in event.node_run_result.outputs.items(): for key, value in event.node_run_result.outputs.items():
if key == "answer": if key == "answer":
existing = self._graph_runtime_state.outputs.get("answer", "")
existing = self._graph_runtime_state.get_output("answer", "")
if existing: if existing:
self._graph_runtime_state.outputs["answer"] = f"{existing}{value}"
self._graph_runtime_state.set_output("answer", f"{existing}{value}")
else: else:
self._graph_runtime_state.outputs["answer"] = value
self._graph_runtime_state.set_output("answer", value)
else: else:
self._graph_runtime_state.outputs[key] = value
self._graph_runtime_state.set_output(key, value)

+ 3
- 3
api/core/workflow/graph_engine/event_management/event_manager.py Dosyayı Görüntüle



from core.workflow.graph_events import GraphEngineEvent from core.workflow.graph_events import GraphEngineEvent


from ..layers.base import Layer
from ..layers.base import GraphEngineLayer




@final @final
"""Initialize the event manager.""" """Initialize the event manager."""
self._events: list[GraphEngineEvent] = [] self._events: list[GraphEngineEvent] = []
self._lock = ReadWriteLock() self._lock = ReadWriteLock()
self._layers: list[Layer] = []
self._layers: list[GraphEngineLayer] = []
self._execution_complete = threading.Event() self._execution_complete = threading.Event()


def set_layers(self, layers: list[Layer]) -> None:
def set_layers(self, layers: list[GraphEngineLayer]) -> None:
""" """
Set the layers to notify on event collection. Set the layers to notify on event collection.



+ 9
- 6
api/core/workflow/graph_engine/graph_engine.py Dosyayı Görüntüle

from core.workflow.entities import GraphRuntimeState from core.workflow.entities import GraphRuntimeState
from core.workflow.enums import NodeExecutionType from core.workflow.enums import NodeExecutionType
from core.workflow.graph import Graph from core.workflow.graph import Graph
from core.workflow.graph.read_only_state_wrapper import ReadOnlyGraphRuntimeStateWrapper
from core.workflow.graph_events import ( from core.workflow.graph_events import (
GraphEngineEvent, GraphEngineEvent,
GraphNodeEventBase, GraphNodeEventBase,
from .error_handling import ErrorHandler from .error_handling import ErrorHandler
from .event_management import EventHandler, EventManager from .event_management import EventHandler, EventManager
from .graph_traversal import EdgeProcessor, SkipPropagator from .graph_traversal import EdgeProcessor, SkipPropagator
from .layers.base import Layer
from .layers.base import GraphEngineLayer
from .orchestration import Dispatcher, ExecutionCoordinator from .orchestration import Dispatcher, ExecutionCoordinator
from .protocols.command_channel import CommandChannel from .protocols.command_channel import CommandChannel
from .response_coordinator import ResponseStreamCoordinator from .response_coordinator import ResponseStreamCoordinator
from .state_management import UnifiedStateManager from .state_management import UnifiedStateManager
from .worker_management import SimpleWorkerPool
from .worker_management import WorkerPool


logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)


context_vars = contextvars.copy_context() context_vars = contextvars.copy_context()


# Create worker pool for parallel node execution # Create worker pool for parallel node execution
self._worker_pool = SimpleWorkerPool(
self._worker_pool = WorkerPool(
ready_queue=self._ready_queue, ready_queue=self._ready_queue,
event_queue=self._event_queue, event_queue=self._event_queue,
graph=self._graph, graph=self._graph,


# === Extensibility === # === Extensibility ===
# Layers allow plugins to extend engine functionality # Layers allow plugins to extend engine functionality
self._layers: list[Layer] = []
self._layers: list[GraphEngineLayer] = []


# === Validation === # === Validation ===
# Ensure all nodes share the same GraphRuntimeState instance # Ensure all nodes share the same GraphRuntimeState instance
if id(node.graph_runtime_state) != expected_state_id: if id(node.graph_runtime_state) != expected_state_id:
raise ValueError(f"GraphRuntimeState consistency violation: Node '{node.id}' has a different instance") raise ValueError(f"GraphRuntimeState consistency violation: Node '{node.id}' has a different instance")


def layer(self, layer: Layer) -> "GraphEngine":
def layer(self, layer: GraphEngineLayer) -> "GraphEngine":
"""Add a layer for extending functionality.""" """Add a layer for extending functionality."""
self._layers.append(layer) self._layers.append(layer)
return self return self
def _initialize_layers(self) -> None: def _initialize_layers(self) -> None:
"""Initialize layers with context.""" """Initialize layers with context."""
self._event_manager.set_layers(self._layers) self._event_manager.set_layers(self._layers)
# Create a read-only wrapper for the runtime state
read_only_state = ReadOnlyGraphRuntimeStateWrapper(self._graph_runtime_state)
for layer in self._layers: for layer in self._layers:
try: try:
layer.initialize(self._graph_runtime_state, self._command_channel)
layer.initialize(read_only_state, self._command_channel)
except Exception as e: except Exception as e:
logger.warning("Failed to initialize layer %s: %s", layer.__class__.__name__, e) logger.warning("Failed to initialize layer %s: %s", layer.__class__.__name__, e)



+ 2
- 2
api/core/workflow/graph_engine/layers/__init__.py Dosyayı Görüntüle

with middleware-like components that can observe events and interact with execution. with middleware-like components that can observe events and interact with execution.
""" """


from .base import Layer
from .base import GraphEngineLayer
from .debug_logging import DebugLoggingLayer from .debug_logging import DebugLoggingLayer
from .execution_limits import ExecutionLimitsLayer from .execution_limits import ExecutionLimitsLayer


__all__ = [ __all__ = [
"DebugLoggingLayer", "DebugLoggingLayer",
"ExecutionLimitsLayer", "ExecutionLimitsLayer",
"Layer",
"GraphEngineLayer",
] ]

+ 8
- 8
api/core/workflow/graph_engine/layers/base.py Dosyayı Görüntüle



from abc import ABC, abstractmethod from abc import ABC, abstractmethod


from core.workflow.entities import GraphRuntimeState
from core.workflow.graph.graph_runtime_state_protocol import ReadOnlyGraphRuntimeState
from core.workflow.graph_engine.protocols.command_channel import CommandChannel from core.workflow.graph_engine.protocols.command_channel import CommandChannel
from core.workflow.graph_events import GraphEngineEvent from core.workflow.graph_events import GraphEngineEvent




class Layer(ABC):
class GraphEngineLayer(ABC):
""" """
Abstract base class for GraphEngine layers. Abstract base class for GraphEngine layers.




def __init__(self) -> None: def __init__(self) -> None:
"""Initialize the layer. Subclasses can override with custom parameters.""" """Initialize the layer. Subclasses can override with custom parameters."""
self.graph_runtime_state: GraphRuntimeState | None = None
self.graph_runtime_state: ReadOnlyGraphRuntimeState | None = None
self.command_channel: CommandChannel | None = None self.command_channel: CommandChannel | None = None


def initialize(self, graph_runtime_state: GraphRuntimeState, command_channel: CommandChannel) -> None:
def initialize(self, graph_runtime_state: ReadOnlyGraphRuntimeState, command_channel: CommandChannel) -> None:
""" """
Initialize the layer with engine dependencies. Initialize the layer with engine dependencies.


Called by GraphEngine before execution starts to inject the runtime state
and command channel. This allows layers to access engine context and send
commands.
Called by GraphEngine before execution starts to inject the read-only runtime state
and command channel. This allows layers to observe engine context and send
commands, but prevents direct state modification.


Args: Args:
graph_runtime_state: The runtime state of the graph execution
graph_runtime_state: Read-only view of the runtime state
command_channel: Channel for sending commands to the engine command_channel: Channel for sending commands to the engine
""" """
self.graph_runtime_state = graph_runtime_state self.graph_runtime_state = graph_runtime_state

+ 2
- 2
api/core/workflow/graph_engine/layers/debug_logging.py Dosyayı Görüntüle

NodeRunSucceededEvent, NodeRunSucceededEvent,
) )


from .base import Layer
from .base import GraphEngineLayer




@final @final
class DebugLoggingLayer(Layer):
class DebugLoggingLayer(GraphEngineLayer):
""" """
A layer that provides comprehensive logging of GraphEngine execution. A layer that provides comprehensive logging of GraphEngine execution.



+ 2
- 2
api/core/workflow/graph_engine/layers/execution_limits.py Dosyayı Görüntüle

from typing_extensions import override from typing_extensions import override


from core.workflow.graph_engine.entities.commands import AbortCommand, CommandType from core.workflow.graph_engine.entities.commands import AbortCommand, CommandType
from core.workflow.graph_engine.layers import Layer
from core.workflow.graph_engine.layers import GraphEngineLayer
from core.workflow.graph_events import ( from core.workflow.graph_events import (
GraphEngineEvent, GraphEngineEvent,
NodeRunStartedEvent, NodeRunStartedEvent,




@final @final
class ExecutionLimitsLayer(Layer):
class ExecutionLimitsLayer(GraphEngineLayer):
""" """
Layer that enforces execution limits for workflows. Layer that enforces execution limits for workflows.



+ 2
- 2
api/core/workflow/graph_engine/orchestration/execution_coordinator.py Dosyayı Görüntüle

from ..domain import GraphExecution from ..domain import GraphExecution
from ..event_management import EventManager from ..event_management import EventManager
from ..state_management import UnifiedStateManager from ..state_management import UnifiedStateManager
from ..worker_management import SimpleWorkerPool
from ..worker_management import WorkerPool


if TYPE_CHECKING: if TYPE_CHECKING:
from ..event_management import EventHandler from ..event_management import EventHandler
event_handler: "EventHandler", event_handler: "EventHandler",
event_collector: EventManager, event_collector: EventManager,
command_processor: CommandProcessor, command_processor: CommandProcessor,
worker_pool: SimpleWorkerPool,
worker_pool: WorkerPool,
) -> None: ) -> None:
""" """
Initialize the execution coordinator. Initialize the execution coordinator.

+ 16
- 14
api/core/workflow/graph_engine/worker.py Dosyayı Görüntüle

import queue import queue
import threading import threading
import time import time
from collections.abc import Callable
from datetime import datetime from datetime import datetime
from typing import final from typing import final
from uuid import uuid4 from uuid import uuid4
worker_id: int = 0, worker_id: int = 0,
flask_app: Flask | None = None, flask_app: Flask | None = None,
context_vars: contextvars.Context | None = None, context_vars: contextvars.Context | None = None,
on_idle_callback: Callable[[int], None] | None = None,
on_active_callback: Callable[[int], None] | None = None,
) -> None: ) -> None:
""" """
Initialize worker thread. Initialize worker thread.
worker_id: Unique identifier for this worker worker_id: Unique identifier for this worker
flask_app: Optional Flask application for context preservation flask_app: Optional Flask application for context preservation
context_vars: Optional context variables to preserve in worker thread context_vars: Optional context variables to preserve in worker thread
on_idle_callback: Optional callback when worker becomes idle
on_active_callback: Optional callback when worker becomes active
""" """
super().__init__(name=f"GraphWorker-{worker_id}", daemon=True) super().__init__(name=f"GraphWorker-{worker_id}", daemon=True)
self._ready_queue = ready_queue self._ready_queue = ready_queue
self._flask_app = flask_app self._flask_app = flask_app
self._context_vars = context_vars self._context_vars = context_vars
self._stop_event = threading.Event() self._stop_event = threading.Event()
self._on_idle_callback = on_idle_callback
self._on_active_callback = on_active_callback
self._last_task_time = time.time() self._last_task_time = time.time()


def stop(self) -> None: def stop(self) -> None:
"""Signal the worker to stop processing.""" """Signal the worker to stop processing."""
self._stop_event.set() self._stop_event.set()


@property
def is_idle(self) -> bool:
"""Check if the worker is currently idle."""
# Worker is idle if it hasn't processed a task recently (within 0.2 seconds)
return (time.time() - self._last_task_time) > 0.2

@property
def idle_duration(self) -> float:
"""Get the duration in seconds since the worker last processed a task."""
return time.time() - self._last_task_time

@property
def worker_id(self) -> int:
"""Get the worker's ID."""
return self._worker_id

@override @override
def run(self) -> None: def run(self) -> None:
""" """
try: try:
node_id = self._ready_queue.get(timeout=0.1) node_id = self._ready_queue.get(timeout=0.1)
except queue.Empty: except queue.Empty:
# Notify that worker is idle
if self._on_idle_callback:
self._on_idle_callback(self._worker_id)
continue continue


# Notify that worker is active
if self._on_active_callback:
self._on_active_callback(self._worker_id)

self._last_task_time = time.time() self._last_task_time = time.time()
node = self._graph.nodes[node_id] node = self._graph.nodes[node_id]
try: try:

+ 2
- 2
api/core/workflow/graph_engine/worker_management/__init__.py Dosyayı Görüntüle

scaling, and activity tracking. scaling, and activity tracking.
""" """


from .simple_worker_pool import SimpleWorkerPool
from .worker_pool import WorkerPool


__all__ = [ __all__ = [
"SimpleWorkerPool",
"WorkerPool",
] ]

api/core/workflow/graph_engine/worker_management/simple_worker_pool.py → api/core/workflow/graph_engine/worker_management/worker_pool.py Dosyayı Görüntüle

DynamicScaler, and WorkerFactory into a single class. DynamicScaler, and WorkerFactory into a single class.
""" """


import logging
import queue import queue
import threading import threading
from typing import TYPE_CHECKING, final from typing import TYPE_CHECKING, final


from ..worker import Worker from ..worker import Worker


logger = logging.getLogger(__name__)

if TYPE_CHECKING: if TYPE_CHECKING:
from contextvars import Context from contextvars import Context






@final @final
class SimpleWorkerPool:
class WorkerPool:
""" """
Simple worker pool with integrated management. Simple worker pool with integrated management.


self._lock = threading.RLock() self._lock = threading.RLock()
self._running = False self._running = False


# No longer tracking worker states with callbacks to avoid lock contention

def start(self, initial_count: int | None = None) -> None: def start(self, initial_count: int | None = None) -> None:
""" """
Start the worker pool. Start the worker pool.
else: else:
initial_count = min(self._min_workers + 2, self._max_workers) initial_count = min(self._min_workers + 2, self._max_workers)


logger.debug(
"Starting worker pool: %d workers (nodes=%d, min=%d, max=%d)",
initial_count,
node_count,
self._min_workers,
self._max_workers,
)

# Create initial workers # Create initial workers
for _ in range(initial_count): for _ in range(initial_count):
self._create_worker() self._create_worker()
"""Stop all workers in the pool.""" """Stop all workers in the pool."""
with self._lock: with self._lock:
self._running = False self._running = False
worker_count = len(self._workers)

if worker_count > 0:
logger.debug("Stopping worker pool: %d workers", worker_count)


# Stop all workers # Stop all workers
for worker in self._workers: for worker in self._workers:
worker.start() worker.start()
self._workers.append(worker) self._workers.append(worker)


def _remove_worker(self, worker: Worker, worker_id: int) -> None:
"""Remove a specific worker from the pool."""
# Stop the worker
worker.stop()

# Wait for it to finish
if worker.is_alive():
worker.join(timeout=2.0)

# Remove from list
if worker in self._workers:
self._workers.remove(worker)

def _try_scale_up(self, queue_depth: int, current_count: int) -> bool:
"""
Try to scale up workers if needed.

Args:
queue_depth: Current queue depth
current_count: Current number of workers

Returns:
True if scaled up, False otherwise
"""
if queue_depth > self._scale_up_threshold and current_count < self._max_workers:
old_count = current_count
self._create_worker()

logger.debug(
"Scaled up workers: %d -> %d (queue_depth=%d exceeded threshold=%d)",
old_count,
len(self._workers),
queue_depth,
self._scale_up_threshold,
)
return True
return False

def _try_scale_down(self, queue_depth: int, current_count: int, active_count: int, idle_count: int) -> bool:
"""
Try to scale down workers if we have excess capacity.

Args:
queue_depth: Current queue depth
current_count: Current number of workers
active_count: Number of active workers
idle_count: Number of idle workers

Returns:
True if scaled down, False otherwise
"""
# Skip if we're at minimum or have no idle workers
if current_count <= self._min_workers or idle_count == 0:
return False

# Check if we have excess capacity
has_excess_capacity = (
queue_depth <= active_count # Active workers can handle current queue
or idle_count > active_count # More idle than active workers
or (queue_depth == 0 and idle_count > 0) # No work and have idle workers
)

if not has_excess_capacity:
return False

# Find and remove idle workers that have been idle long enough
workers_to_remove = []

for worker in self._workers:
# Check if worker is idle and has exceeded idle time threshold
if worker.is_idle and worker.idle_duration >= self._scale_down_idle_time:
# Don't remove if it would leave us unable to handle the queue
remaining_workers = current_count - len(workers_to_remove) - 1
if remaining_workers >= self._min_workers and remaining_workers >= max(1, queue_depth // 2):
workers_to_remove.append((worker, worker.worker_id))
# Only remove one worker per check to avoid aggressive scaling
break

# Remove idle workers if any found
if workers_to_remove:
old_count = current_count
for worker, worker_id in workers_to_remove:
self._remove_worker(worker, worker_id)

logger.debug(
"Scaled down workers: %d -> %d (removed %d idle workers after %.1fs, "
"queue_depth=%d, active=%d, idle=%d)",
old_count,
len(self._workers),
len(workers_to_remove),
self._scale_down_idle_time,
queue_depth,
active_count,
idle_count - len(workers_to_remove),
)
return True

return False

def check_and_scale(self) -> None: def check_and_scale(self) -> None:
"""Check and perform scaling if needed.""" """Check and perform scaling if needed."""
with self._lock: with self._lock:
current_count = len(self._workers) current_count = len(self._workers)
queue_depth = self._ready_queue.qsize() queue_depth = self._ready_queue.qsize()


# Simple scaling logic
if queue_depth > self._scale_up_threshold and current_count < self._max_workers:
self._create_worker()
# Count active vs idle workers by querying their state directly
idle_count = sum(1 for worker in self._workers if worker.is_idle)
active_count = current_count - idle_count

# Try to scale up if queue is backing up
self._try_scale_up(queue_depth, current_count)

# Try to scale down if we have excess capacity
self._try_scale_down(queue_depth, current_count, active_count, idle_count)


def get_worker_count(self) -> int: def get_worker_count(self) -> int:
"""Get current number of workers.""" """Get current number of workers."""

+ 1
- 1
api/core/workflow/nodes/agent/agent_node.py Dosyayı Görüntüle

for key, value in msg_metadata.items() for key, value in msg_metadata.items()
if key in WorkflowNodeExecutionMetadataKey.__members__.values() if key in WorkflowNodeExecutionMetadataKey.__members__.values()
} }
if message.message.json_object is not None:
if message.message.json_object:
json_list.append(message.message.json_object) json_list.append(message.message.json_object)
elif message.type == ToolInvokeMessage.MessageType.LINK: elif message.type == ToolInvokeMessage.MessageType.LINK:
assert isinstance(message.message, ToolInvokeMessage.TextMessage) assert isinstance(message.message, ToolInvokeMessage.TextMessage)

+ 0
- 3
api/core/workflow/nodes/base/entities.py Dosyayı Görüntüle



@model_validator(mode="after") @model_validator(mode="after")
def validate_value_type(self) -> "DefaultValue": def validate_value_type(self) -> "DefaultValue":
if self.type is None:
raise DefaultValueTypeError("type field is required")

# Type validation configuration # Type validation configuration
type_validators = { type_validators = {
DefaultValueType.STRING: { DefaultValueType.STRING: {

+ 36
- 23
api/core/workflow/nodes/code/code_node.py Dosyayı Görüntüle

""" """
if value is None: if value is None:
return None return None
if not isinstance(value, str):
raise OutputValidationError(f"Output variable `{variable}` must be a string")


if len(value) > dify_config.CODE_MAX_STRING_LENGTH: if len(value) > dify_config.CODE_MAX_STRING_LENGTH:
raise OutputValidationError( raise OutputValidationError(
def _check_boolean(self, value: bool | None, variable: str) -> bool | None: def _check_boolean(self, value: bool | None, variable: str) -> bool | None:
if value is None: if value is None:
return None return None
if not isinstance(value, bool):
raise OutputValidationError(f"Output variable `{variable}` must be a boolean")


return value return value


""" """
if value is None: if value is None:
return None return None
if not isinstance(value, int | float):
raise OutputValidationError(f"Output variable `{variable}` must be a number")


if value > dify_config.CODE_MAX_NUMBER or value < dify_config.CODE_MIN_NUMBER: if value > dify_config.CODE_MAX_NUMBER or value < dify_config.CODE_MIN_NUMBER:
raise OutputValidationError( raise OutputValidationError(
) )
elif output_config.type == SegmentType.NUMBER: elif output_config.type == SegmentType.NUMBER:
# check if number available # check if number available
checked = self._check_number(value=result[output_name], variable=f"{prefix}{dot}{output_name}")
value = result.get(output_name)
if not isinstance(value, (int, float, None)):
raise OutputValidationError(
f"Output {prefix}{dot}{output_name} is not a number,"
f" got {type(result.get(output_name))} instead."
)
checked = self._check_number(value=value, variable=f"{prefix}{dot}{output_name}")
# If the output is a boolean and the output schema specifies a NUMBER type, # If the output is a boolean and the output schema specifies a NUMBER type,
# convert the boolean value to an integer. # convert the boolean value to an integer.
# #


elif output_config.type == SegmentType.STRING: elif output_config.type == SegmentType.STRING:
# check if string available # check if string available
value = result.get("output_name")
if value is not None and not isinstance(value, str):
raise OutputValidationError(f"Output value `{value}` is not string")
transformed_result[output_name] = self._check_string( transformed_result[output_name] = self._check_string(
value=result[output_name],
value=value,
variable=f"{prefix}{dot}{output_name}", variable=f"{prefix}{dot}{output_name}",
) )
elif output_config.type == SegmentType.BOOLEAN: elif output_config.type == SegmentType.BOOLEAN:
) )
elif output_config.type == SegmentType.ARRAY_NUMBER: elif output_config.type == SegmentType.ARRAY_NUMBER:
# check if array of number available # check if array of number available
if not isinstance(result[output_name], list):
if result[output_name] is None:
value = result[output_name]
if not isinstance(value, list):
if value is None:
transformed_result[output_name] = None transformed_result[output_name] = None
else: else:
raise OutputValidationError( raise OutputValidationError(
f"Output {prefix}{dot}{output_name} is not an array,"
f" got {type(result.get(output_name))} instead."
f"Output {prefix}{dot}{output_name} is not an array, got {type(value)} instead."
) )
else: else:
if len(result[output_name]) > dify_config.CODE_MAX_NUMBER_ARRAY_LENGTH:
if len(value) > dify_config.CODE_MAX_NUMBER_ARRAY_LENGTH:
raise OutputValidationError( raise OutputValidationError(
f"The length of output variable `{prefix}{dot}{output_name}` must be" f"The length of output variable `{prefix}{dot}{output_name}` must be"
f" less than {dify_config.CODE_MAX_NUMBER_ARRAY_LENGTH} elements." f" less than {dify_config.CODE_MAX_NUMBER_ARRAY_LENGTH} elements."
) )


for i, inner_value in enumerate(value):
if not isinstance(inner_value, (int, float)):
raise OutputValidationError(
f"The element at index {i} of output variable `{prefix}{dot}{output_name}` must be"
f" a number."
)
_ = self._check_number(value=inner_value, variable=f"{prefix}{dot}{output_name}[{i}]")
transformed_result[output_name] = [ transformed_result[output_name] = [
# If the element is a boolean and the output schema specifies a `array[number]` type, # If the element is a boolean and the output schema specifies a `array[number]` type,
# convert the boolean value to an integer. # convert the boolean value to an integer.
# #
# This ensures compatibility with existing workflows that may use # This ensures compatibility with existing workflows that may use
# `True` and `False` as values for NUMBER type outputs. # `True` and `False` as values for NUMBER type outputs.
self._convert_boolean_to_int(
self._check_number(value=value, variable=f"{prefix}{dot}{output_name}[{i}]"),
)
for i, value in enumerate(result[output_name])
self._convert_boolean_to_int(v)
for v in value
] ]
elif output_config.type == SegmentType.ARRAY_STRING: elif output_config.type == SegmentType.ARRAY_STRING:
# check if array of string available # check if array of string available
] ]
elif output_config.type == SegmentType.ARRAY_BOOLEAN: elif output_config.type == SegmentType.ARRAY_BOOLEAN:
# check if array of object available # check if array of object available
if not isinstance(result[output_name], list):
if result[output_name] is None:
value = result[output_name]
if not isinstance(value, list):
if value is None:
transformed_result[output_name] = None transformed_result[output_name] = None
else: else:
raise OutputValidationError( raise OutputValidationError(
f" got {type(result.get(output_name))} instead." f" got {type(result.get(output_name))} instead."
) )
else: else:
transformed_result[output_name] = [
self._check_boolean(value=value, variable=f"{prefix}{dot}{output_name}[{i}]")
for i, value in enumerate(result[output_name])
]
for i, inner_value in enumerate(value):
if not isinstance(inner_value, bool | None):
raise OutputValidationError(
f"Output {prefix}{dot}{output_name}[{i}] is not a boolean,"
f" got {type(inner_value)} instead."
)
_ = self._check_boolean(value=inner_value, variable=f"{prefix}{dot}{output_name}[{i}]")
transformed_result[output_name] = value


else: else:
raise OutputValidationError(f"Output type {output_config.type} is not supported.") raise OutputValidationError(f"Output type {output_config.type} is not supported.")

+ 6
- 17
api/core/workflow/nodes/http_request/executor.py Dosyayı Görüntüle

if authorization.config is None: if authorization.config is None:
raise AuthorizationConfigError("authorization config is required") raise AuthorizationConfigError("authorization config is required")


if self.auth.config.api_key is None:
raise AuthorizationConfigError("api_key is required")

if not authorization.config.header: if not authorization.config.header:
authorization.config.header = "Authorization" authorization.config.header = "Authorization"


if self.files and not all(f[0] == "__multipart_placeholder__" for f in self.files): if self.files and not all(f[0] == "__multipart_placeholder__" for f in self.files):
for file_entry in self.files: for file_entry in self.files:
# file_entry should be (key, (filename, content, mime_type)), but handle edge cases # file_entry should be (key, (filename, content, mime_type)), but handle edge cases
if len(file_entry) != 2 or not isinstance(file_entry[1], tuple) or len(file_entry[1]) < 2:
if len(file_entry) != 2 or len(file_entry[1]) < 2:
continue # skip malformed entries continue # skip malformed entries
key = file_entry[0] key = file_entry[0]
content = file_entry[1][1] content = file_entry[1][1]
body_string += f"--{boundary}\r\n" body_string += f"--{boundary}\r\n"
body_string += f'Content-Disposition: form-data; name="{key}"\r\n\r\n' body_string += f'Content-Disposition: form-data; name="{key}"\r\n\r\n'
# decode content safely # decode content safely
if isinstance(content, bytes):
try:
body_string += content.decode("utf-8")
except UnicodeDecodeError:
body_string += content.decode("utf-8", errors="replace")
elif isinstance(content, str):
body_string += content
else:
body_string += f"[Unsupported content type: {type(content).__name__}]"
try:
body_string += content.decode("utf-8")
except UnicodeDecodeError:
body_string += content.decode("utf-8", errors="replace")
body_string += "\r\n" body_string += "\r\n"
body_string += f"--{boundary}--\r\n" body_string += f"--{boundary}--\r\n"
elif self.node_data.body: elif self.node_data.body:
if self.content: if self.content:
if isinstance(self.content, str):
body_string = self.content
elif isinstance(self.content, bytes):
body_string = self.content.decode("utf-8", errors="replace")
body_string = self.content.decode("utf-8", errors="replace")
elif self.data and self.node_data.body.type == "x-www-form-urlencoded": elif self.data and self.node_data.body.type == "x-www-form-urlencoded":
body_string = urlencode(self.data) body_string = urlencode(self.data)
elif self.data and self.node_data.body.type == "form-data": elif self.data and self.node_data.body.type == "form-data":

+ 4
- 10
api/core/workflow/nodes/list_operator/node.py Dosyayı Görüntüle

) )
result = list(filter(filter_func, variable.value)) result = list(filter(filter_func, variable.value))
variable = variable.model_copy(update={"value": result}) variable = variable.model_copy(update={"value": result})
elif isinstance(variable, ArrayBooleanSegment):
if not isinstance(condition.value, bool):
raise InvalidFilterValueError(f"Invalid filter value: {condition.value}")
else:
filter_func = _get_boolean_filter_func(condition=condition.comparison_operator, value=condition.value) filter_func = _get_boolean_filter_func(condition=condition.comparison_operator, value=condition.value)
result = list(filter(filter_func, variable.value)) result = list(filter(filter_func, variable.value))
variable = variable.model_copy(update={"value": result}) variable = variable.model_copy(update={"value": result})
else:
raise AssertionError("this statment should be unreachable.")
return variable return variable


def _apply_order(self, variable: _SUPPORTED_TYPES_ALIAS) -> _SUPPORTED_TYPES_ALIAS: def _apply_order(self, variable: _SUPPORTED_TYPES_ALIAS) -> _SUPPORTED_TYPES_ALIAS:
if isinstance(variable, (ArrayStringSegment, ArrayNumberSegment, ArrayBooleanSegment)): if isinstance(variable, (ArrayStringSegment, ArrayNumberSegment, ArrayBooleanSegment)):
result = sorted(variable.value, reverse=self._node_data.order_by == Order.DESC)
result = sorted(variable.value, reverse=self._node_data.order_by.value == Order.DESC)
variable = variable.model_copy(update={"value": result}) variable = variable.model_copy(update={"value": result})
elif isinstance(variable, ArrayFileSegment):
else:
result = _order_file( result = _order_file(
order=self._node_data.order_by.value, order_by=self._node_data.order_by.key, array=variable.value order=self._node_data.order_by.value, order_by=self._node_data.order_by.key, array=variable.value
) )
variable = variable.model_copy(update={"value": result}) variable = variable.model_copy(update={"value": result})
else:
raise AssertionError("this statement should be unreachable")


return variable return variable


if key in {"name", "extension", "mime_type", "url"} and isinstance(value, str): if key in {"name", "extension", "mime_type", "url"} and isinstance(value, str):
extract_func = _get_file_extract_string_func(key=key) extract_func = _get_file_extract_string_func(key=key)
return lambda x: _get_string_filter_func(condition=condition, value=value)(extract_func(x)) return lambda x: _get_string_filter_func(condition=condition, value=value)(extract_func(x))
if key in {"type", "transfer_method"} and isinstance(value, Sequence):
if key in {"type", "transfer_method"}:
extract_func = _get_file_extract_string_func(key=key) extract_func = _get_file_extract_string_func(key=key)
return lambda x: _get_sequence_filter_func(condition=condition, value=value)(extract_func(x)) return lambda x: _get_sequence_filter_func(condition=condition, value=value)(extract_func(x))
elif key == "size" and isinstance(value, str): elif key == "size" and isinstance(value, str):

+ 5
- 11
api/core/workflow/nodes/llm/node.py Dosyayı Görüntüle

generator = self._fetch_context(node_data=self._node_data) generator = self._fetch_context(node_data=self._node_data)
context = None context = None
for event in generator: for event in generator:
if isinstance(event, RunRetrieverResourceEvent):
context = event.context
yield event
context = event.context
yield event
if context: if context:
node_inputs["#context#"] = context node_inputs["#context#"] = context


outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason} outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason}
if structured_output: if structured_output:
outputs["structured_output"] = structured_output.structured_output outputs["structured_output"] = structured_output.structured_output
if self._file_outputs is not None:
if self._file_outputs:
outputs["files"] = ArrayFileSegment(value=self._file_outputs) outputs["files"] = ArrayFileSegment(value=self._file_outputs)


# Send final chunk event to indicate streaming is complete # Send final chunk event to indicate streaming is complete


prompt_template = typed_node_data.prompt_template prompt_template = typed_node_data.prompt_template
variable_selectors = [] variable_selectors = []
if isinstance(prompt_template, list) and all(
isinstance(prompt, LLMNodeChatModelMessage) for prompt in prompt_template
):
if isinstance(prompt_template, list):
for prompt in prompt_template: for prompt in prompt_template:
if prompt.edition_type != "jinja2": if prompt.edition_type != "jinja2":
variable_template_parser = VariableTemplateParser(template=prompt.text) variable_template_parser = VariableTemplateParser(template=prompt.text)
return return
if isinstance(contents, str): if isinstance(contents, str):
yield contents yield contents
elif isinstance(contents, list):
else:
for item in contents: for item in contents:
if isinstance(item, TextPromptMessageContent): if isinstance(item, TextPromptMessageContent):
yield item.data yield item.data
else: else:
logger.warning("unknown item type encountered, type=%s", type(item)) logger.warning("unknown item type encountered, type=%s", type(item))
yield str(item) yield str(item)
else:
logger.warning("unknown contents type encountered, type=%s", type(contents))
yield str(contents)


@property @property
def retry(self) -> bool: def retry(self) -> bool:

+ 4
- 4
api/core/workflow/nodes/loop/loop_node.py Dosyayı Görüntüle

for key, value in graph_engine.graph_runtime_state.outputs.items(): for key, value in graph_engine.graph_runtime_state.outputs.items():
if key == "answer": if key == "answer":
# Concatenate answer outputs with newline # Concatenate answer outputs with newline
existing_answer = self.graph_runtime_state.outputs.get("answer", "")
existing_answer = self.graph_runtime_state.get_output("answer", "")
if existing_answer: if existing_answer:
self.graph_runtime_state.outputs["answer"] = f"{existing_answer}{value}"
self.graph_runtime_state.set_output("answer", f"{existing_answer}{value}")
else: else:
self.graph_runtime_state.outputs["answer"] = value
self.graph_runtime_state.set_output("answer", value)
else: else:
# For other outputs, just update # For other outputs, just update
self.graph_runtime_state.outputs[key] = value
self.graph_runtime_state.set_output(key, value)


# Update the total tokens from this iteration # Update the total tokens from this iteration
cost_tokens += graph_engine.graph_runtime_state.total_tokens cost_tokens += graph_engine.graph_runtime_state.total_tokens

+ 0
- 2
api/core/workflow/nodes/parameter_extractor/entities.py Dosyayı Görüntüle





def _validate_type(parameter_type: str) -> SegmentType: def _validate_type(parameter_type: str) -> SegmentType:
if not isinstance(parameter_type, str):
raise TypeError(f"type should be str, got {type(parameter_type)}, value={parameter_type}")
if parameter_type not in _VALID_PARAMETER_TYPES: if parameter_type not in _VALID_PARAMETER_TYPES:
raise ValueError(f"type {parameter_type} is not allowd to use in Parameter Extractor node.") raise ValueError(f"type {parameter_type} is not allowd to use in Parameter Extractor node.")



+ 14
- 19
api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py Dosyayı Görüntüle

from core.memory.token_buffer_memory import TokenBufferMemory from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance from core.model_manager import ModelInstance
from core.model_runtime.entities import ImagePromptMessageContent from core.model_runtime.entities import ImagePromptMessageContent
from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
from core.model_runtime.entities.llm_entities import LLMUsage
from core.model_runtime.entities.message_entities import ( from core.model_runtime.entities.message_entities import (
AssistantPromptMessage, AssistantPromptMessage,
PromptMessage, PromptMessage,


from .entities import ParameterExtractorNodeData from .entities import ParameterExtractorNodeData
from .exc import ( from .exc import (
InvalidInvokeResultError,
InvalidModelModeError, InvalidModelModeError,
InvalidModelTypeError, InvalidModelTypeError,
InvalidNumberOfParametersError, InvalidNumberOfParametersError,
) )


# handle invoke result # handle invoke result
if not isinstance(invoke_result, LLMResult):
raise InvalidInvokeResultError(f"Invalid invoke result: {invoke_result}")


text = invoke_result.message.content or "" text = invoke_result.message.content or ""
if not isinstance(text, str): if not isinstance(text, str):
# deduct quota # deduct quota
llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage) llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)


if text is None:
text = ""

return text, usage, tool_call return text, usage, tool_call


def _generate_function_call_prompt( def _generate_function_call_prompt(
return int(value) return int(value)
elif isinstance(value, (int, float)): elif isinstance(value, (int, float)):
return value return value
elif not isinstance(value, str):
return None
if "." in value:
try:
return float(value)
except ValueError:
return None
elif isinstance(value, str):
if "." in value:
try:
return float(value)
except ValueError:
return None
else:
try:
return int(value)
except ValueError:
return None
else: else:
try:
return int(value)
except ValueError:
return None
return None


def _transform_result(self, data: ParameterExtractorNodeData, result: dict) -> dict: def _transform_result(self, data: ParameterExtractorNodeData, result: dict) -> dict:
""" """
for parameter in data.parameters: for parameter in data.parameters:
if parameter.type == "number": if parameter.type == "number":
result[parameter.name] = 0 result[parameter.name] = 0
elif parameter.type == "bool":
elif parameter.type == "boolean":
result[parameter.name] = False result[parameter.name] = False
elif parameter.type in {"string", "select"}: elif parameter.type in {"string", "select"}:
result[parameter.name] = "" result[parameter.name] = ""

+ 1
- 1
api/core/workflow/nodes/tool/tool_node.py Dosyayı Görüntüle

elif message.type == ToolInvokeMessage.MessageType.JSON: elif message.type == ToolInvokeMessage.MessageType.JSON:
assert isinstance(message.message, ToolInvokeMessage.JsonMessage) assert isinstance(message.message, ToolInvokeMessage.JsonMessage)
# JSON message handling for tool node # JSON message handling for tool node
if message.message.json_object is not None:
if message.message.json_object:
json.append(message.message.json_object) json.append(message.message.json_object)
elif message.type == ToolInvokeMessage.MessageType.LINK: elif message.type == ToolInvokeMessage.MessageType.LINK:
assert isinstance(message.message, ToolInvokeMessage.TextMessage) assert isinstance(message.message, ToolInvokeMessage.TextMessage)

+ 0
- 5
api/core/workflow/nodes/variable_assigner/v1/node.py Dosyayı Görüntüle



case WriteMode.CLEAR: case WriteMode.CLEAR:
income_value = get_zero_value(original_variable.value_type) income_value = get_zero_value(original_variable.value_type)
if income_value is None:
raise VariableOperatorNodeError("income value not found")
updated_variable = original_variable.model_copy(update={"value": income_value.to_object()}) updated_variable = original_variable.model_copy(update={"value": income_value.to_object()})


case _:
raise VariableOperatorNodeError(f"unsupported write mode: {self._node_data.write_mode}")

# Over write the variable. # Over write the variable.
self.graph_runtime_state.variable_pool.add(assigned_variable_selector, updated_variable) self.graph_runtime_state.variable_pool.add(assigned_variable_selector, updated_variable)



+ 0
- 2
api/core/workflow/nodes/variable_assigner/v2/helpers.py Dosyayı Görüntüle

# Only array variable can be appended or extended # Only array variable can be appended or extended
# Only array variable can have elements removed # Only array variable can have elements removed
return variable_type.is_array_type() return variable_type.is_array_type()
case _:
return False




def is_variable_input_supported(*, operation: Operation): def is_variable_input_supported(*, operation: Operation):

+ 0
- 2
api/core/workflow/nodes/variable_assigner/v2/node.py Dosyayı Görüntüle

if not variable.value: if not variable.value:
return variable.value return variable.value
return variable.value[:-1] return variable.value[:-1]
case _:
raise OperationNotSupportedError(operation=operation, variable_type=variable.value_type)

+ 2
- 1
api/extensions/storage/clickzetta_volume/file_lifecycle.py Dosyayı Görüntüle

"""ClickZetta Volume file lifecycle management """ClickZetta Volume file lifecycle management


This module provides file lifecycle management features including version control, automatic cleanup, backup and restore
This module provides file lifecycle management features including version control,
automatic cleanup, backup and restore.
Supports complete lifecycle management for knowledge base files. Supports complete lifecycle management for knowledge base files.
""" """



+ 0
- 1
api/pyrightconfig.json Dosyayı Görüntüle

"pythonVersion": "3.11", "pythonVersion": "3.11",
"pythonPlatform": "All", "pythonPlatform": "All",
"reportMissingTypeStubs": false, "reportMissingTypeStubs": false,
"reportGeneralTypeIssues": "none",
"reportOptionalMemberAccess": "none", "reportOptionalMemberAccess": "none",
"reportOptionalIterable": "none", "reportOptionalIterable": "none",
"reportOptionalOperand": "none", "reportOptionalOperand": "none",

+ 1
- 1
api/services/recommended_app_service.py Dosyayı Görüntüle

mode = dify_config.HOSTED_FETCH_APP_TEMPLATES_MODE mode = dify_config.HOSTED_FETCH_APP_TEMPLATES_MODE
retrieval_instance = RecommendAppRetrievalFactory.get_recommend_app_factory(mode)() retrieval_instance = RecommendAppRetrievalFactory.get_recommend_app_factory(mode)()
result = retrieval_instance.get_recommended_apps_and_categories(language) result = retrieval_instance.get_recommended_apps_and_categories(language)
if not result.get("recommended_apps") and language != "en-US":
if not result.get("recommended_apps"):
result = ( result = (
RecommendAppRetrievalFactory.get_buildin_recommend_app_retrieval().fetch_recommended_apps_from_builtin( RecommendAppRetrievalFactory.get_buildin_recommend_app_retrieval().fetch_recommended_apps_from_builtin(
"en-US" "en-US"

+ 0
- 0
api/tests/test_containers_integration_tests/services/workflow/__init__.py Dosyayı Görüntüle


+ 553
- 0
api/tests/test_containers_integration_tests/services/workflow/test_workflow_converter.py Dosyayı Görüntüle

import json
from unittest.mock import patch

import pytest
from faker import Faker

from core.app.app_config.entities import (
DatasetEntity,
DatasetRetrieveConfigEntity,
ExternalDataVariableEntity,
ModelConfigEntity,
PromptTemplateEntity,
VariableEntity,
VariableEntityType,
)
from core.model_runtime.entities.llm_entities import LLMMode
from models.account import Account, Tenant
from models.api_based_extension import APIBasedExtension
from models.model import App, AppMode, AppModelConfig
from models.workflow import Workflow
from services.workflow.workflow_converter import WorkflowConverter


class TestWorkflowConverter:
"""Integration tests for WorkflowConverter using testcontainers."""

@pytest.fixture
def mock_external_service_dependencies(self):
"""Mock setup for external service dependencies."""
with (
patch("services.workflow.workflow_converter.encrypter") as mock_encrypter,
patch("services.workflow.workflow_converter.SimplePromptTransform") as mock_prompt_transform,
patch("services.workflow.workflow_converter.AgentChatAppConfigManager") as mock_agent_chat_config_manager,
patch("services.workflow.workflow_converter.ChatAppConfigManager") as mock_chat_config_manager,
patch("services.workflow.workflow_converter.CompletionAppConfigManager") as mock_completion_config_manager,
):
# Setup default mock returns
mock_encrypter.decrypt_token.return_value = "decrypted_api_key"
mock_prompt_transform.return_value.get_prompt_template.return_value = {
"prompt_template": type("obj", (object,), {"template": "You are a helpful assistant {{text_input}}"})(),
"prompt_rules": {"human_prefix": "Human", "assistant_prefix": "Assistant"},
}
mock_agent_chat_config_manager.get_app_config.return_value = self._create_mock_app_config()
mock_chat_config_manager.get_app_config.return_value = self._create_mock_app_config()
mock_completion_config_manager.get_app_config.return_value = self._create_mock_app_config()

yield {
"encrypter": mock_encrypter,
"prompt_transform": mock_prompt_transform,
"agent_chat_config_manager": mock_agent_chat_config_manager,
"chat_config_manager": mock_chat_config_manager,
"completion_config_manager": mock_completion_config_manager,
}

def _create_mock_app_config(self):
"""Helper method to create a mock app config."""
mock_config = type("obj", (object,), {})()
mock_config.variables = [
VariableEntity(
variable="text_input",
label="Text Input",
type=VariableEntityType.TEXT_INPUT,
)
]
mock_config.model = ModelConfigEntity(
provider="openai",
model="gpt-4",
mode=LLMMode.CHAT.value,
parameters={},
stop=[],
)
mock_config.prompt_template = PromptTemplateEntity(
prompt_type=PromptTemplateEntity.PromptType.SIMPLE,
simple_prompt_template="You are a helpful assistant {{text_input}}",
)
mock_config.dataset = None
mock_config.external_data_variables = []
mock_config.additional_features = type("obj", (object,), {"file_upload": None})()
mock_config.app_model_config_dict = {}
return mock_config

def _create_test_account_and_tenant(self, db_session_with_containers, mock_external_service_dependencies):
"""
Helper method to create a test account and tenant for testing.

Args:
db_session_with_containers: Database session from testcontainers infrastructure
mock_external_service_dependencies: Mock dependencies

Returns:
tuple: (account, tenant) - Created account and tenant instances
"""
fake = Faker()

# Create account
account = Account(
email=fake.email(),
name=fake.name(),
interface_language="en-US",
status="active",
)

from extensions.ext_database import db

db.session.add(account)
db.session.commit()

# Create tenant for the account
tenant = Tenant(
name=fake.company(),
status="normal",
)
db.session.add(tenant)
db.session.commit()

# Create tenant-account join
from models.account import TenantAccountJoin, TenantAccountRole

join = TenantAccountJoin(
tenant_id=tenant.id,
account_id=account.id,
role=TenantAccountRole.OWNER.value,
current=True,
)
db.session.add(join)
db.session.commit()

# Set current tenant for account
account.current_tenant = tenant

return account, tenant

def _create_test_app(self, db_session_with_containers, mock_external_service_dependencies, tenant, account):
"""
Helper method to create a test app for testing.

Args:
db_session_with_containers: Database session from testcontainers infrastructure
mock_external_service_dependencies: Mock dependencies
tenant: Tenant instance
account: Account instance

Returns:
App: Created app instance
"""
fake = Faker()

# Create app
app = App(
tenant_id=tenant.id,
name=fake.company(),
mode=AppMode.CHAT.value,
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
enable_site=True,
enable_api=True,
api_rpm=100,
api_rph=10,
is_demo=False,
is_public=False,
created_by=account.id,
updated_by=account.id,
)

from extensions.ext_database import db

db.session.add(app)
db.session.commit()

# Create app model config
app_model_config = AppModelConfig(
app_id=app.id,
provider="openai",
model="gpt-4",
configs={},
created_by=account.id,
updated_by=account.id,
)
db.session.add(app_model_config)
db.session.commit()

# Link app model config to app
app.app_model_config_id = app_model_config.id
db.session.commit()

return app

def test_convert_to_workflow_success(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test successful conversion of app to workflow.

This test verifies:
- Proper app to workflow conversion
- Correct database state after conversion
- Proper relationship establishment
- Workflow creation with correct configuration
"""
# Arrange: Create test data
fake = Faker()
account, tenant = self._create_test_account_and_tenant(
db_session_with_containers, mock_external_service_dependencies
)
app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, tenant, account)

# Act: Execute the conversion
workflow_converter = WorkflowConverter()
new_app = workflow_converter.convert_to_workflow(
app_model=app,
account=account,
name="Test Workflow App",
icon_type="emoji",
icon="🚀",
icon_background="#4CAF50",
)

# Assert: Verify the expected outcomes
assert new_app is not None
assert new_app.name == "Test Workflow App"
assert new_app.mode == AppMode.ADVANCED_CHAT.value
assert new_app.icon_type == "emoji"
assert new_app.icon == "🚀"
assert new_app.icon_background == "#4CAF50"
assert new_app.tenant_id == app.tenant_id
assert new_app.created_by == account.id

# Verify database state
from extensions.ext_database import db

db.session.refresh(new_app)
assert new_app.id is not None

# Verify workflow was created
workflow = db.session.query(Workflow).where(Workflow.app_id == new_app.id).first()
assert workflow is not None
assert workflow.tenant_id == app.tenant_id
assert workflow.type == "chat"

def test_convert_to_workflow_without_app_model_config_error(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test error handling when app model config is missing.

This test verifies:
- Proper error handling for missing app model config
- Correct exception type and message
- Database state remains unchanged
"""
# Arrange: Create test data without app model config
fake = Faker()
account, tenant = self._create_test_account_and_tenant(
db_session_with_containers, mock_external_service_dependencies
)

app = App(
tenant_id=tenant.id,
name=fake.company(),
mode=AppMode.CHAT.value,
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
enable_site=True,
enable_api=True,
api_rpm=100,
api_rph=10,
is_demo=False,
is_public=False,
created_by=account.id,
updated_by=account.id,
)

from extensions.ext_database import db

db.session.add(app)
db.session.commit()

# Act & Assert: Verify proper error handling
workflow_converter = WorkflowConverter()

# Check initial state
initial_workflow_count = db.session.query(Workflow).count()

with pytest.raises(ValueError, match="App model config is required"):
workflow_converter.convert_to_workflow(
app_model=app,
account=account,
name="Test Workflow App",
icon_type="emoji",
icon="🚀",
icon_background="#4CAF50",
)

# Verify database state remains unchanged
# The workflow creation happens in convert_app_model_config_to_workflow
# which is called before the app_model_config check, so we need to clean up
db.session.rollback()
final_workflow_count = db.session.query(Workflow).count()
assert final_workflow_count == initial_workflow_count

def test_convert_app_model_config_to_workflow_success(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test successful conversion of app model config to workflow.

This test verifies:
- Proper app model config to workflow conversion
- Correct workflow graph structure
- Proper node creation and configuration
- Database state management
"""
# Arrange: Create test data
fake = Faker()
account, tenant = self._create_test_account_and_tenant(
db_session_with_containers, mock_external_service_dependencies
)
app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, tenant, account)

# Act: Execute the conversion
workflow_converter = WorkflowConverter()
workflow = workflow_converter.convert_app_model_config_to_workflow(
app_model=app,
app_model_config=app.app_model_config,
account_id=account.id,
)

# Assert: Verify the expected outcomes
assert workflow is not None
assert workflow.tenant_id == app.tenant_id
assert workflow.app_id == app.id
assert workflow.type == "chat"
assert workflow.version == Workflow.VERSION_DRAFT
assert workflow.created_by == account.id

# Verify workflow graph structure
graph = json.loads(workflow.graph)
assert "nodes" in graph
assert "edges" in graph
assert len(graph["nodes"]) > 0
assert len(graph["edges"]) > 0

# Verify start node exists
start_node = next((node for node in graph["nodes"] if node["data"]["type"] == "start"), None)
assert start_node is not None
assert start_node["id"] == "start"

# Verify LLM node exists
llm_node = next((node for node in graph["nodes"] if node["data"]["type"] == "llm"), None)
assert llm_node is not None
assert llm_node["id"] == "llm"

# Verify answer node exists for chat mode
answer_node = next((node for node in graph["nodes"] if node["data"]["type"] == "answer"), None)
assert answer_node is not None
assert answer_node["id"] == "answer"

# Verify database state
from extensions.ext_database import db

db.session.refresh(workflow)
assert workflow.id is not None

# Verify features were set
features = json.loads(workflow._features) if workflow._features else {}
assert isinstance(features, dict)

def test_convert_to_start_node_success(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test successful conversion to start node.

This test verifies:
- Proper start node creation with variables
- Correct node structure and data
- Variable encoding and formatting
"""
# Arrange: Create test variables
variables = [
VariableEntity(
variable="text_input",
label="Text Input",
type=VariableEntityType.TEXT_INPUT,
),
VariableEntity(
variable="number_input",
label="Number Input",
type=VariableEntityType.NUMBER,
),
]

# Act: Execute the conversion
workflow_converter = WorkflowConverter()
start_node = workflow_converter._convert_to_start_node(variables=variables)

# Assert: Verify the expected outcomes
assert start_node is not None
assert start_node["id"] == "start"
assert start_node["data"]["title"] == "START"
assert start_node["data"]["type"] == "start"
assert len(start_node["data"]["variables"]) == 2

# Verify variable encoding
first_variable = start_node["data"]["variables"][0]
assert first_variable["variable"] == "text_input"
assert first_variable["label"] == "Text Input"
assert first_variable["type"] == "text-input"

second_variable = start_node["data"]["variables"][1]
assert second_variable["variable"] == "number_input"
assert second_variable["label"] == "Number Input"
assert second_variable["type"] == "number"

def test_convert_to_http_request_node_success(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test successful conversion to HTTP request node.

This test verifies:
- Proper HTTP request node creation
- Correct API configuration and authorization
- Code node creation for response parsing
- External data variable mapping
"""
# Arrange: Create test data
fake = Faker()
account, tenant = self._create_test_account_and_tenant(
db_session_with_containers, mock_external_service_dependencies
)

app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, tenant, account)

# Create API based extension
api_based_extension = APIBasedExtension(
tenant_id=tenant.id,
name="Test API Extension",
api_key="encrypted_api_key",
api_endpoint="https://api.example.com/test",
)

from extensions.ext_database import db

db.session.add(api_based_extension)
db.session.commit()

# Mock encrypter
mock_external_service_dependencies["encrypter"].decrypt_token.return_value = "decrypted_api_key"

variables = [
VariableEntity(
variable="user_input",
label="User Input",
type=VariableEntityType.TEXT_INPUT,
)
]

external_data_variables = [
ExternalDataVariableEntity(
variable="external_data", type="api", config={"api_based_extension_id": api_based_extension.id}
)
]

# Act: Execute the conversion
workflow_converter = WorkflowConverter()
nodes, external_data_variable_node_mapping = workflow_converter._convert_to_http_request_node(
app_model=app,
variables=variables,
external_data_variables=external_data_variables,
)

# Assert: Verify the expected outcomes
assert len(nodes) == 2 # HTTP request node + code node
assert len(external_data_variable_node_mapping) == 1

# Verify HTTP request node
http_request_node = nodes[0]
assert http_request_node["data"]["type"] == "http-request"
assert http_request_node["data"]["method"] == "post"
assert http_request_node["data"]["url"] == api_based_extension.api_endpoint
assert http_request_node["data"]["authorization"]["type"] == "api-key"
assert http_request_node["data"]["authorization"]["config"]["type"] == "bearer"
assert http_request_node["data"]["authorization"]["config"]["api_key"] == "decrypted_api_key"

# Verify code node
code_node = nodes[1]
assert code_node["data"]["type"] == "code"
assert code_node["data"]["code_language"] == "python3"
assert "response_json" in code_node["data"]["variables"][0]["variable"]

# Verify mapping
assert external_data_variable_node_mapping["external_data"] == code_node["id"]

def test_convert_to_knowledge_retrieval_node_success(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test successful conversion to knowledge retrieval node.

This test verifies:
- Proper knowledge retrieval node creation
- Correct dataset configuration
- Model configuration integration
- Query variable selector setup
"""
# Arrange: Create test data
fake = Faker()
account, tenant = self._create_test_account_and_tenant(
db_session_with_containers, mock_external_service_dependencies
)

# Create dataset config
dataset_config = DatasetEntity(
dataset_ids=["dataset_1", "dataset_2"],
retrieve_config=DatasetRetrieveConfigEntity(
retrieve_strategy=DatasetRetrieveConfigEntity.RetrieveStrategy.MULTIPLE,
top_k=10,
score_threshold=0.8,
reranking_model={"provider": "cohere", "model": "rerank-v2"},
reranking_enabled=True,
),
)

model_config = ModelConfigEntity(
provider="openai",
model="gpt-4",
mode=LLMMode.CHAT.value,
parameters={"temperature": 0.7},
stop=[],
)

# Act: Execute the conversion for advanced chat mode
workflow_converter = WorkflowConverter()
node = workflow_converter._convert_to_knowledge_retrieval_node(
new_app_mode=AppMode.ADVANCED_CHAT,
dataset_config=dataset_config,
model_config=model_config,
)

# Assert: Verify the expected outcomes
assert node is not None
assert node["data"]["type"] == "knowledge-retrieval"
assert node["data"]["title"] == "KNOWLEDGE RETRIEVAL"
assert node["data"]["dataset_ids"] == ["dataset_1", "dataset_2"]
assert node["data"]["retrieval_mode"] == "multiple"
assert node["data"]["query_variable_selector"] == ["sys", "query"]

# Verify multiple retrieval config
multiple_config = node["data"]["multiple_retrieval_config"]
assert multiple_config["top_k"] == 10
assert multiple_config["score_threshold"] == 0.8
assert multiple_config["reranking_model"]["provider"] == "cohere"
assert multiple_config["reranking_model"]["model"] == "rerank-v2"

# Verify single retrieval config is None for multiple strategy
assert node["data"]["single_retrieval_config"] is None

+ 64
- 1
api/tests/unit_tests/core/mcp/server/test_streamable_http.py Dosyayı Görüntüle

import json import json
from unittest.mock import Mock, patch from unittest.mock import Mock, patch


import jsonschema
import pytest import pytest


from core.app.app_config.entities import VariableEntity, VariableEntityType from core.app.app_config.entities import VariableEntity, VariableEntityType
assert parameters["category"]["enum"] == ["A", "B", "C"] assert parameters["category"]["enum"] == ["A", "B", "C"]


assert "count" in parameters assert "count" in parameters
assert parameters["count"]["type"] == "float"
assert parameters["count"]["type"] == "number"


# FILE type should be skipped - it creates empty dict but gets filtered later # FILE type should be skipped - it creates empty dict but gets filtered later
# Check that it doesn't have any meaningful content # Check that it doesn't have any meaningful content
assert "category" not in required assert "category" not in required


# Note: _get_request_id function has been removed as request_id is now passed as parameter # Note: _get_request_id function has been removed as request_id is now passed as parameter

def test_convert_input_form_to_parameters_jsonschema_validation_ok(self):
"""Current schema uses 'number' for numeric fields; it should be a valid JSON Schema."""
user_input_form = [
VariableEntity(
type=VariableEntityType.NUMBER,
variable="count",
description="Count",
label="Count",
required=True,
),
VariableEntity(
type=VariableEntityType.TEXT_INPUT,
variable="name",
description="User name",
label="Name",
required=False,
),
]

parameters_dict = {
"count": "Enter count",
"name": "Enter your name",
}

parameters, required = convert_input_form_to_parameters(user_input_form, parameters_dict)

# Build a complete JSON Schema
schema = {
"type": "object",
"properties": parameters,
"required": required,
}

# 1) The schema itself must be valid
jsonschema.Draft202012Validator.check_schema(schema)

# 2) Both float and integer instances should pass validation
jsonschema.validate(instance={"count": 3.14, "name": "alice"}, schema=schema)
jsonschema.validate(instance={"count": 2, "name": "bob"}, schema=schema)

def test_legacy_float_type_schema_is_invalid(self):
    """Legacy/buggy behavior: using 'float' should produce an invalid JSON Schema."""
    # Hand-build the schema shape the old code used to emit.
    legacy_schema = {
        "type": "object",
        "properties": {
            "count": {
                "type": "float",  # Invalid type: JSON Schema does not support 'float'
                "description": "Enter count",
            }
        },
        "required": ["count"],
    }

    # Checking the schema itself must raise SchemaError.
    with pytest.raises(jsonschema.exceptions.SchemaError):
        jsonschema.Draft202012Validator.check_schema(legacy_schema)

    # Using it to validate an instance must raise SchemaError as well.
    with pytest.raises(jsonschema.exceptions.SchemaError):
        jsonschema.validate(instance={"count": 1.23}, schema=legacy_schema)

+ 111
- 0
api/tests/unit_tests/core/workflow/entities/test_graph_runtime_state.py Dosyayı Görüntüle

from time import time

import pytest

from core.workflow.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.entities.variable_pool import VariablePool


class TestGraphRuntimeState:
    """Behavioral tests for GraphRuntimeState: property access, defensive
    copying of mutable members, input validation, and helper methods."""

    def test_property_getters_and_setters(self):
        """Scalar properties are readable and (except variable_pool) writable."""
        # FIXME(-LAN-): Mock VariablePool if needed
        pool = VariablePool()
        started_at = time()

        runtime_state = GraphRuntimeState(variable_pool=pool, start_at=started_at)

        # variable_pool is exposed read-only
        assert runtime_state.variable_pool == pool

        # start_at is readable and reassignable
        assert runtime_state.start_at == started_at
        shifted = time() + 100
        runtime_state.start_at = shifted
        assert runtime_state.start_at == shifted

        # total_tokens defaults to zero and accepts assignment
        assert runtime_state.total_tokens == 0
        runtime_state.total_tokens = 100
        assert runtime_state.total_tokens == 100

        # node_run_steps defaults to zero and accepts assignment
        assert runtime_state.node_run_steps == 0
        runtime_state.node_run_steps = 5
        assert runtime_state.node_run_steps == 5

    def test_outputs_immutability(self):
        """Reads of .outputs hand back copies; writes go through set/update APIs."""
        runtime_state = GraphRuntimeState(variable_pool=VariablePool(), start_at=time())

        # Two reads are equal but not the same object
        first = runtime_state.outputs
        second = runtime_state.outputs
        assert first == second
        assert first is not second  # Different objects

        # Mutating a returned copy must not leak into internal state
        snapshot = runtime_state.outputs
        snapshot["test"] = "value"
        assert "test" not in runtime_state.outputs

        # set_output / get_output round-trip
        runtime_state.set_output("key1", "value1")
        assert runtime_state.get_output("key1") == "value1"

        # update_outputs merges several keys at once
        runtime_state.update_outputs({"key2": "value2", "key3": "value3"})
        assert runtime_state.get_output("key2") == "value2"
        assert runtime_state.get_output("key3") == "value3"

    def test_llm_usage_immutability(self):
        """Each read of .llm_usage returns a fresh copy."""
        runtime_state = GraphRuntimeState(variable_pool=VariablePool(), start_at=time())

        # Consecutive reads must not be the same object
        assert runtime_state.llm_usage is not runtime_state.llm_usage

    def test_type_validation(self):
        """Negative counters are rejected with ValueError."""
        runtime_state = GraphRuntimeState(variable_pool=VariablePool(), start_at=time())

        # total_tokens rejects negatives
        with pytest.raises(ValueError):
            runtime_state.total_tokens = -1

        # node_run_steps rejects negatives
        with pytest.raises(ValueError):
            runtime_state.node_run_steps = -1

    def test_helper_methods(self):
        """increment_node_run_steps / add_tokens mutate counters correctly."""
        runtime_state = GraphRuntimeState(variable_pool=VariablePool(), start_at=time())

        # increment_node_run_steps bumps the counter by exactly one
        steps_before = runtime_state.node_run_steps
        runtime_state.increment_node_run_steps()
        assert runtime_state.node_run_steps == steps_before + 1

        # add_tokens accumulates onto the running total
        tokens_before = runtime_state.total_tokens
        runtime_state.add_tokens(50)
        assert runtime_state.total_tokens == tokens_before + 50

        # add_tokens rejects negative amounts
        with pytest.raises(ValueError):
            runtime_state.add_tokens(-1)

    def test_deep_copy_for_nested_objects(self):
        """Stored outputs are deep-copied, so callers cannot mutate them in place."""
        runtime_state = GraphRuntimeState(variable_pool=VariablePool(), start_at=time())

        # Store a nested structure, then mutate the retrieved copy
        payload = {"level1": {"level2": {"value": "test"}}}
        runtime_state.set_output("nested", payload)

        fetched = runtime_state.get_output("nested")
        fetched["level1"]["level2"]["value"] = "modified"

        # The internally-held value must be unaffected
        assert runtime_state.get_output("nested")["level1"]["level2"]["value"] == "test"

+ 4
- 4
api/tests/unit_tests/core/workflow/graph_engine/test_graph_engine.py Dosyayı Görüntüle



def test_layer_chaining(): def test_layer_chaining():
"""Test chaining multiple layers.""" """Test chaining multiple layers."""
from core.workflow.graph_engine.layers import DebugLoggingLayer, Layer
from core.workflow.graph_engine.layers import DebugLoggingLayer, GraphEngineLayer


# Create a custom test layer # Create a custom test layer
class TestLayer(Layer):
class TestLayer(GraphEngineLayer):
def __init__(self): def __init__(self):
super().__init__() super().__init__()
self.events_received = [] self.events_received = []


def test_layer_error_handling(): def test_layer_error_handling():
"""Test that layer errors don't crash the engine.""" """Test that layer errors don't crash the engine."""
from core.workflow.graph_engine.layers import Layer
from core.workflow.graph_engine.layers import GraphEngineLayer


# Create a layer that throws errors # Create a layer that throws errors
class FaultyLayer(Layer):
class FaultyLayer(GraphEngineLayer):
def on_graph_start(self): def on_graph_start(self):
raise RuntimeError("Intentional error in on_graph_start") raise RuntimeError("Intentional error in on_graph_start")



+ 12
- 5
api/tests/unit_tests/core/workflow/graph_engine/test_table_runner.py Dosyayı Görüntüle

from collections.abc import Callable, Sequence from collections.abc import Callable, Sequence
from concurrent.futures import ThreadPoolExecutor, as_completed from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass, field from dataclasses import dataclass, field
from functools import lru_cache
from pathlib import Path from pathlib import Path
from typing import Any, Optional from typing import Any, Optional


raise ValueError(f"Fixtures directory does not exist: {self.fixtures_dir}") raise ValueError(f"Fixtures directory does not exist: {self.fixtures_dir}")


def load_fixture(self, fixture_name: str) -> dict[str, Any]: def load_fixture(self, fixture_name: str) -> dict[str, Any]:
"""Load a YAML fixture file."""
"""Load a YAML fixture file with caching to avoid repeated parsing."""
if not fixture_name.endswith(".yml") and not fixture_name.endswith(".yaml"): if not fixture_name.endswith(".yml") and not fixture_name.endswith(".yaml"):
fixture_name = f"{fixture_name}.yml" fixture_name = f"{fixture_name}.yml"


fixture_path = self.fixtures_dir / fixture_name fixture_path = self.fixtures_dir / fixture_name
if not fixture_path.exists():
raise FileNotFoundError(f"Fixture file not found: {fixture_path}")

return load_yaml_file(str(fixture_path), ignore_error=False)
return _load_fixture(fixture_path, fixture_name)


def create_graph_from_fixture( def create_graph_from_fixture(
self, self,
report.append("=" * 80) report.append("=" * 80)


return "\n".join(report) return "\n".join(report)


@lru_cache(maxsize=32)
def _load_fixture(fixture_path: Path, fixture_name: str) -> dict[str, Any]:
    """Load a YAML fixture file with caching to avoid repeated parsing."""
    # NOTE(review): `fixture_name` is not used in the body; it only widens the
    # lru_cache key. Presumably kept for call-site symmetry — confirm intent.
    # NOTE(review): lru_cache returns the *same* dict object to every caller;
    # if a caller mutates the result, the cached fixture is corrupted for all
    # subsequent loads — verify callers treat the dict as read-only.
    if not fixture_path.exists():
        raise FileNotFoundError(f"Fixture file not found: {fixture_path}")

    # ignore_error=False: parse failures propagate instead of returning None
    return load_yaml_file(str(fixture_path), ignore_error=False)

+ 8
- 1
dev/basedpyright-check Dosyayı Görüntüle

SCRIPT_DIR="$(dirname "$(realpath "$0")")" SCRIPT_DIR="$(dirname "$(realpath "$0")")"
cd "$SCRIPT_DIR/.." cd "$SCRIPT_DIR/.."


# Get the path argument if provided
PATH_TO_CHECK="$1"

# run basedpyright checks # run basedpyright checks
uv run --directory api --dev basedpyright
if [ -n "$PATH_TO_CHECK" ]; then
uv run --directory api --dev basedpyright "$PATH_TO_CHECK"
else
uv run --directory api --dev basedpyright
fi

+ 1
- 1
web/app/components/app/configuration/config-var/config-modal/type-select.tsx Dosyayı Görüntüle

<InputVarTypeIcon type={selectedItem?.value as InputVarType} className='size-4 shrink-0 text-text-secondary' /> <InputVarTypeIcon type={selectedItem?.value as InputVarType} className='size-4 shrink-0 text-text-secondary' />
<span <span
className={` className={`
ml-1.5 ${!selectedItem?.name && 'text-components-input-text-placeholder'}
ml-1.5 text-components-input-text-filled ${!selectedItem?.name && 'text-components-input-text-placeholder'}
`} `}
> >
{selectedItem?.name} {selectedItem?.name}

+ 2
- 2
web/app/components/header/account-setting/model-provider-page/model-modal/index.tsx Dosyayı Görüntüle

const [selectedCredential, setSelectedCredential] = useState<Credential & { addNewCredential?: boolean } | undefined>() const [selectedCredential, setSelectedCredential] = useState<Credential & { addNewCredential?: boolean } | undefined>()
const formRef2 = useRef<FormRefObject>(null) const formRef2 = useRef<FormRefObject>(null)
const isEditMode = !!Object.keys(formValues).filter((key) => { const isEditMode = !!Object.keys(formValues).filter((key) => {
return key !== '__model_name' && key !== '__model_type'
return key !== '__model_name' && key !== '__model_type' && !!formValues[key]
}).length && isCurrentWorkspaceManager }).length && isCurrentWorkspaceManager


const handleSave = useCallback(async () => { const handleSave = useCallback(async () => {
__authorization_name__, __authorization_name__,
...rest ...rest
} = values } = values
if (__model_name && __model_type && __authorization_name__) {
if (__model_name && __model_type) {
await handleSaveCredential({ await handleSaveCredential({
credential_id: credential?.credential_id, credential_id: credential?.credential_id,
credentials: rest, credentials: rest,

+ 2
- 2
web/app/components/header/nav/nav-selector/index.tsx Dosyayı Görüntüle

import { debounce } from 'lodash-es' import { debounce } from 'lodash-es'
import cn from '@/utils/classnames' import cn from '@/utils/classnames'
import AppIcon from '@/app/components/base/app-icon' import AppIcon from '@/app/components/base/app-icon'
import { AiText, ChatBot, CuteRobot } from '@/app/components/base/icons/src/vender/solid/communication'
import { AiText, BubbleTextMod, ChatBot, CuteRobot } from '@/app/components/base/icons/src/vender/solid/communication'
import { Route } from '@/app/components/base/icons/src/vender/solid/mapsAndTravel' import { Route } from '@/app/components/base/icons/src/vender/solid/mapsAndTravel'
import { useAppContext } from '@/context/app-context' import { useAppContext } from '@/context/app-context'
import { useStore as useAppStore } from '@/app/components/app/store' import { useStore as useAppStore } from '@/app/components/app/store'
'absolute -bottom-0.5 -right-0.5 h-3.5 w-3.5 rounded border-[0.5px] border-[rgba(0,0,0,0.02)] bg-white p-0.5 shadow-sm', 'absolute -bottom-0.5 -right-0.5 h-3.5 w-3.5 rounded border-[0.5px] border-[rgba(0,0,0,0.02)] bg-white p-0.5 shadow-sm',
)}> )}>
{nav.mode === 'advanced-chat' && ( {nav.mode === 'advanced-chat' && (
<ChatBot className='h-2.5 w-2.5 text-[#1570EF]' />
<BubbleTextMod className='h-2.5 w-2.5 text-[#1570EF]' />
)} )}
{nav.mode === 'agent-chat' && ( {nav.mode === 'agent-chat' && (
<CuteRobot className='h-2.5 w-2.5 text-indigo-600' /> <CuteRobot className='h-2.5 w-2.5 text-indigo-600' />

+ 41
- 2
web/app/components/workflow/hooks/use-nodes-interactions.ts Dosyayı Görüntüle

import { import {
genNewNodeTitleFromOld, genNewNodeTitleFromOld,
generateNewNode, generateNewNode,
getNestedNodePosition,
getNodeCustomTypeByNodeDataType, getNodeCustomTypeByNodeDataType,
getNodesConnectedSourceOrTargetHandleIdsMap, getNodesConnectedSourceOrTargetHandleIdsMap,
getTopLeftNodePosition, getTopLeftNodePosition,
}) })
newChildren.push(newIterationStartNode!) newChildren.push(newIterationStartNode!)
} }

if (nodeToPaste.data.type === BlockEnum.Loop) {
else if (nodeToPaste.data.type === BlockEnum.Loop) {
newLoopStartNode!.parentId = newNode.id; newLoopStartNode!.parentId = newNode.id;
(newNode.data as LoopNodeType).start_node_id = newLoopStartNode!.id (newNode.data as LoopNodeType).start_node_id = newLoopStartNode!.id


}) })
newChildren.push(newLoopStartNode!) newChildren.push(newLoopStartNode!)
} }
else {
// single node paste
const selectedNode = nodes.find(node => node.selected)
if (selectedNode) {
const commonNestedDisallowPasteNodes = [
// end node only can be placed outermost layer
BlockEnum.End,
]

// handle disallow paste node
if (commonNestedDisallowPasteNodes.includes(nodeToPaste.data.type))
return

// handle paste to nested block
if (selectedNode.data.type === BlockEnum.Iteration) {
newNode.data.isInIteration = true
newNode.data.iteration_id = selectedNode.data.iteration_id
newNode.parentId = selectedNode.id
newNode.positionAbsolute = {
x: newNode.position.x,
y: newNode.position.y,
}
// set position base on parent node
newNode.position = getNestedNodePosition(newNode, selectedNode)
}
else if (selectedNode.data.type === BlockEnum.Loop) {
newNode.data.isInLoop = true
newNode.data.loop_id = selectedNode.data.loop_id
newNode.parentId = selectedNode.id
newNode.positionAbsolute = {
x: newNode.position.x,
y: newNode.position.y,
}
// set position base on parent node
newNode.position = getNestedNodePosition(newNode, selectedNode)
}
}
}


nodesToPaste.push(newNode) nodesToPaste.push(newNode)


nodesToPaste.push(...newChildren) nodesToPaste.push(...newChildren)
}) })


// only handle edge when paste nested block
edges.forEach((edge) => { edges.forEach((edge) => {
const sourceId = idMapping[edge.source] const sourceId = idMapping[edge.source]
const targetId = idMapping[edge.target] const targetId = idMapping[edge.target]

+ 2
- 1
web/app/components/workflow/operator/export-image.tsx Dosyayı Görüntüle

style: { style: {
width: `${contentWidth}px`, width: `${contentWidth}px`,
height: `${contentHeight}px`, height: `${contentHeight}px`,
transform: `translate(${padding - nodesBounds.x}px, ${padding - nodesBounds.y}px) scale(${zoom})`,
transform: `translate(${padding - nodesBounds.x}px, ${padding - nodesBounds.y}px)`,
transformOrigin: 'top left',
}, },
} }



+ 7
- 0
web/app/components/workflow/utils/node.ts Dosyayı Görüntüle

} }
} }


/**
 * Convert a node's position into coordinates relative to its parent node,
 * by subtracting the parent's position component-wise.
 */
export const getNestedNodePosition = (node: Node, parentNode: Node) => {
  const { x: childX, y: childY } = node.position
  const { x: parentX, y: parentY } = parentNode.position

  return {
    x: childX - parentX,
    y: childY - parentY,
  }
}

export const hasRetryNode = (nodeType?: BlockEnum) => { export const hasRetryNode = (nodeType?: BlockEnum) => {
return nodeType === BlockEnum.LLM || nodeType === BlockEnum.Tool || nodeType === BlockEnum.HttpRequest || nodeType === BlockEnum.Code return nodeType === BlockEnum.LLM || nodeType === BlockEnum.Tool || nodeType === BlockEnum.HttpRequest || nodeType === BlockEnum.Code
} }

+ 2
- 0
web/global.d.ts Dosyayı Görüntüle

let MDXComponent: (props: any) => JSX.Element let MDXComponent: (props: any) => JSX.Element
export default MDXComponent export default MDXComponent
} }

import './types/i18n'

+ 120
- 0
web/i18n-config/check-i18n-sync.js Dosyayı Görüntüle

#!/usr/bin/env node

const fs = require('fs')
const path = require('path')
const { camelCase } = require('lodash')

// Read the NAMESPACES array out of i18next-config.ts (parsed textually,
// since this script runs under plain node and cannot import TypeScript).
function getNamespacesFromConfig() {
  const configPath = path.join(__dirname, 'i18next-config.ts')
  const source = fs.readFileSync(configPath, 'utf8')
  // Locate the array literal with a non-greedy regex
  const arrayMatch = source.match(/const NAMESPACES = \[([\s\S]*?)\]/)
  if (!arrayMatch) {
    throw new Error('Could not find NAMESPACES array in i18next-config.ts')
  }
  // Keep only quoted entries and strip the surrounding quote characters
  return arrayMatch[1]
    .split(',')
    .map(entry => entry.trim())
    .filter(entry => entry.startsWith("'") || entry.startsWith('"'))
    .map(entry => entry.slice(1, -1))
}

// Extract the namespace property names from the generated types/i18n.d.ts.
// Returns null when the file or the Messages type cannot be found.
function getNamespacesFromTypes() {
  const typesPath = path.join(__dirname, '../types/i18n.d.ts')
  if (!fs.existsSync(typesPath)) {
    return null
  }
  const source = fs.readFileSync(typesPath, 'utf8')
  // Grab the body of the Messages type alias
  const bodyMatch = source.match(/export type Messages = \{([\s\S]*?)\}/)
  if (!bodyMatch) {
    return null
  }
  // Each property line looks like "name: TypeName;" — keep the key part
  return bodyMatch[1]
    .split('\n')
    .map(line => line.trim())
    .filter(line => line.includes(':'))
    .map(line => line.split(':')[0].trim())
    .filter(prop => prop.length > 0)
}

// Entry point: verify that the generated i18n type definitions list exactly
// the namespaces declared in i18next-config.ts. Exits non-zero on mismatch.
function main() {
  try {
    console.log('🔍 Checking i18n types synchronization...')
    const configNamespaces = getNamespacesFromConfig()
    console.log(`📦 Found ${configNamespaces.length} namespaces in config`)
    // Config names are kebab-case; the type file uses camelCase keys
    const expected = configNamespaces.map(ns => camelCase(ns)).sort()
    const typeNamespaces = getNamespacesFromTypes()
    if (!typeNamespaces) {
      console.error('❌ Type definitions file not found or invalid')
      console.error(' Run: pnpm run gen:i18n-types')
      process.exit(1)
    }
    console.log(`🔧 Found ${typeNamespaces.length} namespaces in types`)
    const actual = typeNamespaces.sort()
    // Set-diff both directions so we report missing AND stale entries
    const expectedSet = new Set(expected)
    const actualSet = new Set(actual)
    const missingInTypes = expected.filter(ns => !actualSet.has(ns))
    const extraInTypes = actual.filter(ns => !expectedSet.has(ns))
    const hasErrors = missingInTypes.length > 0 || extraInTypes.length > 0
    if (missingInTypes.length > 0) {
      console.error('❌ Missing in type definitions:')
      missingInTypes.forEach(ns => console.error(` - ${ns}`))
    }
    if (extraInTypes.length > 0) {
      console.error('❌ Extra in type definitions:')
      extraInTypes.forEach(ns => console.error(` - ${ns}`))
    }
    if (hasErrors) {
      console.error('\n💡 To fix synchronization issues:')
      console.error(' Run: pnpm run gen:i18n-types')
      process.exit(1)
    }
    console.log('✅ i18n types are synchronized')
  } catch (error) {
    console.error('❌ Error:', error.message)
    process.exit(1)
  }
}

// Run only when executed directly, not when required as a module
if (require.main === module) {
  main()
}

+ 135
- 0
web/i18n-config/generate-i18n-types.js Dosyayı Görüntüle

#!/usr/bin/env node

const fs = require('fs')
const path = require('path')
const { camelCase } = require('lodash')

// Textually parse the NAMESPACES array out of i18next-config.ts; this script
// runs under plain node, so the TypeScript file cannot simply be imported.
function getNamespacesFromConfig() {
  const configPath = path.join(__dirname, 'i18next-config.ts')
  const source = fs.readFileSync(configPath, 'utf8')
  // Find the array literal (non-greedy so we stop at the first closing bracket)
  const arrayMatch = source.match(/const NAMESPACES = \[([\s\S]*?)\]/)
  if (!arrayMatch) {
    throw new Error('Could not find NAMESPACES array in i18next-config.ts')
  }
  const entries = arrayMatch[1].split(',').map(entry => entry.trim())
  // Keep quoted entries only, then drop the quote characters
  return entries
    .filter(entry => entry.startsWith("'") || entry.startsWith('"'))
    .map(entry => entry.slice(1, -1))
}

// Build the complete contents of types/i18n.d.ts from the namespace list.
// The emitted bytes matter: `--check` mode compares them against the file on
// disk, so the template literals below must not be reformatted.
function generateTypeDefinitions(namespaces) {
  const header = `// TypeScript type definitions for Dify's i18next configuration
// This file is auto-generated. Do not edit manually.
// To regenerate, run: pnpm run gen:i18n-types
import 'react-i18next'

// Extract types from translation files using typeof import pattern`

  // Generate individual type definitions
  // (one "type XMessages = typeof import(...)" alias per namespace)
  const typeDefinitions = namespaces.map(namespace => {
    // kebab-case namespace -> PascalCase alias name, e.g. app-log -> AppLogMessages
    const typeName = camelCase(namespace).replace(/^\w/, c => c.toUpperCase()) + 'Messages'
    return `type ${typeName} = typeof import('../i18n/en-US/${namespace}').default`
  }).join('\n')

  // Generate Messages interface
  const messagesInterface = `
// Complete type structure that matches i18next-config.ts camelCase conversion
export type Messages = {
${namespaces.map(namespace => {
    const camelCased = camelCase(namespace)
    const typeName = camelCase(namespace).replace(/^\w/, c => c.toUpperCase()) + 'Messages'
    return ` ${camelCased}: ${typeName};`
  }).join('\n')}
}`

  const utilityTypes = `
// Utility type to flatten nested object keys into dot notation
type FlattenKeys<T> = T extends object
? {
[K in keyof T]: T[K] extends object
? \`\${K & string}.\${FlattenKeys<T[K]> & string}\`
: \`\${K & string}\`
}[keyof T]
: never

export type ValidTranslationKeys = FlattenKeys<Messages>`

  const moduleDeclarations = `
// Extend react-i18next with Dify's type structure
declare module 'react-i18next' {
interface CustomTypeOptions {
defaultNS: 'translation';
resources: {
translation: Messages;
};
}
}

// Extend i18next for complete type safety
declare module 'i18next' {
interface CustomTypeOptions {
defaultNS: 'translation';
resources: {
translation: Messages;
};
}
}`

  // Sections separated by a blank line, matching the on-disk file layout
  return [header, typeDefinitions, messagesInterface, utilityTypes, moduleDeclarations].join('\n\n')
}

// Entry point: write types/i18n.d.ts, or with --check, verify that the file
// on disk matches what would be generated. Exits non-zero on any failure.
function main() {
  const isCheckMode = process.argv.slice(2).includes('--check')
  try {
    console.log('📦 Generating i18n type definitions...')
    const namespaces = getNamespacesFromConfig()
    console.log(`✅ Found ${namespaces.length} namespaces`)
    const generated = generateTypeDefinitions(namespaces)
    const targetPath = path.join(__dirname, '../types/i18n.d.ts')
    if (!isCheckMode) {
      // Generate mode: (re)write the definitions file
      fs.writeFileSync(targetPath, generated)
      console.log(`✅ Generated type definitions: ${targetPath}`)
      return
    }
    // Check mode: compare freshly generated output with the file on disk
    if (!fs.existsSync(targetPath)) {
      console.error('❌ Type definitions file does not exist')
      process.exit(1)
    }
    const onDisk = fs.readFileSync(targetPath, 'utf8')
    if (onDisk.trim() !== generated.trim()) {
      console.error('❌ Type definitions are out of sync')
      console.error(' Run: pnpm run gen:i18n-types')
      process.exit(1)
    }
    console.log('✅ Type definitions are in sync')
  } catch (error) {
    console.error('❌ Error:', error.message)
    process.exit(1)
  }
}

// Run only when executed directly, not when required as a module
if (require.main === module) {
  main()
}

+ 10
- 10
web/i18n/id-ID/app-annotation.ts Dosyayı Görüntüle

}, },
table: { table: {
header: { header: {
answer: 'menjawab',
answer: 'Jawaban',
question: 'pertanyaan', question: 'pertanyaan',
createdAt: 'dibuat di', createdAt: 'dibuat di',
hits: 'Hits',
hits: 'Kecocokan',
addAnnotation: 'Tambahkan Anotasi', addAnnotation: 'Tambahkan Anotasi',
bulkImport: 'Impor Massal', bulkImport: 'Impor Massal',
clearAllConfirm: 'Menghapus semua anotasi?', clearAllConfirm: 'Menghapus semua anotasi?',
answerName: 'Bot Pendongeng', answerName: 'Bot Pendongeng',
}, },
addModal: { addModal: {
answerName: 'Menjawab',
answerName: 'Jawaban',
title: 'Tambahkan Anotasi Balasan', title: 'Tambahkan Anotasi Balasan',
queryName: 'Pertanyaan', queryName: 'Pertanyaan',
createNext: 'Tambahkan respons beranotasi lainnya', createNext: 'Tambahkan respons beranotasi lainnya',
run: 'Jalankan Batch', run: 'Jalankan Batch',
cancel: 'Membatalkan', cancel: 'Membatalkan',
title: 'Impor Massal', title: 'Impor Massal',
browse: 'ramban',
browse: 'Telusuri',
template: 'Unduh templat di sini', template: 'Unduh templat di sini',
tip: 'File CSV harus sesuai dengan struktur berikut:', tip: 'File CSV harus sesuai dengan struktur berikut:',
answer: 'menjawab',
answer: 'Jawaban',
contentTitle: 'konten potongan', contentTitle: 'konten potongan',
processing: 'Dalam pemrosesan batch', processing: 'Dalam pemrosesan batch',
completed: 'Impor selesai', completed: 'Impor selesai',
answerRequired: 'Jawaban diperlukan', answerRequired: 'Jawaban diperlukan',
}, },
viewModal: { viewModal: {
hit: 'Pukul',
hitHistory: 'Riwayat Hit',
noHitHistory: 'Tidak ada riwayat hit',
hit: 'Kecocokan',
hitHistory: 'Riwayat Kecocokan',
noHitHistory: 'Tidak ada riwayat kecocokan',
annotatedResponse: 'Balas Anotasi', annotatedResponse: 'Balas Anotasi',
hits: 'Hits',
hits: 'Kecocokan',
}, },
hitHistoryTable: { hitHistoryTable: {
response: 'Jawaban', response: 'Jawaban',
match: 'Korek api',
match: 'Kecocokan',
query: 'Kueri', query: 'Kueri',
source: 'Sumber', source: 'Sumber',
time: 'Waktu', time: 'Waktu',

+ 13
- 13
web/i18n/id-ID/app-api.ts Dosyayı Görüntüle

completionMode: { completionMode: {
createCompletionApi: 'Membuat Pesan Penyelesaian', createCompletionApi: 'Membuat Pesan Penyelesaian',
messageIDTip: 'ID Pesan', messageIDTip: 'ID Pesan',
messageFeedbackApi: 'Umpan balik pesan (seperti)',
ratingTip: 'suka atau tidak suka, null adalah undo',
messageFeedbackApi: 'Umpan balik pesan (mis. spam, tidak relevan, pujian)',
ratingTip: '(mis. suka/tidak suka), null berarti membatalkan penilaian',
parametersApi: 'Dapatkan informasi parameter aplikasi', parametersApi: 'Dapatkan informasi parameter aplikasi',
parametersApiTip: 'Ambil parameter Input yang dikonfigurasi, termasuk nama variabel, nama bidang, jenis, dan nilai default. Biasanya digunakan untuk menampilkan bidang ini dalam formulir atau mengisi nilai default setelah klien dimuat.', parametersApiTip: 'Ambil parameter Input yang dikonfigurasi, termasuk nama variabel, nama bidang, jenis, dan nilai default. Biasanya digunakan untuk menampilkan bidang ini dalam formulir atau mengisi nilai default setelah klien dimuat.',
info: 'Untuk pembuatan teks berkualitas tinggi, seperti artikel, ringkasan, dan terjemahan, gunakan API pesan penyelesaian dengan input pengguna. Pembuatan teks bergantung pada parameter model dan templat prompt yang ditetapkan di Dify Prompt Engineering.', info: 'Untuk pembuatan teks berkualitas tinggi, seperti artikel, ringkasan, dan terjemahan, gunakan API pesan penyelesaian dengan input pengguna. Pembuatan teks bergantung pada parameter model dan templat prompt yang ditetapkan di Dify Prompt Engineering.',
conversationsListLimitTip: 'Berapa banyak obrolan yang dikembalikan dalam satu permintaan', conversationsListLimitTip: 'Berapa banyak obrolan yang dikembalikan dalam satu permintaan',
chatMsgHistoryLimit: 'Berapa banyak obrolan yang dikembalikan dalam satu permintaan', chatMsgHistoryLimit: 'Berapa banyak obrolan yang dikembalikan dalam satu permintaan',
conversationsListFirstIdTip: 'ID rekaman terakhir di halaman saat ini, default tidak ada.', conversationsListFirstIdTip: 'ID rekaman terakhir di halaman saat ini, default tidak ada.',
messageFeedbackApi: 'Umpan balik pengguna terminal pesan, seperti',
messageFeedbackApi: 'Umpan balik pengguna terminal pesan (mis. spam, tidak relevan, pujian)',
parametersApi: 'Dapatkan informasi parameter aplikasi', parametersApi: 'Dapatkan informasi parameter aplikasi',
streaming: 'streaming kembali. Implementasi pengembalian streaming berdasarkan SSE (Server-Sent Events).', streaming: 'streaming kembali. Implementasi pengembalian streaming berdasarkan SSE (Server-Sent Events).',
inputsTips: '(Opsional) Berikan bidang input pengguna sebagai pasangan kunci-nilai, sesuai dengan variabel di Prompt Eng. Kunci adalah nama variabel, Nilai adalah nilai parameter. Jika jenis bidang adalah Pilih, Nilai yang dikirimkan harus menjadi salah satu pilihan prasetel.', inputsTips: '(Opsional) Berikan bidang input pengguna sebagai pasangan kunci-nilai, sesuai dengan variabel di Prompt Eng. Kunci adalah nama variabel, Nilai adalah nilai parameter. Jika jenis bidang adalah Pilih, Nilai yang dikirimkan harus menjadi salah satu pilihan prasetel.',
createChatApiTip: 'Buat pesan percakapan baru atau lanjutkan dialog yang ada.', createChatApiTip: 'Buat pesan percakapan baru atau lanjutkan dialog yang ada.',
chatMsgHistoryApiTip: 'Halaman pertama mengembalikan bilah \'batas\' terbaru, yang dalam urutan terbalik.', chatMsgHistoryApiTip: 'Halaman pertama mengembalikan bilah \'batas\' terbaru, yang dalam urutan terbalik.',
conversationsListApi: 'Dapatkan daftar percakapan', conversationsListApi: 'Dapatkan daftar percakapan',
ratingTip: 'suka atau tidak suka, null adalah undo',
ratingTip: '(mis. suka/tidak suka), null berarti membatalkan penilaian',
conversationRenamingApi: 'Penggantian nama percakapan', conversationRenamingApi: 'Penggantian nama percakapan',
}, },
develop: { develop: {
pathParams: 'Parameter Jalur', pathParams: 'Parameter Jalur',
requestBody: 'Isi Permintaan', requestBody: 'Isi Permintaan',
}, },
apiServer: 'API Server',
apiServer: 'Server API',
copied: 'Disalin', copied: 'Disalin',
copy: 'Menyalin',
ok: 'Dalam Layanan',
regenerate: 'Regenerasi',
status: 'Keadaan',
copy: 'Salin',
ok: 'OK',
regenerate: 'Hasilkan Ulang',
status: 'Status',
never: 'Tidak pernah', never: 'Tidak pernah',
playing: 'Bermain',
play: 'Bermain',
disabled: 'Cacat',
playing: 'Sedang Memutar',
play: 'Putar',
disabled: 'Dinonaktifkan',
apiKey: 'Kunci API', apiKey: 'Kunci API',
pause: 'Jeda', pause: 'Jeda',
loading: 'Loading',
loading: 'Memuat...',
} }


export default translation export default translation

+ 4
- 4
web/i18n/id-ID/app-debug.ts Dosyayı Görüntüle

}, },
variableTable: { variableTable: {
action: 'Tindakan', action: 'Tindakan',
typeString: 'Tali',
typeString: 'String',
optional: 'Fakultatif', optional: 'Fakultatif',
typeSelect: 'Pilih', typeSelect: 'Pilih',
type: 'Jenis Masukan', type: 'Jenis Masukan',
name: 'Audio', name: 'Audio',
}, },
document: { document: {
name: 'Surat',
name: 'Dokumen',
}, },
video: { video: {
name: 'Video', name: 'Video',
language: 'Bahasa', language: 'Bahasa',
title: 'Pengaturan Suara', title: 'Pengaturan Suara',
autoPlay: 'Putar Otomatis', autoPlay: 'Putar Otomatis',
autoPlayDisabled: 'Off',
autoPlayDisabled: 'Dinonaktifkan',
resolutionTooltip: 'Bahasa pendukung suara text-to-speech。', resolutionTooltip: 'Bahasa pendukung suara text-to-speech。',
}, },
settings: 'Pengaturan', settings: 'Pengaturan',
}, },
inputs: { inputs: {
queryPlaceholder: 'Silakan masukkan teks permintaan.', queryPlaceholder: 'Silakan masukkan teks permintaan.',
run: 'LARI',
run: 'Jalankan',
completionVarTip: 'Isi nilai variabel, yang akan secara otomatis diganti dengan kata-kata prompt setiap kali pertanyaan diajukan.', completionVarTip: 'Isi nilai variabel, yang akan secara otomatis diganti dengan kata-kata prompt setiap kali pertanyaan diajukan.',
noVar: 'Isi nilai variabel, yang akan secara otomatis diganti dalam kata prompt setiap kali sesi baru dimulai.', noVar: 'Isi nilai variabel, yang akan secara otomatis diganti dalam kata prompt setiap kali sesi baru dimulai.',
noPrompt: 'Coba tulis beberapa prompt dalam input pra-prompt', noPrompt: 'Coba tulis beberapa prompt dalam input pra-prompt',

+ 8
- 8
web/i18n/id-ID/app-log.ts Dosyayı Görüntüle

version: 'VERSI', version: 'VERSI',
time: 'Waktu yang dibuat', time: 'Waktu yang dibuat',
messageCount: 'Jumlah Pesan', messageCount: 'Jumlah Pesan',
summary: 'Titel',
adminRate: 'Tingkat Op.',
summary: 'Ringkasan',
adminRate: 'Tingkat Admin',
user: 'Pengguna Akhir atau Akun', user: 'Pengguna Akhir atau Akun',
startTime: 'WAKTU MULAI', startTime: 'WAKTU MULAI',
updatedTime: 'Waktu yang diperbarui', updatedTime: 'Waktu yang diperbarui',
runtime: 'WAKTU BERJALAN', runtime: 'WAKTU BERJALAN',
}, },
pagination: { pagination: {
previous: 'Prev',
next: 'Depan',
previous: 'Sebelumnya',
next: 'Selanjutnya',
}, },
empty: { empty: {
element: { element: {
}, },
}, },
detail: { detail: {
timeConsuming: '',
timeConsuming: 'Memakan waktu',
operation: { operation: {
dislike: 'tidak suka', dislike: 'tidak suka',
like: 'suka', like: 'suka',
addAnnotation: 'Tambahkan Peningkatan',
editAnnotation: 'Edit Peningkatan',
addAnnotation: 'Tambahkan Anotasi',
editAnnotation: 'Edit Anotasi',
annotationPlaceholder: 'Masukkan jawaban yang diharapkan yang Anda inginkan untuk dibalas AI, yang dapat digunakan untuk penyempurnaan model dan peningkatan berkelanjutan kualitas pembuatan teks di masa mendatang.', annotationPlaceholder: 'Masukkan jawaban yang diharapkan yang Anda inginkan untuk dibalas AI, yang dapat digunakan untuk penyempurnaan model dan peningkatan berkelanjutan kualitas pembuatan teks di masa mendatang.',
}, },
time: 'Waktu', time: 'Waktu',
}, },
ascending: 'Naik', ascending: 'Naik',
descending: 'Turun', descending: 'Turun',
sortBy: 'Kota hitam:',
sortBy: 'Urutkan berdasarkan',
}, },
runDetail: { runDetail: {
fileListDetail: 'Detail', fileListDetail: 'Detail',

+ 2
- 2
web/i18n/id-ID/app-overview.ts Dosyayı Görüntüle

explanation: 'Mudah diintegrasikan ke dalam aplikasi Anda', explanation: 'Mudah diintegrasikan ke dalam aplikasi Anda',
}, },
status: { status: {
disable: 'Cacat',
running: 'Dalam Layanan',
disable: 'Nonaktif',
running: 'Berjalan',
}, },
title: 'Ikhtisar', title: 'Ikhtisar',
}, },

+ 8
- 8
web/i18n/id-ID/app.ts Dosyayı Görüntüle

appCreated: 'Aplikasi dibuat', appCreated: 'Aplikasi dibuat',
appNamePlaceholder: 'Beri nama aplikasi Anda', appNamePlaceholder: 'Beri nama aplikasi Anda',
appCreateDSLErrorPart3: 'Versi DSL aplikasi saat ini:', appCreateDSLErrorPart3: 'Versi DSL aplikasi saat ini:',
Cancel: 'Membatalkan',
Cancel: 'Batal',
previewDemo: 'Pratinjau demo', previewDemo: 'Pratinjau demo',
appCreateDSLWarning: 'Perhatian: Perbedaan versi DSL dapat memengaruhi fitur tertentu', appCreateDSLWarning: 'Perhatian: Perbedaan versi DSL dapat memengaruhi fitur tertentu',
appCreateDSLErrorPart1: 'Perbedaan yang signifikan dalam versi DSL telah terdeteksi. Memaksa impor dapat menyebabkan aplikasi tidak berfungsi.', appCreateDSLErrorPart1: 'Perbedaan yang signifikan dalam versi DSL telah terdeteksi. Memaksa impor dapat menyebabkan aplikasi tidak berfungsi.',
showTemplates: 'Saya ingin memilih dari templat', showTemplates: 'Saya ingin memilih dari templat',
caution: 'Hati', caution: 'Hati',
chatbotShortDescription: 'Chatbot berbasis LLM dengan pengaturan sederhana', chatbotShortDescription: 'Chatbot berbasis LLM dengan pengaturan sederhana',
Confirm: 'Mengkonfirmasi',
Confirm: 'Konfirmasi',
agentAssistant: 'Asisten Agen Baru', agentAssistant: 'Asisten Agen Baru',
appCreateFailed: 'Gagal membuat aplikasi', appCreateFailed: 'Gagal membuat aplikasi',
appCreateDSLErrorTitle: 'Ketidakcocokan Versi', appCreateDSLErrorTitle: 'Ketidakcocokan Versi',
appTypeRequired: 'Silakan pilih jenis aplikasi', appTypeRequired: 'Silakan pilih jenis aplikasi',
advancedShortDescription: 'Alur kerja disempurnakan untuk obrolan multi-giliran', advancedShortDescription: 'Alur kerja disempurnakan untuk obrolan multi-giliran',
completeAppIntro: 'Saya ingin membuat aplikasi yang menghasilkan teks berkualitas tinggi berdasarkan petunjuk, seperti menghasilkan artikel, ringkasan, terjemahan, dan banyak lagi.', completeAppIntro: 'Saya ingin membuat aplikasi yang menghasilkan teks berkualitas tinggi berdasarkan petunjuk, seperti menghasilkan artikel, ringkasan, terjemahan, dan banyak lagi.',
Create: 'Menciptakan',
Create: 'Buat',
advancedUserDescription: 'Alur kerja dengan fitur memori tambahan dan antarmuka chatbot.', advancedUserDescription: 'Alur kerja dengan fitur memori tambahan dan antarmuka chatbot.',
dropDSLToCreateApp: 'Jatuhkan file DSL di sini untuk membuat aplikasi', dropDSLToCreateApp: 'Jatuhkan file DSL di sini untuk membuat aplikasi',
completeApp: 'Pembuat Teks', completeApp: 'Pembuat Teks',
searchAllTemplate: 'Cari semua templat...', searchAllTemplate: 'Cari semua templat...',
}, },
iconPicker: { iconPicker: {
cancel: 'Membatalkan',
cancel: 'Batal',
emoji: 'Emoji', emoji: 'Emoji',
image: 'Citra', image: 'Citra',
ok: 'OKE',
ok: 'OK',
}, },
answerIcon: { answerIcon: {
title: 'Gunakan ikon aplikasi web untuk mengganti 🤖', title: 'Gunakan ikon aplikasi web untuk mengganti 🤖',
}, },
weave: { weave: {
description: 'Weave adalah platform sumber terbuka untuk mengevaluasi, menguji, dan memantau aplikasi LLM.', description: 'Weave adalah platform sumber terbuka untuk mengevaluasi, menguji, dan memantau aplikasi LLM.',
title: 'Anyam',
title: 'Weave',
}, },
aliyun: { aliyun: {
title: 'Monitor Awan', title: 'Monitor Awan',
collapse: 'Roboh', collapse: 'Roboh',
tracing: 'Menelusuri', tracing: 'Menelusuri',
title: 'Melacak performa aplikasi', title: 'Melacak performa aplikasi',
disabled: 'Cacat',
enabled: 'Dalam Layanan',
disabled: 'Nonaktif',
enabled: 'Aktif',
config: 'Konfigurasi', config: 'Konfigurasi',
description: 'Mengonfigurasi penyedia LLMOps Pihak Ketiga dan melacak performa aplikasi.', description: 'Mengonfigurasi penyedia LLMOps Pihak Ketiga dan melacak performa aplikasi.',
inUse: 'Sedang digunakan', inUse: 'Sedang digunakan',

+ 18
- 18
web/i18n/id-ID/common.ts Dosyayı Görüntüle

const translation = { const translation = {
theme: { theme: {
theme: 'Tema', theme: 'Tema',
light: 'ringan',
auto: 'sistem',
dark: 'gelap',
light: 'Terang',
auto: 'Otomatis',
dark: 'Gelap',
}, },
api: { api: {
success: 'Keberhasilan', success: 'Keberhasilan',
setup: 'Setup', setup: 'Setup',
download: 'Mengunduh', download: 'Mengunduh',
getForFree: 'Dapatkan gratis', getForFree: 'Dapatkan gratis',
reload: 'Reload',
lineBreak: 'Istirahat baris',
reload: 'Muat Ulang',
lineBreak: 'Baris Baru',
learnMore: 'Pelajari lebih lanjut', learnMore: 'Pelajari lebih lanjut',
saveAndRegenerate: 'Simpan & Buat Ulang Potongan Anak', saveAndRegenerate: 'Simpan & Buat Ulang Potongan Anak',
zoomOut: 'Perkecil', zoomOut: 'Perkecil',
selectAll: 'Pilih Semua', selectAll: 'Pilih Semua',
in: 'di', in: 'di',
skip: 'Lewat', skip: 'Lewat',
remove: 'Buka',
remove: 'Hapus',
rename: 'Ubah nama', rename: 'Ubah nama',
close: 'Tutup', close: 'Tutup',
ok: 'OKE', ok: 'OKE',
log: 'Batang', log: 'Batang',
delete: 'Menghapus', delete: 'Menghapus',
viewDetails: 'Lihat Detail', viewDetails: 'Lihat Detail',
view: 'Melihat',
clear: 'Jelas',
view: 'Lihat',
clear: 'Hapus',
deleteApp: 'Hapus Aplikasi', deleteApp: 'Hapus Aplikasi',
downloadSuccess: 'Unduh Selesai.', downloadSuccess: 'Unduh Selesai.',
change: 'Ubah', change: 'Ubah',
copied: 'Disalin', copied: 'Disalin',
deSelectAll: 'Batalkan pilihan Semua', deSelectAll: 'Batalkan pilihan Semua',
saveAndEnable: 'Simpan & Aktifkan', saveAndEnable: 'Simpan & Aktifkan',
refresh: 'Restart',
refresh: 'Segarkan',
downloadFailed: 'Unduhan gagal. Silakan coba lagi nanti.', downloadFailed: 'Unduhan gagal. Silakan coba lagi nanti.',
edit: 'Mengedit', edit: 'Mengedit',
send: 'Kirim', send: 'Kirim',
add: 'Tambah', add: 'Tambah',
copy: 'Menyalin', copy: 'Menyalin',
audioSourceUnavailable: 'AudioSource tidak tersedia', audioSourceUnavailable: 'AudioSource tidak tersedia',
submit: 'Tunduk',
submit: 'Kirim',
duplicate: 'Duplikat', duplicate: 'Duplikat',
save: 'Simpan', save: 'Simpan',
added: 'Ditambahkan', added: 'Ditambahkan',
}, },
}, },
unit: { unit: {
char: 'Tank',
char: 'karakter',
}, },
actionMsg: { actionMsg: {
noModification: 'Tidak ada modifikasi saat ini.', noModification: 'Tidak ada modifikasi saat ini.',
account: 'Rekening', account: 'Rekening',
newApp: 'Aplikasi Baru', newApp: 'Aplikasi Baru',
explore: 'Menjelajahi', explore: 'Menjelajahi',
apps: 'Belajar',
apps: 'Aplikasi',
status: 'beta', status: 'beta',
tools: 'Perkakas', tools: 'Perkakas',
exploreMarketplace: 'Jelajahi Marketplace', exploreMarketplace: 'Jelajahi Marketplace',
settings: 'Pengaturan', settings: 'Pengaturan',
support: 'Dukung', support: 'Dukung',
github: 'GitHub', github: 'GitHub',
about: 'Sekitar',
about: 'Tentang',
workspace: 'Workspace', workspace: 'Workspace',
createWorkspace: 'Membuat Ruang Kerja', createWorkspace: 'Membuat Ruang Kerja',
}, },
}, },
integratedAlert: 'Notion terintegrasi melalui kredensial internal, tidak perlu mengotorisasi ulang.', integratedAlert: 'Notion terintegrasi melalui kredensial internal, tidak perlu mengotorisasi ulang.',
disconnected: 'Terputus', disconnected: 'Terputus',
remove: 'Buka',
remove: 'Hapus',
addWorkspace: 'Menambahkan ruang kerja', addWorkspace: 'Menambahkan ruang kerja',
description: 'Menggunakan Notion sebagai sumber data untuk Pengetahuan.', description: 'Menggunakan Notion sebagai sumber data untuk Pengetahuan.',
connected: 'Terhubung', connected: 'Terhubung',
pagesAuthorized: 'Halaman yang disahkan', pagesAuthorized: 'Halaman yang disahkan',
changeAuthorizedPages: 'Mengubah halaman resmi', changeAuthorizedPages: 'Mengubah halaman resmi',
title: 'Gagasan',
title: 'Notion',
sync: 'Sync', sync: 'Sync',
connectedWorkspace: 'Ruang kerja yang terhubung', connectedWorkspace: 'Ruang kerja yang terhubung',
}, },
'claude-2': 'Claude-2', 'claude-2': 'Claude-2',
'gpt-3.5-turbo': 'GPT-3.5-Turbo', 'gpt-3.5-turbo': 'GPT-3.5-Turbo',
'gpt-4': 'GPT-4', 'gpt-4': 'GPT-4',
'whisper-1': 'Bisikan-1',
'whisper-1': 'Whisper-1',
'text-davinci-003': 'Teks-Davinci-003', 'text-davinci-003': 'Teks-Davinci-003',
'gpt-4-32k': 'GPT-4-32K', 'gpt-4-32k': 'GPT-4-32K',
'gpt-3.5-turbo-16k': 'GPT-3.5-Turbo-16K', 'gpt-3.5-turbo-16k': 'GPT-3.5-Turbo-16K',
}, },
resend: 'Kirim Ulang', resend: 'Kirim Ulang',
conversationName: 'Nama percakapan', conversationName: 'Nama percakapan',
thinking: 'Pikiran...',
thinking: 'Sedang berpikir...',
conversationNameCanNotEmpty: 'Nama percakapan diperlukan', conversationNameCanNotEmpty: 'Nama percakapan diperlukan',
thought: 'Pikiran', thought: 'Pikiran',
renameConversation: 'Ganti Nama Percakapan', renameConversation: 'Ganti Nama Percakapan',
deleteDescription: 'Apakah Anda yakin ingin menghapus gambar profil Anda? Akun Anda akan menggunakan avatar awal default.', deleteDescription: 'Apakah Anda yakin ingin menghapus gambar profil Anda? Akun Anda akan menggunakan avatar awal default.',
}, },
imageInput: { imageInput: {
browse: 'ramban',
browse: 'Telusuri',
supportedFormats: 'Mendukung PNG, JPG, JPEG, WEBP dan GIF', supportedFormats: 'Mendukung PNG, JPG, JPEG, WEBP dan GIF',
dropImageHere: 'Letakkan gambar Anda di sini, atau', dropImageHere: 'Letakkan gambar Anda di sini, atau',
}, },

+ 9
- 9
web/i18n/id-ID/custom.ts Dosyayı Görüntüle

}, },
webapp: { webapp: {
changeLogoTip: 'Format SVG atau PNG dengan ukuran minimum 40x40px', changeLogoTip: 'Format SVG atau PNG dengan ukuran minimum 40x40px',
removeBrand: 'Hapus Didukung oleh Dify',
changeLogo: 'Perubahan Didukung oleh Citra Merek',
title: 'Sesuaikan merek aplikasi web',
removeBrand: 'Hapus Branding Dify',
changeLogo: 'Ubah Logo Merek',
title: 'Kustomisasi Branding Aplikasi Web',
}, },
app: { app: {
title: 'Menyesuaikan merek header aplikasi',
title: 'Kustomisasi Branding Header Aplikasi',
changeLogoTip: 'Format SVG atau PNG dengan ukuran minimal 80x80px', changeLogoTip: 'Format SVG atau PNG dengan ukuran minimal 80x80px',
}, },
customize: { customize: {
suffix: 'untuk meningkatkan ke edisi Enterprise.',
prefix: 'Untuk menyesuaikan logo merek di dalam aplikasi, silakan',
contactUs: 'Hubungi',
suffix: 'untuk upgrade ke edisi Enterprise.',
prefix: 'Untuk kustomisasi logo merek di dalam aplikasi, silakan',
contactUs: 'Hubungi Kami',
}, },
custom: 'Kustomisasi', custom: 'Kustomisasi',
uploading: 'Meng',
uploading: 'Mengunggah...',
upload: 'Unggah', upload: 'Unggah',
change: 'Ubah', change: 'Ubah',
restore: 'Pulihkan Default', restore: 'Pulihkan Default',
apply: 'Berlaku',
apply: 'Terapkan',
uploadedFail: 'Unggahan gambar gagal, silakan unggah ulang.', uploadedFail: 'Unggahan gambar gagal, silakan unggah ulang.',
} }



+ 10
- 10
web/i18n/id-ID/dataset-creation.ts Dosyayı Görüntüle

tip: 'Pengetahuan kosong tidak akan berisi dokumen, dan Anda dapat mengunggah dokumen kapan saja.', tip: 'Pengetahuan kosong tidak akan berisi dokumen, dan Anda dapat mengunggah dokumen kapan saja.',
}, },
website: { website: {
configure: 'Mengkonfigurasi',
configure: 'Konfigurasikan',
fireCrawlNotConfigured: 'Firecrawl tidak dikonfigurasi', fireCrawlNotConfigured: 'Firecrawl tidak dikonfigurasi',
chooseProvider: 'Pilih penyedia', chooseProvider: 'Pilih penyedia',
configureFirecrawl: 'Mengonfigurasi Firecrawl',
configureFirecrawl: 'Konfigurasikan Firecrawl',
watercrawlDoc: 'Dokumen Watercrawl', watercrawlDoc: 'Dokumen Watercrawl',
options: 'Pilihan', options: 'Pilihan',
firecrawlTitle: 'Mengekstrak konten web dengan 🔥Firecrawl', firecrawlTitle: 'Mengekstrak konten web dengan 🔥Firecrawl',
jinaReaderNotConfigured: 'Jina Reader tidak dikonfigurasi', jinaReaderNotConfigured: 'Jina Reader tidak dikonfigurasi',
preview: 'Pratayang', preview: 'Pratayang',
resetAll: 'Atur Ulang Semua', resetAll: 'Atur Ulang Semua',
run: 'Lari',
run: 'Jalankan',
limit: 'Batas', limit: 'Batas',
useSitemap: 'Menggunakan peta situs', useSitemap: 'Menggunakan peta situs',
jinaReaderDoc: 'Pelajari lebih lanjut tentang Jina Reader', jinaReaderDoc: 'Pelajari lebih lanjut tentang Jina Reader',
maxDepth: 'Kedalaman maks', maxDepth: 'Kedalaman maks',
jinaReaderDocLink: 'https://jina.ai/reader', jinaReaderDocLink: 'https://jina.ai/reader',
selectAll: 'Pilih Semua', selectAll: 'Pilih Semua',
maxDepthTooltip: 'Kedalaman maksimum untuk di-crawl relatif terhadap URL yang dimasukkan. Kedalaman 0 hanya mengikis halaman url yang dimasukkan, kedalaman 1 mengikis url dan semuanya setelah dimasukkanURL satu /, dan seterusnya.',
maxDepthTooltip: 'Kedalaman maksimum untuk di-crawl relatif terhadap URL yang dimasukkan. Kedalaman 0 hanya mengikis halaman url yang dimasukkan, kedalaman 1 mengikis url dan semuanya setelah dimasukkan URL satu /, dan seterusnya.',
waterCrawlNotConfiguredDescription: 'Konfigurasikan Watercrawl dengan kunci API untuk menggunakannya.', waterCrawlNotConfiguredDescription: 'Konfigurasikan Watercrawl dengan kunci API untuk menggunakannya.',
firecrawlDoc: 'Dokumen Firecrawl', firecrawlDoc: 'Dokumen Firecrawl',
configureWatercrawl: 'Mengonfigurasi Watercrawl',
configureWatercrawl: 'Konfigurasikan Watercrawl',
}, },
pagePreview: 'Pratinjau Halaman', pagePreview: 'Pratinjau Halaman',
notionSyncTitle: 'Gagasan tidak terhubung',
notionSyncTitle: 'Notion tidak terhubung',
filePreview: 'Pratinjau File', filePreview: 'Pratinjau File',
cancel: 'Membatalkan', cancel: 'Membatalkan',
emptyDatasetCreation: 'Saya ingin membuat Pengetahuan kosong', emptyDatasetCreation: 'Saya ingin membuat Pengetahuan kosong',
button: 'Depan',
button: 'Berikutnya',
notionSyncTip: 'Untuk menyinkronkan dengan Notion, koneksi ke Notion harus dibuat terlebih dahulu.', notionSyncTip: 'Untuk menyinkronkan dengan Notion, koneksi ke Notion harus dibuat terlebih dahulu.',
connect: 'Buka terhubung',
connect: 'Hubungkan',
}, },
stepTwo: { stepTwo: {
paragraph: 'Paragraf', paragraph: 'Paragraf',
previewChunkTip: 'Klik tombol \'Pratinjau Potongan\' di sebelah kiri untuk memuat pratinjau', previewChunkTip: 'Klik tombol \'Pratinjau Potongan\' di sebelah kiri untuk memuat pratinjau',
sideTipP4: 'Potongan dan pembersihan yang tepat meningkatkan kinerja model, memberikan hasil yang lebih akurat dan berharga.', sideTipP4: 'Potongan dan pembersihan yang tepat meningkatkan kinerja model, memberikan hasil yang lebih akurat dan berharga.',
previewTitleButton: 'Pratayang', previewTitleButton: 'Pratayang',
switch: 'Sakelar',
switch: 'Beralih',
datasetSettingLink: 'Pengaturan pengetahuan.', datasetSettingLink: 'Pengaturan pengetahuan.',
rules: 'Aturan Pra-pemrosesan Teks', rules: 'Aturan Pra-pemrosesan Teks',
sideTipP2: 'Segmentasi membagi teks panjang menjadi paragraf sehingga model dapat memahami dengan lebih baik. Ini meningkatkan kualitas dan relevansi hasil model.', sideTipP2: 'Segmentasi membagi teks panjang menjadi paragraf sehingga model dapat memahami dengan lebih baik. Ini meningkatkan kualitas dan relevansi hasil model.',
resume: 'Melanjutkan pemrosesan', resume: 'Melanjutkan pemrosesan',
stop: 'Hentikan pemrosesan', stop: 'Hentikan pemrosesan',
creationContent: 'Kami secara otomatis menamai Pengetahuan, Anda dapat memodifikasinya kapan saja.', creationContent: 'Kami secara otomatis menamai Pengetahuan, Anda dapat memodifikasinya kapan saja.',
modelButtonConfirm: 'Mengkonfirmasi',
modelButtonConfirm: 'Konfirmasi',
sideTipContent: 'Setelah dokumen selesai diindeks, Pengetahuan dapat diintegrasikan ke dalam aplikasi sebagai konteks, Anda dapat menemukan pengaturan konteks di halaman orkestrasi perintah. Anda juga dapat membuatnya sebagai plugin pengindeksan ChatGPT independen untuk dirilis.', sideTipContent: 'Setelah dokumen selesai diindeks, Pengetahuan dapat diintegrasikan ke dalam aplikasi sebagai konteks, Anda dapat menemukan pengaturan konteks di halaman orkestrasi perintah. Anda juga dapat membuatnya sebagai plugin pengindeksan ChatGPT independen untuk dirilis.',
modelButtonCancel: 'Membatalkan', modelButtonCancel: 'Membatalkan',
label: 'Nama pengetahuan', label: 'Nama pengetahuan',

+ 2
- 2
web/i18n/id-ID/dataset-hit-testing.ts Dosyayı Görüntüle

countWarning: 'Hingga 200 karakter.', countWarning: 'Hingga 200 karakter.',
placeholder: 'Silakan masukkan teks, disarankan untuk memasukkan kalimat deklaratif singkat.', placeholder: 'Silakan masukkan teks, disarankan untuk memasukkan kalimat deklaratif singkat.',
indexWarning: 'Pengetahuan berkualitas tinggi saja.', indexWarning: 'Pengetahuan berkualitas tinggi saja.',
testing: 'Ujian',
testing: 'Pengujian',
}, },
hit: { hit: {
emptyTip: 'Hasil Pengujian Pengambilan akan ditampilkan di sini', emptyTip: 'Hasil Pengujian Pengambilan akan ditampilkan di sini',
open: 'Buka', open: 'Buka',
settingTitle: 'Pengaturan Pengambilan', settingTitle: 'Pengaturan Pengambilan',
dateTimeFormat: 'MM / DD / YYYY hh: mm A', dateTimeFormat: 'MM / DD / YYYY hh: mm A',
desc: 'Uji efek pukulan Pengetahuan berdasarkan teks kueri yang diberikan.',
desc: 'Uji dampak pengetahuan terhadap hasil pencarian berdasarkan teks kueri yang diberikan.',
viewDetail: 'Lihat Detail', viewDetail: 'Lihat Detail',
viewChart: 'Lihat GRAFIK VAKTOR', viewChart: 'Lihat GRAFIK VAKTOR',
chunkDetail: 'Detail Potongan', chunkDetail: 'Detail Potongan',

+ 1
- 1
web/i18n/id-ID/dataset-settings.ts Dosyayı Görüntüle

retrievalSetting: { retrievalSetting: {
title: 'Pengaturan Pengambilan', title: 'Pengaturan Pengambilan',
description: 'tentang metode pengambilan.', description: 'tentang metode pengambilan.',
longDescription: 'tentang metode pengambilan, Anda dapat mengudagnya kapan saja di pengaturan Pengetahuan.',
longDescription: 'tentang metode pengambilan, Anda dapat mengunduhnya kapan saja di pengaturan Pengetahuan.',
method: 'Metode Pengambilan', method: 'Metode Pengambilan',
learnMore: 'Pelajari lebih lanjut', learnMore: 'Pelajari lebih lanjut',
}, },

+ 2
- 2
web/i18n/id-ID/dataset.ts Dosyayı Görüntüle

search: 'Metadata pencarian', search: 'Metadata pencarian',
}, },
datasetMetadata: { datasetMetadata: {
disabled: 'Cacat',
disabled: 'Nonaktif',
addMetaData: 'Tambahkan Metadata', addMetaData: 'Tambahkan Metadata',
description: 'Anda dapat mengelola semua metadata dalam pengetahuan ini di sini. Modifikasi akan disinkronkan ke setiap dokumen.', description: 'Anda dapat mengelola semua metadata dalam pengetahuan ini di sini. Modifikasi akan disinkronkan ke setiap dokumen.',
deleteTitle: 'Konfirmasi untuk menghapus', deleteTitle: 'Konfirmasi untuk menghapus',
rename: 'Ubah nama', rename: 'Ubah nama',
builtInDescription: 'Metadata bawaan secara otomatis diekstrak dan dihasilkan. Itu harus diaktifkan sebelum digunakan dan tidak dapat diedit.', builtInDescription: 'Metadata bawaan secara otomatis diekstrak dan dihasilkan. Itu harus diaktifkan sebelum digunakan dan tidak dapat diedit.',
namePlaceholder: 'Nama metadata', namePlaceholder: 'Nama metadata',
builtIn: 'Built-in',
builtIn: 'Bawaan',
}, },
documentMetadata: { documentMetadata: {
metadataToolTip: 'Metadata berfungsi sebagai filter penting yang meningkatkan akurasi dan relevansi pengambilan informasi. Anda dapat memodifikasi dan menambahkan metadata untuk dokumen ini di sini.', metadataToolTip: 'Metadata berfungsi sebagai filter penting yang meningkatkan akurasi dan relevansi pengambilan informasi. Anda dapat memodifikasi dan menambahkan metadata untuk dokumen ini di sini.',

+ 1
- 1
web/i18n/id-ID/education.ts Dosyayı Görüntüle

}, },
dateFormat: 'MM / DD / YYYY', dateFormat: 'MM / DD / YYYY',
}, },
submit: 'Tunduk',
submit: 'Kirim',
toVerified: 'Dapatkan Pendidikan Terverifikasi', toVerified: 'Dapatkan Pendidikan Terverifikasi',
currentSigned: 'SAAT INI MASUK SEBAGAI', currentSigned: 'SAAT INI MASUK SEBAGAI',
successTitle: 'Anda telah mendapatkan Dify Education Verified', successTitle: 'Anda telah mendapatkan Dify Education Verified',

+ 4
- 4
web/i18n/id-ID/explore.ts Dosyayı Görüntüle

const translation = { const translation = {
sidebar: { sidebar: {
action: { action: {
unpin: 'Lepaskan pin',
pin: 'Pin',
delete: 'Menghapus',
rename: 'Ubah nama',
unpin: 'Lepaskan sematan',
pin: 'Sematkan',
delete: 'Hapus',
rename: 'Ganti nama',
}, },
delete: { delete: {
content: 'Apakah Anda yakin ingin menghapus aplikasi ini?', content: 'Apakah Anda yakin ingin menghapus aplikasi ini?',

+ 13
- 13
web/i18n/id-ID/login.ts Dosyayı Görüntüle

continueWithCode: 'Lanjutkan dengan kode', continueWithCode: 'Lanjutkan dengan kode',
sendVerificationCode: 'Kirim Kode Verifikasi', sendVerificationCode: 'Kirim Kode Verifikasi',
invalidInvitationCode: 'Kode undangan tidak valid', invalidInvitationCode: 'Kode undangan tidak valid',
installBtn: 'Mengatur',
joinTipStart: 'Mengundang Anda bergabung',
installBtn: 'Siapkan',
joinTipStart: 'Mengundang Anda untuk bergabung',
or: 'ATAU', or: 'ATAU',
namePlaceholder: 'Nama pengguna Anda', namePlaceholder: 'Nama pengguna Anda',
withSSO: 'Lanjutkan dengan SSO', withSSO: 'Lanjutkan dengan SSO',
invitationCodePlaceholder: 'Kode undangan Anda', invitationCodePlaceholder: 'Kode undangan Anda',
emailPlaceholder: 'Email Anda', emailPlaceholder: 'Email Anda',
tos: 'Ketentuan Layanan', tos: 'Ketentuan Layanan',
go: 'Pergi ke Dify',
go: 'Buka Dify',
forgotPassword: 'Lupa Kata Sandi Anda?', forgotPassword: 'Lupa Kata Sandi Anda?',
sendUsMail: 'Kirimkan perkenalan Anda melalui email kepada kami, dan kami akan menangani permintaan undangan.', sendUsMail: 'Kirimkan perkenalan Anda melalui email kepada kami, dan kami akan menangani permintaan undangan.',
pp: 'Kebijakan Privasi', pp: 'Kebijakan Privasi',
activatedTipEnd: 'tim', activatedTipEnd: 'tim',
backToSignIn: 'Kembali untuk login',
backToSignIn: 'Kembali ke halaman masuk',
passwordChanged: 'Masuk sekarang', passwordChanged: 'Masuk sekarang',
withGitHub: 'Lanjutkan dengan GitHub', withGitHub: 'Lanjutkan dengan GitHub',
accountAlreadyInited: 'Akun sudah diinisialisasi', accountAlreadyInited: 'Akun sudah diinisialisasi',
withGoogle: 'Lanjutkan dengan Google', withGoogle: 'Lanjutkan dengan Google',
rightDesc: 'Bangun aplikasi AI yang menawan secara visual, dapat dioperasikan, dan ditingkatkan dengan mudah.',
rightDesc: 'Bangun aplikasi AI yang menarik secara visual, mudah dioperasikan, dan mudah diskalakan.',
invitationCode: 'Kode Undangan', invitationCode: 'Kode Undangan',
invalidToken: 'Token tidak valid atau kedaluwarsa', invalidToken: 'Token tidak valid atau kedaluwarsa',
setAdminAccount: 'Menyiapkan akun admin', setAdminAccount: 'Menyiapkan akun admin',
forgotPasswordDesc: 'Silakan masukkan alamat email Anda untuk mengatur ulang kata sandi Anda. Kami akan mengirimi Anda email dengan instruksi tentang cara mengatur ulang kata sandi Anda.', forgotPasswordDesc: 'Silakan masukkan alamat email Anda untuk mengatur ulang kata sandi Anda. Kami akan mengirimi Anda email dengan instruksi tentang cara mengatur ulang kata sandi Anda.',
confirmPassword: 'Konfirmasi Kata Sandi', confirmPassword: 'Konfirmasi Kata Sandi',
changePasswordBtn: 'Menetapkan kata sandi',
changePasswordBtn: 'Tetapkan kata sandi',
resetPassword: 'Atur Ulang Kata Sandi', resetPassword: 'Atur Ulang Kata Sandi',
explore: 'Jelajahi Dify', explore: 'Jelajahi Dify',
useVerificationCode: 'Gunakan Kode Verifikasi', useVerificationCode: 'Gunakan Kode Verifikasi',
licenseLost: 'Lisensi Hilang', licenseLost: 'Lisensi Hilang',
licenseInactive: 'Lisensi Tidak Aktif', licenseInactive: 'Lisensi Tidak Aktif',
enterYourName: 'Silakan masukkan nama pengguna Anda', enterYourName: 'Silakan masukkan nama pengguna Anda',
back: 'Belakang',
back: 'Kembali',
activated: 'Masuk sekarang', activated: 'Masuk sekarang',
goToInit: 'Jika Anda belum menginisialisasi akun, silakan buka halaman inisialisasi', goToInit: 'Jika Anda belum menginisialisasi akun, silakan buka halaman inisialisasi',
licenseExpired: 'Lisensi Kedaluwarsa', licenseExpired: 'Lisensi Kedaluwarsa',
validate: 'Memvalidasi', validate: 'Memvalidasi',
resetPasswordDesc: 'Ketik email yang Anda gunakan untuk mendaftar di Dify dan kami akan mengirimkan email reset kata sandi kepada Anda.', resetPasswordDesc: 'Ketik email yang Anda gunakan untuk mendaftar di Dify dan kami akan mengirimkan email reset kata sandi kepada Anda.',
licenseLostTip: 'Gagal menghubungkan server lisensi Dify. Hubungi administrator Anda untuk terus menggunakan Dify.', licenseLostTip: 'Gagal menghubungkan server lisensi Dify. Hubungi administrator Anda untuk terus menggunakan Dify.',
signBtn: 'Tandatangan',
signBtn: 'Masuk',
sendResetLink: 'Kirim tautan reset', sendResetLink: 'Kirim tautan reset',
createAndSignIn: 'Membuat dan masuk',
createAndSignIn: 'Buat dan masuk',
licenseExpiredTip: 'Lisensi Dify Enterprise untuk ruang kerja Anda telah kedaluwarsa. Hubungi administrator Anda untuk terus menggunakan Dify.', licenseExpiredTip: 'Lisensi Dify Enterprise untuk ruang kerja Anda telah kedaluwarsa. Hubungi administrator Anda untuk terus menggunakan Dify.',
email: 'Alamat email', email: 'Alamat email',
noLoginMethodTip: 'Silakan hubungi admin sistem untuk menambahkan metode autentikasi.', noLoginMethodTip: 'Silakan hubungi admin sistem untuk menambahkan metode autentikasi.',
licenseInactiveTip: 'Lisensi Dify Enterprise untuk ruang kerja Anda tidak aktif. Hubungi administrator Anda untuk terus menggunakan Dify.', licenseInactiveTip: 'Lisensi Dify Enterprise untuk ruang kerja Anda tidak aktif. Hubungi administrator Anda untuk terus menggunakan Dify.',
rightTitle: 'Buka potensi penuh LLM', rightTitle: 'Buka potensi penuh LLM',
welcome: '👋 Selamat datang di Dify, silakan login untuk melanjutkan.', welcome: '👋 Selamat datang di Dify, silakan login untuk melanjutkan.',
changePassword: 'Menetapkan kata sandi',
setAdminAccountDesc: 'Hak istimewa maksimum untuk akun admin, yang dapat digunakan untuk membuat aplikasi dan mengelola penyedia LLM, dll.',
join: 'Ikat',
changePassword: 'Ubah kata sandi',
setAdminAccountDesc: 'Akun admin memiliki hak istimewa penuh untuk membuat aplikasi, mengelola penyedia LLM, dll.',
join: 'Gabung',
forget: 'Lupa Kata Sandi Anda?', forget: 'Lupa Kata Sandi Anda?',
backToLogin: 'Kembali ke login',
backToLogin: 'Kembali ke halaman masuk',
oneMoreStep: 'Satu langkah lagi', oneMoreStep: 'Satu langkah lagi',
} }



+ 5
- 5
web/i18n/id-ID/oauth.ts Dosyayı Görüntüle

authorizeFailed: 'Otorisasi gagal', authorizeFailed: 'Otorisasi gagal',
authAppInfoFetchFailed: 'Gagal mengambil info aplikasi untuk otorisasi', authAppInfoFetchFailed: 'Gagal mengambil info aplikasi untuk otorisasi',
}, },
continue: 'Terus',
unknownApp: 'Aplikasi Tidak Dikenal',
login: 'Login',
connect: 'Hubungkan ke',
switchAccount: 'Beralih Akun',
continue: 'Lanjut',
unknownApp: 'Aplikasi tidak dikenal',
login: 'Masuk',
connect: 'Hubungkan',
switchAccount: 'Ganti Akun',
} }


export default translation export default translation

+ 5
- 5
web/i18n/id-ID/plugin.ts Dosyayı Görüntüle

local: 'Plugin Lokal', local: 'Plugin Lokal',
}, },
operation: { operation: {
remove: 'Buka',
remove: 'Hapus',
info: 'Plugin Info', info: 'Plugin Info',
update: 'Pemutakhiran', update: 'Pemutakhiran',
detail: 'Rincian', detail: 'Rincian',
empty: 'Klik tombol \' \' untuk menambahkan alat. Anda dapat menambahkan beberapa alat.', empty: 'Klik tombol \' \' untuk menambahkan alat. Anda dapat menambahkan beberapa alat.',
params: 'KONFIGURASI PENALARAN', params: 'KONFIGURASI PENALARAN',
unsupportedMCPTool: 'Saat ini versi plugin strategi agen yang dipilih tidak mendukung alat MCP.', unsupportedMCPTool: 'Saat ini versi plugin strategi agen yang dipilih tidak mendukung alat MCP.',
auto: 'Mobil',
auto: 'Otomatis',
descriptionPlaceholder: 'Deskripsi singkat tentang tujuan alat, misalnya, mendapatkan suhu untuk lokasi tertentu.', descriptionPlaceholder: 'Deskripsi singkat tentang tujuan alat, misalnya, mendapatkan suhu untuk lokasi tertentu.',
toolSetting: 'Pengaturan Alat', toolSetting: 'Pengaturan Alat',
settings: 'PENGATURAN PENGGUNA', settings: 'PENGATURAN PENGGUNA',
installing: 'Menginstal...', installing: 'Menginstal...',
uploadFailed: 'Upload gagal', uploadFailed: 'Upload gagal',
pluginLoadErrorDesc: 'Plugin ini tidak akan diinstal', pluginLoadErrorDesc: 'Plugin ini tidak akan diinstal',
next: 'Depan',
next: 'Lanjut',
installedSuccessfully: 'Instalasi berhasil', installedSuccessfully: 'Instalasi berhasil',
install: 'Pasang', install: 'Pasang',
installFailed: 'Instalasi gagal', installFailed: 'Instalasi gagal',
back: 'Belakang',
back: 'Kembali',
readyToInstallPackage: 'Tentang menginstal plugin berikut', readyToInstallPackage: 'Tentang menginstal plugin berikut',
installedSuccessfullyDesc: 'Plugin telah berhasil diinstal.', installedSuccessfullyDesc: 'Plugin telah berhasil diinstal.',
pluginLoadError: 'Kesalahan pemuatan plugin', pluginLoadError: 'Kesalahan pemuatan plugin',
empower: 'Berdayakan pengembangan AI Anda', empower: 'Berdayakan pengembangan AI Anda',
partnerTip: 'Diverifikasi oleh partner Dify', partnerTip: 'Diverifikasi oleh partner Dify',
moreFrom: 'Selengkapnya dari Marketplace', moreFrom: 'Selengkapnya dari Marketplace',
sortBy: 'Kota hitam',
sortBy: 'Urutkan berdasarkan',
and: 'dan', and: 'dan',
difyMarketplace: 'Dify Marketplace', difyMarketplace: 'Dify Marketplace',
verifiedTip: 'Diverifikasi oleh Dify', verifiedTip: 'Diverifikasi oleh Dify',

+ 8
- 8
web/i18n/id-ID/time.ts Dosyayı Görüntüle

const translation = { const translation = {
daysInWeek: { daysInWeek: {
Wed: 'Bertaruh',
Wed: 'Rabu',
Thu: 'Kamis', Thu: 'Kamis',
Sun: 'Matahari',
Tue: 'Membunuh',
Mon: 'Mon',
Sat: 'Hari sabtu',
Fri: 'Bebas',
Sun: 'Minggu',
Tue: 'Selasa',
Mon: 'Senin',
Sat: 'Sabtu',
Fri: 'Jumat',
}, },
months: { months: {
August: 'Agustus', August: 'Agustus',
October: 'Oktober', October: 'Oktober',
May: 'Menjahit',
May: 'Mei',
September: 'September', September: 'September',
December: 'Desember', December: 'Desember',
November: 'November', November: 'November',
dateFormats: { dateFormats: {
display: 'MMMM D, YYYY', display: 'MMMM D, YYYY',
input: 'YYYY-MM-DD', input: 'YYYY-MM-DD',
outputWithTime: 'YYYY-MM-DDTHH:mm:ss. SSSZ',
outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
output: 'YYYY-MM-DD', output: 'YYYY-MM-DD',
displayWithTime: 'MMMM D, YYYY hh:mm A', displayWithTime: 'MMMM D, YYYY hh:mm A',
}, },

+ 13
- 13
web/i18n/id-ID/workflow.ts Dosyayı Görüntüle

title: 'Tarik', title: 'Tarik',
}, },
}, },
undo: 'Buka',
undo: 'Urungkan',
embedIntoSite: 'Sematkan ke Situs', embedIntoSite: 'Sematkan ke Situs',
editing: 'Mengedit', editing: 'Mengedit',
inRunMode: 'Dalam Mode Jalankan', inRunMode: 'Dalam Mode Jalankan',
addParallelNode: 'Tambahkan Node Paralel', addParallelNode: 'Tambahkan Node Paralel',
onFailure: 'Pada Kegagalan', onFailure: 'Pada Kegagalan',
update: 'Pemutakhiran', update: 'Pemutakhiran',
parallelRun: 'Lari Paralel',
configure: 'Mengkonfigurasi',
parallelRun: 'Jalankan Paralel',
configure: 'Konfigurasikan',
copy: 'Menyalin', copy: 'Menyalin',
redo: 'Siap',
redo: 'Ulangi',
runApp: 'Jalankan Aplikasi', runApp: 'Jalankan Aplikasi',
noHistory: 'Tidak Ada Sejarah', noHistory: 'Tidak Ada Sejarah',
importDSLTip: 'Draf saat ini akan ditimpa.\nEkspor alur kerja sebagai cadangan sebelum mengimpor.', importDSLTip: 'Draf saat ini akan ditimpa.\nEkspor alur kerja sebagai cadangan sebelum mengimpor.',
importSuccess: 'Berhasil Impor', importSuccess: 'Berhasil Impor',
jumpToNode: 'Lompat ke simpul ini', jumpToNode: 'Lompat ke simpul ini',
tagBound: 'Jumlah aplikasi yang menggunakan tag ini', tagBound: 'Jumlah aplikasi yang menggunakan tag ini',
model: 'Pola',
model: 'Model',
workflowAsToolTip: 'Konfigurasi ulang alat diperlukan setelah pembaruan alur kerja.', workflowAsToolTip: 'Konfigurasi ulang alat diperlukan setelah pembaruan alur kerja.',
currentDraft: 'Draf Saat Ini', currentDraft: 'Draf Saat Ini',
parallel: 'SEJAJAR', parallel: 'SEJAJAR',
importWarning: 'Hati', importWarning: 'Hati',
running: 'Menjalankan', running: 'Menjalankan',
publishedAt: 'Diterbitkan', publishedAt: 'Diterbitkan',
run: 'Lari',
run: 'Jalankan',
importDSL: 'Impor DSL', importDSL: 'Impor DSL',
featuresDescription: 'Tingkatkan pengalaman pengguna aplikasi web', featuresDescription: 'Tingkatkan pengalaman pengguna aplikasi web',
inPreviewMode: 'Dalam Mode Pratinjau', inPreviewMode: 'Dalam Mode Pratinjau',
nodeAdd: 'Node ditambahkan', nodeAdd: 'Node ditambahkan',
nodePaste: 'Node ditempelkan', nodePaste: 'Node ditempelkan',
noteDelete: 'Catatan dihapus', noteDelete: 'Catatan dihapus',
hint: 'Indian',
hint: 'Petunjuk',
nodeTitleChange: 'Judul simpul diubah', nodeTitleChange: 'Judul simpul diubah',
title: 'Perubahan Riwayat',
title: 'Riwayat Perubahan',
nodeDescriptionChange: 'Deskripsi simpul diubah', nodeDescriptionChange: 'Deskripsi simpul diubah',
clearHistory: 'Hapus Sejarah', clearHistory: 'Hapus Sejarah',
placeholder: 'Anda belum mengubah apa pun', placeholder: 'Anda belum mengubah apa pun',
errorMsg: { errorMsg: {
fields: { fields: {
variable: 'Nama Variabel', variable: 'Nama Variabel',
model: 'Pola',
model: 'Model',
rerankModel: 'Model Peringkat Ulang yang dikonfigurasi', rerankModel: 'Model Peringkat Ulang yang dikonfigurasi',
visionVariable: 'Variabel Penglihatan', visionVariable: 'Variabel Penglihatan',
variableValue: 'Nilai Variabel', variableValue: 'Nilai Variabel',
'question-classifier': 'Pengklasifikasi Pertanyaan', 'question-classifier': 'Pengklasifikasi Pertanyaan',
'iteration-start': 'Iterasi Mulai', 'iteration-start': 'Iterasi Mulai',
'knowledge-retrieval': 'Pengambilan Pengetahuan', 'knowledge-retrieval': 'Pengambilan Pengetahuan',
'loop': 'Lari',
'loop': 'Perulangan',
'assigner': 'Penerima Variabel', 'assigner': 'Penerima Variabel',
'agent': 'Agen', 'agent': 'Agen',
'list-operator': 'Operator Daftar', 'list-operator': 'Operator Daftar',
'answer': 'Menjawab',
'answer': 'Jawaban',
'parameter-extractor': 'Ekstraktor Parameter', 'parameter-extractor': 'Ekstraktor Parameter',
'document-extractor': 'Ekstraktor Dokumen', 'document-extractor': 'Ekstraktor Dokumen',
'end': 'Ujung', 'end': 'Ujung',
horizontal: 'Horisontal', horizontal: 'Horisontal',
distributeHorizontal: 'Spasi Secara Horizontal', distributeHorizontal: 'Spasi Secara Horizontal',
zoomTo100: 'Perbesar hingga 100%', zoomTo100: 'Perbesar hingga 100%',
alignLeft: 'Kiri',
alignLeft: 'Rata Kiri',
distributeVertical: 'Ruang Secara Vertikal', distributeVertical: 'Ruang Secara Vertikal',
zoomTo50: 'Perbesar hingga 50%', zoomTo50: 'Perbesar hingga 50%',
alignBottom: 'Dasar',
alignBottom: 'Rata Bawah',
}, },
variableReference: { variableReference: {
conversationVars: 'variabel percakapan', conversationVars: 'variabel percakapan',

+ 1
- 1
web/i18n/tr-TR/common.ts Dosyayı Görüntüle

content: 'Geri Bildirim İçeriği', content: 'Geri Bildirim İçeriği',
subtitle: 'Lütfen bu yanıtla ilgili neyin yanlış gittiğini bize bildirin', subtitle: 'Lütfen bu yanıtla ilgili neyin yanlış gittiğini bize bildirin',
title: 'Geri Bildirim Sağla', title: 'Geri Bildirim Sağla',
placeholder: 'Lütfen neyin yanlış gittiğini veya nasıl iyileşebileceğimizı açıklayın...',
placeholder: 'Lütfen neyin yanlış gittiğini veya nasıl iyileşebileceğimizi açıklayın...',
}, },
} }



+ 1
- 1
web/i18n/uk-UA/common.ts Dosyayı Görüntüle

title: 'Надати відгук', title: 'Надати відгук',
content: 'Зміст відгуку', content: 'Зміст відгуку',
placeholder: 'Будь ласка, опишіть, що пішло не так або як ми можемо покращити...', placeholder: 'Будь ласка, опишіть, що пішло не так або як ми можемо покращити...',
subtitle: 'Будь ласка, скажіть нам, що пішло не так з цим відповіді',
subtitle: 'Будь ласка, скажіть нам, що пішло не так із цією відповіддю',
}, },
} }



+ 96
- 0
web/types/i18n.d.ts Dosyayı Görüntüle

// TypeScript type definitions for Dify's i18next configuration
// This file is auto-generated. Do not edit manually.
// To regenerate, run: pnpm run gen:i18n-types
import 'react-i18next'

// Extract types from translation files using typeof import pattern.
//
// Each alias captures the type of the default export of one en-US namespace
// file. en-US acts as the schema that the `Messages` shape below is built
// from (assumption: other locales are expected to mirror these shapes —
// confirm against i18next-config.ts / the gen:i18n-types script).
// NOTE(review): this file is auto-generated (see header); changes here will
// be clobbered by `pnpm run gen:i18n-types`.

type AppAnnotationMessages = typeof import('../i18n/en-US/app-annotation').default
type AppApiMessages = typeof import('../i18n/en-US/app-api').default
type AppDebugMessages = typeof import('../i18n/en-US/app-debug').default
type AppLogMessages = typeof import('../i18n/en-US/app-log').default
type AppOverviewMessages = typeof import('../i18n/en-US/app-overview').default
type AppMessages = typeof import('../i18n/en-US/app').default
type BillingMessages = typeof import('../i18n/en-US/billing').default
type CommonMessages = typeof import('../i18n/en-US/common').default
type CustomMessages = typeof import('../i18n/en-US/custom').default
type DatasetCreationMessages = typeof import('../i18n/en-US/dataset-creation').default
type DatasetDocumentsMessages = typeof import('../i18n/en-US/dataset-documents').default
type DatasetHitTestingMessages = typeof import('../i18n/en-US/dataset-hit-testing').default
type DatasetSettingsMessages = typeof import('../i18n/en-US/dataset-settings').default
type DatasetMessages = typeof import('../i18n/en-US/dataset').default
type EducationMessages = typeof import('../i18n/en-US/education').default
type ExploreMessages = typeof import('../i18n/en-US/explore').default
type LayoutMessages = typeof import('../i18n/en-US/layout').default
type LoginMessages = typeof import('../i18n/en-US/login').default
type OauthMessages = typeof import('../i18n/en-US/oauth').default
type PluginTagsMessages = typeof import('../i18n/en-US/plugin-tags').default
type PluginMessages = typeof import('../i18n/en-US/plugin').default
type RegisterMessages = typeof import('../i18n/en-US/register').default
type RunLogMessages = typeof import('../i18n/en-US/run-log').default
type ShareMessages = typeof import('../i18n/en-US/share').default
type TimeMessages = typeof import('../i18n/en-US/time').default
type ToolsMessages = typeof import('../i18n/en-US/tools').default
type WorkflowMessages = typeof import('../i18n/en-US/workflow').default

// Complete type structure that matches i18next-config.ts camelCase conversion.
//
// Each key is the camelCase form of the corresponding namespace file name
// (e.g. 'app-annotation' -> appAnnotation), per the conversion noted above.
// This is the single resource shape used by both module augmentations below.
export type Messages = {
appAnnotation: AppAnnotationMessages;
appApi: AppApiMessages;
appDebug: AppDebugMessages;
appLog: AppLogMessages;
appOverview: AppOverviewMessages;
app: AppMessages;
billing: BillingMessages;
common: CommonMessages;
custom: CustomMessages;
datasetCreation: DatasetCreationMessages;
datasetDocuments: DatasetDocumentsMessages;
datasetHitTesting: DatasetHitTestingMessages;
datasetSettings: DatasetSettingsMessages;
dataset: DatasetMessages;
education: EducationMessages;
explore: ExploreMessages;
layout: LayoutMessages;
login: LoginMessages;
oauth: OauthMessages;
pluginTags: PluginTagsMessages;
plugin: PluginMessages;
register: RegisterMessages;
runLog: RunLogMessages;
share: ShareMessages;
time: TimeMessages;
tools: ToolsMessages;
workflow: WorkflowMessages;
}

// Utility type to flatten nested object keys into dot notation.
//
// Recursively walks T: for each key K, if the value is itself an object it
// recurses and prefixes the nested keys with "K."; otherwise it yields the
// key itself. Indexing with [keyof T] collapses the mapped type into a union
// of all dotted paths. `K & string` drops number/symbol keys from the
// template literal; `FlattenKeys<T[K]> & string` keeps the recursive result
// usable inside the template.
// NOTE(review): `T[K] extends object` also matches arrays and functions, so
// leaves are assumed to be strings (true for i18n message files) — confirm
// if this utility is ever reused on other shapes.
type FlattenKeys<T> = T extends object
? {
[K in keyof T]: T[K] extends object
? `${K & string}.${FlattenKeys<T[K]> & string}`
: `${K & string}`
}[keyof T]
: never

// Union of every valid dot-notation translation key, e.g. 'common.title'.
export type ValidTranslationKeys = FlattenKeys<Messages>

// Extend react-i18next with Dify's type structure.
//
// Must be declared as an `interface`, not a `type` alias: TypeScript module
// augmentation works through declaration merging, which only merges
// interfaces. A `type CustomTypeOptions` does not merge with the library's
// own `CustomTypeOptions` interface — it collides with it — so the typed
// `t()` keys would never take effect. This matches the pattern required by
// the react-i18next TypeScript documentation.
// NOTE(review): this file is auto-generated; mirror this fix in the
// `gen:i18n-types` generator so it is not regenerated away.
declare module 'react-i18next' {
  interface CustomTypeOptions {
    defaultNS: 'translation';
    resources: {
      translation: Messages;
    };
  }
}

// Extend i18next for complete type safety.
//
// Same requirement as the react-i18next augmentation: module augmentation
// relies on declaration merging, which only applies to interfaces. i18next
// itself declares `interface CustomTypeOptions`, so re-declaring it here as
// a `type` alias is a duplicate-identifier conflict rather than a merge,
// and the custom resource typing silently fails to apply.
// NOTE(review): this file is auto-generated; mirror this fix in the
// `gen:i18n-types` generator so it is not regenerated away.
declare module 'i18next' {
  interface CustomTypeOptions {
    defaultNS: 'translation';
    resources: {
      translation: Messages;
    };
  }
}

Loading…
İptal
Kaydet