Browse source code

Merge branch 'main' into feat/rag-2

# Conflicts:
#	web/app/components/workflow/hooks/use-workflow.ts
tags/2.0.0-beta.1
jyong, 3 months ago
commit a7a4c8228e
100 changed files with 2565 additions and 409 deletions
1. +1 -1  README.md
2. +1 -1  README_AR.md
3. +1 -1  README_BN.md
4. +1 -1  README_CN.md
5. +1 -1  README_DE.md
6. +1 -1  README_ES.md
7. +1 -1  README_FR.md
8. +1 -1  README_JA.md
9. +1 -1  README_KL.md
10. +1 -1  README_KR.md
11. +1 -1  README_PT.md
12. +1 -1  README_TR.md
13. +1 -1  README_TW.md
14. +1 -1  README_VI.md
15. +50 -2  api/configs/middleware/vdb/elasticsearch_config.py
16. +36 -21  api/controllers/console/app/annotation.py
17. +4 -4  api/controllers/console/datasets/metadata.py
18. +3 -0  api/controllers/service_api/app/completion.py
19. +4 -4  api/controllers/service_api/dataset/metadata.py
20. +2 -0  api/core/app/app_config/entities.py
21. +23 -5  api/core/ops/aliyun_trace/aliyun_trace.py
22. +9 -3  api/core/ops/aliyun_trace/data_exporter/traceclient.py
23. +25 -14  api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py
24. +1 -1  api/core/ops/entities/config_entity.py
25. +1 -0  api/core/ops/entities/trace_entity.py
26. +10 -9  api/core/ops/langfuse_trace/langfuse_trace.py
27. +8 -9  api/core/ops/langsmith_trace/langsmith_trace.py
28. +9 -10  api/core/ops/opik_trace/opik_trace.py
29. +11 -5  api/core/ops/ops_trace_manager.py
30. +25 -10  api/core/ops/weave_trace/weave_trace.py
31. +149 -31  api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py
32. +2 -0  api/core/rag/entities/metadata_entities.py
33. +4 -1  api/core/rag/extractor/notion_extractor.py
34. +1 -1  api/core/tools/workflow_as_tool/tool.py
35. +2 -2  api/core/workflow/nodes/http_request/executor.py
36. +2 -0  api/core/workflow/nodes/knowledge_retrieval/entities.py
37. +22 -0  api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py
38. +8 -11  api/schedule/queue_monitor_task.py
39. +60 -1  api/services/annotation_service.py
40. +2 -2  api/services/external_knowledge_service.py
41. +1 -1  api/services/workflow/workflow_converter.py
42. +9 -7  api/services/workflow_service.py
43. +2 -0  api/tasks/add_document_to_index_task.py
44. +1 -0  api/tasks/create_segment_to_index_task.py
45. +2 -0  api/tasks/document_indexing_sync_task.py
46. +2 -2  api/tasks/retry_document_indexing_task.py
47. +3 -1  api/tests/integration_tests/vdb/elasticsearch/test_elasticsearch.py
48. +59 -0  api/tests/unit_tests/configs/test_dify_config.py
49. +13 -6  api/tests/unit_tests/core/ops/test_config_entity.py
50. +189 -0  api/tests/unit_tests/services/test_metadata_bug_complete.py
51. +108 -0  api/tests/unit_tests/services/test_metadata_nullable_bug.py
52. +11 -0  docker/.env.example
53. +8 -0  docker/docker-compose.yaml
54. +566 -0  web/__tests__/check-i18n.test.ts
55. +207 -0  web/__tests__/plugin-tool-workflow-error.test.tsx
56. +301 -0  web/__tests__/workflow-parallel-limit.test.tsx
57. +79 -0  web/app/components/app/annotation/batch-action.tsx
58. +27 -2  web/app/components/app/annotation/index.tsx
59. +64 -4  web/app/components/app/annotation/list.tsx
60. +2 -0  web/app/components/plugins/plugin-detail-panel/detail-header.tsx
61. +0 -8  web/app/components/workflow/constants.ts
62. +4 -5  web/app/components/workflow/hooks/use-workflow.ts
63. +7 -1  web/app/components/workflow/nodes/_base/components/switch-plugin-version.tsx
64. +7 -1  web/app/components/workflow/nodes/agent/use-single-run-form-params.ts
65. +13 -3  web/app/components/workflow/nodes/http/components/timeout/index.tsx
66. +7 -1  web/app/components/workflow/nodes/http/use-single-run-form-params.ts
67. +4 -3  web/app/components/workflow/nodes/iteration/panel.tsx
68. +2 -0  web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/condition-list/utils.ts
69. +60 -42  web/app/components/workflow/nodes/list-operator/components/filter-condition.tsx
70. +7 -1  web/app/components/workflow/nodes/llm/use-single-run-form-params.ts
71. +7 -1  web/app/components/workflow/nodes/parameter-extractor/use-single-run-form-params.ts
72. +7 -1  web/app/components/workflow/nodes/question-classifier/use-single-run-form-params.ts
73. +7 -1  web/app/components/workflow/nodes/tool/use-single-run-form-params.ts
74. +10 -0  web/app/components/workflow/nodes/utils.ts
75. +11 -4  web/config/index.ts
76. +2 -2  web/i18n-config/README.md
77. +214 -9  web/i18n-config/check-i18n.js
78. +0 -1  web/i18n/de-DE/app-debug.ts
79. +0 -19  web/i18n/de-DE/app.ts
80. +22 -32  web/i18n/de-DE/billing.ts
81. +0 -1  web/i18n/de-DE/common.ts
82. +0 -2  web/i18n/de-DE/dataset-creation.ts
83. +0 -2  web/i18n/de-DE/dataset-documents.ts
84. +0 -1  web/i18n/de-DE/dataset-hit-testing.ts
85. +0 -1  web/i18n/de-DE/login.ts
86. +0 -1  web/i18n/de-DE/run-log.ts
87. +0 -1  web/i18n/de-DE/tools.ts
88. +0 -5  web/i18n/de-DE/workflow.ts
89. +10 -0  web/i18n/en-US/app-annotation.ts
90. +0 -15  web/i18n/es-ES/app-debug.ts
91. +0 -14  web/i18n/es-ES/app.ts
92. +22 -32  web/i18n/es-ES/billing.ts
93. +0 -1  web/i18n/es-ES/common.ts
94. +0 -2  web/i18n/es-ES/dataset-creation.ts
95. +0 -1  web/i18n/es-ES/dataset-documents.ts
96. +0 -1  web/i18n/es-ES/dataset-hit-testing.ts
97. +0 -1  web/i18n/es-ES/login.ts
98. +0 -1  web/i18n/es-ES/tools.ts
99. +1 -9  web/i18n/es-ES/workflow.ts
100. +0 -0  web/i18n/fa-IR/workflow.ts

+1 -1  README.md

  For those who'd like to contribute code, see our [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
  At the same time, please consider supporting Dify by sharing it on social media and at events and conferences.

- > We are looking for contributors to help translate Dify into languages other than Mandarin or English. If you are interested in helping, please see the [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) for more information, and leave us a comment in the `global-users` channel of our [Discord Community Server](https://discord.gg/8Tpq4AcN9c).
+ > We are looking for contributors to help translate Dify into languages other than Mandarin or English. If you are interested in helping, please see the [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) for more information, and leave us a comment in the `global-users` channel of our [Discord Community Server](https://discord.gg/8Tpq4AcN9c).

  ## Community & contact



+1 -1  README_AR.md

  لأولئك الذين يرغبون في المساهمة، انظر إلى [دليل المساهمة](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) لدينا.
  في الوقت نفسه، يرجى النظر في دعم Dify عن طريق مشاركته على وسائل التواصل الاجتماعي وفي الفعاليات والمؤتمرات.

- > نحن نبحث عن مساهمين لمساعدة في ترجمة Dify إلى لغات أخرى غير اللغة الصينية المندرين أو الإنجليزية. إذا كنت مهتمًا بالمساعدة، يرجى الاطلاع على [README للترجمة](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) لمزيد من المعلومات، واترك لنا تعليقًا في قناة `global-users` على [خادم المجتمع على Discord](https://discord.gg/8Tpq4AcN9c).
+ > نحن نبحث عن مساهمين لمساعدة في ترجمة Dify إلى لغات أخرى غير اللغة الصينية المندرين أو الإنجليزية. إذا كنت مهتمًا بالمساعدة، يرجى الاطلاع على [README للترجمة](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) لمزيد من المعلومات، واترك لنا تعليقًا في قناة `global-users` على [خادم المجتمع على Discord](https://discord.gg/8Tpq4AcN9c).

  **المساهمون**



+1 -1  README_BN.md

  যারা কোড অবদান রাখতে চান, তাদের জন্য আমাদের [অবদান নির্দেশিকা] দেখুন (https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)।
  একই সাথে, সোশ্যাল মিডিয়া এবং ইভেন্ট এবং কনফারেন্সে এটি শেয়ার করে Dify কে সমর্থন করুন।

- > আমরা ম্যান্ডারিন বা ইংরেজি ছাড়া অন্য ভাষায় Dify অনুবাদ করতে সাহায্য করার জন্য অবদানকারীদের খুঁজছি। আপনি যদি সাহায্য করতে আগ্রহী হন, তাহলে আরও তথ্যের জন্য [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) দেখুন এবং আমাদের [ডিসকর্ড কমিউনিটি সার্ভার](https://discord.gg/8Tpq4AcN9c) এর `গ্লোবাল-ইউজারস` চ্যানেলে আমাদের একটি মন্তব্য করুন।
+ > আমরা ম্যান্ডারিন বা ইংরেজি ছাড়া অন্য ভাষায় Dify অনুবাদ করতে সাহায্য করার জন্য অবদানকারীদের খুঁজছি। আপনি যদি সাহায্য করতে আগ্রহী হন, তাহলে আরও তথ্যের জন্য [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) দেখুন এবং আমাদের [ডিসকর্ড কমিউনিটি সার্ভার](https://discord.gg/8Tpq4AcN9c) এর `গ্লোবাল-ইউজারস` চ্যানেলে আমাদের একটি মন্তব্য করুন।

  ## কমিউনিটি এবং যোগাযোগ



+1 -1  README_CN.md

  对于那些想要贡献代码的人,请参阅我们的[贡献指南](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)。
  同时,请考虑通过社交媒体、活动和会议来支持 Dify 的分享。

- > 我们正在寻找贡献者来帮助将 Dify 翻译成除了中文和英文之外的其他语言。如果您有兴趣帮助,请参阅我们的[i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md)获取更多信息,并在我们的[Discord 社区服务器](https://discord.gg/8Tpq4AcN9c)的`global-users`频道中留言。
+ > 我们正在寻找贡献者来帮助将 Dify 翻译成除了中文和英文之外的其他语言。如果您有兴趣帮助,请参阅我们的[i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md)获取更多信息,并在我们的[Discord 社区服务器](https://discord.gg/8Tpq4AcN9c)的`global-users`频道中留言。

  **Contributors**



+1 -1  README_DE.md

  Falls Sie Code beitragen möchten, lesen Sie bitte unseren [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md). Gleichzeitig bitten wir Sie, Dify zu unterstützen, indem Sie es in den sozialen Medien teilen und auf Veranstaltungen und Konferenzen präsentieren.

- > Wir suchen Mitwirkende, die dabei helfen, Dify in weitere Sprachen zu übersetzen – außer Mandarin oder Englisch. Wenn Sie Interesse an einer Mitarbeit haben, lesen Sie bitte die [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) für weitere Informationen und hinterlassen Sie einen Kommentar im `global-users`-Kanal unseres [Discord Community Servers](https://discord.gg/8Tpq4AcN9c).
+ > Wir suchen Mitwirkende, die dabei helfen, Dify in weitere Sprachen zu übersetzen – außer Mandarin oder Englisch. Wenn Sie Interesse an einer Mitarbeit haben, lesen Sie bitte die [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) für weitere Informationen und hinterlassen Sie einen Kommentar im `global-users`-Kanal unseres [Discord Community Servers](https://discord.gg/8Tpq4AcN9c).

  ## Gemeinschaft & Kontakt



+1 -1  README_ES.md

  Al mismo tiempo, considera apoyar a Dify compartiéndolo en redes sociales y en eventos y conferencias.

- > Estamos buscando colaboradores para ayudar con la traducción de Dify a idiomas que no sean el mandarín o el inglés. Si estás interesado en ayudar, consulta el [README de i18n](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) para obtener más información y déjanos un comentario en el canal `global-users` de nuestro [Servidor de Comunidad en Discord](https://discord.gg/8Tpq4AcN9c).
+ > Estamos buscando colaboradores para ayudar con la traducción de Dify a idiomas que no sean el mandarín o el inglés. Si estás interesado en ayudar, consulta el [README de i18n](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) para obtener más información y déjanos un comentario en el canal `global-users` de nuestro [Servidor de Comunidad en Discord](https://discord.gg/8Tpq4AcN9c).

  **Contribuidores**



+1 -1  README_FR.md

  Dans le même temps, veuillez envisager de soutenir Dify en le partageant sur les réseaux sociaux et lors d'événements et de conférences.

- > Nous recherchons des contributeurs pour aider à traduire Dify dans des langues autres que le mandarin ou l'anglais. Si vous êtes intéressé à aider, veuillez consulter le [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) pour plus d'informations, et laissez-nous un commentaire dans le canal `global-users` de notre [Serveur communautaire Discord](https://discord.gg/8Tpq4AcN9c).
+ > Nous recherchons des contributeurs pour aider à traduire Dify dans des langues autres que le mandarin ou l'anglais. Si vous êtes intéressé à aider, veuillez consulter le [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) pour plus d'informations, et laissez-nous un commentaire dans le canal `global-users` de notre [Serveur communautaire Discord](https://discord.gg/8Tpq4AcN9c).

  **Contributeurs**



+1 -1  README_JA.md

  同時に、DifyをSNSやイベント、カンファレンスで共有してサポートしていただけると幸いです。

- > Difyを英語または中国語以外の言語に翻訳してくれる貢献者を募集しています。興味がある場合は、詳細については[i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md)を参照してください。また、[Discordコミュニティサーバー](https://discord.gg/8Tpq4AcN9c)の`global-users`チャンネルにコメントを残してください。
+ > Difyを英語または中国語以外の言語に翻訳してくれる貢献者を募集しています。興味がある場合は、詳細については[i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md)を参照してください。また、[Discordコミュニティサーバー](https://discord.gg/8Tpq4AcN9c)の`global-users`チャンネルにコメントを残してください。

  **貢献者**



+1 -1  README_KL.md

  At the same time, please consider supporting Dify by sharing it on social media and at events and conferences.

- > We are looking for contributors to help with translating Dify to languages other than Mandarin or English. If you are interested in helping, please see the [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) for more information, and leave us a comment in the `global-users` channel of our [Discord Community Server](https://discord.gg/8Tpq4AcN9c).
+ > We are looking for contributors to help with translating Dify to languages other than Mandarin or English. If you are interested in helping, please see the [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) for more information, and leave us a comment in the `global-users` channel of our [Discord Community Server](https://discord.gg/8Tpq4AcN9c).

  **Contributors**



+1 -1  README_KR.md

  동시에 Dify를 소셜 미디어와 행사 및 컨퍼런스에 공유하여 지원하는 것을 고려해 주시기 바랍니다.

- > 우리는 Dify를 중국어나 영어 이외의 언어로 번역하는 데 도움을 줄 수 있는 기여자를 찾고 있습니다. 도움을 주고 싶으시다면 [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md)에서 더 많은 정보를 확인하시고 [Discord 커뮤니티 서버](https://discord.gg/8Tpq4AcN9c)의 `global-users` 채널에 댓글을 남겨주세요.
+ > 우리는 Dify를 중국어나 영어 이외의 언어로 번역하는 데 도움을 줄 수 있는 기여자를 찾고 있습니다. 도움을 주고 싶으시다면 [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md)에서 더 많은 정보를 확인하시고 [Discord 커뮤니티 서버](https://discord.gg/8Tpq4AcN9c)의 `global-users` 채널에 댓글을 남겨주세요.

  **기여자**



+1 -1  README_PT.md

  Para aqueles que desejam contribuir com código, veja nosso [Guia de Contribuição](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
  Ao mesmo tempo, considere apoiar o Dify compartilhando-o nas redes sociais e em eventos e conferências.

- > Estamos buscando contribuidores para ajudar na tradução do Dify para idiomas além de Mandarim e Inglês. Se você tiver interesse em ajudar, consulte o [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) para mais informações e deixe-nos um comentário no canal `global-users` em nosso [Servidor da Comunidade no Discord](https://discord.gg/8Tpq4AcN9c).
+ > Estamos buscando contribuidores para ajudar na tradução do Dify para idiomas além de Mandarim e Inglês. Se você tiver interesse em ajudar, consulte o [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) para mais informações e deixe-nos um comentário no canal `global-users` em nosso [Servidor da Comunidade no Discord](https://discord.gg/8Tpq4AcN9c).

  **Contribuidores**



+1 -1  README_TR.md

  Kod katkısında bulunmak isteyenler için [Katkı Kılavuzumuza](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) bakabilirsiniz.
  Aynı zamanda, lütfen Dify'ı sosyal medyada, etkinliklerde ve konferanslarda paylaşarak desteklemeyi düşünün.

- > Dify'ı Mandarin veya İngilizce dışındaki dillere çevirmemize yardımcı olacak katkıda bulunanlara ihtiyacımız var. Yardımcı olmakla ilgileniyorsanız, lütfen daha fazla bilgi için [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) dosyasına bakın ve [Discord Topluluk Sunucumuzdaki](https://discord.gg/8Tpq4AcN9c) `global-users` kanalında bize bir yorum bırakın.
+ > Dify'ı Mandarin veya İngilizce dışındaki dillere çevirmemize yardımcı olacak katkıda bulunanlara ihtiyacımız var. Yardımcı olmakla ilgileniyorsanız, lütfen daha fazla bilgi için [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) dosyasına bakın ve [Discord Topluluk Sunucumuzdaki](https://discord.gg/8Tpq4AcN9c) `global-users` kanalında bize bir yorum bırakın.

  **Katkıda Bulunanlar**



+1 -1  README_TW.md

  對於想要貢獻程式碼的開發者,請參閱我們的[貢獻指南](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)。
  同時,也請考慮透過在社群媒體和各種活動與會議上分享 Dify 來支持我們。

- > 我們正在尋找貢獻者協助將 Dify 翻譯成中文和英文以外的語言。如果您有興趣幫忙,請查看 [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) 獲取更多資訊,並在我們的 [Discord 社群伺服器](https://discord.gg/8Tpq4AcN9c) 的 `global-users` 頻道留言給我們。
+ > 我們正在尋找貢獻者協助將 Dify 翻譯成中文和英文以外的語言。如果您有興趣幫忙,請查看 [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) 獲取更多資訊,並在我們的 [Discord 社群伺服器](https://discord.gg/8Tpq4AcN9c) 的 `global-users` 頻道留言給我們。

  ## 社群與聯絡方式



+1 -1  README_VI.md

  Đồng thời, vui lòng xem xét hỗ trợ Dify bằng cách chia sẻ nó trên mạng xã hội và tại các sự kiện và hội nghị.

- > Chúng tôi đang tìm kiếm người đóng góp để giúp dịch Dify sang các ngôn ngữ khác ngoài tiếng Trung hoặc tiếng Anh. Nếu bạn quan tâm đến việc giúp đỡ, vui lòng xem [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) để biết thêm thông tin và để lại bình luận cho chúng tôi trong kênh `global-users` của [Máy chủ Cộng đồng Discord](https://discord.gg/8Tpq4AcN9c) của chúng tôi.
+ > Chúng tôi đang tìm kiếm người đóng góp để giúp dịch Dify sang các ngôn ngữ khác ngoài tiếng Trung hoặc tiếng Anh. Nếu bạn quan tâm đến việc giúp đỡ, vui lòng xem [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) để biết thêm thông tin và để lại bình luận cho chúng tôi trong kênh `global-users` của [Máy chủ Cộng đồng Discord](https://discord.gg/8Tpq4AcN9c) của chúng tôi.

  **Người đóng góp**



+50 -2  api/configs/middleware/vdb/elasticsearch_config.py

  from typing import Optional

- from pydantic import Field, PositiveInt
+ from pydantic import Field, PositiveInt, model_validator
  from pydantic_settings import BaseSettings


  class ElasticsearchConfig(BaseSettings):
      """
-     Configuration settings for Elasticsearch
+     Configuration settings for both self-managed and Elastic Cloud deployments.
+     Can load from environment variables or .env files.
      """

      ELASTICSEARCH_HOST: Optional[str] = Field(
          description="Password for authenticating with Elasticsearch (default is 'elastic')",
          default="elastic",
      )

+     # Elastic Cloud (optional)
+     ELASTICSEARCH_USE_CLOUD: Optional[bool] = Field(
+         description="Set to True to use Elastic Cloud instead of self-hosted Elasticsearch", default=False
+     )
+     ELASTICSEARCH_CLOUD_URL: Optional[str] = Field(
+         description="Full URL for Elastic Cloud deployment (e.g., 'https://example.es.region.aws.found.io:443')",
+         default=None,
+     )
+     ELASTICSEARCH_API_KEY: Optional[str] = Field(
+         description="API key for authenticating with Elastic Cloud", default=None
+     )
+
+     # Common options
+     ELASTICSEARCH_CA_CERTS: Optional[str] = Field(
+         description="Path to CA certificate file for SSL verification", default=None
+     )
+     ELASTICSEARCH_VERIFY_CERTS: bool = Field(
+         description="Whether to verify SSL certificates (default is False)", default=False
+     )
+     ELASTICSEARCH_REQUEST_TIMEOUT: int = Field(
+         description="Request timeout in milliseconds (default is 100000)", default=100000
+     )
+     ELASTICSEARCH_RETRY_ON_TIMEOUT: bool = Field(
+         description="Whether to retry requests on timeout (default is True)", default=True
+     )
+     ELASTICSEARCH_MAX_RETRIES: int = Field(
+         description="Maximum number of retry attempts (default is 10000)", default=10000
+     )
+
+     @model_validator(mode="after")
+     def validate_elasticsearch_config(self):
+         """Validate Elasticsearch configuration based on deployment type."""
+         if self.ELASTICSEARCH_USE_CLOUD:
+             if not self.ELASTICSEARCH_CLOUD_URL:
+                 raise ValueError("ELASTICSEARCH_CLOUD_URL is required when using Elastic Cloud")
+             if not self.ELASTICSEARCH_API_KEY:
+                 raise ValueError("ELASTICSEARCH_API_KEY is required when using Elastic Cloud")
+         else:
+             if not self.ELASTICSEARCH_HOST:
+                 raise ValueError("ELASTICSEARCH_HOST is required for self-hosted Elasticsearch")
+             if not self.ELASTICSEARCH_USERNAME:
+                 raise ValueError("ELASTICSEARCH_USERNAME is required for self-hosted Elasticsearch")
+             if not self.ELASTICSEARCH_PASSWORD:
+                 raise ValueError("ELASTICSEARCH_PASSWORD is required for self-hosted Elasticsearch")
+
+         return self
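
As a quick illustration of the new deployment-type validation (a minimal sketch, assuming ElasticsearchConfig is importable as defined above; the USERNAME/PASSWORD fields referenced by the validator are defined elsewhere in the file, and all values below are illustrative placeholders):

    # Sketch: cloud mode without an API key is rejected by validate_elasticsearch_config.
    from pydantic import ValidationError

    try:
        ElasticsearchConfig(
            ELASTICSEARCH_USE_CLOUD=True,
            ELASTICSEARCH_CLOUD_URL="https://example.es.region.aws.found.io:443",
        )
    except ValidationError as e:
        print(e)  # "ELASTICSEARCH_API_KEY is required when using Elastic Cloud"

    # Self-hosted mode instead requires host, username, and password.
    ElasticsearchConfig(
        ELASTICSEARCH_USE_CLOUD=False,
        ELASTICSEARCH_HOST="http://localhost:9200",
        ELASTICSEARCH_USERNAME="elastic",
        ELASTICSEARCH_PASSWORD="elastic",
    )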

+36 -21  api/controllers/console/app/annotation.py

          return {"job_id": job_id, "job_status": job_status, "error_msg": error_msg}, 200


- class AnnotationListApi(Resource):
+ class AnnotationApi(Resource):
      @setup_required
      @login_required
      @account_initialization_required

      @setup_required
      @login_required
      @account_initialization_required
-     def delete(self, app_id):
+     @cloud_edition_billing_resource_check("annotation")
+     @marshal_with(annotation_fields)
+     def post(self, app_id):
          if not current_user.is_editor:
              raise Forbidden()

          app_id = str(app_id)
-         AppAnnotationService.clear_all_annotations(app_id)
-         return {"result": "success"}, 204
+         parser = reqparse.RequestParser()
+         parser.add_argument("question", required=True, type=str, location="json")
+         parser.add_argument("answer", required=True, type=str, location="json")
+         args = parser.parse_args()
+         annotation = AppAnnotationService.insert_app_annotation_directly(args, app_id)
+         return annotation


- class AnnotationExportApi(Resource):
      @setup_required
      @login_required
      @account_initialization_required
-     def get(self, app_id):
+     def delete(self, app_id):
          if not current_user.is_editor:
              raise Forbidden()

          app_id = str(app_id)
-         annotation_list = AppAnnotationService.export_annotation_list_by_app_id(app_id)
-         response = {"data": marshal(annotation_list, annotation_fields)}
-         return response, 200
+         # Use request.args.getlist to get annotation_ids array directly
+         annotation_ids = request.args.getlist("annotation_id")
+
+         # If annotation_ids are provided, handle batch deletion
+         if annotation_ids:
+             # Check if any annotation_ids contain empty strings or invalid values
+             if not all(annotation_id.strip() for annotation_id in annotation_ids if annotation_id):
+                 return {
+                     "code": "bad_request",
+                     "message": "annotation_ids are required if the parameter is provided.",
+                 }, 400
+
+             result = AppAnnotationService.delete_app_annotations_in_batch(app_id, annotation_ids)
+             return result, 204
+         # If no annotation_ids are provided, handle clearing all annotations
+         else:
+             AppAnnotationService.clear_all_annotations(app_id)
+             return {"result": "success"}, 204


- class AnnotationCreateApi(Resource):
+ class AnnotationExportApi(Resource):
      @setup_required
      @login_required
      @account_initialization_required
-     @cloud_edition_billing_resource_check("annotation")
-     @marshal_with(annotation_fields)
-     def post(self, app_id):
+     def get(self, app_id):
          if not current_user.is_editor:
              raise Forbidden()

          app_id = str(app_id)
-         parser = reqparse.RequestParser()
-         parser.add_argument("question", required=True, type=str, location="json")
-         parser.add_argument("answer", required=True, type=str, location="json")
-         args = parser.parse_args()
-         annotation = AppAnnotationService.insert_app_annotation_directly(args, app_id)
-         return annotation
+         annotation_list = AppAnnotationService.export_annotation_list_by_app_id(app_id)
+         response = {"data": marshal(annotation_list, annotation_fields)}
+         return response, 200


  class AnnotationUpdateDeleteApi(Resource):

  api.add_resource(
      AnnotationReplyActionStatusApi, "/apps/<uuid:app_id>/annotation-reply/<string:action>/status/<uuid:job_id>"
  )
- api.add_resource(AnnotationListApi, "/apps/<uuid:app_id>/annotations")
+ api.add_resource(AnnotationApi, "/apps/<uuid:app_id>/annotations")
  api.add_resource(AnnotationExportApi, "/apps/<uuid:app_id>/annotations/export")
  api.add_resource(AnnotationUpdateDeleteApi, "/apps/<uuid:app_id>/annotations/<uuid:annotation_id>")
  api.add_resource(AnnotationBatchImportApi, "/apps/<uuid:app_id>/annotations/batch-import")
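
How a client might hit the reworked delete route (a minimal sketch; host, token, and IDs are hypothetical placeholders, but the repeated `annotation_id` query parameter matches the `request.args.getlist("annotation_id")` call in the diff above):

    import requests

    BASE = "https://dify.example.com/console/api"          # hypothetical host
    headers = {"Authorization": "Bearer <console-token>"}  # hypothetical auth

    # Repeated annotation_id params trigger batch deletion (-> 204).
    requests.delete(
        f"{BASE}/apps/<app-id>/annotations",
        params=[("annotation_id", "id-1"), ("annotation_id", "id-2")],
        headers=headers,
    )

    # Omitting annotation_id falls through to clear_all_annotations (-> 204).
    requests.delete(f"{BASE}/apps/<app-id>/annotations", headers=headers)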

+4 -4  api/controllers/console/datasets/metadata.py

      @marshal_with(dataset_metadata_fields)
      def post(self, dataset_id):
          parser = reqparse.RequestParser()
-         parser.add_argument("type", type=str, required=True, nullable=True, location="json")
-         parser.add_argument("name", type=str, required=True, nullable=True, location="json")
+         parser.add_argument("type", type=str, required=True, nullable=False, location="json")
+         parser.add_argument("name", type=str, required=True, nullable=False, location="json")
          args = parser.parse_args()
          metadata_args = MetadataArgs(**args)

      @marshal_with(dataset_metadata_fields)
      def patch(self, dataset_id, metadata_id):
          parser = reqparse.RequestParser()
-         parser.add_argument("name", type=str, required=True, nullable=True, location="json")
+         parser.add_argument("name", type=str, required=True, nullable=False, location="json")
          args = parser.parse_args()

          dataset_id_str = str(dataset_id)

          DatasetService.check_dataset_permission(dataset, current_user)

          parser = reqparse.RequestParser()
-         parser.add_argument("operation_data", type=list, required=True, nullable=True, location="json")
+         parser.add_argument("operation_data", type=list, required=True, nullable=False, location="json")
          args = parser.parse_args()
          metadata_args = MetadataOperationData(**args)
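
The practical effect of flipping `nullable` to False in flask-restful's reqparse: an explicit JSON null for a required field now fails with 400 Bad Request instead of arriving as None (a minimal standalone sketch, not Dify's actual wiring; the app and payload are illustrative):

    # Sketch: reqparse rejects explicit nulls once nullable=False is set.
    from flask import Flask
    from flask_restful import reqparse
    from werkzeug.exceptions import BadRequest

    app = Flask(__name__)

    with app.test_request_context(json={"name": None}):
        parser = reqparse.RequestParser()
        parser.add_argument("name", type=str, required=True, nullable=False, location="json")
        try:
            parser.parse_args()
        except BadRequest as e:
            print(e)  # 400 Bad Request: the name field must not be null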



+3 -0  api/controllers/service_api/app/completion.py

      parser.add_argument("retriever_from", type=str, required=False, default="dev", location="json")

      args = parser.parse_args()
+     external_trace_id = get_external_trace_id(request)
+     if external_trace_id:
+         args["external_trace_id"] = external_trace_id

      streaming = args["response_mode"] == "streaming"
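
The pattern here: an optional, externally supplied trace identifier is lifted off the incoming request and threaded into args so downstream tracing can adopt it. How get_external_trace_id extracts the value (header, query string, or body field) is not shown in this diff; the stand-in below assumes a header purely for illustration:

    # Hypothetical stand-in for get_external_trace_id; Dify's real helper may
    # read a different field. Shown only to make the control flow concrete.
    from typing import Optional
    from flask import Request

    def get_external_trace_id_stub(request: Request) -> Optional[str]:
        return request.headers.get("X-Trace-Id")  # assumed header name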



+4 -4  api/controllers/service_api/dataset/metadata.py

      @cloud_edition_billing_rate_limit_check("knowledge", "dataset")
      def post(self, tenant_id, dataset_id):
          parser = reqparse.RequestParser()
-         parser.add_argument("type", type=str, required=True, nullable=True, location="json")
-         parser.add_argument("name", type=str, required=True, nullable=True, location="json")
+         parser.add_argument("type", type=str, required=True, nullable=False, location="json")
+         parser.add_argument("name", type=str, required=True, nullable=False, location="json")
          args = parser.parse_args()
          metadata_args = MetadataArgs(**args)

      @cloud_edition_billing_rate_limit_check("knowledge", "dataset")
      def patch(self, tenant_id, dataset_id, metadata_id):
          parser = reqparse.RequestParser()
-         parser.add_argument("name", type=str, required=True, nullable=True, location="json")
+         parser.add_argument("name", type=str, required=True, nullable=False, location="json")
          args = parser.parse_args()

          dataset_id_str = str(dataset_id)

          DatasetService.check_dataset_permission(dataset, current_user)

          parser = reqparse.RequestParser()
-         parser.add_argument("operation_data", type=list, required=True, nullable=True, location="json")
+         parser.add_argument("operation_data", type=list, required=True, nullable=False, location="json")
          args = parser.parse_args()
          metadata_args = MetadataOperationData(**args)



+2 -0  api/core/app/app_config/entities.py

      "is not",
      "empty",
      "not empty",
+     "in",
+     "not in",
      # for number
      "=",
      "≠",

+23 -5  api/core/ops/aliyun_trace/aliyun_trace.py

  from core.ops.aliyun_trace.data_exporter.traceclient import (
      TraceClient,
      convert_datetime_to_nanoseconds,
+     convert_string_to_id,
      convert_to_span_id,
      convert_to_trace_id,
      generate_span_id,

          raise ValueError(f"Aliyun get run url failed: {str(e)}")

      def workflow_trace(self, trace_info: WorkflowTraceInfo):
-         external_trace_id = trace_info.metadata.get("external_trace_id")
-         trace_id = external_trace_id or convert_to_trace_id(trace_info.workflow_run_id)
+         trace_id = convert_to_trace_id(trace_info.workflow_run_id)
+         if trace_info.trace_id:
+             trace_id = convert_string_to_id(trace_info.trace_id)
          workflow_span_id = convert_to_span_id(trace_info.workflow_run_id, "workflow")
          self.add_workflow_span(trace_id, workflow_span_id, trace_info)

              status = Status(StatusCode.ERROR, trace_info.error)

          trace_id = convert_to_trace_id(message_id)
+         if trace_info.trace_id:
+             trace_id = convert_string_to_id(trace_info.trace_id)
+
          message_span_id = convert_to_span_id(message_id, "message")
          message_span = SpanData(
              trace_id=trace_id,

              return
          message_id = trace_info.message_id

+         trace_id = convert_to_trace_id(message_id)
+         if trace_info.trace_id:
+             trace_id = convert_string_to_id(trace_info.trace_id)
+
          documents_data = extract_retrieval_documents(trace_info.documents)
          dataset_retrieval_span = SpanData(
-             trace_id=convert_to_trace_id(message_id),
+             trace_id=trace_id,
              parent_span_id=convert_to_span_id(message_id, "message"),
              span_id=generate_span_id(),
              name="dataset_retrieval",

          if trace_info.error:
              status = Status(StatusCode.ERROR, trace_info.error)

+         trace_id = convert_to_trace_id(message_id)
+         if trace_info.trace_id:
+             trace_id = convert_string_to_id(trace_info.trace_id)
+
          tool_span = SpanData(
-             trace_id=convert_to_trace_id(message_id),
+             trace_id=trace_id,
              parent_span_id=convert_to_span_id(message_id, "message"),
              span_id=generate_span_id(),
              name=trace_info.tool_name,

          status: Status = Status(StatusCode.OK)
          if trace_info.error:
              status = Status(StatusCode.ERROR, trace_info.error)

+         trace_id = convert_to_trace_id(message_id)
+         if trace_info.trace_id:
+             trace_id = convert_string_to_id(trace_info.trace_id)
+
          suggested_question_span = SpanData(
-             trace_id=convert_to_trace_id(message_id),
+             trace_id=trace_id,
              parent_span_id=convert_to_span_id(message_id, "message"),
              span_id=convert_to_span_id(message_id, "suggested_question"),
              name="suggested_question",

+9 -3  api/core/ops/aliyun_trace/data_exporter/traceclient.py

          raise ValueError(f"Invalid UUID input: {e}")


+ def convert_string_to_id(string: Optional[str]) -> int:
+     if not string:
+         return generate_span_id()
+     hash_bytes = hashlib.sha256(string.encode("utf-8")).digest()
+     id = int.from_bytes(hash_bytes[:8], byteorder="big", signed=False)
+     return id
+
+
  def convert_to_span_id(uuid_v4: Optional[str], span_type: str) -> int:
      try:
          uuid_obj = uuid.UUID(uuid_v4)
      except Exception as e:
          raise ValueError(f"Invalid UUID input: {e}")
      combined_key = f"{uuid_obj.hex}-{span_type}"
-     hash_bytes = hashlib.sha256(combined_key.encode("utf-8")).digest()
-     span_id = int.from_bytes(hash_bytes[:8], byteorder="big", signed=False)
-     return span_id
+     return convert_string_to_id(combined_key)


  def convert_datetime_to_nanoseconds(start_time_a: Optional[datetime]) -> Optional[int]:
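
The derivation is easy to reproduce standalone (a sketch mirroring convert_string_to_id above; only the standard library is needed, and the input string is illustrative):

    # SHA-256 the string, keep the first 8 bytes as an unsigned big-endian
    # integer: equal inputs always map to equal 64-bit IDs.
    import hashlib

    def string_to_id64(s: str) -> int:
        digest = hashlib.sha256(s.encode("utf-8")).digest()
        return int.from_bytes(digest[:8], byteorder="big", signed=False)

    assert string_to_id64("abc-workflow") == string_to_id64("abc-workflow")
    print(hex(string_to_id64("abc-workflow")))  # stable 64-bit span/trace ID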

+25 -14  api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py

  import os
  from datetime import datetime, timedelta
  from typing import Any, Optional, Union, cast
+ from urllib.parse import urlparse

  from openinference.semconv.trace import OpenInferenceSpanKindValues, SpanAttributes
  from opentelemetry import trace

          try:
              # Choose the appropriate exporter based on config type
              exporter: Union[GrpcOTLPSpanExporter, HttpOTLPSpanExporter]
+
+             # Inspect the provided endpoint to determine its structure
+             parsed = urlparse(arize_phoenix_config.endpoint)
+             base_endpoint = f"{parsed.scheme}://{parsed.netloc}"
+             path = parsed.path.rstrip("/")
+
              if isinstance(arize_phoenix_config, ArizeConfig):
-                 arize_endpoint = f"{arize_phoenix_config.endpoint}/v1"
+                 arize_endpoint = f"{base_endpoint}/v1"
                  arize_headers = {
                      "api_key": arize_phoenix_config.api_key or "",
                      "space_id": arize_phoenix_config.space_id or "",

                      timeout=30,
                  )
              else:
-                 phoenix_endpoint = f"{arize_phoenix_config.endpoint}/v1/traces"
+                 phoenix_endpoint = f"{base_endpoint}{path}/v1/traces"
                  phoenix_headers = {
                      "api_key": arize_phoenix_config.api_key or "",
                      "authorization": f"Bearer {arize_phoenix_config.api_key or ''}",

      return int(dt.timestamp() * 1_000_000_000)


- def uuid_to_trace_id(string: Optional[str]) -> int:
-     """Convert UUID string to a valid trace ID (16-byte integer)."""
+ def string_to_trace_id128(string: Optional[str]) -> int:
+     """
+     Convert any input string into a stable 128-bit integer trace ID.
+
+     This uses SHA-256 hashing and takes the first 16 bytes (128 bits) of the digest.
+     It's suitable for generating consistent, unique identifiers from strings.
+     """
      if string is None:
          string = ""
      hash_object = hashlib.sha256(string.encode())

-     # Take the first 16 bytes (128 bits) of the hash
+     # Take the first 16 bytes (128 bits) of the hash digest
      digest = hash_object.digest()[:16]

-     # Convert to integer (128 bits)
+     # Convert to a 128-bit integer
      return int.from_bytes(digest, byteorder="big")


          }
          workflow_metadata.update(trace_info.metadata)

-         external_trace_id = trace_info.metadata.get("external_trace_id")
-         trace_id = external_trace_id or uuid_to_trace_id(trace_info.workflow_run_id)
+         trace_id = string_to_trace_id128(trace_info.trace_id or trace_info.workflow_run_id)
          span_id = RandomIdGenerator().generate_span_id()
          context = SpanContext(
              trace_id=trace_id,

              SpanAttributes.SESSION_ID: trace_info.message_data.conversation_id,
          }

-         trace_id = uuid_to_trace_id(trace_info.message_id)
+         trace_id = string_to_trace_id128(trace_info.trace_id or trace_info.message_id)
          message_span_id = RandomIdGenerator().generate_span_id()
          span_context = SpanContext(
              trace_id=trace_id,

          }
          metadata.update(trace_info.metadata)

-         trace_id = uuid_to_trace_id(trace_info.message_id)
+         trace_id = string_to_trace_id128(trace_info.message_id)
          span_id = RandomIdGenerator().generate_span_id()
          context = SpanContext(
              trace_id=trace_id,

          }
          metadata.update(trace_info.metadata)

-         trace_id = uuid_to_trace_id(trace_info.message_id)
+         trace_id = string_to_trace_id128(trace_info.message_id)
          span_id = RandomIdGenerator().generate_span_id()
          context = SpanContext(
              trace_id=trace_id,

          }
          metadata.update(trace_info.metadata)

-         trace_id = uuid_to_trace_id(trace_info.message_id)
+         trace_id = string_to_trace_id128(trace_info.message_id)
          span_id = RandomIdGenerator().generate_span_id()
          context = SpanContext(
              trace_id=trace_id,

              "tool_config": json.dumps(trace_info.tool_config, ensure_ascii=False),
          }

-         trace_id = uuid_to_trace_id(trace_info.message_id)
+         trace_id = string_to_trace_id128(trace_info.message_id)
          tool_span_id = RandomIdGenerator().generate_span_id()
          logger.info("[Arize/Phoenix] Creating tool trace with trace_id: %s, span_id: %s", trace_id, tool_span_id)

          }
          metadata.update(trace_info.metadata)

-         trace_id = uuid_to_trace_id(trace_info.message_id)
+         trace_id = string_to_trace_id128(trace_info.message_id)
          span_id = RandomIdGenerator().generate_span_id()
          context = SpanContext(
              trace_id=trace_id,
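
To see what the urlparse-based endpoint handling above buys, a small sketch (endpoint values are illustrative): the scheme and host form the base, and any path prefix is preserved for Phoenix-style deployments served under a subpath.

    from urllib.parse import urlparse

    for endpoint in ("https://app.phoenix.arize.com", "https://phoenix.example.com/my-instance/"):
        parsed = urlparse(endpoint)
        base_endpoint = f"{parsed.scheme}://{parsed.netloc}"
        path = parsed.path.rstrip("/")
        print(f"{base_endpoint}{path}/v1/traces")
    # https://app.phoenix.arize.com/v1/traces
    # https://phoenix.example.com/my-instance/v1/traces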

+1 -1  api/core/ops/entities/config_entity.py

      @field_validator("endpoint")
      @classmethod
      def endpoint_validator(cls, v, info: ValidationInfo):
-         return cls.validate_endpoint_url(v, "https://app.phoenix.arize.com")
+         return validate_url_with_path(v, "https://app.phoenix.arize.com")


  class LangfuseConfig(BaseTracingConfig):

+1 -0  api/core/ops/entities/trace_entity.py

      start_time: Optional[datetime] = None
      end_time: Optional[datetime] = None
      metadata: dict[str, Any]
+     trace_id: Optional[str] = None

      @field_validator("inputs", "outputs")
      @classmethod

+10 -9  api/core/ops/langfuse_trace/langfuse_trace.py

          self.generate_name_trace(trace_info)

      def workflow_trace(self, trace_info: WorkflowTraceInfo):
-         external_trace_id = trace_info.metadata.get("external_trace_id")
-         trace_id = external_trace_id or trace_info.workflow_run_id
+         trace_id = trace_info.trace_id or trace_info.workflow_run_id
          user_id = trace_info.metadata.get("user_id")
          metadata = trace_info.metadata
          metadata["workflow_app_log_id"] = trace_info.workflow_app_log_id

          if trace_info.message_id:
-             trace_id = external_trace_id or trace_info.message_id
+             trace_id = trace_info.trace_id or trace_info.message_id
              name = TraceTaskName.MESSAGE_TRACE.value
              trace_data = LangfuseTrace(
                  id=trace_id,

              user_id = end_user_data.session_id
              metadata["user_id"] = user_id

+         trace_id = trace_info.trace_id or message_id
+
          trace_data = LangfuseTrace(
-             id=message_id,
+             id=trace_id,
              user_id=user_id,
              name=TraceTaskName.MESSAGE_TRACE.value,
              input={

          langfuse_generation_data = LangfuseGeneration(
              name="llm",
-             trace_id=message_id,
+             trace_id=trace_id,
              start_time=trace_info.start_time,
              end_time=trace_info.end_time,
              model=message_data.model_id,

              "preset_response": trace_info.preset_response,
              "inputs": trace_info.inputs,
          },
-         trace_id=trace_info.message_id,
+         trace_id=trace_info.trace_id or trace_info.message_id,
          start_time=trace_info.start_time or trace_info.message_data.created_at,
          end_time=trace_info.end_time or trace_info.message_data.created_at,
          metadata=trace_info.metadata,

          name=TraceTaskName.SUGGESTED_QUESTION_TRACE.value,
          input=trace_info.inputs,
          output=str(trace_info.suggested_question),
-         trace_id=trace_info.message_id,
+         trace_id=trace_info.trace_id or trace_info.message_id,
          start_time=trace_info.start_time,
          end_time=trace_info.end_time,
          metadata=trace_info.metadata,

          name=TraceTaskName.DATASET_RETRIEVAL_TRACE.value,
          input=trace_info.inputs,
          output={"documents": trace_info.documents},
-         trace_id=trace_info.message_id,
+         trace_id=trace_info.trace_id or trace_info.message_id,
          start_time=trace_info.start_time or trace_info.message_data.created_at,
          end_time=trace_info.end_time or trace_info.message_data.updated_at,
          metadata=trace_info.metadata,

          name=trace_info.tool_name,
          input=trace_info.tool_inputs,
          output=trace_info.tool_outputs,
-         trace_id=trace_info.message_id,
+         trace_id=trace_info.trace_id or trace_info.message_id,
          start_time=trace_info.start_time,
          end_time=trace_info.end_time,
          metadata=trace_info.metadata,

+8 -9  api/core/ops/langsmith_trace/langsmith_trace.py

          self.generate_name_trace(trace_info)

      def workflow_trace(self, trace_info: WorkflowTraceInfo):
-         external_trace_id = trace_info.metadata.get("external_trace_id")
-         trace_id = external_trace_id or trace_info.message_id or trace_info.workflow_run_id
+         trace_id = trace_info.trace_id or trace_info.message_id or trace_info.workflow_run_id
          if trace_info.start_time is None:
              trace_info.start_time = datetime.now()
          message_dotted_order = (

          reference_example_id=None,
          input_attachments={},
          output_attachments={},
-         trace_id=None,
+         trace_id=trace_info.trace_id,
          dotted_order=None,
          parent_run_id=None,
      )

          reference_example_id=None,
          input_attachments={},
          output_attachments={},
-         trace_id=None,
+         trace_id=trace_info.trace_id,
          dotted_order=None,
          id=str(uuid.uuid4()),
      )

          reference_example_id=None,
          input_attachments={},
          output_attachments={},
-         trace_id=None,
+         trace_id=trace_info.trace_id,
          dotted_order=None,
          error="",
          file_list=[],

          reference_example_id=None,
          input_attachments={},
          output_attachments={},
-         trace_id=None,
+         trace_id=trace_info.trace_id,
          dotted_order=None,
          error="",
          file_list=[],

          reference_example_id=None,
          input_attachments={},
          output_attachments={},
-         trace_id=None,
+         trace_id=trace_info.trace_id,
          dotted_order=None,
          error="",
          file_list=[],

          reference_example_id=None,
          input_attachments={},
          output_attachments={},
-         trace_id=None,
+         trace_id=trace_info.trace_id,
          dotted_order=None,
          error=trace_info.error or "",
      )

          reference_example_id=None,
          input_attachments={},
          output_attachments={},
-         trace_id=None,
+         trace_id=trace_info.trace_id,
          dotted_order=None,
          error="",
          file_list=[],

+9 -10  api/core/ops/opik_trace/opik_trace.py

          self.generate_name_trace(trace_info)

      def workflow_trace(self, trace_info: WorkflowTraceInfo):
-         external_trace_id = trace_info.metadata.get("external_trace_id")
-         dify_trace_id = external_trace_id or trace_info.workflow_run_id
+         dify_trace_id = trace_info.trace_id or trace_info.workflow_run_id
          opik_trace_id = prepare_opik_uuid(trace_info.start_time, dify_trace_id)
          workflow_metadata = wrap_metadata(
              trace_info.metadata, message_id=trace_info.message_id, workflow_app_log_id=trace_info.workflow_app_log_id

          root_span_id = None

          if trace_info.message_id:
-             dify_trace_id = external_trace_id or trace_info.message_id
+             dify_trace_id = trace_info.trace_id or trace_info.message_id
              opik_trace_id = prepare_opik_uuid(trace_info.start_time, dify_trace_id)

              trace_data = {

              return

          metadata = trace_info.metadata
-         message_id = trace_info.message_id
+         dify_trace_id = trace_info.trace_id or trace_info.message_id

          user_id = message_data.from_account_id
          metadata["user_id"] = user_id
          metadata["end_user_id"] = end_user_id

          trace_data = {
-             "id": prepare_opik_uuid(trace_info.start_time, message_id),
+             "id": prepare_opik_uuid(trace_info.start_time, dify_trace_id),
              "name": TraceTaskName.MESSAGE_TRACE.value,
              "start_time": trace_info.start_time,
              "end_time": trace_info.end_time,

          start_time = trace_info.start_time or trace_info.message_data.created_at

          span_data = {
-             "trace_id": prepare_opik_uuid(start_time, trace_info.message_id),
+             "trace_id": prepare_opik_uuid(start_time, trace_info.trace_id or trace_info.message_id),
              "name": TraceTaskName.MODERATION_TRACE.value,
              "type": "tool",
              "start_time": start_time,

          start_time = trace_info.start_time or message_data.created_at

          span_data = {
-             "trace_id": prepare_opik_uuid(start_time, trace_info.message_id),
+             "trace_id": prepare_opik_uuid(start_time, trace_info.trace_id or trace_info.message_id),
              "name": TraceTaskName.SUGGESTED_QUESTION_TRACE.value,
              "type": "tool",
              "start_time": start_time,

          start_time = trace_info.start_time or trace_info.message_data.created_at

          span_data = {
-             "trace_id": prepare_opik_uuid(start_time, trace_info.message_id),
+             "trace_id": prepare_opik_uuid(start_time, trace_info.trace_id or trace_info.message_id),
              "name": TraceTaskName.DATASET_RETRIEVAL_TRACE.value,
              "type": "tool",
              "start_time": start_time,

      def tool_trace(self, trace_info: ToolTraceInfo):
          span_data = {
-             "trace_id": prepare_opik_uuid(trace_info.start_time, trace_info.message_id),
+             "trace_id": prepare_opik_uuid(trace_info.start_time, trace_info.trace_id or trace_info.message_id),
              "name": trace_info.tool_name,
              "type": "tool",
              "start_time": trace_info.start_time,

      def generate_name_trace(self, trace_info: GenerateNameTraceInfo):
          trace_data = {
-             "id": prepare_opik_uuid(trace_info.start_time, trace_info.message_id),
+             "id": prepare_opik_uuid(trace_info.start_time, trace_info.trace_id or trace_info.message_id),
              "name": TraceTaskName.GENERATE_NAME_TRACE.value,
              "start_time": trace_info.start_time,
              "end_time": trace_info.end_time,

+ 11
- 5
api/core/ops/ops_trace_manager.py Vedi File

      self.timer = timer
      self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001")
      self.app_id = None
+     self.trace_id = None
      self.kwargs = kwargs
+     external_trace_id = kwargs.get("external_trace_id")
+     if external_trace_id:
+         self.trace_id = external_trace_id

  def execute(self):
      return self.preprocess()

          "app_id": workflow_run.app_id,
      }

+     external_trace_id = self.kwargs.get("external_trace_id")
+     if external_trace_id:
+         metadata["external_trace_id"] = external_trace_id

      workflow_trace_info = WorkflowTraceInfo(
+         trace_id=self.trace_id,
          workflow_data=workflow_run.to_dict(),
          conversation_id=conversation_id,
          workflow_id=workflow_id,

      message_tokens = message_data.message_tokens

      message_trace_info = MessageTraceInfo(
+         trace_id=self.trace_id,
          message_id=message_id,
          message_data=message_data.to_dict(),
          conversation_model=conversation_mode,

      workflow_app_log_id = str(workflow_app_log_data.id) if workflow_app_log_data else None

      moderation_trace_info = ModerationTraceInfo(
+         trace_id=self.trace_id,
          message_id=workflow_app_log_id or message_id,
          inputs=inputs,
          message_data=message_data.to_dict(),

      workflow_app_log_id = str(workflow_app_log_data.id) if workflow_app_log_data else None

      suggested_question_trace_info = SuggestedQuestionTraceInfo(
+         trace_id=self.trace_id,
          message_id=workflow_app_log_id or message_id,
          message_data=message_data.to_dict(),
          inputs=message_data.message,

      }

      dataset_retrieval_trace_info = DatasetRetrievalTraceInfo(
+         trace_id=self.trace_id,
          message_id=message_id,
          inputs=message_data.query or message_data.inputs,
          documents=[doc.model_dump() for doc in documents] if documents else [],
      )

      tool_trace_info = ToolTraceInfo(
+         trace_id=self.trace_id,
          message_id=message_id,
          message_data=message_data.to_dict(),
          tool_name=tool_name,

      }

      generate_name_trace_info = GenerateNameTraceInfo(
+         trace_id=self.trace_id,
          conversation_id=conversation_id,
          inputs=inputs,
          outputs=generate_conversation_name,
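
Note: the hunk above threads one id through every `*TraceInfo` payload, preferring an externally supplied `external_trace_id` from `kwargs`. A minimal standalone sketch of that propagation pattern, with `TraceTask` as a hypothetical stand-in for the real class:

from typing import Any, Optional

class TraceTask:
    def __init__(self, **kwargs: Any) -> None:
        self.kwargs = kwargs
        self.trace_id: Optional[str] = None
        # An id handed in by an upstream tracing system wins over any
        # internally generated one.
        external_trace_id = kwargs.get("external_trace_id")
        if external_trace_id:
            self.trace_id = external_trace_id

    def build_trace_info(self, message_id: str) -> dict[str, Any]:
        # Every payload carries the same trace_id, falling back to the
        # message id when no external id was provided.
        return {"trace_id": self.trace_id or message_id, "message_id": message_id}

task = TraceTask(external_trace_id="req-abc-123")
assert task.build_trace_info("msg-1")["trace_id"] == "req-abc-123"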

api/core/ops/weave_trace/weave_trace.py (+25, -10)

          self.generate_name_trace(trace_info)

  def workflow_trace(self, trace_info: WorkflowTraceInfo):
-     external_trace_id = trace_info.metadata.get("external_trace_id")
-     trace_id = external_trace_id or trace_info.message_id or trace_info.workflow_run_id
+     trace_id = trace_info.trace_id or trace_info.message_id or trace_info.workflow_run_id
      if trace_info.start_time is None:
          trace_info.start_time = datetime.now()

      attributes["start_time"] = trace_info.start_time
      attributes["end_time"] = trace_info.end_time
      attributes["tags"] = ["message", str(trace_info.conversation_mode)]

+     trace_id = trace_info.trace_id or message_id
+     attributes["trace_id"] = trace_id

      message_run = WeaveTraceModel(
-         id=message_id,
+         id=trace_id,
          op=str(TraceTaskName.MESSAGE_TRACE.value),
          input_tokens=trace_info.message_tokens,
          output_tokens=trace_info.answer_tokens,
      )
      self.start_call(
          llm_run,
-         parent_run_id=message_id,
+         parent_run_id=trace_id,
      )
      self.finish_call(llm_run)
      self.finish_call(message_run)

      attributes["start_time"] = trace_info.start_time or trace_info.message_data.created_at
      attributes["end_time"] = trace_info.end_time or trace_info.message_data.updated_at

+     trace_id = trace_info.trace_id or trace_info.message_id
+     attributes["trace_id"] = trace_id

      moderation_run = WeaveTraceModel(
          id=str(uuid.uuid4()),
          op=str(TraceTaskName.MODERATION_TRACE.value),
          exception=getattr(trace_info, "error", None),
          file_list=[],
      )
-     self.start_call(moderation_run, parent_run_id=trace_info.message_id)
+     self.start_call(moderation_run, parent_run_id=trace_id)
      self.finish_call(moderation_run)

  def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo):
      attributes["start_time"] = (trace_info.start_time or message_data.created_at,)
      attributes["end_time"] = (trace_info.end_time or message_data.updated_at,)

+     trace_id = trace_info.trace_id or trace_info.message_id
+     attributes["trace_id"] = trace_id

      suggested_question_run = WeaveTraceModel(
          id=str(uuid.uuid4()),
          op=str(TraceTaskName.SUGGESTED_QUESTION_TRACE.value),
          file_list=[],
      )

-     self.start_call(suggested_question_run, parent_run_id=trace_info.message_id)
+     self.start_call(suggested_question_run, parent_run_id=trace_id)
      self.finish_call(suggested_question_run)

  def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo):
      attributes["start_time"] = (trace_info.start_time or trace_info.message_data.created_at,)
      attributes["end_time"] = (trace_info.end_time or trace_info.message_data.updated_at,)

+     trace_id = trace_info.trace_id or trace_info.message_id
+     attributes["trace_id"] = trace_id

      dataset_retrieval_run = WeaveTraceModel(
          id=str(uuid.uuid4()),
          op=str(TraceTaskName.DATASET_RETRIEVAL_TRACE.value),
          file_list=[],
      )

-     self.start_call(dataset_retrieval_run, parent_run_id=trace_info.message_id)
+     self.start_call(dataset_retrieval_run, parent_run_id=trace_id)
      self.finish_call(dataset_retrieval_run)

  def tool_trace(self, trace_info: ToolTraceInfo):
      attributes["start_time"] = trace_info.start_time
      attributes["end_time"] = trace_info.end_time

+     message_id = trace_info.message_id or getattr(trace_info, "conversation_id", None)
+     message_id = message_id or None
+     trace_id = trace_info.trace_id or message_id
+     attributes["trace_id"] = trace_id

      tool_run = WeaveTraceModel(
          id=str(uuid.uuid4()),
          op=trace_info.tool_name,
          attributes=attributes,
          exception=trace_info.error,
      )
-     message_id = trace_info.message_id or getattr(trace_info, "conversation_id", None)
-     message_id = message_id or None
-     self.start_call(tool_run, parent_run_id=message_id)
+     self.start_call(tool_run, parent_run_id=trace_id)
      self.finish_call(tool_run)

  def generate_name_trace(self, trace_info: GenerateNameTraceInfo):

api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py (+149, -31)

  class ElasticSearchConfig(BaseModel):
-     host: str
-     port: int
-     username: str
-     password: str
+     # Regular Elasticsearch config
+     host: Optional[str] = None
+     port: Optional[int] = None
+     username: Optional[str] = None
+     password: Optional[str] = None
+
+     # Elastic Cloud specific config
+     cloud_url: Optional[str] = None  # Cloud URL for Elasticsearch Cloud
+     api_key: Optional[str] = None
+
+     # Common config
+     use_cloud: bool = False
+     ca_certs: Optional[str] = None
+     verify_certs: bool = False
+     request_timeout: int = 100000
+     retry_on_timeout: bool = True
+     max_retries: int = 10000

      @model_validator(mode="before")
      @classmethod
      def validate_config(cls, values: dict) -> dict:
-         if not values["host"]:
-             raise ValueError("config HOST is required")
-         if not values["port"]:
-             raise ValueError("config PORT is required")
-         if not values["username"]:
-             raise ValueError("config USERNAME is required")
-         if not values["password"]:
-             raise ValueError("config PASSWORD is required")
+         use_cloud = values.get("use_cloud", False)
+         cloud_url = values.get("cloud_url")
+
+         if use_cloud:
+             # Cloud configuration validation - requires cloud_url and api_key
+             if not cloud_url:
+                 raise ValueError("cloud_url is required for Elastic Cloud")
+             api_key = values.get("api_key")
+             if not api_key:
+                 raise ValueError("api_key is required for Elastic Cloud")
+         else:
+             # Regular Elasticsearch validation
+             if not values.get("host"):
+                 raise ValueError("config HOST is required for regular Elasticsearch")
+             if not values.get("port"):
+                 raise ValueError("config PORT is required for regular Elasticsearch")
+             if not values.get("username"):
+                 raise ValueError("config USERNAME is required for regular Elasticsearch")
+             if not values.get("password"):
+                 raise ValueError("config PASSWORD is required for regular Elasticsearch")
          return values

      self._attributes = attributes

  def _init_client(self, config: ElasticSearchConfig) -> Elasticsearch:
+     """
+     Initialize Elasticsearch client for both regular Elasticsearch and Elastic Cloud.
+     """
      try:
-         parsed_url = urlparse(config.host)
-         if parsed_url.scheme in {"http", "https"}:
-             hosts = f"{config.host}:{config.port}"
+         # Check if using Elastic Cloud
+         client_config: dict[str, Any]
+         if config.use_cloud and config.cloud_url:
+             client_config = {
+                 "request_timeout": config.request_timeout,
+                 "retry_on_timeout": config.retry_on_timeout,
+                 "max_retries": config.max_retries,
+                 "verify_certs": config.verify_certs,
+             }
+
+             # Parse cloud URL and configure hosts
+             parsed_url = urlparse(config.cloud_url)
+             host = f"{parsed_url.scheme}://{parsed_url.hostname}"
+             if parsed_url.port:
+                 host += f":{parsed_url.port}"
+             client_config["hosts"] = [host]
+
+             # API key authentication for cloud
+             client_config["api_key"] = config.api_key
+
+             # SSL settings
+             if config.ca_certs:
+                 client_config["ca_certs"] = config.ca_certs
          else:
-             hosts = f"http://{config.host}:{config.port}"
-         client = Elasticsearch(
-             hosts=hosts,
-             basic_auth=(config.username, config.password),
-             request_timeout=100000,
-             retry_on_timeout=True,
-             max_retries=10000,
-         )
-     except requests.exceptions.ConnectionError:
-         raise ConnectionError("Vector database connection error")
+             # Regular Elasticsearch configuration
+             parsed_url = urlparse(config.host or "")
+             if parsed_url.scheme in {"http", "https"}:
+                 hosts = f"{config.host}:{config.port}"
+                 use_https = parsed_url.scheme == "https"
+             else:
+                 hosts = f"http://{config.host}:{config.port}"
+                 use_https = False
+
+             client_config = {
+                 "hosts": [hosts],
+                 "basic_auth": (config.username, config.password),
+                 "request_timeout": config.request_timeout,
+                 "retry_on_timeout": config.retry_on_timeout,
+                 "max_retries": config.max_retries,
+             }
+
+             # Only add SSL settings if using HTTPS
+             if use_https:
+                 client_config["verify_certs"] = config.verify_certs
+                 if config.ca_certs:
+                     client_config["ca_certs"] = config.ca_certs
+
+         client = Elasticsearch(**client_config)
+
+         # Test connection
+         if not client.ping():
+             raise ConnectionError("Failed to connect to Elasticsearch")
+     except requests.exceptions.ConnectionError as e:
+         raise ConnectionError(f"Vector database connection error: {str(e)}")
+     except Exception as e:
+         raise ConnectionError(f"Elasticsearch client initialization failed: {str(e)}")

      return client

              },
          }
      }

      self._client.indices.create(index=self._collection_name, mappings=mappings)
+     logger.info("Created index %s with dimension %s", self._collection_name, dim)
+ else:
+     logger.info("Collection %s already exists.", self._collection_name)

      redis_client.set(collection_exist_cache_key, 1, ex=3600)

      dataset.index_struct = json.dumps(self.gen_index_struct_dict(VectorType.ELASTICSEARCH, collection_name))

      config = current_app.config

+     # Check if ELASTICSEARCH_USE_CLOUD is explicitly set to false (boolean)
+     use_cloud_env = config.get("ELASTICSEARCH_USE_CLOUD", False)
+
+     if use_cloud_env is False:
+         # Use regular Elasticsearch with config values
+         config_dict = {
+             "use_cloud": False,
+             "host": config.get("ELASTICSEARCH_HOST", "elasticsearch"),
+             "port": config.get("ELASTICSEARCH_PORT", 9200),
+             "username": config.get("ELASTICSEARCH_USERNAME", "elastic"),
+             "password": config.get("ELASTICSEARCH_PASSWORD", "elastic"),
+         }
+     else:
+         # Check for cloud configuration
+         cloud_url = config.get("ELASTICSEARCH_CLOUD_URL")
+         if cloud_url:
+             config_dict = {
+                 "use_cloud": True,
+                 "cloud_url": cloud_url,
+                 "api_key": config.get("ELASTICSEARCH_API_KEY"),
+             }
+         else:
+             # Fallback to regular Elasticsearch
+             config_dict = {
+                 "use_cloud": False,
+                 "host": config.get("ELASTICSEARCH_HOST", "localhost"),
+                 "port": config.get("ELASTICSEARCH_PORT", 9200),
+                 "username": config.get("ELASTICSEARCH_USERNAME", "elastic"),
+                 "password": config.get("ELASTICSEARCH_PASSWORD", ""),
+             }
+
+     # Common configuration
+     config_dict.update(
+         {
+             "ca_certs": str(config.get("ELASTICSEARCH_CA_CERTS")) if config.get("ELASTICSEARCH_CA_CERTS") else None,
+             "verify_certs": bool(config.get("ELASTICSEARCH_VERIFY_CERTS", False)),
+             "request_timeout": int(config.get("ELASTICSEARCH_REQUEST_TIMEOUT", 100000)),
+             "retry_on_timeout": bool(config.get("ELASTICSEARCH_RETRY_ON_TIMEOUT", True)),
+             "max_retries": int(config.get("ELASTICSEARCH_MAX_RETRIES", 10000)),
+         }
+     )

      return ElasticSearchVector(
          index_name=collection_name,
-         config=ElasticSearchConfig(
-             host=config.get("ELASTICSEARCH_HOST", "localhost"),
-             port=config.get("ELASTICSEARCH_PORT", 9200),
-             username=config.get("ELASTICSEARCH_USERNAME", ""),
-             password=config.get("ELASTICSEARCH_PASSWORD", ""),
-         ),
+         config=ElasticSearchConfig(**config_dict),
          attributes=[],
      )
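
Note: a condensed sketch of the client-selection logic above, assuming the elasticsearch-py 8.x client; `cfg` stands in for an `ElasticSearchConfig` instance:

from elasticsearch import Elasticsearch

def build_client(cfg) -> Elasticsearch:
    common = {
        "request_timeout": cfg.request_timeout,
        "retry_on_timeout": cfg.retry_on_timeout,
        "max_retries": cfg.max_retries,
    }
    if cfg.use_cloud and cfg.cloud_url:
        # Elastic Cloud: authenticate with an API key against the cloud URL.
        client = Elasticsearch(hosts=[cfg.cloud_url], api_key=cfg.api_key,
                               verify_certs=cfg.verify_certs, **common)
    else:
        # Self-hosted: basic auth; default to http:// when no scheme is given.
        host = cfg.host if cfg.host.startswith(("http://", "https://")) else f"http://{cfg.host}"
        client = Elasticsearch(hosts=[f"{host}:{cfg.port}"],
                               basic_auth=(cfg.username, cfg.password), **common)
    if not client.ping():  # fail fast instead of erroring on the first query
        raise ConnectionError("Failed to connect to Elasticsearch")
    return client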

api/core/rag/entities/metadata_entities.py (+2, -0)

"is not", "is not",
"empty", "empty",
"not empty", "not empty",
"in",
"not in",
# for number # for number
"=", "=",
"≠", "≠",

api/core/rag/extractor/notion_extractor.py (+4, -1)

  import json
  import logging
+ import operator
  from typing import Any, Optional, cast

  import requests

      data[property_name] = value
  row_dict = {k: v for k, v in data.items() if v}
  row_content = ""
- for key, value in row_dict.items():
+ for key, value in sorted(row_dict.items(), key=operator.itemgetter(0)):
      if isinstance(value, dict):
          value_dict = {k: v for k, v in value.items() if v}
          value_content = "".join(f"{k}:{v} " for k, v in value_dict.items())
          row_content = row_content + f"{key}:{value_content}\n"
      else:
          row_content = row_content + f"{key}:{value}\n"
+ if "url" in result:
+     row_content = row_content + f"Row Page URL:{result.get('url', '')}\n"
  database_content.append(row_content)

  has_more = response_data.get("has_more", False)
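
Note: the switch to `sorted(row_dict.items(), key=operator.itemgetter(0))` makes the extracted row text deterministic regardless of the order Notion returns properties in. Standalone illustration:

import operator

row = {"Status": "Done", "Name": "Spec", "Owner": "kai"}
# Iterating in sorted key order yields the same text on every run.
lines = [f"{k}:{v}" for k, v in sorted(row.items(), key=operator.itemgetter(0))]
assert lines == ["Name:Spec", "Owner:kai", "Status:Done"]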

api/core/tools/workflow_as_tool/tool.py (+1, -1)

  if not version:
      workflow = (
          db.session.query(Workflow)
-         .where(Workflow.app_id == app_id, Workflow.version != "draft")
+         .where(Workflow.app_id == app_id, Workflow.version != Workflow.VERSION_DRAFT)
          .order_by(Workflow.created_at.desc())
          .first()
      )

api/core/workflow/nodes/http_request/executor.py (+2, -2)

  if not authorization.config.header:
      authorization.config.header = "Authorization"

- if self.auth.config.type == "bearer":
+ if self.auth.config.type == "bearer" and authorization.config.api_key:
      headers[authorization.config.header] = f"Bearer {authorization.config.api_key}"
- elif self.auth.config.type == "basic":
+ elif self.auth.config.type == "basic" and authorization.config.api_key:
      credentials = authorization.config.api_key
      if ":" in credentials:
          encoded_credentials = base64.b64encode(credentials.encode("utf-8")).decode("utf-8")
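
Note: both guards now skip header construction when the key is falsy, so the node no longer emits `Bearer None` or crashes encoding a missing credential. A minimal sketch of the same logic (`auth_type` and `api_key` are hypothetical parameter names, not the node's API):

import base64

def build_auth_header(auth_type: str, api_key: str | None) -> dict[str, str]:
    headers: dict[str, str] = {}
    # Skipping the header entirely when api_key is None avoids sending
    # a literal "Bearer None" or base64-encoding a non-string.
    if auth_type == "bearer" and api_key:
        headers["Authorization"] = f"Bearer {api_key}"
    elif auth_type == "basic" and api_key:
        headers["Authorization"] = "Basic " + base64.b64encode(api_key.encode("utf-8")).decode("utf-8")
    return headers

assert build_auth_header("bearer", None) == {}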

api/core/workflow/nodes/knowledge_retrieval/entities.py (+2, -0)

"is not", "is not",
"empty", "empty",
"not empty", "not empty",
"in",
"not in",
# for number # for number
"=", "=",
"≠", "≠",

api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py (+22, -0)

              **{key: metadata_name, key_value: f"%{value}"}
          )
      )
+ case "in":
+     if isinstance(value, str):
+         escaped_values = [v.strip().replace("'", "''") for v in str(value).split(",")]
+         escaped_value_str = ",".join(escaped_values)
+     else:
+         escaped_value_str = str(value)
+     filters.append(
+         (text(f"documents.doc_metadata ->> :{key} = any(string_to_array(:{key_value},','))")).params(
+             **{key: metadata_name, key_value: escaped_value_str}
+         )
+     )
+ case "not in":
+     if isinstance(value, str):
+         escaped_values = [v.strip().replace("'", "''") for v in str(value).split(",")]
+         escaped_value_str = ",".join(escaped_values)
+     else:
+         escaped_value_str = str(value)
+     filters.append(
+         (text(f"documents.doc_metadata ->> :{key} != all(string_to_array(:{key_value},','))")).params(
+             **{key: metadata_name, key_value: escaped_value_str}
+         )
+     )
  case "=" | "is":
      if isinstance(value, str):
          filters.append(Document.doc_metadata[metadata_name] == f'"{value}"')
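
Note: how the new "in" case resolves to SQL, as a standalone sketch with fixed placeholder names: the comma-separated value is bound as a single parameter and split server-side by Postgres string_to_array, so no user input is interpolated into the SQL string itself.

from sqlalchemy import text

metadata_name, value = "category", "news, blog, docs"
# Strip whitespace and escape single quotes, mirroring the node's handling.
escaped = ",".join(v.strip().replace("'", "''") for v in value.split(","))
clause = text(
    "documents.doc_metadata ->> :key = any(string_to_array(:key_value, ','))"
).params(key=metadata_name, key_value=escaped)
print(clause)  # the raw parameterized SQL fragment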

api/schedule/queue_monitor_task.py (+8, -11)

  import logging
  from datetime import datetime
- from urllib.parse import urlparse

  import click
+ from kombu.utils.url import parse_url  # type: ignore
  from redis import Redis

  import app
  from extensions.ext_database import db
  from libs.email_i18n import EmailType, get_email_i18n_service

- # Create a dedicated Redis connection (using the same configuration as Celery)
- celery_broker_url = dify_config.CELERY_BROKER_URL
-
- parsed = urlparse(celery_broker_url)
- host = parsed.hostname or "localhost"
- port = parsed.port or 6379
- password = parsed.password or None
- redis_db = parsed.path.strip("/") or "1"  # type: ignore
-
- celery_redis = Redis(host=host, port=port, password=password, db=redis_db)
+ redis_config = parse_url(dify_config.CELERY_BROKER_URL)
+ celery_redis = Redis(
+     host=redis_config["hostname"],
+     port=redis_config["port"],
+     password=redis_config["password"],
+     db=int(redis_config["virtual_host"]) if redis_config["virtual_host"] else 1,
+ )

  @app.celery.task(queue="monitor")
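
Note: kombu's parse_url percent-decodes credentials and exposes the Redis db number as virtual_host, which the hand-rolled urlparse version got wrong for passwords containing encoded characters. Quick demonstration (same keys the unit tests below assert on):

from kombu.utils.url import parse_url

cfg = parse_url("redis://user:pass%40word@redis-host:6380/2")
assert cfg["hostname"] == "redis-host"
assert cfg["port"] == 6380
assert cfg["userid"] == "user"
assert cfg["password"] == "pass@word"  # %40 decoded back to @
assert cfg["virtual_host"] == "2"      # the Redis db number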

api/services/annotation_service.py (+60, -1)

          annotation.id, app_id, current_user.current_tenant_id, app_annotation_setting.collection_binding_id
      )

+ @classmethod
+ def delete_app_annotations_in_batch(cls, app_id: str, annotation_ids: list[str]):
+     # get app info
+     app = (
+         db.session.query(App)
+         .where(App.id == app_id, App.tenant_id == current_user.current_tenant_id, App.status == "normal")
+         .first()
+     )
+
+     if not app:
+         raise NotFound("App not found")
+
+     # Fetch annotations and their settings in a single query
+     annotations_to_delete = (
+         db.session.query(MessageAnnotation, AppAnnotationSetting)
+         .outerjoin(AppAnnotationSetting, MessageAnnotation.app_id == AppAnnotationSetting.app_id)
+         .filter(MessageAnnotation.id.in_(annotation_ids))
+         .all()
+     )
+
+     if not annotations_to_delete:
+         return {"deleted_count": 0}
+
+     # Step 1: Extract IDs for bulk operations
+     annotation_ids_to_delete = [annotation.id for annotation, _ in annotations_to_delete]
+
+     # Step 2: Bulk delete hit histories in a single query
+     db.session.query(AppAnnotationHitHistory).filter(
+         AppAnnotationHitHistory.annotation_id.in_(annotation_ids_to_delete)
+     ).delete(synchronize_session=False)
+
+     # Step 3: Trigger async tasks for search index deletion
+     for annotation, annotation_setting in annotations_to_delete:
+         if annotation_setting:
+             delete_annotation_index_task.delay(
+                 annotation.id, app_id, current_user.current_tenant_id, annotation_setting.collection_binding_id
+             )
+
+     # Step 4: Bulk delete annotations in a single query
+     deleted_count = (
+         db.session.query(MessageAnnotation)
+         .filter(MessageAnnotation.id.in_(annotation_ids_to_delete))
+         .delete(synchronize_session=False)
+     )
+
+     db.session.commit()
+     return {"deleted_count": deleted_count}

  @classmethod
  def batch_import_app_annotations(cls, app_id, file: FileStorage) -> dict:
      # get app info

      try:
          # Skip the first row
-         df = pd.read_csv(file)
+         df = pd.read_csv(file, dtype=str)
          result = []
          for index, row in df.iterrows():
              content = {"question": row.iloc[0], "answer": row.iloc[1]}

      if not app:
          raise NotFound("App not found")

+     # if annotation reply is enabled, delete annotation index
+     app_annotation_setting = (
+         db.session.query(AppAnnotationSetting).where(AppAnnotationSetting.app_id == app_id).first()
+     )

      annotations_query = db.session.query(MessageAnnotation).filter(MessageAnnotation.app_id == app_id)
      for annotation in annotations_query.yield_per(100):
          annotation_hit_histories_query = db.session.query(AppAnnotationHitHistory).filter(
          for annotation_hit_history in annotation_hit_histories_query.yield_per(100):
              db.session.delete(annotation_hit_history)

+         # if annotation reply is enabled, delete annotation index
+         if app_annotation_setting:
+             delete_annotation_index_task.delay(
+                 annotation.id, app_id, current_user.current_tenant_id, app_annotation_setting.collection_binding_id
+             )

          db.session.delete(annotation)

      db.session.commit()
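
Note: dtype=str keeps every CSV cell a string; without it pandas infers types, so a question like "001" becomes the integer 1 and blanks leak in as NaN. Small demonstration:

import io
import pandas as pd

csv = io.StringIO("question,answer\n001,42\n")
df = pd.read_csv(csv, dtype=str)
assert df.iloc[0, 0] == "001"  # leading zero preserved
assert df.iloc[0, 1] == "42"   # stays a string, not an int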

api/services/external_knowledge_service.py (+2, -2)

  def validate_api_list(cls, api_settings: dict):
      if not api_settings:
          raise ValueError("api list is empty")
-     if "endpoint" not in api_settings and not api_settings["endpoint"]:
+     if not api_settings.get("endpoint"):
          raise ValueError("endpoint is required")
-     if "api_key" not in api_settings and not api_settings["api_key"]:
+     if not api_settings.get("api_key"):
          raise ValueError("api_key is required")

  @staticmethod
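
Note: the old checks used `and` where `or` was needed: a present-but-empty "endpoint" made the first clause False and passed silently, while a missing key raised KeyError on the second clause. The truthiness-based `.get()` form catches both. Quick check (the URL and key values are made up, and `cls` is dropped for the sketch):

def validate_api_list(api_settings: dict) -> None:
    if not api_settings:
        raise ValueError("api list is empty")
    if not api_settings.get("endpoint"):
        raise ValueError("endpoint is required")
    if not api_settings.get("api_key"):
        raise ValueError("api_key is required")

validate_api_list({"endpoint": "https://kb.example.com", "api_key": "sk-123"})  # ok
for bad in ({}, {"endpoint": ""}, {"endpoint": "https://kb.example.com"}):
    try:
        validate_api_list(bad)
    except ValueError as e:
        print(e)  # each bad input now raises instead of slipping through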

api/services/workflow/workflow_converter.py (+1, -1)

  tenant_id=app_model.tenant_id,
  app_id=app_model.id,
  type=WorkflowType.from_app_mode(new_app_mode).value,
- version="draft",
+ version=Workflow.VERSION_DRAFT,
  graph=json.dumps(graph),
  features=json.dumps(features),
  created_by=account_id,

api/services/workflow_service.py (+9, -7)

  workflow = (
      db.session.query(Workflow)
      .where(
-         Workflow.tenant_id == app_model.tenant_id, Workflow.app_id == app_model.id, Workflow.version == "draft"
+         Workflow.tenant_id == app_model.tenant_id,
+         Workflow.app_id == app_model.id,
+         Workflow.version == Workflow.VERSION_DRAFT,
      )
      .first()
  )

  tenant_id=app_model.tenant_id,
  app_id=app_model.id,
  type=WorkflowType.from_app_mode(app_model.mode).value,
- version="draft",
+ version=Workflow.VERSION_DRAFT,
  graph=json.dumps(graph),
  features=json.dumps(features),
  created_by=account.id,

  draft_workflow_stmt = select(Workflow).where(
      Workflow.tenant_id == app_model.tenant_id,
      Workflow.app_id == app_model.id,
-     Workflow.version == "draft",
+     Workflow.version == Workflow.VERSION_DRAFT,
  )
  draft_workflow = session.scalar(draft_workflow_stmt)
  if not draft_workflow:

      tenant_id=app_model.tenant_id,
  )

- eclosing_node_type_and_id = draft_workflow.get_enclosing_node_type_and_id(node_config)
- if eclosing_node_type_and_id:
-     _, enclosing_node_id = eclosing_node_type_and_id
+ enclosing_node_type_and_id = draft_workflow.get_enclosing_node_type_and_id(node_config)
+ if enclosing_node_type_and_id:
+     _, enclosing_node_id = enclosing_node_type_and_id
  else:
      enclosing_node_id = None

  raise ValueError(f"Workflow with ID {workflow_id} not found")

  # Check if workflow is a draft version
- if workflow.version == "draft":
+ if workflow.version == Workflow.VERSION_DRAFT:
      raise DraftWorkflowDeletionError("Cannot delete draft workflow versions")

  # Check if this workflow is currently referenced by an app
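
Note: the repeated "draft" literal becomes the single class constant Workflow.VERSION_DRAFT, so a typo in one query can no longer silently match nothing. Sketch of the convention, with a plain dataclass standing in for the real SQLAlchemy model:

from dataclasses import dataclass

@dataclass
class Workflow:
    VERSION_DRAFT = "draft"  # un-annotated, so dataclass treats it as a class constant
    version: str = VERSION_DRAFT

wf = Workflow()
assert wf.version == Workflow.VERSION_DRAFT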

api/tasks/add_document_to_index_task.py (+2, -0)

      return

  if dataset_document.indexing_status != "completed":
+     db.session.close()
      return

  indexing_cache_key = f"document_{dataset_document.id}_indexing"

      db.session.commit()
  finally:
      redis_client.delete(indexing_cache_key)
+     db.session.close()
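
Note: this and the next three task fixes converge on one rule: return the database session to the pool on every exit path, including early returns. Runnable miniature with a stub session in place of SQLAlchemy's:

class StubSession:
    def __init__(self) -> None:
        self.closed = False

    def close(self) -> None:
        self.closed = True

def task(session: StubSession, status: str) -> str:
    if status != "completed":
        session.close()  # early return must not leak the connection
        return "skipped"
    try:
        return "indexed"
    finally:
        session.close()  # normal and error paths release it too

s = StubSession()
assert task(s, "waiting") == "skipped" and s.closed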

api/tasks/create_segment_to_index_task.py (+1, -0)

      return

  if segment.status != "waiting":
+     db.session.close()
      return

  indexing_cache_key = f"segment_{segment.id}_indexing"

api/tasks/document_indexing_sync_task.py (+2, -0)

      logging.info(click.style(str(ex), fg="yellow"))
  except Exception:
      logging.exception("document_indexing_sync_task failed, document_id: %s", document_id)
+ finally:
+     db.session.close()

api/tasks/retry_document_indexing_task.py (+2, -2)

          logging.info(click.style(str(ex), fg="yellow"))
          redis_client.delete(retry_indexing_cache_key)
          logging.exception("retry_document_indexing_task failed, document_id: %s", document_id)
-             end_at = time.perf_counter()
-             logging.info(click.style(f"Retry dataset: {dataset_id} latency: {end_at - start_at}", fg="green"))
+     end_at = time.perf_counter()
+     logging.info(click.style(f"Retry dataset: {dataset_id} latency: {end_at - start_at}", fg="green"))
  except Exception as e:
      logging.exception(
          "retry_document_indexing_task failed, dataset_id: %s, document_ids: %s", dataset_id, document_ids

api/tests/integration_tests/vdb/elasticsearch/test_elasticsearch.py (+3, -1)

self.attributes = ["doc_id", "dataset_id", "document_id", "doc_hash"] self.attributes = ["doc_id", "dataset_id", "document_id", "doc_hash"]
self.vector = ElasticSearchVector( self.vector = ElasticSearchVector(
index_name=self.collection_name.lower(), index_name=self.collection_name.lower(),
config=ElasticSearchConfig(host="http://localhost", port="9200", username="elastic", password="elastic"),
config=ElasticSearchConfig(
use_cloud=False, host="http://localhost", port="9200", username="elastic", password="elastic"
),
attributes=self.attributes, attributes=self.attributes,
) )



api/tests/unit_tests/configs/test_dify_config.py (+59, -0)

  import os

+ import pytest
  from flask import Flask
  from packaging.version import Version
  from yarl import URL

      options = engine_options["connect_args"]["options"]
      assert "search_path=myschema" in options
      assert "timezone=UTC" in options


@pytest.mark.parametrize(
("broker_url", "expected_host", "expected_port", "expected_username", "expected_password", "expected_db"),
[
("redis://localhost:6379/1", "localhost", 6379, None, None, "1"),
("redis://:password@localhost:6379/1", "localhost", 6379, None, "password", "1"),
("redis://:mypass%23123@localhost:6379/1", "localhost", 6379, None, "mypass#123", "1"),
("redis://user:pass%40word@redis-host:6380/2", "redis-host", 6380, "user", "pass@word", "2"),
("redis://admin:complex%23pass%40word@127.0.0.1:6379/0", "127.0.0.1", 6379, "admin", "complex#pass@word", "0"),
(
"redis://user%40domain:secret%23123@redis.example.com:6380/3",
"redis.example.com",
6380,
"user@domain",
"secret#123",
"3",
),
# Password containing %23 substring (double encoding scenario)
("redis://:mypass%2523@localhost:6379/1", "localhost", 6379, None, "mypass%23", "1"),
# Username and password both containing encoded characters
("redis://user%2525%40:pass%2523@localhost:6379/1", "localhost", 6379, "user%25@", "pass%23", "1"),
],
)
def test_celery_broker_url_with_special_chars_password(
monkeypatch, broker_url, expected_host, expected_port, expected_username, expected_password, expected_db
):
"""Test that CELERY_BROKER_URL with various formats are handled correctly."""
from kombu.utils.url import parse_url

# clear system environment variables
os.environ.clear()

# Set up basic required environment variables (following existing pattern)
monkeypatch.setenv("CONSOLE_API_URL", "https://example.com")
monkeypatch.setenv("CONSOLE_WEB_URL", "https://example.com")
monkeypatch.setenv("DB_USERNAME", "postgres")
monkeypatch.setenv("DB_PASSWORD", "postgres")
monkeypatch.setenv("DB_HOST", "localhost")
monkeypatch.setenv("DB_PORT", "5432")
monkeypatch.setenv("DB_DATABASE", "dify")

# Set the CELERY_BROKER_URL to test
monkeypatch.setenv("CELERY_BROKER_URL", broker_url)

# Create config and verify the URL is stored correctly
config = DifyConfig()
assert broker_url == config.CELERY_BROKER_URL

# Test actual parsing behavior using kombu's parse_url (same as production)
redis_config = parse_url(config.CELERY_BROKER_URL)

# Verify the parsing results match expectations (using kombu's field names)
assert redis_config["hostname"] == expected_host
assert redis_config["port"] == expected_port
assert redis_config["userid"] == expected_username # kombu uses 'userid' not 'username'
assert redis_config["password"] == expected_password
assert redis_config["virtual_host"] == expected_db # kombu uses 'virtual_host' not 'db'

api/tests/unit_tests/core/ops/test_config_entity.py (+13, -6)

  assert config.project == "default"

  def test_endpoint_validation_with_path(self):
-     """Test endpoint validation normalizes URL by removing path"""
-     config = PhoenixConfig(endpoint="https://custom.phoenix.com/api/v1")
-     assert config.endpoint == "https://custom.phoenix.com"
+     """Test endpoint validation with path"""
+     config = PhoenixConfig(endpoint="https://app.phoenix.arize.com/s/dify-integration")
+     assert config.endpoint == "https://app.phoenix.arize.com/s/dify-integration"
+
+ def test_endpoint_validation_without_path(self):
+     """Test endpoint validation without path"""
+     config = PhoenixConfig(endpoint="https://app.phoenix.arize.com")
+     assert config.endpoint == "https://app.phoenix.arize.com"

  class TestLangfuseConfig:
      assert config.host == "https://custom.langfuse.com"

  def test_valid_config_with_path(self):
      host = "https://custom.langfuse.com/api/v1"
      config = LangfuseConfig(public_key="public_key", secret_key="secret_key", host=host)
      assert config.public_key == "public_key"
      assert config.secret_key == "secret_key"

      """Test that URL normalization works consistently across configs"""
      # Test that paths are removed from endpoints
      arize_config = ArizeConfig(endpoint="https://arize.com/api/v1/test")
-     phoenix_config = PhoenixConfig(endpoint="https://phoenix.com/api/v2/")
+     phoenix_with_path_config = PhoenixConfig(endpoint="https://app.phoenix.arize.com/s/dify-integration")
+     phoenix_without_path_config = PhoenixConfig(endpoint="https://app.phoenix.arize.com")
      aliyun_config = AliyunConfig(
          license_key="test_license", endpoint="https://tracing-analysis-dc-hz.aliyuncs.com/api/v1/traces"
      )

      assert arize_config.endpoint == "https://arize.com"
-     assert phoenix_config.endpoint == "https://phoenix.com"
+     assert phoenix_with_path_config.endpoint == "https://app.phoenix.arize.com/s/dify-integration"
+     assert phoenix_without_path_config.endpoint == "https://app.phoenix.arize.com"
      assert aliyun_config.endpoint == "https://tracing-analysis-dc-hz.aliyuncs.com"

  def test_project_default_values(self):

api/tests/unit_tests/services/test_metadata_bug_complete.py (+189, -0)

from unittest.mock import Mock, patch

import pytest
from flask_restful import reqparse
from werkzeug.exceptions import BadRequest

from services.entities.knowledge_entities.knowledge_entities import MetadataArgs
from services.metadata_service import MetadataService


class TestMetadataBugCompleteValidation:
"""Complete test suite to verify the metadata nullable bug and its fix."""

def test_1_pydantic_layer_validation(self):
"""Test Layer 1: Pydantic model validation correctly rejects None values."""
# Pydantic should reject None values for required fields
with pytest.raises((ValueError, TypeError)):
MetadataArgs(type=None, name=None)

with pytest.raises((ValueError, TypeError)):
MetadataArgs(type="string", name=None)

with pytest.raises((ValueError, TypeError)):
MetadataArgs(type=None, name="test")

# Valid values should work
valid_args = MetadataArgs(type="string", name="test_name")
assert valid_args.type == "string"
assert valid_args.name == "test_name"

def test_2_business_logic_layer_crashes_on_none(self):
"""Test Layer 2: Business logic crashes when None values slip through."""
# Create mock that bypasses Pydantic validation
mock_metadata_args = Mock()
mock_metadata_args.name = None
mock_metadata_args.type = "string"

with patch("services.metadata_service.current_user") as mock_user:
mock_user.current_tenant_id = "tenant-123"
mock_user.id = "user-456"

# Should crash with TypeError
with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
MetadataService.create_metadata("dataset-123", mock_metadata_args)

# Test update method as well
with patch("services.metadata_service.current_user") as mock_user:
mock_user.current_tenant_id = "tenant-123"
mock_user.id = "user-456"

with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
MetadataService.update_metadata_name("dataset-123", "metadata-456", None)

def test_3_database_constraints_verification(self):
"""Test Layer 3: Verify database model has nullable=False constraints."""
from sqlalchemy import inspect

from models.dataset import DatasetMetadata

# Get table info
mapper = inspect(DatasetMetadata)

# Check that type and name columns are not nullable
type_column = mapper.columns["type"]
name_column = mapper.columns["name"]

assert type_column.nullable is False, "type column should be nullable=False"
assert name_column.nullable is False, "name column should be nullable=False"

def test_4_fixed_api_layer_rejects_null(self, app):
"""Test Layer 4: Fixed API configuration properly rejects null values."""
# Test Console API create endpoint (fixed)
parser = reqparse.RequestParser()
parser.add_argument("type", type=str, required=True, nullable=False, location="json")
parser.add_argument("name", type=str, required=True, nullable=False, location="json")

with app.test_request_context(json={"type": None, "name": None}, content_type="application/json"):
with pytest.raises(BadRequest):
parser.parse_args()

# Test with just name being null
with app.test_request_context(json={"type": "string", "name": None}, content_type="application/json"):
with pytest.raises(BadRequest):
parser.parse_args()

# Test with just type being null
with app.test_request_context(json={"type": None, "name": "test"}, content_type="application/json"):
with pytest.raises(BadRequest):
parser.parse_args()

def test_5_fixed_api_accepts_valid_values(self, app):
"""Test that fixed API still accepts valid non-null values."""
parser = reqparse.RequestParser()
parser.add_argument("type", type=str, required=True, nullable=False, location="json")
parser.add_argument("name", type=str, required=True, nullable=False, location="json")

with app.test_request_context(json={"type": "string", "name": "valid_name"}, content_type="application/json"):
args = parser.parse_args()
assert args["type"] == "string"
assert args["name"] == "valid_name"

def test_6_simulated_buggy_behavior(self, app):
"""Test simulating the original buggy behavior with nullable=True."""
# Simulate the old buggy configuration
buggy_parser = reqparse.RequestParser()
buggy_parser.add_argument("type", type=str, required=True, nullable=True, location="json")
buggy_parser.add_argument("name", type=str, required=True, nullable=True, location="json")

with app.test_request_context(json={"type": None, "name": None}, content_type="application/json"):
# This would pass in the buggy version
args = buggy_parser.parse_args()
assert args["type"] is None
assert args["name"] is None

# But would crash when trying to create MetadataArgs
with pytest.raises((ValueError, TypeError)):
MetadataArgs(**args)

def test_7_end_to_end_validation_layers(self):
"""Test all validation layers work together correctly."""
# Layer 1: API should reject null at parameter level (with fix)
# Layer 2: Pydantic should reject null at model level
# Layer 3: Business logic expects non-null
# Layer 4: Database enforces non-null

# Test that valid data flows through all layers
valid_data = {"type": "string", "name": "test_metadata"}

# Should create valid Pydantic object
metadata_args = MetadataArgs(**valid_data)
assert metadata_args.type == "string"
assert metadata_args.name == "test_metadata"

# Should not crash in business logic length check
assert len(metadata_args.name) <= 255 # This should not crash
assert len(metadata_args.type) > 0 # This should not crash

def test_8_verify_specific_fix_locations(self):
"""Verify that the specific locations mentioned in bug report are fixed."""
# Read the actual files to verify fixes
import os

# Console API create
console_create_file = "api/controllers/console/datasets/metadata.py"
if os.path.exists(console_create_file):
with open(console_create_file) as f:
content = f.read()
# Should contain nullable=False, not nullable=True
assert "nullable=True" not in content.split("class DatasetMetadataCreateApi")[1].split("class")[0]

# Service API create
service_create_file = "api/controllers/service_api/dataset/metadata.py"
if os.path.exists(service_create_file):
with open(service_create_file) as f:
content = f.read()
# Should contain nullable=False, not nullable=True
create_api_section = content.split("class DatasetMetadataCreateServiceApi")[1].split("class")[0]
assert "nullable=True" not in create_api_section


class TestMetadataValidationSummary:
"""Summary tests that demonstrate the complete validation architecture."""

def test_validation_layer_architecture(self):
"""Document and test the 4-layer validation architecture."""
# Layer 1: API Parameter Validation (Flask-RESTful reqparse)
# - Role: First line of defense, validates HTTP request parameters
# - Fixed: nullable=False ensures null values are rejected at API boundary

# Layer 2: Pydantic Model Validation
# - Role: Validates data structure and types before business logic
# - Working: Required fields without Optional[] reject None values

# Layer 3: Business Logic Validation
# - Role: Domain-specific validation (length checks, uniqueness, etc.)
# - Vulnerable: Direct len() calls crash on None values

# Layer 4: Database Constraints
# - Role: Final data integrity enforcement
# - Working: nullable=False prevents None values in database

# The bug was: Layer 1 allowed None, but Layers 2-4 expected non-None
# The fix: Make Layer 1 consistent with Layers 2-4

assert True # This test documents the architecture


if __name__ == "__main__":
pytest.main([__file__, "-v"])

api/tests/unit_tests/services/test_metadata_nullable_bug.py (+108, -0)

from unittest.mock import Mock, patch

import pytest
from flask_restful import reqparse

from services.entities.knowledge_entities.knowledge_entities import MetadataArgs
from services.metadata_service import MetadataService


class TestMetadataNullableBug:
"""Test case to reproduce the metadata nullable validation bug."""

def test_metadata_args_with_none_values_should_fail(self):
"""Test that MetadataArgs validation should reject None values."""
# This test demonstrates the expected behavior - should fail validation
with pytest.raises((ValueError, TypeError)):
# This should fail because Pydantic expects non-None values
MetadataArgs(type=None, name=None)

def test_metadata_service_create_with_none_name_crashes(self):
"""Test that MetadataService.create_metadata crashes when name is None."""
# Mock the MetadataArgs to bypass Pydantic validation
mock_metadata_args = Mock()
mock_metadata_args.name = None # This will cause len() to crash
mock_metadata_args.type = "string"

with patch("services.metadata_service.current_user") as mock_user:
mock_user.current_tenant_id = "tenant-123"
mock_user.id = "user-456"

# This should crash with TypeError when calling len(None)
with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
MetadataService.create_metadata("dataset-123", mock_metadata_args)

def test_metadata_service_update_with_none_name_crashes(self):
"""Test that MetadataService.update_metadata_name crashes when name is None."""
with patch("services.metadata_service.current_user") as mock_user:
mock_user.current_tenant_id = "tenant-123"
mock_user.id = "user-456"

# This should crash with TypeError when calling len(None)
with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
MetadataService.update_metadata_name("dataset-123", "metadata-456", None)

def test_api_parser_accepts_null_values(self, app):
"""Test that API parser configuration incorrectly accepts null values."""
# Simulate the current API parser configuration
parser = reqparse.RequestParser()
parser.add_argument("type", type=str, required=True, nullable=True, location="json")
parser.add_argument("name", type=str, required=True, nullable=True, location="json")

# Simulate request data with null values
with app.test_request_context(json={"type": None, "name": None}, content_type="application/json"):
# This should parse successfully due to nullable=True
args = parser.parse_args()

# Verify that null values are accepted
assert args["type"] is None
assert args["name"] is None

# This demonstrates the bug: API accepts None but business logic will crash

def test_integration_bug_scenario(self, app):
"""Test the complete bug scenario from API to service layer."""
# Step 1: API parser accepts null values (current buggy behavior)
parser = reqparse.RequestParser()
parser.add_argument("type", type=str, required=True, nullable=True, location="json")
parser.add_argument("name", type=str, required=True, nullable=True, location="json")

with app.test_request_context(json={"type": None, "name": None}, content_type="application/json"):
args = parser.parse_args()

# Step 2: Try to create MetadataArgs with None values
# This should fail at Pydantic validation level
with pytest.raises((ValueError, TypeError)):
metadata_args = MetadataArgs(**args)

# Step 3: If we bypass Pydantic (simulating the bug scenario)
# Move this outside the request context to avoid Flask-Login issues
mock_metadata_args = Mock()
mock_metadata_args.name = None # From args["name"]
mock_metadata_args.type = None # From args["type"]

with patch("services.metadata_service.current_user") as mock_user:
mock_user.current_tenant_id = "tenant-123"
mock_user.id = "user-456"

# Step 4: Service layer crashes on len(None)
with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
MetadataService.create_metadata("dataset-123", mock_metadata_args)

def test_correct_nullable_false_configuration_works(self, app):
"""Test that the correct nullable=False configuration works as expected."""
# This tests the FIXED configuration
parser = reqparse.RequestParser()
parser.add_argument("type", type=str, required=True, nullable=False, location="json")
parser.add_argument("name", type=str, required=True, nullable=False, location="json")

with app.test_request_context(json={"type": None, "name": None}, content_type="application/json"):
# This should fail with BadRequest due to nullable=False
from werkzeug.exceptions import BadRequest

with pytest.raises(BadRequest):
parser.parse_args()


if __name__ == "__main__":
pytest.main([__file__, "-v"])

docker/.env.example (+11, -0)

  ELASTICSEARCH_PASSWORD=elastic
  KIBANA_PORT=5601

+ # Whether to use Elastic Cloud (serverless) instead of a self-hosted cluster.
+ ELASTICSEARCH_USE_CLOUD=false
+ ELASTICSEARCH_CLOUD_URL=YOUR-ELASTICSEARCH_CLOUD_URL
+ ELASTICSEARCH_API_KEY=YOUR-ELASTICSEARCH_API_KEY
+
+ ELASTICSEARCH_VERIFY_CERTS=False
+ ELASTICSEARCH_CA_CERTS=
+ ELASTICSEARCH_REQUEST_TIMEOUT=100000
+ ELASTICSEARCH_RETRY_ON_TIMEOUT=True
+ ELASTICSEARCH_MAX_RETRIES=10

  # baidu vector configurations, only available when VECTOR_STORE is `baidu`
  BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287
  BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000

docker/docker-compose.yaml (+8, -0)

  ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic}
  ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
  KIBANA_PORT: ${KIBANA_PORT:-5601}
+ ELASTICSEARCH_USE_CLOUD: ${ELASTICSEARCH_USE_CLOUD:-false}
+ ELASTICSEARCH_CLOUD_URL: ${ELASTICSEARCH_CLOUD_URL:-YOUR-ELASTICSEARCH_CLOUD_URL}
+ ELASTICSEARCH_API_KEY: ${ELASTICSEARCH_API_KEY:-YOUR-ELASTICSEARCH_API_KEY}
+ ELASTICSEARCH_VERIFY_CERTS: ${ELASTICSEARCH_VERIFY_CERTS:-False}
+ ELASTICSEARCH_CA_CERTS: ${ELASTICSEARCH_CA_CERTS:-}
+ ELASTICSEARCH_REQUEST_TIMEOUT: ${ELASTICSEARCH_REQUEST_TIMEOUT:-100000}
+ ELASTICSEARCH_RETRY_ON_TIMEOUT: ${ELASTICSEARCH_RETRY_ON_TIMEOUT:-True}
+ ELASTICSEARCH_MAX_RETRIES: ${ELASTICSEARCH_MAX_RETRIES:-10}
  BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287}
  BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000}
  BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root}

web/__tests__/check-i18n.test.ts (+566, -0)

import fs from 'node:fs'
import path from 'node:path'

// Mock functions to simulate the check-i18n functionality
const vm = require('node:vm')
const transpile = require('typescript').transpile

describe('check-i18n script functionality', () => {
const testDir = path.join(__dirname, '../i18n-test')
const testEnDir = path.join(testDir, 'en-US')
const testZhDir = path.join(testDir, 'zh-Hans')

// Helper function that replicates the getKeysFromLanguage logic
async function getKeysFromLanguage(language: string, testPath = testDir): Promise<string[]> {
return new Promise((resolve, reject) => {
const folderPath = path.resolve(testPath, language)
const allKeys: string[] = []

if (!fs.existsSync(folderPath)) {
resolve([])
return
}

fs.readdir(folderPath, (err, files) => {
if (err) {
reject(err)
return
}

const translationFiles = files.filter(file => /\.(ts|js)$/.test(file))

translationFiles.forEach((file) => {
const filePath = path.join(folderPath, file)
const fileName = file.replace(/\.[^/.]+$/, '')
const camelCaseFileName = fileName.replace(/[-_](.)/g, (_, c) =>
c.toUpperCase(),
)

try {
const content = fs.readFileSync(filePath, 'utf8')
const moduleExports = {}
const context = {
exports: moduleExports,
module: { exports: moduleExports },
require,
console,
__filename: filePath,
__dirname: folderPath,
}

vm.runInNewContext(transpile(content), context)
const translationObj = (context.module.exports as any).default || context.module.exports

if (!translationObj || typeof translationObj !== 'object')
throw new Error(`Error parsing file: ${filePath}`)

const nestedKeys: string[] = []
const iterateKeys = (obj: any, prefix = '') => {
for (const key in obj) {
const nestedKey = prefix ? `${prefix}.${key}` : key
if (typeof obj[key] === 'object' && obj[key] !== null && !Array.isArray(obj[key])) {
// This is an object (but not array), recurse into it but don't add it as a key
iterateKeys(obj[key], nestedKey)
}
else {
// This is a leaf node (string, number, boolean, array, etc.), add it as a key
nestedKeys.push(nestedKey)
}
}
}
iterateKeys(translationObj)

const fileKeys = nestedKeys.map(key => `${camelCaseFileName}.${key}`)
allKeys.push(...fileKeys)
}
catch (error) {
reject(error)
}
})
resolve(allKeys)
})
})
}

beforeEach(() => {
// Clean up and create test directories
if (fs.existsSync(testDir))
fs.rmSync(testDir, { recursive: true })

fs.mkdirSync(testDir, { recursive: true })
fs.mkdirSync(testEnDir, { recursive: true })
fs.mkdirSync(testZhDir, { recursive: true })
})

afterEach(() => {
// Clean up test files
if (fs.existsSync(testDir))
fs.rmSync(testDir, { recursive: true })
})

describe('Key extraction logic', () => {
it('should extract only leaf node keys, not intermediate objects', async () => {
const testContent = `const translation = {
simple: 'Simple Value',
nested: {
level1: 'Level 1 Value',
deep: {
level2: 'Level 2 Value'
}
},
array: ['not extracted'],
number: 42,
boolean: true
}

export default translation
`

fs.writeFileSync(path.join(testEnDir, 'test.ts'), testContent)

const keys = await getKeysFromLanguage('en-US')

expect(keys).toEqual([
'test.simple',
'test.nested.level1',
'test.nested.deep.level2',
'test.array',
'test.number',
'test.boolean',
])

// Should not include intermediate object keys
expect(keys).not.toContain('test.nested')
expect(keys).not.toContain('test.nested.deep')
})

it('should handle camelCase file name conversion correctly', async () => {
const testContent = `const translation = {
key: 'value'
}

export default translation
`

fs.writeFileSync(path.join(testEnDir, 'app-debug.ts'), testContent)
fs.writeFileSync(path.join(testEnDir, 'user_profile.ts'), testContent)

const keys = await getKeysFromLanguage('en-US')

expect(keys).toContain('appDebug.key')
expect(keys).toContain('userProfile.key')
})
})

describe('Missing keys detection', () => {
it('should detect missing keys in target language', async () => {
const enContent = `const translation = {
common: {
save: 'Save',
cancel: 'Cancel',
delete: 'Delete'
},
app: {
title: 'My App',
version: '1.0'
}
}

export default translation
`

const zhContent = `const translation = {
common: {
save: '保存',
cancel: '取消'
// missing 'delete'
},
app: {
title: '我的应用'
// missing 'version'
}
}

export default translation
`

fs.writeFileSync(path.join(testEnDir, 'test.ts'), enContent)
fs.writeFileSync(path.join(testZhDir, 'test.ts'), zhContent)

const enKeys = await getKeysFromLanguage('en-US')
const zhKeys = await getKeysFromLanguage('zh-Hans')

const missingKeys = enKeys.filter(key => !zhKeys.includes(key))

expect(missingKeys).toContain('test.common.delete')
expect(missingKeys).toContain('test.app.version')
expect(missingKeys).toHaveLength(2)
})
})

describe('Extra keys detection', () => {
it('should detect extra keys in target language', async () => {
const enContent = `const translation = {
common: {
save: 'Save',
cancel: 'Cancel'
}
}

export default translation
`

const zhContent = `const translation = {
common: {
save: '保存',
cancel: '取消',
delete: '删除', // extra key
extra: '额外的' // another extra key
},
newSection: {
someKey: '某个值' // extra section
}
}

export default translation
`

fs.writeFileSync(path.join(testEnDir, 'test.ts'), enContent)
fs.writeFileSync(path.join(testZhDir, 'test.ts'), zhContent)

const enKeys = await getKeysFromLanguage('en-US')
const zhKeys = await getKeysFromLanguage('zh-Hans')

const extraKeys = zhKeys.filter(key => !enKeys.includes(key))

expect(extraKeys).toContain('test.common.delete')
expect(extraKeys).toContain('test.common.extra')
expect(extraKeys).toContain('test.newSection.someKey')
expect(extraKeys).toHaveLength(3)
})
})

describe('File filtering logic', () => {
it('should filter keys by specific file correctly', async () => {
// Create multiple files
const file1Content = `const translation = {
button: 'Button',
text: 'Text'
}

export default translation
`

const file2Content = `const translation = {
title: 'Title',
description: 'Description'
}

export default translation
`

fs.writeFileSync(path.join(testEnDir, 'components.ts'), file1Content)
fs.writeFileSync(path.join(testEnDir, 'pages.ts'), file2Content)
fs.writeFileSync(path.join(testZhDir, 'components.ts'), file1Content)
fs.writeFileSync(path.join(testZhDir, 'pages.ts'), file2Content)

const allEnKeys = await getKeysFromLanguage('en-US')
const allZhKeys = await getKeysFromLanguage('zh-Hans')

// Test file filtering logic
const targetFile = 'components'
const filteredEnKeys = allEnKeys.filter(key =>
key.startsWith(targetFile.replace(/[-_](.)/g, (_, c) => c.toUpperCase())),
)

expect(allEnKeys).toHaveLength(4) // 2 keys from each file
expect(filteredEnKeys).toHaveLength(2) // only components keys
expect(filteredEnKeys).toContain('components.button')
expect(filteredEnKeys).toContain('components.text')
expect(filteredEnKeys).not.toContain('pages.title')
expect(filteredEnKeys).not.toContain('pages.description')
})
})

describe('Complex nested structure handling', () => {
it('should handle deeply nested objects correctly', async () => {
const complexContent = `const translation = {
level1: {
level2: {
level3: {
level4: {
deepValue: 'Deep Value'
},
anotherValue: 'Another Value'
},
simpleValue: 'Simple Value'
},
directValue: 'Direct Value'
},
rootValue: 'Root Value'
}

export default translation
`

fs.writeFileSync(path.join(testEnDir, 'complex.ts'), complexContent)

const keys = await getKeysFromLanguage('en-US')

expect(keys).toContain('complex.level1.level2.level3.level4.deepValue')
expect(keys).toContain('complex.level1.level2.level3.anotherValue')
expect(keys).toContain('complex.level1.level2.simpleValue')
expect(keys).toContain('complex.level1.directValue')
expect(keys).toContain('complex.rootValue')

// Should not include intermediate objects
expect(keys).not.toContain('complex.level1')
expect(keys).not.toContain('complex.level1.level2')
expect(keys).not.toContain('complex.level1.level2.level3')
expect(keys).not.toContain('complex.level1.level2.level3.level4')
})
})

describe('Edge cases', () => {
it('should handle empty objects', async () => {
const emptyContent = `const translation = {
empty: {},
withValue: 'value'
}

export default translation
`

fs.writeFileSync(path.join(testEnDir, 'empty.ts'), emptyContent)

const keys = await getKeysFromLanguage('en-US')

expect(keys).toContain('empty.withValue')
expect(keys).not.toContain('empty.empty')
})

it('should handle special characters in keys', async () => {
const specialContent = `const translation = {
'key-with-dash': 'value1',
'key_with_underscore': 'value2',
'key.with.dots': 'value3',
normalKey: 'value4'
}

export default translation
`

fs.writeFileSync(path.join(testEnDir, 'special.ts'), specialContent)

const keys = await getKeysFromLanguage('en-US')

expect(keys).toContain('special.key-with-dash')
expect(keys).toContain('special.key_with_underscore')
expect(keys).toContain('special.key.with.dots')
expect(keys).toContain('special.normalKey')
})

it('should handle different value types', async () => {
const typesContent = `const translation = {
stringValue: 'string',
numberValue: 42,
booleanValue: true,
nullValue: null,
undefinedValue: undefined,
arrayValue: ['array', 'values'],
objectValue: {
nested: 'nested value'
}
}

export default translation
`

fs.writeFileSync(path.join(testEnDir, 'types.ts'), typesContent)

const keys = await getKeysFromLanguage('en-US')

expect(keys).toContain('types.stringValue')
expect(keys).toContain('types.numberValue')
expect(keys).toContain('types.booleanValue')
expect(keys).toContain('types.nullValue')
expect(keys).toContain('types.undefinedValue')
expect(keys).toContain('types.arrayValue')
expect(keys).toContain('types.objectValue.nested')
expect(keys).not.toContain('types.objectValue')
})
})

describe('Real-world scenario tests', () => {
it('should handle app-debug structure like real files', async () => {
const appDebugEn = `const translation = {
pageTitle: {
line1: 'Prompt',
line2: 'Engineering'
},
operation: {
applyConfig: 'Publish',
resetConfig: 'Reset',
debugConfig: 'Debug'
},
generate: {
instruction: 'Instructions',
generate: 'Generate',
resTitle: 'Generated Prompt',
noDataLine1: 'Describe your use case on the left,',
noDataLine2: 'the orchestration preview will show here.'
}
}

export default translation
`

const appDebugZh = `const translation = {
pageTitle: {
line1: '提示词',
line2: '编排'
},
operation: {
applyConfig: '发布',
resetConfig: '重置',
debugConfig: '调试'
},
generate: {
instruction: '指令',
generate: '生成',
resTitle: '生成的提示词',
noData: '在左侧描述您的用例,编排预览将在此处显示。' // This is extra
}
}

export default translation
`

fs.writeFileSync(path.join(testEnDir, 'app-debug.ts'), appDebugEn)
fs.writeFileSync(path.join(testZhDir, 'app-debug.ts'), appDebugZh)

const enKeys = await getKeysFromLanguage('en-US')
const zhKeys = await getKeysFromLanguage('zh-Hans')

const missingKeys = enKeys.filter(key => !zhKeys.includes(key))
const extraKeys = zhKeys.filter(key => !enKeys.includes(key))

expect(missingKeys).toContain('appDebug.generate.noDataLine1')
expect(missingKeys).toContain('appDebug.generate.noDataLine2')
expect(extraKeys).toContain('appDebug.generate.noData')

expect(missingKeys).toHaveLength(2)
expect(extraKeys).toHaveLength(1)
})

it('should handle time structure with operation nested keys', async () => {
const timeEn = `const translation = {
months: {
January: 'January',
February: 'February'
},
operation: {
now: 'Now',
ok: 'OK',
cancel: 'Cancel',
pickDate: 'Pick Date'
},
title: {
pickTime: 'Pick Time'
},
defaultPlaceholder: 'Pick a time...'
}

export default translation
`

const timeZh = `const translation = {
months: {
January: '一月',
February: '二月'
},
operation: {
now: '此刻',
ok: '确定',
cancel: '取消',
pickDate: '选择日期'
},
title: {
pickTime: '选择时间'
},
pickDate: '选择日期', // This is extra - duplicates operation.pickDate
defaultPlaceholder: '请选择时间...'
}

export default translation
`

fs.writeFileSync(path.join(testEnDir, 'time.ts'), timeEn)
fs.writeFileSync(path.join(testZhDir, 'time.ts'), timeZh)

const enKeys = await getKeysFromLanguage('en-US')
const zhKeys = await getKeysFromLanguage('zh-Hans')

const missingKeys = enKeys.filter(key => !zhKeys.includes(key))
const extraKeys = zhKeys.filter(key => !enKeys.includes(key))

expect(missingKeys).toHaveLength(0) // No missing keys
expect(extraKeys).toContain('time.pickDate') // Extra root-level pickDate
expect(extraKeys).toHaveLength(1)

// Should have both keys available
expect(zhKeys).toContain('time.operation.pickDate') // Correct nested key
expect(zhKeys).toContain('time.pickDate') // Extra duplicate key
})
})

describe('Statistics calculation', () => {
it('should calculate correct difference statistics', async () => {
const enContent = `const translation = {
key1: 'value1',
key2: 'value2',
key3: 'value3'
}

export default translation
`

const zhContentMissing = `const translation = {
key1: 'value1',
key2: 'value2'
// missing key3
}

export default translation
`

const zhContentExtra = `const translation = {
key1: 'value1',
key2: 'value2',
key3: 'value3',
key4: 'extra',
key5: 'extra2'
}

export default translation
`

fs.writeFileSync(path.join(testEnDir, 'stats.ts'), enContent)

// Test missing keys scenario
fs.writeFileSync(path.join(testZhDir, 'stats.ts'), zhContentMissing)

const enKeys = await getKeysFromLanguage('en-US')
const zhKeysMissing = await getKeysFromLanguage('zh-Hans')

expect(enKeys.length - zhKeysMissing.length).toBe(1) // +1 means 1 missing key

// Test extra keys scenario
fs.writeFileSync(path.join(testZhDir, 'stats.ts'), zhContentExtra)

const zhKeysExtra = await getKeysFromLanguage('zh-Hans')

expect(enKeys.length - zhKeysExtra.length).toBe(-2) // -2 means 2 extra keys
})
})
})
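
For orientation, here is a minimal sketch of the `getKeysFromLanguage` helper these tests exercise. It is reconstructed from the assertions above (leaf keys only, camelCase file namespaces, arrays treated as leaves) and is an assumption, not the repo's actual implementation; the `web/i18n/<language>/*.ts` layout is likewise assumed.

```ts
import fs from 'node:fs'
import path from 'node:path'

const toCamelCase = (name: string) =>
  name.replace(/[-_](.)/g, (_, c: string) => c.toUpperCase())

// Sketch only: collects dot-separated leaf keys ("file.nested.key") for one language.
async function getKeysFromLanguage(language: string, baseDir = 'web/i18n'): Promise<string[]> {
  const keys: string[] = []
  const dir = path.join(baseDir, language)
  for (const file of fs.readdirSync(dir).filter(f => f.endsWith('.ts'))) {
    const namespace = toCamelCase(path.basename(file, '.ts')) // 'app-debug.ts' -> 'appDebug'
    const translation = (await import(path.resolve(dir, file))).default
    const walk = (obj: Record<string, unknown>, prefix: string) => {
      for (const [key, value] of Object.entries(obj)) {
        const nested = `${prefix}.${key}`
        if (typeof value === 'object' && value !== null && !Array.isArray(value))
          walk(value as Record<string, unknown>, nested) // intermediate objects are not keys
        else
          keys.push(nested) // leaves: strings, numbers, booleans, null, arrays
      }
    }
    walk(translation, namespace)
  }
  return keys
}
```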

+ 207  - 0   web/__tests__/plugin-tool-workflow-error.test.tsx

/**
* Test cases to reproduce the plugin tool workflow error
* Issue: #23154 - Application error when loading plugin tools in workflow
* Root cause: split() operation called on null/undefined values
*/

describe('Plugin Tool Workflow Error Reproduction', () => {
/**
* Mock function to simulate the problematic code in switch-plugin-version.tsx:29
* const [pluginId] = uniqueIdentifier.split(':')
*/
const mockSwitchPluginVersionLogic = (uniqueIdentifier: string | null | undefined) => {
// This directly reproduces the problematic line from switch-plugin-version.tsx:29
const [pluginId] = uniqueIdentifier!.split(':')
return pluginId
}

/**
* Test case 1: Simulate null uniqueIdentifier
* This should reproduce the error mentioned in the issue
*/
it('should reproduce error when uniqueIdentifier is null', () => {
expect(() => {
mockSwitchPluginVersionLogic(null)
}).toThrow('Cannot read properties of null (reading \'split\')')
})

/**
* Test case 2: Simulate undefined uniqueIdentifier
*/
it('should reproduce error when uniqueIdentifier is undefined', () => {
expect(() => {
mockSwitchPluginVersionLogic(undefined)
}).toThrow('Cannot read properties of undefined (reading \'split\')')
})

/**
* Test case 3: Simulate empty string uniqueIdentifier
*/
it('should handle empty string uniqueIdentifier', () => {
expect(() => {
const result = mockSwitchPluginVersionLogic('')
expect(result).toBe('') // Empty string split by ':' returns ['']
}).not.toThrow()
})

/**
* Test case 4: Simulate malformed uniqueIdentifier without colon separator
*/
it('should handle malformed uniqueIdentifier without colon separator', () => {
expect(() => {
const result = mockSwitchPluginVersionLogic('malformed-identifier-without-colon')
expect(result).toBe('malformed-identifier-without-colon') // No colon means full string returned
}).not.toThrow()
})

/**
* Test case 5: Simulate valid uniqueIdentifier
*/
it('should work correctly with valid uniqueIdentifier', () => {
expect(() => {
const result = mockSwitchPluginVersionLogic('valid-plugin-id:1.0.0')
expect(result).toBe('valid-plugin-id')
}).not.toThrow()
})
})

/**
* Test for the variable processing split error in use-single-run-form-params
*/
describe('Variable Processing Split Error', () => {
/**
* Mock function to simulate the problematic code in use-single-run-form-params.ts:91
* const getDependentVars = () => {
* return varInputs.map(item => item.variable.slice(1, -1).split('.'))
* }
*/
const mockGetDependentVars = (varInputs: Array<{ variable: string | null | undefined }>) => {
return varInputs.map((item) => {
// Guard against null/undefined variable to prevent app crash
if (!item.variable || typeof item.variable !== 'string')
return []

return item.variable.slice(1, -1).split('.')
}).filter(arr => arr.length > 0) // Filter out empty arrays
}

/**
* Test case 1: Variable processing with null variable
*/
it('should handle null variable safely', () => {
const varInputs = [{ variable: null }]

expect(() => {
mockGetDependentVars(varInputs)
}).not.toThrow()

const result = mockGetDependentVars(varInputs)
expect(result).toEqual([]) // null variables are filtered out
})

/**
* Test case 2: Variable processing with undefined variable
*/
it('should handle undefined variable safely', () => {
const varInputs = [{ variable: undefined }]

expect(() => {
mockGetDependentVars(varInputs)
}).not.toThrow()

const result = mockGetDependentVars(varInputs)
expect(result).toEqual([]) // undefined variables are filtered out
})

/**
* Test case 3: Variable processing with empty string
*/
it('should handle empty string variable', () => {
const varInputs = [{ variable: '' }]

expect(() => {
mockGetDependentVars(varInputs)
}).not.toThrow()

const result = mockGetDependentVars(varInputs)
expect(result).toEqual([]) // Empty string is filtered out, so result is empty array
})

/**
* Test case 4: Variable processing with valid variable format
*/
it('should work correctly with valid variable format', () => {
const varInputs = [{ variable: '{{workflow.node.output}}' }]

expect(() => {
mockGetDependentVars(varInputs)
}).not.toThrow()

const result = mockGetDependentVars(varInputs)
expect(result[0]).toEqual(['{workflow', 'node', 'output}'])
})
})

/**
* Integration test to simulate the complete workflow scenario
*/
describe('Plugin Tool Workflow Integration', () => {
/**
* Simulate the scenario where plugin metadata is incomplete or corrupted
* This can happen when:
* 1. Plugin is being loaded from marketplace but metadata request fails
* 2. Plugin configuration is corrupted in database
* 3. Network issues during plugin loading
*/
it('should reproduce the client-side exception scenario', () => {
// Mock incomplete plugin data that could cause the error
const incompletePluginData = {
// Missing or null uniqueIdentifier
uniqueIdentifier: null,
meta: null,
minimum_dify_version: undefined,
}

// This simulates the error path that leads to the white screen
expect(() => {
// Simulate the code path in switch-plugin-version.tsx:29
// The actual problematic code doesn't use optional chaining
const _pluginId = (incompletePluginData.uniqueIdentifier as any).split(':')[0]
}).toThrow('Cannot read properties of null (reading \'split\')')
})

/**
* Test the scenario mentioned in the issue where plugin tools are loaded in workflow
*/
it('should simulate plugin tool loading in workflow context', () => {
// Mock the workflow context where plugin tools are being loaded
const workflowPluginTools = [
{
provider_name: 'test-plugin',
uniqueIdentifier: null, // This is the problematic case
tool_name: 'test-tool',
},
{
provider_name: 'valid-plugin',
uniqueIdentifier: 'valid-plugin:1.0.0',
tool_name: 'valid-tool',
},
]

// Process each plugin tool
workflowPluginTools.forEach((tool, _index) => {
if (tool.uniqueIdentifier === null) {
// This reproduces the exact error scenario
expect(() => {
const _pluginId = (tool.uniqueIdentifier as any).split(':')[0]
}).toThrow()
}
else {
// Valid tools should work fine
expect(() => {
const _pluginId = tool.uniqueIdentifier.split(':')[0]
}).not.toThrow()
}
})
})
})
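
The defensive pattern these tests motivate fits in one small helper. This is a hypothetical sketch of the null-safety idea, not the repo's exact code; the actual fix (optional chaining plus an early return) appears in the switch-plugin-version.tsx diff further down.

```ts
// Guarded variant of `uniqueIdentifier.split(':')` (illustrative helper name).
const getPluginId = (uniqueIdentifier: string | null | undefined): string => {
  if (!uniqueIdentifier)
    return '' // null/undefined/'' no longer crash the workflow panel
  const [pluginId] = uniqueIdentifier.split(':')
  return pluginId
}

getPluginId(null) // ''
getPluginId('valid-plugin-id:1.0.0') // 'valid-plugin-id'
```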

+ 301  - 0   web/__tests__/workflow-parallel-limit.test.tsx

/**
* MAX_PARALLEL_LIMIT Configuration Bug Test
*
* This test reproduces and verifies the fix for issue #23083:
* MAX_PARALLEL_LIMIT environment variable does not take effect in iteration panel
*/

import { render, screen } from '@testing-library/react'
import React from 'react'

// Mock environment variables before importing constants
const originalEnv = process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT

// Test with different environment values
function setupEnvironment(value?: string) {
if (value)
process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT = value
else
delete process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT

// Clear module cache to force re-evaluation
jest.resetModules()
}

function restoreEnvironment() {
if (originalEnv)
process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT = originalEnv
else
delete process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT

jest.resetModules()
}

// Mock i18next with proper implementation
jest.mock('react-i18next', () => ({
useTranslation: () => ({
t: (key: string) => {
if (key.includes('MaxParallelismTitle')) return 'Max Parallelism'
if (key.includes('MaxParallelismDesc')) return 'Maximum number of parallel executions'
if (key.includes('parallelMode')) return 'Parallel Mode'
if (key.includes('parallelPanelDesc')) return 'Enable parallel execution'
if (key.includes('errorResponseMethod')) return 'Error Response Method'
return key
},
}),
initReactI18next: {
type: '3rdParty',
init: jest.fn(),
},
}))

// Mock i18next module completely to prevent initialization issues
jest.mock('i18next', () => ({
use: jest.fn().mockReturnThis(),
init: jest.fn().mockReturnThis(),
t: jest.fn(key => key),
isInitialized: true,
}))

// Mock the useConfig hook
jest.mock('@/app/components/workflow/nodes/iteration/use-config', () => ({
__esModule: true,
default: () => ({
inputs: {
is_parallel: true,
parallel_nums: 5,
error_handle_mode: 'terminated',
},
changeParallel: jest.fn(),
changeParallelNums: jest.fn(),
changeErrorHandleMode: jest.fn(),
}),
}))

// Mock other components
jest.mock('@/app/components/workflow/nodes/_base/components/variable/var-reference-picker', () => {
return function MockVarReferencePicker() {
return <div data-testid="var-reference-picker">VarReferencePicker</div>
}
})

jest.mock('@/app/components/workflow/nodes/_base/components/split', () => {
return function MockSplit() {
return <div data-testid="split">Split</div>
}
})

jest.mock('@/app/components/workflow/nodes/_base/components/field', () => {
return function MockField({ title, children }: { title: string, children: React.ReactNode }) {
return (
<div data-testid="field">
<label>{title}</label>
{children}
</div>
)
}
})

jest.mock('@/app/components/base/switch', () => {
return function MockSwitch({ defaultValue }: { defaultValue: boolean }) {
return <input type="checkbox" defaultChecked={defaultValue} data-testid="switch" />
}
})

jest.mock('@/app/components/base/select', () => {
return function MockSelect() {
return <select data-testid="select">Select</select>
}
})

// Use defaultValue to avoid controlled input warnings
jest.mock('@/app/components/base/slider', () => {
return function MockSlider({ value, max, min }: { value: number, max: number, min: number }) {
return (
<input
type="range"
defaultValue={value}
max={max}
min={min}
data-testid="slider"
data-max={max}
data-min={min}
readOnly
/>
)
}
})

// Use defaultValue to avoid controlled input warnings
jest.mock('@/app/components/base/input', () => {
return function MockInput({ type, max, min, value }: { type: string, max: number, min: number, value: number }) {
return (
<input
type={type}
defaultValue={value}
max={max}
min={min}
data-testid="number-input"
data-max={max}
data-min={min}
readOnly
/>
)
}
})

describe('MAX_PARALLEL_LIMIT Configuration Bug', () => {
const mockNodeData = {
id: 'test-iteration-node',
type: 'iteration' as const,
data: {
title: 'Test Iteration',
desc: 'Test iteration node',
iterator_selector: ['test'],
output_selector: ['output'],
is_parallel: true,
parallel_nums: 5,
error_handle_mode: 'terminated' as const,
},
}

beforeEach(() => {
jest.clearAllMocks()
})

afterEach(() => {
restoreEnvironment()
})

afterAll(() => {
restoreEnvironment()
})

describe('Environment Variable Parsing', () => {
it('should parse MAX_PARALLEL_LIMIT from NEXT_PUBLIC_MAX_PARALLEL_LIMIT environment variable', () => {
setupEnvironment('25')
const { MAX_PARALLEL_LIMIT } = require('@/config')
expect(MAX_PARALLEL_LIMIT).toBe(25)
})

it('should fallback to default when environment variable is not set', () => {
setupEnvironment() // No environment variable
const { MAX_PARALLEL_LIMIT } = require('@/config')
expect(MAX_PARALLEL_LIMIT).toBe(10)
})

it('should handle invalid environment variable values', () => {
setupEnvironment('invalid')
const { MAX_PARALLEL_LIMIT } = require('@/config')

// Should fall back to default when parsing fails
expect(MAX_PARALLEL_LIMIT).toBe(10)
})

it('should handle empty environment variable', () => {
setupEnvironment('')
const { MAX_PARALLEL_LIMIT } = require('@/config')

// Should fall back to default when empty
expect(MAX_PARALLEL_LIMIT).toBe(10)
})

// Edge cases for boundary values
it('should clamp MAX_PARALLEL_LIMIT to MIN when env is 0 or negative', () => {
setupEnvironment('0')
let { MAX_PARALLEL_LIMIT } = require('@/config')
expect(MAX_PARALLEL_LIMIT).toBe(10) // Falls back to default

setupEnvironment('-5')
;({ MAX_PARALLEL_LIMIT } = require('@/config'))
expect(MAX_PARALLEL_LIMIT).toBe(10) // Falls back to default
})

it('should handle float numbers by parseInt behavior', () => {
setupEnvironment('12.7')
const { MAX_PARALLEL_LIMIT } = require('@/config')
// parseInt truncates to integer
expect(MAX_PARALLEL_LIMIT).toBe(12)
})
})

describe('UI Component Integration (Main Fix Verification)', () => {
it('should render iteration panel with environment-configured max value', () => {
// Set environment variable to a different value
setupEnvironment('30')

// Import Panel after setting environment
const Panel = require('@/app/components/workflow/nodes/iteration/panel').default
const { MAX_PARALLEL_LIMIT } = require('@/config')

render(
<Panel
id="test-node"
data={mockNodeData.data}
/>,
)

// Behavior-focused assertion: UI max should equal MAX_PARALLEL_LIMIT
const numberInput = screen.getByTestId('number-input')
expect(numberInput).toHaveAttribute('data-max', String(MAX_PARALLEL_LIMIT))

const slider = screen.getByTestId('slider')
expect(slider).toHaveAttribute('data-max', String(MAX_PARALLEL_LIMIT))

// Verify the actual values
expect(MAX_PARALLEL_LIMIT).toBe(30)
expect(numberInput.getAttribute('data-max')).toBe('30')
expect(slider.getAttribute('data-max')).toBe('30')
})

it('should maintain UI consistency with different environment values', () => {
setupEnvironment('15')
const Panel = require('@/app/components/workflow/nodes/iteration/panel').default
const { MAX_PARALLEL_LIMIT } = require('@/config')

render(
<Panel
id="test-node"
data={mockNodeData.data}
/>,
)

// Both input and slider should use the same max value from MAX_PARALLEL_LIMIT
const numberInput = screen.getByTestId('number-input')
const slider = screen.getByTestId('slider')

expect(numberInput.getAttribute('data-max')).toBe(slider.getAttribute('data-max'))
expect(numberInput.getAttribute('data-max')).toBe(String(MAX_PARALLEL_LIMIT))
})
})

describe('Legacy Constant Verification (For Transition Period)', () => {
// Marked as transition/deprecation tests
it('should maintain MAX_ITERATION_PARALLEL_NUM for backward compatibility', () => {
const { MAX_ITERATION_PARALLEL_NUM } = require('@/app/components/workflow/constants')
expect(typeof MAX_ITERATION_PARALLEL_NUM).toBe('number')
expect(MAX_ITERATION_PARALLEL_NUM).toBe(10) // Hardcoded legacy value
})

it('should demonstrate MAX_PARALLEL_LIMIT vs legacy constant difference', () => {
setupEnvironment('50')
const { MAX_PARALLEL_LIMIT } = require('@/config')
const { MAX_ITERATION_PARALLEL_NUM } = require('@/app/components/workflow/constants')

// MAX_PARALLEL_LIMIT is configurable, MAX_ITERATION_PARALLEL_NUM is not
expect(MAX_PARALLEL_LIMIT).toBe(50)
expect(MAX_ITERATION_PARALLEL_NUM).toBe(10)
expect(MAX_PARALLEL_LIMIT).not.toBe(MAX_ITERATION_PARALLEL_NUM)
})
})

describe('Constants Validation', () => {
it('should validate that required constants exist and have correct types', () => {
const { MAX_PARALLEL_LIMIT } = require('@/config')
const { MIN_ITERATION_PARALLEL_NUM } = require('@/app/components/workflow/constants')
expect(typeof MAX_PARALLEL_LIMIT).toBe('number')
expect(typeof MIN_ITERATION_PARALLEL_NUM).toBe('number')
expect(MAX_PARALLEL_LIMIT).toBeGreaterThanOrEqual(MIN_ITERATION_PARALLEL_NUM)
})
})
})
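
One detail worth spelling out: the tests pair every `setupEnvironment()` call with `require('@/config')` instead of a top-level import because the constant is computed once at module load. A short illustration of the caching behavior, with hypothetical values:

```ts
process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT = '25'
jest.resetModules() // drop the module cache so the constant is recomputed
let { MAX_PARALLEL_LIMIT } = require('@/config') // 25

process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT = '40'
;({ MAX_PARALLEL_LIMIT } = require('@/config')) // still 25: the cached module wins
jest.resetModules()
;({ MAX_PARALLEL_LIMIT } = require('@/config')) // 40 once the cache is cleared
```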

+ 79  - 0   web/app/components/app/annotation/batch-action.tsx

import React, { type FC } from 'react'
import { RiDeleteBinLine } from '@remixicon/react'
import { useTranslation } from 'react-i18next'
import { useBoolean } from 'ahooks'
import Divider from '@/app/components/base/divider'
import classNames from '@/utils/classnames'
import Confirm from '@/app/components/base/confirm'

const i18nPrefix = 'appAnnotation.batchAction'

type IBatchActionProps = {
className?: string
selectedIds: string[]
onBatchDelete: () => Promise<void>
onCancel: () => void
}

const BatchAction: FC<IBatchActionProps> = ({
className,
selectedIds,
onBatchDelete,
onCancel,
}) => {
const { t } = useTranslation()
const [isShowDeleteConfirm, {
setTrue: showDeleteConfirm,
setFalse: hideDeleteConfirm,
}] = useBoolean(false)
const [isDeleting, {
setTrue: setIsDeleting,
setFalse: setIsNotDeleting,
}] = useBoolean(false)

const handleBatchDelete = async () => {
setIsDeleting()
try {
await onBatchDelete()
hideDeleteConfirm()
}
finally {
// Always clear the deleting flag, even if onBatchDelete rejects,
// so the confirm dialog is not left stuck in its loading state.
setIsNotDeleting()
}
}
return (
<div className={classNames('pointer-events-none flex w-full justify-center', className)}>
<div className='pointer-events-auto flex items-center gap-x-1 rounded-[10px] border border-components-actionbar-border-accent bg-components-actionbar-bg-accent p-1 shadow-xl shadow-shadow-shadow-5 backdrop-blur-[5px]'>
<div className='inline-flex items-center gap-x-2 py-1 pl-2 pr-3'>
<span className='flex h-5 w-5 items-center justify-center rounded-md bg-text-accent px-1 py-0.5 text-xs font-medium text-text-primary-on-surface'>
{selectedIds.length}
</span>
<span className='text-[13px] font-semibold leading-[16px] text-text-accent'>{t(`${i18nPrefix}.selected`)}</span>
</div>
<Divider type='vertical' className='mx-0.5 h-3.5 bg-divider-regular' />
<div className='flex cursor-pointer items-center gap-x-0.5 px-3 py-2' onClick={showDeleteConfirm}>
<RiDeleteBinLine className='h-4 w-4 text-components-button-destructive-ghost-text' />
<button type='button' className='px-0.5 text-[13px] font-medium leading-[16px] text-components-button-destructive-ghost-text'>
{t('common.operation.delete')}
</button>
</div>

<Divider type='vertical' className='mx-0.5 h-3.5 bg-divider-regular' />
<button type='button' className='px-3.5 py-2 text-[13px] font-medium leading-[16px] text-components-button-ghost-text' onClick={onCancel}>
{t('common.operation.cancel')}
</button>
</div>
{
isShowDeleteConfirm && (
<Confirm
isShow
title={t('appAnnotation.list.delete.title')}
confirmText={t('common.operation.delete')}
onConfirm={handleBatchDelete}
onCancel={hideDeleteConfirm}
isLoading={isDeleting}
isDisabled={isDeleting}
/>
)
}
</div>
)
}

export default React.memo(BatchAction)

+ 27  - 2   web/app/components/app/annotation/index.tsx

import AnnotationFullModal from '@/app/components/billing/annotation-full/modal'
import type { App } from '@/types/app'
import cn from '@/utils/classnames'
import { delAnnotations } from '@/service/annotation'


type Props = {
appDetail: App
const [controlUpdateList, setControlUpdateList] = useState(Date.now())
const [currItem, setCurrItem] = useState<AnnotationItem | null>(null)
const [isShowViewModal, setIsShowViewModal] = useState(false)
const [selectedIds, setSelectedIds] = useState<string[]>([])
const debouncedQueryParams = useDebounce(queryParams, { wait: 500 })
const [isBatchDeleting, setIsBatchDeleting] = useState(false)


const fetchAnnotationConfig = async () => {
const res = await doFetchAnnotationConfig(appDetail.id)


useEffect(() => {
if (isChatApp) fetchAnnotationConfig()
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])


const ensureJobCompleted = async (jobId: string, status: AnnotationEnableStatus) => {


useEffect(() => {
fetchList(currPage + 1)
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [currPage, limit, debouncedQueryParams])


const handleAdd = async (payload: AnnotationItemBasic) => {
setControlUpdateList(Date.now())
}


const handleBatchDelete = async () => {
if (isBatchDeleting)
return
setIsBatchDeleting(true)
try {
await delAnnotations(appDetail.id, selectedIds)
Toast.notify({ message: t('common.api.actionSuccess'), type: 'success' })
fetchList()
setControlUpdateList(Date.now())
setSelectedIds([])
}
catch (e: any) {
Toast.notify({ type: 'error', message: e.message || t('common.api.actionFailed') })
}
finally {
setIsBatchDeleting(false)
}
}

const handleView = (item: AnnotationItem) => {
setCurrItem(item)
setIsShowViewModal(true)
list={list}
onRemove={handleRemove}
onView={handleView}
selectedIds={selectedIds}
onSelectedIdsChange={setSelectedIds}
onBatchDelete={handleBatchDelete}
onCancel={() => setSelectedIds([])}
isBatchDeleting={isBatchDeleting}
/>
: <div className='flex h-full grow items-center justify-center'><EmptyElement /></div>
}

+ 64  - 4   web/app/components/app/annotation/list.tsx

'use client'
import type { FC } from 'react'
import React from 'react'
import React, { useCallback, useMemo } from 'react'
import { useTranslation } from 'react-i18next'
import { RiDeleteBinLine, RiEditLine } from '@remixicon/react'
import type { AnnotationItem } from './type'
import ActionButton from '@/app/components/base/action-button'
import useTimestamp from '@/hooks/use-timestamp'
import cn from '@/utils/classnames'
import Checkbox from '@/app/components/base/checkbox'
import BatchAction from './batch-action'


type Props = {
list: AnnotationItem[]
onRemove: (id: string) => void
onView: (item: AnnotationItem) => void
onRemove: (id: string) => void
selectedIds: string[]
onSelectedIdsChange: (selectedIds: string[]) => void
onBatchDelete: () => Promise<void>
onCancel: () => void
isBatchDeleting?: boolean
}


const List: FC<Props> = ({
list,
onView,
onRemove,
selectedIds,
onSelectedIdsChange,
onBatchDelete,
onCancel,
isBatchDeleting,
}) => {
const { t } = useTranslation()
const { formatTime } = useTimestamp()
const [currId, setCurrId] = React.useState<string | null>(null)
const [showConfirmDelete, setShowConfirmDelete] = React.useState(false)

const isAllSelected = useMemo(() => {
return list.length > 0 && list.every(item => selectedIds.includes(item.id))
}, [list, selectedIds])

const isSomeSelected = useMemo(() => {
return list.some(item => selectedIds.includes(item.id))
}, [list, selectedIds])

const handleSelectAll = useCallback(() => {
const currentPageIds = list.map(item => item.id)
const otherPageIds = selectedIds.filter(id => !currentPageIds.includes(id))

if (isAllSelected)
onSelectedIdsChange(otherPageIds)
else
onSelectedIdsChange([...otherPageIds, ...currentPageIds])
}, [isAllSelected, list, selectedIds, onSelectedIdsChange])

return ( return (
<div className='overflow-x-auto'>
<div className='relative grow overflow-x-auto'>
<table className={cn('mt-2 w-full min-w-[440px] border-collapse border-0')}>
<thead className='system-xs-medium-uppercase text-text-tertiary'>
<tr>
<td className='w-5 whitespace-nowrap rounded-l-lg bg-background-section-burn pl-2 pr-1'>{t('appAnnotation.table.header.question')}</td>
<td className='w-12 whitespace-nowrap rounded-l-lg bg-background-section-burn px-2'>
<Checkbox
className='mr-2'
checked={isAllSelected}
indeterminate={!isAllSelected && isSomeSelected}
onCheck={handleSelectAll}
/>
</td>
<td className='w-5 whitespace-nowrap bg-background-section-burn pl-2 pr-1'>{t('appAnnotation.table.header.question')}</td>
<td className='whitespace-nowrap bg-background-section-burn py-1.5 pl-3'>{t('appAnnotation.table.header.answer')}</td>
<td className='whitespace-nowrap bg-background-section-burn py-1.5 pl-3'>{t('appAnnotation.table.header.createdAt')}</td>
<td className='whitespace-nowrap bg-background-section-burn py-1.5 pl-3'>{t('appAnnotation.table.header.hits')}</td>
}
}
>
<td className='w-12 px-2' onClick={e => e.stopPropagation()}>
<Checkbox
className='mr-2'
checked={selectedIds.includes(item.id)}
onCheck={() => {
if (selectedIds.includes(item.id))
onSelectedIdsChange(selectedIds.filter(id => id !== item.id))
else
onSelectedIdsChange([...selectedIds, item.id])
}}
/>
</td>
<td
className='max-w-[250px] overflow-hidden text-ellipsis whitespace-nowrap p-3 pr-2'
title={item.question}
setShowConfirmDelete(false)
}}
/>
{selectedIds.length > 0 && (
<BatchAction
className='absolute bottom-6 left-1/2 z-20 -translate-x-1/2'
selectedIds={selectedIds}
onBatchDelete={onBatchDelete}
onCancel={onCancel}
isBatchDeleting={isBatchDeleting}
/>
)}
</div>
)
}
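
The header checkbox logic above is cross-page aware: toggling it only touches the rows currently rendered, leaving selections made on other pages intact. A standalone mirror of `handleSelectAll` (assumed to match the hook above, with hypothetical ids) makes the behavior explicit:

```ts
// Assumed mirror of handleSelectAll: same semantics, no React state.
const toggleSelectAll = (list: { id: string }[], selectedIds: string[]): string[] => {
  const currentPageIds = list.map(item => item.id)
  const otherPageIds = selectedIds.filter(id => !currentPageIds.includes(id))
  const isAllSelected = list.length > 0 && list.every(item => selectedIds.includes(item.id))
  // Deselecting keeps other pages' ids; selecting appends this page's ids once.
  return isAllSelected ? otherPageIds : [...otherPageIds, ...currentPageIds]
}

toggleSelectAll([{ id: 'p1-a' }, { id: 'p1-b' }], ['p2-a']) // ['p2-a', 'p1-a', 'p1-b']
toggleSelectAll([{ id: 'p1-a' }], ['p2-a', 'p1-a'])         // ['p2-a']
```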

+ 2  - 0   web/app/components/plugins/plugin-detail-panel/detail-header.tsx

const isAutoUpgradeEnabled = useMemo(() => {
if (!autoUpgradeInfo || !isFromMarketplace)
return false
if(autoUpgradeInfo.strategy_setting === 'disabled')
return false
if(autoUpgradeInfo.upgrade_mode === AUTO_UPDATE_MODE.update_all)
return true
if(autoUpgradeInfo.upgrade_mode === AUTO_UPDATE_MODE.partial && autoUpgradeInfo.include_plugins.includes(plugin_id))

+ 0  - 8   web/app/components/workflow/constants.ts

export const NODE_LAYOUT_VERTICAL_PADDING = 60
export const NODE_LAYOUT_MIN_DISTANCE = 100


let maxParallelLimit = 10

if (process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT && process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT !== '')
maxParallelLimit = Number.parseInt(process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT)
else if (globalThis.document?.body?.getAttribute('data-public-max-parallel-limit') && globalThis.document.body.getAttribute('data-public-max-parallel-limit') !== '')
maxParallelLimit = Number.parseInt(globalThis.document.body.getAttribute('data-public-max-parallel-limit') as string)

export const PARALLEL_LIMIT = maxParallelLimit
export const PARALLEL_DEPTH_LIMIT = 3


export const RETRIEVAL_OUTPUT_STRUCT = `{

+ 4  - 5   web/app/components/workflow/hooks/use-workflow.ts

import { getParallelInfo } from '../utils'
import {
PARALLEL_DEPTH_LIMIT,
PARALLEL_LIMIT,
SUPPORT_OUTPUT_VARS_NODE,
} from '../constants'
import type { IterationNodeType } from '../nodes/iteration/types'
import { CUSTOM_ITERATION_START_NODE } from '@/app/components/workflow/nodes/iteration-start/constants'
import { CUSTOM_LOOP_START_NODE } from '@/app/components/workflow/nodes/loop-start/constants'
import { basePath } from '@/utils/var'
import { canFindTool } from '@/utils'
import { MAX_PARALLEL_LIMIT } from '@/config'
import { useNodesMetaData } from '.'


export const useIsChatMode = () => {
})
setNodes(newNodes)
}

// eslint-disable-next-line react-hooks/exhaustive-deps
}, [store])


const isVarUsedInNodes = useCallback((varSelector: ValueSelector) => {
edges,
} = store.getState()
const connectedEdges = edges.filter(edge => edge.source === nodeId && edge.sourceHandle === nodeHandle)
if (connectedEdges.length > PARALLEL_LIMIT - 1) {
if (connectedEdges.length > MAX_PARALLEL_LIMIT - 1) {
const { setShowTips } = workflowStore.getState()
setShowTips(t('workflow.common.parallelTip.limit', { num: PARALLEL_LIMIT }))
setShowTips(t('workflow.common.parallelTip.limit', { num: MAX_PARALLEL_LIMIT }))
return false
}
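
The guard above reads a little tersely: `connectedEdges.length > MAX_PARALLEL_LIMIT - 1` fires when the node already has `MAX_PARALLEL_LIMIT` outgoing branches on that handle, i.e. the edge being added would exceed the limit. Restated as a tiny predicate (assumed context, hypothetical numbers):

```ts
// One more edge is allowed only while existing fan-out stays below the limit.
const canAddParallelBranch = (existingEdges: number, limit: number): boolean =>
  existingEdges <= limit - 1

canAddParallelBranch(9, 10)  // true: this would be the 10th branch
canAddParallelBranch(10, 10) // false: an 11th branch would exceed MAX_PARALLEL_LIMIT
```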



+ 7  - 1   web/app/components/workflow/nodes/_base/components/switch-plugin-version.tsx



export const SwitchPluginVersion: FC<SwitchPluginVersionProps> = (props) => {
const { uniqueIdentifier, tooltip, onChange, className } = props
const [pluginId] = uniqueIdentifier.split(':')

const [pluginId] = uniqueIdentifier?.split(':') || ['']
const [isShow, setIsShow] = useState(false)
const [isShowUpdateModal, { setTrue: showUpdateModal, setFalse: hideUpdateModal }] = useBoolean(false)
const [target, setTarget] = useState<{
})
}
const { t } = useTranslation()

// Guard against null/undefined uniqueIdentifier to prevent app crash
if (!uniqueIdentifier || !pluginId)
return null

return <Tooltip popupContent={!isShow && !isShowUpdateModal && tooltip} triggerMethod='hover'>
<div className={cn('flex w-fit items-center justify-center', className)} onClick={e => e.stopPropagation()}>
{isShowUpdateModal && pluginDetail && <PluginMutationModel

+ 7  - 1   web/app/components/workflow/nodes/agent/use-single-run-form-params.ts

}, [runResult, t])


const getDependentVars = () => {
return varInputs.map(item => item.variable.slice(1, -1).split('.'))
return varInputs.map((item) => {
// Guard against null/undefined variable to prevent app crash
if (!item.variable || typeof item.variable !== 'string')
return []

return item.variable.slice(1, -1).split('.')
}).filter(arr => arr.length > 0)
}


return {

+ 13  - 3   web/app/components/workflow/nodes/http/components/timeout/index.tsx

description: string
placeholder: string
value?: number
onChange: (value: number) => void
onChange: (value: number | undefined) => void
readOnly?: boolean
min: number
max: number
type='number'
value={value}
onChange={(e) => {
const value = Math.max(min, Math.min(max, Number.parseInt(e.target.value, 10)))
onChange(value)
const inputValue = e.target.value
if (inputValue === '') {
// When user clears the input, set to undefined to let backend use default values
onChange(undefined)
}
else {
const parsedValue = Number.parseInt(inputValue, 10)
if (!Number.isNaN(parsedValue)) {
const value = Math.max(min, Math.min(max, parsedValue))
onChange(value)
}
}
}}
placeholder={placeholder}
readOnly={readOnly}
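
The rewritten handler now distinguishes three input states instead of clamping blindly. A pure-function restatement (hypothetical bounds; the real handler simply skips the `onChange` call for non-numeric input rather than emitting a sentinel):

```ts
const parseTimeoutInput = (raw: string, min: number, max: number): number | undefined | 'ignore' => {
  if (raw === '')
    return undefined // cleared field: undefined lets the backend apply its default
  const parsed = Number.parseInt(raw, 10)
  if (Number.isNaN(parsed))
    return 'ignore' // non-numeric keystrokes produce no onChange call
  return Math.max(min, Math.min(max, parsed)) // clamp into [min, max]
}

parseTimeoutInput('', 1, 600)       // undefined
parseTimeoutInput('abc', 1, 600)    // 'ignore'
parseTimeoutInput('999999', 1, 600) // 600
```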

+ 7  - 1   web/app/components/workflow/nodes/http/use-single-run-form-params.ts

}, [inputVarValues, setInputVarValues, varInputs])


const getDependentVars = () => {
return varInputs.map(item => item.variable.slice(1, -1).split('.'))
return varInputs.map((item) => {
// Guard against null/undefined variable to prevent app crash
if (!item.variable || typeof item.variable !== 'string')
return []

return item.variable.slice(1, -1).split('.')
}).filter(arr => arr.length > 0)
}


return {

+ 4  - 3   web/app/components/workflow/nodes/iteration/panel.tsx

import { useTranslation } from 'react-i18next'
import VarReferencePicker from '../_base/components/variable/var-reference-picker'
import Split from '../_base/components/split'
import { MAX_ITERATION_PARALLEL_NUM, MIN_ITERATION_PARALLEL_NUM } from '../../constants'
import { MIN_ITERATION_PARALLEL_NUM } from '../../constants'
import type { IterationNodeType } from './types'
import useConfig from './use-config'
import { ErrorHandleMode, type NodePanelProps } from '@/app/components/workflow/types'
import Select from '@/app/components/base/select'
import Slider from '@/app/components/base/slider'
import Input from '@/app/components/base/input'
import { MAX_PARALLEL_LIMIT } from '@/config'


const i18nPrefix = 'workflow.nodes.iteration'


inputs.is_parallel && (<div className='px-4 pb-2'>
<Field title={t(`${i18nPrefix}.MaxParallelismTitle`)} isSubTitle tooltip={<div className='w-[230px]'>{t(`${i18nPrefix}.MaxParallelismDesc`)}</div>}>
<div className='row flex'>
<Input type='number' wrapperClassName='w-18 mr-4 ' max={MAX_ITERATION_PARALLEL_NUM} min={MIN_ITERATION_PARALLEL_NUM} value={inputs.parallel_nums} onChange={(e) => { changeParallelNums(Number(e.target.value)) }} />
<Input type='number' wrapperClassName='w-18 mr-4 ' max={MAX_PARALLEL_LIMIT} min={MIN_ITERATION_PARALLEL_NUM} value={inputs.parallel_nums} onChange={(e) => { changeParallelNums(Number(e.target.value)) }} />
<Slider
value={inputs.parallel_nums}
onChange={changeParallelNums}
max={MAX_ITERATION_PARALLEL_NUM}
max={MAX_PARALLEL_LIMIT}
min={MIN_ITERATION_PARALLEL_NUM}
className=' mt-4 flex-1 shrink-0'
/>

+ 2  - 0   web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/condition-list/utils.ts

ComparisonOperator.endWith,
ComparisonOperator.empty,
ComparisonOperator.notEmpty,
ComparisonOperator.in,
ComparisonOperator.notIn,
] ]
case MetadataFilteringVariableType.number:
return [

+ 60  - 42   web/app/components/workflow/nodes/list-operator/components/filter-condition.tsx

import { VarType } from '../../../types'


const optionNameI18NPrefix = 'workflow.nodes.ifElse.optionName'
import { getConditionValueAsString } from '@/app/components/workflow/nodes/utils'


const VAR_INPUT_SUPPORTED_KEYS: Record<string, VarType> = {
name: VarType.string,
url: VarType.string,
extension: VarType.string,
mime_type: VarType.string,
related_id: VarType.number,
related_id: VarType.string,
size: VarType.number,
}


type Props = {
condition: Condition
onChange: (condition: Condition) => void
varType: VarType
hasSubVariable: boolean
readOnly: boolean
nodeId: string


const FilterCondition: FC<Props> = ({
condition = { key: '', comparison_operator: ComparisonOperator.equal, value: '' },
varType,
onChange,
hasSubVariable,
readOnly,
const { t } = useTranslation()
const [isFocus, setIsFocus] = useState(false)


const expectedVarType = VAR_INPUT_SUPPORTED_KEYS[condition.key]
const expectedVarType = condition.key ? VAR_INPUT_SUPPORTED_KEYS[condition.key] : varType
const supportVariableInput = !!expectedVarType


const { availableVars, availableNodesWithParent } = useAvailableVarList(nodeId, {
})
}, [onChange, expectedVarType])


// Extract input rendering logic to avoid nested ternary
let inputElement: React.ReactNode = null
if (!comparisonOperatorNotRequireValue(condition.comparison_operator)) {
if (isSelect) {
inputElement = (
<Select
items={selectOptions}
defaultValue={isArrayValue ? (condition.value as string[])[0] : condition.value as string}
onSelect={item => handleChange('value')(item.value)}
className='!text-[13px]'
wrapperClassName='grow h-8'
placeholder='Select value'
/>
)
}
else if (supportVariableInput) {
inputElement = (
<Input
instanceId='filter-condition-input'
className={cn(
isFocus
? 'border-components-input-border-active bg-components-input-bg-active shadow-xs'
: 'border-components-input-border-hover bg-components-input-bg-normal',
'w-0 grow rounded-lg border px-3 py-[6px]',
)}
value={
getConditionValueAsString(condition)
}
onChange={handleChange('value')}
readOnly={readOnly}
nodesOutputVars={availableVars}
availableNodes={availableNodesWithParent}
onFocusChange={setIsFocus}
placeholder={!readOnly ? t('workflow.nodes.http.insertVarPlaceholder')! : ''}
placeholderClassName='!leading-[21px]'
/>
)
}
else {
inputElement = (
<input
type={((hasSubVariable && condition.key === 'size') || (!hasSubVariable && varType === VarType.number)) ? 'number' : 'text'}
className='grow rounded-lg border border-components-input-border-hover bg-components-input-bg-normal px-3 py-[6px]'
value={
getConditionValueAsString(condition)
}
onChange={e => handleChange('value')(e.target.value)}
readOnly={readOnly}
/>
)
}
}

return (
<div>
{hasSubVariable && (
file={hasSubVariable ? { key: condition.key } : undefined}
disabled={readOnly}
/>
{!comparisonOperatorNotRequireValue(condition.comparison_operator) && (
<>
{isSelect ? (
<Select
items={selectOptions}
defaultValue={isArrayValue ? (condition.value as string[])[0] : condition.value as string}
onSelect={item => handleChange('value')(item.value)}
className='!text-[13px]'
wrapperClassName='grow h-8'
placeholder='Select value'
/>
) : supportVariableInput ? (
<Input
instanceId='filter-condition-input'
className={cn(
isFocus
? 'border-components-input-border-active bg-components-input-bg-active shadow-xs'
: 'border-components-input-border-hover bg-components-input-bg-normal',
'w-0 grow rounded-lg border px-3 py-[6px]',
)}
value={condition.value}
onChange={handleChange('value')}
readOnly={readOnly}
nodesOutputVars={availableVars}
availableNodes={availableNodesWithParent}
onFocusChange={setIsFocus}
placeholder={!readOnly ? t('workflow.nodes.http.extractListPlaceholder')! : ''}
placeholderClassName='!leading-[21px]'
/>
) : (
<input
type={(condition.key === 'size' || expectedVarType === VarType.number) ? 'number' : 'text'}
className='grow rounded-lg border border-components-input-border-hover bg-components-input-bg-normal px-3 py-[6px]'
value={condition.value}
onChange={e => handleChange('value')(e.target.value)}
readOnly={readOnly}
/>
)}
</>
)}
{inputElement}
</div>
</div>
)

+ 7  - 1   web/app/components/workflow/nodes/llm/use-single-run-form-params.ts

})()


const getDependentVars = () => {
const promptVars = varInputs.map(item => item.variable.slice(1, -1).split('.'))
const promptVars = varInputs.map((item) => {
// Guard against null/undefined variable to prevent app crash
if (!item.variable || typeof item.variable !== 'string')
return []

return item.variable.slice(1, -1).split('.')
}).filter(arr => arr.length > 0)
const contextVar = payload.context.variable_selector
const vars = [...promptVars, contextVar]
if (isVisionModel && payload.vision?.enabled && payload.vision?.configs?.variable_selector) {

+ 7  - 1   web/app/components/workflow/nodes/parameter-extractor/use-single-run-form-params.ts

})()


const getDependentVars = () => {
const promptVars = varInputs.map(item => item.variable.slice(1, -1).split('.'))
const promptVars = varInputs.map((item) => {
// Guard against null/undefined variable to prevent app crash
if (!item.variable || typeof item.variable !== 'string')
return []

return item.variable.slice(1, -1).split('.')
}).filter(arr => arr.length > 0)
const vars = [payload.query, ...promptVars]
if (isVisionModel && payload.vision?.enabled && payload.vision?.configs?.variable_selector) {
const visionVar = payload.vision.configs.variable_selector

+ 7  - 1   web/app/components/workflow/nodes/question-classifier/use-single-run-form-params.ts

})()


const getDependentVars = () => {
const promptVars = varInputs.map(item => item.variable.slice(1, -1).split('.'))
const promptVars = varInputs.map((item) => {
// Guard against null/undefined variable to prevent app crash
if (!item.variable || typeof item.variable !== 'string')
return []

return item.variable.slice(1, -1).split('.')
}).filter(arr => arr.length > 0)
const vars = [payload.query_variable_selector, ...promptVars]
if (isVisionModel && payload.vision?.enabled && payload.vision?.configs?.variable_selector) {
const visionVar = payload.vision.configs.variable_selector

+ 7  - 1   web/app/components/workflow/nodes/tool/use-single-run-form-params.ts

const toolIcon = useToolIcon(payload)


const getDependentVars = () => {
return varInputs.map(item => item.variable.slice(1, -1).split('.'))
return varInputs.map((item) => {
// Guard against null/undefined variable to prevent app crash
if (!item.variable || typeof item.variable !== 'string')
return []

return item.variable.slice(1, -1).split('.')
}).filter(arr => arr.length > 0)
}


return {

+ 10  - 0   web/app/components/workflow/nodes/utils.ts

formType,
}
}

export const getConditionValueAsString = (condition: { value: any }) => {
if (Array.isArray(condition.value))
return condition.value[0] ?? ''

if (typeof condition.value === 'number')
return String(condition.value)

return condition.value ?? ''
}
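
Behavior of the new `getConditionValueAsString` helper at a glance (hypothetical inputs):

```ts
getConditionValueAsString({ value: ['contains', 'is'] }) // 'contains' — first array element
getConditionValueAsString({ value: [] })                 // ''        — empty array falls back via ??
getConditionValueAsString({ value: 42 })                 // '42'      — numbers stringified
getConditionValueAsString({ value: undefined })          // ''        — nullish falls back
getConditionValueAsString({ value: 'text' })             // 'text'    — strings pass through
```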

+ 11  - 4   web/config/index.ts

}


const getNumberConfig = (envVar: string | undefined, dataAttrKey: DatasetAttr, defaultValue: number) => {
if (envVar)
return Number.parseInt(envVar)
if (envVar) {
const parsed = Number.parseInt(envVar)
if (!Number.isNaN(parsed) && parsed > 0)
return parsed
}


const attrValue = globalThis.document?.body?.getAttribute(dataAttrKey)
if (attrValue)
return Number.parseInt(attrValue)
if (attrValue) {
const parsed = Number.parseInt(attrValue)
if (!Number.isNaN(parsed) && parsed > 0)
return parsed
}
return defaultValue
}


export const JSON_SCHEMA_MAX_DEPTH = 10


export const MAX_TOOLS_NUM = getNumberConfig(process.env.NEXT_PUBLIC_MAX_TOOLS_NUM, DatasetAttr.DATA_PUBLIC_MAX_TOOLS_NUM, 10)
export const MAX_PARALLEL_LIMIT = getNumberConfig(process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT, DatasetAttr.DATA_PUBLIC_MAX_PARALLEL_LIMIT, 10)
export const TEXT_GENERATION_TIMEOUT_MS = getNumberConfig(process.env.NEXT_PUBLIC_TEXT_GENERATION_TIMEOUT_MS, DatasetAttr.DATA_PUBLIC_TEXT_GENERATION_TIMEOUT_MS, 60000)
export const LOOP_NODE_MAX_COUNT = getNumberConfig(process.env.NEXT_PUBLIC_LOOP_NODE_MAX_COUNT, DatasetAttr.DATA_PUBLIC_LOOP_NODE_MAX_COUNT, 100)
export const MAX_ITERATIONS_NUM = getNumberConfig(process.env.NEXT_PUBLIC_MAX_ITERATIONS_NUM, DatasetAttr.DATA_PUBLIC_MAX_ITERATIONS_NUM, 99)
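
With the stricter parsing in place, each constant resolves through the same three-step fallback. A compact restatement of `getNumberConfig`'s precedence (hypothetical inputs; `resolve` is an illustrative stand-in, not exported anywhere):

```ts
const resolve = (envVar: string | undefined, attrValue: string | null, defaultValue: number): number => {
  for (const raw of [envVar, attrValue]) {
    if (raw) {
      const parsed = Number.parseInt(raw)
      if (!Number.isNaN(parsed) && parsed > 0)
        return parsed // first source that yields a positive integer wins
    }
  }
  return defaultValue
}

resolve('abc', '25', 10) // 25: invalid env var falls through to the body attribute
resolve('0', '-5', 10)   // 10: non-positive values are rejected at every level
```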

+ 2  - 2   web/i18n-config/README.md



```
├── [ 24] README.md
├── [ 0] README_CN.md
├── [ 704] en-US
│   ├── [2.4K] app-annotation.ts
│   ├── [5.2K] app-api.ts


We use English as the default language. The i18n files are organized by language and then by module. For example, the English translation for the `app` module is in `en-US/app.ts`.


If you want to add a new language or modify an existing translation, you can create a new file for the language or modify the existing file. The file name should be the language code (e.g., `zh-CN` for Chinese) and the file extension should be `.ts`.
If you want to add a new language or modify an existing translation, you can create a new file for the language or modify the existing file. The file name should be the language code (e.g., `zh-Hans` for Chinese) and the file extension should be `.ts`.


For example, if you want to add french translation, you can create a new folder `fr-FR` and add the translation files in it.


1. Create a new folder for the new language.


```
cd web/i18n
cp -r en-US fr-FR
```



+ 214  - 9   web/i18n-config/check-i18n.js

const iterateKeys = (obj, prefix = '') => { const iterateKeys = (obj, prefix = '') => {
for (const key in obj) { for (const key in obj) {
const nestedKey = prefix ? `${prefix}.${key}` : key const nestedKey = prefix ? `${prefix}.${key}` : key
nestedKeys.push(nestedKey)
if (typeof obj[key] === 'object' && obj[key] !== null)
if (typeof obj[key] === 'object' && obj[key] !== null && !Array.isArray(obj[key])) {
// This is an object (but not array), recurse into it but don't add it as a key
iterateKeys(obj[key], nestedKey) iterateKeys(obj[key], nestedKey)
}
else {
// This is a leaf node (string, number, boolean, array, etc.), add it as a key
nestedKeys.push(nestedKey)
}
}
}
iterateKeys(translationObj)
})
}
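The effect of the change is that only leaf values are collected as dot-separated keys; containers are recursed into but no longer counted. A small illustration with a hypothetical input:

```
// iterateKeys on this object collects only the leaf paths
// 'app.newApp.startFromBlank' and 'app.types' (an array counts as a leaf);
// the container keys 'app' and 'app.newApp' are no longer pushed.
const sample = {
  app: {
    newApp: { startFromBlank: 'Create from Blank' },
    types: ['all', 'chat'],
  },
}
```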


function removeKeysFromObject(obj, keysToRemove, prefix = '') {
let modified = false
for (const key in obj) {
const fullKey = prefix ? `${prefix}.${key}` : key

if (keysToRemove.includes(fullKey)) {
delete obj[key]
modified = true
console.log(`🗑️ Removed key: ${fullKey}`)
}
else if (typeof obj[key] === 'object' && obj[key] !== null) {
const subModified = removeKeysFromObject(obj[key], keysToRemove, fullKey)
modified = modified || subModified
}
}
return modified
}
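A hypothetical call, to show how the dot paths map onto the in-memory object (the object and key here are made up for illustration):

```
// Removes the nested key and reports it; unrelated siblings are untouched.
const obj = { billing: { teamMembers: 'Team Members', buildApps: 'Build Apps' } }
removeKeysFromObject(obj, ['billing.teamMembers'])
// logs: 🗑️ Removed key: billing.teamMembers
// obj is now { billing: { buildApps: 'Build Apps' } }
```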

async function removeExtraKeysFromFile(language, fileName, extraKeys) {
const filePath = path.resolve(__dirname, '../i18n', language, `${fileName}.ts`)

if (!fs.existsSync(filePath)) {
console.log(`⚠️ File not found: ${filePath}`)
return false
}

try {
// Filter keys that belong to this file
const camelCaseFileName = fileName.replace(/[-_](.)/g, (_, c) => c.toUpperCase())
const fileSpecificKeys = extraKeys
.filter(key => key.startsWith(`${camelCaseFileName}.`))
.map(key => key.substring(camelCaseFileName.length + 1)) // Remove file prefix

if (fileSpecificKeys.length === 0)
return false

console.log(`🔄 Processing file: ${filePath}`)

// Read the original file content
const content = fs.readFileSync(filePath, 'utf8')
const lines = content.split('\n')

let modified = false
const linesToRemove = []

// Find lines to remove for each key
for (const keyToRemove of fileSpecificKeys) {
const keyParts = keyToRemove.split('.')
let targetLineIndex = -1

// Build regex pattern for the exact key path
if (keyParts.length === 1) {
// Simple key at root level like "pickDate: 'value'"
for (let i = 0; i < lines.length; i++) {
const line = lines[i]
const simpleKeyPattern = new RegExp(`^\\s*${keyParts[0]}\\s*:`)
if (simpleKeyPattern.test(line)) {
targetLineIndex = i
break
}
}
}
else {
// Nested key - need to find the exact path
const currentPath = []
let braceDepth = 0

for (let i = 0; i < lines.length; i++) {
const line = lines[i]
const trimmedLine = line.trim()

// Track current object path
const keyMatch = trimmedLine.match(/^(\w+)\s*:\s*{/)
if (keyMatch) {
currentPath.push(keyMatch[1])
braceDepth++
}
else if (trimmedLine === '},' || trimmedLine === '}') {
if (braceDepth > 0) {
braceDepth--
currentPath.pop()
}
}

// Check if this line matches our target key
const leafKeyMatch = trimmedLine.match(/^(\w+)\s*:/)
if (leafKeyMatch) {
const fullPath = [...currentPath, leafKeyMatch[1]]
const fullPathString = fullPath.join('.')

if (fullPathString === keyToRemove) {
targetLineIndex = i
break
}
}
}
}

if (targetLineIndex !== -1) {
linesToRemove.push(targetLineIndex)
console.log(`🗑️ Found key to remove: ${keyToRemove} at line ${targetLineIndex + 1}`)
modified = true
}
else {
console.log(`⚠️ Could not find key: ${keyToRemove}`)
}
}

if (modified) {
// Remove lines in reverse order to maintain correct indices
linesToRemove.sort((a, b) => b - a)

for (const lineIndex of linesToRemove) {
const line = lines[lineIndex]
console.log(`🗑️ Removing line ${lineIndex + 1}: ${line.trim()}`)
lines.splice(lineIndex, 1)

// Also remove trailing comma from previous line if it exists and the next line is a closing brace
if (lineIndex > 0 && lineIndex < lines.length) {
const prevLine = lines[lineIndex - 1]
const nextLine = lines[lineIndex] ? lines[lineIndex].trim() : ''

if (prevLine.trim().endsWith(',') && (nextLine.startsWith('}') || nextLine === ''))
lines[lineIndex - 1] = prevLine.replace(/,\s*$/, '')
}
}

// Write back to file
const newContent = lines.join('\n')
fs.writeFileSync(filePath, newContent)
console.log(`💾 Updated file: ${filePath}`)
return true
}

return false
}
catch (error) {
console.error(`Error processing file ${filePath}:`, error.message)
return false
}
}

// Add command line argument support
const targetFile = process.argv.find(arg => arg.startsWith('--file='))?.split('=')[1]
const targetLang = process.argv.find(arg => arg.startsWith('--lang='))?.split('=')[1]
const autoRemove = process.argv.includes('--auto-remove')
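Together the three flags allow a run scoped to a single module and language; for example, assuming the script is invoked with node from the `web` directory:

```
node i18n-config/check-i18n.js --file=app --lang=de-DE --auto-remove
```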

async function main() {
const compareKeysCount = async () => {
const targetKeys = await getKeysFromLanguage(targetLanguage)
const languagesKeys = await Promise.all(languages.map(language => getKeysFromLanguage(language)))
const allTargetKeys = await getKeysFromLanguage(targetLanguage)

// Filter target keys by file if specified
const targetKeys = targetFile
? allTargetKeys.filter(key => key.startsWith(targetFile.replace(/[-_](.)/g, (_, c) => c.toUpperCase())))
: allTargetKeys

// Filter languages by target language if specified
const languagesToProcess = targetLang ? [targetLang] : languages

const allLanguagesKeys = await Promise.all(languagesToProcess.map(language => getKeysFromLanguage(language)))

// Filter language keys by file if specified
const languagesKeys = targetFile
? allLanguagesKeys.map(keys => keys.filter(key => key.startsWith(targetFile.replace(/[-_](.)/g, (_, c) => c.toUpperCase()))))
: allLanguagesKeys


const keysCount = languagesKeys.map(keys => keys.length)
const targetKeysCount = targetKeys.length


const comparison = languages.reduce((result, language, index) => {
const comparison = languagesToProcess.reduce((result, language, index) => {
const languageKeysCount = keysCount[index]
const difference = targetKeysCount - languageKeysCount
result[language] = difference


console.log(comparison)


// Print missing keys
languages.forEach((language, index) => {
const missingKeys = targetKeys.filter(key => !languagesKeys[index].includes(key))
// Print missing keys and extra keys
for (let index = 0; index < languagesToProcess.length; index++) {
const language = languagesToProcess[index]
const languageKeys = languagesKeys[index]
const missingKeys = targetKeys.filter(key => !languageKeys.includes(key))
const extraKeys = languageKeys.filter(key => !targetKeys.includes(key))

console.log(`Missing keys in ${language}:`, missingKeys)
})

// Show extra keys only when there are extra keys (negative difference)
if (extraKeys.length > 0) {
console.log(`Extra keys in ${language} (not in ${targetLanguage}):`, extraKeys)

// Auto-remove extra keys if flag is set
if (autoRemove) {
console.log(`\n🤖 Auto-removing extra keys from ${language}...`)

// Get all translation files
const i18nFolder = path.resolve(__dirname, '../i18n', language)
const files = fs.readdirSync(i18nFolder)
.filter(file => /\.ts$/.test(file))
.map(file => file.replace(/\.ts$/, ''))
.filter(f => !targetFile || f === targetFile) // Filter by target file if specified

let totalRemoved = 0
for (const fileName of files) {
const removed = await removeExtraKeysFromFile(language, fileName, extraKeys)
if (removed) totalRemoved++
}

console.log(`✅ Auto-removal completed for ${language}. Modified ${totalRemoved} files.`)
}
}
}
}


console.log('🚀 Starting check-i18n script...')
if (targetFile)
console.log(`📁 Checking file: ${targetFile}`)

if (targetLang)
console.log(`🌍 Checking language: ${targetLang}`)

if (autoRemove)
console.log('🤖 Auto-remove mode: ENABLED')

compareKeysCount()
}
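Assuming `targetLanguage` is `en-US` (the default, per the README above), a scoped run might print output along these lines; the values are purely illustrative and consistent with the logic above (two extra keys, none missing, so the difference is negative):

```
🚀 Starting check-i18n script...
📁 Checking file: app
🌍 Checking language: de-DE
{ 'de-DE': -2 }
Missing keys in de-DE: []
Extra keys in de-DE (not in en-US): [ 'app.newApp.basicFor', 'app.newApp.basicTip' ]
```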



+ 0
- 1
web/i18n/de-DE/app-debug.ts View File

queryNoBeEmpty: 'Anfrage muss im Prompt gesetzt sein',
},
variableConfig: {
modalTitle: 'Feldeinstellungen',
description: 'Einstellung für Variable {{varName}}',
fieldType: 'Feldtyp',
string: 'Kurztext',

+ 0
- 19
web/i18n/de-DE/app.ts View File

createApp: 'Neue App erstellen',
types: {
all: 'Alle',
assistant: 'Assistent',
completion: 'Vervollständigung',
workflow: 'Arbeitsablauf',
agent: 'Agent',
advanced: 'Chatflow',
},
modes: {
completion: 'Textgenerator',
chat: 'Basisassistent',
},
createFromConfigFile: 'App aus Konfigurationsdatei erstellen',
deleteAppConfirmTitle: 'Diese App löschen?',
communityIntro:
'Diskutieren Sie mit Teammitgliedern, Mitwirkenden und Entwicklern auf verschiedenen Kanälen.',
roadmap: 'Sehen Sie unseren Fahrplan',
appNamePlaceholder: 'Bitte geben Sie den Namen der App ein',
newApp: {
startToCreate: 'Lassen Sie uns mit Ihrer neuen App beginnen',
captionName: 'App-Symbol & Name',
captionAppType: 'Welchen Typ von App möchten Sie erstellen?',
previewDemo: 'Vorschau-Demo',
chatApp: 'Assistent',
chatAppIntro:
appTypeRequired: 'Bitte wählen Sie einen App-Typ',
appCreated: 'App erstellt',
appCreateFailed: 'Erstellen der App fehlgeschlagen',
basic: 'Grundlegend',
chatbotType: 'Chatbot-Orchestrierungsmethode',
workflowDescription: 'Erstellen Sie eine Anwendung, die qualitativ hochwertigen Text auf der Grundlage von Workflow-Orchestrierungen mit einem hohen Maß an Anpassung generiert. Es ist für erfahrene Benutzer geeignet.',
advancedFor: 'Für Fortgeschrittene',
startFromTemplate: 'Aus Vorlage erstellen',
appNamePlaceholder: 'Geben Sie Ihrer App einen Namen',
startFromBlank: 'Aus Leer erstellen',
basicTip: 'Für Anfänger können Sie später zu Chatflow wechseln',
basicDescription: 'Basic Orchestrate ermöglicht die Orchestrierung einer Chatbot-App mit einfachen Einstellungen, ohne die Möglichkeit, integrierte Eingabeaufforderungen zu ändern. Es ist für Anfänger geeignet.',
workflowWarning: 'Derzeit in der Beta-Phase',
advancedDescription: 'Workflow Orchestrate orchestriert Chatbots in Form von Workflows und bietet ein hohes Maß an Individualisierung, einschließlich der Möglichkeit, integrierte Eingabeaufforderungen zu bearbeiten. Es ist für erfahrene Benutzer geeignet.',
basicFor: 'FÜR ANFÄNGER',
completionWarning: 'Diese Art von App wird nicht mehr unterstützt.',
chatbotDescription: 'Erstellen Sie eine chatbasierte Anwendung. Diese App verwendet ein Frage-und-Antwort-Format, das mehrere Runden kontinuierlicher Konversation ermöglicht.',
captionDescription: 'Beschreibung',
advanced: 'Chatflow',
useTemplate: 'Diese Vorlage verwenden',
agentDescription: 'Erstellen Sie einen intelligenten Agenten, der autonom Werkzeuge auswählen kann, um die Aufgaben zu erledigen',
completionDescription: 'Erstellen Sie eine Anwendung, die qualitativ hochwertigen Text auf der Grundlage von Eingabeaufforderungen generiert, z. B. zum Generieren von Artikeln, Zusammenfassungen, Übersetzungen und mehr.',
appDescriptionPlaceholder: 'Geben Sie die Beschreibung der App ein',
caution: 'Vorsicht',
Confirm: 'Bestätigen',

+ 22
- 32
web/i18n/de-DE/billing.ts View File

contractSales: 'Vertrieb kontaktieren',
contractOwner: 'Teammanager kontaktieren',
startForFree: 'Kostenlos starten',
getStartedWith: 'Beginnen Sie mit ',
contactSales: 'Vertrieb kontaktieren',
talkToSales: 'Mit dem Vertrieb sprechen',
modelProviders: 'Modellanbieter',
teamMembers: 'Teammitglieder',
buildApps: 'Apps bauen',
vectorSpace: 'Vektorraum',
vectorSpaceBillingTooltip: 'Jedes 1MB kann ungefähr 1,2 Millionen Zeichen an vektorisierten Daten speichern (geschätzt mit OpenAI Embeddings, variiert je nach Modell).',
vectorSpaceTooltip: 'Vektorraum ist das Langzeitspeichersystem, das erforderlich ist, damit LLMs Ihre Daten verstehen können.',
documentsUploadQuota: 'Dokumenten-Upload-Kontingent',
documentProcessingPriority: 'Priorität der Dokumentenverarbeitung',
documentProcessingPriorityTip: 'Für eine höhere Dokumentenverarbeitungspriorität, bitte Ihren Tarif upgraden.',
documentProcessingPriorityUpgrade: 'Mehr Daten mit höherer Genauigkeit bei schnelleren Geschwindigkeiten verarbeiten.',
priority: {
'standard': 'Standard',
sandbox: {
name: 'Sandbox',
description: '200 mal GPT kostenlos testen',
includesTitle: 'Beinhaltet:',
for: 'Kostenlose Testversion der Kernfunktionen',
},
professional: {
name: 'Professionell',
description: 'Für Einzelpersonen und kleine Teams, um mehr Leistung erschwinglich freizuschalten.',
includesTitle: 'Alles im kostenlosen Tarif, plus:',
for: 'Für unabhängige Entwickler/kleine Teams',
},
team: {
name: 'Team',
description: 'Zusammenarbeiten ohne Grenzen und Top-Leistung genießen.',
includesTitle: 'Alles im Professionell-Tarif, plus:',
for: 'Für mittelgroße Teams',
},
enterprise: {
name: 'Unternehmen',
description: 'Erhalten Sie volle Fähigkeiten und Unterstützung für großangelegte, missionskritische Systeme.',
includesTitle: 'Alles im Team-Tarif, plus:',
features: {
2: 'Exklusive Unternehmensfunktionen',
8: 'Professioneller technischer Support',
6: 'Erweiterte Sicherheits- und Kontrollsysteme',
4: 'SSO',
0: 'Enterprise-Grade Skalierbare Bereitstellungslösungen',
3: 'Mehrere Arbeitsbereiche und Unternehmensverwaltung',
1: 'Kommerzielle Lizenzgenehmigung',
5: 'Verhandelte SLAs durch Dify-Partner',
7: 'Updates und Wartung von Dify offiziell',
},
btnText: 'Vertrieb kontaktieren',
price: 'Benutzerdefiniert',
priceTip: 'Jährliche Abrechnung nur',
for: 'Für große Teams',
features: [
'Skalierbare Bereitstellungslösungen in Unternehmensqualität',
'Kommerzielle Lizenzierung',
'Exklusive Enterprise-Funktionen',
'Mehrere Arbeitsbereiche und Unternehmensverwaltung',
'SSO (Single Sign-On)',
'Vereinbarte SLAs mit Dify-Partnern',
'Erweiterte Sicherheitsfunktionen und Kontrollen',
'Offizielle Updates und Wartung durch Dify',
'Professioneller technischer Support',
],
},
community: {
features: {
2: 'Entspricht der Dify Open Source Lizenz',
1: 'Einzelner Arbeitsbereich',
0: 'Alle Kernfunktionen wurden im öffentlichen Repository veröffentlicht.',
},
description: 'Für Einzelbenutzer, kleine Teams oder nicht-kommerzielle Projekte',
for: 'Für Einzelbenutzer, kleine Teams oder nicht-kommerzielle Projekte',
btnText: 'Beginnen Sie mit der Gemeinschaft',
price: 'Kostenlos',
includesTitle: 'Kostenlose Funktionen:',
name: 'Gemeinschaft',
features: [
'Alle Kernfunktionen im öffentlichen Repository veröffentlicht',
'Einzelner Arbeitsbereich',
'Entspricht der Dify Open-Source-Lizenz',
],
},
premium: {
features: {
2: 'WebApp-Logo und Branding-Anpassung',
0: 'Selbstverwaltete Zuverlässigkeit durch verschiedene Cloud-Anbieter',
3: 'Priorisierte E-Mail- und Chat-Unterstützung',
1: 'Einzelner Arbeitsbereich',
},
includesTitle: 'Alles aus der Community, plus:',
name: 'Premium',
priceTip: 'Basierend auf dem Cloud-Marktplatz',
comingSoon: 'Microsoft Azure- und Google Cloud-Support demnächst verfügbar',
description: 'Für mittelgroße Organisationen und Teams',
price: 'Skalierbar',
features: [
'Selbstverwaltete Zuverlässigkeit durch verschiedene Cloud-Anbieter',
'Einzelner Arbeitsbereich',
'Anpassung von WebApp-Logo und Branding',
'Bevorzugter E-Mail- und Chat-Support',
],
},
},
vectorSpace: {
fullSolution: 'Upgraden Sie Ihren Tarif, um mehr Speicherplatz zu erhalten.',
},
apps: {
fullTipLine1: 'Upgraden Sie Ihren Tarif, um',
fullTipLine2: 'mehr Apps zu bauen.',
contactUs: 'Kontaktieren Sie uns',
fullTip1: 'Upgrade, um mehr Apps zu erstellen',
fullTip2des: 'Es wird empfohlen, inaktive Anwendungen zu bereinigen, um Speicherplatz freizugeben, oder uns zu kontaktieren.',

+ 0
- 1
web/i18n/de-DE/common.ts View File

showAppLength: '{{length}} Apps anzeigen',
delete: 'Konto löschen',
deleteTip: 'Wenn Sie Ihr Konto löschen, werden alle Ihre Daten dauerhaft gelöscht und können nicht wiederhergestellt werden.',
deleteConfirmTip: 'Zur Bestätigung senden Sie bitte Folgendes von Ihrer registrierten E-Mail-Adresse an ',
myAccount: 'Mein Konto',
studio: 'Dify Studio',
account: 'Konto',

+ 0
- 2
web/i18n/de-DE/dataset-creation.ts View File

const translation = {
steps: {
header: {
creation: 'Wissen erstellen',
update: 'Daten hinzufügen',
fallbackRoute: 'Wissen',
},
one: 'Datenquelle wählen',

+ 0
- 2
web/i18n/de-DE/dataset-documents.ts View File

journalConferenceName: 'Zeitschrift/Konferenzname',
volumeIssuePage: 'Band/Ausgabe/Seite',
DOI: 'DOI',
topicKeywords: 'Themen/Schlüsselwörter',
abstract: 'Zusammenfassung',
topicsKeywords: 'Themen/Stichworte',
},
keywords: 'Schlüsselwörter',
addKeyWord: 'Schlüsselwort hinzufügen',
keywordError: 'Die maximale Länge des Schlüsselworts beträgt 20',
characters: 'Zeichen',
hitCount: 'Abrufanzahl',
vectorHash: 'Vektor-Hash: ',
questionPlaceholder: 'Frage hier hinzufügen',

+ 0
- 1
web/i18n/de-DE/dataset-hit-testing.ts View File

title: 'Abruf-Test',
desc: 'Testen Sie die Treffereffektivität des Wissens anhand des gegebenen Abfragetextes.',
dateTimeFormat: 'MM/DD/YYYY hh:mm A',
recents: 'Kürzlich',
table: {
header: {
source: 'Quelle',

+ 0
- 1
web/i18n/de-DE/login.ts View File

activated: 'Jetzt anmelden',
adminInitPassword: 'Admin-Initialpasswort',
validate: 'Validieren',
sso: 'Mit SSO fortfahren',
checkCode: {
didNotReceiveCode: 'Sie haben den Code nicht erhalten?',
verificationCodePlaceholder: 'Geben Sie den 6-stelligen Code ein',

+ 0
- 1
web/i18n/de-DE/run-log.ts View File

resultEmpty: {
title: 'Dieser Lauf gibt nur das JSON-Format aus',
tipLeft: 'Bitte gehen Sie zum ',
Link: 'Detailpanel',
tipRight: 'ansehen.',
link: 'Gruppe Detail',
},

+ 0
- 1
web/i18n/de-DE/tools.ts View File

keyTooltip: 'Http Header Key, Sie können es bei "Authorization" belassen, wenn Sie nicht wissen, was es ist, oder auf einen benutzerdefinierten Wert setzen',
types: {
none: 'Keine',
api_key: 'API-Key',
apiKeyPlaceholder: 'HTTP-Headername für API-Key',
apiValuePlaceholder: 'API-Key eingeben',
api_key_header: 'Kopfzeile',

+ 0
- 5
web/i18n/de-DE/workflow.ts View File

loadMore: 'Weitere Workflows laden',
noHistory: 'Keine Geschichte',
exportSVG: 'Als SVG exportieren',
noExist: 'Keine solche Variable',
versionHistory: 'Versionsverlauf',
publishUpdate: 'Update veröffentlichen',
referenceVar: 'Referenzvariable',
exportImage: 'Bild exportieren',
exportJPEG: 'Als JPEG exportieren',
exitVersions: 'Ausgangsversionen',
tabs: {
'tools': 'Werkzeuge',
'allTool': 'Alle',
'builtInTool': 'Eingebaut',
'customTool': 'Benutzerdefiniert',
'workflowTool': 'Arbeitsablauf',
'question-understand': 'Fragen verstehen',
'not empty': 'ist nicht leer',
'null': 'ist null',
'not null': 'ist nicht null',
'regex match': 'Regex-Übereinstimmung',
'not exists': 'existiert nicht',
'in': 'in',
'all of': 'alle',
},
select: 'Auswählen',
addSubVariable: 'Untervariable',
condition: 'Bedingung',
},
variableAssigner: {
title: 'Variablen zuweisen',

+ 10
- 0
web/i18n/en-US/app-annotation.ts View File

error: 'Import Error',
ok: 'OK',
},
list: {
delete: {
title: 'Are you sure Delete?',
},
},
batchAction: {
selected: 'Selected',
delete: 'Delete',
cancel: 'Cancel',
},
errorMessage: {
answerRequired: 'Answer is required',
queryRequired: 'Question is required',

+ 0
- 15
web/i18n/es-ES/app-debug.ts View File

},
},
automatic: {
title: 'Orquestación automatizada de aplicaciones',
description: 'Describe tu escenario, Dify orquestará una aplicación para ti.',
intendedAudience: '¿Quién es el público objetivo?',
intendedAudiencePlaceHolder: 'p.ej. Estudiante',
solveProblem: '¿Qué problemas esperan que la IA pueda resolver para ellos?',
solveProblemPlaceHolder: 'p.ej. Extraer ideas y resumir información de informes y artículos largos',
generate: 'Generar',
audiencesRequired: 'Audiencia requerida',
problemRequired: 'Problema requerido',
resTitle: 'Hemos orquestado la siguiente aplicación para ti.',
apply: 'Aplicar esta orquestación',
noData: 'Describe tu caso de uso a la izquierda, la vista previa de la orquestación se mostrará aquí.',
loading: 'Orquestando la aplicación para ti...',
overwriteTitle: '¿Sobrescribir configuración existente?',
overwriteMessage: 'Aplicar esta orquestación sobrescribirá la configuración existente.',
},
resetConfig: {
title: '¿Confirmar restablecimiento?',

+ 0
- 14
web/i18n/es-ES/app.ts View File

newApp: {
startFromBlank: 'Crear desde cero',
startFromTemplate: 'Crear desde plantilla',
captionAppType: '¿Qué tipo de app quieres crear?',
chatbotDescription: 'Crea una aplicación basada en chat. Esta app utiliza un formato de pregunta y respuesta, permitiendo múltiples rondas de conversación continua.',
completionDescription: 'Crea una aplicación que genera texto de alta calidad basado en prompts, como la generación de artículos, resúmenes, traducciones y más.',
completionWarning: 'Este tipo de app ya no será compatible.',
agentDescription: 'Crea un Agente inteligente que puede elegir herramientas de forma autónoma para completar tareas',
workflowDescription: 'Crea una aplicación que genera texto de alta calidad basado en flujos de trabajo con un alto grado de personalización. Es adecuado para usuarios experimentados.',
workflowWarning: 'Actualmente en beta',
chatbotType: 'Método de orquestación del Chatbot',
basic: 'Básico',
basicTip: 'Para principiantes, se puede cambiar a Chatflow más adelante',
basicFor: 'PARA PRINCIPIANTES',
basicDescription: 'La Orquestación Básica permite la orquestación de una app de Chatbot utilizando configuraciones simples, sin la capacidad de modificar los prompts incorporados. Es adecuado para principiantes.',
advanced: 'Chatflow',
advancedFor: 'Para usuarios avanzados',
advancedDescription: 'La Orquestación de Flujo de Trabajo orquesta Chatbots en forma de flujos de trabajo, ofreciendo un alto grado de personalización, incluida la capacidad de editar los prompts incorporados. Es adecuado para usuarios experimentados.',
captionName: 'Icono y nombre de la app',
appNamePlaceholder: 'Asigna un nombre a tu app',
captionDescription: 'Descripción',

+ 22
- 32
web/i18n/es-ES/billing.ts View File

contractSales: 'Contactar ventas',
contractOwner: 'Contactar al administrador del equipo',
startForFree: 'Empezar gratis',
getStartedWith: 'Empezar con ',
contactSales: 'Contactar Ventas',
talkToSales: 'Hablar con Ventas',
modelProviders: 'Proveedores de Modelos',
teamMembers: 'Miembros del Equipo',
annotationQuota: 'Cuota de Anotación',
buildApps: 'Crear Aplicaciones',
vectorSpace: 'Espacio Vectorial',
vectorSpaceBillingTooltip: 'Cada 1MB puede almacenar aproximadamente 1.2 millones de caracteres de datos vectorizados (estimado utilizando OpenAI Embeddings, varía según los modelos).',
vectorSpaceTooltip: 'El Espacio Vectorial es el sistema de memoria a largo plazo necesario para que los LLMs comprendan tus datos.',
documentsUploadQuota: 'Cuota de Carga de Documentos',
documentProcessingPriority: 'Prioridad de Procesamiento de Documentos',
documentProcessingPriorityTip: 'Para una mayor prioridad de procesamiento de documentos, por favor actualiza tu plan.',
documentProcessingPriorityUpgrade: 'Procesa más datos con mayor precisión y velocidad.',
priority: {
'standard': 'Estándar',
sandbox: {
name: 'Sandbox',
description: 'Prueba gratuita de 200 veces GPT',
includesTitle: 'Incluye:',
for: 'Prueba gratuita de capacidades básicas',
},
professional: {
name: 'Profesional',
description: 'Para individuos y pequeños equipos que desean desbloquear más poder de manera asequible.',
includesTitle: 'Todo en el plan gratuito, más:',
for: 'Para desarrolladores independientes/equipos pequeños',
},
team: {
name: 'Equipo',
description: 'Colabora sin límites y disfruta de un rendimiento de primera categoría.',
includesTitle: 'Todo en el plan Profesional, más:',
for: 'Para equipos de tamaño mediano',
},
enterprise: {
name: 'Empresa',
description: 'Obtén capacidades completas y soporte para sistemas críticos a gran escala.',
includesTitle: 'Todo en el plan Equipo, más:',
features: {
0: 'Soluciones de implementación escalables de nivel empresarial',
7: 'Actualizaciones y Mantenimiento por Dify Oficialmente',
8: 'Soporte Técnico Profesional',
3: 'Múltiples Espacios de Trabajo y Gestión Empresarial',
1: 'Autorización de Licencia Comercial',
2: 'Características Exclusivas de la Empresa',
5: 'SLA negociados por Dify Partners',
4: 'SSO',
6: 'Seguridad y Controles Avanzados',
},
btnText: 'Contactar ventas',
for: 'Para equipos de gran tamaño',
price: 'Personalizado',
priceTip: 'Facturación Anual Solo',
features: [
'Soluciones de implementación escalables a nivel empresarial',
'Autorización de licencia comercial',
'Funciones exclusivas para empresas',
'Múltiples espacios de trabajo y gestión empresarial',
'SSO (inicio de sesión único)',
'SLAs negociados con socios de Dify',
'Seguridad y controles avanzados',
'Actualizaciones y mantenimiento oficiales por parte de Dify',
'Soporte técnico profesional',
],
},
community: {
features: {
0: 'Todas las características principales se lanzaron bajo el repositorio público',
2: 'Cumple con la Licencia de Código Abierto de Dify',
1: 'Espacio de trabajo único',
},
includesTitle: 'Características gratuitas:',
for: 'Para usuarios individuales, pequeños equipos o proyectos no comerciales',
price: 'Gratis',
btnText: 'Comienza con la Comunidad',
name: 'Comunidad',
description: 'Para usuarios individuales, pequeños equipos o proyectos no comerciales',
features: [
'Todas las funciones principales publicadas en el repositorio público',
'Espacio de trabajo único',
'Cumple con la licencia de código abierto de Dify',
],
},
premium: {
features: {
0: 'Confiabilidad autogestionada por varios proveedores de nube',
1: 'Espacio de trabajo único',
3: 'Soporte prioritario por correo electrónico y chat',
2: 'Personalización de logotipos y marcas de WebApp',
},
description: 'Para organizaciones y equipos de tamaño mediano',
comingSoon: 'Soporte de Microsoft Azure y Google Cloud disponible próximamente',
btnText: 'Obtén Premium en',
includesTitle: 'Todo de Community, además:',
name: 'Premium',
for: 'Para organizaciones y equipos de tamaño mediano',
features: [
'Fiabilidad autogestionada mediante varios proveedores de nube',
'Espacio de trabajo único',
'Personalización del logotipo y la marca de la aplicación web',
'Soporte prioritario por correo electrónico y chat',
],
},
},
vectorSpace: {
fullSolution: 'Actualiza tu plan para obtener más espacio.',
},
apps: {
fullTipLine1: 'Actualiza tu plan para',
fullTipLine2: 'crear más aplicaciones.',
fullTip1des: 'Has alcanzado el límite de aplicaciones de construcción en este plan',
fullTip2des: 'Se recomienda limpiar las aplicaciones inactivas para liberar espacio de uso, o contactarnos.',
fullTip1: 'Actualiza para crear más aplicaciones',

+ 0
- 1
web/i18n/es-ES/common.ts View File

showAppLength: 'Mostrar {{length}} apps',
delete: 'Eliminar cuenta',
deleteTip: 'Eliminar tu cuenta borrará permanentemente todos tus datos y no se podrán recuperar.',
deleteConfirmTip: 'Para confirmar, por favor envía lo siguiente desde tu correo electrónico registrado a ',
account: 'Cuenta',
myAccount: 'Mi Cuenta',
studio: 'Estudio Dify',

+ 0
- 2
web/i18n/es-ES/dataset-creation.ts View File

const translation = {
steps: {
header: {
creation: 'Crear conocimiento',
update: 'Agregar datos',
fallbackRoute: 'Conocimiento',
},
one: 'Elegir fuente de datos',

+ 0
- 1
web/i18n/es-ES/dataset-documents.ts View File

keywords: 'Palabras clave',
addKeyWord: 'Agregar palabra clave',
keywordError: 'La longitud máxima de la palabra clave es 20',
characters: 'caracteres',
hitCount: 'Cantidad de recuperación',
vectorHash: 'Hash de vector: ',
questionPlaceholder: 'agregar pregunta aquí',

+ 0
- 1
web/i18n/es-ES/dataset-hit-testing.ts View File

title: 'Prueba de recuperación',
desc: 'Prueba del efecto de impacto del conocimiento basado en el texto de consulta proporcionado.',
dateTimeFormat: 'MM/DD/YYYY hh:mm A',
recents: 'Recientes',
table: {
header: {
source: 'Fuente',

+ 0
- 1
web/i18n/es-ES/login.ts View File

namePlaceholder: 'Tu nombre de usuario',
forget: '¿Olvidaste tu contraseña?',
signBtn: 'Iniciar sesión',
sso: 'Continuar con SSO',
installBtn: 'Configurar',
setAdminAccount: 'Configurando una cuenta de administrador',
setAdminAccountDesc: 'Privilegios máximos para la cuenta de administrador, que se puede utilizar para crear aplicaciones y administrar proveedores de LLM, etc.',

+ 0
- 1
web/i18n/es-ES/tools.ts View File

keyTooltip: 'Clave del encabezado HTTP, puedes dejarla como "Authorization" si no tienes idea de qué es o configurarla con un valor personalizado',
types: {
none: 'Ninguno',
api_key: 'Clave API',
apiKeyPlaceholder: 'Nombre del encabezado HTTP para la Clave API',
apiValuePlaceholder: 'Ingresa la Clave API',
api_key_header: 'Encabezado',

+ 1
- 9
web/i18n/es-ES/workflow.ts View File

exitVersions: 'Versiones de salida',
exportJPEG: 'Exportar como JPEG',
exportPNG: 'Exportar como PNG',
referenceVar: 'Variable de referencia',
publishUpdate: 'Publicar actualización',
noExist: 'No existe tal variable',
exportImage: 'Exportar imagen',
needAnswerNode: 'Se debe agregar el nodo de respuesta',
needEndNode: 'Se debe agregar el nodo Final',
tabs: {
'tools': 'Herramientas',
'allTool': 'Todos',
'builtInTool': 'Incorporadas',
'customTool': 'Personalizadas',
'workflowTool': 'Flujo de trabajo',
'question-understand': 'Entender pregunta',
'not empty': 'no está vacío',
'null': 'es nulo',
'not null': 'no es nulo',
'regex match': 'Coincidencia de expresiones regulares',
'not in': 'no en',
'in': 'en',
'exists': 'Existe',
},
select: 'Escoger',
addSubVariable: 'Sub Variable',
condition: 'Condición',
},
variableAssigner: {
title: 'Asignar variables',
showAuthor: 'Mostrar autor',
},
},
tracing: {
stopBy: 'Detenido por {{user}}',
},
docExtractor: {
outputVars: {
text: 'Texto extraído',
},
},
tracing: {
stopBy: 'Pásate por {{usuario}}',
stopBy: 'Pásate por {{user}}',
},
variableReference: {
noAvailableVars: 'No hay variables disponibles',

+ 0
- 0
web/i18n/fa-IR/workflow.ts View File


Since many files were changed in this diff, some of them are not shown.
