Co-authored-by: takatost <takatost@gmail.com>tags/0.6.12
| @@ -20,6 +20,7 @@ from .app import ( | |||
| generator, | |||
| message, | |||
| model_config, | |||
| ops_trace, | |||
| site, | |||
| statistic, | |||
| workflow, | |||
| @@ -1,4 +1,3 @@ | |||
| import json | |||
| import uuid | |||
| from flask_login import current_user | |||
| @@ -9,17 +8,14 @@ from controllers.console import api | |||
| from controllers.console.app.wraps import get_app_model | |||
| from controllers.console.setup import setup_required | |||
| from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check | |||
| from core.tools.tool_manager import ToolManager | |||
| from core.tools.utils.configuration import ToolParameterConfigurationManager | |||
| from core.ops.ops_trace_manager import OpsTraceManager | |||
| from fields.app_fields import ( | |||
| app_detail_fields, | |||
| app_detail_fields_with_site, | |||
| app_pagination_fields, | |||
| ) | |||
| from libs.login import login_required | |||
| from models.model import App, AppMode, AppModelConfig | |||
| from services.app_service import AppService | |||
| from services.tag_service import TagService | |||
| ALLOW_CREATE_APP_MODES = ['chat', 'agent-chat', 'advanced-chat', 'workflow', 'completion'] | |||
| @@ -286,6 +282,39 @@ class AppApiStatus(Resource): | |||
| return app_model | |||
| class AppTraceApi(Resource): | |||
| @setup_required | |||
| @login_required | |||
| @account_initialization_required | |||
| def get(self, app_id): | |||
| """Get app trace""" | |||
| app_trace_config = OpsTraceManager.get_app_tracing_config( | |||
| app_id=app_id | |||
| ) | |||
| return app_trace_config | |||
| @setup_required | |||
| @login_required | |||
| @account_initialization_required | |||
| def post(self, app_id): | |||
| # add app trace | |||
| if not current_user.is_admin_or_owner: | |||
| raise Forbidden() | |||
| parser = reqparse.RequestParser() | |||
| parser.add_argument('enabled', type=bool, required=True, location='json') | |||
| parser.add_argument('tracing_provider', type=str, required=True, location='json') | |||
| args = parser.parse_args() | |||
| OpsTraceManager.update_app_tracing_config( | |||
| app_id=app_id, | |||
| enabled=args['enabled'], | |||
| tracing_provider=args['tracing_provider'], | |||
| ) | |||
| return {"result": "success"} | |||
| api.add_resource(AppListApi, '/apps') | |||
| api.add_resource(AppImportApi, '/apps/import') | |||
| api.add_resource(AppApi, '/apps/<uuid:app_id>') | |||
| @@ -295,3 +324,4 @@ api.add_resource(AppNameApi, '/apps/<uuid:app_id>/name') | |||
| api.add_resource(AppIconApi, '/apps/<uuid:app_id>/icon') | |||
| api.add_resource(AppSiteStatus, '/apps/<uuid:app_id>/site-enable') | |||
| api.add_resource(AppApiStatus, '/apps/<uuid:app_id>/api-enable') | |||
| api.add_resource(AppTraceApi, '/apps/<uuid:app_id>/trace') | |||
| @@ -97,3 +97,21 @@ class DraftWorkflowNotSync(BaseHTTPException): | |||
| error_code = 'draft_workflow_not_sync' | |||
| description = "Workflow graph might have been modified, please refresh and resubmit." | |||
| code = 400 | |||
| class TracingConfigNotExist(BaseHTTPException): | |||
| error_code = 'trace_config_not_exist' | |||
| description = "Trace config not exist." | |||
| code = 400 | |||
| class TracingConfigIsExist(BaseHTTPException): | |||
| error_code = 'trace_config_is_exist' | |||
| description = "Trace config is exist." | |||
| code = 400 | |||
| class TracingConfigCheckError(BaseHTTPException): | |||
| error_code = 'trace_config_check_error' | |||
| description = "Invalid Credentials." | |||
| code = 400 | |||
| @@ -0,0 +1,101 @@ | |||
| from flask_restful import Resource, reqparse | |||
| from controllers.console import api | |||
| from controllers.console.app.error import TracingConfigCheckError, TracingConfigIsExist, TracingConfigNotExist | |||
| from controllers.console.setup import setup_required | |||
| from controllers.console.wraps import account_initialization_required | |||
| from libs.login import login_required | |||
| from services.ops_service import OpsService | |||
| class TraceAppConfigApi(Resource): | |||
| """ | |||
| Manage trace app configurations | |||
| """ | |||
| @setup_required | |||
| @login_required | |||
| @account_initialization_required | |||
| def get(self, app_id): | |||
| parser = reqparse.RequestParser() | |||
| parser.add_argument('tracing_provider', type=str, required=True, location='args') | |||
| args = parser.parse_args() | |||
| try: | |||
| trace_config = OpsService.get_tracing_app_config( | |||
| app_id=app_id, tracing_provider=args['tracing_provider'] | |||
| ) | |||
| if not trace_config: | |||
| return {"has_not_configured": True} | |||
| return trace_config | |||
| except Exception as e: | |||
| raise e | |||
| @setup_required | |||
| @login_required | |||
| @account_initialization_required | |||
| def post(self, app_id): | |||
| """Create a new trace app configuration""" | |||
| parser = reqparse.RequestParser() | |||
| parser.add_argument('tracing_provider', type=str, required=True, location='json') | |||
| parser.add_argument('tracing_config', type=dict, required=True, location='json') | |||
| args = parser.parse_args() | |||
| try: | |||
| result = OpsService.create_tracing_app_config( | |||
| app_id=app_id, | |||
| tracing_provider=args['tracing_provider'], | |||
| tracing_config=args['tracing_config'] | |||
| ) | |||
| if not result: | |||
| raise TracingConfigIsExist() | |||
| if result.get('error'): | |||
| raise TracingConfigCheckError() | |||
| return result | |||
| except Exception as e: | |||
| raise e | |||
| @setup_required | |||
| @login_required | |||
| @account_initialization_required | |||
| def patch(self, app_id): | |||
| """Update an existing trace app configuration""" | |||
| parser = reqparse.RequestParser() | |||
| parser.add_argument('tracing_provider', type=str, required=True, location='json') | |||
| parser.add_argument('tracing_config', type=dict, required=True, location='json') | |||
| args = parser.parse_args() | |||
| try: | |||
| result = OpsService.update_tracing_app_config( | |||
| app_id=app_id, | |||
| tracing_provider=args['tracing_provider'], | |||
| tracing_config=args['tracing_config'] | |||
| ) | |||
| if not result: | |||
| raise TracingConfigNotExist() | |||
| return {"result": "success"} | |||
| except Exception as e: | |||
| raise e | |||
| @setup_required | |||
| @login_required | |||
| @account_initialization_required | |||
| def delete(self, app_id): | |||
| """Delete an existing trace app configuration""" | |||
| parser = reqparse.RequestParser() | |||
| parser.add_argument('tracing_provider', type=str, required=True, location='args') | |||
| args = parser.parse_args() | |||
| try: | |||
| result = OpsService.delete_tracing_app_config( | |||
| app_id=app_id, | |||
| tracing_provider=args['tracing_provider'] | |||
| ) | |||
| if not result: | |||
| raise TracingConfigNotExist() | |||
| return {"result": "success"} | |||
| except Exception as e: | |||
| raise e | |||
| api.add_resource(TraceAppConfigApi, '/apps/<uuid:app_id>/trace-config') | |||
| @@ -1,7 +1,7 @@ | |||
| import json | |||
| from abc import ABC, abstractmethod | |||
| from collections.abc import Generator | |||
| from typing import Union | |||
| from typing import Optional, Union | |||
| from core.agent.base_agent_runner import BaseAgentRunner | |||
| from core.agent.entities import AgentScratchpadUnit | |||
| @@ -15,6 +15,7 @@ from core.model_runtime.entities.message_entities import ( | |||
| ToolPromptMessage, | |||
| UserPromptMessage, | |||
| ) | |||
| from core.ops.ops_trace_manager import TraceQueueManager | |||
| from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform | |||
| from core.tools.entities.tool_entities import ToolInvokeMeta | |||
| from core.tools.tool.tool import Tool | |||
| @@ -42,6 +43,8 @@ class CotAgentRunner(BaseAgentRunner, ABC): | |||
| self._repack_app_generate_entity(app_generate_entity) | |||
| self._init_react_state(query) | |||
| trace_manager = app_generate_entity.trace_manager | |||
| # check model mode | |||
| if 'Observation' not in app_generate_entity.model_conf.stop: | |||
| if app_generate_entity.model_conf.provider not in self._ignore_observation_providers: | |||
| @@ -211,7 +214,8 @@ class CotAgentRunner(BaseAgentRunner, ABC): | |||
| tool_invoke_response, tool_invoke_meta = self._handle_invoke_action( | |||
| action=scratchpad.action, | |||
| tool_instances=tool_instances, | |||
| message_file_ids=message_file_ids | |||
| message_file_ids=message_file_ids, | |||
| trace_manager=trace_manager, | |||
| ) | |||
| scratchpad.observation = tool_invoke_response | |||
| scratchpad.agent_response = tool_invoke_response | |||
| @@ -237,8 +241,7 @@ class CotAgentRunner(BaseAgentRunner, ABC): | |||
| # update prompt tool message | |||
| for prompt_tool in self._prompt_messages_tools: | |||
| self.update_prompt_message_tool( | |||
| tool_instances[prompt_tool.name], prompt_tool) | |||
| self.update_prompt_message_tool(tool_instances[prompt_tool.name], prompt_tool) | |||
| iteration_step += 1 | |||
| @@ -275,14 +278,15 @@ class CotAgentRunner(BaseAgentRunner, ABC): | |||
| message=AssistantPromptMessage( | |||
| content=final_answer | |||
| ), | |||
| usage=llm_usage['usage'] if llm_usage['usage'] else LLMUsage.empty_usage( | |||
| ), | |||
| usage=llm_usage['usage'] if llm_usage['usage'] else LLMUsage.empty_usage(), | |||
| system_fingerprint='' | |||
| )), PublishFrom.APPLICATION_MANAGER) | |||
| def _handle_invoke_action(self, action: AgentScratchpadUnit.Action, | |||
| tool_instances: dict[str, Tool], | |||
| message_file_ids: list[str]) -> tuple[str, ToolInvokeMeta]: | |||
| message_file_ids: list[str], | |||
| trace_manager: Optional[TraceQueueManager] = None | |||
| ) -> tuple[str, ToolInvokeMeta]: | |||
| """ | |||
| handle invoke action | |||
| :param action: action | |||
| @@ -312,7 +316,8 @@ class CotAgentRunner(BaseAgentRunner, ABC): | |||
| tenant_id=self.tenant_id, | |||
| message=self.message, | |||
| invoke_from=self.application_generate_entity.invoke_from, | |||
| agent_tool_callback=self.agent_callback | |||
| agent_tool_callback=self.agent_callback, | |||
| trace_manager=trace_manager, | |||
| ) | |||
| # publish files | |||
| @@ -50,6 +50,9 @@ class FunctionCallAgentRunner(BaseAgentRunner): | |||
| } | |||
| final_answer = '' | |||
| # get tracing instance | |||
| trace_manager = app_generate_entity.trace_manager | |||
| def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): | |||
| if not final_llm_usage_dict['usage']: | |||
| final_llm_usage_dict['usage'] = usage | |||
| @@ -243,6 +246,7 @@ class FunctionCallAgentRunner(BaseAgentRunner): | |||
| message=self.message, | |||
| invoke_from=self.application_generate_entity.invoke_from, | |||
| agent_tool_callback=self.agent_callback, | |||
| trace_manager=trace_manager, | |||
| ) | |||
| # publish files | |||
| for message_file_id, save_as in message_files: | |||
| @@ -183,6 +183,14 @@ class TextToSpeechEntity(BaseModel): | |||
| language: Optional[str] = None | |||
| class TracingConfigEntity(BaseModel): | |||
| """ | |||
| Tracing Config Entity. | |||
| """ | |||
| enabled: bool | |||
| tracing_provider: str | |||
| class FileExtraConfig(BaseModel): | |||
| """ | |||
| File Upload Entity. | |||
| @@ -199,7 +207,7 @@ class AppAdditionalFeatures(BaseModel): | |||
| more_like_this: bool = False | |||
| speech_to_text: bool = False | |||
| text_to_speech: Optional[TextToSpeechEntity] = None | |||
| trace_config: Optional[TracingConfigEntity] = None | |||
| class AppConfig(BaseModel): | |||
| """ | |||
| @@ -20,6 +20,7 @@ from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, | |||
| from core.app.entities.task_entities import ChatbotAppBlockingResponse, ChatbotAppStreamResponse | |||
| from core.file.message_file_parser import MessageFileParser | |||
| from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError | |||
| from core.ops.ops_trace_manager import TraceQueueManager | |||
| from extensions.ext_database import db | |||
| from models.account import Account | |||
| from models.model import App, Conversation, EndUser, Message | |||
| @@ -29,13 +30,14 @@ logger = logging.getLogger(__name__) | |||
| class AdvancedChatAppGenerator(MessageBasedAppGenerator): | |||
| def generate(self, app_model: App, | |||
| workflow: Workflow, | |||
| user: Union[Account, EndUser], | |||
| args: dict, | |||
| invoke_from: InvokeFrom, | |||
| stream: bool = True) \ | |||
| -> Union[dict, Generator[dict, None, None]]: | |||
| def generate( | |||
| self, app_model: App, | |||
| workflow: Workflow, | |||
| user: Union[Account, EndUser], | |||
| args: dict, | |||
| invoke_from: InvokeFrom, | |||
| stream: bool = True, | |||
| ) -> Union[dict, Generator[dict, None, None]]: | |||
| """ | |||
| Generate App response. | |||
| @@ -84,6 +86,9 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator): | |||
| workflow=workflow | |||
| ) | |||
| # get tracing instance | |||
| trace_manager = TraceQueueManager(app_id=app_model.id) | |||
| if invoke_from == InvokeFrom.DEBUGGER: | |||
| # always enable retriever resource in debugger mode | |||
| app_config.additional_features.show_retrieve_source = True | |||
| @@ -99,7 +104,8 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator): | |||
| user_id=user.id, | |||
| stream=stream, | |||
| invoke_from=invoke_from, | |||
| extras=extras | |||
| extras=extras, | |||
| trace_manager=trace_manager | |||
| ) | |||
| return self._generate( | |||
| @@ -70,7 +70,8 @@ class AdvancedChatAppRunner(AppRunner): | |||
| app_record=app_record, | |||
| app_generate_entity=application_generate_entity, | |||
| inputs=inputs, | |||
| query=query | |||
| query=query, | |||
| message_id=message.id | |||
| ): | |||
| return | |||
| @@ -156,11 +157,14 @@ class AdvancedChatAppRunner(AppRunner): | |||
| # return workflow | |||
| return workflow | |||
| def handle_input_moderation(self, queue_manager: AppQueueManager, | |||
| app_record: App, | |||
| app_generate_entity: AdvancedChatAppGenerateEntity, | |||
| inputs: dict, | |||
| query: str) -> bool: | |||
| def handle_input_moderation( | |||
| self, queue_manager: AppQueueManager, | |||
| app_record: App, | |||
| app_generate_entity: AdvancedChatAppGenerateEntity, | |||
| inputs: dict, | |||
| query: str, | |||
| message_id: str | |||
| ) -> bool: | |||
| """ | |||
| Handle input moderation | |||
| :param queue_manager: application queue manager | |||
| @@ -168,6 +172,7 @@ class AdvancedChatAppRunner(AppRunner): | |||
| :param app_generate_entity: application generate entity | |||
| :param inputs: inputs | |||
| :param query: query | |||
| :param message_id: message id | |||
| :return: | |||
| """ | |||
| try: | |||
| @@ -178,6 +183,7 @@ class AdvancedChatAppRunner(AppRunner): | |||
| app_generate_entity=app_generate_entity, | |||
| inputs=inputs, | |||
| query=query, | |||
| message_id=message_id, | |||
| ) | |||
| except ModerationException as e: | |||
| self._stream_output( | |||
| @@ -42,6 +42,7 @@ from core.app.task_pipeline.workflow_cycle_manage import WorkflowCycleManage | |||
| from core.file.file_obj import FileVar | |||
| from core.model_runtime.entities.llm_entities import LLMUsage | |||
| from core.model_runtime.utils.encoders import jsonable_encoder | |||
| from core.ops.ops_trace_manager import TraceQueueManager | |||
| from core.workflow.entities.node_entities import NodeType, SystemVariable | |||
| from core.workflow.nodes.answer.answer_node import AnswerNode | |||
| from core.workflow.nodes.answer.entities import TextGenerateRouteChunk, VarGenerateRouteChunk | |||
| @@ -69,13 +70,15 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| _workflow_system_variables: dict[SystemVariable, Any] | |||
| _iteration_nested_relations: dict[str, list[str]] | |||
| def __init__(self, application_generate_entity: AdvancedChatAppGenerateEntity, | |||
| workflow: Workflow, | |||
| queue_manager: AppQueueManager, | |||
| conversation: Conversation, | |||
| message: Message, | |||
| user: Union[Account, EndUser], | |||
| stream: bool) -> None: | |||
| def __init__( | |||
| self, application_generate_entity: AdvancedChatAppGenerateEntity, | |||
| workflow: Workflow, | |||
| queue_manager: AppQueueManager, | |||
| conversation: Conversation, | |||
| message: Message, | |||
| user: Union[Account, EndUser], | |||
| stream: bool | |||
| ) -> None: | |||
| """ | |||
| Initialize AdvancedChatAppGenerateTaskPipeline. | |||
| :param application_generate_entity: application generate entity | |||
| @@ -126,14 +129,16 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| self._application_generate_entity.query | |||
| ) | |||
| generator = self._process_stream_response() | |||
| generator = self._process_stream_response( | |||
| trace_manager=self._application_generate_entity.trace_manager | |||
| ) | |||
| if self._stream: | |||
| return self._to_stream_response(generator) | |||
| else: | |||
| return self._to_blocking_response(generator) | |||
| def _to_blocking_response(self, generator: Generator[StreamResponse, None, None]) \ | |||
| -> ChatbotAppBlockingResponse: | |||
| -> ChatbotAppBlockingResponse: | |||
| """ | |||
| Process blocking response. | |||
| :return: | |||
| @@ -164,7 +169,7 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| raise Exception('Queue listening stopped unexpectedly.') | |||
| def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) \ | |||
| -> Generator[ChatbotAppStreamResponse, None, None]: | |||
| -> Generator[ChatbotAppStreamResponse, None, None]: | |||
| """ | |||
| To stream response. | |||
| :return: | |||
| @@ -177,7 +182,9 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| stream_response=stream_response | |||
| ) | |||
| def _process_stream_response(self) -> Generator[StreamResponse, None, None]: | |||
| def _process_stream_response( | |||
| self, trace_manager: Optional[TraceQueueManager] = None | |||
| ) -> Generator[StreamResponse, None, None]: | |||
| """ | |||
| Process stream response. | |||
| :return: | |||
| @@ -249,7 +256,9 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| yield self._handle_iteration_to_stream_response(self._application_generate_entity.task_id, event) | |||
| self._handle_iteration_operation(event) | |||
| elif isinstance(event, QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent): | |||
| workflow_run = self._handle_workflow_finished(event) | |||
| workflow_run = self._handle_workflow_finished( | |||
| event, conversation_id=self._conversation.id, trace_manager=trace_manager | |||
| ) | |||
| if workflow_run: | |||
| yield self._workflow_finish_to_stream_response( | |||
| task_id=self._application_generate_entity.task_id, | |||
| @@ -292,7 +301,7 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| continue | |||
| if not self._is_stream_out_support( | |||
| event=event | |||
| event=event | |||
| ): | |||
| continue | |||
| @@ -361,7 +370,7 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| id=self._message.id, | |||
| **extras | |||
| ) | |||
| def _get_stream_generate_routes(self) -> dict[str, ChatflowStreamGenerateRoute]: | |||
| """ | |||
| Get stream generate routes. | |||
| @@ -391,9 +400,9 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| ) | |||
| return stream_generate_routes | |||
| def _get_answer_start_at_node_ids(self, graph: dict, target_node_id: str) \ | |||
| -> list[str]: | |||
| -> list[str]: | |||
| """ | |||
| Get answer start at node id. | |||
| :param graph: graph | |||
| @@ -414,14 +423,14 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| target_node = next((node for node in nodes if node.get('id') == target_node_id), None) | |||
| if not target_node: | |||
| return [] | |||
| node_iteration_id = target_node.get('data', {}).get('iteration_id') | |||
| # get iteration start node id | |||
| for node in nodes: | |||
| if node.get('id') == node_iteration_id: | |||
| if node.get('data', {}).get('start_node_id') == target_node_id: | |||
| return [target_node_id] | |||
| return [] | |||
| start_node_ids = [] | |||
| @@ -457,7 +466,7 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| start_node_ids.extend(sub_start_node_ids) | |||
| return start_node_ids | |||
| def _get_iteration_nested_relations(self, graph: dict) -> dict[str, list[str]]: | |||
| """ | |||
| Get iteration nested relations. | |||
| @@ -466,18 +475,18 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| """ | |||
| nodes = graph.get('nodes') | |||
| iteration_ids = [node.get('id') for node in nodes | |||
| iteration_ids = [node.get('id') for node in nodes | |||
| if node.get('data', {}).get('type') in [ | |||
| NodeType.ITERATION.value, | |||
| NodeType.LOOP.value, | |||
| ]] | |||
| ]] | |||
| return { | |||
| iteration_id: [ | |||
| node.get('id') for node in nodes if node.get('data', {}).get('iteration_id') == iteration_id | |||
| ] for iteration_id in iteration_ids | |||
| } | |||
| def _generate_stream_outputs_when_node_started(self) -> Generator: | |||
| """ | |||
| Generate stream outputs. | |||
| @@ -485,8 +494,8 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| """ | |||
| if self._task_state.current_stream_generate_state: | |||
| route_chunks = self._task_state.current_stream_generate_state.generate_route[ | |||
| self._task_state.current_stream_generate_state.current_route_position: | |||
| ] | |||
| self._task_state.current_stream_generate_state.current_route_position: | |||
| ] | |||
| for route_chunk in route_chunks: | |||
| if route_chunk.type == 'text': | |||
| @@ -506,7 +515,8 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| # all route chunks are generated | |||
| if self._task_state.current_stream_generate_state.current_route_position == len( | |||
| self._task_state.current_stream_generate_state.generate_route): | |||
| self._task_state.current_stream_generate_state.generate_route | |||
| ): | |||
| self._task_state.current_stream_generate_state = None | |||
| def _generate_stream_outputs_when_node_finished(self) -> Optional[Generator]: | |||
| @@ -519,7 +529,7 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| route_chunks = self._task_state.current_stream_generate_state.generate_route[ | |||
| self._task_state.current_stream_generate_state.current_route_position:] | |||
| for route_chunk in route_chunks: | |||
| if route_chunk.type == 'text': | |||
| route_chunk = cast(TextGenerateRouteChunk, route_chunk) | |||
| @@ -551,7 +561,8 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| value = iteration_state.current_index | |||
| elif value_selector[1] == 'item': | |||
| value = iterator_selector[iteration_state.current_index] if iteration_state.current_index < len( | |||
| iterator_selector) else None | |||
| iterator_selector | |||
| ) else None | |||
| else: | |||
| # check chunk node id is before current node id or equal to current node id | |||
| if route_chunk_node_id not in self._task_state.ran_node_execution_infos: | |||
| @@ -562,14 +573,15 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| # get route chunk node execution info | |||
| route_chunk_node_execution_info = self._task_state.ran_node_execution_infos[route_chunk_node_id] | |||
| if (route_chunk_node_execution_info.node_type == NodeType.LLM | |||
| and latest_node_execution_info.node_type == NodeType.LLM): | |||
| and latest_node_execution_info.node_type == NodeType.LLM): | |||
| # only LLM support chunk stream output | |||
| self._task_state.current_stream_generate_state.current_route_position += 1 | |||
| continue | |||
| # get route chunk node execution | |||
| route_chunk_node_execution = db.session.query(WorkflowNodeExecution).filter( | |||
| WorkflowNodeExecution.id == route_chunk_node_execution_info.workflow_node_execution_id).first() | |||
| WorkflowNodeExecution.id == route_chunk_node_execution_info.workflow_node_execution_id | |||
| ).first() | |||
| outputs = route_chunk_node_execution.outputs_dict | |||
| @@ -631,7 +643,8 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc | |||
| # all route chunks are generated | |||
| if self._task_state.current_stream_generate_state.current_route_position == len( | |||
| self._task_state.current_stream_generate_state.generate_route): | |||
| self._task_state.current_stream_generate_state.generate_route | |||
| ): | |||
| self._task_state.current_stream_generate_state = None | |||
| def _is_stream_out_support(self, event: QueueTextChunkEvent) -> bool: | |||
| @@ -19,6 +19,7 @@ from core.app.apps.message_based_app_queue_manager import MessageBasedAppQueueMa | |||
| from core.app.entities.app_invoke_entities import AgentChatAppGenerateEntity, InvokeFrom | |||
| from core.file.message_file_parser import MessageFileParser | |||
| from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError | |||
| from core.ops.ops_trace_manager import TraceQueueManager | |||
| from extensions.ext_database import db | |||
| from models.account import Account | |||
| from models.model import App, EndUser | |||
| @@ -108,6 +109,9 @@ class AgentChatAppGenerator(MessageBasedAppGenerator): | |||
| override_config_dict=override_model_config_dict | |||
| ) | |||
| # get tracing instance | |||
| trace_manager = TraceQueueManager(app_model.id) | |||
| # init application generate entity | |||
| application_generate_entity = AgentChatAppGenerateEntity( | |||
| task_id=str(uuid.uuid4()), | |||
| @@ -121,7 +125,8 @@ class AgentChatAppGenerator(MessageBasedAppGenerator): | |||
| stream=stream, | |||
| invoke_from=invoke_from, | |||
| extras=extras, | |||
| call_depth=0 | |||
| call_depth=0, | |||
| trace_manager=trace_manager | |||
| ) | |||
| # init generate records | |||
| @@ -158,7 +163,7 @@ class AgentChatAppGenerator(MessageBasedAppGenerator): | |||
| conversation=conversation, | |||
| message=message, | |||
| user=user, | |||
| stream=stream | |||
| stream=stream, | |||
| ) | |||
| return AgentChatAppGenerateResponseConverter.convert( | |||
| @@ -166,11 +171,13 @@ class AgentChatAppGenerator(MessageBasedAppGenerator): | |||
| invoke_from=invoke_from | |||
| ) | |||
| def _generate_worker(self, flask_app: Flask, | |||
| application_generate_entity: AgentChatAppGenerateEntity, | |||
| queue_manager: AppQueueManager, | |||
| conversation_id: str, | |||
| message_id: str) -> None: | |||
| def _generate_worker( | |||
| self, flask_app: Flask, | |||
| application_generate_entity: AgentChatAppGenerateEntity, | |||
| queue_manager: AppQueueManager, | |||
| conversation_id: str, | |||
| message_id: str, | |||
| ) -> None: | |||
| """ | |||
| Generate worker in a new thread. | |||
| :param flask_app: Flask app | |||
| @@ -192,7 +199,7 @@ class AgentChatAppGenerator(MessageBasedAppGenerator): | |||
| application_generate_entity=application_generate_entity, | |||
| queue_manager=queue_manager, | |||
| conversation=conversation, | |||
| message=message | |||
| message=message, | |||
| ) | |||
| except GenerateTaskStoppedException: | |||
| pass | |||
| @@ -28,10 +28,13 @@ class AgentChatAppRunner(AppRunner): | |||
| """ | |||
| Agent Application Runner | |||
| """ | |||
| def run(self, application_generate_entity: AgentChatAppGenerateEntity, | |||
| queue_manager: AppQueueManager, | |||
| conversation: Conversation, | |||
| message: Message) -> None: | |||
| def run( | |||
| self, application_generate_entity: AgentChatAppGenerateEntity, | |||
| queue_manager: AppQueueManager, | |||
| conversation: Conversation, | |||
| message: Message, | |||
| ) -> None: | |||
| """ | |||
| Run assistant application | |||
| :param application_generate_entity: application generate entity | |||
| @@ -100,6 +103,7 @@ class AgentChatAppRunner(AppRunner): | |||
| app_generate_entity=application_generate_entity, | |||
| inputs=inputs, | |||
| query=query, | |||
| message_id=message.id | |||
| ) | |||
| except ModerationException as e: | |||
| self.direct_output( | |||
| @@ -219,7 +223,7 @@ class AgentChatAppRunner(AppRunner): | |||
| runner_cls = FunctionCallAgentRunner | |||
| else: | |||
| raise ValueError(f"Invalid agent strategy: {agent_entity.strategy}") | |||
| runner = runner_cls( | |||
| tenant_id=app_config.tenant_id, | |||
| application_generate_entity=application_generate_entity, | |||
| @@ -338,11 +338,14 @@ class AppRunner: | |||
| ), PublishFrom.APPLICATION_MANAGER | |||
| ) | |||
| def moderation_for_inputs(self, app_id: str, | |||
| tenant_id: str, | |||
| app_generate_entity: AppGenerateEntity, | |||
| inputs: dict, | |||
| query: str) -> tuple[bool, dict, str]: | |||
| def moderation_for_inputs( | |||
| self, app_id: str, | |||
| tenant_id: str, | |||
| app_generate_entity: AppGenerateEntity, | |||
| inputs: dict, | |||
| query: str, | |||
| message_id: str, | |||
| ) -> tuple[bool, dict, str]: | |||
| """ | |||
| Process sensitive_word_avoidance. | |||
| :param app_id: app id | |||
| @@ -350,6 +353,7 @@ class AppRunner: | |||
| :param app_generate_entity: app generate entity | |||
| :param inputs: inputs | |||
| :param query: query | |||
| :param message_id: message id | |||
| :return: | |||
| """ | |||
| moderation_feature = InputModeration() | |||
| @@ -358,7 +362,9 @@ class AppRunner: | |||
| tenant_id=tenant_id, | |||
| app_config=app_generate_entity.app_config, | |||
| inputs=inputs, | |||
| query=query if query else '' | |||
| query=query if query else '', | |||
| message_id=message_id, | |||
| trace_manager=app_generate_entity.trace_manager | |||
| ) | |||
| def check_hosting_moderation(self, application_generate_entity: EasyUIBasedAppGenerateEntity, | |||
| @@ -19,6 +19,7 @@ from core.app.apps.message_based_app_queue_manager import MessageBasedAppQueueMa | |||
| from core.app.entities.app_invoke_entities import ChatAppGenerateEntity, InvokeFrom | |||
| from core.file.message_file_parser import MessageFileParser | |||
| from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError | |||
| from core.ops.ops_trace_manager import TraceQueueManager | |||
| from extensions.ext_database import db | |||
| from models.account import Account | |||
| from models.model import App, EndUser | |||
| @@ -27,12 +28,13 @@ logger = logging.getLogger(__name__) | |||
| class ChatAppGenerator(MessageBasedAppGenerator): | |||
| def generate(self, app_model: App, | |||
| user: Union[Account, EndUser], | |||
| args: Any, | |||
| invoke_from: InvokeFrom, | |||
| stream: bool = True) \ | |||
| -> Union[dict, Generator[dict, None, None]]: | |||
| def generate( | |||
| self, app_model: App, | |||
| user: Union[Account, EndUser], | |||
| args: Any, | |||
| invoke_from: InvokeFrom, | |||
| stream: bool = True, | |||
| ) -> Union[dict, Generator[dict, None, None]]: | |||
| """ | |||
| Generate App response. | |||
| @@ -105,6 +107,9 @@ class ChatAppGenerator(MessageBasedAppGenerator): | |||
| override_config_dict=override_model_config_dict | |||
| ) | |||
| # get tracing instance | |||
| trace_manager = TraceQueueManager(app_model.id) | |||
| # init application generate entity | |||
| application_generate_entity = ChatAppGenerateEntity( | |||
| task_id=str(uuid.uuid4()), | |||
| @@ -117,7 +122,8 @@ class ChatAppGenerator(MessageBasedAppGenerator): | |||
| user_id=user.id, | |||
| stream=stream, | |||
| invoke_from=invoke_from, | |||
| extras=extras | |||
| extras=extras, | |||
| trace_manager=trace_manager | |||
| ) | |||
| # init generate records | |||
| @@ -154,7 +160,7 @@ class ChatAppGenerator(MessageBasedAppGenerator): | |||
| conversation=conversation, | |||
| message=message, | |||
| user=user, | |||
| stream=stream | |||
| stream=stream, | |||
| ) | |||
| return ChatAppGenerateResponseConverter.convert( | |||
| @@ -96,6 +96,7 @@ class ChatAppRunner(AppRunner): | |||
| app_generate_entity=application_generate_entity, | |||
| inputs=inputs, | |||
| query=query, | |||
| message_id=message.id | |||
| ) | |||
| except ModerationException as e: | |||
| self.direct_output( | |||
| @@ -154,7 +155,7 @@ class ChatAppRunner(AppRunner): | |||
| application_generate_entity.invoke_from | |||
| ) | |||
| dataset_retrieval = DatasetRetrieval() | |||
| dataset_retrieval = DatasetRetrieval(application_generate_entity) | |||
| context = dataset_retrieval.retrieve( | |||
| app_id=app_record.id, | |||
| user_id=application_generate_entity.user_id, | |||
| @@ -165,7 +166,8 @@ class ChatAppRunner(AppRunner): | |||
| invoke_from=application_generate_entity.invoke_from, | |||
| show_retrieve_source=app_config.additional_features.show_retrieve_source, | |||
| hit_callback=hit_callback, | |||
| memory=memory | |||
| memory=memory, | |||
| message_id=message.id, | |||
| ) | |||
| # reorganize all inputs and template to prompt messages | |||
| @@ -19,6 +19,7 @@ from core.app.apps.message_based_app_queue_manager import MessageBasedAppQueueMa | |||
| from core.app.entities.app_invoke_entities import CompletionAppGenerateEntity, InvokeFrom | |||
| from core.file.message_file_parser import MessageFileParser | |||
| from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError | |||
| from core.ops.ops_trace_manager import TraceQueueManager | |||
| from extensions.ext_database import db | |||
| from models.account import Account | |||
| from models.model import App, EndUser, Message | |||
| @@ -94,6 +95,9 @@ class CompletionAppGenerator(MessageBasedAppGenerator): | |||
| override_config_dict=override_model_config_dict | |||
| ) | |||
| # get tracing instance | |||
| trace_manager = TraceQueueManager(app_model.id) | |||
| # init application generate entity | |||
| application_generate_entity = CompletionAppGenerateEntity( | |||
| task_id=str(uuid.uuid4()), | |||
| @@ -105,7 +109,8 @@ class CompletionAppGenerator(MessageBasedAppGenerator): | |||
| user_id=user.id, | |||
| stream=stream, | |||
| invoke_from=invoke_from, | |||
| extras=extras | |||
| extras=extras, | |||
| trace_manager=trace_manager | |||
| ) | |||
| # init generate records | |||
| @@ -141,7 +146,7 @@ class CompletionAppGenerator(MessageBasedAppGenerator): | |||
| conversation=conversation, | |||
| message=message, | |||
| user=user, | |||
| stream=stream | |||
| stream=stream, | |||
| ) | |||
| return CompletionAppGenerateResponseConverter.convert( | |||
| @@ -158,7 +163,6 @@ class CompletionAppGenerator(MessageBasedAppGenerator): | |||
| :param flask_app: Flask app | |||
| :param application_generate_entity: application generate entity | |||
| :param queue_manager: queue manager | |||
| :param conversation_id: conversation ID | |||
| :param message_id: message ID | |||
| :return: | |||
| """ | |||
| @@ -300,7 +304,7 @@ class CompletionAppGenerator(MessageBasedAppGenerator): | |||
| conversation=conversation, | |||
| message=message, | |||
| user=user, | |||
| stream=stream | |||
| stream=stream, | |||
| ) | |||
| return CompletionAppGenerateResponseConverter.convert( | |||
| @@ -77,6 +77,7 @@ class CompletionAppRunner(AppRunner): | |||
| app_generate_entity=application_generate_entity, | |||
| inputs=inputs, | |||
| query=query, | |||
| message_id=message.id | |||
| ) | |||
| except ModerationException as e: | |||
| self.direct_output( | |||
| @@ -114,7 +115,7 @@ class CompletionAppRunner(AppRunner): | |||
| if dataset_config and dataset_config.retrieve_config.query_variable: | |||
| query = inputs.get(dataset_config.retrieve_config.query_variable, "") | |||
| dataset_retrieval = DatasetRetrieval() | |||
| dataset_retrieval = DatasetRetrieval(application_generate_entity) | |||
| context = dataset_retrieval.retrieve( | |||
| app_id=app_record.id, | |||
| user_id=application_generate_entity.user_id, | |||
| @@ -124,7 +125,8 @@ class CompletionAppRunner(AppRunner): | |||
| query=query, | |||
| invoke_from=application_generate_entity.invoke_from, | |||
| show_retrieve_source=app_config.additional_features.show_retrieve_source, | |||
| hit_callback=hit_callback | |||
| hit_callback=hit_callback, | |||
| message_id=message.id | |||
| ) | |||
| # reorganize all inputs and template to prompt messages | |||
| @@ -35,22 +35,23 @@ logger = logging.getLogger(__name__) | |||
| class MessageBasedAppGenerator(BaseAppGenerator): | |||
| def _handle_response(self, application_generate_entity: Union[ | |||
| ChatAppGenerateEntity, | |||
| CompletionAppGenerateEntity, | |||
| AgentChatAppGenerateEntity, | |||
| AdvancedChatAppGenerateEntity | |||
| ], | |||
| queue_manager: AppQueueManager, | |||
| conversation: Conversation, | |||
| message: Message, | |||
| user: Union[Account, EndUser], | |||
| stream: bool = False) \ | |||
| -> Union[ | |||
| ChatbotAppBlockingResponse, | |||
| CompletionAppBlockingResponse, | |||
| Generator[Union[ChatbotAppStreamResponse, CompletionAppStreamResponse], None, None] | |||
| ]: | |||
| def _handle_response( | |||
| self, application_generate_entity: Union[ | |||
| ChatAppGenerateEntity, | |||
| CompletionAppGenerateEntity, | |||
| AgentChatAppGenerateEntity, | |||
| AdvancedChatAppGenerateEntity | |||
| ], | |||
| queue_manager: AppQueueManager, | |||
| conversation: Conversation, | |||
| message: Message, | |||
| user: Union[Account, EndUser], | |||
| stream: bool = False, | |||
| ) -> Union[ | |||
| ChatbotAppBlockingResponse, | |||
| CompletionAppBlockingResponse, | |||
| Generator[Union[ChatbotAppStreamResponse, CompletionAppStreamResponse], None, None] | |||
| ]: | |||
| """ | |||
| Handle response. | |||
| :param application_generate_entity: application generate entity | |||
| @@ -20,6 +20,7 @@ from core.app.entities.app_invoke_entities import InvokeFrom, WorkflowAppGenerat | |||
| from core.app.entities.task_entities import WorkflowAppBlockingResponse, WorkflowAppStreamResponse | |||
| from core.file.message_file_parser import MessageFileParser | |||
| from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError | |||
| from core.ops.ops_trace_manager import TraceQueueManager | |||
| from extensions.ext_database import db | |||
| from models.account import Account | |||
| from models.model import App, EndUser | |||
| @@ -29,14 +30,15 @@ logger = logging.getLogger(__name__) | |||
| class WorkflowAppGenerator(BaseAppGenerator): | |||
| def generate(self, app_model: App, | |||
| workflow: Workflow, | |||
| user: Union[Account, EndUser], | |||
| args: dict, | |||
| invoke_from: InvokeFrom, | |||
| stream: bool = True, | |||
| call_depth: int = 0) \ | |||
| -> Union[dict, Generator[dict, None, None]]: | |||
| def generate( | |||
| self, app_model: App, | |||
| workflow: Workflow, | |||
| user: Union[Account, EndUser], | |||
| args: dict, | |||
| invoke_from: InvokeFrom, | |||
| stream: bool = True, | |||
| call_depth: int = 0, | |||
| ) -> Union[dict, Generator[dict, None, None]]: | |||
| """ | |||
| Generate App response. | |||
| @@ -46,6 +48,7 @@ class WorkflowAppGenerator(BaseAppGenerator): | |||
| :param args: request args | |||
| :param invoke_from: invoke from source | |||
| :param stream: is stream | |||
| :param call_depth: call depth | |||
| """ | |||
| inputs = args['inputs'] | |||
| @@ -68,6 +71,9 @@ class WorkflowAppGenerator(BaseAppGenerator): | |||
| workflow=workflow | |||
| ) | |||
| # get tracing instance | |||
| trace_manager = TraceQueueManager(app_model.id) | |||
| # init application generate entity | |||
| application_generate_entity = WorkflowAppGenerateEntity( | |||
| task_id=str(uuid.uuid4()), | |||
| @@ -77,7 +83,8 @@ class WorkflowAppGenerator(BaseAppGenerator): | |||
| user_id=user.id, | |||
| stream=stream, | |||
| invoke_from=invoke_from, | |||
| call_depth=call_depth | |||
| call_depth=call_depth, | |||
| trace_manager=trace_manager | |||
| ) | |||
| return self._generate( | |||
| @@ -87,17 +94,18 @@ class WorkflowAppGenerator(BaseAppGenerator): | |||
| application_generate_entity=application_generate_entity, | |||
| invoke_from=invoke_from, | |||
| stream=stream, | |||
| call_depth=call_depth | |||
| call_depth=call_depth, | |||
| ) | |||
| def _generate(self, app_model: App, | |||
| workflow: Workflow, | |||
| user: Union[Account, EndUser], | |||
| application_generate_entity: WorkflowAppGenerateEntity, | |||
| invoke_from: InvokeFrom, | |||
| stream: bool = True, | |||
| call_depth: int = 0) \ | |||
| -> Union[dict, Generator[dict, None, None]]: | |||
| def _generate( | |||
| self, app_model: App, | |||
| workflow: Workflow, | |||
| user: Union[Account, EndUser], | |||
| application_generate_entity: WorkflowAppGenerateEntity, | |||
| invoke_from: InvokeFrom, | |||
| stream: bool = True, | |||
| call_depth: int = 0 | |||
| ) -> Union[dict, Generator[dict, None, None]]: | |||
| """ | |||
| Generate App response. | |||
| @@ -131,7 +139,7 @@ class WorkflowAppGenerator(BaseAppGenerator): | |||
| workflow=workflow, | |||
| queue_manager=queue_manager, | |||
| user=user, | |||
| stream=stream | |||
| stream=stream, | |||
| ) | |||
| return WorkflowAppGenerateResponseConverter.convert( | |||
| @@ -1,6 +1,6 @@ | |||
| import logging | |||
| from collections.abc import Generator | |||
| from typing import Any, Union | |||
| from typing import Any, Optional, Union | |||
| from core.app.apps.base_app_queue_manager import AppQueueManager | |||
| from core.app.entities.app_invoke_entities import ( | |||
| @@ -36,6 +36,7 @@ from core.app.entities.task_entities import ( | |||
| ) | |||
| from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTaskPipeline | |||
| from core.app.task_pipeline.workflow_cycle_manage import WorkflowCycleManage | |||
| from core.ops.ops_trace_manager import TraceQueueManager | |||
| from core.workflow.entities.node_entities import NodeType, SystemVariable | |||
| from core.workflow.nodes.end.end_node import EndNode | |||
| from extensions.ext_database import db | |||
| @@ -104,7 +105,9 @@ class WorkflowAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCycleMa | |||
| db.session.refresh(self._user) | |||
| db.session.close() | |||
| generator = self._process_stream_response() | |||
| generator = self._process_stream_response( | |||
| trace_manager=self._application_generate_entity.trace_manager | |||
| ) | |||
| if self._stream: | |||
| return self._to_stream_response(generator) | |||
| else: | |||
| @@ -158,7 +161,10 @@ class WorkflowAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCycleMa | |||
| stream_response=stream_response | |||
| ) | |||
| def _process_stream_response(self) -> Generator[StreamResponse, None, None]: | |||
| def _process_stream_response( | |||
| self, | |||
| trace_manager: Optional[TraceQueueManager] = None | |||
| ) -> Generator[StreamResponse, None, None]: | |||
| """ | |||
| Process stream response. | |||
| :return: | |||
| @@ -215,7 +221,9 @@ class WorkflowAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCycleMa | |||
| yield self._handle_iteration_to_stream_response(self._application_generate_entity.task_id, event) | |||
| self._handle_iteration_operation(event) | |||
| elif isinstance(event, QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent): | |||
| workflow_run = self._handle_workflow_finished(event) | |||
| workflow_run = self._handle_workflow_finished( | |||
| event, trace_manager=trace_manager | |||
| ) | |||
| # save workflow app log | |||
| self._save_workflow_app_log(workflow_run) | |||
| @@ -7,6 +7,7 @@ from core.app.app_config.entities import AppConfig, EasyUIBasedAppConfig, Workfl | |||
| from core.entities.provider_configuration import ProviderModelBundle | |||
| from core.file.file_obj import FileVar | |||
| from core.model_runtime.entities.model_entities import AIModelEntity | |||
| from core.ops.ops_trace_manager import TraceQueueManager | |||
| class InvokeFrom(Enum): | |||
| @@ -89,6 +90,12 @@ class AppGenerateEntity(BaseModel): | |||
| # extra parameters, like: auto_generate_conversation_name | |||
| extras: dict[str, Any] = {} | |||
| # tracing instance | |||
| trace_manager: Optional[TraceQueueManager] = None | |||
| class Config: | |||
| arbitrary_types_allowed = True | |||
| class EasyUIBasedAppGenerateEntity(AppGenerateEntity): | |||
| """ | |||
| @@ -44,6 +44,7 @@ from core.model_runtime.entities.message_entities import ( | |||
| ) | |||
| from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel | |||
| from core.model_runtime.utils.encoders import jsonable_encoder | |||
| from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName | |||
| from core.prompt.utils.prompt_message_util import PromptMessageUtil | |||
| from core.prompt.utils.prompt_template_parser import PromptTemplateParser | |||
| from events.message_event import message_was_created | |||
| @@ -100,7 +101,9 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan | |||
| self._conversation_name_generate_thread = None | |||
| def process(self) -> Union[ | |||
| def process( | |||
| self, | |||
| ) -> Union[ | |||
| ChatbotAppBlockingResponse, | |||
| CompletionAppBlockingResponse, | |||
| Generator[Union[ChatbotAppStreamResponse, CompletionAppStreamResponse], None, None] | |||
| @@ -120,7 +123,9 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan | |||
| self._application_generate_entity.query | |||
| ) | |||
| generator = self._process_stream_response() | |||
| generator = self._process_stream_response( | |||
| trace_manager=self._application_generate_entity.trace_manager | |||
| ) | |||
| if self._stream: | |||
| return self._to_stream_response(generator) | |||
| else: | |||
| @@ -197,7 +202,9 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan | |||
| stream_response=stream_response | |||
| ) | |||
| def _process_stream_response(self) -> Generator[StreamResponse, None, None]: | |||
| def _process_stream_response( | |||
| self, trace_manager: Optional[TraceQueueManager] = None | |||
| ) -> Generator[StreamResponse, None, None]: | |||
| """ | |||
| Process stream response. | |||
| :return: | |||
| @@ -224,7 +231,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan | |||
| yield self._message_replace_to_stream_response(answer=output_moderation_answer) | |||
| # Save message | |||
| self._save_message() | |||
| self._save_message(trace_manager) | |||
| yield self._message_end_to_stream_response() | |||
| elif isinstance(event, QueueRetrieverResourcesEvent): | |||
| @@ -269,7 +276,9 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan | |||
| if self._conversation_name_generate_thread: | |||
| self._conversation_name_generate_thread.join() | |||
| def _save_message(self) -> None: | |||
| def _save_message( | |||
| self, trace_manager: Optional[TraceQueueManager] = None | |||
| ) -> None: | |||
| """ | |||
| Save message. | |||
| :return: | |||
| @@ -300,6 +309,15 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan | |||
| db.session.commit() | |||
| if trace_manager: | |||
| trace_manager.add_trace_task( | |||
| TraceTask( | |||
| TraceTaskName.MESSAGE_TRACE, | |||
| conversation_id=self._conversation.id, | |||
| message_id=self._message.id | |||
| ) | |||
| ) | |||
| message_was_created.send( | |||
| self._message, | |||
| application_generate_entity=self._application_generate_entity, | |||
| @@ -22,6 +22,7 @@ from core.app.entities.task_entities import ( | |||
| from core.app.task_pipeline.workflow_iteration_cycle_manage import WorkflowIterationCycleManage | |||
| from core.file.file_obj import FileVar | |||
| from core.model_runtime.utils.encoders import jsonable_encoder | |||
| from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName | |||
| from core.tools.tool_manager import ToolManager | |||
| from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeType | |||
| from core.workflow.nodes.tool.entities import ToolNodeData | |||
| @@ -94,11 +95,15 @@ class WorkflowCycleManage(WorkflowIterationCycleManage): | |||
| return workflow_run | |||
| def _workflow_run_success(self, workflow_run: WorkflowRun, | |||
| start_at: float, | |||
| total_tokens: int, | |||
| total_steps: int, | |||
| outputs: Optional[str] = None) -> WorkflowRun: | |||
| def _workflow_run_success( | |||
| self, workflow_run: WorkflowRun, | |||
| start_at: float, | |||
| total_tokens: int, | |||
| total_steps: int, | |||
| outputs: Optional[str] = None, | |||
| conversation_id: Optional[str] = None, | |||
| trace_manager: Optional[TraceQueueManager] = None | |||
| ) -> WorkflowRun: | |||
| """ | |||
| Workflow run success | |||
| :param workflow_run: workflow run | |||
| @@ -106,6 +111,7 @@ class WorkflowCycleManage(WorkflowIterationCycleManage): | |||
| :param total_tokens: total tokens | |||
| :param total_steps: total steps | |||
| :param outputs: outputs | |||
| :param conversation_id: conversation id | |||
| :return: | |||
| """ | |||
| workflow_run.status = WorkflowRunStatus.SUCCEEDED.value | |||
| @@ -119,14 +125,27 @@ class WorkflowCycleManage(WorkflowIterationCycleManage): | |||
| db.session.refresh(workflow_run) | |||
| db.session.close() | |||
| if trace_manager: | |||
| trace_manager.add_trace_task( | |||
| TraceTask( | |||
| TraceTaskName.WORKFLOW_TRACE, | |||
| workflow_run=workflow_run, | |||
| conversation_id=conversation_id, | |||
| ) | |||
| ) | |||
| return workflow_run | |||
| def _workflow_run_failed(self, workflow_run: WorkflowRun, | |||
| start_at: float, | |||
| total_tokens: int, | |||
| total_steps: int, | |||
| status: WorkflowRunStatus, | |||
| error: str) -> WorkflowRun: | |||
| def _workflow_run_failed( | |||
| self, workflow_run: WorkflowRun, | |||
| start_at: float, | |||
| total_tokens: int, | |||
| total_steps: int, | |||
| status: WorkflowRunStatus, | |||
| error: str, | |||
| conversation_id: Optional[str] = None, | |||
| trace_manager: Optional[TraceQueueManager] = None | |||
| ) -> WorkflowRun: | |||
| """ | |||
| Workflow run failed | |||
| :param workflow_run: workflow run | |||
| @@ -148,6 +167,14 @@ class WorkflowCycleManage(WorkflowIterationCycleManage): | |||
| db.session.refresh(workflow_run) | |||
| db.session.close() | |||
| trace_manager.add_trace_task( | |||
| TraceTask( | |||
| TraceTaskName.WORKFLOW_TRACE, | |||
| workflow_run=workflow_run, | |||
| conversation_id=conversation_id, | |||
| ) | |||
| ) | |||
| return workflow_run | |||
| def _init_node_execution_from_workflow_run(self, workflow_run: WorkflowRun, | |||
| @@ -180,7 +207,8 @@ class WorkflowCycleManage(WorkflowIterationCycleManage): | |||
| title=node_title, | |||
| status=WorkflowNodeExecutionStatus.RUNNING.value, | |||
| created_by_role=workflow_run.created_by_role, | |||
| created_by=workflow_run.created_by | |||
| created_by=workflow_run.created_by, | |||
| created_at=datetime.now(timezone.utc).replace(tzinfo=None) | |||
| ) | |||
| db.session.add(workflow_node_execution) | |||
| @@ -440,9 +468,9 @@ class WorkflowCycleManage(WorkflowIterationCycleManage): | |||
| current_node_execution = self._task_state.ran_node_execution_infos[event.node_id] | |||
| workflow_node_execution = db.session.query(WorkflowNodeExecution).filter( | |||
| WorkflowNodeExecution.id == current_node_execution.workflow_node_execution_id).first() | |||
| execution_metadata = event.execution_metadata if isinstance(event, QueueNodeSucceededEvent) else None | |||
| if self._iteration_state and self._iteration_state.current_iterations: | |||
| if not execution_metadata: | |||
| execution_metadata = {} | |||
| @@ -470,7 +498,7 @@ class WorkflowCycleManage(WorkflowIterationCycleManage): | |||
| if execution_metadata and execution_metadata.get(NodeRunMetadataKey.TOTAL_TOKENS): | |||
| self._task_state.total_tokens += ( | |||
| int(execution_metadata.get(NodeRunMetadataKey.TOTAL_TOKENS))) | |||
| if self._iteration_state: | |||
| for iteration_node_id in self._iteration_state.current_iterations: | |||
| data = self._iteration_state.current_iterations[iteration_node_id] | |||
| @@ -496,13 +524,18 @@ class WorkflowCycleManage(WorkflowIterationCycleManage): | |||
| return workflow_node_execution | |||
| def _handle_workflow_finished(self, event: QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent) \ | |||
| -> Optional[WorkflowRun]: | |||
| def _handle_workflow_finished( | |||
| self, event: QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent, | |||
| conversation_id: Optional[str] = None, | |||
| trace_manager: Optional[TraceQueueManager] = None | |||
| ) -> Optional[WorkflowRun]: | |||
| workflow_run = db.session.query(WorkflowRun).filter( | |||
| WorkflowRun.id == self._task_state.workflow_run_id).first() | |||
| if not workflow_run: | |||
| return None | |||
| if conversation_id is None: | |||
| conversation_id = self._application_generate_entity.inputs.get('sys.conversation_id') | |||
| if isinstance(event, QueueStopEvent): | |||
| workflow_run = self._workflow_run_failed( | |||
| workflow_run=workflow_run, | |||
| @@ -510,7 +543,9 @@ class WorkflowCycleManage(WorkflowIterationCycleManage): | |||
| total_tokens=self._task_state.total_tokens, | |||
| total_steps=self._task_state.total_steps, | |||
| status=WorkflowRunStatus.STOPPED, | |||
| error='Workflow stopped.' | |||
| error='Workflow stopped.', | |||
| conversation_id=conversation_id, | |||
| trace_manager=trace_manager | |||
| ) | |||
| latest_node_execution_info = self._task_state.latest_node_execution_info | |||
| @@ -531,7 +566,9 @@ class WorkflowCycleManage(WorkflowIterationCycleManage): | |||
| total_tokens=self._task_state.total_tokens, | |||
| total_steps=self._task_state.total_steps, | |||
| status=WorkflowRunStatus.FAILED, | |||
| error=event.error | |||
| error=event.error, | |||
| conversation_id=conversation_id, | |||
| trace_manager=trace_manager | |||
| ) | |||
| else: | |||
| if self._task_state.latest_node_execution_info: | |||
| @@ -546,7 +583,9 @@ class WorkflowCycleManage(WorkflowIterationCycleManage): | |||
| start_at=self._task_state.start_at, | |||
| total_tokens=self._task_state.total_tokens, | |||
| total_steps=self._task_state.total_steps, | |||
| outputs=outputs | |||
| outputs=outputs, | |||
| conversation_id=conversation_id, | |||
| trace_manager=trace_manager | |||
| ) | |||
| self._task_state.workflow_run_id = workflow_run.id | |||
| @@ -1,6 +1,7 @@ | |||
| import json | |||
| import time | |||
| from collections.abc import Generator | |||
| from datetime import datetime, timezone | |||
| from typing import Optional, Union | |||
| from core.app.entities.queue_entities import ( | |||
| @@ -131,7 +132,8 @@ class WorkflowIterationCycleManage(WorkflowCycleStateManager): | |||
| 'started_run_index': node_run_index + 1, | |||
| 'current_index': 0, | |||
| 'steps_boundary': [], | |||
| }) | |||
| }), | |||
| created_at=datetime.now(timezone.utc).replace(tzinfo=None) | |||
| ) | |||
| db.session.add(workflow_node_execution) | |||
| @@ -3,6 +3,8 @@ from typing import Any, Optional, TextIO, Union | |||
| from pydantic import BaseModel | |||
| from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName | |||
| _TEXT_COLOR_MAPPING = { | |||
| "blue": "36;1", | |||
| "yellow": "33;1", | |||
| @@ -51,6 +53,9 @@ class DifyAgentCallbackHandler(BaseModel): | |||
| tool_name: str, | |||
| tool_inputs: dict[str, Any], | |||
| tool_outputs: str, | |||
| message_id: Optional[str] = None, | |||
| timer: Optional[Any] = None, | |||
| trace_manager: Optional[TraceQueueManager] = None | |||
| ) -> None: | |||
| """If not the final action, print out observation.""" | |||
| print_text("\n[on_tool_end]\n", color=self.color) | |||
| @@ -59,6 +64,18 @@ class DifyAgentCallbackHandler(BaseModel): | |||
| print_text("Outputs: " + str(tool_outputs)[:1000] + "\n", color=self.color) | |||
| print_text("\n") | |||
| if trace_manager: | |||
| trace_manager.add_trace_task( | |||
| TraceTask( | |||
| TraceTaskName.TOOL_TRACE, | |||
| message_id=message_id, | |||
| tool_name=tool_name, | |||
| tool_inputs=tool_inputs, | |||
| tool_outputs=tool_outputs, | |||
| timer=timer, | |||
| ) | |||
| ) | |||
| def on_tool_error( | |||
| self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any | |||
| ) -> None: | |||
| @@ -1,5 +1,7 @@ | |||
| import json | |||
| import logging | |||
| import re | |||
| from typing import Optional | |||
| from core.llm_generator.output_parser.errors import OutputParserException | |||
| from core.llm_generator.output_parser.rule_config_generator import RuleConfigGeneratorOutputParser | |||
| @@ -9,12 +11,16 @@ from core.model_manager import ModelManager | |||
| from core.model_runtime.entities.message_entities import SystemPromptMessage, UserPromptMessage | |||
| from core.model_runtime.entities.model_entities import ModelType | |||
| from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError | |||
| from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName | |||
| from core.ops.utils import measure_time | |||
| from core.prompt.utils.prompt_template_parser import PromptTemplateParser | |||
| class LLMGenerator: | |||
| @classmethod | |||
| def generate_conversation_name(cls, tenant_id: str, query): | |||
| def generate_conversation_name( | |||
| cls, tenant_id: str, query, conversation_id: Optional[str] = None, app_id: Optional[str] = None | |||
| ): | |||
| prompt = CONVERSATION_TITLE_PROMPT | |||
| if len(query) > 2000: | |||
| @@ -29,25 +35,39 @@ class LLMGenerator: | |||
| tenant_id=tenant_id, | |||
| model_type=ModelType.LLM, | |||
| ) | |||
| prompts = [UserPromptMessage(content=prompt)] | |||
| response = model_instance.invoke_llm( | |||
| prompt_messages=prompts, | |||
| model_parameters={ | |||
| "max_tokens": 100, | |||
| "temperature": 1 | |||
| }, | |||
| stream=False | |||
| ) | |||
| answer = response.message.content | |||
| result_dict = json.loads(answer) | |||
| with measure_time() as timer: | |||
| response = model_instance.invoke_llm( | |||
| prompt_messages=prompts, | |||
| model_parameters={ | |||
| "max_tokens": 100, | |||
| "temperature": 1 | |||
| }, | |||
| stream=False | |||
| ) | |||
| answer = response.message.content | |||
| cleaned_answer = re.sub(r'^.*(\{.*\}).*$', r'\1', answer, flags=re.DOTALL) | |||
| result_dict = json.loads(cleaned_answer) | |||
| answer = result_dict['Your Output'] | |||
| name = answer.strip() | |||
| if len(name) > 75: | |||
| name = name[:75] + '...' | |||
| # get tracing instance | |||
| trace_manager = TraceQueueManager(app_id=app_id) | |||
| trace_manager.add_trace_task( | |||
| TraceTask( | |||
| TraceTaskName.GENERATE_NAME_TRACE, | |||
| conversation_id=conversation_id, | |||
| generate_conversation_name=name, | |||
| inputs=prompt, | |||
| timer=timer, | |||
| tenant_id=tenant_id, | |||
| ) | |||
| ) | |||
| return name | |||
| @classmethod | |||
| @@ -1,4 +1,20 @@ | |||
| from core.model_runtime.errors.invoke import InvokeError | |||
| from dashscope.common.error import ( | |||
| AuthenticationError, | |||
| InvalidParameter, | |||
| RequestFailure, | |||
| ServiceUnavailableError, | |||
| UnsupportedHTTPMethod, | |||
| UnsupportedModel, | |||
| ) | |||
| from core.model_runtime.errors.invoke import ( | |||
| InvokeAuthorizationError, | |||
| InvokeBadRequestError, | |||
| InvokeConnectionError, | |||
| InvokeError, | |||
| InvokeRateLimitError, | |||
| InvokeServerUnavailableError, | |||
| ) | |||
| class _CommonTongyi: | |||
| @@ -20,4 +36,20 @@ class _CommonTongyi: | |||
| :return: Invoke error mapping | |||
| """ | |||
| pass | |||
| return { | |||
| InvokeConnectionError: [ | |||
| RequestFailure, | |||
| ], | |||
| InvokeServerUnavailableError: [ | |||
| ServiceUnavailableError, | |||
| ], | |||
| InvokeRateLimitError: [], | |||
| InvokeAuthorizationError: [ | |||
| AuthenticationError, | |||
| ], | |||
| InvokeBadRequestError: [ | |||
| InvalidParameter, | |||
| UnsupportedModel, | |||
| UnsupportedHTTPMethod, | |||
| ] | |||
| } | |||
| @@ -1,18 +1,25 @@ | |||
| import logging | |||
| from typing import Optional | |||
| from core.app.app_config.entities import AppConfig | |||
| from core.moderation.base import ModerationAction, ModerationException | |||
| from core.moderation.factory import ModerationFactory | |||
| from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName | |||
| from core.ops.utils import measure_time | |||
| logger = logging.getLogger(__name__) | |||
| class InputModeration: | |||
| def check(self, app_id: str, | |||
| tenant_id: str, | |||
| app_config: AppConfig, | |||
| inputs: dict, | |||
| query: str) -> tuple[bool, dict, str]: | |||
| def check( | |||
| self, app_id: str, | |||
| tenant_id: str, | |||
| app_config: AppConfig, | |||
| inputs: dict, | |||
| query: str, | |||
| message_id: str, | |||
| trace_manager: Optional[TraceQueueManager] = None | |||
| ) -> tuple[bool, dict, str]: | |||
| """ | |||
| Process sensitive_word_avoidance. | |||
| :param app_id: app id | |||
| @@ -20,6 +27,8 @@ class InputModeration: | |||
| :param app_config: app config | |||
| :param inputs: inputs | |||
| :param query: query | |||
| :param message_id: message id | |||
| :param trace_manager: trace manager | |||
| :return: | |||
| """ | |||
| if not app_config.sensitive_word_avoidance: | |||
| @@ -35,8 +44,20 @@ class InputModeration: | |||
| config=sensitive_word_avoidance_config.config | |||
| ) | |||
| moderation_result = moderation_factory.moderation_for_inputs(inputs, query) | |||
| with measure_time() as timer: | |||
| moderation_result = moderation_factory.moderation_for_inputs(inputs, query) | |||
| if trace_manager: | |||
| trace_manager.add_trace_task( | |||
| TraceTask( | |||
| TraceTaskName.MODERATION_TRACE, | |||
| message_id=message_id, | |||
| moderation_result=moderation_result, | |||
| inputs=inputs, | |||
| timer=timer | |||
| ) | |||
| ) | |||
| if not moderation_result.flagged: | |||
| return False, inputs, query | |||
| @@ -0,0 +1,26 @@ | |||
| from abc import ABC, abstractmethod | |||
| from core.ops.entities.config_entity import BaseTracingConfig | |||
| from core.ops.entities.trace_entity import BaseTraceInfo | |||
| class BaseTraceInstance(ABC): | |||
| """ | |||
| Base trace instance for ops trace services | |||
| """ | |||
| @abstractmethod | |||
| def __init__(self, trace_config: BaseTracingConfig): | |||
| """ | |||
| Abstract initializer for the trace instance. | |||
| Distribute trace tasks by matching entities | |||
| """ | |||
| self.trace_config = trace_config | |||
| @abstractmethod | |||
| def trace(self, trace_info: BaseTraceInfo): | |||
| """ | |||
| Abstract method to trace activities. | |||
| Subclasses must implement specific tracing logic for activities. | |||
| """ | |||
| ... | |||
| @@ -0,0 +1,51 @@ | |||
| from enum import Enum | |||
| from pydantic import BaseModel, ValidationInfo, field_validator | |||
| class TracingProviderEnum(Enum): | |||
| LANGFUSE = 'langfuse' | |||
| LANGSMITH = 'langsmith' | |||
| class BaseTracingConfig(BaseModel): | |||
| """ | |||
| Base model class for tracing | |||
| """ | |||
| ... | |||
class LangfuseConfig(BaseTracingConfig):
    """
    Model class for Langfuse tracing config.
    """
    public_key: str
    secret_key: str
    host: str = 'https://api.langfuse.com'

    @field_validator("host")
    def set_value(cls, v, info: ValidationInfo):
        # Fall back to the Langfuse cloud endpoint when host is empty.
        if v is None or v == "":
            v = 'https://api.langfuse.com'
        # NOTE(review): https-only — a self-hosted Langfuse served over
        # plain http cannot be configured; confirm this is intended.
        if not v.startswith('https://'):
            raise ValueError('host must start with https://')
        return v
class LangSmithConfig(BaseTracingConfig):
    """
    Model class for Langsmith tracing config.
    """
    api_key: str
    project: str
    endpoint: str = 'https://api.smith.langchain.com'

    @field_validator("endpoint")
    def set_value(cls, v, info: ValidationInfo):
        # Fall back to the LangSmith cloud endpoint when unset/empty.
        if v is None or v == "":
            v = 'https://api.smith.langchain.com'
        # NOTE(review): https-only — confirm self-hosted http endpoints
        # are intentionally unsupported.
        if not v.startswith('https://'):
            raise ValueError('endpoint must start with https://')
        return v
| @@ -0,0 +1,98 @@ | |||
| from datetime import datetime | |||
| from typing import Any, Optional, Union | |||
| from pydantic import BaseModel, ConfigDict, field_validator | |||
class BaseTraceInfo(BaseModel):
    """
    Common payload shared by all trace-info entities.

    message_data is stored untyped; presumably the Message ORM row
    (message_trace reads .id, .from_account_id, etc.) — confirm at
    the task-builder call site.
    """
    message_id: Optional[str] = None
    message_data: Optional[Any] = None
    inputs: Optional[Union[str, dict[str, Any], list]] = None
    outputs: Optional[Union[str, dict[str, Any], list]] = None
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None
    # Required: every trace info must carry a metadata dict.
    metadata: dict[str, Any]

    @field_validator("inputs", "outputs")
    def ensure_type(cls, v):
        # Coerce unsupported types to "" so downstream serialization
        # never fails; str/dict/list pass through, None stays None.
        if v is None:
            return None
        if isinstance(v, str | dict | list):
            return v
        else:
            return ""
class WorkflowTraceInfo(BaseTraceInfo):
    """Trace payload for one complete workflow run."""
    # Workflow run record — untyped here; confirm concrete type at caller.
    workflow_data: Any
    conversation_id: Optional[str] = None
    workflow_app_log_id: Optional[str] = None
    workflow_id: str
    tenant_id: str
    workflow_run_id: str
    # Elapsed wall time of the run — presumably seconds; confirm upstream.
    workflow_run_elapsed_time: Union[int, float]
    workflow_run_status: str
    workflow_run_inputs: dict[str, Any]
    workflow_run_outputs: dict[str, Any]
    workflow_run_version: str
    error: Optional[str] = None
    total_tokens: int
    file_list: list[str]
    query: str
    # Re-declared without a default: metadata is required here too.
    metadata: dict[str, Any]
class MessageTraceInfo(BaseTraceInfo):
    """Trace payload for a single chat/completion message."""
    conversation_model: str
    message_tokens: int
    answer_tokens: int
    total_tokens: int
    error: Optional[str] = None
    file_list: Optional[Union[str, dict[str, Any], list]] = None
    # Attached message-file record(s) — untyped; confirm at caller.
    message_file_data: Optional[Any] = None
    conversation_mode: str
class ModerationTraceInfo(BaseTraceInfo):
    """Trace payload for an input-moderation check."""
    flagged: bool  # whether moderation flagged the input
    action: str
    preset_response: str
    query: str
class SuggestedQuestionTraceInfo(BaseTraceInfo):
    """Trace payload for a suggested-question generation."""
    total_tokens: int
    status: Optional[str] = None
    error: Optional[str] = None
    from_account_id: Optional[str] = None
    agent_based: Optional[bool] = None
    from_source: Optional[str] = None
    model_provider: Optional[str] = None
    model_id: Optional[str] = None
    suggested_question: list[str]
    level: str
    status_message: Optional[str] = None
    workflow_run_id: Optional[str] = None

    # Allow field names starting with "model_" (model_provider, model_id)
    # without pydantic protected-namespace warnings.
    model_config = ConfigDict(protected_namespaces=())
class DatasetRetrievalTraceInfo(BaseTraceInfo):
    """Trace payload for a dataset (knowledge) retrieval step."""
    # Retrieved documents — untyped; shape depends on the retriever.
    documents: Any
class ToolTraceInfo(BaseTraceInfo):
    """Trace payload for a single tool invocation."""
    tool_name: str
    tool_inputs: dict[str, Any]
    tool_outputs: str
    # Re-declared without a default: metadata is required here too.
    metadata: dict[str, Any]
    message_file_data: Any
    error: Optional[str] = None
    tool_config: dict[str, Any]
    # Invocation duration — presumably seconds; confirm upstream.
    time_cost: Union[int, float]
    tool_parameters: dict[str, Any]
    file_url: Union[str, None, list]
class GenerateNameTraceInfo(BaseTraceInfo):
    """Trace payload for a conversation-name generation."""
    conversation_id: str
    tenant_id: str
| @@ -0,0 +1,280 @@ | |||
| from datetime import datetime | |||
| from enum import Enum | |||
| from typing import Any, Optional, Union | |||
| from pydantic import BaseModel, ConfigDict, Field, field_validator | |||
| from pydantic_core.core_schema import ValidationInfo | |||
| from core.ops.utils import replace_text_with_content | |||
def validate_input_output(v, field_name):
    """
    Normalize a trace input/output value into a chat-message list.

    Strings become a single message whose role depends on *field_name*
    ("output" -> assistant, anything else -> user). Lists of dicts are
    passed through replace_text_with_content; other lists are stringified
    into one message. None, empty dicts, and every remaining type are
    returned unchanged.

    :param v: raw value attached to the trace field
    :param field_name: name of the pydantic field being validated
    :return: the normalized value
    """
    if v is None or v == {}:
        return v

    role = "assistant" if field_name == "output" else "user"

    if isinstance(v, str):
        return [{"role": role, "content": v}]

    if isinstance(v, list):
        if v and isinstance(v[0], dict):
            # rename "text" keys to "content" for message dicts
            return replace_text_with_content(data=v)
        return [{"role": role, "content": str(v)}]

    return v
class LevelEnum(str, Enum):
    """Langfuse observation levels (str-valued for direct serialization)."""
    DEBUG = "DEBUG"
    WARNING = "WARNING"
    ERROR = "ERROR"
    DEFAULT = "DEFAULT"
class LangfuseTrace(BaseModel):
    """
    Langfuse trace model.

    Mirrors the keyword arguments of the Langfuse SDK's trace() call;
    None-valued fields are filtered out before sending.
    """
    id: Optional[str] = Field(
        default=None,
        description="The id of the trace can be set, defaults to a random id. Used to link traces to external systems "
        "or when creating a distributed trace. Traces are upserted on id.",
    )
    name: Optional[str] = Field(
        default=None,
        description="Identifier of the trace. Useful for sorting/filtering in the UI.",
    )
    input: Optional[Union[str, dict[str, Any], list, None]] = Field(
        default=None, description="The input of the trace. Can be any JSON object."
    )
    output: Optional[Union[str, dict[str, Any], list, None]] = Field(
        default=None, description="The output of the trace. Can be any JSON object."
    )
    metadata: Optional[dict[str, Any]] = Field(
        default=None,
        description="Additional metadata of the trace. Can be any JSON object. Metadata is merged when being updated "
        "via the API.",
    )
    user_id: Optional[str] = Field(
        default=None,
        description="The id of the user that triggered the execution. Used to provide user-level analytics.",
    )
    session_id: Optional[str] = Field(
        default=None,
        description="Used to group multiple traces into a session in Langfuse. Use your own session/thread identifier.",
    )
    version: Optional[str] = Field(
        default=None,
        description="The version of the trace type. Used to understand how changes to the trace type affect metrics. "
        "Useful in debugging.",
    )
    release: Optional[str] = Field(
        default=None,
        description="The release identifier of the current deployment. Used to understand how changes of different "
        "deployments affect metrics. Useful in debugging.",
    )
    tags: Optional[list[str]] = Field(
        default=None,
        description="Tags are used to categorize or label traces. Traces can be filtered by tags in the UI and GET "
        "API. Tags can also be changed in the UI. Tags are merged and never deleted via the API.",
    )
    public: Optional[bool] = Field(
        default=None,
        description="You can make a trace public to share it via a public link. This allows others to view the trace "
        "without needing to log in or be members of your Langfuse project.",
    )

    @field_validator("input", "output")
    def ensure_dict(cls, v, info: ValidationInfo):
        # Normalize raw strings/lists into chat-message dicts.
        field_name = info.field_name
        return validate_input_output(v, field_name)
class LangfuseSpan(BaseModel):
    """
    Langfuse span model.

    Mirrors the keyword arguments of the Langfuse SDK's span() call;
    None-valued fields are filtered out before sending.
    """
    id: Optional[str] = Field(
        default=None,
        description="The id of the span can be set, otherwise a random id is generated. Spans are upserted on id.",
    )
    session_id: Optional[str] = Field(
        default=None,
        description="Used to group multiple spans into a session in Langfuse. Use your own session/thread identifier.",
    )
    trace_id: Optional[str] = Field(
        default=None,
        description="The id of the trace the span belongs to. Used to link spans to traces.",
    )
    user_id: Optional[str] = Field(
        default=None,
        description="The id of the user that triggered the execution. Used to provide user-level analytics.",
    )
    start_time: Optional[datetime | str] = Field(
        default_factory=datetime.now,
        description="The time at which the span started, defaults to the current time.",
    )
    end_time: Optional[datetime | str] = Field(
        default=None,
        description="The time at which the span ended. Automatically set by span.end().",
    )
    name: Optional[str] = Field(
        default=None,
        description="Identifier of the span. Useful for sorting/filtering in the UI.",
    )
    metadata: Optional[dict[str, Any]] = Field(
        default=None,
        description="Additional metadata of the span. Can be any JSON object. Metadata is merged when being updated "
        "via the API.",
    )
    level: Optional[str] = Field(
        default=None,
        description="The level of the span. Can be DEBUG, DEFAULT, WARNING or ERROR. Used for sorting/filtering of "
        "traces with elevated error levels and for highlighting in the UI.",
    )
    status_message: Optional[str] = Field(
        default=None,
        description="The status message of the span. Additional field for context of the event. E.g. the error "
        "message of an error event.",
    )
    input: Optional[Union[str, dict[str, Any], list, None]] = Field(
        default=None, description="The input of the span. Can be any JSON object."
    )
    output: Optional[Union[str, dict[str, Any], list, None]] = Field(
        default=None, description="The output of the span. Can be any JSON object."
    )
    version: Optional[str] = Field(
        default=None,
        description="The version of the span type. Used to understand how changes to the span type affect metrics. "
        "Useful in debugging.",
    )
    parent_observation_id: Optional[str] = Field(
        default=None,
        description="The id of the observation the span belongs to. Used to link spans to observations.",
    )

    @field_validator("input", "output")
    def ensure_dict(cls, v, info: ValidationInfo):
        # Normalize raw strings/lists into chat-message dicts.
        field_name = info.field_name
        return validate_input_output(v, field_name)
class UnitEnum(str, Enum):
    """Units accepted by Langfuse for usage reporting."""
    CHARACTERS = "CHARACTERS"
    TOKENS = "TOKENS"
    SECONDS = "SECONDS"
    MILLISECONDS = "MILLISECONDS"
    IMAGES = "IMAGES"
class GenerationUsage(BaseModel):
    """
    Usage payload for a Langfuse generation.

    Mirrors Langfuse's usage object: the OpenAI-style *Tokens fields plus
    the generic input/output/total counts with a unit and optional costs.

    Note: the previous ensure_dict validator (validate_input_output) was
    removed from input/output — those fields are integer counts, and the
    chat-message normalization was a misleading no-op for valid ints.
    """
    promptTokens: Optional[int] = None
    completionTokens: Optional[int] = None
    totalTokens: Optional[int] = None
    input: Optional[int] = None
    output: Optional[int] = None
    total: Optional[int] = None
    unit: Optional[UnitEnum] = None
    inputCost: Optional[float] = None
    outputCost: Optional[float] = None
    totalCost: Optional[float] = None
class LangfuseGeneration(BaseModel):
    """
    Langfuse generation model.

    Mirrors the keyword arguments of the Langfuse SDK's generation()
    call; None-valued fields are filtered out before sending.
    """
    id: Optional[str] = Field(
        default=None,
        description="The id of the generation can be set, defaults to random id.",
    )
    trace_id: Optional[str] = Field(
        default=None,
        description="The id of the trace the generation belongs to. Used to link generations to traces.",
    )
    parent_observation_id: Optional[str] = Field(
        default=None,
        description="The id of the observation the generation belongs to. Used to link generations to observations.",
    )
    name: Optional[str] = Field(
        default=None,
        description="Identifier of the generation. Useful for sorting/filtering in the UI.",
    )
    start_time: Optional[datetime | str] = Field(
        default_factory=datetime.now,
        description="The time at which the generation started, defaults to the current time.",
    )
    completion_start_time: Optional[datetime | str] = Field(
        default=None,
        description="The time at which the completion started (streaming). Set it to get latency analytics broken "
        "down into time until completion started and completion duration.",
    )
    end_time: Optional[datetime | str] = Field(
        default=None,
        description="The time at which the generation ended. Automatically set by generation.end().",
    )
    model: Optional[str] = Field(
        default=None, description="The name of the model used for the generation."
    )
    model_parameters: Optional[dict[str, Any]] = Field(
        default=None,
        description="The parameters of the model used for the generation; can be any key-value pairs.",
    )
    input: Optional[Any] = Field(
        default=None,
        description="The prompt used for the generation. Can be any string or JSON object.",
    )
    output: Optional[Any] = Field(
        default=None,
        description="The completion generated by the model. Can be any string or JSON object.",
    )
    usage: Optional[GenerationUsage] = Field(
        default=None,
        description="The usage object supports the OpenAi structure with tokens and a more generic version with "
        "detailed costs and units.",
    )
    metadata: Optional[dict[str, Any]] = Field(
        default=None,
        description="Additional metadata of the generation. Can be any JSON object. Metadata is merged when being "
        "updated via the API.",
    )
    level: Optional[LevelEnum] = Field(
        default=None,
        description="The level of the generation. Can be DEBUG, DEFAULT, WARNING or ERROR. Used for sorting/filtering "
        "of traces with elevated error levels and for highlighting in the UI.",
    )
    status_message: Optional[str] = Field(
        default=None,
        description="The status message of the generation. Additional field for context of the event. E.g. the error "
        "message of an error event.",
    )
    version: Optional[str] = Field(
        default=None,
        description="The version of the generation type. Used to understand how changes to the span type affect "
        "metrics. Useful in debugging.",
    )

    # Allow "model"/"model_parameters" field names without pydantic
    # protected-namespace warnings.
    model_config = ConfigDict(protected_namespaces=())

    @field_validator("input", "output")
    def ensure_dict(cls, v, info: ValidationInfo):
        # Normalize raw strings/lists into chat-message dicts.
        field_name = info.field_name
        return validate_input_output(v, field_name)
| @@ -0,0 +1,392 @@ | |||
| import json | |||
| import logging | |||
| import os | |||
| from datetime import datetime, timedelta | |||
| from typing import Optional | |||
| from langfuse import Langfuse | |||
| from core.ops.base_trace_instance import BaseTraceInstance | |||
| from core.ops.entities.config_entity import LangfuseConfig | |||
| from core.ops.entities.trace_entity import ( | |||
| BaseTraceInfo, | |||
| DatasetRetrievalTraceInfo, | |||
| GenerateNameTraceInfo, | |||
| MessageTraceInfo, | |||
| ModerationTraceInfo, | |||
| SuggestedQuestionTraceInfo, | |||
| ToolTraceInfo, | |||
| WorkflowTraceInfo, | |||
| ) | |||
| from core.ops.langfuse_trace.entities.langfuse_trace_entity import ( | |||
| GenerationUsage, | |||
| LangfuseGeneration, | |||
| LangfuseSpan, | |||
| LangfuseTrace, | |||
| LevelEnum, | |||
| UnitEnum, | |||
| ) | |||
| from core.ops.utils import filter_none_values | |||
| from extensions.ext_database import db | |||
| from models.model import EndUser | |||
| from models.workflow import WorkflowNodeExecution | |||
| logger = logging.getLogger(__name__) | |||
class LangFuseDataTrace(BaseTraceInstance):
    """Langfuse implementation of the ops-trace service."""

    def __init__(
        self,
        langfuse_config: LangfuseConfig,
    ):
        """Build the Langfuse SDK client from the validated provider config."""
        super().__init__(langfuse_config)
        self.langfuse_client = Langfuse(
            public_key=langfuse_config.public_key,
            secret_key=langfuse_config.secret_key,
            host=langfuse_config.host,
        )
        # Base URL for serving uploaded files (FILES_URL env); presumably
        # used to build absolute file links — confirm at usage sites.
        self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001")
| def trace(self, trace_info: BaseTraceInfo): | |||
| if isinstance(trace_info, WorkflowTraceInfo): | |||
| self.workflow_trace(trace_info) | |||
| if isinstance(trace_info, MessageTraceInfo): | |||
| self.message_trace(trace_info) | |||
| if isinstance(trace_info, ModerationTraceInfo): | |||
| self.moderation_trace(trace_info) | |||
| if isinstance(trace_info, SuggestedQuestionTraceInfo): | |||
| self.suggested_question_trace(trace_info) | |||
| if isinstance(trace_info, DatasetRetrievalTraceInfo): | |||
| self.dataset_retrieval_trace(trace_info) | |||
| if isinstance(trace_info, ToolTraceInfo): | |||
| self.tool_trace(trace_info) | |||
| if isinstance(trace_info, GenerateNameTraceInfo): | |||
| self.generate_name_trace(trace_info) | |||
| def workflow_trace(self, trace_info: WorkflowTraceInfo): | |||
| trace_id = trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id | |||
| if trace_info.message_id: | |||
| trace_id = trace_info.message_id | |||
| name = f"message_{trace_info.message_id}" | |||
| trace_data = LangfuseTrace( | |||
| id=trace_info.message_id, | |||
| user_id=trace_info.tenant_id, | |||
| name=name, | |||
| input=trace_info.workflow_run_inputs, | |||
| output=trace_info.workflow_run_outputs, | |||
| metadata=trace_info.metadata, | |||
| session_id=trace_info.conversation_id, | |||
| tags=["message", "workflow"], | |||
| ) | |||
| self.add_trace(langfuse_trace_data=trace_data) | |||
| workflow_span_data = LangfuseSpan( | |||
| id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id, | |||
| name=f"workflow_{trace_info.workflow_app_log_id}" if trace_info.workflow_app_log_id else f"workflow_{trace_info.workflow_run_id}", | |||
| input=trace_info.workflow_run_inputs, | |||
| output=trace_info.workflow_run_outputs, | |||
| trace_id=trace_id, | |||
| start_time=trace_info.start_time, | |||
| end_time=trace_info.end_time, | |||
| metadata=trace_info.metadata, | |||
| level=LevelEnum.DEFAULT if trace_info.error == "" else LevelEnum.ERROR, | |||
| status_message=trace_info.error if trace_info.error else "", | |||
| ) | |||
| self.add_span(langfuse_span_data=workflow_span_data) | |||
| else: | |||
| trace_data = LangfuseTrace( | |||
| id=trace_id, | |||
| user_id=trace_info.tenant_id, | |||
| name=f"workflow_{trace_info.workflow_app_log_id}" if trace_info.workflow_app_log_id else f"workflow_{trace_info.workflow_run_id}", | |||
| input=trace_info.workflow_run_inputs, | |||
| output=trace_info.workflow_run_outputs, | |||
| metadata=trace_info.metadata, | |||
| session_id=trace_info.conversation_id, | |||
| tags=["workflow"], | |||
| ) | |||
| self.add_trace(langfuse_trace_data=trace_data) | |||
| # through workflow_run_id get all_nodes_execution | |||
| workflow_nodes_executions = ( | |||
| db.session.query(WorkflowNodeExecution) | |||
| .filter(WorkflowNodeExecution.workflow_run_id == trace_info.workflow_run_id) | |||
| .order_by(WorkflowNodeExecution.index.desc()) | |||
| .all() | |||
| ) | |||
| for node_execution in workflow_nodes_executions: | |||
| node_execution_id = node_execution.id | |||
| tenant_id = node_execution.tenant_id | |||
| app_id = node_execution.app_id | |||
| node_name = node_execution.title | |||
| node_type = node_execution.node_type | |||
| status = node_execution.status | |||
| if node_type == "llm": | |||
| inputs = json.loads(node_execution.process_data).get("prompts", {}) | |||
| else: | |||
| inputs = json.loads(node_execution.inputs) if node_execution.inputs else {} | |||
| outputs = ( | |||
| json.loads(node_execution.outputs) if node_execution.outputs else {} | |||
| ) | |||
| created_at = node_execution.created_at if node_execution.created_at else datetime.now() | |||
| elapsed_time = node_execution.elapsed_time | |||
| finished_at = created_at + timedelta(seconds=elapsed_time) | |||
| metadata = json.loads(node_execution.execution_metadata) if node_execution.execution_metadata else {} | |||
| metadata.update( | |||
| { | |||
| "workflow_run_id": trace_info.workflow_run_id, | |||
| "node_execution_id": node_execution_id, | |||
| "tenant_id": tenant_id, | |||
| "app_id": app_id, | |||
| "node_name": node_name, | |||
| "node_type": node_type, | |||
| "status": status, | |||
| } | |||
| ) | |||
| # add span | |||
| if trace_info.message_id: | |||
| span_data = LangfuseSpan( | |||
| name=f"{node_name}_{node_execution_id}", | |||
| input=inputs, | |||
| output=outputs, | |||
| trace_id=trace_id, | |||
| start_time=created_at, | |||
| end_time=finished_at, | |||
| metadata=metadata, | |||
| level=LevelEnum.DEFAULT if status == 'succeeded' else LevelEnum.ERROR, | |||
| status_message=trace_info.error if trace_info.error else "", | |||
| parent_observation_id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id, | |||
| ) | |||
| else: | |||
| span_data = LangfuseSpan( | |||
| name=f"{node_name}_{node_execution_id}", | |||
| input=inputs, | |||
| output=outputs, | |||
| trace_id=trace_id, | |||
| start_time=created_at, | |||
| end_time=finished_at, | |||
| metadata=metadata, | |||
| level=LevelEnum.DEFAULT if status == 'succeeded' else LevelEnum.ERROR, | |||
| status_message=trace_info.error if trace_info.error else "", | |||
| ) | |||
| self.add_span(langfuse_span_data=span_data) | |||
| def message_trace( | |||
| self, trace_info: MessageTraceInfo, **kwargs | |||
| ): | |||
| # get message file data | |||
| file_list = trace_info.file_list | |||
| metadata = trace_info.metadata | |||
| message_data = trace_info.message_data | |||
| message_id = message_data.id | |||
| user_id = message_data.from_account_id | |||
| if message_data.from_end_user_id: | |||
| end_user_data: EndUser = db.session.query(EndUser).filter( | |||
| EndUser.id == message_data.from_end_user_id | |||
| ).first().session_id | |||
| user_id = end_user_data.session_id | |||
| trace_data = LangfuseTrace( | |||
| id=message_id, | |||
| user_id=user_id, | |||
| name=f"message_{message_id}", | |||
| input={ | |||
| "message": trace_info.inputs, | |||
| "files": file_list, | |||
| "message_tokens": trace_info.message_tokens, | |||
| "answer_tokens": trace_info.answer_tokens, | |||
| "total_tokens": trace_info.total_tokens, | |||
| "error": trace_info.error, | |||
| "provider_response_latency": message_data.provider_response_latency, | |||
| "created_at": trace_info.start_time, | |||
| }, | |||
| output=trace_info.outputs, | |||
| metadata=metadata, | |||
| session_id=message_data.conversation_id, | |||
| tags=["message", str(trace_info.conversation_mode)], | |||
| version=None, | |||
| release=None, | |||
| public=None, | |||
| ) | |||
| self.add_trace(langfuse_trace_data=trace_data) | |||
| # start add span | |||
| generation_usage = GenerationUsage( | |||
| totalTokens=trace_info.total_tokens, | |||
| input=trace_info.message_tokens, | |||
| output=trace_info.answer_tokens, | |||
| total=trace_info.total_tokens, | |||
| unit=UnitEnum.TOKENS, | |||
| ) | |||
| langfuse_generation_data = LangfuseGeneration( | |||
| name=f"generation_{message_id}", | |||
| trace_id=message_id, | |||
| start_time=trace_info.start_time, | |||
| end_time=trace_info.end_time, | |||
| model=message_data.model_id, | |||
| input=trace_info.inputs, | |||
| output=message_data.answer, | |||
| metadata=metadata, | |||
| level=LevelEnum.DEFAULT if message_data.status != 'error' else LevelEnum.ERROR, | |||
| status_message=message_data.error if message_data.error else "", | |||
| usage=generation_usage, | |||
| ) | |||
| self.add_generation(langfuse_generation_data) | |||
    def moderation_trace(self, trace_info: ModerationTraceInfo):
        """Record an input-moderation check as a span on the message trace."""
        span_data = LangfuseSpan(
            name="moderation",
            input=trace_info.inputs,
            output={
                "action": trace_info.action,
                "flagged": trace_info.flagged,
                "preset_response": trace_info.preset_response,
                "inputs": trace_info.inputs,
            },
            trace_id=trace_info.message_id,
            # message_data.created_at is the fallback for both ends, so the
            # span collapses to zero duration when timer data is absent.
            start_time=trace_info.start_time or trace_info.message_data.created_at,
            end_time=trace_info.end_time or trace_info.message_data.created_at,
            metadata=trace_info.metadata,
        )

        self.add_span(langfuse_span_data=span_data)
| def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo): | |||
| message_data = trace_info.message_data | |||
| generation_usage = GenerationUsage( | |||
| totalTokens=len(str(trace_info.suggested_question)), | |||
| input=len(trace_info.inputs), | |||
| output=len(trace_info.suggested_question), | |||
| total=len(trace_info.suggested_question), | |||
| unit=UnitEnum.CHARACTERS, | |||
| ) | |||
| generation_data = LangfuseGeneration( | |||
| name="suggested_question", | |||
| input=trace_info.inputs, | |||
| output=str(trace_info.suggested_question), | |||
| trace_id=trace_info.message_id, | |||
| start_time=trace_info.start_time, | |||
| end_time=trace_info.end_time, | |||
| metadata=trace_info.metadata, | |||
| level=LevelEnum.DEFAULT if message_data.status != 'error' else LevelEnum.ERROR, | |||
| status_message=message_data.error if message_data.error else "", | |||
| usage=generation_usage, | |||
| ) | |||
| self.add_generation(langfuse_generation_data=generation_data) | |||
    def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo):
        """Record a dataset (knowledge) retrieval step as a span."""
        dataset_retrieval_span_data = LangfuseSpan(
            name="dataset_retrieval",
            input=trace_info.inputs,
            output={"documents": trace_info.documents},
            trace_id=trace_info.message_id,
            # Fall back to the message row's timestamps when timer data
            # is absent.
            start_time=trace_info.start_time or trace_info.message_data.created_at,
            end_time=trace_info.end_time or trace_info.message_data.updated_at,
            metadata=trace_info.metadata,
        )

        self.add_span(langfuse_span_data=dataset_retrieval_span_data)
| def tool_trace(self, trace_info: ToolTraceInfo): | |||
| tool_span_data = LangfuseSpan( | |||
| name=trace_info.tool_name, | |||
| input=trace_info.tool_inputs, | |||
| output=trace_info.tool_outputs, | |||
| trace_id=trace_info.message_id, | |||
| start_time=trace_info.start_time, | |||
| end_time=trace_info.end_time, | |||
| metadata=trace_info.metadata, | |||
| level=LevelEnum.DEFAULT if trace_info.error == "" else LevelEnum.ERROR, | |||
| status_message=trace_info.error, | |||
| ) | |||
| self.add_span(langfuse_span_data=tool_span_data) | |||
| def generate_name_trace(self, trace_info: GenerateNameTraceInfo): | |||
| name_generation_trace_data = LangfuseTrace( | |||
| name="generate_name", | |||
| input=trace_info.inputs, | |||
| output=trace_info.outputs, | |||
| user_id=trace_info.tenant_id, | |||
| metadata=trace_info.metadata, | |||
| session_id=trace_info.conversation_id, | |||
| ) | |||
| self.add_trace(langfuse_trace_data=name_generation_trace_data) | |||
| name_generation_span_data = LangfuseSpan( | |||
| name="generate_name", | |||
| input=trace_info.inputs, | |||
| output=trace_info.outputs, | |||
| trace_id=trace_info.conversation_id, | |||
| start_time=trace_info.start_time, | |||
| end_time=trace_info.end_time, | |||
| metadata=trace_info.metadata, | |||
| ) | |||
| self.add_span(langfuse_span_data=name_generation_span_data) | |||
| def add_trace(self, langfuse_trace_data: Optional[LangfuseTrace] = None): | |||
| format_trace_data = ( | |||
| filter_none_values(langfuse_trace_data.model_dump()) if langfuse_trace_data else {} | |||
| ) | |||
| try: | |||
| self.langfuse_client.trace(**format_trace_data) | |||
| logger.debug("LangFuse Trace created successfully") | |||
| except Exception as e: | |||
| raise ValueError(f"LangFuse Failed to create trace: {str(e)}") | |||
| def add_span(self, langfuse_span_data: Optional[LangfuseSpan] = None): | |||
| format_span_data = ( | |||
| filter_none_values(langfuse_span_data.model_dump()) if langfuse_span_data else {} | |||
| ) | |||
| try: | |||
| self.langfuse_client.span(**format_span_data) | |||
| logger.debug("LangFuse Span created successfully") | |||
| except Exception as e: | |||
| raise ValueError(f"LangFuse Failed to create span: {str(e)}") | |||
| def update_span(self, span, langfuse_span_data: Optional[LangfuseSpan] = None): | |||
| format_span_data = ( | |||
| filter_none_values(langfuse_span_data.model_dump()) if langfuse_span_data else {} | |||
| ) | |||
| span.end(**format_span_data) | |||
| def add_generation( | |||
| self, langfuse_generation_data: Optional[LangfuseGeneration] = None | |||
| ): | |||
| format_generation_data = ( | |||
| filter_none_values(langfuse_generation_data.model_dump()) | |||
| if langfuse_generation_data | |||
| else {} | |||
| ) | |||
| try: | |||
| self.langfuse_client.generation(**format_generation_data) | |||
| logger.debug("LangFuse Generation created successfully") | |||
| except Exception as e: | |||
| raise ValueError(f"LangFuse Failed to create generation: {str(e)}") | |||
| def update_generation( | |||
| self, generation, langfuse_generation_data: Optional[LangfuseGeneration] = None | |||
| ): | |||
| format_generation_data = ( | |||
| filter_none_values(langfuse_generation_data.model_dump()) | |||
| if langfuse_generation_data | |||
| else {} | |||
| ) | |||
| generation.end(**format_generation_data) | |||
| def api_check(self): | |||
| try: | |||
| return self.langfuse_client.auth_check() | |||
| except Exception as e: | |||
| logger.debug(f"LangFuse API check failed: {str(e)}") | |||
| raise ValueError(f"LangFuse API check failed: {str(e)}") | |||
| @@ -0,0 +1,167 @@ | |||
| from datetime import datetime | |||
| from enum import Enum | |||
| from typing import Any, Optional, Union | |||
| from pydantic import BaseModel, Field, field_validator | |||
| from pydantic_core.core_schema import ValidationInfo | |||
| from core.ops.utils import replace_text_with_content | |||
class LangSmithRunType(str, Enum):
    """Run categories accepted by the LangSmith run API."""
    tool = "tool"
    chain = "chain"
    llm = "llm"
    retriever = "retriever"
    embedding = "embedding"
    prompt = "prompt"
    parser = "parser"
class LangSmithTokenUsage(BaseModel):
    """Token counts attached to a LangSmith run."""
    input_tokens: Optional[int] = None
    output_tokens: Optional[int] = None
    total_tokens: Optional[int] = None
class LangSmithMultiModel(BaseModel):
    """File attachments carried alongside a LangSmith run."""
    file_list: Optional[list[str]] = Field(None, description="List of files")
class LangSmithRunModel(LangSmithTokenUsage, LangSmithMultiModel):
    """Payload for creating a LangSmith run.

    Combines token-usage and file-attachment mixin fields with the core run
    attributes. Two validators post-process the data: ``ensure_dict``
    normalizes ``inputs``/``outputs`` into the messages/choices dict shape
    LangSmith renders, and ``format_time`` serializes datetimes to ISO-8601
    strings.
    """

    # NOTE(review): annotated Optional but declared required (`...`) — None is
    # accepted, yet the field must always be passed explicitly.
    name: Optional[str] = Field(..., description="Name of the run")
    inputs: Optional[Union[str, dict[str, Any], list, None]] = Field(None, description="Inputs of the run")
    outputs: Optional[Union[str, dict[str, Any], list, None]] = Field(None, description="Outputs of the run")
    run_type: LangSmithRunType = Field(..., description="Type of the run")
    start_time: Optional[datetime | str] = Field(None, description="Start time of the run")
    end_time: Optional[datetime | str] = Field(None, description="End time of the run")
    extra: Optional[dict[str, Any]] = Field(
        None, description="Extra information of the run"
    )
    error: Optional[str] = Field(None, description="Error message of the run")
    serialized: Optional[dict[str, Any]] = Field(
        None, description="Serialized data of the run"
    )
    parent_run_id: Optional[str] = Field(None, description="Parent run ID")
    events: Optional[list[dict[str, Any]]] = Field(
        None, description="Events associated with the run"
    )
    tags: Optional[list[str]] = Field(None, description="Tags associated with the run")
    trace_id: Optional[str] = Field(
        None, description="Trace ID associated with the run"
    )
    dotted_order: Optional[str] = Field(None, description="Dotted order of the run")
    id: Optional[str] = Field(None, description="ID of the run")
    session_id: Optional[str] = Field(
        None, description="Session ID associated with the run"
    )
    session_name: Optional[str] = Field(
        None, description="Session name associated with the run"
    )
    reference_example_id: Optional[str] = Field(
        None, description="Reference example ID associated with the run"
    )
    input_attachments: Optional[dict[str, Any]] = Field(
        None, description="Input attachments of the run"
    )
    output_attachments: Optional[dict[str, Any]] = Field(
        None, description="Output attachments of the run"
    )

    @field_validator("inputs", "outputs")
    def ensure_dict(cls, v, info: ValidationInfo):
        """Coerce inputs/outputs into the dict shape LangSmith expects.

        A string becomes a single chat message, a list becomes a message
        array (with ``text`` keys renamed to ``content``), and a dict is
        annotated in place with usage metadata and the file list. Empty or
        missing values pass through untouched.
        """
        field_name = info.field_name
        values = info.data
        if v == {} or v is None:
            return v
        # Token counts come from the mixin fields, which pydantic validated
        # earlier, so they are available via info.data here.
        usage_metadata = {
            "input_tokens": values.get('input_tokens', 0),
            "output_tokens": values.get('output_tokens', 0),
            "total_tokens": values.get('total_tokens', 0),
        }
        file_list = values.get("file_list", [])
        if isinstance(v, str):
            if field_name == "inputs":
                return {
                    "messages": {
                        "role": "user",
                        "content": v,
                        "usage_metadata": usage_metadata,
                        "file_list": file_list,
                    },
                }
            elif field_name == "outputs":
                return {
                    "choices": {
                        "role": "ai",
                        "content": v,
                        "usage_metadata": usage_metadata,
                        "file_list": file_list,
                    },
                }
        elif isinstance(v, list):
            data = {}
            if len(v) > 0 and isinstance(v[0], dict):
                # rename text to content
                v = replace_text_with_content(data=v)
                if field_name == "inputs":
                    data = {
                        "messages": v,
                    }
                elif field_name == "outputs":
                    data = {
                        "choices": {
                            "role": "ai",
                            "content": v,
                            "usage_metadata": usage_metadata,
                            "file_list": file_list,
                        },
                    }
                return data
            else:
                # List of non-dict items: stringify the whole list into one
                # message whose role depends on the field being validated.
                return {
                    "choices": {
                        "role": "ai" if field_name == "outputs" else "user",
                        "content": str(v),
                        "usage_metadata": usage_metadata,
                        "file_list": file_list,
                    },
                }
        if isinstance(v, dict):
            # Dict payloads are kept as-is, only enriched with usage/file info.
            v["usage_metadata"] = usage_metadata
            v["file_list"] = file_list
            return v
        return v

    @field_validator("start_time", "end_time")
    def format_time(cls, v, info: ValidationInfo):
        """Serialize datetimes to the ISO-8601 'Z' format LangSmith expects.

        NOTE(review): rejects plain strings even though the annotation allows
        ``datetime | str`` — confirm callers always pass datetime objects.
        """
        if not isinstance(v, datetime):
            raise ValueError(f"{info.field_name} must be a datetime object")
        else:
            return v.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
class LangSmithRunUpdateModel(BaseModel):
    """Payload for patching an already-created LangSmith run.

    Only ``run_id`` is required; every other attribute is optional and is
    omitted from the update when left as ``None``.
    """

    run_id: str = Field(default=..., description="ID of the run")
    trace_id: Optional[str] = Field(default=None, description="Trace ID associated with the run")
    dotted_order: Optional[str] = Field(default=None, description="Dotted order of the run")
    parent_run_id: Optional[str] = Field(default=None, description="Parent run ID")
    end_time: Optional[datetime | str] = Field(default=None, description="End time of the run")
    error: Optional[str] = Field(default=None, description="Error message of the run")
    inputs: Optional[dict[str, Any]] = Field(default=None, description="Inputs of the run")
    outputs: Optional[dict[str, Any]] = Field(default=None, description="Outputs of the run")
    events: Optional[list[dict[str, Any]]] = Field(default=None, description="Events associated with the run")
    tags: Optional[list[str]] = Field(default=None, description="Tags associated with the run")
    extra: Optional[dict[str, Any]] = Field(default=None, description="Extra information of the run")
    input_attachments: Optional[dict[str, Any]] = Field(default=None, description="Input attachments of the run")
    output_attachments: Optional[dict[str, Any]] = Field(default=None, description="Output attachments of the run")
| @@ -0,0 +1,355 @@ | |||
| import json | |||
| import logging | |||
| import os | |||
| from datetime import datetime, timedelta | |||
| from langsmith import Client | |||
| from core.ops.base_trace_instance import BaseTraceInstance | |||
| from core.ops.entities.config_entity import LangSmithConfig | |||
| from core.ops.entities.trace_entity import ( | |||
| BaseTraceInfo, | |||
| DatasetRetrievalTraceInfo, | |||
| GenerateNameTraceInfo, | |||
| MessageTraceInfo, | |||
| ModerationTraceInfo, | |||
| SuggestedQuestionTraceInfo, | |||
| ToolTraceInfo, | |||
| WorkflowTraceInfo, | |||
| ) | |||
| from core.ops.langsmith_trace.entities.langsmith_trace_entity import ( | |||
| LangSmithRunModel, | |||
| LangSmithRunType, | |||
| LangSmithRunUpdateModel, | |||
| ) | |||
| from core.ops.utils import filter_none_values | |||
| from extensions.ext_database import db | |||
| from models.model import EndUser, MessageFile | |||
| from models.workflow import WorkflowNodeExecution | |||
| logger = logging.getLogger(__name__) | |||
class LangSmithDataTrace(BaseTraceInstance):
    """Exports Dify trace events to LangSmith.

    Each ``*_trace`` handler converts a trace-info entity into one or more
    :class:`LangSmithRunModel` payloads and submits them through the
    LangSmith client. Delivery failures are surfaced as ``ValueError``.
    """

    def __init__(
        self,
        langsmith_config: LangSmithConfig,
    ):
        """Build the LangSmith client from the decrypted provider config.

        :param langsmith_config: decrypted LangSmith credentials and endpoint
        """
        super().__init__(langsmith_config)
        self.langsmith_key = langsmith_config.api_key
        self.project_name = langsmith_config.project
        # Resolved lazily elsewhere; when set it takes precedence over
        # project_name in add_run().
        self.project_id = None
        self.langsmith_client = Client(
            api_key=langsmith_config.api_key, api_url=langsmith_config.endpoint
        )
        # Base URL used to build absolute links for file attachments.
        self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001")

    def trace(self, trace_info: BaseTraceInfo):
        """Dispatch *trace_info* to the handler for its concrete type."""
        if isinstance(trace_info, WorkflowTraceInfo):
            self.workflow_trace(trace_info)
        if isinstance(trace_info, MessageTraceInfo):
            self.message_trace(trace_info)
        if isinstance(trace_info, ModerationTraceInfo):
            self.moderation_trace(trace_info)
        if isinstance(trace_info, SuggestedQuestionTraceInfo):
            self.suggested_question_trace(trace_info)
        if isinstance(trace_info, DatasetRetrievalTraceInfo):
            self.dataset_retrieval_trace(trace_info)
        if isinstance(trace_info, ToolTraceInfo):
            self.tool_trace(trace_info)
        if isinstance(trace_info, GenerateNameTraceInfo):
            self.generate_name_trace(trace_info)

    def workflow_trace(self, trace_info: WorkflowTraceInfo):
        """Export a workflow run plus one child run per node execution."""
        if trace_info.message_id:
            # Workflow triggered from a conversation: create a parent
            # "message" run so the workflow nests under the chat message.
            message_run = LangSmithRunModel(
                id=trace_info.message_id,
                name=f"message_{trace_info.message_id}",
                inputs=trace_info.workflow_run_inputs,
                outputs=trace_info.workflow_run_outputs,
                run_type=LangSmithRunType.chain,
                start_time=trace_info.start_time,
                end_time=trace_info.end_time,
                extra={
                    "metadata": trace_info.metadata,
                },
                tags=["message"],
                error=trace_info.error
            )
            self.add_run(message_run)

        # The app-log id is preferred as the run id when available so reruns
        # of the same workflow run stay distinguishable in LangSmith.
        langsmith_run = LangSmithRunModel(
            file_list=trace_info.file_list,
            total_tokens=trace_info.total_tokens,
            id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id,
            name=f"workflow_{trace_info.workflow_app_log_id}" if trace_info.workflow_app_log_id
            else f"workflow_{trace_info.workflow_run_id}",
            inputs=trace_info.workflow_run_inputs,
            run_type=LangSmithRunType.tool,
            start_time=trace_info.workflow_data.created_at,
            end_time=trace_info.workflow_data.finished_at,
            outputs=trace_info.workflow_run_outputs,
            extra={
                "metadata": trace_info.metadata,
            },
            error=trace_info.error,
            tags=["workflow"],
            parent_run_id=trace_info.message_id if trace_info.message_id else None,
        )
        self.add_run(langsmith_run)

        # through workflow_run_id get all_nodes_execution
        workflow_nodes_executions = (
            db.session.query(WorkflowNodeExecution)
            .filter(WorkflowNodeExecution.workflow_run_id == trace_info.workflow_run_id)
            .order_by(WorkflowNodeExecution.index.desc())
            .all()
        )

        for node_execution in workflow_nodes_executions:
            node_execution_id = node_execution.id
            tenant_id = node_execution.tenant_id
            app_id = node_execution.app_id
            node_name = node_execution.title
            node_type = node_execution.node_type
            status = node_execution.status
            if node_type == "llm":
                # LLM nodes keep their prompts in process_data rather than
                # inputs. Guard against a missing payload (bug fix: the
                # unguarded json.loads raised TypeError on None).
                inputs = (
                    json.loads(node_execution.process_data).get("prompts", {})
                    if node_execution.process_data
                    else {}
                )
            else:
                inputs = json.loads(node_execution.inputs) if node_execution.inputs else {}
            outputs = (
                json.loads(node_execution.outputs) if node_execution.outputs else {}
            )
            created_at = node_execution.created_at if node_execution.created_at else datetime.now()
            elapsed_time = node_execution.elapsed_time
            finished_at = created_at + timedelta(seconds=elapsed_time)

            # Parse execution_metadata once and reuse it (previously decoded
            # twice for the same column).
            execution_metadata = (
                json.loads(node_execution.execution_metadata)
                if node_execution.execution_metadata
                else {}
            )
            node_total_tokens = execution_metadata.get("total_tokens", 0)
            metadata = dict(execution_metadata)
            metadata.update(
                {
                    "workflow_run_id": trace_info.workflow_run_id,
                    "node_execution_id": node_execution_id,
                    "tenant_id": tenant_id,
                    "app_id": app_id,
                    "app_name": node_name,
                    "node_type": node_type,
                    "status": status,
                }
            )

            process_data = json.loads(node_execution.process_data) if node_execution.process_data else {}
            if process_data and process_data.get("model_mode") == "chat":
                run_type = LangSmithRunType.llm
            elif node_type == "knowledge-retrieval":
                run_type = LangSmithRunType.retriever
            else:
                run_type = LangSmithRunType.tool

            langsmith_run = LangSmithRunModel(
                total_tokens=node_total_tokens,
                name=f"{node_name}_{node_execution_id}",
                inputs=inputs,
                run_type=run_type,
                start_time=created_at,
                end_time=finished_at,
                outputs=outputs,
                file_list=trace_info.file_list,
                extra={
                    "metadata": metadata,
                },
                parent_run_id=trace_info.workflow_app_log_id
                if trace_info.workflow_app_log_id
                else trace_info.workflow_run_id,
                tags=["node_execution"],
            )
            self.add_run(langsmith_run)

    def message_trace(self, trace_info: MessageTraceInfo):
        """Export a chat message as a parent chain run plus a child llm run."""
        # get message file data; only append a URL when a file actually
        # exists (bug fix: previously an empty string was appended for
        # messages without attachments).
        file_list = trace_info.file_list
        message_file_data: MessageFile = trace_info.message_file_data
        if message_file_data:
            file_list.append(f"{self.file_base_url}/{message_file_data.url}")

        metadata = trace_info.metadata
        message_data = trace_info.message_data
        message_id = message_data.id

        user_id = message_data.from_account_id
        if message_data.from_end_user_id:
            # Bug fix: the original chained `.first().session_id` and then
            # dereferenced `.session_id` again, which always raised; it also
            # crashed when the EndUser row was missing.
            end_user_data: EndUser = db.session.query(EndUser).filter(
                EndUser.id == message_data.from_end_user_id
            ).first()
            if end_user_data is not None:
                metadata["end_user_id"] = end_user_data.session_id
        metadata["user_id"] = user_id

        message_run = LangSmithRunModel(
            input_tokens=trace_info.message_tokens,
            output_tokens=trace_info.answer_tokens,
            total_tokens=trace_info.total_tokens,
            id=message_id,
            name=f"message_{message_id}",
            inputs=trace_info.inputs,
            run_type=LangSmithRunType.chain,
            start_time=trace_info.start_time,
            end_time=trace_info.end_time,
            outputs=message_data.answer,
            extra={
                "metadata": metadata,
            },
            tags=["message", str(trace_info.conversation_mode)],
            error=trace_info.error,
            file_list=file_list,
        )
        self.add_run(message_run)

        # create llm run parented to message run
        llm_run = LangSmithRunModel(
            input_tokens=trace_info.message_tokens,
            output_tokens=trace_info.answer_tokens,
            total_tokens=trace_info.total_tokens,
            name=f"llm_{message_id}",
            inputs=trace_info.inputs,
            run_type=LangSmithRunType.llm,
            start_time=trace_info.start_time,
            end_time=trace_info.end_time,
            outputs=message_data.answer,
            extra={
                "metadata": metadata,
            },
            parent_run_id=message_id,
            tags=["llm", str(trace_info.conversation_mode)],
            error=trace_info.error,
            file_list=file_list,
        )
        self.add_run(llm_run)

    def moderation_trace(self, trace_info: ModerationTraceInfo):
        """Export a moderation check as a tool run under its message."""
        langsmith_run = LangSmithRunModel(
            name="moderation",
            inputs=trace_info.inputs,
            outputs={
                "action": trace_info.action,
                "flagged": trace_info.flagged,
                "preset_response": trace_info.preset_response,
                "inputs": trace_info.inputs,
            },
            run_type=LangSmithRunType.tool,
            extra={
                "metadata": trace_info.metadata,
            },
            tags=["moderation"],
            parent_run_id=trace_info.message_id,
            # Fall back to the message row's timestamps when the trace info
            # carries none.
            start_time=trace_info.start_time or trace_info.message_data.created_at,
            end_time=trace_info.end_time or trace_info.message_data.updated_at,
        )
        self.add_run(langsmith_run)

    def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo):
        """Export suggested-question generation as a tool run."""
        message_data = trace_info.message_data
        suggested_question_run = LangSmithRunModel(
            name="suggested_question",
            inputs=trace_info.inputs,
            outputs=trace_info.suggested_question,
            run_type=LangSmithRunType.tool,
            extra={
                "metadata": trace_info.metadata,
            },
            tags=["suggested_question"],
            parent_run_id=trace_info.message_id,
            start_time=trace_info.start_time or message_data.created_at,
            end_time=trace_info.end_time or message_data.updated_at,
        )
        self.add_run(suggested_question_run)

    def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo):
        """Export a dataset retrieval step as a retriever run."""
        dataset_retrieval_run = LangSmithRunModel(
            name="dataset_retrieval",
            inputs=trace_info.inputs,
            outputs={"documents": trace_info.documents},
            run_type=LangSmithRunType.retriever,
            extra={
                "metadata": trace_info.metadata,
            },
            tags=["dataset_retrieval"],
            parent_run_id=trace_info.message_id,
            start_time=trace_info.start_time or trace_info.message_data.created_at,
            end_time=trace_info.end_time or trace_info.message_data.updated_at,
        )
        self.add_run(dataset_retrieval_run)

    def tool_trace(self, trace_info: ToolTraceInfo):
        """Export a tool invocation as a tool run."""
        tool_run = LangSmithRunModel(
            name=trace_info.tool_name,
            inputs=trace_info.tool_inputs,
            outputs=trace_info.tool_outputs,
            run_type=LangSmithRunType.tool,
            extra={
                "metadata": trace_info.metadata,
            },
            tags=["tool", trace_info.tool_name],
            parent_run_id=trace_info.message_id,
            start_time=trace_info.start_time,
            end_time=trace_info.end_time,
            file_list=[trace_info.file_url],
        )
        self.add_run(tool_run)

    def generate_name_trace(self, trace_info: GenerateNameTraceInfo):
        """Export conversation-name generation as a tool run."""
        name_run = LangSmithRunModel(
            name="generate_name",
            inputs=trace_info.inputs,
            outputs=trace_info.outputs,
            run_type=LangSmithRunType.tool,
            extra={
                "metadata": trace_info.metadata,
            },
            tags=["generate_name"],
            start_time=trace_info.start_time or datetime.now(),
            end_time=trace_info.end_time or datetime.now(),
        )
        self.add_run(name_run)

    def add_run(self, run_data: LangSmithRunModel):
        """Create a run in LangSmith.

        :param run_data: run payload; None-valued fields are stripped
        :raises ValueError: when the LangSmith API call fails
        """
        data = run_data.model_dump()
        if self.project_id:
            data["session_id"] = self.project_id
        elif self.project_name:
            data["session_name"] = self.project_name
        data = filter_none_values(data)
        try:
            self.langsmith_client.create_run(**data)
            logger.debug("LangSmith Run created successfully.")
        except Exception as e:
            # Preserve the cause chain for easier debugging upstream.
            raise ValueError(f"LangSmith Failed to create run: {str(e)}") from e

    def update_run(self, update_run_data: LangSmithRunUpdateModel):
        """Patch an existing run in LangSmith.

        :raises ValueError: when the LangSmith API call fails
        """
        data = update_run_data.model_dump()
        data = filter_none_values(data)
        try:
            self.langsmith_client.update_run(**data)
            logger.debug("LangSmith Run updated successfully.")
        except Exception as e:
            raise ValueError(f"LangSmith Failed to update run: {str(e)}") from e

    def api_check(self):
        """Verify the configured credentials by creating and deleting a
        throwaway project.

        :return: True when the round-trip succeeds
        :raises ValueError: when the API rejects the credentials
        """
        try:
            random_project_name = f"test_project_{datetime.now().strftime('%Y%m%d%H%M%S')}"
            self.langsmith_client.create_project(project_name=random_project_name)
            self.langsmith_client.delete_project(project_name=random_project_name)
            return True
        except Exception as e:
            logger.debug(f"LangSmith API check failed: {str(e)}")
            raise ValueError(f"LangSmith API check failed: {str(e)}") from e
| @@ -0,0 +1,687 @@ | |||
| import json | |||
| import os | |||
| import queue | |||
| import threading | |||
| from datetime import timedelta | |||
| from enum import Enum | |||
| from typing import Any, Optional, Union | |||
| from uuid import UUID | |||
| from flask import Flask, current_app | |||
| from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token | |||
| from core.ops.base_trace_instance import BaseTraceInstance | |||
| from core.ops.entities.config_entity import ( | |||
| LangfuseConfig, | |||
| LangSmithConfig, | |||
| TracingProviderEnum, | |||
| ) | |||
| from core.ops.entities.trace_entity import ( | |||
| DatasetRetrievalTraceInfo, | |||
| GenerateNameTraceInfo, | |||
| MessageTraceInfo, | |||
| ModerationTraceInfo, | |||
| SuggestedQuestionTraceInfo, | |||
| ToolTraceInfo, | |||
| WorkflowTraceInfo, | |||
| ) | |||
| from core.ops.langfuse_trace.langfuse_trace import LangFuseDataTrace | |||
| from core.ops.langsmith_trace.langsmith_trace import LangSmithDataTrace | |||
| from core.ops.utils import get_message_data | |||
| from extensions.ext_database import db | |||
| from models.model import App, AppModelConfig, Conversation, Message, MessageAgentThought, MessageFile, TraceAppConfig | |||
| from models.workflow import WorkflowAppLog, WorkflowRun | |||
# Registry describing, per tracing provider, which pydantic config class
# validates its settings, which config keys hold secrets (and therefore get
# encrypted/obfuscated), which keys are stored in plain text, and which
# trace exporter class to instantiate.
provider_config_map = {
    TracingProviderEnum.LANGFUSE.value: {
        'config_class': LangfuseConfig,
        'secret_keys': ['public_key', 'secret_key'],
        'other_keys': ['host'],
        'trace_instance': LangFuseDataTrace
    },
    TracingProviderEnum.LANGSMITH.value: {
        'config_class': LangSmithConfig,
        'secret_keys': ['api_key'],
        'other_keys': ['project', 'endpoint'],
        'trace_instance': LangSmithDataTrace
    }
}
class OpsTraceManager:
    """Manages per-app tracing configuration.

    Responsible for encrypting/decrypting/obfuscating provider secrets,
    persisting the per-app tracing switch, and turning a stored config into
    a concrete trace exporter instance.
    """

    @classmethod
    def encrypt_tracing_config(
        cls, tenant_id: str, tracing_provider: str, tracing_config: dict, current_trace_config=None
    ):
        """
        Encrypt tracing config.
        :param tenant_id: tenant id
        :param tracing_provider: tracing provider
        :param tracing_config: tracing config dictionary to be encrypted
        :param current_trace_config: current tracing configuration for keeping existing values
        :return: encrypted tracing configuration
        """
        # Get the configuration class and the keys that require encryption
        config_class, secret_keys, other_keys = provider_config_map[tracing_provider]['config_class'], \
            provider_config_map[tracing_provider]['secret_keys'], provider_config_map[tracing_provider]['other_keys']
        new_config = {}
        # Encrypt necessary keys
        for key in secret_keys:
            if key in tracing_config:
                if '*' in tracing_config[key]:
                    # The UI echoes back an obfuscated value (contains '*'):
                    # keep the previously stored secret instead of encrypting
                    # the placeholder. Bug fix: tolerate current_trace_config
                    # being None instead of raising AttributeError.
                    new_config[key] = (current_trace_config or {}).get(key, tracing_config[key])
                else:
                    # Otherwise, encrypt the key
                    new_config[key] = encrypt_token(tenant_id, tracing_config[key])
        for key in other_keys:
            new_config[key] = tracing_config.get(key, "")
        # Validate through the provider's config class before persisting.
        encrypted_config = config_class(**new_config)
        return encrypted_config.model_dump()

    @classmethod
    def decrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_config: dict):
        """
        Decrypt tracing config
        :param tenant_id: tenant id
        :param tracing_provider: tracing provider
        :param tracing_config: tracing config
        :return: decrypted tracing configuration dict
        """
        config_class, secret_keys, other_keys = provider_config_map[tracing_provider]['config_class'], \
            provider_config_map[tracing_provider]['secret_keys'], provider_config_map[tracing_provider]['other_keys']
        new_config = {}
        for key in secret_keys:
            if key in tracing_config:
                new_config[key] = decrypt_token(tenant_id, tracing_config[key])
        for key in other_keys:
            new_config[key] = tracing_config.get(key, "")
        return config_class(**new_config).model_dump()

    @classmethod
    def obfuscated_decrypt_token(cls, tracing_provider: str, decrypt_tracing_config: dict):
        """
        Obfuscate the secret keys of an already-decrypted tracing config so it
        can be returned to the UI without exposing credentials.
        :param tracing_provider: tracing provider
        :param decrypt_tracing_config: decrypted tracing config
        :return: tracing config with secrets replaced by obfuscated tokens
        """
        config_class, secret_keys, other_keys = provider_config_map[tracing_provider]['config_class'], \
            provider_config_map[tracing_provider]['secret_keys'], provider_config_map[tracing_provider]['other_keys']
        new_config = {}
        for key in secret_keys:
            if key in decrypt_tracing_config:
                new_config[key] = obfuscated_token(decrypt_tracing_config[key])
        for key in other_keys:
            new_config[key] = decrypt_tracing_config.get(key, "")
        return config_class(**new_config).model_dump()

    @classmethod
    def get_decrypted_tracing_config(cls, app_id: str, tracing_provider: str):
        """
        Get decrypted tracing config
        :param app_id: app id
        :param tracing_provider: tracing provider
        :return: decrypted config dict, or None when no config is stored
        """
        trace_config_data: TraceAppConfig = db.session.query(TraceAppConfig).filter(
            TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider
        ).first()
        if not trace_config_data:
            return None
        # decrypt_token requires the owning tenant's key.
        tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id
        decrypt_tracing_config = cls.decrypt_tracing_config(
            tenant_id, tracing_provider, trace_config_data.tracing_config
        )
        return decrypt_tracing_config

    @classmethod
    def get_ops_trace_instance(
        cls,
        app_id: Optional[Union[UUID, str]] = None,
        message_id: Optional[str] = None,
        conversation_id: Optional[str] = None
    ):
        """
        Get ops trace instance for an app, resolved from any of the three ids.
        :param app_id: app_id
        :param message_id: message_id
        :param conversation_id: conversation_id
        :return: trace exporter instance, or None when tracing is disabled
            or not configured
        """
        if conversation_id is not None:
            conversation_data: Conversation = db.session.query(Conversation).filter(
                Conversation.id == conversation_id
            ).first()
            if conversation_data:
                app_id = conversation_data.app_id

        if message_id is not None:
            record: Message = db.session.query(Message).filter(Message.id == message_id).first()
            # Bug fix: guard against a missing message row instead of
            # raising AttributeError on None.
            if record:
                app_id = record.app_id

        if isinstance(app_id, UUID):
            app_id = str(app_id)

        if app_id is None:
            return None

        app: App = db.session.query(App).filter(
            App.id == app_id
        ).first()
        app_ops_trace_config = json.loads(app.tracing) if app.tracing else None

        if app_ops_trace_config is not None:
            tracing_provider = app_ops_trace_config.get('tracing_provider')
        else:
            return None

        # decrypt the stored provider secrets before instantiating
        decrypt_trace_config = cls.get_decrypted_tracing_config(app_id, tracing_provider)
        if app_ops_trace_config.get('enabled'):
            trace_instance, config_class = provider_config_map[tracing_provider]['trace_instance'], \
                provider_config_map[tracing_provider]['config_class']
            tracing_instance = trace_instance(config_class(**decrypt_trace_config))
            return tracing_instance

        return None

    @classmethod
    def get_app_config_through_message_id(cls, message_id: str):
        """Resolve the app model config used by the conversation that owns
        *message_id*; falls back to the conversation's override configs."""
        app_model_config = None
        message_data = db.session.query(Message).filter(Message.id == message_id).first()
        conversation_id = message_data.conversation_id
        conversation_data = db.session.query(Conversation).filter(Conversation.id == conversation_id).first()

        if conversation_data.app_model_config_id:
            app_model_config = db.session.query(AppModelConfig).filter(
                AppModelConfig.id == conversation_data.app_model_config_id
            ).first()
        elif conversation_data.app_model_config_id is None and conversation_data.override_model_configs:
            app_model_config = conversation_data.override_model_configs

        return app_model_config

    @classmethod
    def update_app_tracing_config(cls, app_id: str, enabled: bool, tracing_provider: str):
        """
        Update app tracing config
        :param app_id: app id
        :param enabled: enabled
        :param tracing_provider: tracing provider (None allowed to clear it)
        :return: None
        """
        # auth check: only known providers (or None, which disables) accepted
        if tracing_provider is not None and tracing_provider not in provider_config_map:
            raise ValueError(f"Invalid tracing provider: {tracing_provider}")

        app_config: App = db.session.query(App).filter(App.id == app_id).first()
        app_config.tracing = json.dumps(
            {
                "enabled": enabled,
                "tracing_provider": tracing_provider,
            }
        )
        db.session.commit()

    @classmethod
    def get_app_tracing_config(cls, app_id: str):
        """
        Get app tracing config
        :param app_id: app id
        :return: dict with 'enabled' and 'tracing_provider' keys
        """
        app: App = db.session.query(App).filter(App.id == app_id).first()
        if not app.tracing:
            # No config stored yet: report tracing as disabled.
            return {
                "enabled": False,
                "tracing_provider": None
            }
        app_trace_config = json.loads(app.tracing)
        return app_trace_config

    @staticmethod
    def check_trace_config_is_effective(tracing_config: dict, tracing_provider: str):
        """
        Check trace config is effective by round-tripping the provider's API.
        :param tracing_config: tracing config
        :param tracing_provider: tracing provider
        :return: True when the provider accepts the credentials
        """
        config_type, trace_instance = provider_config_map[tracing_provider]['config_class'], \
            provider_config_map[tracing_provider]['trace_instance']
        tracing_config = config_type(**tracing_config)
        return trace_instance(tracing_config).api_check()
class TraceTaskName(str, Enum):
    """Identifiers for the kinds of trace tasks handled by the ops-trace
    pipeline; values double as the task-name strings returned by
    ``TraceTask.preprocess``."""

    CONVERSATION_TRACE = 'conversation_trace'
    WORKFLOW_TRACE = 'workflow_trace'
    MESSAGE_TRACE = 'message_trace'
    MODERATION_TRACE = 'moderation_trace'
    SUGGESTED_QUESTION_TRACE = 'suggested_question_trace'
    DATASET_RETRIEVAL_TRACE = 'dataset_retrieval_trace'
    TOOL_TRACE = 'tool_trace'
    GENERATE_NAME_TRACE = 'generate_name_trace'
| class TraceTask: | |||
| def __init__( | |||
| self, | |||
| trace_type: Any, | |||
| message_id: Optional[str] = None, | |||
| workflow_run: Optional[WorkflowRun] = None, | |||
| conversation_id: Optional[str] = None, | |||
| timer: Optional[Any] = None, | |||
| **kwargs | |||
| ): | |||
| self.trace_type = trace_type | |||
| self.message_id = message_id | |||
| self.workflow_run = workflow_run | |||
| self.conversation_id = conversation_id | |||
| self.timer = timer | |||
| self.kwargs = kwargs | |||
| self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001") | |||
| def execute(self, trace_instance: BaseTraceInstance): | |||
| method_name, trace_info = self.preprocess() | |||
| if trace_instance: | |||
| method = trace_instance.trace | |||
| method(trace_info) | |||
| def preprocess(self): | |||
| if self.trace_type == TraceTaskName.CONVERSATION_TRACE: | |||
| return TraceTaskName.CONVERSATION_TRACE, self.conversation_trace(**self.kwargs) | |||
| if self.trace_type == TraceTaskName.WORKFLOW_TRACE: | |||
| return TraceTaskName.WORKFLOW_TRACE, self.workflow_trace(self.workflow_run, self.conversation_id) | |||
| elif self.trace_type == TraceTaskName.MESSAGE_TRACE: | |||
| return TraceTaskName.MESSAGE_TRACE, self.message_trace(self.message_id) | |||
| elif self.trace_type == TraceTaskName.MODERATION_TRACE: | |||
| return TraceTaskName.MODERATION_TRACE, self.moderation_trace(self.message_id, self.timer, **self.kwargs) | |||
| elif self.trace_type == TraceTaskName.SUGGESTED_QUESTION_TRACE: | |||
| return TraceTaskName.SUGGESTED_QUESTION_TRACE, self.suggested_question_trace( | |||
| self.message_id, self.timer, **self.kwargs | |||
| ) | |||
| elif self.trace_type == TraceTaskName.DATASET_RETRIEVAL_TRACE: | |||
| return TraceTaskName.DATASET_RETRIEVAL_TRACE, self.dataset_retrieval_trace( | |||
| self.message_id, self.timer, **self.kwargs | |||
| ) | |||
| elif self.trace_type == TraceTaskName.TOOL_TRACE: | |||
| return TraceTaskName.TOOL_TRACE, self.tool_trace(self.message_id, self.timer, **self.kwargs) | |||
| elif self.trace_type == TraceTaskName.GENERATE_NAME_TRACE: | |||
| return TraceTaskName.GENERATE_NAME_TRACE, self.generate_name_trace( | |||
| self.conversation_id, self.timer, **self.kwargs | |||
| ) | |||
| else: | |||
| return '', {} | |||
    # process methods for different trace types
    def conversation_trace(self, **kwargs):
        """Pass the task's keyword arguments through unchanged as the
        conversation trace info."""
        return kwargs
    def workflow_trace(self, workflow_run: WorkflowRun, conversation_id):
        """Assemble a WorkflowTraceInfo entity from a finished workflow run.

        Loads the run's JSON-encoded inputs/outputs, looks up the associated
        app log and message rows, and bundles everything (plus metadata)
        into the trace entity consumed by the provider exporters.
        """
        workflow_id = workflow_run.workflow_id
        tenant_id = workflow_run.tenant_id
        workflow_run_id = workflow_run.id
        workflow_run_elapsed_time = workflow_run.elapsed_time
        workflow_run_status = workflow_run.status
        # inputs/outputs are stored as JSON strings; empty columns become {}.
        workflow_run_inputs = (
            json.loads(workflow_run.inputs) if workflow_run.inputs else {}
        )
        workflow_run_outputs = (
            json.loads(workflow_run.outputs) if workflow_run.outputs else {}
        )
        workflow_run_version = workflow_run.version
        error = workflow_run.error if workflow_run.error else ""

        total_tokens = workflow_run.total_tokens

        file_list = workflow_run_inputs.get("sys.file") if workflow_run_inputs.get("sys.file") else []
        query = workflow_run_inputs.get("query") or workflow_run_inputs.get("sys.query") or ""

        # get workflow_app_log_id
        workflow_app_log_data = db.session.query(WorkflowAppLog).filter_by(workflow_run_id=workflow_run.id).first()
        workflow_app_log_id = str(workflow_app_log_data.id) if workflow_app_log_data else None
        # get message_id
        message_data = db.session.query(Message.id).filter_by(workflow_run_id=workflow_run_id).first()
        message_id = str(message_data.id) if message_data else None

        metadata = {
            "workflow_id": workflow_id,
            "conversation_id": conversation_id,
            "workflow_run_id": workflow_run_id,
            "tenant_id": tenant_id,
            "elapsed_time": workflow_run_elapsed_time,
            "status": workflow_run_status,
            "version": workflow_run_version,
            "total_tokens": total_tokens,
            "file_list": file_list,
            # NOTE(review): key is "triggered_form" (sic, likely meant
            # "triggered_from") — downstream consumers may depend on the
            # typo, so it is left unchanged here.
            "triggered_form": workflow_run.triggered_from,
        }

        workflow_trace_info = WorkflowTraceInfo(
            workflow_data=workflow_run,
            conversation_id=conversation_id,
            workflow_id=workflow_id,
            tenant_id=tenant_id,
            workflow_run_id=workflow_run_id,
            workflow_run_elapsed_time=workflow_run_elapsed_time,
            workflow_run_status=workflow_run_status,
            workflow_run_inputs=workflow_run_inputs,
            workflow_run_outputs=workflow_run_outputs,
            workflow_run_version=workflow_run_version,
            error=error,
            total_tokens=total_tokens,
            file_list=file_list,
            query=query,
            metadata=metadata,
            workflow_app_log_id=workflow_app_log_id,
            message_id=message_id,
            start_time=workflow_run.created_at,
            end_time=workflow_run.finished_at,
        )

        return workflow_trace_info
def message_trace(self, message_id):
    """
    Build a MessageTraceInfo payload for a chat message.

    :param message_id: id of the message to trace
    :return: MessageTraceInfo, or {} when the message or its conversation
        no longer exists
    """
    message_data = get_message_data(message_id)
    if not message_data:
        return {}
    conversation_mode_row = db.session.query(Conversation.mode).filter_by(id=message_data.conversation_id).first()
    # fixed: previously `conversation_mode[0]` raised a TypeError when the
    # conversation row had been deleted; treat it like a missing message
    if not conversation_mode_row:
        return {}
    conversation_mode = conversation_mode_row[0]

    created_at = message_data.created_at
    inputs = message_data.message

    # a message carries at most one file row here; record its public URL
    message_file_data = db.session.query(MessageFile).filter_by(message_id=message_id).first()
    file_list = []
    if message_file_data and message_file_data.url is not None:
        file_list.append(f"{self.file_base_url}/{message_file_data.url}")

    metadata = {
        "conversation_id": message_data.conversation_id,
        "ls_provider": message_data.model_provider,
        "ls_model_name": message_data.model_id,
        "status": message_data.status,
        # NOTE(review): from_end_user_id is sourced from from_account_id —
        # confirm whether it should be message_data.from_end_user_id
        "from_end_user_id": message_data.from_account_id,
        "from_account_id": message_data.from_account_id,
        "agent_based": message_data.agent_based,
        "workflow_run_id": message_data.workflow_run_id,
        "from_source": message_data.from_source,
        "message_id": message_id,
    }

    message_tokens = message_data.message_tokens
    message_trace_info = MessageTraceInfo(
        message_data=message_data,
        conversation_model=conversation_mode,
        message_tokens=message_tokens,
        answer_tokens=message_data.answer_tokens,
        total_tokens=message_tokens + message_data.answer_tokens,
        error=message_data.error or "",
        inputs=inputs,
        outputs=message_data.answer,
        file_list=file_list,
        start_time=created_at,
        end_time=created_at + timedelta(seconds=message_data.provider_response_latency),
        metadata=metadata,
        message_file_data=message_file_data,
        conversation_mode=conversation_mode,
    )
    return message_trace_info
def moderation_trace(self, message_id, timer, **kwargs):
    """
    Build a ModerationTraceInfo payload for a content-moderation check.

    :param message_id: id of the moderated message
    :param timer: dict with 'start'/'end' datetimes measured around the check
    :param kwargs: expects 'moderation_result' and 'inputs'
    :return: ModerationTraceInfo, or {} when the message no longer exists
    """
    moderation_result = kwargs.get("moderation_result")
    inputs = kwargs.get("inputs")
    message_data = get_message_data(message_id)
    if not message_data:
        return {}

    metadata = {
        "message_id": message_id,
        "action": moderation_result.action,
        "preset_response": moderation_result.preset_response,
        "query": moderation_result.query,
    }

    # prefer the workflow app log id over the raw message id when the
    # message originated from a workflow run
    workflow_app_log_id = None
    if message_data.workflow_run_id:
        log_row = (
            db.session.query(WorkflowAppLog)
            .filter_by(workflow_run_id=message_data.workflow_run_id)
            .first()
        )
        if log_row:
            workflow_app_log_id = str(log_row.id)

    return ModerationTraceInfo(
        message_id=workflow_app_log_id or message_id,
        inputs=inputs,
        message_data=message_data,
        flagged=moderation_result.flagged,
        action=moderation_result.action,
        preset_response=moderation_result.preset_response,
        query=moderation_result.query,
        start_time=timer.get("start"),
        end_time=timer.get("end"),
        metadata=metadata,
    )
def suggested_question_trace(self, message_id, timer, **kwargs):
    """
    Build a SuggestedQuestionTraceInfo payload for suggested-question generation.

    :param message_id: id of the message the questions were generated for
    :param timer: dict with 'start'/'end' datetimes measured around the call
    :param kwargs: expects 'suggested_question'
    :return: SuggestedQuestionTraceInfo, or {} when the message no longer exists
    """
    suggested_question = kwargs.get("suggested_question")
    message_data = get_message_data(message_id)
    if not message_data:
        return {}

    metadata = {
        "message_id": message_id,
        "ls_provider": message_data.model_provider,
        "ls_model_name": message_data.model_id,
        "status": message_data.status,
        # NOTE(review): from_end_user_id is sourced from from_account_id —
        # confirm whether it should be message_data.from_end_user_id
        "from_end_user_id": message_data.from_account_id,
        "from_account_id": message_data.from_account_id,
        "agent_based": message_data.agent_based,
        "workflow_run_id": message_data.workflow_run_id,
        "from_source": message_data.from_source,
    }

    # prefer the workflow app log id over the raw message id when the
    # message originated from a workflow run
    workflow_app_log_id = None
    if message_data.workflow_run_id:
        log_row = (
            db.session.query(WorkflowAppLog)
            .filter_by(workflow_run_id=message_data.workflow_run_id)
            .first()
        )
        if log_row:
            workflow_app_log_id = str(log_row.id)

    return SuggestedQuestionTraceInfo(
        message_id=workflow_app_log_id or message_id,
        message_data=message_data,
        inputs=message_data.message,
        outputs=message_data.answer,
        start_time=timer.get("start"),
        end_time=timer.get("end"),
        metadata=metadata,
        total_tokens=message_data.message_tokens + message_data.answer_tokens,
        status=message_data.status,
        error=message_data.error,
        from_account_id=message_data.from_account_id,
        agent_based=message_data.agent_based,
        from_source=message_data.from_source,
        model_provider=message_data.model_provider,
        model_id=message_data.model_id,
        suggested_question=suggested_question,
        level=message_data.status,
        status_message=message_data.error,
    )
def dataset_retrieval_trace(self, message_id, timer, **kwargs):
    """
    Build a DatasetRetrievalTraceInfo payload for a dataset retrieval step.

    :param message_id: id of the message the retrieval belongs to
    :param timer: dict with 'start'/'end' datetimes measured around retrieval
    :param kwargs: expects 'documents' (the retrieved documents)
    :return: DatasetRetrievalTraceInfo, or {} when the message no longer exists
    """
    documents = kwargs.get("documents")
    message_data = get_message_data(message_id)
    if not message_data:
        return {}

    metadata = {
        "message_id": message_id,
        "ls_provider": message_data.model_provider,
        "ls_model_name": message_data.model_id,
        "status": message_data.status,
        "from_end_user_id": message_data.from_account_id,
        "from_account_id": message_data.from_account_id,
        "agent_based": message_data.agent_based,
        "workflow_run_id": message_data.workflow_run_id,
        "from_source": message_data.from_source,
    }

    return DatasetRetrievalTraceInfo(
        message_id=message_id,
        # fall back to the structured inputs when no plain query was recorded
        inputs=message_data.query or message_data.inputs,
        documents=documents,
        start_time=timer.get("start"),
        end_time=timer.get("end"),
        metadata=metadata,
        message_data=message_data,
    )
def tool_trace(self, message_id, timer, **kwargs):
    """
    Build a ToolTraceInfo payload for an agent tool invocation.

    :param message_id: id of the message whose agent thoughts contain the call
    :param timer: optional {'start','end'} dict; falls back to the agent
        thought's own timestamps when absent
    :param kwargs: expects 'tool_name', 'tool_inputs', 'tool_outputs'
    :return: ToolTraceInfo, or {} when the message no longer exists
    """
    tool_name = kwargs.get('tool_name')
    tool_inputs = kwargs.get('tool_inputs')
    tool_outputs = kwargs.get('tool_outputs')
    message_data = get_message_data(message_id)
    if not message_data:
        return {}

    tool_config = {}
    time_cost = 0
    error = None
    tool_parameters = {}
    created_time = message_data.created_at
    end_time = message_data.updated_at

    # pull timing/config/error for this tool from the matching agent thought;
    # when several thoughts used the tool, the last match wins (as before)
    agent_thoughts: list[MessageAgentThought] = message_data.agent_thoughts
    for agent_thought in agent_thoughts:
        if tool_name in agent_thought.tools:
            created_time = agent_thought.created_at
            tool_meta_data = agent_thought.tool_meta.get(tool_name, {})
            tool_config = tool_meta_data.get('tool_config', {})
            time_cost = tool_meta_data.get('time_cost', 0)
            end_time = created_time + timedelta(seconds=time_cost)
            error = tool_meta_data.get('error', "")
            tool_parameters = tool_meta_data.get('tool_parameters', {})

    metadata = {
        "message_id": message_id,
        "tool_name": tool_name,
        "tool_inputs": tool_inputs,
        "tool_outputs": tool_outputs,
        "tool_config": tool_config,
        "time_cost": time_cost,
        "error": error,
        "tool_parameters": tool_parameters,
    }

    file_url = ""
    message_file_data = db.session.query(MessageFile).filter_by(message_id=message_id).first()
    if message_file_data:
        # fixed: previous code shadowed the builtin `type` and re-tested
        # message_file_data inside a branch where it is always truthy
        file_url = f"{self.file_base_url}/{message_file_data.url}"
        metadata.update(
            {
                "message_file_id": message_file_data.id,
                "created_by_role": message_file_data.created_by_role,
                "created_user_id": message_file_data.created_by,
                "type": message_file_data.type,
            }
        )

    tool_trace_info = ToolTraceInfo(
        message_id=message_id,
        message_data=message_data,
        tool_name=tool_name,
        start_time=timer.get("start") if timer else created_time,
        end_time=timer.get("end") if timer else end_time,
        tool_inputs=tool_inputs,
        tool_outputs=tool_outputs,
        metadata=metadata,
        message_file_data=message_file_data,
        error=error,
        inputs=message_data.message,
        outputs=message_data.answer,
        tool_config=tool_config,
        time_cost=time_cost,
        tool_parameters=tool_parameters,
        file_url=file_url,
    )
    return tool_trace_info
def generate_name_trace(self, conversation_id, timer, **kwargs):
    """
    Build a GenerateNameTraceInfo payload for conversation-name generation.

    :param conversation_id: id of the conversation that was named
    :param timer: dict with 'start'/'end' datetimes measured around the call
    :param kwargs: expects 'generate_conversation_name', 'inputs', 'tenant_id'
    :return: GenerateNameTraceInfo for the tracing provider
    """
    outputs = kwargs.get("generate_conversation_name")
    inputs = kwargs.get("inputs")
    tenant_id = kwargs.get("tenant_id")

    metadata = {
        "conversation_id": conversation_id,
        "tenant_id": tenant_id,
    }

    return GenerateNameTraceInfo(
        conversation_id=conversation_id,
        inputs=inputs,
        outputs=outputs,
        start_time=timer.get("start"),
        end_time=timer.get("end"),
        metadata=metadata,
        tenant_id=tenant_id,
    )
class TraceQueueManager:
    """
    Background consumer that forwards TraceTask items to the configured
    tracing provider without blocking the request thread.
    """

    def __init__(self, app_id=None, conversation_id=None, message_id=None):
        # resolve the tracing provider for this app/conversation/message
        tracing_instance = OpsTraceManager.get_ops_trace_instance(app_id, conversation_id, message_id)
        self.queue = queue.Queue()
        self.is_running = True
        self.thread = threading.Thread(
            target=self.process_queue,
            kwargs={
                'flask_app': current_app._get_current_object(),
                'trace_instance': tracing_instance,
            },
        )
        self.thread.start()

    def stop(self):
        """Signal the worker loop to exit after its current get() returns."""
        self.is_running = False

    def process_queue(self, flask_app: Flask, trace_instance: BaseTraceInstance):
        """
        Worker loop: execute queued trace tasks until the queue has been
        idle for 60 seconds, then shut the thread down.
        """
        with flask_app.app_context():
            while self.is_running:
                try:
                    task = self.queue.get(timeout=60)
                except queue.Empty:
                    # no work for a minute: stop instead of spinning forever
                    self.stop()
                    continue
                try:
                    task.execute(trace_instance)
                except Exception:
                    # fixed: a failing trace export previously killed the
                    # worker thread and left the task unacknowledged
                    flask_app.logger.exception("Failed to execute trace task")
                finally:
                    # always acknowledge so queue.join() cannot deadlock
                    self.queue.task_done()

    def add_trace_task(self, trace_task: TraceTask):
        """Enqueue a TraceTask for asynchronous processing."""
        self.queue.put(trace_task)
| @@ -0,0 +1,43 @@ | |||
| from contextlib import contextmanager | |||
| from datetime import datetime | |||
| from extensions.ext_database import db | |||
| from models.model import Message | |||
def filter_none_values(data: dict) -> dict:
    """
    Return a copy of ``data`` without None values, with datetimes ISO-formatted.

    Fixed: the previous implementation wrote the isoformat strings back into
    the caller's dict, mutating a shared argument as a side effect; this
    version leaves the input untouched.

    :param data: arbitrary mapping of trace attributes
    :return: new dict with None entries dropped and datetimes serialized
    """
    filtered = {}
    for key, value in data.items():
        if value is None:
            continue
        filtered[key] = value.isoformat() if isinstance(value, datetime) else value
    return filtered
def get_message_data(message_id):
    """Load the Message row with the given id, or None when it does not exist."""
    return db.session.query(Message).filter_by(id=message_id).first()
@contextmanager
def measure_time():
    """
    Context manager yielding a ``{'start': datetime, 'end': datetime}`` dict.

    ``end`` is populated when the block exits, even on exception. Fixed:
    removed the debug ``print`` that wrote the elapsed time to stdout on
    every measured call in library code.
    """
    timing_info = {'start': datetime.now(), 'end': None}
    try:
        yield timing_info
    finally:
        timing_info['end'] = datetime.now()
def replace_text_with_content(data):
    """
    Recursively rename ``'text'`` keys to ``'content'`` in nested dicts/lists.

    The value stored under a ``'text'`` key is carried over as-is (not
    recursed into); all other values are processed recursively. Non-container
    inputs are returned unchanged.
    """
    if isinstance(data, list):
        return [replace_text_with_content(element) for element in data]
    if isinstance(data, dict):
        return {
            ('content' if key == 'text' else key):
                (value if key == 'text' else replace_text_with_content(value))
            for key, value in data.items()
        }
    return data
| @@ -12,6 +12,8 @@ from core.model_manager import ModelInstance, ModelManager | |||
| from core.model_runtime.entities.message_entities import PromptMessageTool | |||
| from core.model_runtime.entities.model_entities import ModelFeature, ModelType | |||
| from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel | |||
| from core.ops.ops_trace_manager import TraceTask, TraceTaskName | |||
| from core.ops.utils import measure_time | |||
| from core.rag.datasource.retrieval_service import RetrievalService | |||
| from core.rag.models.document import Document | |||
| from core.rag.rerank.rerank import RerankRunner | |||
| @@ -38,14 +40,20 @@ default_retrieval_model = { | |||
| class DatasetRetrieval: | |||
| def retrieve(self, app_id: str, user_id: str, tenant_id: str, | |||
| model_config: ModelConfigWithCredentialsEntity, | |||
| config: DatasetEntity, | |||
| query: str, | |||
| invoke_from: InvokeFrom, | |||
| show_retrieve_source: bool, | |||
| hit_callback: DatasetIndexToolCallbackHandler, | |||
| memory: Optional[TokenBufferMemory] = None) -> Optional[str]: | |||
| def __init__(self, application_generate_entity=None): | |||
| self.application_generate_entity = application_generate_entity | |||
| def retrieve( | |||
| self, app_id: str, user_id: str, tenant_id: str, | |||
| model_config: ModelConfigWithCredentialsEntity, | |||
| config: DatasetEntity, | |||
| query: str, | |||
| invoke_from: InvokeFrom, | |||
| show_retrieve_source: bool, | |||
| hit_callback: DatasetIndexToolCallbackHandler, | |||
| message_id: str, | |||
| memory: Optional[TokenBufferMemory] = None, | |||
| ) -> Optional[str]: | |||
| """ | |||
| Retrieve dataset. | |||
| :param app_id: app_id | |||
| @@ -57,6 +65,7 @@ class DatasetRetrieval: | |||
| :param invoke_from: invoke from | |||
| :param show_retrieve_source: show retrieve source | |||
| :param hit_callback: hit callback | |||
| :param message_id: message id | |||
| :param memory: memory | |||
| :return: | |||
| """ | |||
| @@ -113,15 +122,20 @@ class DatasetRetrieval: | |||
| all_documents = [] | |||
| user_from = 'account' if invoke_from in [InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER] else 'end_user' | |||
| if retrieve_config.retrieve_strategy == DatasetRetrieveConfigEntity.RetrieveStrategy.SINGLE: | |||
| all_documents = self.single_retrieve(app_id, tenant_id, user_id, user_from, available_datasets, query, | |||
| model_instance, | |||
| model_config, planning_strategy) | |||
| all_documents = self.single_retrieve( | |||
| app_id, tenant_id, user_id, user_from, available_datasets, query, | |||
| model_instance, | |||
| model_config, planning_strategy, message_id | |||
| ) | |||
| elif retrieve_config.retrieve_strategy == DatasetRetrieveConfigEntity.RetrieveStrategy.MULTIPLE: | |||
| all_documents = self.multiple_retrieve(app_id, tenant_id, user_id, user_from, | |||
| available_datasets, query, retrieve_config.top_k, | |||
| retrieve_config.score_threshold, | |||
| retrieve_config.reranking_model.get('reranking_provider_name'), | |||
| retrieve_config.reranking_model.get('reranking_model_name')) | |||
| all_documents = self.multiple_retrieve( | |||
| app_id, tenant_id, user_id, user_from, | |||
| available_datasets, query, retrieve_config.top_k, | |||
| retrieve_config.score_threshold, | |||
| retrieve_config.reranking_model.get('reranking_provider_name'), | |||
| retrieve_config.reranking_model.get('reranking_model_name'), | |||
| message_id, | |||
| ) | |||
| document_score_list = {} | |||
| for item in all_documents: | |||
| @@ -189,16 +203,18 @@ class DatasetRetrieval: | |||
| return str("\n".join(document_context_list)) | |||
| return '' | |||
| def single_retrieve(self, app_id: str, | |||
| tenant_id: str, | |||
| user_id: str, | |||
| user_from: str, | |||
| available_datasets: list, | |||
| query: str, | |||
| model_instance: ModelInstance, | |||
| model_config: ModelConfigWithCredentialsEntity, | |||
| planning_strategy: PlanningStrategy, | |||
| ): | |||
| def single_retrieve( | |||
| self, app_id: str, | |||
| tenant_id: str, | |||
| user_id: str, | |||
| user_from: str, | |||
| available_datasets: list, | |||
| query: str, | |||
| model_instance: ModelInstance, | |||
| model_config: ModelConfigWithCredentialsEntity, | |||
| planning_strategy: PlanningStrategy, | |||
| message_id: Optional[str] = None, | |||
| ): | |||
| tools = [] | |||
| for dataset in available_datasets: | |||
| description = dataset.description | |||
| @@ -251,27 +267,35 @@ class DatasetRetrieval: | |||
| if score_threshold_enabled: | |||
| score_threshold = retrieval_model_config.get("score_threshold") | |||
| results = RetrievalService.retrieve(retrival_method=retrival_method, dataset_id=dataset.id, | |||
| query=query, | |||
| top_k=top_k, score_threshold=score_threshold, | |||
| reranking_model=reranking_model) | |||
| with measure_time() as timer: | |||
| results = RetrievalService.retrieve( | |||
| retrival_method=retrival_method, dataset_id=dataset.id, | |||
| query=query, | |||
| top_k=top_k, score_threshold=score_threshold, | |||
| reranking_model=reranking_model | |||
| ) | |||
| self._on_query(query, [dataset_id], app_id, user_from, user_id) | |||
| if results: | |||
| self._on_retrival_end(results) | |||
| self._on_retrival_end(results, message_id, timer) | |||
| return results | |||
| return [] | |||
| def multiple_retrieve(self, | |||
| app_id: str, | |||
| tenant_id: str, | |||
| user_id: str, | |||
| user_from: str, | |||
| available_datasets: list, | |||
| query: str, | |||
| top_k: int, | |||
| score_threshold: float, | |||
| reranking_provider_name: str, | |||
| reranking_model_name: str): | |||
| def multiple_retrieve( | |||
| self, | |||
| app_id: str, | |||
| tenant_id: str, | |||
| user_id: str, | |||
| user_from: str, | |||
| available_datasets: list, | |||
| query: str, | |||
| top_k: int, | |||
| score_threshold: float, | |||
| reranking_provider_name: str, | |||
| reranking_model_name: str, | |||
| message_id: Optional[str] = None, | |||
| ): | |||
| threads = [] | |||
| all_documents = [] | |||
| dataset_ids = [dataset.id for dataset in available_datasets] | |||
| @@ -297,15 +321,23 @@ class DatasetRetrieval: | |||
| ) | |||
| rerank_runner = RerankRunner(rerank_model_instance) | |||
| all_documents = rerank_runner.run(query, all_documents, | |||
| score_threshold, | |||
| top_k) | |||
| with measure_time() as timer: | |||
| all_documents = rerank_runner.run( | |||
| query, all_documents, | |||
| score_threshold, | |||
| top_k | |||
| ) | |||
| self._on_query(query, dataset_ids, app_id, user_from, user_id) | |||
| if all_documents: | |||
| self._on_retrival_end(all_documents) | |||
| self._on_retrival_end(all_documents, message_id, timer) | |||
| return all_documents | |||
| def _on_retrival_end(self, documents: list[Document]) -> None: | |||
| def _on_retrival_end( | |||
| self, documents: list[Document], message_id: Optional[str] = None, timer: Optional[dict] = None | |||
| ) -> None: | |||
| """Handle retrival end.""" | |||
| for document in documents: | |||
| query = db.session.query(DocumentSegment).filter( | |||
| @@ -324,6 +356,18 @@ class DatasetRetrieval: | |||
| db.session.commit() | |||
| # get tracing instance | |||
| trace_manager = self.application_generate_entity.trace_manager if self.application_generate_entity else None | |||
| if trace_manager: | |||
| trace_manager.add_trace_task( | |||
| TraceTask( | |||
| TraceTaskName.DATASET_RETRIEVAL_TRACE, | |||
| message_id=message_id, | |||
| documents=documents, | |||
| timer=timer | |||
| ) | |||
| ) | |||
| def _on_query(self, query: str, dataset_ids: list[str], app_id: str, user_from: str, user_id: str) -> None: | |||
| """ | |||
| Handle query. | |||
| @@ -31,9 +31,10 @@ class WorkflowTool(Tool): | |||
| :return: the tool provider type | |||
| """ | |||
| return ToolProviderType.WORKFLOW | |||
| def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) \ | |||
| -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: | |||
| def _invoke( | |||
| self, user_id: str, tool_parameters: dict[str, Any] | |||
| ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: | |||
| """ | |||
| invoke the tool | |||
| """ | |||
| @@ -2,7 +2,7 @@ import json | |||
| from copy import deepcopy | |||
| from datetime import datetime, timezone | |||
| from mimetypes import guess_type | |||
| from typing import Any, Union | |||
| from typing import Any, Optional, Union | |||
| from yarl import URL | |||
| @@ -10,6 +10,7 @@ from core.app.entities.app_invoke_entities import InvokeFrom | |||
| from core.callback_handler.agent_tool_callback_handler import DifyAgentCallbackHandler | |||
| from core.callback_handler.workflow_tool_callback_handler import DifyWorkflowCallbackHandler | |||
| from core.file.file_obj import FileTransferMethod | |||
| from core.ops.ops_trace_manager import TraceQueueManager | |||
| from core.tools.entities.tool_entities import ToolInvokeMessage, ToolInvokeMessageBinary, ToolInvokeMeta, ToolParameter | |||
| from core.tools.errors import ( | |||
| ToolEngineInvokeError, | |||
| @@ -32,10 +33,12 @@ class ToolEngine: | |||
| Tool runtime engine take care of the tool executions. | |||
| """ | |||
| @staticmethod | |||
| def agent_invoke(tool: Tool, tool_parameters: Union[str, dict], | |||
| user_id: str, tenant_id: str, message: Message, invoke_from: InvokeFrom, | |||
| agent_tool_callback: DifyAgentCallbackHandler) \ | |||
| -> tuple[str, list[tuple[MessageFile, bool]], ToolInvokeMeta]: | |||
| def agent_invoke( | |||
| tool: Tool, tool_parameters: Union[str, dict], | |||
| user_id: str, tenant_id: str, message: Message, invoke_from: InvokeFrom, | |||
| agent_tool_callback: DifyAgentCallbackHandler, | |||
| trace_manager: Optional[TraceQueueManager] = None | |||
| ) -> tuple[str, list[tuple[MessageFile, bool]], ToolInvokeMeta]: | |||
| """ | |||
| Agent invokes the tool with the given arguments. | |||
| """ | |||
| @@ -83,9 +86,11 @@ class ToolEngine: | |||
| # hit the callback handler | |||
| agent_tool_callback.on_tool_end( | |||
| tool_name=tool.identity.name, | |||
| tool_inputs=tool_parameters, | |||
| tool_outputs=plain_text | |||
| tool_name=tool.identity.name, | |||
| tool_inputs=tool_parameters, | |||
| tool_outputs=plain_text, | |||
| message_id=message.id, | |||
| trace_manager=trace_manager | |||
| ) | |||
| # transform tool invoke message to get LLM friendly message | |||
| @@ -121,8 +126,8 @@ class ToolEngine: | |||
| def workflow_invoke(tool: Tool, tool_parameters: dict, | |||
| user_id: str, workflow_id: str, | |||
| workflow_tool_callback: DifyWorkflowCallbackHandler, | |||
| workflow_call_depth: int) \ | |||
| -> list[ToolInvokeMessage]: | |||
| workflow_call_depth: int, | |||
| ) -> list[ToolInvokeMessage]: | |||
| """ | |||
| Workflow invokes the tool with the given arguments. | |||
| """ | |||
| @@ -140,9 +145,9 @@ class ToolEngine: | |||
| # hit the callback handler | |||
| workflow_tool_callback.on_tool_end( | |||
| tool_name=tool.identity.name, | |||
| tool_inputs=tool_parameters, | |||
| tool_outputs=response | |||
| tool_name=tool.identity.name, | |||
| tool_inputs=tool_parameters, | |||
| tool_outputs=response, | |||
| ) | |||
| return response | |||
| @@ -66,44 +66,43 @@ class ParameterExtractorNode(LLMNode): | |||
| } | |||
| } | |||
| def _run(self, variable_pool: VariablePool) -> NodeRunResult: | |||
| """ | |||
| Run the node. | |||
| """ | |||
| node_data = cast(ParameterExtractorNodeData, self.node_data) | |||
| query = variable_pool.get_variable_value(node_data.query) | |||
| if not query: | |||
| raise ValueError("Query not found") | |||
| inputs={ | |||
| raise ValueError("Input variable content not found or is empty") | |||
| inputs = { | |||
| 'query': query, | |||
| 'parameters': jsonable_encoder(node_data.parameters), | |||
| 'instruction': jsonable_encoder(node_data.instruction), | |||
| } | |||
| model_instance, model_config = self._fetch_model_config(node_data.model) | |||
| if not isinstance(model_instance.model_type_instance, LargeLanguageModel): | |||
| raise ValueError("Model is not a Large Language Model") | |||
| llm_model = model_instance.model_type_instance | |||
| model_schema = llm_model.get_model_schema(model_config.model, model_config.credentials) | |||
| if not model_schema: | |||
| raise ValueError("Model schema not found") | |||
| # fetch memory | |||
| memory = self._fetch_memory(node_data.memory, variable_pool, model_instance) | |||
| if set(model_schema.features or []) & set([ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL]) \ | |||
| and node_data.reasoning_mode == 'function_call': | |||
| and node_data.reasoning_mode == 'function_call': | |||
| # use function call | |||
| prompt_messages, prompt_message_tools = self._generate_function_call_prompt( | |||
| node_data, query, variable_pool, model_config, memory | |||
| ) | |||
| else: | |||
| # use prompt engineering | |||
| prompt_messages = self._generate_prompt_engineering_prompt(node_data, query, variable_pool, model_config, memory) | |||
| prompt_messages = self._generate_prompt_engineering_prompt(node_data, query, variable_pool, model_config, | |||
| memory) | |||
| prompt_message_tools = [] | |||
| process_data = { | |||
| @@ -202,7 +201,7 @@ class ParameterExtractorNode(LLMNode): | |||
| # handle invoke result | |||
| if not isinstance(invoke_result, LLMResult): | |||
| raise ValueError(f"Invalid invoke result: {invoke_result}") | |||
| text = invoke_result.message.content | |||
| usage = invoke_result.usage | |||
| tool_call = invoke_result.message.tool_calls[0] if invoke_result.message.tool_calls else None | |||
| @@ -212,21 +211,23 @@ class ParameterExtractorNode(LLMNode): | |||
| return text, usage, tool_call | |||
| def _generate_function_call_prompt(self, | |||
| node_data: ParameterExtractorNodeData, | |||
| query: str, | |||
| variable_pool: VariablePool, | |||
| model_config: ModelConfigWithCredentialsEntity, | |||
| memory: Optional[TokenBufferMemory], | |||
| ) -> tuple[list[PromptMessage], list[PromptMessageTool]]: | |||
| def _generate_function_call_prompt(self, | |||
| node_data: ParameterExtractorNodeData, | |||
| query: str, | |||
| variable_pool: VariablePool, | |||
| model_config: ModelConfigWithCredentialsEntity, | |||
| memory: Optional[TokenBufferMemory], | |||
| ) -> tuple[list[PromptMessage], list[PromptMessageTool]]: | |||
| """ | |||
| Generate function call prompt. | |||
| """ | |||
| query = FUNCTION_CALLING_EXTRACTOR_USER_TEMPLATE.format(content=query, structure=json.dumps(node_data.get_parameter_json_schema())) | |||
| query = FUNCTION_CALLING_EXTRACTOR_USER_TEMPLATE.format(content=query, structure=json.dumps( | |||
| node_data.get_parameter_json_schema())) | |||
| prompt_transform = AdvancedPromptTransform(with_variable_tmpl=True) | |||
| rest_token = self._calculate_rest_token(node_data, query, variable_pool, model_config, '') | |||
| prompt_template = self._get_function_calling_prompt_template(node_data, query, variable_pool, memory, rest_token) | |||
| prompt_template = self._get_function_calling_prompt_template(node_data, query, variable_pool, memory, | |||
| rest_token) | |||
| prompt_messages = prompt_transform.get_prompt( | |||
| prompt_template=prompt_template, | |||
| inputs={}, | |||
| @@ -259,8 +260,8 @@ class ParameterExtractorNode(LLMNode): | |||
| function=AssistantPromptMessage.ToolCall.ToolCallFunction( | |||
| name=example['assistant']['function_call']['name'], | |||
| arguments=json.dumps(example['assistant']['function_call']['parameters'] | |||
| ) | |||
| )) | |||
| ) | |||
| )) | |||
| ] | |||
| ), | |||
| ToolPromptMessage( | |||
| @@ -273,8 +274,8 @@ class ParameterExtractorNode(LLMNode): | |||
| ]) | |||
| prompt_messages = prompt_messages[:last_user_message_idx] + \ | |||
| example_messages + prompt_messages[last_user_message_idx:] | |||
| example_messages + prompt_messages[last_user_message_idx:] | |||
| # generate tool | |||
| tool = PromptMessageTool( | |||
| name=FUNCTION_CALLING_EXTRACTOR_NAME, | |||
| @@ -284,13 +285,13 @@ class ParameterExtractorNode(LLMNode): | |||
| return prompt_messages, [tool] | |||
| def _generate_prompt_engineering_prompt(self, | |||
| data: ParameterExtractorNodeData, | |||
| query: str, | |||
| variable_pool: VariablePool, | |||
| model_config: ModelConfigWithCredentialsEntity, | |||
| memory: Optional[TokenBufferMemory], | |||
| ) -> list[PromptMessage]: | |||
| def _generate_prompt_engineering_prompt(self, | |||
| data: ParameterExtractorNodeData, | |||
| query: str, | |||
| variable_pool: VariablePool, | |||
| model_config: ModelConfigWithCredentialsEntity, | |||
| memory: Optional[TokenBufferMemory], | |||
| ) -> list[PromptMessage]: | |||
| """ | |||
| Generate prompt engineering prompt. | |||
| """ | |||
| @@ -308,18 +309,19 @@ class ParameterExtractorNode(LLMNode): | |||
| raise ValueError(f"Invalid model mode: {model_mode}") | |||
| def _generate_prompt_engineering_completion_prompt(self, | |||
| node_data: ParameterExtractorNodeData, | |||
| query: str, | |||
| variable_pool: VariablePool, | |||
| model_config: ModelConfigWithCredentialsEntity, | |||
| memory: Optional[TokenBufferMemory], | |||
| ) -> list[PromptMessage]: | |||
| node_data: ParameterExtractorNodeData, | |||
| query: str, | |||
| variable_pool: VariablePool, | |||
| model_config: ModelConfigWithCredentialsEntity, | |||
| memory: Optional[TokenBufferMemory], | |||
| ) -> list[PromptMessage]: | |||
| """ | |||
| Generate completion prompt. | |||
| """ | |||
| prompt_transform = AdvancedPromptTransform(with_variable_tmpl=True) | |||
| rest_token = self._calculate_rest_token(node_data, query, variable_pool, model_config, '') | |||
| prompt_template = self._get_prompt_engineering_prompt_template(node_data, query, variable_pool, memory, rest_token) | |||
| prompt_template = self._get_prompt_engineering_prompt_template(node_data, query, variable_pool, memory, | |||
| rest_token) | |||
| prompt_messages = prompt_transform.get_prompt( | |||
| prompt_template=prompt_template, | |||
| inputs={ | |||
| @@ -336,23 +338,23 @@ class ParameterExtractorNode(LLMNode): | |||
| return prompt_messages | |||
| def _generate_prompt_engineering_chat_prompt(self, | |||
| node_data: ParameterExtractorNodeData, | |||
| query: str, | |||
| variable_pool: VariablePool, | |||
| model_config: ModelConfigWithCredentialsEntity, | |||
| memory: Optional[TokenBufferMemory], | |||
| ) -> list[PromptMessage]: | |||
| node_data: ParameterExtractorNodeData, | |||
| query: str, | |||
| variable_pool: VariablePool, | |||
| model_config: ModelConfigWithCredentialsEntity, | |||
| memory: Optional[TokenBufferMemory], | |||
| ) -> list[PromptMessage]: | |||
| """ | |||
| Generate chat prompt. | |||
| """ | |||
| prompt_transform = AdvancedPromptTransform(with_variable_tmpl=True) | |||
| rest_token = self._calculate_rest_token(node_data, query, variable_pool, model_config, '') | |||
| prompt_template = self._get_prompt_engineering_prompt_template( | |||
| node_data, | |||
| node_data, | |||
| CHAT_GENERATE_JSON_USER_MESSAGE_TEMPLATE.format( | |||
| structure=json.dumps(node_data.get_parameter_json_schema()), | |||
| text=query | |||
| ), | |||
| ), | |||
| variable_pool, memory, rest_token | |||
| ) | |||
| @@ -387,7 +389,7 @@ class ParameterExtractorNode(LLMNode): | |||
| ]) | |||
| prompt_messages = prompt_messages[:last_user_message_idx] + \ | |||
| example_messages + prompt_messages[last_user_message_idx:] | |||
| example_messages + prompt_messages[last_user_message_idx:] | |||
| return prompt_messages | |||
| @@ -397,23 +399,23 @@ class ParameterExtractorNode(LLMNode): | |||
| """ | |||
| if len(data.parameters) != len(result): | |||
| raise ValueError("Invalid number of parameters") | |||
| for parameter in data.parameters: | |||
| if parameter.required and parameter.name not in result: | |||
| raise ValueError(f"Parameter {parameter.name} is required") | |||
| if parameter.type == 'select' and parameter.options and result.get(parameter.name) not in parameter.options: | |||
| raise ValueError(f"Invalid `select` value for parameter {parameter.name}") | |||
| if parameter.type == 'number' and not isinstance(result.get(parameter.name), int | float): | |||
| raise ValueError(f"Invalid `number` value for parameter {parameter.name}") | |||
| if parameter.type == 'bool' and not isinstance(result.get(parameter.name), bool): | |||
| raise ValueError(f"Invalid `bool` value for parameter {parameter.name}") | |||
| if parameter.type == 'string' and not isinstance(result.get(parameter.name), str): | |||
| raise ValueError(f"Invalid `string` value for parameter {parameter.name}") | |||
| if parameter.type.startswith('array'): | |||
| if not isinstance(result.get(parameter.name), list): | |||
| raise ValueError(f"Invalid `array` value for parameter {parameter.name}") | |||
| @@ -499,6 +501,7 @@ class ParameterExtractorNode(LLMNode): | |||
| """ | |||
| Extract complete json response. | |||
| """ | |||
| def extract_json(text): | |||
| """ | |||
| From a given JSON started from '{' or '[' extract the complete JSON object. | |||
| @@ -515,11 +518,11 @@ class ParameterExtractorNode(LLMNode): | |||
| if (c == '}' and stack[-1] == '{') or (c == ']' and stack[-1] == '['): | |||
| stack.pop() | |||
| if not stack: | |||
| return text[:i+1] | |||
| return text[:i + 1] | |||
| else: | |||
| return text[:i] | |||
| return None | |||
| # extract json from the text | |||
| for idx in range(len(result)): | |||
| if result[idx] == '{' or result[idx] == '[': | |||
| @@ -536,9 +539,9 @@ class ParameterExtractorNode(LLMNode): | |||
| """ | |||
| if not tool_call or not tool_call.function.arguments: | |||
| return None | |||
| return json.loads(tool_call.function.arguments) | |||
| def _generate_default_result(self, data: ParameterExtractorNodeData) -> dict: | |||
| """ | |||
| Generate default result. | |||
| @@ -551,7 +554,7 @@ class ParameterExtractorNode(LLMNode): | |||
| result[parameter.name] = False | |||
| elif parameter.type in ['string', 'select']: | |||
| result[parameter.name] = '' | |||
| return result | |||
| def _render_instruction(self, instruction: str, variable_pool: VariablePool) -> str: | |||
| @@ -562,13 +565,13 @@ class ParameterExtractorNode(LLMNode): | |||
| inputs = {} | |||
| for selector in variable_template_parser.extract_variable_selectors(): | |||
| inputs[selector.variable] = variable_pool.get_variable_value(selector.value_selector) | |||
| return variable_template_parser.format(inputs) | |||
| def _get_function_calling_prompt_template(self, node_data: ParameterExtractorNodeData, query: str, | |||
| variable_pool: VariablePool, | |||
| memory: Optional[TokenBufferMemory], | |||
| max_token_limit: int = 2000) \ | |||
| variable_pool: VariablePool, | |||
| memory: Optional[TokenBufferMemory], | |||
| max_token_limit: int = 2000) \ | |||
| -> list[ChatModelMessage]: | |||
| model_mode = ModelMode.value_of(node_data.model.mode) | |||
| input_text = query | |||
| @@ -590,12 +593,12 @@ class ParameterExtractorNode(LLMNode): | |||
| return [system_prompt_messages, user_prompt_message] | |||
| else: | |||
| raise ValueError(f"Model mode {model_mode} not support.") | |||
| def _get_prompt_engineering_prompt_template(self, node_data: ParameterExtractorNodeData, query: str, | |||
| variable_pool: VariablePool, | |||
| memory: Optional[TokenBufferMemory], | |||
| max_token_limit: int = 2000) \ | |||
| -> list[ChatModelMessage]: | |||
| -> list[ChatModelMessage]: | |||
| model_mode = ModelMode.value_of(node_data.model.mode) | |||
| input_text = query | |||
| @@ -620,8 +623,8 @@ class ParameterExtractorNode(LLMNode): | |||
| text=COMPLETION_GENERATE_JSON_PROMPT.format(histories=memory_str, | |||
| text=input_text, | |||
| instruction=instruction) | |||
| .replace('{γγγ', '') | |||
| .replace('}γγγ', '') | |||
| .replace('{γγγ', '') | |||
| .replace('}γγγ', '') | |||
| ) | |||
| else: | |||
| raise ValueError(f"Model mode {model_mode} not support.") | |||
| @@ -635,7 +638,7 @@ class ParameterExtractorNode(LLMNode): | |||
| model_instance, model_config = self._fetch_model_config(node_data.model) | |||
| if not isinstance(model_instance.model_type_instance, LargeLanguageModel): | |||
| raise ValueError("Model is not a Large Language Model") | |||
| llm_model = model_instance.model_type_instance | |||
| model_schema = llm_model.get_model_schema(model_config.model, model_config.credentials) | |||
| if not model_schema: | |||
| @@ -667,7 +670,7 @@ class ParameterExtractorNode(LLMNode): | |||
| model_config.model, | |||
| model_config.credentials, | |||
| prompt_messages | |||
| ) + 1000 # add 1000 to ensure tool call messages | |||
| ) + 1000 # add 1000 to ensure tool call messages | |||
| max_tokens = 0 | |||
| for parameter_rule in model_config.model_schema.parameter_rules: | |||
| @@ -680,8 +683,9 @@ class ParameterExtractorNode(LLMNode): | |||
| rest_tokens = max(rest_tokens, 0) | |||
| return rest_tokens | |||
| def _fetch_model_config(self, node_data_model: ModelConfig) -> tuple[ModelInstance, ModelConfigWithCredentialsEntity]: | |||
| def _fetch_model_config(self, node_data_model: ModelConfig) -> tuple[ | |||
| ModelInstance, ModelConfigWithCredentialsEntity]: | |||
| """ | |||
| Fetch model config. | |||
| """ | |||
| @@ -689,9 +693,10 @@ class ParameterExtractorNode(LLMNode): | |||
| self._model_instance, self._model_config = super()._fetch_model_config(node_data_model) | |||
| return self._model_instance, self._model_config | |||
| @classmethod | |||
| def _extract_variable_selector_to_variable_mapping(cls, node_data: ParameterExtractorNodeData) -> dict[str, list[str]]: | |||
| def _extract_variable_selector_to_variable_mapping(cls, node_data: ParameterExtractorNodeData) -> dict[ | |||
| str, list[str]]: | |||
| """ | |||
| Extract variable selector to variable mapping | |||
| :param node_data: node data | |||
| @@ -708,4 +713,4 @@ class ParameterExtractorNode(LLMNode): | |||
| for selector in variable_template_parser.extract_variable_selectors(): | |||
| variable_mapping[selector.variable] = selector.value_selector | |||
| return variable_mapping | |||
| return variable_mapping | |||
| @@ -50,6 +50,7 @@ app_detail_fields = { | |||
| 'enable_site': fields.Boolean, | |||
| 'enable_api': fields.Boolean, | |||
| 'model_config': fields.Nested(model_config_fields, attribute='app_model_config', allow_null=True), | |||
| 'tracing': fields.Raw, | |||
| 'created_at': TimestampField | |||
| } | |||
| @@ -0,0 +1,49 @@ | |||
| """update AppModelConfig and add table TracingAppConfig | |||
| Revision ID: 04c602f5dc9b | |||
| Revises: 4e99a8df00ff | |||
| Create Date: 2024-06-12 07:49:07.666510 | |||
| """ | |||
| import sqlalchemy as sa | |||
| from alembic import op | |||
| import models as models | |||
| # revision identifiers, used by Alembic. | |||
| revision = '04c602f5dc9b' | |||
| down_revision = '4ff534e1eb11' | |||
| branch_labels = None | |||
| depends_on = None | |||
| def upgrade(): | |||
| # ### commands auto generated by Alembic - please adjust! ### | |||
| op.create_table('tracing_app_configs', | |||
| sa.Column('id', models.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), | |||
| sa.Column('app_id', models.StringUUID(), nullable=False), | |||
| sa.Column('tracing_provider', sa.String(length=255), nullable=True), | |||
| sa.Column('tracing_config', sa.JSON(), nullable=True), | |||
| sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), | |||
| sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), | |||
| sa.PrimaryKeyConstraint('id', name='tracing_app_config_pkey') | |||
| ) | |||
| with op.batch_alter_table('tracing_app_configs', schema=None) as batch_op: | |||
| batch_op.create_index('tracing_app_config_app_id_idx', ['app_id'], unique=False) | |||
| with op.batch_alter_table('app_model_configs', schema=None) as batch_op: | |||
| batch_op.add_column(sa.Column('trace_config', sa.Text(), nullable=True)) | |||
| # ### end Alembic commands ### | |||
| def downgrade(): | |||
| # ### commands auto generated by Alembic - please adjust! ### | |||
| with op.batch_alter_table('app_model_configs', schema=None) as batch_op: | |||
| batch_op.drop_column('trace_config') | |||
| with op.batch_alter_table('tracing_app_configs', schema=None) as batch_op: | |||
| batch_op.drop_index('tracing_app_config_app_id_idx') | |||
| op.drop_table('tracing_app_configs') | |||
| # ### end Alembic commands ### | |||
| @@ -0,0 +1,39 @@ | |||
| """add app tracing | |||
| Revision ID: 2a3aebbbf4bb | |||
| Revises: c031d46af369 | |||
| Create Date: 2024-06-17 10:08:54.803701 | |||
| """ | |||
| import sqlalchemy as sa | |||
| from alembic import op | |||
| import models as models | |||
| # revision identifiers, used by Alembic. | |||
| revision = '2a3aebbbf4bb' | |||
| down_revision = 'c031d46af369' | |||
| branch_labels = None | |||
| depends_on = None | |||
| def upgrade(): | |||
| # ### commands auto generated by Alembic - please adjust! ### | |||
| with op.batch_alter_table('apps', schema=None) as batch_op: | |||
| batch_op.add_column(sa.Column('tracing', sa.Text(), nullable=True)) | |||
| with op.batch_alter_table('trace_app_config', schema=None) as batch_op: | |||
| batch_op.create_index('tracing_app_config_app_id_idx', ['app_id'], unique=False) | |||
| # ### end Alembic commands ### | |||
| def downgrade(): | |||
| # ### commands auto generated by Alembic - please adjust! ### | |||
| with op.batch_alter_table('trace_app_config', schema=None) as batch_op: | |||
| batch_op.drop_index('tracing_app_config_app_id_idx') | |||
| with op.batch_alter_table('apps', schema=None) as batch_op: | |||
| batch_op.drop_column('tracing') | |||
| # ### end Alembic commands ### | |||
| @@ -0,0 +1,66 @@ | |||
| """remove app model config trace config and rename trace app config | |||
| Revision ID: c031d46af369 | |||
| Revises: 04c602f5dc9b | |||
| Create Date: 2024-06-17 10:01:00.255189 | |||
| """ | |||
| import sqlalchemy as sa | |||
| from alembic import op | |||
| from sqlalchemy.dialects import postgresql | |||
| import models as models | |||
| # revision identifiers, used by Alembic. | |||
| revision = 'c031d46af369' | |||
| down_revision = '04c602f5dc9b' | |||
| branch_labels = None | |||
| depends_on = None | |||
| def upgrade(): | |||
| # ### commands auto generated by Alembic - please adjust! ### | |||
| op.create_table('trace_app_config', | |||
| sa.Column('id', models.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), | |||
| sa.Column('app_id', models.StringUUID(), nullable=False), | |||
| sa.Column('tracing_provider', sa.String(length=255), nullable=True), | |||
| sa.Column('tracing_config', sa.JSON(), nullable=True), | |||
| sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), | |||
| sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), | |||
| sa.Column('is_active', sa.Boolean(), server_default=sa.text('true'), nullable=False), | |||
| sa.PrimaryKeyConstraint('id', name='trace_app_config_pkey') | |||
| ) | |||
| with op.batch_alter_table('trace_app_config', schema=None) as batch_op: | |||
| batch_op.create_index('trace_app_config_app_id_idx', ['app_id'], unique=False) | |||
| with op.batch_alter_table('tracing_app_configs', schema=None) as batch_op: | |||
| batch_op.drop_index('tracing_app_config_app_id_idx') | |||
| with op.batch_alter_table('app_model_configs', schema=None) as batch_op: | |||
| batch_op.drop_column('trace_config') | |||
| # ### end Alembic commands ### | |||
| def downgrade(): | |||
| # ### commands auto generated by Alembic - please adjust! ### | |||
| with op.batch_alter_table('app_model_configs', schema=None) as batch_op: | |||
| batch_op.add_column(sa.Column('trace_config', sa.TEXT(), autoincrement=False, nullable=True)) | |||
| op.create_table('tracing_app_configs', | |||
| sa.Column('id', sa.UUID(), server_default=sa.text('uuid_generate_v4()'), autoincrement=False, nullable=False), | |||
| sa.Column('app_id', sa.UUID(), autoincrement=False, nullable=False), | |||
| sa.Column('tracing_provider', sa.VARCHAR(length=255), autoincrement=False, nullable=True), | |||
| sa.Column('tracing_config', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True), | |||
| sa.Column('created_at', postgresql.TIMESTAMP(), server_default=sa.text('now()'), autoincrement=False, nullable=False), | |||
| sa.Column('updated_at', postgresql.TIMESTAMP(), server_default=sa.text('now()'), autoincrement=False, nullable=False), | |||
| sa.PrimaryKeyConstraint('id', name='trace_app_config_pkey') | |||
| ) | |||
| with op.batch_alter_table('tracing_app_configs', schema=None) as batch_op: | |||
| batch_op.create_index('trace_app_config_app_id_idx', ['app_id'], unique=False) | |||
| with op.batch_alter_table('trace_app_config', schema=None) as batch_op: | |||
| batch_op.drop_index('trace_app_config_app_id_idx') | |||
| op.drop_table('trace_app_config') | |||
| # ### end Alembic commands ### | |||
| @@ -6,7 +6,7 @@ from typing import Optional | |||
| from flask import current_app, request | |||
| from flask_login import UserMixin | |||
| from sqlalchemy import Float, text | |||
| from sqlalchemy import Float, func, text | |||
| from core.file.tool_file_parser import ToolFileParser | |||
| from core.file.upload_file_parser import UploadFileParser | |||
| @@ -73,6 +73,7 @@ class App(db.Model): | |||
| is_demo = db.Column(db.Boolean, nullable=False, server_default=db.text('false')) | |||
| is_public = db.Column(db.Boolean, nullable=False, server_default=db.text('false')) | |||
| is_universal = db.Column(db.Boolean, nullable=False, server_default=db.text('false')) | |||
| tracing = db.Column(db.Text, nullable=True) | |||
| created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) | |||
| updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) | |||
| @@ -1328,3 +1329,38 @@ class TagBinding(db.Model): | |||
| target_id = db.Column(StringUUID, nullable=True) | |||
| created_by = db.Column(StringUUID, nullable=False) | |||
| created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) | |||
| class TraceAppConfig(db.Model): | |||
| __tablename__ = 'trace_app_config' | |||
| __table_args__ = ( | |||
| db.PrimaryKeyConstraint('id', name='tracing_app_config_pkey'), | |||
| db.Index('tracing_app_config_app_id_idx', 'app_id'), | |||
| ) | |||
| id = db.Column(StringUUID, server_default=db.text('uuid_generate_v4()')) | |||
| app_id = db.Column(StringUUID, nullable=False) | |||
| tracing_provider = db.Column(db.String(255), nullable=True) | |||
| tracing_config = db.Column(db.JSON, nullable=True) | |||
| created_at = db.Column(db.DateTime, nullable=False, server_default=func.now()) | |||
| updated_at = db.Column(db.DateTime, nullable=False, server_default=func.now(), onupdate=func.now()) | |||
| is_active = db.Column(db.Boolean, nullable=False, server_default=db.text('true')) | |||
| @property | |||
| def tracing_config_dict(self): | |||
| return self.tracing_config if self.tracing_config else {} | |||
| @property | |||
| def tracing_config_str(self): | |||
| return json.dumps(self.tracing_config_dict) | |||
| def to_dict(self): | |||
| return { | |||
| 'id': self.id, | |||
| 'app_id': self.app_id, | |||
| 'tracing_provider': self.tracing_provider, | |||
| 'tracing_config': self.tracing_config_dict, | |||
| "is_active": self.is_active, | |||
| "created_at": self.created_at.__str__() if self.created_at else None, | |||
| 'updated_at': self.updated_at.__str__() if self.updated_at else None, | |||
| } | |||
| @@ -572,53 +572,54 @@ crt = ["awscrt (==0.19.12)"] | |||
| [[package]] | |||
| name = "bottleneck" | |||
| version = "1.4.0" | |||
| version = "1.3.8" | |||
| description = "Fast NumPy array functions written in C" | |||
| optional = false | |||
| python-versions = "*" | |||
| files = [ | |||
| {file = "Bottleneck-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2110af22aa8c2779faba8aa021d6b559df04449bdf21d510eacd7910934189fe"}, | |||
| {file = "Bottleneck-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:381cbd1e52338fcdf9ff01c962e6aa187b2d8b3b369d42e779b6d33ac61f8d35"}, | |||
| {file = "Bottleneck-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a91e40bbb8452e77772614d882be2c34b3b514d9f15460f703293525a6e173d"}, | |||
| {file = "Bottleneck-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:59604949aea476f5075b965129eaa3c2d90891fd43b0dfaf2ad7621bb5db14a5"}, | |||
| {file = "Bottleneck-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c2c92545e1bc8e859d8d137aefa3b24843bd374b17c9814dafa3bbcea9fc4ec0"}, | |||
| {file = "Bottleneck-1.4.0-cp310-cp310-win32.whl", hash = "sha256:f63e79bfa2f82a7432c8b147ed321d01ca7769bc17cc04644286a4ce58d30549"}, | |||
| {file = "Bottleneck-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:d69907d8d679cb5091a3f479c46bf1076f149f6311ff3298bac5089b86a2fab1"}, | |||
| {file = "Bottleneck-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:67347b0f01f32a232a6269c37afc1c079e08f6455fa12e91f4a1cd12eb0d11a5"}, | |||
| {file = "Bottleneck-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1490348b3bbc0225523dc2c00c6bb3e66168c537d62797bd29783c0826c09838"}, | |||
| {file = "Bottleneck-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a704165552496cbcc8bcc5921bb679fd6fa66bb1e758888de091b1223231c9f0"}, | |||
| {file = "Bottleneck-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ffb4e4edf7997069719b9269926cc00a2a12c6e015422d1ebc2f621c4541396a"}, | |||
| {file = "Bottleneck-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5d6bf45ed58d5e7414c0011ef2da75474fe597a51970df83596b0bcb79c14c5e"}, | |||
| {file = "Bottleneck-1.4.0-cp311-cp311-win32.whl", hash = "sha256:ed209f8f3cb9954773764b0fa2510a7a9247ad245593187ac90bd0747771bc5c"}, | |||
| {file = "Bottleneck-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d53f1a72b12cfd76b56934c33bc0cb7c1a295f23a2d3ffba8c764514c9b5e0ff"}, | |||
| {file = "Bottleneck-1.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e720ff24370324c84a82b1a18195274715c23181748b2b9e3dacad24198ca06f"}, | |||
| {file = "Bottleneck-1.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44305c70c2a1539b0ae968e033f301ad868a6146b47e3cccd73fdfe3fc07c4ee"}, | |||
| {file = "Bottleneck-1.4.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b4dac5d2a871b7bd296c2b92426daa27d5b07aa84ef2557db097d29135da4eb"}, | |||
| {file = "Bottleneck-1.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fbcdd01db9e27741fb16a02b720cf02389d4b0b99cefe3c834c7df88c2d7412d"}, | |||
| {file = "Bottleneck-1.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:14b3334a39308fbb05dacd35ac100842aa9e9bc70afbdcebe43e46179d183fd0"}, | |||
| {file = "Bottleneck-1.4.0-cp312-cp312-win32.whl", hash = "sha256:520d7a83cd48b3f58e5df1a258acb547f8a5386a8c21ca9e1058d83a0d622fdf"}, | |||
| {file = "Bottleneck-1.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b1339b9ad3ee217253f246cde5c3789eb527cf9dd31ff0a1f5a8bf7fc89eadad"}, | |||
| {file = "Bottleneck-1.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2749602200aaa0e12a0f3f936dd6d4035384ad10d3acf7ac4f418c501683397"}, | |||
| {file = "Bottleneck-1.4.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb79a2ac135567694f13339f0bebcee96aec09c596b324b61cd7fd5e306f49d"}, | |||
| {file = "Bottleneck-1.4.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c6097bf39723e76ff5bba160daab92ae599df212c859db8d46648548584d04a8"}, | |||
| {file = "Bottleneck-1.4.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b5f72b66ccc0272de46b67346cf8490737ba2adc6a302664f5326e7741b6d5ab"}, | |||
| {file = "Bottleneck-1.4.0-cp37-cp37m-win32.whl", hash = "sha256:9903f017b9d6f2f69ce241b424ddad7265624f64dc6eafbe257d45661febf8bd"}, | |||
| {file = "Bottleneck-1.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:834816c316ad184cae7ecb615b69876a42cd2cafb07ee66c57a9c1ccacb63339"}, | |||
| {file = "Bottleneck-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:03c43150f180d86a5633a6da788660d335983f6798fca306ba7f47ff27a1b7e7"}, | |||
| {file = "Bottleneck-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea333dbcadb780356c54f5c4fa7754f143573b57508fff43d5daf63298eb26a"}, | |||
| {file = "Bottleneck-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6179791c0119aec3708ef74ddadab8d183e3742adb93a9028718e8696bdf572b"}, | |||
| {file = "Bottleneck-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:220b72405f77aebb0137b733b464c2526ded471e4289ac1e840bab8852759a55"}, | |||
| {file = "Bottleneck-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8746f0f727997ce4c7457dc1fec4e4e3c0fdd8803514baa3d1c4ea6515ab04b2"}, | |||
| {file = "Bottleneck-1.4.0-cp38-cp38-win32.whl", hash = "sha256:6a36280ee33d9db799163f04e88b950261e590cc71d089f5e179b21680b5d491"}, | |||
| {file = "Bottleneck-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:de17e012694e6a987bb4eb050dd7f0cf939195a8e00cb23aa93ebee5fd5e64a8"}, | |||
| {file = "Bottleneck-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28260197ab8a4a6b7adf810523147b1a3e85607f4e26a0f685eb9d155cfc75af"}, | |||
| {file = "Bottleneck-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:90d5d188a0cca0b9655ff2904ee61e7f183079e97550be98c2541a2eec358a72"}, | |||
| {file = "Bottleneck-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2861ff645d236f1a6f5c6d1ddb3db37d19af1d91057bdc4fd7b76299a15b3079"}, | |||
| {file = "Bottleneck-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6136ce7dcf825c432a20b80ab1c460264a437d8430fff32536176147e0b6b832"}, | |||
| {file = "Bottleneck-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:889e6855b77345622b4ba927335d3118745d590492941f5f78554f157d259e92"}, | |||
| {file = "Bottleneck-1.4.0-cp39-cp39-win32.whl", hash = "sha256:817aa43a671ede696ea023d8f35839a391244662340cc95a0f46965dda8b35cf"}, | |||
| {file = "Bottleneck-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:23834d82177d6997f21fa63156550668cd07a9a6e5a1b66ea80f1a14ac6ffd07"}, | |||
| {file = "bottleneck-1.4.0.tar.gz", hash = "sha256:beb36df519b8709e7d357c0c9639b03b885ca6355bbf5e53752c685de51605b8"}, | |||
| {file = "Bottleneck-1.3.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:865c8ed5b798c0198b0b80553e09cc0d890c4f5feb3d81d31661517ca7819fa3"}, | |||
| {file = "Bottleneck-1.3.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d073a31e259d40b25e29dbba80f73abf38afe98fd730c79dad7edd9a0ad6cff5"}, | |||
| {file = "Bottleneck-1.3.8-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b806b277ab47495032822f55f43b8d336e4b7e73f8506ed34d3ea3da6d644abc"}, | |||
| {file = "Bottleneck-1.3.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:770b517609916adeb39d3b1a386a29bc316da03dd61e7ee6e8a38325b80cc327"}, | |||
| {file = "Bottleneck-1.3.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2948502b0394ee419945b55b092585222a505c61d41a874c741be49f2cac056f"}, | |||
| {file = "Bottleneck-1.3.8-cp310-cp310-win32.whl", hash = "sha256:271b6333522beb8aee32e640ba49a2064491d2c10317baa58a5996be3dd443e4"}, | |||
| {file = "Bottleneck-1.3.8-cp310-cp310-win_amd64.whl", hash = "sha256:d41000ea7ca196b5fd39d6fccd34bf0704c8831731cedd2da2dcae3c6ac49c42"}, | |||
| {file = "Bottleneck-1.3.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d0a7f454394cd3642498b6e077e70f4a6b9fd46a8eb908c83ac737fdc9f9a98c"}, | |||
| {file = "Bottleneck-1.3.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c4ea8b9024dcb4e83b5c118a3c8faa863ace2ad572849da548a74a8ee4e8f2a"}, | |||
| {file = "Bottleneck-1.3.8-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f40724b6e965ff5b88b333d4a10097b1629e60c0db21bb3d08c24d7b1a904a16"}, | |||
| {file = "Bottleneck-1.3.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4bd7183b8dcca89d0e65abe4507c19667dd31dacfbcc8ed705bad642f26a46e1"}, | |||
| {file = "Bottleneck-1.3.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:20aa31a7d9d747c499ace1610a6e1f7aba6e3d4a9923e0312f6b4b6d68a59af3"}, | |||
| {file = "Bottleneck-1.3.8-cp311-cp311-win32.whl", hash = "sha256:350520105d9449e6565b3f0c4ce1f80a0b3e4d63695ebbf29db41f62e13f6461"}, | |||
| {file = "Bottleneck-1.3.8-cp311-cp311-win_amd64.whl", hash = "sha256:167a278902775defde7dfded6e98e3707dfe54971ffd9aec25c43bc74e4e381a"}, | |||
| {file = "Bottleneck-1.3.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c6e93ed45c6c83392f73d0333b310b38772df7eb78c120c1447245691bdedaf4"}, | |||
| {file = "Bottleneck-1.3.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3400f47dda0196b5af50b0b0678e33cc8c42e52e55ae0a63cdfed60725659bc"}, | |||
| {file = "Bottleneck-1.3.8-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fba5fd1805c71b2eeea50bea93d59be449c4af23ebd8da5f75fd74fd0331e314"}, | |||
| {file = "Bottleneck-1.3.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:60139c5c3d2a9c1454a04af5ee981a9f56548d27fa36f264069b149a6e9b01ed"}, | |||
| {file = "Bottleneck-1.3.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:99fab17fa26c811ccad63e208314726e718ae6605314329eca09641954550523"}, | |||
| {file = "Bottleneck-1.3.8-cp312-cp312-win32.whl", hash = "sha256:d3ae2bb5d4168912e438e377cc1301fa01df949ba59cd86317b3e00404fd4a97"}, | |||
| {file = "Bottleneck-1.3.8-cp312-cp312-win_amd64.whl", hash = "sha256:bcba1d5d5328c50f94852ab521fcb26f35d9e0ccd928d120d56455d1a5bb743f"}, | |||
| {file = "Bottleneck-1.3.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8d01fd5389d3160d54619119987ac24b020fa6810b7b398fff4945892237b3da"}, | |||
| {file = "Bottleneck-1.3.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ca25f0003ef65264942f6306d793e0f270ece8b406c5a293dfc7d878146e9f8"}, | |||
| {file = "Bottleneck-1.3.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf7763cf1516fa388c3587d12182fc1bc1c8089eab1a0a1bf09761f4c41af73c"}, | |||
| {file = "Bottleneck-1.3.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:38837c022350e2a656453f0e448416b7108cf67baccf11d04a0b3b70a48074dd"}, | |||
| {file = "Bottleneck-1.3.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:84ca5e741fae1c1796744dbdd0d2c1789cb74dd79c12ea8ec5834f83430f8520"}, | |||
| {file = "Bottleneck-1.3.8-cp37-cp37m-win32.whl", hash = "sha256:f4dfc22a3450227e692ef2ff4657639c33eec88ad04ee3ce29d1a23a4942da24"}, | |||
| {file = "Bottleneck-1.3.8-cp37-cp37m-win_amd64.whl", hash = "sha256:90b87eed152bbd760c4eb11473c2cf036abdb26e2f84caeb00787da74fb08c40"}, | |||
| {file = "Bottleneck-1.3.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:54a1b5d9d63b2d9f2955f8542eea26c418f97873e0abf86ca52beea0208c9306"}, | |||
| {file = "Bottleneck-1.3.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:019dd142d1e870388fb0b649213a0d8e569cce784326e183deba8f17826edd9f"}, | |||
| {file = "Bottleneck-1.3.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b5ed34a540eb7df59f45da659af9f792306637de1c69c95f020294f3b9fc4a8"}, | |||
| {file = "Bottleneck-1.3.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b69fcd4d818bcf9d53497d8accd0d5f852a447728baaa33b9b7168f8c4221d06"}, | |||
| {file = "Bottleneck-1.3.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:02616a830bd477f5ba51103396092da4b9d83cea2e88f5b8069e3f4f7b796704"}, | |||
| {file = "Bottleneck-1.3.8-cp38-cp38-win32.whl", hash = "sha256:93d359fb83eb3bdd6635ef6e64835c38ffdc211441fc190549f286e6af98b5f6"}, | |||
| {file = "Bottleneck-1.3.8-cp38-cp38-win_amd64.whl", hash = "sha256:51c8bb3dffeb72c14f0382b80de76eabac6726d316babbd48f7e4056267d7910"}, | |||
| {file = "Bottleneck-1.3.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:84453548b0f722c3be912ce3c6b685917fea842bf1252eeb63714a2c1fd1ffc9"}, | |||
| {file = "Bottleneck-1.3.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92700867504a213cafa9b8d9be529bd6e18dc83366b2ba00e86e80769b93f678"}, | |||
| {file = "Bottleneck-1.3.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fadfd2f3931fdff42f4b9867eb02ed7c662d01e6099ff6b347b6ced791450651"}, | |||
| {file = "Bottleneck-1.3.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:cfbc4a3a934b677bfbc37ac8757c4e1264a76262b774259bd3fa8a265dbd668b"}, | |||
| {file = "Bottleneck-1.3.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3c74c18f86a1ffac22280b005df8bb8a58505ac6663c4d6807f39873c17dc347"}, | |||
| {file = "Bottleneck-1.3.8-cp39-cp39-win32.whl", hash = "sha256:211f881159e8adb3a57df2263028ae6dc89ec4328bfd43f3421e507406c28654"}, | |||
| {file = "Bottleneck-1.3.8-cp39-cp39-win_amd64.whl", hash = "sha256:8615eeb75009ba7c0a112a5a6a5154ed3d61fd6b0879631778b3e42e2d9a6d65"}, | |||
| {file = "Bottleneck-1.3.8.tar.gz", hash = "sha256:6780d896969ba7f53c8995ba90c87c548beb3db435dc90c60b9a10ed1ab4d868"}, | |||
| ] | |||
| [package.dependencies] | |||
| @@ -1087,13 +1088,13 @@ numpy = "*" | |||
| [[package]] | |||
| name = "chromadb" | |||
| version = "0.5.3" | |||
| version = "0.5.1" | |||
| description = "Chroma." | |||
| optional = false | |||
| python-versions = ">=3.8" | |||
| files = [ | |||
| {file = "chromadb-0.5.3-py3-none-any.whl", hash = "sha256:b3874f08356e291c68c6d2e177db472cd51f22f3af7b9746215b748fd1e29982"}, | |||
| {file = "chromadb-0.5.3.tar.gz", hash = "sha256:05d887f56a46b2e0fc6ac5ab979503a27b9ee50d5ca9e455f83b2fb9840cd026"}, | |||
| {file = "chromadb-0.5.1-py3-none-any.whl", hash = "sha256:61f1f75a672b6edce7f1c8875c67e2aaaaf130dc1c1684431fbc42ad7240d01d"}, | |||
| {file = "chromadb-0.5.1.tar.gz", hash = "sha256:e2b2b6a34c2a949bedcaa42fa7775f40c7f6667848fc8094dcbf97fc0d30bee7"}, | |||
| ] | |||
| [package.dependencies] | |||
| @@ -1840,19 +1841,19 @@ files = [ | |||
| [[package]] | |||
| name = "duckduckgo-search" | |||
| version = "6.1.7" | |||
| version = "6.1.6" | |||
| description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine." | |||
| optional = false | |||
| python-versions = ">=3.8" | |||
| files = [ | |||
| {file = "duckduckgo_search-6.1.7-py3-none-any.whl", hash = "sha256:ec7d5becb8c392c0293ff9464938c1014896e1e14725c05adc306290a636fab2"}, | |||
| {file = "duckduckgo_search-6.1.7.tar.gz", hash = "sha256:c6fd8ba17fe9cd0a4f32e5b96984e959c3da865f9c2864bfcf82bf7ff9b7e8f0"}, | |||
| {file = "duckduckgo_search-6.1.6-py3-none-any.whl", hash = "sha256:6139ab17579e96ca7c5ed9398365245a36ecca8e7432545e3115ef90a9304eb7"}, | |||
| {file = "duckduckgo_search-6.1.6.tar.gz", hash = "sha256:42c83d58f4f1d717a580b89cc86861cbae59e46e75288243776c53349d006bf1"}, | |||
| ] | |||
| [package.dependencies] | |||
| click = ">=8.1.7" | |||
| orjson = ">=3.10.5" | |||
| pyreqwest-impersonate = ">=0.4.8" | |||
| orjson = ">=3.10.4" | |||
| pyreqwest-impersonate = ">=0.4.7" | |||
| [package.extras] | |||
| dev = ["mypy (>=1.10.0)", "pytest (>=8.2.2)", "pytest-asyncio (>=0.23.7)", "ruff (>=0.4.8)"] | |||
| @@ -1860,13 +1861,13 @@ lxml = ["lxml (>=5.2.2)"] | |||
| [[package]] | |||
| name = "email-validator" | |||
| version = "2.1.2" | |||
| version = "2.1.1" | |||
| description = "A robust email address syntax and deliverability validation library." | |||
| optional = false | |||
| python-versions = ">=3.8" | |||
| files = [ | |||
| {file = "email_validator-2.1.2-py3-none-any.whl", hash = "sha256:d89f6324e13b1e39889eab7f9ca2f91dc9aebb6fa50a6d8bd4329ab50f251115"}, | |||
| {file = "email_validator-2.1.2.tar.gz", hash = "sha256:14c0f3d343c4beda37400421b39fa411bbe33a75df20825df73ad53e06a9f04c"}, | |||
| {file = "email_validator-2.1.1-py3-none-any.whl", hash = "sha256:97d882d174e2a65732fb43bfce81a3a834cbc1bde8bf419e30ef5ea976370a05"}, | |||
| {file = "email_validator-2.1.1.tar.gz", hash = "sha256:200a70680ba08904be6d1eef729205cc0d687634399a5924d842533efb824b84"}, | |||
| ] | |||
| [package.dependencies] | |||
| @@ -2057,18 +2058,18 @@ sgmllib3k = "*" | |||
| [[package]] | |||
| name = "filelock" | |||
| version = "3.15.3" | |||
| version = "3.14.0" | |||
| description = "A platform independent file lock." | |||
| optional = false | |||
| python-versions = ">=3.8" | |||
| files = [ | |||
| {file = "filelock-3.15.3-py3-none-any.whl", hash = "sha256:0151273e5b5d6cf753a61ec83b3a9b7d8821c39ae9af9d7ecf2f9e2f17404103"}, | |||
| {file = "filelock-3.15.3.tar.gz", hash = "sha256:e1199bf5194a2277273dacd50269f0d87d0682088a3c561c15674ea9005d8635"}, | |||
| {file = "filelock-3.14.0-py3-none-any.whl", hash = "sha256:43339835842f110ca7ae60f1e1c160714c5a6afd15a2873419ab185334975c0f"}, | |||
| {file = "filelock-3.14.0.tar.gz", hash = "sha256:6ea72da3be9b8c82afd3edcf99f2fffbb5076335a5ae4d03248bb5b6c3eae78a"}, | |||
| ] | |||
| [package.extras] | |||
| docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] | |||
| testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] | |||
| testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] | |||
| typing = ["typing-extensions (>=4.8)"] | |||
| [[package]] | |||
| @@ -3901,34 +3902,74 @@ files = [ | |||
| [package.dependencies] | |||
| six = "*" | |||
| [[package]] | |||
| name = "langfuse" | |||
| version = "2.36.2" | |||
| description = "A client library for accessing langfuse" | |||
| optional = false | |||
| python-versions = "<4.0,>=3.8.1" | |||
| files = [ | |||
| {file = "langfuse-2.36.2-py3-none-any.whl", hash = "sha256:66728feddcec0974e4eb31612151a282fcce2e333b5a61474182b5e67e78e090"}, | |||
| {file = "langfuse-2.36.2.tar.gz", hash = "sha256:3e784505d408aa2c9c2da79487b64d185d8f7fa8a855e5303bcce678454c715b"}, | |||
| ] | |||
| [package.dependencies] | |||
| backoff = ">=1.10.0" | |||
| httpx = ">=0.15.4,<1.0" | |||
| idna = ">=3.7,<4.0" | |||
| packaging = ">=23.2,<24.0" | |||
| pydantic = ">=1.10.7,<3.0" | |||
| wrapt = ">=1.14,<2.0" | |||
| [package.extras] | |||
| langchain = ["langchain (>=0.0.309)"] | |||
| llama-index = ["llama-index (>=0.10.12,<2.0.0)"] | |||
| openai = ["openai (>=0.27.8)"] | |||
| [[package]] | |||
| name = "langsmith" | |||
| version = "0.1.81" | |||
| description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." | |||
| optional = false | |||
| python-versions = "<4.0,>=3.8.1" | |||
| files = [ | |||
| {file = "langsmith-0.1.81-py3-none-any.whl", hash = "sha256:3251d823225eef23ee541980b9d9e506367eabbb7f985a086b5d09e8f78ba7e9"}, | |||
| {file = "langsmith-0.1.81.tar.gz", hash = "sha256:585ef3a2251380bd2843a664c9a28da4a7d28432e3ee8bcebf291ffb8e1f0af0"}, | |||
| ] | |||
| [package.dependencies] | |||
| orjson = ">=3.9.14,<4.0.0" | |||
| pydantic = ">=1,<3" | |||
| requests = ">=2,<3" | |||
| [[package]] | |||
| name = "llvmlite" | |||
| version = "0.43.0" | |||
| version = "0.42.0" | |||
| description = "lightweight wrapper around basic LLVM functionality" | |||
| optional = false | |||
| python-versions = ">=3.9" | |||
| files = [ | |||
| {file = "llvmlite-0.43.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a289af9a1687c6cf463478f0fa8e8aa3b6fb813317b0d70bf1ed0759eab6f761"}, | |||
| {file = "llvmlite-0.43.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d4fd101f571a31acb1559ae1af30f30b1dc4b3186669f92ad780e17c81e91bc"}, | |||
| {file = "llvmlite-0.43.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d434ec7e2ce3cc8f452d1cd9a28591745de022f931d67be688a737320dfcead"}, | |||
| {file = "llvmlite-0.43.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6912a87782acdff6eb8bf01675ed01d60ca1f2551f8176a300a886f09e836a6a"}, | |||
| {file = "llvmlite-0.43.0-cp310-cp310-win_amd64.whl", hash = "sha256:14f0e4bf2fd2d9a75a3534111e8ebeb08eda2f33e9bdd6dfa13282afacdde0ed"}, | |||
| {file = "llvmlite-0.43.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3e8d0618cb9bfe40ac38a9633f2493d4d4e9fcc2f438d39a4e854f39cc0f5f98"}, | |||
| {file = "llvmlite-0.43.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0a9a1a39d4bf3517f2af9d23d479b4175ead205c592ceeb8b89af48a327ea57"}, | |||
| {file = "llvmlite-0.43.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1da416ab53e4f7f3bc8d4eeba36d801cc1894b9fbfbf2022b29b6bad34a7df2"}, | |||
| {file = "llvmlite-0.43.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977525a1e5f4059316b183fb4fd34fa858c9eade31f165427a3977c95e3ee749"}, | |||
| {file = "llvmlite-0.43.0-cp311-cp311-win_amd64.whl", hash = "sha256:d5bd550001d26450bd90777736c69d68c487d17bf371438f975229b2b8241a91"}, | |||
| {file = "llvmlite-0.43.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f99b600aa7f65235a5a05d0b9a9f31150c390f31261f2a0ba678e26823ec38f7"}, | |||
| {file = "llvmlite-0.43.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:35d80d61d0cda2d767f72de99450766250560399edc309da16937b93d3b676e7"}, | |||
| {file = "llvmlite-0.43.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eccce86bba940bae0d8d48ed925f21dbb813519169246e2ab292b5092aba121f"}, | |||
| {file = "llvmlite-0.43.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df6509e1507ca0760787a199d19439cc887bfd82226f5af746d6977bd9f66844"}, | |||
| {file = "llvmlite-0.43.0-cp312-cp312-win_amd64.whl", hash = "sha256:7a2872ee80dcf6b5dbdc838763d26554c2a18aa833d31a2635bff16aafefb9c9"}, | |||
| {file = "llvmlite-0.43.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9cd2a7376f7b3367019b664c21f0c61766219faa3b03731113ead75107f3b66c"}, | |||
| {file = "llvmlite-0.43.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18e9953c748b105668487b7c81a3e97b046d8abf95c4ddc0cd3c94f4e4651ae8"}, | |||
| {file = "llvmlite-0.43.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74937acd22dc11b33946b67dca7680e6d103d6e90eeaaaf932603bec6fe7b03a"}, | |||
| {file = "llvmlite-0.43.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc9efc739cc6ed760f795806f67889923f7274276f0eb45092a1473e40d9b867"}, | |||
| {file = "llvmlite-0.43.0-cp39-cp39-win_amd64.whl", hash = "sha256:47e147cdda9037f94b399bf03bfd8a6b6b1f2f90be94a454e3386f006455a9b4"}, | |||
| {file = "llvmlite-0.43.0.tar.gz", hash = "sha256:ae2b5b5c3ef67354824fb75517c8db5fbe93bc02cd9671f3c62271626bc041d5"}, | |||
| {file = "llvmlite-0.42.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3366938e1bf63d26c34fbfb4c8e8d2ded57d11e0567d5bb243d89aab1eb56098"}, | |||
| {file = "llvmlite-0.42.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c35da49666a21185d21b551fc3caf46a935d54d66969d32d72af109b5e7d2b6f"}, | |||
| {file = "llvmlite-0.42.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70f44ccc3c6220bd23e0ba698a63ec2a7d3205da0d848804807f37fc243e3f77"}, | |||
| {file = "llvmlite-0.42.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763f8d8717a9073b9e0246998de89929071d15b47f254c10eef2310b9aac033d"}, | |||
| {file = "llvmlite-0.42.0-cp310-cp310-win_amd64.whl", hash = "sha256:8d90edf400b4ceb3a0e776b6c6e4656d05c7187c439587e06f86afceb66d2be5"}, | |||
| {file = "llvmlite-0.42.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ae511caed28beaf1252dbaf5f40e663f533b79ceb408c874c01754cafabb9cbf"}, | |||
| {file = "llvmlite-0.42.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81e674c2fe85576e6c4474e8c7e7aba7901ac0196e864fe7985492b737dbab65"}, | |||
| {file = "llvmlite-0.42.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb3975787f13eb97629052edb5017f6c170eebc1c14a0433e8089e5db43bcce6"}, | |||
| {file = "llvmlite-0.42.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5bece0cdf77f22379f19b1959ccd7aee518afa4afbd3656c6365865f84903f9"}, | |||
| {file = "llvmlite-0.42.0-cp311-cp311-win_amd64.whl", hash = "sha256:7e0c4c11c8c2aa9b0701f91b799cb9134a6a6de51444eff5a9087fc7c1384275"}, | |||
| {file = "llvmlite-0.42.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:08fa9ab02b0d0179c688a4216b8939138266519aaa0aa94f1195a8542faedb56"}, | |||
| {file = "llvmlite-0.42.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b2fce7d355068494d1e42202c7aff25d50c462584233013eb4470c33b995e3ee"}, | |||
| {file = "llvmlite-0.42.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebe66a86dc44634b59a3bc860c7b20d26d9aaffcd30364ebe8ba79161a9121f4"}, | |||
| {file = "llvmlite-0.42.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d47494552559e00d81bfb836cf1c4d5a5062e54102cc5767d5aa1e77ccd2505c"}, | |||
| {file = "llvmlite-0.42.0-cp312-cp312-win_amd64.whl", hash = "sha256:05cb7e9b6ce69165ce4d1b994fbdedca0c62492e537b0cc86141b6e2c78d5888"}, | |||
| {file = "llvmlite-0.42.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bdd3888544538a94d7ec99e7c62a0cdd8833609c85f0c23fcb6c5c591aec60ad"}, | |||
| {file = "llvmlite-0.42.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0936c2067a67fb8816c908d5457d63eba3e2b17e515c5fe00e5ee2bace06040"}, | |||
| {file = "llvmlite-0.42.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a78ab89f1924fc11482209f6799a7a3fc74ddc80425a7a3e0e8174af0e9e2301"}, | |||
| {file = "llvmlite-0.42.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7599b65c7af7abbc978dbf345712c60fd596aa5670496561cc10e8a71cebfb2"}, | |||
| {file = "llvmlite-0.42.0-cp39-cp39-win_amd64.whl", hash = "sha256:43d65cc4e206c2e902c1004dd5418417c4efa6c1d04df05c6c5675a27e8ca90e"}, | |||
| {file = "llvmlite-0.42.0.tar.gz", hash = "sha256:f92b09243c0cc3f457da8b983f67bd8e1295d0f5b3746c7a1861d7a99403854a"}, | |||
| ] | |||
| [[package]] | |||
| @@ -4650,37 +4691,37 @@ requests = ">=2.27.1" | |||
| [[package]] | |||
| name = "numba" | |||
| version = "0.60.0" | |||
| version = "0.59.1" | |||
| description = "compiling Python code using LLVM" | |||
| optional = false | |||
| python-versions = ">=3.9" | |||
| files = [ | |||
| {file = "numba-0.60.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d761de835cd38fb400d2c26bb103a2726f548dc30368853121d66201672e651"}, | |||
| {file = "numba-0.60.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:159e618ef213fba758837f9837fb402bbe65326e60ba0633dbe6c7f274d42c1b"}, | |||
| {file = "numba-0.60.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1527dc578b95c7c4ff248792ec33d097ba6bef9eda466c948b68dfc995c25781"}, | |||
| {file = "numba-0.60.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe0b28abb8d70f8160798f4de9d486143200f34458d34c4a214114e445d7124e"}, | |||
| {file = "numba-0.60.0-cp310-cp310-win_amd64.whl", hash = "sha256:19407ced081d7e2e4b8d8c36aa57b7452e0283871c296e12d798852bc7d7f198"}, | |||
| {file = "numba-0.60.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a17b70fc9e380ee29c42717e8cc0bfaa5556c416d94f9aa96ba13acb41bdece8"}, | |||
| {file = "numba-0.60.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3fb02b344a2a80efa6f677aa5c40cd5dd452e1b35f8d1c2af0dfd9ada9978e4b"}, | |||
| {file = "numba-0.60.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f4fde652ea604ea3c86508a3fb31556a6157b2c76c8b51b1d45eb40c8598703"}, | |||
| {file = "numba-0.60.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4142d7ac0210cc86432b818338a2bc368dc773a2f5cf1e32ff7c5b378bd63ee8"}, | |||
| {file = "numba-0.60.0-cp311-cp311-win_amd64.whl", hash = "sha256:cac02c041e9b5bc8cf8f2034ff6f0dbafccd1ae9590dc146b3a02a45e53af4e2"}, | |||
| {file = "numba-0.60.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7da4098db31182fc5ffe4bc42c6f24cd7d1cb8a14b59fd755bfee32e34b8404"}, | |||
| {file = "numba-0.60.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:38d6ea4c1f56417076ecf8fc327c831ae793282e0ff51080c5094cb726507b1c"}, | |||
| {file = "numba-0.60.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:62908d29fb6a3229c242e981ca27e32a6e606cc253fc9e8faeb0e48760de241e"}, | |||
| {file = "numba-0.60.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0ebaa91538e996f708f1ab30ef4d3ddc344b64b5227b67a57aa74f401bb68b9d"}, | |||
| {file = "numba-0.60.0-cp312-cp312-win_amd64.whl", hash = "sha256:f75262e8fe7fa96db1dca93d53a194a38c46da28b112b8a4aca168f0df860347"}, | |||
| {file = "numba-0.60.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:01ef4cd7d83abe087d644eaa3d95831b777aa21d441a23703d649e06b8e06b74"}, | |||
| {file = "numba-0.60.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:819a3dfd4630d95fd574036f99e47212a1af41cbcb019bf8afac63ff56834449"}, | |||
| {file = "numba-0.60.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0b983bd6ad82fe868493012487f34eae8bf7dd94654951404114f23c3466d34b"}, | |||
| {file = "numba-0.60.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c151748cd269ddeab66334bd754817ffc0cabd9433acb0f551697e5151917d25"}, | |||
| {file = "numba-0.60.0-cp39-cp39-win_amd64.whl", hash = "sha256:3031547a015710140e8c87226b4cfe927cac199835e5bf7d4fe5cb64e814e3ab"}, | |||
| {file = "numba-0.60.0.tar.gz", hash = "sha256:5df6158e5584eece5fc83294b949fd30b9f1125df7708862205217e068aabf16"}, | |||
| ] | |||
| [package.dependencies] | |||
| llvmlite = "==0.43.*" | |||
| numpy = ">=1.22,<2.1" | |||
| {file = "numba-0.59.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:97385a7f12212c4f4bc28f648720a92514bee79d7063e40ef66c2d30600fd18e"}, | |||
| {file = "numba-0.59.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0b77aecf52040de2a1eb1d7e314497b9e56fba17466c80b457b971a25bb1576d"}, | |||
| {file = "numba-0.59.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3476a4f641bfd58f35ead42f4dcaf5f132569c4647c6f1360ccf18ee4cda3990"}, | |||
| {file = "numba-0.59.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:525ef3f820931bdae95ee5379c670d5c97289c6520726bc6937a4a7d4230ba24"}, | |||
| {file = "numba-0.59.1-cp310-cp310-win_amd64.whl", hash = "sha256:990e395e44d192a12105eca3083b61307db7da10e093972ca285c85bef0963d6"}, | |||
| {file = "numba-0.59.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:43727e7ad20b3ec23ee4fc642f5b61845c71f75dd2825b3c234390c6d8d64051"}, | |||
| {file = "numba-0.59.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:411df625372c77959570050e861981e9d196cc1da9aa62c3d6a836b5cc338966"}, | |||
| {file = "numba-0.59.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2801003caa263d1e8497fb84829a7ecfb61738a95f62bc05693fcf1733e978e4"}, | |||
| {file = "numba-0.59.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dd2842fac03be4e5324ebbbd4d2d0c8c0fc6e0df75c09477dd45b288a0777389"}, | |||
| {file = "numba-0.59.1-cp311-cp311-win_amd64.whl", hash = "sha256:0594b3dfb369fada1f8bb2e3045cd6c61a564c62e50cf1f86b4666bc721b3450"}, | |||
| {file = "numba-0.59.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1cce206a3b92836cdf26ef39d3a3242fec25e07f020cc4feec4c4a865e340569"}, | |||
| {file = "numba-0.59.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8c8b4477763cb1fbd86a3be7050500229417bf60867c93e131fd2626edb02238"}, | |||
| {file = "numba-0.59.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d80bce4ef7e65bf895c29e3889ca75a29ee01da80266a01d34815918e365835"}, | |||
| {file = "numba-0.59.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f7ad1d217773e89a9845886401eaaab0a156a90aa2f179fdc125261fd1105096"}, | |||
| {file = "numba-0.59.1-cp312-cp312-win_amd64.whl", hash = "sha256:5bf68f4d69dd3a9f26a9b23548fa23e3bcb9042e2935257b471d2a8d3c424b7f"}, | |||
| {file = "numba-0.59.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4e0318ae729de6e5dbe64c75ead1a95eb01fabfe0e2ebed81ebf0344d32db0ae"}, | |||
| {file = "numba-0.59.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0f68589740a8c38bb7dc1b938b55d1145244c8353078eea23895d4f82c8b9ec1"}, | |||
| {file = "numba-0.59.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:649913a3758891c77c32e2d2a3bcbedf4a69f5fea276d11f9119677c45a422e8"}, | |||
| {file = "numba-0.59.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9712808e4545270291d76b9a264839ac878c5eb7d8b6e02c970dc0ac29bc8187"}, | |||
| {file = "numba-0.59.1-cp39-cp39-win_amd64.whl", hash = "sha256:8d51ccd7008a83105ad6a0082b6a2b70f1142dc7cfd76deb8c5a862367eb8c86"}, | |||
| {file = "numba-0.59.1.tar.gz", hash = "sha256:76f69132b96028d2774ed20415e8c528a34e3299a40581bae178f0994a2f370b"}, | |||
| ] | |||
| [package.dependencies] | |||
| llvmlite = "==0.42.*" | |||
| numpy = ">=1.22,<1.27" | |||
| [[package]] | |||
| name = "numexpr" | |||
| @@ -4878,13 +4919,13 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] | |||
| [[package]] | |||
| name = "openpyxl" | |||
| version = "3.1.4" | |||
| version = "3.1.3" | |||
| description = "A Python library to read/write Excel 2010 xlsx/xlsm files" | |||
| optional = false | |||
| python-versions = ">=3.8" | |||
| python-versions = ">=3.6" | |||
| files = [ | |||
| {file = "openpyxl-3.1.4-py2.py3-none-any.whl", hash = "sha256:ec17f6483f2b8f7c88c57e5e5d3b0de0e3fb9ac70edc084d28e864f5b33bbefd"}, | |||
| {file = "openpyxl-3.1.4.tar.gz", hash = "sha256:8d2c8adf5d20d6ce8f9bca381df86b534835e974ed0156dacefa76f68c1d69fb"}, | |||
| {file = "openpyxl-3.1.3-py2.py3-none-any.whl", hash = "sha256:25071b558db709de9e8782c3d3e058af3b23ffb2fc6f40c8f0c45a154eced2c3"}, | |||
| {file = "openpyxl-3.1.3.tar.gz", hash = "sha256:8dd482e5350125b2388070bb2477927be2e8ebc27df61178709bc8c8751da2f9"}, | |||
| ] | |||
| [package.dependencies] | |||
| @@ -5121,57 +5162,57 @@ cryptography = ">=3.2.1" | |||
| [[package]] | |||
| name = "orjson" | |||
| version = "3.10.5" | |||
| version = "3.10.4" | |||
| description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" | |||
| optional = false | |||
| python-versions = ">=3.8" | |||
| files = [ | |||
| {file = "orjson-3.10.5-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:545d493c1f560d5ccfc134803ceb8955a14c3fcb47bbb4b2fee0232646d0b932"}, | |||
| {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4324929c2dd917598212bfd554757feca3e5e0fa60da08be11b4aa8b90013c1"}, | |||
| {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c13ca5e2ddded0ce6a927ea5a9f27cae77eee4c75547b4297252cb20c4d30e6"}, | |||
| {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b6c8e30adfa52c025f042a87f450a6b9ea29649d828e0fec4858ed5e6caecf63"}, | |||
| {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:338fd4f071b242f26e9ca802f443edc588fa4ab60bfa81f38beaedf42eda226c"}, | |||
| {file = "orjson-3.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6970ed7a3126cfed873c5d21ece1cd5d6f83ca6c9afb71bbae21a0b034588d96"}, | |||
| {file = "orjson-3.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:235dadefb793ad12f7fa11e98a480db1f7c6469ff9e3da5e73c7809c700d746b"}, | |||
| {file = "orjson-3.10.5-cp310-none-win32.whl", hash = "sha256:be79e2393679eda6a590638abda16d167754393f5d0850dcbca2d0c3735cebe2"}, | |||
| {file = "orjson-3.10.5-cp310-none-win_amd64.whl", hash = "sha256:c4a65310ccb5c9910c47b078ba78e2787cb3878cdded1702ac3d0da71ddc5228"}, | |||
| {file = "orjson-3.10.5-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cdf7365063e80899ae3a697def1277c17a7df7ccfc979990a403dfe77bb54d40"}, | |||
| {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b68742c469745d0e6ca5724506858f75e2f1e5b59a4315861f9e2b1df77775a"}, | |||
| {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7d10cc1b594951522e35a3463da19e899abe6ca95f3c84c69e9e901e0bd93d38"}, | |||
| {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dcbe82b35d1ac43b0d84072408330fd3295c2896973112d495e7234f7e3da2e1"}, | |||
| {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c0eb7e0c75e1e486c7563fe231b40fdd658a035ae125c6ba651ca3b07936f5"}, | |||
| {file = "orjson-3.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:53ed1c879b10de56f35daf06dbc4a0d9a5db98f6ee853c2dbd3ee9d13e6f302f"}, | |||
| {file = "orjson-3.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:099e81a5975237fda3100f918839af95f42f981447ba8f47adb7b6a3cdb078fa"}, | |||
| {file = "orjson-3.10.5-cp311-none-win32.whl", hash = "sha256:1146bf85ea37ac421594107195db8bc77104f74bc83e8ee21a2e58596bfb2f04"}, | |||
| {file = "orjson-3.10.5-cp311-none-win_amd64.whl", hash = "sha256:36a10f43c5f3a55c2f680efe07aa93ef4a342d2960dd2b1b7ea2dd764fe4a37c"}, | |||
| {file = "orjson-3.10.5-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:68f85ecae7af14a585a563ac741b0547a3f291de81cd1e20903e79f25170458f"}, | |||
| {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28afa96f496474ce60d3340fe8d9a263aa93ea01201cd2bad844c45cd21f5268"}, | |||
| {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cd684927af3e11b6e754df80b9ffafd9fb6adcaa9d3e8fdd5891be5a5cad51e"}, | |||
| {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d21b9983da032505f7050795e98b5d9eee0df903258951566ecc358f6696969"}, | |||
| {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ad1de7fef79736dde8c3554e75361ec351158a906d747bd901a52a5c9c8d24b"}, | |||
| {file = "orjson-3.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2d97531cdfe9bdd76d492e69800afd97e5930cb0da6a825646667b2c6c6c0211"}, | |||
| {file = "orjson-3.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d69858c32f09c3e1ce44b617b3ebba1aba030e777000ebdf72b0d8e365d0b2b3"}, | |||
| {file = "orjson-3.10.5-cp312-none-win32.whl", hash = "sha256:64c9cc089f127e5875901ac05e5c25aa13cfa5dbbbd9602bda51e5c611d6e3e2"}, | |||
| {file = "orjson-3.10.5-cp312-none-win_amd64.whl", hash = "sha256:b2efbd67feff8c1f7728937c0d7f6ca8c25ec81373dc8db4ef394c1d93d13dc5"}, | |||
| {file = "orjson-3.10.5-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:03b565c3b93f5d6e001db48b747d31ea3819b89abf041ee10ac6988886d18e01"}, | |||
| {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:584c902ec19ab7928fd5add1783c909094cc53f31ac7acfada817b0847975f26"}, | |||
| {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a35455cc0b0b3a1eaf67224035f5388591ec72b9b6136d66b49a553ce9eb1e6"}, | |||
| {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1670fe88b116c2745a3a30b0f099b699a02bb3482c2591514baf5433819e4f4d"}, | |||
| {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:185c394ef45b18b9a7d8e8f333606e2e8194a50c6e3c664215aae8cf42c5385e"}, | |||
| {file = "orjson-3.10.5-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ca0b3a94ac8d3886c9581b9f9de3ce858263865fdaa383fbc31c310b9eac07c9"}, | |||
| {file = "orjson-3.10.5-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dfc91d4720d48e2a709e9c368d5125b4b5899dced34b5400c3837dadc7d6271b"}, | |||
| {file = "orjson-3.10.5-cp38-none-win32.whl", hash = "sha256:c05f16701ab2a4ca146d0bca950af254cb7c02f3c01fca8efbbad82d23b3d9d4"}, | |||
| {file = "orjson-3.10.5-cp38-none-win_amd64.whl", hash = "sha256:8a11d459338f96a9aa7f232ba95679fc0c7cedbd1b990d736467894210205c09"}, | |||
| {file = "orjson-3.10.5-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:85c89131d7b3218db1b24c4abecea92fd6c7f9fab87441cfc342d3acc725d807"}, | |||
| {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66215277a230c456f9038d5e2d84778141643207f85336ef8d2a9da26bd7ca"}, | |||
| {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:51bbcdea96cdefa4a9b4461e690c75ad4e33796530d182bdd5c38980202c134a"}, | |||
| {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbead71dbe65f959b7bd8cf91e0e11d5338033eba34c114f69078d59827ee139"}, | |||
| {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5df58d206e78c40da118a8c14fc189207fffdcb1f21b3b4c9c0c18e839b5a214"}, | |||
| {file = "orjson-3.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c4057c3b511bb8aef605616bd3f1f002a697c7e4da6adf095ca5b84c0fd43595"}, | |||
| {file = "orjson-3.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b39e006b00c57125ab974362e740c14a0c6a66ff695bff44615dcf4a70ce2b86"}, | |||
| {file = "orjson-3.10.5-cp39-none-win32.whl", hash = "sha256:eded5138cc565a9d618e111c6d5c2547bbdd951114eb822f7f6309e04db0fb47"}, | |||
| {file = "orjson-3.10.5-cp39-none-win_amd64.whl", hash = "sha256:cc28e90a7cae7fcba2493953cff61da5a52950e78dc2dacfe931a317ee3d8de7"}, | |||
| {file = "orjson-3.10.5.tar.gz", hash = "sha256:7a5baef8a4284405d96c90c7c62b755e9ef1ada84c2406c24a9ebec86b89f46d"}, | |||
| {file = "orjson-3.10.4-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:afca963f19ca60c7aedadea9979f769139127288dd58ccf3f7c5e8e6dc62cabf"}, | |||
| {file = "orjson-3.10.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b112eff36ba7ccc7a9d6b87e17b9d6bde4312d05e3ddf66bf5662481dee846"}, | |||
| {file = "orjson-3.10.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02b192eaba048b1039eca9a0cef67863bd5623042f5c441889a9957121d97e14"}, | |||
| {file = "orjson-3.10.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:827c3d0e4fc44242c82bfdb1a773235b8c0575afee99a9fa9a8ce920c14e440f"}, | |||
| {file = "orjson-3.10.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca8ec09724f10ec209244caeb1f9f428b6bb03f2eda9ed5e2c4dd7f2b7fabd44"}, | |||
| {file = "orjson-3.10.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8eaa5d531a8fde11993cbcb27e9acf7d9c457ba301adccb7fa3a021bfecab46c"}, | |||
| {file = "orjson-3.10.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e112aa7fc4ea67367ec5e86c39a6bb6c5719eddc8f999087b1759e765ddaf2d4"}, | |||
| {file = "orjson-3.10.4-cp310-none-win32.whl", hash = "sha256:1538844fb88446c42da3889f8c4ecce95a630b5a5ba18ecdfe5aea596f4dff21"}, | |||
| {file = "orjson-3.10.4-cp310-none-win_amd64.whl", hash = "sha256:de02811903a2e434127fba5389c3cc90f689542339a6e52e691ab7f693407b5a"}, | |||
| {file = "orjson-3.10.4-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:358afaec75de7237dfea08e6b1b25d226e33a1e3b6dc154fc99eb697f24a1ffa"}, | |||
| {file = "orjson-3.10.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb4e292c3198ab3d93e5f877301d2746be4ca0ba2d9c513da5e10eb90e19ff52"}, | |||
| {file = "orjson-3.10.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5c39e57cf6323a39238490092985d5d198a7da4a3be013cc891a33fef13a536e"}, | |||
| {file = "orjson-3.10.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f86df433fc01361ff9270ad27455ce1ad43cd05e46de7152ca6adb405a16b2f6"}, | |||
| {file = "orjson-3.10.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c9966276a2c97e93e6cbe8286537f88b2a071827514f0d9d47a0aefa77db458"}, | |||
| {file = "orjson-3.10.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c499a14155a1f5a1e16e0cd31f6cf6f93965ac60a0822bc8340e7e2d3dac1108"}, | |||
| {file = "orjson-3.10.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3087023ce904a327c29487eb7e1f2c060070e8dbb9a3991b8e7952a9c6e62f38"}, | |||
| {file = "orjson-3.10.4-cp311-none-win32.whl", hash = "sha256:f965893244fe348b59e5ce560693e6dd03368d577ce26849b5d261ce31c70101"}, | |||
| {file = "orjson-3.10.4-cp311-none-win_amd64.whl", hash = "sha256:c212f06fad6aa6ce85d5665e91a83b866579f29441a47d3865c57329c0857357"}, | |||
| {file = "orjson-3.10.4-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d0965a8b0131959833ca8a65af60285995d57ced0de2fd8f16fc03235975d238"}, | |||
| {file = "orjson-3.10.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27b64695d9f2aef3ae15a0522e370ec95c946aaea7f2c97a1582a62b3bdd9169"}, | |||
| {file = "orjson-3.10.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:867d882ddee6a20be4c8b03ae3d2b0333894d53ad632d32bd9b8123649577171"}, | |||
| {file = "orjson-3.10.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a0667458f8a8ceb6dee5c08fec0b46195f92c474cbbec71dca2a6b7fd5b67b8d"}, | |||
| {file = "orjson-3.10.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3eac9befc4eaec1d1ff3bba6210576be4945332dde194525601c5ddb5c060d3"}, | |||
| {file = "orjson-3.10.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4343245443552eae240a33047a6d1bcac7a754ad4b1c57318173c54d7efb9aea"}, | |||
| {file = "orjson-3.10.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:30153e269eea43e98918d4d462a36a7065031d9246407dfff2579a4e457515c1"}, | |||
| {file = "orjson-3.10.4-cp312-none-win32.whl", hash = "sha256:1a7d092ee043abf3db19c2183115e80676495c9911843fdb3ebd48ca7b73079e"}, | |||
| {file = "orjson-3.10.4-cp312-none-win_amd64.whl", hash = "sha256:07a2adbeb8b9efe6d68fc557685954a1f19d9e33f5cc018ae1a89e96647c1b65"}, | |||
| {file = "orjson-3.10.4-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:f5a746f3d908bce1a1e347b9ca89864047533bdfab5a450066a0315f6566527b"}, | |||
| {file = "orjson-3.10.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:465b4a8a3e459f8d304c19071b4badaa9b267c59207a005a7dd9dfe13d3a423f"}, | |||
| {file = "orjson-3.10.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:35858d260728c434a3d91b60685ab32418318567e8902039837e1c2af2719e0b"}, | |||
| {file = "orjson-3.10.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8a5ba090d40c4460312dd69c232b38c2ff67a823185cfe667e841c9dd5c06841"}, | |||
| {file = "orjson-3.10.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5dde86755d064664e62e3612a166c28298aa8dfd35a991553faa58855ae739cc"}, | |||
| {file = "orjson-3.10.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:020a9e9001cfec85c156ef3b185ff758b62ef986cefdb8384c4579facd5ce126"}, | |||
| {file = "orjson-3.10.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3bf8e6e3388a2e83a86466c912387e0f0a765494c65caa7e865f99969b76ba0d"}, | |||
| {file = "orjson-3.10.4-cp38-none-win32.whl", hash = "sha256:c5a1cca6a4a3129db3da68a25dc0a459a62ae58e284e363b35ab304202d9ba9e"}, | |||
| {file = "orjson-3.10.4-cp38-none-win_amd64.whl", hash = "sha256:ecd97d98d7bee3e3d51d0b51c92c457f05db4993329eea7c69764f9820e27eb3"}, | |||
| {file = "orjson-3.10.4-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:71362daa330a2fc85553a1469185ac448547392a8f83d34e67779f8df3a52743"}, | |||
| {file = "orjson-3.10.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d24b59d1fecb0fd080c177306118a143f7322335309640c55ed9580d2044e363"}, | |||
| {file = "orjson-3.10.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e906670aea5a605b083ebb58d575c35e88cf880fa372f7cedaac3d51e98ff164"}, | |||
| {file = "orjson-3.10.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7ce32ed4bc4d632268e4978e595fe5ea07e026b751482b4a0feec48f66a90abc"}, | |||
| {file = "orjson-3.10.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1dcd34286246e0c5edd0e230d1da2daab2c1b465fcb6bac85b8d44057229d40a"}, | |||
| {file = "orjson-3.10.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c45d4b8c403e50beedb1d006a8916d9910ed56bceaf2035dc253618b44d0a161"}, | |||
| {file = "orjson-3.10.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:aaed3253041b5002a4f5bfdf6f7b5cce657d974472b0699a469d439beba40381"}, | |||
| {file = "orjson-3.10.4-cp39-none-win32.whl", hash = "sha256:9a4f41b7dbf7896f8dbf559b9b43dcd99e31e0d49ac1b59d74f52ce51ab10eb9"}, | |||
| {file = "orjson-3.10.4-cp39-none-win_amd64.whl", hash = "sha256:6c4eb7d867ed91cb61e6514cb4f457aa01d7b0fd663089df60a69f3d38b69d4c"}, | |||
| {file = "orjson-3.10.4.tar.gz", hash = "sha256:c912ed25b787c73fe994a5decd81c3f3b256599b8a87d410d799d5d52013af2a"}, | |||
| ] | |||
| [[package]] | |||
| @@ -5205,13 +5246,13 @@ files = [ | |||
| [[package]] | |||
| name = "packaging" | |||
| version = "24.1" | |||
| version = "23.2" | |||
| description = "Core utilities for Python packages" | |||
| optional = false | |||
| python-versions = ">=3.8" | |||
| python-versions = ">=3.7" | |||
| files = [ | |||
| {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, | |||
| {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, | |||
| {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, | |||
| {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, | |||
| ] | |||
| [[package]] | |||
| @@ -5540,20 +5581,20 @@ wcwidth = "*" | |||
| [[package]] | |||
| name = "proto-plus" | |||
| version = "1.24.0" | |||
| version = "1.23.0" | |||
| description = "Beautiful, Pythonic protocol buffers." | |||
| optional = false | |||
| python-versions = ">=3.7" | |||
| python-versions = ">=3.6" | |||
| files = [ | |||
| {file = "proto-plus-1.24.0.tar.gz", hash = "sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445"}, | |||
| {file = "proto_plus-1.24.0-py3-none-any.whl", hash = "sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12"}, | |||
| {file = "proto-plus-1.23.0.tar.gz", hash = "sha256:89075171ef11988b3fa157f5dbd8b9cf09d65fffee97e29ce403cd8defba19d2"}, | |||
| {file = "proto_plus-1.23.0-py3-none-any.whl", hash = "sha256:a829c79e619e1cf632de091013a4173deed13a55f326ef84f05af6f50ff4c82c"}, | |||
| ] | |||
| [package.dependencies] | |||
| protobuf = ">=3.19.0,<6.0.0dev" | |||
| protobuf = ">=3.19.0,<5.0.0dev" | |||
| [package.extras] | |||
| testing = ["google-api-core (>=1.31.5)"] | |||
| testing = ["google-api-core[grpc] (>=1.31.5)"] | |||
| [[package]] | |||
| name = "protobuf" | |||
| @@ -5904,13 +5945,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" | |||
| [[package]] | |||
| name = "pydantic-extra-types" | |||
| version = "2.8.2" | |||
| version = "2.8.1" | |||
| description = "Extra Pydantic types." | |||
| optional = false | |||
| python-versions = ">=3.8" | |||
| files = [ | |||
| {file = "pydantic_extra_types-2.8.2-py3-none-any.whl", hash = "sha256:f2400b3c3553fb7fa09a131967b4edf2d53f01ad9fa89d158784653f2e5c13d1"}, | |||
| {file = "pydantic_extra_types-2.8.2.tar.gz", hash = "sha256:4d2b3c52c1e2e4dfa31bf1d5a37b841b09e3c5a08ec2bffca0e07fc2ad7d5c4a"}, | |||
| {file = "pydantic_extra_types-2.8.1-py3-none-any.whl", hash = "sha256:ca3fce71ee46bc1043bdf3d0e3c149a09ab162cb305c4ed8c501a5034a592dd6"}, | |||
| {file = "pydantic_extra_types-2.8.1.tar.gz", hash = "sha256:c7cabe403234658207dcefed3489f2e8bfc8f4a8e305e7ab25ee29eceed65b39"}, | |||
| ] | |||
| [package.dependencies] | |||
| @@ -6113,59 +6154,59 @@ files = [ | |||
| [[package]] | |||
| name = "pyreqwest-impersonate" | |||
| version = "0.4.8" | |||
| version = "0.4.7" | |||
| description = "HTTP client that can impersonate web browsers, mimicking their headers and `TLS/JA3/JA4/HTTP2` fingerprints" | |||
| optional = false | |||
| python-versions = ">=3.8" | |||
| files = [ | |||
| {file = "pyreqwest_impersonate-0.4.8-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:45cad57afe4e6f56078ed9a7a90d0dc839d19d3e7a70175c80af21017f383bfb"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1986600253baf38f25fd07b8bdc1903359c26e5d34beb7d7d084845554b5664d"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cca4e6e59b9ad0cd20bad6caed3ac96992cd9c1d3126ecdfcab2c0ac2b75376"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ab6b32544491ee655264dab86fc8a58e47c4f87d196b28022d4007faf971a50"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:64bd6299e7fc888bb7f7292cf3e29504c406e5d5d04afd37ca994ab8142d8ee4"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e914b650dd953b8d9b24ef56aa4ecbfc16e399227b68accd818f8bf159e0c558"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp310-none-win_amd64.whl", hash = "sha256:cb56a2149b0c4548a8e0158b071a943f33dae9b717f92b5c9ac34ccd1f5a958c"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f62620e023490902feca0109f306e122e427feff7d59e03ecd22c69a89452367"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:08d4c01d76da88cfe3d7d03b311b375ce3fb5a59130f93f0637bb755d6e56ff1"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6524e276bc460176c79d7ba4b9131d9db73c534586660371ebdf067749252a33"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a22863bc0aaf02ca2f5d76c8130929ae680b7d82dfc1c28c1ed5f306ff626928"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8cc82d57f6a91037e64a7aa9122f909576ef2a141a42ce599958ef9f8c4bc033"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:da8a053308210e44fd8349f07f45442a0691ac932f2881e98b05cf9ac404b091"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp311-none-win_amd64.whl", hash = "sha256:4baf3916c14364a815a64ead7f728afb61b37541933b2771f18dbb245029bb55"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:78db05deed0b32c9c75f2b3168a3a9b7d5e36487b218cb839bfe7e2a143450cb"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9af9446d605903c2b4e94621a9093f8d8a403729bc9cbfbcb62929f8238c838f"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c55890181d8d81e66cac25a95e215dc9680645d01e9091b64449d5407ad9bc6"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e69344e7ae9964502a8693da7ad77ebc3e1418ee197e2e394bc23c5d4970772a"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b5db5c957a10d8cc2815085ba0b8fe09245b2f94c2225d9653a854a03b4217e1"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:03c19c21f63f9c91c590c4bbcc32cc2d8066b508c683a1d163b8c7d9816a01d5"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp312-none-win_amd64.whl", hash = "sha256:0230610779129f74ff802c744643ce7589b1d07cba21d046fe3b574281c29581"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b8cb9471ab4b2fa7e80d3ac4e580249ff988d782f2938ad1f0428433652b170d"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8081a5ace2658be91519902bde9ddc5f94e1f850a39be196007a25e3da5bbfdc"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69eababfa3200459276acd780a0f3eaf41d1fe7c02bd169e714cba422055b5b9"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:632957fa671ebb841166e40913015de457225cb73600ef250c436c280e68bf45"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2ce7ddef334b4e5c68f5ea1da1d65f686b8d84f4443059d128e0f069d3fa499a"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6ce333d450b158d582e36317089a006440b4e66739a8e8849d170e4cb15e8c8d"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp38-none-win_amd64.whl", hash = "sha256:9d9c85ce19db92362854f534807e470f03e905f283a7de6826dc79b790a8788e"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:2503277f2a95a30e28e498570e2ed03ef4302f873054e8e21d6c0e607cbbc1d1"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8260395ef4ddae325e8b30cef0391adde7bd35e1a1decf8c729e26391f09b52d"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d8066b46d82bbaff5402d767e2f13d3449b8191c37bf8283e91d301a7159869"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9c42f6343cfbd6663fb53edc9eb9feb4ebf6186b284e22368adc1eeb6a33854"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ff534f491a059e74fb7f994876df86078b4b125dbecc53c098a298ecd55fa9c6"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5b8fbf73b3ac513ddadafd338d61f79cd2370f0691d9175b2b92a45920920d6b"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-cp39-none-win_amd64.whl", hash = "sha256:a26447c82665d0e361207c1a15e56b0ca54974aa6c1fdfa18c68f908dec78cbe"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24a16b8d55309f0af0db9d04ff442b0c91afccf078a94809e7c3a71747a5c214"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c8fada56465fc19179404cc9d5d5e1064f5dfe27405cb052f57a5b4fe06aed1"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:a3d48d5abc146fd804395713427d944757a99254350e6a651e7d776818074aee"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:475829fe9994c66258157a8d4adb1c038f44f79f901208ba656d547842337227"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ef1ec0e97623bc0e18469418cc4dd2c59a2d5fddcae944de61e13c0b46f910e"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91857b196de89e9b36d3f8629aa8772c0bbe7efef8334fe266956b1c192ec31c"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:63831e407487b8a21bb51f97cd86a616c291d5138f8caec16ab6019cf6423935"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c30e61de93bcd0a9d3ca226b1ae5475002afde61e9d85018a6a4a040eeb86567"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6c72c37b03bce9900f5dbb4f476af17253ec60c13bf7a7259f71a8dc1b036cb"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b1f1096165741b5c2178ab15b0eb09b5de16dd39b1cc135767d72471f0a69ce"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:70c940c0e4ef335e22a6c705b01f286ee44780b5909065d212d94d82ea2580cb"}, | |||
| {file = "pyreqwest_impersonate-0.4.8-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:81c06f21757602d85f16dbc1cbaee1121cd65455f65aed4c048b7dcda7be85c4"}, | |||
| {file = "pyreqwest_impersonate-0.4.8.tar.gz", hash = "sha256:1eba11d47bd17244c64fec1502cc26ee66cc5c8a3be131e408101ae2b455e5bc"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c175dfc429c4231a6ce03841630b236f50995ca613ff1eea26fa4c75c730b562"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b3f83c50cef2d5ed0a9246318fd3ef3bfeabe286d4eabf92df4835c05a0be7dc"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f34930113aa42f47e0542418f6a67bdb2c23fe0e2fa1866f60b29280a036b829"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88d2792df548b845edd409a3e4284f76cb4fc2510fe4a69fde9e39d54910b935"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b27622d5183185dc63bcab9a7dd1de566688c63b844812b1d9366da7c459a494"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b7bf13d49ef127e659ed134129336e94f7107023ed0138c81a46321b9a580428"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp310-none-win_amd64.whl", hash = "sha256:0cba006b076b85a875814a4b5dd8cb27f483ebeeb0de83984a3786060fe18e0d"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:370a8cb7a92b15749cbbe3ce7a9f09d35aac7d2a74505eb447f45419ea8ef2ff"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:33244ea10ccee08bac7a7ccdc3a8e6bef6e28f2466ed61de551fa24b76ee4b6a"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dba24fb6db822cbd9cbac32539893cc19cc06dd1820e03536e685b9fd2a2ffdd"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e001ed09fc364cc00578fd31c0ae44d543cf75daf06b2657c7a82dcd99336ce"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:608525535f078e85114fcd4eeba0f0771ffc7093c29208e9c0a55147502723bf"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:38daedba0fc997e29cbc25c684a42a04aed38bfbcf85d8f1ffe8f87314d5f72f"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp311-none-win_amd64.whl", hash = "sha256:d21f3e93ee0aecdc43d2914800bdf23501bde858d70ac7c0b06168f85f95bf22"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:5caeee29370a06a322ea6951730d21ec3c641ce46417fd2b5805b283564f2fef"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1c7aa4b428ed58370975d828a95eaf10561712e79a4e2eafca1746a4654a34a8"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:887249adcab35487a44a5428ccab2a6363642785b36649a732d5e649df568b8e"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60f932de8033c15323ba79a7470406ca8228e07aa60078dee5a18e89f0a9fc88"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a2e6332fd6d78623a22f4e747688fe9e6005b61b6f208936d5428d2a65d34b39"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:349b005eef323195685ba5cb2b6f302da0db481e59f03696ef57099f232f0c1f"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp312-none-win_amd64.whl", hash = "sha256:5620025ac138a10c46a9b14c91b6f58114d50063ff865a2d02ad632751b67b29"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:ebf954e09b3dc800a7576c7bde9827b00064531364c7817356c7cc58eb4b46b2"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:112d9561f136548bd67d31cadb6b78d4c31751e526e62e09c6e581c2f1711455"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05213f5f014ecc6732d859a0f51b3dff0424748cc6e2d0d9a42aa1f7108b4eaa"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10fa70529a60fc043650ce03481fab7714e7519c3b06f5e81c95206b8b60aec6"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:5b1288881eada1891db7e862c69b673fb159834a41f823b9b00fc52d0f096ccc"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:57ca562229c40615074f36e7f1ae5e57b8164f604eddb042132467c3a00fc2c5"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp38-none-win_amd64.whl", hash = "sha256:c098ef1333511ea9a43be9a818fcc0866bd2caa63cdc9cf4ab48450ace675646"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:39d961330190bf2d59983ad16dafb4b42d5adcdfe7531ad099c8f3ab53f8d906"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0d793591784b89953422b1efaa17460f57f6116de25b3e3065d9fa6cf220ef18"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:945116bb9ffb7e45a87e313f47de28c4da889b14bda620aebc5ba9c3600425cf"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b96a0955c49f346786ee997c755561fecf33b7886cecef861fe4db15c7b23ad3"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ed997197f907ccce9b86a75163b5e78743bc469d2ddcf8a22d4d90c2595573cb"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1f54788f6fb0ee8b31c1eaadba81fb003efb406a768844e2a1a50b855f4806bf"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-cp39-none-win_amd64.whl", hash = "sha256:0a679e81b0175dcc670a5ed47a5c184d7031ce16b5c58bf6b2c650ab9f2496c8"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bddb07e04e4006a2184608c44154983fdfa0ce2e230b0a7cec81cd4ba88dd07"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:780c53bfd2fbda151081165733fba5d5b1e17dd61999360110820942e351d011"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4bfa8ea763e6935e7660f8e885f1b00713b0d22f79a526c6ae6932b1856d1343"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:96b23b0688a63cbd6c39237461baa95162a69a15e9533789163aabcaf3f572fb"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b0eb56a8ad9d48952c613903d3ef6d8762d48dcec9807a509fee2a43e94ccac"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9330176494e260521ea0eaae349ca06128dc527400248c57b378597c470d335c"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:6343bc3392781ff470e5dc47fea9f77bb61d8831b07e901900d31c46decec5d1"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ecd598e16020a165029647ca80078311bf079e8317bf61c1b2fa824b8967e0db"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a38f3014ac31b08f5fb1ef4e1eb6c6e810f51f6cb815d0066ab3f34ec0f82d98"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db76a97068e5145f5b348037e09a91b2bed9c8eab92e79a3297b1306429fa839"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1596a8ef8f20bbfe606a90ad524946747846611c8633cbdfbad0a4298b538218"}, | |||
| {file = "pyreqwest_impersonate-0.4.7-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:dcee18bc350b3d3a0455422c446f1f03f00eb762b3e470066e2bc4664fd7110d"}, | |||
| {file = "pyreqwest_impersonate-0.4.7.tar.gz", hash = "sha256:74ba7e6e4f4f753da4f71a7e5dc12625b296bd7d6ddd64093a1fbff14d8d5df7"}, | |||
| ] | |||
| [package.extras] | |||
| @@ -6725,13 +6766,13 @@ test = ["coveralls", "pycodestyle", "pyflakes", "pylint", "pytest", "pytest-benc | |||
| [[package]] | |||
| name = "redis" | |||
| version = "5.0.6" | |||
| version = "5.0.5" | |||
| description = "Python client for Redis database and key-value store" | |||
| optional = false | |||
| python-versions = ">=3.7" | |||
| files = [ | |||
| {file = "redis-5.0.6-py3-none-any.whl", hash = "sha256:c0d6d990850c627bbf7be01c5c4cbaadf67b48593e913bb71c9819c30df37eee"}, | |||
| {file = "redis-5.0.6.tar.gz", hash = "sha256:38473cd7c6389ad3e44a91f4c3eaf6bcb8a9f746007f29bf4fb20824ff0b2197"}, | |||
| {file = "redis-5.0.5-py3-none-any.whl", hash = "sha256:30b47d4ebb6b7a0b9b40c1275a19b87bb6f46b3bed82a89012cf56dea4024ada"}, | |||
| {file = "redis-5.0.5.tar.gz", hash = "sha256:3417688621acf6ee368dec4a04dd95881be24efd34c79f00d31f62bb528800ae"}, | |||
| ] | |||
| [package.dependencies] | |||
| @@ -6951,28 +6992,28 @@ pyasn1 = ">=0.1.3" | |||
| [[package]] | |||
| name = "ruff" | |||
| version = "0.4.9" | |||
| version = "0.4.8" | |||
| description = "An extremely fast Python linter and code formatter, written in Rust." | |||
| optional = false | |||
| python-versions = ">=3.7" | |||
| files = [ | |||
| {file = "ruff-0.4.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b262ed08d036ebe162123170b35703aaf9daffecb698cd367a8d585157732991"}, | |||
| {file = "ruff-0.4.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:98ec2775fd2d856dc405635e5ee4ff177920f2141b8e2d9eb5bd6efd50e80317"}, | |||
| {file = "ruff-0.4.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4555056049d46d8a381f746680db1c46e67ac3b00d714606304077682832998e"}, | |||
| {file = "ruff-0.4.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e91175fbe48f8a2174c9aad70438fe9cb0a5732c4159b2a10a3565fea2d94cde"}, | |||
| {file = "ruff-0.4.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e8e7b95673f22e0efd3571fb5b0cf71a5eaaa3cc8a776584f3b2cc878e46bff"}, | |||
| {file = "ruff-0.4.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:2d45ddc6d82e1190ea737341326ecbc9a61447ba331b0a8962869fcada758505"}, | |||
| {file = "ruff-0.4.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78de3fdb95c4af084087628132336772b1c5044f6e710739d440fc0bccf4d321"}, | |||
| {file = "ruff-0.4.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:06b60f91bfa5514bb689b500a25ba48e897d18fea14dce14b48a0c40d1635893"}, | |||
| {file = "ruff-0.4.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88bffe9c6a454bf8529f9ab9091c99490578a593cc9f9822b7fc065ee0712a06"}, | |||
| {file = "ruff-0.4.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:673bddb893f21ab47a8334c8e0ea7fd6598ecc8e698da75bcd12a7b9d0a3206e"}, | |||
| {file = "ruff-0.4.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8c1aff58c31948cc66d0b22951aa19edb5af0a3af40c936340cd32a8b1ab7438"}, | |||
| {file = "ruff-0.4.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:784d3ec9bd6493c3b720a0b76f741e6c2d7d44f6b2be87f5eef1ae8cc1d54c84"}, | |||
| {file = "ruff-0.4.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:732dd550bfa5d85af8c3c6cbc47ba5b67c6aed8a89e2f011b908fc88f87649db"}, | |||
| {file = "ruff-0.4.9-py3-none-win32.whl", hash = "sha256:8064590fd1a50dcf4909c268b0e7c2498253273309ad3d97e4a752bb9df4f521"}, | |||
| {file = "ruff-0.4.9-py3-none-win_amd64.whl", hash = "sha256:e0a22c4157e53d006530c902107c7f550b9233e9706313ab57b892d7197d8e52"}, | |||
| {file = "ruff-0.4.9-py3-none-win_arm64.whl", hash = "sha256:5d5460f789ccf4efd43f265a58538a2c24dbce15dbf560676e430375f20a8198"}, | |||
| {file = "ruff-0.4.9.tar.gz", hash = "sha256:f1cb0828ac9533ba0135d148d214e284711ede33640465e706772645483427e3"}, | |||
| {file = "ruff-0.4.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7663a6d78f6adb0eab270fa9cf1ff2d28618ca3a652b60f2a234d92b9ec89066"}, | |||
| {file = "ruff-0.4.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eeceb78da8afb6de0ddada93112869852d04f1cd0f6b80fe464fd4e35c330913"}, | |||
| {file = "ruff-0.4.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aad360893e92486662ef3be0a339c5ca3c1b109e0134fcd37d534d4be9fb8de3"}, | |||
| {file = "ruff-0.4.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:284c2e3f3396fb05f5f803c9fffb53ebbe09a3ebe7dda2929ed8d73ded736deb"}, | |||
| {file = "ruff-0.4.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7354f921e3fbe04d2a62d46707e569f9315e1a613307f7311a935743c51a764"}, | |||
| {file = "ruff-0.4.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:72584676164e15a68a15778fd1b17c28a519e7a0622161eb2debdcdabdc71883"}, | |||
| {file = "ruff-0.4.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9678d5c9b43315f323af2233a04d747409d1e3aa6789620083a82d1066a35199"}, | |||
| {file = "ruff-0.4.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704977a658131651a22b5ebeb28b717ef42ac6ee3b11e91dc87b633b5d83142b"}, | |||
| {file = "ruff-0.4.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d05f8d6f0c3cce5026cecd83b7a143dcad503045857bc49662f736437380ad45"}, | |||
| {file = "ruff-0.4.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6ea874950daca5697309d976c9afba830d3bf0ed66887481d6bca1673fc5b66a"}, | |||
| {file = "ruff-0.4.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fc95aac2943ddf360376be9aa3107c8cf9640083940a8c5bd824be692d2216dc"}, | |||
| {file = "ruff-0.4.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:384154a1c3f4bf537bac69f33720957ee49ac8d484bfc91720cc94172026ceed"}, | |||
| {file = "ruff-0.4.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e9d5ce97cacc99878aa0d084c626a15cd21e6b3d53fd6f9112b7fc485918e1fa"}, | |||
| {file = "ruff-0.4.8-py3-none-win32.whl", hash = "sha256:6d795d7639212c2dfd01991259460101c22aabf420d9b943f153ab9d9706e6a9"}, | |||
| {file = "ruff-0.4.8-py3-none-win_amd64.whl", hash = "sha256:e14a3a095d07560a9d6769a72f781d73259655919d9b396c650fc98a8157555d"}, | |||
| {file = "ruff-0.4.8-py3-none-win_arm64.whl", hash = "sha256:14019a06dbe29b608f6b7cbcec300e3170a8d86efaddb7b23405cb7f7dcaf780"}, | |||
| {file = "ruff-0.4.8.tar.gz", hash = "sha256:16d717b1d57b2e2fd68bd0bf80fb43931b79d05a7131aa477d66fc40fbd86268"}, | |||
| ] | |||
| [[package]] | |||
| @@ -7248,18 +7289,18 @@ tornado = ["tornado (>=5)"] | |||
| [[package]] | |||
| name = "setuptools" | |||
| version = "70.1.0" | |||
| version = "70.0.0" | |||
| description = "Easily download, build, install, upgrade, and uninstall Python packages" | |||
| optional = false | |||
| python-versions = ">=3.8" | |||
| files = [ | |||
| {file = "setuptools-70.1.0-py3-none-any.whl", hash = "sha256:d9b8b771455a97c8a9f3ab3448ebe0b29b5e105f1228bba41028be116985a267"}, | |||
| {file = "setuptools-70.1.0.tar.gz", hash = "sha256:01a1e793faa5bd89abc851fa15d0a0db26f160890c7102cd8dce643e886b47f5"}, | |||
| {file = "setuptools-70.0.0-py3-none-any.whl", hash = "sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4"}, | |||
| {file = "setuptools-70.0.0.tar.gz", hash = "sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0"}, | |||
| ] | |||
| [package.extras] | |||
| docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] | |||
| testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.10.0)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] | |||
| testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] | |||
| [[package]] | |||
| name = "sgmllib3k" | |||
| @@ -7402,64 +7443,64 @@ files = [ | |||
| [[package]] | |||
| name = "sqlalchemy" | |||
| version = "2.0.31" | |||
| version = "2.0.30" | |||
| description = "Database Abstraction Library" | |||
| optional = false | |||
| python-versions = ">=3.7" | |||
| files = [ | |||
| {file = "SQLAlchemy-2.0.31-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f2a213c1b699d3f5768a7272de720387ae0122f1becf0901ed6eaa1abd1baf6c"}, | |||
| {file = "SQLAlchemy-2.0.31-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9fea3d0884e82d1e33226935dac990b967bef21315cbcc894605db3441347443"}, | |||
| {file = "SQLAlchemy-2.0.31-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3ad7f221d8a69d32d197e5968d798217a4feebe30144986af71ada8c548e9fa"}, | |||
| {file = "SQLAlchemy-2.0.31-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f2bee229715b6366f86a95d497c347c22ddffa2c7c96143b59a2aa5cc9eebbc"}, | |||
| {file = "SQLAlchemy-2.0.31-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cd5b94d4819c0c89280b7c6109c7b788a576084bf0a480ae17c227b0bc41e109"}, | |||
| {file = "SQLAlchemy-2.0.31-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:750900a471d39a7eeba57580b11983030517a1f512c2cb287d5ad0fcf3aebd58"}, | |||
| {file = "SQLAlchemy-2.0.31-cp310-cp310-win32.whl", hash = "sha256:7bd112be780928c7f493c1a192cd8c5fc2a2a7b52b790bc5a84203fb4381c6be"}, | |||
| {file = "SQLAlchemy-2.0.31-cp310-cp310-win_amd64.whl", hash = "sha256:5a48ac4d359f058474fadc2115f78a5cdac9988d4f99eae44917f36aa1476327"}, | |||
| {file = "SQLAlchemy-2.0.31-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f68470edd70c3ac3b6cd5c2a22a8daf18415203ca1b036aaeb9b0fb6f54e8298"}, | |||
| {file = "SQLAlchemy-2.0.31-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e2c38c2a4c5c634fe6c3c58a789712719fa1bf9b9d6ff5ebfce9a9e5b89c1ca"}, | |||
| {file = "SQLAlchemy-2.0.31-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd15026f77420eb2b324dcb93551ad9c5f22fab2c150c286ef1dc1160f110203"}, | |||
| {file = "SQLAlchemy-2.0.31-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2196208432deebdfe3b22185d46b08f00ac9d7b01284e168c212919891289396"}, | |||
| {file = "SQLAlchemy-2.0.31-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:352b2770097f41bff6029b280c0e03b217c2dcaddc40726f8f53ed58d8a85da4"}, | |||
| {file = "SQLAlchemy-2.0.31-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56d51ae825d20d604583f82c9527d285e9e6d14f9a5516463d9705dab20c3740"}, | |||
| {file = "SQLAlchemy-2.0.31-cp311-cp311-win32.whl", hash = "sha256:6e2622844551945db81c26a02f27d94145b561f9d4b0c39ce7bfd2fda5776dac"}, | |||
| {file = "SQLAlchemy-2.0.31-cp311-cp311-win_amd64.whl", hash = "sha256:ccaf1b0c90435b6e430f5dd30a5aede4764942a695552eb3a4ab74ed63c5b8d3"}, | |||
| {file = "SQLAlchemy-2.0.31-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3b74570d99126992d4b0f91fb87c586a574a5872651185de8297c6f90055ae42"}, | |||
| {file = "SQLAlchemy-2.0.31-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f77c4f042ad493cb8595e2f503c7a4fe44cd7bd59c7582fd6d78d7e7b8ec52c"}, | |||
| {file = "SQLAlchemy-2.0.31-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd1591329333daf94467e699e11015d9c944f44c94d2091f4ac493ced0119449"}, | |||
| {file = "SQLAlchemy-2.0.31-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74afabeeff415e35525bf7a4ecdab015f00e06456166a2eba7590e49f8db940e"}, | |||
| {file = "SQLAlchemy-2.0.31-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b9c01990d9015df2c6f818aa8f4297d42ee71c9502026bb074e713d496e26b67"}, | |||
| {file = "SQLAlchemy-2.0.31-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:66f63278db425838b3c2b1c596654b31939427016ba030e951b292e32b99553e"}, | |||
| {file = "SQLAlchemy-2.0.31-cp312-cp312-win32.whl", hash = "sha256:0b0f658414ee4e4b8cbcd4a9bb0fd743c5eeb81fc858ca517217a8013d282c96"}, | |||
| {file = "SQLAlchemy-2.0.31-cp312-cp312-win_amd64.whl", hash = "sha256:fa4b1af3e619b5b0b435e333f3967612db06351217c58bfb50cee5f003db2a5a"}, | |||
| {file = "SQLAlchemy-2.0.31-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f43e93057cf52a227eda401251c72b6fbe4756f35fa6bfebb5d73b86881e59b0"}, | |||
| {file = "SQLAlchemy-2.0.31-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d337bf94052856d1b330d5fcad44582a30c532a2463776e1651bd3294ee7e58b"}, | |||
| {file = "SQLAlchemy-2.0.31-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c06fb43a51ccdff3b4006aafee9fcf15f63f23c580675f7734245ceb6b6a9e05"}, | |||
| {file = "SQLAlchemy-2.0.31-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:b6e22630e89f0e8c12332b2b4c282cb01cf4da0d26795b7eae16702a608e7ca1"}, | |||
| {file = "SQLAlchemy-2.0.31-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:79a40771363c5e9f3a77f0e28b3302801db08040928146e6808b5b7a40749c88"}, | |||
| {file = "SQLAlchemy-2.0.31-cp37-cp37m-win32.whl", hash = "sha256:501ff052229cb79dd4c49c402f6cb03b5a40ae4771efc8bb2bfac9f6c3d3508f"}, | |||
| {file = "SQLAlchemy-2.0.31-cp37-cp37m-win_amd64.whl", hash = "sha256:597fec37c382a5442ffd471f66ce12d07d91b281fd474289356b1a0041bdf31d"}, | |||
| {file = "SQLAlchemy-2.0.31-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dc6d69f8829712a4fd799d2ac8d79bdeff651c2301b081fd5d3fe697bd5b4ab9"}, | |||
| {file = "SQLAlchemy-2.0.31-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:23b9fbb2f5dd9e630db70fbe47d963c7779e9c81830869bd7d137c2dc1ad05fb"}, | |||
| {file = "SQLAlchemy-2.0.31-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a21c97efcbb9f255d5c12a96ae14da873233597dfd00a3a0c4ce5b3e5e79704"}, | |||
| {file = "SQLAlchemy-2.0.31-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26a6a9837589c42b16693cf7bf836f5d42218f44d198f9343dd71d3164ceeeac"}, | |||
| {file = "SQLAlchemy-2.0.31-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc251477eae03c20fae8db9c1c23ea2ebc47331bcd73927cdcaecd02af98d3c3"}, | |||
| {file = "SQLAlchemy-2.0.31-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:2fd17e3bb8058359fa61248c52c7b09a97cf3c820e54207a50af529876451808"}, | |||
| {file = "SQLAlchemy-2.0.31-cp38-cp38-win32.whl", hash = "sha256:c76c81c52e1e08f12f4b6a07af2b96b9b15ea67ccdd40ae17019f1c373faa227"}, | |||
| {file = "SQLAlchemy-2.0.31-cp38-cp38-win_amd64.whl", hash = "sha256:4b600e9a212ed59355813becbcf282cfda5c93678e15c25a0ef896b354423238"}, | |||
| {file = "SQLAlchemy-2.0.31-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b6cf796d9fcc9b37011d3f9936189b3c8074a02a4ed0c0fbbc126772c31a6d4"}, | |||
| {file = "SQLAlchemy-2.0.31-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:78fe11dbe37d92667c2c6e74379f75746dc947ee505555a0197cfba9a6d4f1a4"}, | |||
| {file = "SQLAlchemy-2.0.31-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fc47dc6185a83c8100b37acda27658fe4dbd33b7d5e7324111f6521008ab4fe"}, | |||
| {file = "SQLAlchemy-2.0.31-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a41514c1a779e2aa9a19f67aaadeb5cbddf0b2b508843fcd7bafdf4c6864005"}, | |||
| {file = "SQLAlchemy-2.0.31-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:afb6dde6c11ea4525318e279cd93c8734b795ac8bb5dda0eedd9ebaca7fa23f1"}, | |||
| {file = "SQLAlchemy-2.0.31-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3f9faef422cfbb8fd53716cd14ba95e2ef655400235c3dfad1b5f467ba179c8c"}, | |||
| {file = "SQLAlchemy-2.0.31-cp39-cp39-win32.whl", hash = "sha256:fc6b14e8602f59c6ba893980bea96571dd0ed83d8ebb9c4479d9ed5425d562e9"}, | |||
| {file = "SQLAlchemy-2.0.31-cp39-cp39-win_amd64.whl", hash = "sha256:3cb8a66b167b033ec72c3812ffc8441d4e9f5f78f5e31e54dcd4c90a4ca5bebc"}, | |||
| {file = "SQLAlchemy-2.0.31-py3-none-any.whl", hash = "sha256:69f3e3c08867a8e4856e92d7afb618b95cdee18e0bc1647b77599722c9a28911"}, | |||
| {file = "SQLAlchemy-2.0.31.tar.gz", hash = "sha256:b607489dd4a54de56984a0c7656247504bd5523d9d0ba799aef59d4add009484"}, | |||
| ] | |||
| [package.dependencies] | |||
| greenlet = {version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} | |||
| {file = "SQLAlchemy-2.0.30-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3b48154678e76445c7ded1896715ce05319f74b1e73cf82d4f8b59b46e9c0ddc"}, | |||
| {file = "SQLAlchemy-2.0.30-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2753743c2afd061bb95a61a51bbb6a1a11ac1c44292fad898f10c9839a7f75b2"}, | |||
| {file = "SQLAlchemy-2.0.30-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7bfc726d167f425d4c16269a9a10fe8630ff6d14b683d588044dcef2d0f6be7"}, | |||
| {file = "SQLAlchemy-2.0.30-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4f61ada6979223013d9ab83a3ed003ded6959eae37d0d685db2c147e9143797"}, | |||
| {file = "SQLAlchemy-2.0.30-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a365eda439b7a00732638f11072907c1bc8e351c7665e7e5da91b169af794af"}, | |||
| {file = "SQLAlchemy-2.0.30-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bba002a9447b291548e8d66fd8c96a6a7ed4f2def0bb155f4f0a1309fd2735d5"}, | |||
| {file = "SQLAlchemy-2.0.30-cp310-cp310-win32.whl", hash = "sha256:0138c5c16be3600923fa2169532205d18891b28afa817cb49b50e08f62198bb8"}, | |||
| {file = "SQLAlchemy-2.0.30-cp310-cp310-win_amd64.whl", hash = "sha256:99650e9f4cf3ad0d409fed3eec4f071fadd032e9a5edc7270cd646a26446feeb"}, | |||
| {file = "SQLAlchemy-2.0.30-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:955991a09f0992c68a499791a753523f50f71a6885531568404fa0f231832aa0"}, | |||
| {file = "SQLAlchemy-2.0.30-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f69e4c756ee2686767eb80f94c0125c8b0a0b87ede03eacc5c8ae3b54b99dc46"}, | |||
| {file = "SQLAlchemy-2.0.30-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69c9db1ce00e59e8dd09d7bae852a9add716efdc070a3e2068377e6ff0d6fdaa"}, | |||
| {file = "SQLAlchemy-2.0.30-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1429a4b0f709f19ff3b0cf13675b2b9bfa8a7e79990003207a011c0db880a13"}, | |||
| {file = "SQLAlchemy-2.0.30-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:efedba7e13aa9a6c8407c48facfdfa108a5a4128e35f4c68f20c3407e4376aa9"}, | |||
| {file = "SQLAlchemy-2.0.30-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:16863e2b132b761891d6c49f0a0f70030e0bcac4fd208117f6b7e053e68668d0"}, | |||
| {file = "SQLAlchemy-2.0.30-cp311-cp311-win32.whl", hash = "sha256:2ecabd9ccaa6e914e3dbb2aa46b76dede7eadc8cbf1b8083c94d936bcd5ffb49"}, | |||
| {file = "SQLAlchemy-2.0.30-cp311-cp311-win_amd64.whl", hash = "sha256:0b3f4c438e37d22b83e640f825ef0f37b95db9aa2d68203f2c9549375d0b2260"}, | |||
| {file = "SQLAlchemy-2.0.30-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5a79d65395ac5e6b0c2890935bad892eabb911c4aa8e8015067ddb37eea3d56c"}, | |||
| {file = "SQLAlchemy-2.0.30-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9a5baf9267b752390252889f0c802ea13b52dfee5e369527da229189b8bd592e"}, | |||
| {file = "SQLAlchemy-2.0.30-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cb5a646930c5123f8461f6468901573f334c2c63c795b9af350063a736d0134"}, | |||
| {file = "SQLAlchemy-2.0.30-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:296230899df0b77dec4eb799bcea6fbe39a43707ce7bb166519c97b583cfcab3"}, | |||
| {file = "SQLAlchemy-2.0.30-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c62d401223f468eb4da32627bffc0c78ed516b03bb8a34a58be54d618b74d472"}, | |||
| {file = "SQLAlchemy-2.0.30-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3b69e934f0f2b677ec111b4d83f92dc1a3210a779f69bf905273192cf4ed433e"}, | |||
| {file = "SQLAlchemy-2.0.30-cp312-cp312-win32.whl", hash = "sha256:77d2edb1f54aff37e3318f611637171e8ec71472f1fdc7348b41dcb226f93d90"}, | |||
| {file = "SQLAlchemy-2.0.30-cp312-cp312-win_amd64.whl", hash = "sha256:b6c7ec2b1f4969fc19b65b7059ed00497e25f54069407a8701091beb69e591a5"}, | |||
| {file = "SQLAlchemy-2.0.30-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5a8e3b0a7e09e94be7510d1661339d6b52daf202ed2f5b1f9f48ea34ee6f2d57"}, | |||
| {file = "SQLAlchemy-2.0.30-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b60203c63e8f984df92035610c5fb76d941254cf5d19751faab7d33b21e5ddc0"}, | |||
| {file = "SQLAlchemy-2.0.30-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1dc3eabd8c0232ee8387fbe03e0a62220a6f089e278b1f0aaf5e2d6210741ad"}, | |||
| {file = "SQLAlchemy-2.0.30-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:40ad017c672c00b9b663fcfcd5f0864a0a97828e2ee7ab0c140dc84058d194cf"}, | |||
| {file = "SQLAlchemy-2.0.30-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e42203d8d20dc704604862977b1470a122e4892791fe3ed165f041e4bf447a1b"}, | |||
| {file = "SQLAlchemy-2.0.30-cp37-cp37m-win32.whl", hash = "sha256:2a4f4da89c74435f2bc61878cd08f3646b699e7d2eba97144030d1be44e27584"}, | |||
| {file = "SQLAlchemy-2.0.30-cp37-cp37m-win_amd64.whl", hash = "sha256:b6bf767d14b77f6a18b6982cbbf29d71bede087edae495d11ab358280f304d8e"}, | |||
| {file = "SQLAlchemy-2.0.30-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc0c53579650a891f9b83fa3cecd4e00218e071d0ba00c4890f5be0c34887ed3"}, | |||
| {file = "SQLAlchemy-2.0.30-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:311710f9a2ee235f1403537b10c7687214bb1f2b9ebb52702c5aa4a77f0b3af7"}, | |||
| {file = "SQLAlchemy-2.0.30-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:408f8b0e2c04677e9c93f40eef3ab22f550fecb3011b187f66a096395ff3d9fd"}, | |||
| {file = "SQLAlchemy-2.0.30-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37a4b4fb0dd4d2669070fb05b8b8824afd0af57587393015baee1cf9890242d9"}, | |||
| {file = "SQLAlchemy-2.0.30-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a943d297126c9230719c27fcbbeab57ecd5d15b0bd6bfd26e91bfcfe64220621"}, | |||
| {file = "SQLAlchemy-2.0.30-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0a089e218654e740a41388893e090d2e2c22c29028c9d1353feb38638820bbeb"}, | |||
| {file = "SQLAlchemy-2.0.30-cp38-cp38-win32.whl", hash = "sha256:fa561138a64f949f3e889eb9ab8c58e1504ab351d6cf55259dc4c248eaa19da6"}, | |||
| {file = "SQLAlchemy-2.0.30-cp38-cp38-win_amd64.whl", hash = "sha256:7d74336c65705b986d12a7e337ba27ab2b9d819993851b140efdf029248e818e"}, | |||
| {file = "SQLAlchemy-2.0.30-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae8c62fe2480dd61c532ccafdbce9b29dacc126fe8be0d9a927ca3e699b9491a"}, | |||
| {file = "SQLAlchemy-2.0.30-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2383146973a15435e4717f94c7509982770e3e54974c71f76500a0136f22810b"}, | |||
| {file = "SQLAlchemy-2.0.30-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8409de825f2c3b62ab15788635ccaec0c881c3f12a8af2b12ae4910a0a9aeef6"}, | |||
| {file = "SQLAlchemy-2.0.30-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0094c5dc698a5f78d3d1539853e8ecec02516b62b8223c970c86d44e7a80f6c7"}, | |||
| {file = "SQLAlchemy-2.0.30-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:edc16a50f5e1b7a06a2dcc1f2205b0b961074c123ed17ebda726f376a5ab0953"}, | |||
| {file = "SQLAlchemy-2.0.30-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f7703c2010355dd28f53deb644a05fc30f796bd8598b43f0ba678878780b6e4c"}, | |||
| {file = "SQLAlchemy-2.0.30-cp39-cp39-win32.whl", hash = "sha256:1f9a727312ff6ad5248a4367358e2cf7e625e98b1028b1d7ab7b806b7d757513"}, | |||
| {file = "SQLAlchemy-2.0.30-cp39-cp39-win_amd64.whl", hash = "sha256:a0ef36b28534f2a5771191be6edb44cc2673c7b2edf6deac6562400288664221"}, | |||
| {file = "SQLAlchemy-2.0.30-py3-none-any.whl", hash = "sha256:7108d569d3990c71e26a42f60474b4c02c8586c4681af5fd67e51a044fdea86a"}, | |||
| {file = "SQLAlchemy-2.0.30.tar.gz", hash = "sha256:2b1708916730f4830bc69d6f49d37f7698b5bd7530aca7f04f785f8849e95255"}, | |||
| ] | |||
| [package.dependencies] | |||
| greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} | |||
| typing-extensions = ">=4.6.0" | |||
| [package.extras] | |||
| @@ -7579,13 +7620,13 @@ test = ["pytest", "tornado (>=4.5)", "typeguard"] | |||
| [[package]] | |||
| name = "tencentcloud-sdk-python-common" | |||
| version = "3.0.1172" | |||
| version = "3.0.1166" | |||
| description = "Tencent Cloud Common SDK for Python" | |||
| optional = false | |||
| python-versions = "*" | |||
| files = [ | |||
| {file = "tencentcloud-sdk-python-common-3.0.1172.tar.gz", hash = "sha256:37b3b9f4a53caa070379afb6910ac989823eacd35169701405ddafb12ea14e9e"}, | |||
| {file = "tencentcloud_sdk_python_common-3.0.1172-py2.py3-none-any.whl", hash = "sha256:8915ddc713bcd7512e9d528ec36ad3e527990ab06f5e89f63941f2e5c23f4675"}, | |||
| {file = "tencentcloud-sdk-python-common-3.0.1166.tar.gz", hash = "sha256:7e20a98f94cd82302f4f9a6c28cd1d1d90e1043767a9ff98eebe10def84ec7b9"}, | |||
| {file = "tencentcloud_sdk_python_common-3.0.1166-py2.py3-none-any.whl", hash = "sha256:e230159b275427c0ff95bd708df2ad625ab4a45ff495d9a89d4199d535ce68e9"}, | |||
| ] | |||
| [package.dependencies] | |||
| @@ -7593,17 +7634,17 @@ requests = ">=2.16.0" | |||
| [[package]] | |||
| name = "tencentcloud-sdk-python-hunyuan" | |||
| version = "3.0.1172" | |||
| version = "3.0.1166" | |||
| description = "Tencent Cloud Hunyuan SDK for Python" | |||
| optional = false | |||
| python-versions = "*" | |||
| files = [ | |||
| {file = "tencentcloud-sdk-python-hunyuan-3.0.1172.tar.gz", hash = "sha256:ae83b39c9da7302b10c4bffb7672ae95be72945b43e06a0b1ae9ac23bac2d43b"}, | |||
| {file = "tencentcloud_sdk_python_hunyuan-3.0.1172-py2.py3-none-any.whl", hash = "sha256:443908059ef1a00a798b7387f85e210d89c65b4f9db73629e53b3ec609b8528b"}, | |||
| {file = "tencentcloud-sdk-python-hunyuan-3.0.1166.tar.gz", hash = "sha256:9be5f6ca91facdc40da91a0b9c300a0c54a83cf3792305d0e83c4216ca2a2e18"}, | |||
| {file = "tencentcloud_sdk_python_hunyuan-3.0.1166-py2.py3-none-any.whl", hash = "sha256:572d41d034a68a898ac74dd4d92f6b764cdb2b993cf71e6fbc52a40e65b0b4b4"}, | |||
| ] | |||
| [package.dependencies] | |||
| tencentcloud-sdk-python-common = "3.0.1172" | |||
| tencentcloud-sdk-python-common = "3.0.1166" | |||
| [[package]] | |||
| name = "threadpoolctl" | |||
| @@ -9040,4 +9081,4 @@ testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] | |||
| [metadata] | |||
| lock-version = "2.0" | |||
| python-versions = "^3.10" | |||
| content-hash = "cac196b2ddb59d7873fb3380d87b622d002613d6dc1d271a5c15e46817a38c55" | |||
| content-hash = "59a9d41baa5454de6c9032c8d9ca81d79e5a7137c654b8765034aebb8ec29793" | |||
| @@ -187,6 +187,8 @@ tcvectordb = "1.3.2" | |||
| chromadb = "~0.5.1" | |||
| tenacity = "~8.3.0" | |||
| cos-python-sdk-v5 = "1.9.30" | |||
| langfuse = "^2.36.1" | |||
| langsmith = "^0.1.77" | |||
| novita-client = "^0.5.6" | |||
| opensearch-py = "2.4.0" | |||
| oracledb = "~2.2.1" | |||
| @@ -18,7 +18,8 @@ class AppGenerateService: | |||
| user: Union[Account, EndUser], | |||
| args: Any, | |||
| invoke_from: InvokeFrom, | |||
| streaming: bool = True) -> Union[dict, Generator[dict, None, None]]: | |||
| streaming: bool = True, | |||
| ) -> Union[dict, Generator[dict, None, None]]: | |||
| """ | |||
| App Content Generate | |||
| :param app_model: app model | |||
| @@ -96,7 +96,9 @@ class ConversationService: | |||
| # generate conversation name | |||
| try: | |||
| name = LLMGenerator.generate_conversation_name(app_model.tenant_id, message.query) | |||
| name = LLMGenerator.generate_conversation_name( | |||
| app_model.tenant_id, message.query, conversation.id, app_model.id | |||
| ) | |||
| conversation.name = name | |||
| except: | |||
| pass | |||
| @@ -7,6 +7,8 @@ from core.llm_generator.llm_generator import LLMGenerator | |||
| from core.memory.token_buffer_memory import TokenBufferMemory | |||
| from core.model_manager import ModelManager | |||
| from core.model_runtime.entities.model_entities import ModelType | |||
| from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName | |||
| from core.ops.utils import measure_time | |||
| from extensions.ext_database import db | |||
| from libs.infinite_scroll_pagination import InfiniteScrollPagination | |||
| from models.account import Account | |||
| @@ -262,9 +264,21 @@ class MessageService: | |||
| message_limit=3, | |||
| ) | |||
| questions = LLMGenerator.generate_suggested_questions_after_answer( | |||
| tenant_id=app_model.tenant_id, | |||
| histories=histories | |||
| with measure_time() as timer: | |||
| questions = LLMGenerator.generate_suggested_questions_after_answer( | |||
| tenant_id=app_model.tenant_id, | |||
| histories=histories | |||
| ) | |||
| # get tracing instance | |||
| trace_manager = TraceQueueManager(app_id=app_model.id) | |||
| trace_manager.add_trace_task( | |||
| TraceTask( | |||
| TraceTaskName.SUGGESTED_QUESTION_TRACE, | |||
| message_id=message_id, | |||
| suggested_question=questions, | |||
| timer=timer | |||
| ) | |||
| ) | |||
| return questions | |||
| @@ -0,0 +1,130 @@ | |||
| from core.ops.ops_trace_manager import OpsTraceManager, provider_config_map | |||
| from extensions.ext_database import db | |||
| from models.model import App, TraceAppConfig | |||
| class OpsService: | |||
| @classmethod | |||
| def get_tracing_app_config(cls, app_id: str, tracing_provider: str): | |||
| """ | |||
| Get tracing app config | |||
| :param app_id: app id | |||
| :param tracing_provider: tracing provider | |||
| :return: | |||
| """ | |||
| trace_config_data: TraceAppConfig = db.session.query(TraceAppConfig).filter( | |||
| TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider | |||
| ).first() | |||
| if not trace_config_data: | |||
| return None | |||
| # decrypt_token and obfuscated_token | |||
| tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id | |||
| decrypt_tracing_config = OpsTraceManager.decrypt_tracing_config(tenant_id, tracing_provider, trace_config_data.tracing_config) | |||
| decrypt_tracing_config = OpsTraceManager.obfuscated_decrypt_token(tracing_provider, decrypt_tracing_config) | |||
| trace_config_data.tracing_config = decrypt_tracing_config | |||
| return trace_config_data.to_dict() | |||
| @classmethod | |||
| def create_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_config: dict): | |||
| """ | |||
| Create tracing app config | |||
| :param app_id: app id | |||
| :param tracing_provider: tracing provider | |||
| :param tracing_config: tracing config | |||
| :return: | |||
| """ | |||
| if tracing_provider not in provider_config_map.keys() and tracing_provider != None: | |||
| return {"error": f"Invalid tracing provider: {tracing_provider}"} | |||
| config_class, other_keys = provider_config_map[tracing_provider]['config_class'], \ | |||
| provider_config_map[tracing_provider]['other_keys'] | |||
| default_config_instance = config_class(**tracing_config) | |||
| for key in other_keys: | |||
| if key in tracing_config and tracing_config[key] == "": | |||
| tracing_config[key] = getattr(default_config_instance, key, None) | |||
| # api check | |||
| if not OpsTraceManager.check_trace_config_is_effective(tracing_config, tracing_provider): | |||
| return {"error": "Invalid Credentials"} | |||
| # check if trace config already exists | |||
| trace_config_data: TraceAppConfig = db.session.query(TraceAppConfig).filter( | |||
| TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider | |||
| ).first() | |||
| if trace_config_data: | |||
| return None | |||
| # get tenant id | |||
| tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id | |||
| tracing_config = OpsTraceManager.encrypt_tracing_config(tenant_id, tracing_provider, tracing_config) | |||
| trace_config_data = TraceAppConfig( | |||
| app_id=app_id, | |||
| tracing_provider=tracing_provider, | |||
| tracing_config=tracing_config, | |||
| ) | |||
| db.session.add(trace_config_data) | |||
| db.session.commit() | |||
| return {"result": "success"} | |||
| @classmethod | |||
| def update_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_config: dict): | |||
| """ | |||
| Update tracing app config | |||
| :param app_id: app id | |||
| :param tracing_provider: tracing provider | |||
| :param tracing_config: tracing config | |||
| :return: | |||
| """ | |||
| if tracing_provider not in provider_config_map.keys(): | |||
| raise ValueError(f"Invalid tracing provider: {tracing_provider}") | |||
| # check if trace config already exists | |||
| current_trace_config = db.session.query(TraceAppConfig).filter( | |||
| TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider | |||
| ).first() | |||
| if not current_trace_config: | |||
| return None | |||
| # get tenant id | |||
| tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id | |||
| tracing_config = OpsTraceManager.encrypt_tracing_config( | |||
| tenant_id, tracing_provider, tracing_config, current_trace_config.tracing_config | |||
| ) | |||
| # api check | |||
| # decrypt_token | |||
| decrypt_tracing_config = OpsTraceManager.decrypt_tracing_config(tenant_id, tracing_provider, tracing_config) | |||
| if not OpsTraceManager.check_trace_config_is_effective(decrypt_tracing_config, tracing_provider): | |||
| raise ValueError("Invalid Credentials") | |||
| current_trace_config.tracing_config = tracing_config | |||
| db.session.commit() | |||
| return current_trace_config.to_dict() | |||
| @classmethod | |||
| def delete_tracing_app_config(cls, app_id: str, tracing_provider: str): | |||
| """ | |||
| Delete tracing app config | |||
| :param app_id: app id | |||
| :param tracing_provider: tracing provider | |||
| :return: | |||
| """ | |||
| trace_config = db.session.query(TraceAppConfig).filter( | |||
| TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider | |||
| ).first() | |||
| if not trace_config: | |||
| return None | |||
| db.session.delete(trace_config) | |||
| db.session.commit() | |||
| return True | |||