@@ -191,6 +191,22 @@ class LLMNode(BaseNode[LLMNodeData]):
                     # deduct quota
                     self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
                     break
+            outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason}
+
+            yield RunCompletedEvent(
+                run_result=NodeRunResult(
+                    status=WorkflowNodeExecutionStatus.SUCCEEDED,
+                    inputs=node_inputs,
+                    process_data=process_data,
+                    outputs=outputs,
+                    metadata={
+                        NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens,
+                        NodeRunMetadataKey.TOTAL_PRICE: usage.total_price,
+                        NodeRunMetadataKey.CURRENCY: usage.currency,
+                    },
+                    llm_usage=usage,
+                )
+            )
         except LLMNodeError as e:
             yield RunCompletedEvent(
                 run_result=NodeRunResult(
@@ -211,23 +227,6 @@ class LLMNode(BaseNode[LLMNodeData]):
                 )
             )
 
-        outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason}
-
-        yield RunCompletedEvent(
-            run_result=NodeRunResult(
-                status=WorkflowNodeExecutionStatus.SUCCEEDED,
-                inputs=node_inputs,
-                process_data=process_data,
-                outputs=outputs,
-                metadata={
-                    NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens,
-                    NodeRunMetadataKey.TOTAL_PRICE: usage.total_price,
-                    NodeRunMetadataKey.CURRENCY: usage.currency,
-                },
-                llm_usage=usage,
-            )
-        )
     def _invoke_llm(
         self,
         node_data_model: ModelConfig,