@@ -41,7 +41,7 @@ class ImagePromptMessageFile(PromptMessageFile):
 class LCHumanMessageWithFiles(HumanMessage):
-    # content: Union[str, List[Union[str, Dict]]]
+    # content: Union[str, list[Union[str, Dict]]]
     content: str
     files: list[PromptMessageFile]
@@ -161,7 +161,7 @@ In `llm.py`, create an Anthropic LLM class, which we name `AnthropicLargeLanguag
 ```python
 def _invoke(self, model: str, credentials: dict,
             prompt_messages: list[PromptMessage], model_parameters: dict,
-            tools: Optional[list[PromptMessageTool]] = None, stop: Optional[List[str]] = None,
+            tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
             stream: bool = True, user: Optional[str] = None) \
         -> Union[LLMResult, Generator]:
     """
@@ -127,7 +127,7 @@ provider_credential_schema:
 ```python
 def _invoke(self, model: str, credentials: dict,
             prompt_messages: list[PromptMessage], model_parameters: dict,
-            tools: Optional[list[PromptMessageTool]] = None, stop: Optional[List[str]] = None,
+            tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
             stream: bool = True, user: Optional[str] = None) \
         -> Union[LLMResult, Generator]:
     """
@@ -128,7 +128,7 @@ class XinferenceProvider(Provider):
 ```python
 def _invoke(self, model: str, credentials: dict,
             prompt_messages: list[PromptMessage], model_parameters: dict,
-            tools: Optional[list[PromptMessageTool]] = None, stop: Optional[List[str]] = None,
+            tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
             stream: bool = True, user: Optional[str] = None) \
         -> Union[LLMResult, Generator]:
     """
@@ -77,7 +77,7 @@ pricing: # 价格信息
 ```python
 def _invoke(self, model: str, credentials: dict,
             prompt_messages: list[PromptMessage], model_parameters: dict,
-            tools: Optional[list[PromptMessageTool]] = None, stop: Optional[List[str]] = None,
+            tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
             stream: bool = True, user: Optional[str] = None) \
         -> Union[LLMResult, Generator]:
     """
@@ -243,7 +243,7 @@ class TenantService:
         return ta

     @staticmethod
-    def get_join_tenants(account: Account) -> List[Tenant]:
+    def get_join_tenants(account: Account) -> list[Tenant]:
         """Get account join tenants"""
         return db.session.query(Tenant).join(
             TenantAccountJoin, Tenant.id == TenantAccountJoin.tenant_id
@@ -282,7 +282,7 @@ class TenantService:
         account.current_tenant_id = tenant_account_join.tenant_id

     @staticmethod
-    def get_tenant_members(tenant: Tenant) -> List[Account]:
+    def get_tenant_members(tenant: Tenant) -> list[Account]:
         """Get tenant members"""
         query = (
             db.session.query(Account, TenantAccountJoin.role)
@@ -303,7 +303,7 @@ class TenantService:
         return updated_accounts

     @staticmethod
-    def has_roles(tenant: Tenant, roles: List[TenantAccountJoinRole]) -> bool:
+    def has_roles(tenant: Tenant, roles: list[TenantAccountJoinRole]) -> bool:
         """Check if user has any of the given roles for a tenant"""
         if not all(isinstance(role, TenantAccountJoinRole) for role in roles):
             raise ValueError('all roles must be TenantAccountJoinRole')
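
Every hunk above applies the same pattern: PEP 585 built-in generics (`list[...]`) replace `typing.List[...]`, which requires Python 3.9+, while `Optional` and `Union` continue to be imported from `typing`. The sketch below is a minimal, standalone illustration of that convention and is not part of the diff; `get_names` and its parameters are hypothetical names used only for the example.

```python
# Illustrative only -- not part of the patch above. Shows the typing convention
# the diff standardizes on: built-in generics (PEP 585, Python 3.9+) for
# containers, with Optional/Union still imported from typing.
from typing import Optional, Union

# Before: from typing import Dict, List
#         def get_names(records: List[Dict[str, str]], ...) -> List[str]: ...

def get_names(records: list[dict[str, str]],
              default: Optional[str] = None) -> list[Union[str, None]]:
    """Collect the 'name' field from each record, falling back to `default`."""
    return [record.get("name", default) for record in records]
```

On Python 3.10+ the `Optional`/`Union` imports could also be replaced with `str | None` syntax, but the hunks above keep them, so the sketch does as well.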