@@ -15,11 +15,11 @@ def handle(sender, **kwargs):
     app_dataset_joins = db.session.query(AppDatasetJoin).filter(AppDatasetJoin.app_id == app.id).all()

-    removed_dataset_ids: set[int] = set()
+    removed_dataset_ids: set[str] = set()
     if not app_dataset_joins:
         added_dataset_ids = dataset_ids
     else:
-        old_dataset_ids: set[int] = set()
+        old_dataset_ids: set[str] = set()
         old_dataset_ids.update(app_dataset_join.dataset_id for app_dataset_join in app_dataset_joins)

         added_dataset_ids = dataset_ids - old_dataset_ids
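Aside: the int-to-str change matters because dataset IDs are UUID strings, and the handler's add/remove bookkeeping is plain set difference. A minimal sketch of that logic, with hypothetical stand-in IDs:

    # Hypothetical short IDs; real values are UUID strings.
    old_dataset_ids: set[str] = {"a1", "b2"}   # from existing AppDatasetJoin rows
    dataset_ids: set[str] = {"b2", "c3"}       # from the updated app model config

    added_dataset_ids = dataset_ids - old_dataset_ids    # {"c3"}: joins to insert
    removed_dataset_ids = old_dataset_ids - dataset_ids  # {"a1"}: joins to delete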
@@ -39,8 +39,8 @@ def handle(sender, **kwargs):
     db.session.commit()


-def get_dataset_ids_from_model_config(app_model_config: AppModelConfig) -> set[int]:
-    dataset_ids: set[int] = set()
+def get_dataset_ids_from_model_config(app_model_config: AppModelConfig) -> set[str]:
+    dataset_ids: set[str] = set()
     if not app_model_config:
         return dataset_ids
@@ -17,11 +17,11 @@ def handle(sender, **kwargs):
     dataset_ids = get_dataset_ids_from_workflow(published_workflow)
     app_dataset_joins = db.session.query(AppDatasetJoin).filter(AppDatasetJoin.app_id == app.id).all()

-    removed_dataset_ids: set[int] = set()
+    removed_dataset_ids: set[str] = set()
     if not app_dataset_joins:
         added_dataset_ids = dataset_ids
     else:
-        old_dataset_ids: set[int] = set()
+        old_dataset_ids: set[str] = set()
         old_dataset_ids.update(app_dataset_join.dataset_id for app_dataset_join in app_dataset_joins)

         added_dataset_ids = dataset_ids - old_dataset_ids
@@ -41,8 +41,8 @@ def handle(sender, **kwargs):
     db.session.commit()


-def get_dataset_ids_from_workflow(published_workflow: Workflow) -> set[int]:
-    dataset_ids: set[int] = set()
+def get_dataset_ids_from_workflow(published_workflow: Workflow) -> set[str]:
+    dataset_ids: set[str] = set()
     graph = published_workflow.graph_dict
     if not graph:
         return dataset_ids
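For context, the next hunk iterates over knowledge-retrieval nodes pulled out of graph_dict. A hedged sketch of that filtering step, assuming the usual {"nodes": [...]} graph shape with the node type and dataset_ids stored under "data":

    graph = {
        "nodes": [
            {"data": {"type": "knowledge-retrieval", "dataset_ids": ["a1", "b2"]}},
            {"data": {"type": "llm"}},
        ]
    }
    knowledge_retrieval_nodes = [
        node for node in graph.get("nodes", [])
        if node.get("data", {}).get("type") == "knowledge-retrieval"
    ]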
@@ -60,7 +60,7 @@ def get_dataset_ids_from_workflow(published_workflow: Workflow) -> set[int]:
     for node in knowledge_retrieval_nodes:
         try:
             node_data = KnowledgeRetrievalNodeData(**node.get("data", {}))
-            dataset_ids.update(int(dataset_id) for dataset_id in node_data.dataset_ids)
+            dataset_ids.update(dataset_id for dataset_id in node_data.dataset_ids)
         except Exception as e:
             continue
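Dropping the int() cast is the behavioral fix in this hunk: UUID strings are never valid integers, so the cast raised ValueError on every node, and the bare except then skipped each one, leaving dataset_ids empty. A quick demonstration:

    try:
        int("1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d")  # hypothetical dataset ID
    except ValueError:
        pass  # the path every real dataset ID took before this change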
@@ -139,7 +139,7 @@ class AudioService:
                 return Response(stream_with_context(response), content_type="audio/mpeg")
             return response
         else:
-            if not text:
+            if text is None:
                 raise ValueError("Text is required")
             response = invoke_tts(text, app_model, voice)
             if isinstance(response, Generator):
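The new guard distinguishes a missing value from an empty one: not text is also true for "", so an empty-but-present string used to raise as well, while text is None only rejects a value that was never supplied:

    text = ""
    assert not text          # old check: empty string rejected with ValueError
    assert text is not None  # new check: value is present, so TTS proceeds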
@@ -452,7 +452,7 @@ class DatasetService:
 class DocumentService:
-    DEFAULT_RULES = {
+    DEFAULT_RULES: dict[str, Any] = {
         "mode": "custom",
         "rules": {
             "pre_processing_rules": [
@@ -466,7 +466,7 @@ class DocumentService:
         },
     }

-    DOCUMENT_METADATA_SCHEMA = {
+    DOCUMENT_METADATA_SCHEMA: dict[str, Any] = {
         "book": {
             "title": str,
             "language": str,