Du kan inte välja fler än 25 ämnen. Ämnen måste starta med en bokstav eller siffra, kan innehålla bindestreck ('-') och vara max 35 tecken långa.

validation_utils.py 24KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636
  1. #
  2. # Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. from collections import Counter
  17. from typing import Annotated, Any, Literal
  18. from uuid import UUID
  19. from flask import Request
  20. from pydantic import (
  21. BaseModel,
  22. ConfigDict,
  23. Field,
  24. StringConstraints,
  25. ValidationError,
  26. field_validator,
  27. )
  28. from pydantic_core import PydanticCustomError
  29. from werkzeug.exceptions import BadRequest, UnsupportedMediaType
  30. from api.constants import DATASET_NAME_LIMIT
  31. def validate_and_parse_json_request(request: Request, validator: type[BaseModel], *, extras: dict[str, Any] | None = None, exclude_unset: bool = False) -> tuple[dict[str, Any] | None, str | None]:
  32. """
  33. Validates and parses JSON requests through a multi-stage validation pipeline.
  34. Implements a four-stage validation process:
  35. 1. Content-Type verification (must be application/json)
  36. 2. JSON syntax validation
  37. 3. Payload structure type checking
  38. 4. Pydantic model validation with error formatting
  39. Args:
  40. request (Request): Flask request object containing HTTP payload
  41. validator (type[BaseModel]): Pydantic model class for data validation
  42. extras (dict[str, Any] | None): Additional fields to merge into payload
  43. before validation. These fields will be removed from the final output
  44. exclude_unset (bool): Whether to exclude fields that have not been explicitly set
  45. Returns:
  46. tuple[Dict[str, Any] | None, str | None]:
  47. - First element:
  48. - Validated dictionary on success
  49. - None on validation failure
  50. - Second element:
  51. - None on success
  52. - Diagnostic error message on failure
  53. Raises:
  54. UnsupportedMediaType: When Content-Type header is not application/json
  55. BadRequest: For structural JSON syntax errors
  56. ValidationError: When payload violates Pydantic schema rules
  57. Examples:
  58. >>> validate_and_parse_json_request(valid_request, DatasetSchema)
  59. ({"name": "Dataset1", "format": "csv"}, None)
  60. >>> validate_and_parse_json_request(xml_request, DatasetSchema)
  61. (None, "Unsupported content type: Expected application/json, got text/xml")
  62. >>> validate_and_parse_json_request(bad_json_request, DatasetSchema)
  63. (None, "Malformed JSON syntax: Missing commas/brackets or invalid encoding")
  64. Notes:
  65. 1. Validation Priority:
  66. - Content-Type verification precedes JSON parsing
  67. - Structural validation occurs before schema validation
  68. 2. Extra fields added via `extras` parameter are automatically removed
  69. from the final output after validation
  70. """
  71. try:
  72. payload = request.get_json() or {}
  73. except UnsupportedMediaType:
  74. return None, f"Unsupported content type: Expected application/json, got {request.content_type}"
  75. except BadRequest:
  76. return None, "Malformed JSON syntax: Missing commas/brackets or invalid encoding"
  77. if not isinstance(payload, dict):
  78. return None, f"Invalid request payload: expected object, got {type(payload).__name__}"
  79. try:
  80. if extras is not None:
  81. payload.update(extras)
  82. validated_request = validator(**payload)
  83. except ValidationError as e:
  84. return None, format_validation_error_message(e)
  85. parsed_payload = validated_request.model_dump(by_alias=True, exclude_unset=exclude_unset)
  86. if extras is not None:
  87. for key in list(parsed_payload.keys()):
  88. if key in extras:
  89. del parsed_payload[key]
  90. return parsed_payload, None
  91. def validate_and_parse_request_args(request: Request, validator: type[BaseModel], *, extras: dict[str, Any] | None = None) -> tuple[dict[str, Any] | None, str | None]:
  92. """
  93. Validates and parses request arguments against a Pydantic model.
  94. This function performs a complete request validation workflow:
  95. 1. Extracts query parameters from the request
  96. 2. Merges with optional extra values (if provided)
  97. 3. Validates against the specified Pydantic model
  98. 4. Cleans the output by removing extra values
  99. 5. Returns either parsed data or an error message
  100. Args:
  101. request (Request): Web framework request object containing query parameters
  102. validator (type[BaseModel]): Pydantic model class for validation
  103. extras (dict[str, Any] | None): Optional additional values to include in validation
  104. but exclude from final output. Defaults to None.
  105. Returns:
  106. tuple[dict[str, Any] | None, str | None]:
  107. - First element: Validated/parsed arguments as dict if successful, None otherwise
  108. - Second element: Formatted error message if validation failed, None otherwise
  109. Behavior:
  110. - Query parameters are merged with extras before validation
  111. - Extras are automatically removed from the final output
  112. - All validation errors are formatted into a human-readable string
  113. Raises:
  114. TypeError: If validator is not a Pydantic BaseModel subclass
  115. Examples:
  116. Successful validation:
  117. >>> validate_and_parse_request_args(request, MyValidator)
  118. ({'param1': 'value'}, None)
  119. Failed validation:
  120. >>> validate_and_parse_request_args(request, MyValidator)
  121. (None, "param1: Field required")
  122. With extras:
  123. >>> validate_and_parse_request_args(request, MyValidator, extras={'internal_id': 123})
  124. ({'param1': 'value'}, None) # internal_id removed from output
  125. Notes:
  126. - Uses request.args.to_dict() for Flask-compatible parameter extraction
  127. - Maintains immutability of original request arguments
  128. - Preserves type conversion from Pydantic validation
  129. """
  130. args = request.args.to_dict(flat=True)
  131. try:
  132. if extras is not None:
  133. args.update(extras)
  134. validated_args = validator(**args)
  135. except ValidationError as e:
  136. return None, format_validation_error_message(e)
  137. parsed_args = validated_args.model_dump()
  138. if extras is not None:
  139. for key in list(parsed_args.keys()):
  140. if key in extras:
  141. del parsed_args[key]
  142. return parsed_args, None
  143. def format_validation_error_message(e: ValidationError) -> str:
  144. """
  145. Formats validation errors into a standardized string format.
  146. Processes pydantic ValidationError objects to create human-readable error messages
  147. containing field locations, error descriptions, and input values.
  148. Args:
  149. e (ValidationError): The validation error instance containing error details
  150. Returns:
  151. str: Formatted error messages joined by newlines. Each line contains:
  152. - Field path (dot-separated)
  153. - Error message
  154. - Truncated input value (max 128 chars)
  155. Example:
  156. >>> try:
  157. ... UserModel(name=123, email="invalid")
  158. ... except ValidationError as e:
  159. ... print(format_validation_error_message(e))
  160. Field: <name> - Message: <Input should be a valid string> - Value: <123>
  161. Field: <email> - Message: <value is not a valid email address> - Value: <invalid>
  162. """
  163. error_messages = []
  164. for error in e.errors():
  165. field = ".".join(map(str, error["loc"]))
  166. msg = error["msg"]
  167. input_val = error["input"]
  168. input_str = str(input_val)
  169. if len(input_str) > 128:
  170. input_str = input_str[:125] + "..."
  171. error_msg = f"Field: <{field}> - Message: <{msg}> - Value: <{input_str}>"
  172. error_messages.append(error_msg)
  173. return "\n".join(error_messages)
  174. def normalize_str(v: Any) -> Any:
  175. """
  176. Normalizes string values to a standard format while preserving non-string inputs.
  177. Performs the following transformations when input is a string:
  178. 1. Trims leading/trailing whitespace (str.strip())
  179. 2. Converts to lowercase (str.lower())
  180. Non-string inputs are returned unchanged, making this function safe for mixed-type
  181. processing pipelines.
  182. Args:
  183. v (Any): Input value to normalize. Accepts any Python object.
  184. Returns:
  185. Any: Normalized string if input was string-type, original value otherwise.
  186. Behavior Examples:
  187. String Input: " Admin " → "admin"
  188. Empty String: " " → "" (empty string)
  189. Non-String:
  190. - 123 → 123
  191. - None → None
  192. - ["User"] → ["User"]
  193. Typical Use Cases:
  194. - Standardizing user input
  195. - Preparing data for case-insensitive comparison
  196. - Cleaning API parameters
  197. - Normalizing configuration values
  198. Edge Cases:
  199. - Unicode whitespace is handled by str.strip()
  200. - Locale-independent lowercasing (str.lower())
  201. - Preserves falsy values (0, False, etc.)
  202. Example:
  203. >>> normalize_str(" ReadOnly ")
  204. 'readonly'
  205. >>> normalize_str(42)
  206. 42
  207. """
  208. if isinstance(v, str):
  209. stripped = v.strip()
  210. normalized = stripped.lower()
  211. return normalized
  212. return v
  213. def validate_uuid1_hex(v: Any) -> str:
  214. """
  215. Validates and converts input to a UUID version 1 hexadecimal string.
  216. This function performs strict validation and normalization:
  217. 1. Accepts either UUID objects or UUID-formatted strings
  218. 2. Verifies the UUID is version 1 (time-based)
  219. 3. Returns the 32-character hexadecimal representation
  220. Args:
  221. v (Any): Input value to validate. Can be:
  222. - UUID object (must be version 1)
  223. - String in UUID format (e.g. "550e8400-e29b-41d4-a716-446655440000")
  224. Returns:
  225. str: 32-character lowercase hexadecimal string without hyphens
  226. Example: "550e8400e29b41d4a716446655440000"
  227. Raises:
  228. PydanticCustomError: With code "invalid_UUID1_format" when:
  229. - Input is not a UUID object or valid UUID string
  230. - UUID version is not 1
  231. - String doesn't match UUID format
  232. Examples:
  233. Valid cases:
  234. >>> validate_uuid1_hex("550e8400-e29b-41d4-a716-446655440000")
  235. '550e8400e29b41d4a716446655440000'
  236. >>> validate_uuid1_hex(UUID('550e8400-e29b-41d4-a716-446655440000'))
  237. '550e8400e29b41d4a716446655440000'
  238. Invalid cases:
  239. >>> validate_uuid1_hex("not-a-uuid") # raises PydanticCustomError
  240. >>> validate_uuid1_hex(12345) # raises PydanticCustomError
  241. >>> validate_uuid1_hex(UUID(int=0)) # v4, raises PydanticCustomError
  242. Notes:
  243. - Uses Python's built-in UUID parser for format validation
  244. - Version check prevents accidental use of other UUID versions
  245. - Hyphens in input strings are automatically removed in output
  246. """
  247. try:
  248. uuid_obj = UUID(v) if isinstance(v, str) else v
  249. if uuid_obj.version != 1:
  250. raise PydanticCustomError("invalid_UUID1_format", "Must be a UUID1 format")
  251. return uuid_obj.hex
  252. except (AttributeError, ValueError, TypeError):
  253. raise PydanticCustomError("invalid_UUID1_format", "Invalid UUID1 format")
class Base(BaseModel):
    """Common base for request schemas in this module."""

    # extra="forbid" rejects unknown fields outright; strict=True disables
    # pydantic's implicit type coercion for every subclass.
    model_config = ConfigDict(extra="forbid", strict=True)
class RaptorConfig(Base):
    """Configuration block for RAPTOR recursive-summarization chunking."""

    # Feature toggle for RAPTOR processing.
    use_raptor: Annotated[bool, Field(default=False)]
    # Summarization prompt template; "{cluster_content}" is presumably
    # substituted with the clustered text by the pipeline — TODO confirm.
    prompt: Annotated[
        str,
        StringConstraints(strip_whitespace=True, min_length=1),
        Field(
            default="Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize."
        ),
    ]
    # Token budget per summary (1..2048).
    max_token: Annotated[int, Field(default=256, ge=1, le=2048)]
    # Clustering threshold in [0.0, 1.0].
    threshold: Annotated[float, Field(default=0.1, ge=0.0, le=1.0)]
    # Upper bound on cluster count (1..1024).
    max_cluster: Annotated[int, Field(default=64, ge=1, le=1024)]
    # Seed for deterministic clustering; must be non-negative.
    random_seed: Annotated[int, Field(default=0, ge=0)]
class GraphragConfig(Base):
    """Configuration block for GraphRAG knowledge-graph extraction."""

    # Feature toggle for GraphRAG processing.
    use_graphrag: Annotated[bool, Field(default=False)]
    # Entity categories to extract; default list mirrors common NER classes.
    entity_types: Annotated[list[str], Field(default_factory=lambda: ["organization", "person", "geo", "event", "category"])]
    # Extraction mode; exact semantics of "light" vs "general" are defined
    # by the downstream pipeline — TODO confirm.
    method: Annotated[Literal["light", "general"], Field(default="light")]
    # Toggles community detection — NOTE(review): inferred from name; confirm.
    community: Annotated[bool, Field(default=False)]
    # Toggles entity resolution — NOTE(review): inferred from name; confirm.
    resolution: Annotated[bool, Field(default=False)]
class ParserConfig(Base):
    """Document-parsing options attached to a dataset."""

    # 0 disables automatic keyword extraction (max 32).
    auto_keywords: Annotated[int, Field(default=0, ge=0, le=32)]
    # 0 disables automatic question generation (max 10).
    auto_questions: Annotated[int, Field(default=0, ge=0, le=10)]
    # Target chunk size in tokens (1..2048).
    chunk_token_num: Annotated[int, Field(default=512, ge=1, le=2048)]
    # NOTE: raw string — the default is the two characters backslash + "n",
    # not a newline; presumably interpreted by the chunker — confirm.
    delimiter: Annotated[str, Field(default=r"\n", min_length=1)]
    # Nested GraphRAG settings; defaults to disabled.
    graphrag: Annotated[GraphragConfig, Field(default_factory=lambda: GraphragConfig(use_graphrag=False))]
    # Render Excel sheets as HTML instead of plain rows.
    html4excel: Annotated[bool, Field(default=False)]
    # Layout-recognition backend name.
    layout_recognize: Annotated[str, Field(default="DeepDOC")]
    # Nested RAPTOR settings; defaults to disabled.
    raptor: Annotated[RaptorConfig, Field(default_factory=lambda: RaptorConfig(use_raptor=False))]
    # Knowledge-base ids used for tag lookup.
    tag_kb_ids: Annotated[list[str], Field(default_factory=list)]
    # Number of top tags to keep (1..10).
    topn_tags: Annotated[int, Field(default=1, ge=1, le=10)]
    # Weight of the filename in the embedding mix, in [0.0, 1.0] or None.
    filename_embd_weight: Annotated[float | None, Field(default=0.1, ge=0.0, le=1.0)]
    # Pages processed per task; None lets the pipeline choose.
    task_page_size: Annotated[int | None, Field(default=None, ge=1)]
    # Explicit page ranges as [start, end] pairs — NOTE(review): shape
    # inferred from the type; confirm against callers.
    pages: Annotated[list[list[int]] | None, Field(default=None)]
class CreateDatasetReq(Base):
    """Request schema for creating a dataset (knowledge base)."""

    # Required; surrounding whitespace is stripped before the length checks.
    name: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1, max_length=DATASET_NAME_LIMIT), Field(...)]
    # Base64 data URL ("data:<mime>;base64,<data>"); format checked below.
    avatar: Annotated[str | None, Field(default=None, max_length=65535)]
    description: Annotated[str | None, Field(default=None, max_length=65535)]
    # "<model_name>@<provider>"; dumped under the alias "embd_id".
    embedding_model: Annotated[str | None, Field(default=None, max_length=255, serialization_alias="embd_id")]
    permission: Annotated[Literal["me", "team"], Field(default="me", min_length=1, max_length=16)]
    # Dumped under the alias "parser_id".
    chunk_method: Annotated[
        Literal["naive", "book", "email", "laws", "manual", "one", "paper", "picture", "presentation", "qa", "table", "tag"],
        Field(default="naive", min_length=1, max_length=32, serialization_alias="parser_id"),
    ]
    parser_config: Annotated[ParserConfig | None, Field(default=None)]

    @field_validator("avatar", mode="after")
    @classmethod
    def validate_avatar_base64(cls, v: str | None) -> str | None:
        """
        Validate the Base64-encoded avatar's data-URL prefix and MIME type.

        Three-stage check: the value must contain a "<prefix>,<data>" comma
        split, the prefix must start with "data:", and the declared MIME
        type must be one of the supported image types (JPEG/PNG).

        Args:
            v (str | None): Raw avatar field value, or None when unset.

        Returns:
            str | None: The validated data-URL string, or None.

        Raises:
            PydanticCustomError ("format_invalid"): On a missing MIME
                prefix, a prefix not starting with "data:", or an
                unsupported MIME type.
        """
        if v is None:
            return v
        if "," in v:
            prefix, _ = v.split(",", 1)
            if not prefix.startswith("data:"):
                raise PydanticCustomError("format_invalid", "Invalid MIME prefix format. Must start with 'data:'")
            # Strip the 5-char "data:" header, then drop ";base64" (or any
            # other parameter) after the first ";".
            mime_type = prefix[5:].split(";")[0]
            supported_mime_types = ["image/jpeg", "image/png"]
            if mime_type not in supported_mime_types:
                raise PydanticCustomError("format_invalid", "Unsupported MIME type. Allowed: {supported_mime_types}", {"supported_mime_types": supported_mime_types})
            return v
        else:
            raise PydanticCustomError("format_invalid", "Missing MIME prefix. Expected format: data:<mime>;base64,<data>")

    @field_validator("embedding_model", mode="before")
    @classmethod
    def normalize_embedding_model(cls, v: Any) -> Any:
        """Normalize the embedding model identifier by stripping whitespace."""
        if isinstance(v, str):
            return v.strip()
        return v

    @field_validator("embedding_model", mode="after")
    @classmethod
    def validate_embedding_model(cls, v: str | None) -> str | None:
        """
        Validate the embedding model identifier format.

        Expected format is "<model_name>@<provider>" with both components
        non-empty and not whitespace-only. None passes through unchanged.

        Args:
            v (str | None): Normalized model identifier, or None.

        Returns:
            str | None: The validated identifier, or None.

        Raises:
            PydanticCustomError ("format_invalid"): Missing "@" separator,
                empty component, or whitespace-only component.
        """
        if isinstance(v, str):
            if "@" not in v:
                raise PydanticCustomError("format_invalid", "Embedding model identifier must follow <model_name>@<provider> format")
            components = v.split("@", 1)
            if len(components) != 2 or not all(components):
                raise PydanticCustomError("format_invalid", "Both model_name and provider must be non-empty strings")
            model_name, provider = components
            if not model_name.strip() or not provider.strip():
                raise PydanticCustomError("format_invalid", "Model name and provider cannot be whitespace-only strings")
        return v

    # @field_validator("permission", mode="before")
    # @classmethod
    # def normalize_permission(cls, v: Any) -> Any:
    #     return normalize_str(v)

    @field_validator("parser_config", mode="before")
    @classmethod
    def normalize_empty_parser_config(cls, v: Any) -> Any:
        """
        Convert an empty parser-config dict to None before validation.

        Keeps "no configuration" handling consistent: both ``{}`` and
        ``None`` end up as None.

        Args:
            v (Any): Raw input value for the parser_config field.

        Returns:
            Any: None when the input equals ``{}``, otherwise the input.
        """
        if v == {}:
            return None
        return v

    @field_validator("parser_config", mode="after")
    @classmethod
    def validate_parser_config_json_length(cls, v: ParserConfig | None) -> ParserConfig | None:
        """
        Enforce a size limit on the serialized parser configuration.

        Args:
            v (ParserConfig | None): Parsed parser configuration, or None.

        Returns:
            ParserConfig | None: The validated configuration.

        Raises:
            PydanticCustomError ("string_too_long"): When the JSON dump
                exceeds 65,535 characters.
        """
        if v is None:
            return None
        if (json_str := v.model_dump_json()) and len(json_str) > 65535:
            raise PydanticCustomError("string_too_long", "Parser config exceeds size limit (max 65,535 characters). Current size: {actual}", {"actual": len(json_str)})
        return v
class UpdateDatasetReq(CreateDatasetReq):
    """Request schema for updating an existing dataset.

    Inherits all creation fields and adds the target ``dataset_id``.
    """

    dataset_id: Annotated[str, Field(...)]
    # NOTE(review): default="" sidesteps min_length=1 because pydantic does
    # not validate defaults — presumably intentional so partial updates can
    # omit the name (used with exclude_unset); confirm.
    name: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1, max_length=DATASET_NAME_LIMIT), Field(default="")]
    pagerank: Annotated[int, Field(default=0, ge=0, le=100)]

    @field_validator("dataset_id", mode="before")
    @classmethod
    def validate_dataset_id(cls, v: Any) -> str:
        # Normalize to a validated UUIDv1 hex string before field validation.
        return validate_uuid1_hex(v)
  434. class DeleteReq(Base):
  435. ids: Annotated[list[str] | None, Field(...)]
  436. @field_validator("ids", mode="after")
  437. @classmethod
  438. def validate_ids(cls, v_list: list[str] | None) -> list[str] | None:
  439. """
  440. Validates and normalizes a list of UUID strings with None handling.
  441. This post-processing validator performs:
  442. 1. None input handling (pass-through)
  443. 2. UUID version 1 validation for each list item
  444. 3. Duplicate value detection
  445. 4. Returns normalized UUID hex strings or None
  446. Args:
  447. v_list (list[str] | None): Input list that has passed initial validation.
  448. Either a list of UUID strings or None.
  449. Returns:
  450. list[str] | None:
  451. - None if input was None
  452. - List of normalized UUID hex strings otherwise:
  453. * 32-character lowercase
  454. * Valid UUID version 1
  455. * Unique within list
  456. Raises:
  457. PydanticCustomError: With structured error details when:
  458. - "invalid_UUID1_format": Any string fails UUIDv1 validation
  459. - "duplicate_uuids": If duplicate IDs are detected
  460. Validation Rules:
  461. - None input returns None
  462. - Empty list returns empty list
  463. - All non-None items must be valid UUIDv1
  464. - No duplicates permitted
  465. - Original order preserved
  466. Examples:
  467. Valid cases:
  468. >>> validate_ids(None)
  469. None
  470. >>> validate_ids([])
  471. []
  472. >>> validate_ids(["550e8400-e29b-41d4-a716-446655440000"])
  473. ["550e8400e29b41d4a716446655440000"]
  474. Invalid cases:
  475. >>> validate_ids(["invalid"])
  476. # raises PydanticCustomError(invalid_UUID1_format)
  477. >>> validate_ids(["550e...", "550e..."])
  478. # raises PydanticCustomError(duplicate_uuids)
  479. Security Notes:
  480. - Validates UUID version to prevent version spoofing
  481. - Duplicate check prevents data injection
  482. - None handling maintains pipeline integrity
  483. """
  484. if v_list is None:
  485. return None
  486. ids_list = []
  487. for v in v_list:
  488. try:
  489. ids_list.append(validate_uuid1_hex(v))
  490. except PydanticCustomError as e:
  491. raise e
  492. duplicates = [item for item, count in Counter(ids_list).items() if count > 1]
  493. if duplicates:
  494. duplicates_str = ", ".join(duplicates)
  495. raise PydanticCustomError("duplicate_uuids", "Duplicate ids: '{duplicate_ids}'", {"duplicate_ids": duplicates_str})
  496. return ids_list
class DeleteDatasetReq(DeleteReq):  # dataset deletion reuses the generic id-list schema unchanged
    ...
  498. class BaseListReq(BaseModel):
  499. model_config = ConfigDict(extra="forbid")
  500. id: Annotated[str | None, Field(default=None)]
  501. name: Annotated[str | None, Field(default=None)]
  502. page: Annotated[int, Field(default=1, ge=1)]
  503. page_size: Annotated[int, Field(default=30, ge=1)]
  504. orderby: Annotated[Literal["create_time", "update_time"], Field(default="create_time")]
  505. desc: Annotated[bool, Field(default=True)]
  506. @field_validator("id", mode="before")
  507. @classmethod
  508. def validate_id(cls, v: Any) -> str:
  509. return validate_uuid1_hex(v)
class ListDatasetReq(BaseListReq):  # dataset listing reuses the generic list-query schema unchanged
    ...