You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114
  1. from abc import ABC, abstractmethod
  2. from enum import StrEnum, auto
  3. from pydantic import BaseModel, Field
  4. from core.extension.extensible import Extensible, ExtensionModule
  5. class ModerationAction(StrEnum):
  6. DIRECT_OUTPUT = auto()
  7. OVERRIDDEN = auto()
  8. class ModerationInputsResult(BaseModel):
  9. flagged: bool = False
  10. action: ModerationAction
  11. preset_response: str = ""
  12. inputs: dict = Field(default_factory=dict)
  13. query: str = ""
  14. class ModerationOutputsResult(BaseModel):
  15. flagged: bool = False
  16. action: ModerationAction
  17. preset_response: str = ""
  18. text: str = ""
  19. class Moderation(Extensible, ABC):
  20. """
  21. The base class of moderation.
  22. """
  23. module: ExtensionModule = ExtensionModule.MODERATION
  24. def __init__(self, app_id: str, tenant_id: str, config: dict | None = None):
  25. super().__init__(tenant_id, config)
  26. self.app_id = app_id
  27. @classmethod
  28. @abstractmethod
  29. def validate_config(cls, tenant_id: str, config: dict):
  30. """
  31. Validate the incoming form config data.
  32. :param tenant_id: the id of workspace
  33. :param config: the form config data
  34. :return:
  35. """
  36. raise NotImplementedError
  37. @abstractmethod
  38. def moderation_for_inputs(self, inputs: dict, query: str = "") -> ModerationInputsResult:
  39. """
  40. Moderation for inputs.
  41. After the user inputs, this method will be called to perform sensitive content review
  42. on the user inputs and return the processed results.
  43. :param inputs: user inputs
  44. :param query: query string (required in chat app)
  45. :return:
  46. """
  47. raise NotImplementedError
  48. @abstractmethod
  49. def moderation_for_outputs(self, text: str) -> ModerationOutputsResult:
  50. """
  51. Moderation for outputs.
  52. When LLM outputs content, the front end will pass the output content (may be segmented)
  53. to this method for sensitive content review, and the output content will be shielded if the review fails.
  54. :param text: LLM output content
  55. :return:
  56. """
  57. raise NotImplementedError
  58. @classmethod
  59. def _validate_inputs_and_outputs_config(cls, config: dict, is_preset_response_required: bool):
  60. # inputs_config
  61. inputs_config = config.get("inputs_config")
  62. if not isinstance(inputs_config, dict):
  63. raise ValueError("inputs_config must be a dict")
  64. # outputs_config
  65. outputs_config = config.get("outputs_config")
  66. if not isinstance(outputs_config, dict):
  67. raise ValueError("outputs_config must be a dict")
  68. inputs_config_enabled = inputs_config.get("enabled")
  69. outputs_config_enabled = outputs_config.get("enabled")
  70. if not inputs_config_enabled and not outputs_config_enabled:
  71. raise ValueError("At least one of inputs_config or outputs_config must be enabled")
  72. # preset_response
  73. if not is_preset_response_required:
  74. return
  75. if inputs_config_enabled:
  76. if not inputs_config.get("preset_response"):
  77. raise ValueError("inputs_config.preset_response is required")
  78. if len(inputs_config.get("preset_response", "0")) > 100:
  79. raise ValueError("inputs_config.preset_response must be less than 100 characters")
  80. if outputs_config_enabled:
  81. if not outputs_config.get("preset_response"):
  82. raise ValueError("outputs_config.preset_response is required")
  83. if len(outputs_config.get("preset_response", "0")) > 100:
  84. raise ValueError("outputs_config.preset_response must be less than 100 characters")
  85. class ModerationError(Exception):
  86. pass