Source code for langchain_experimental.comprehend_moderation.base_moderation_exceptions
class ModerationPiiError(Exception):
    """Exception raised if PII entities are detected.

    Attributes:
        message -- explanation of the error
    """

    def __init__(
        self,
        message: str = "The prompt contains PII entities and cannot be processed",
    ) -> None:
        # Keep the message on the instance and pass it to Exception so
        # str(err) and err.args reflect the same text.
        self.message = message
        super().__init__(self.message)
class ModerationToxicityError(Exception):
    """Exception raised if Toxic entities are detected.

    Attributes:
        message -- explanation of the error
    """

    def __init__(
        self,
        message: str = "The prompt contains toxic content and cannot be processed",
    ) -> None:
        # Keep the message on the instance and pass it to Exception so
        # str(err) and err.args reflect the same text.
        self.message = message
        super().__init__(self.message)
class ModerationPromptSafetyError(Exception):
    """Exception raised if Unsafe prompts are detected.

    Attributes:
        message -- explanation of the error
    """

    def __init__(
        self,
        message: str = "The prompt is unsafe and cannot be processed",
    ) -> None:
        # Keep the message on the instance and pass it to Exception so
        # str(err) and err.args reflect the same text.
        self.message = message
        super().__init__(self.message)