package com.aallam.openai.azure.api.filtering

import com.aallam.openai.azure.api.core.ResponseError
import kotlinx.serialization.SerialName
import kotlinx.serialization.Serializable

/**
 * Information about content filtering evaluated against input data to Azure OpenAI.
 */
@Serializable
public data class ContentFilterResultDetailsForPrompt(

    /**
     * Describes language related to anatomical organs and genitals, romantic relationships,
     * acts portrayed in erotic or affectionate terms, physical sexual acts, including
     * those portrayed as an assault or a forced sexual violent act against one’s will,
     * prostitution, pornography, and abuse.
     */
    @SerialName("sexual")
    val sexual: ContentFilterResult? = null,

    /**
     * Describes language related to physical actions intended to hurt, injure, damage, or
     * kill someone or something; describes weapons, etc.
     */
    @SerialName("violence")
    val violence: ContentFilterResult? = null,

    /**
     * Describes language attacks or uses that include pejorative or discriminatory language
     * with reference to a person or identity group on the basis of certain differentiating
     * attributes of these groups including but not limited to race, ethnicity, nationality,
     * gender identity and expression, sexual orientation, religion, immigration status, ability
     * status, personal appearance, and body size.
     */
    @SerialName("hate")
    val hate: ContentFilterResult? = null,

    /**
     * Describes language related to physical actions intended to purposely hurt, injure,
     * or damage one’s body, or kill oneself.
     */
    @SerialName("self_harm")
    val selfHarm: ContentFilterResult? = null,

    /**
     * Describes whether profanity was detected.
     */
    @SerialName("profanity")
    val profanity: ContentFilterDetectionResult? = null,

    /**
     * Describes detection results against configured custom blocklists.
     */
    @SerialName("custom_blocklists")
    val customBlocklists: List<ContentFilterBlocklistIdResult>? = null,

    /**
     * Describes an error returned if the content filtering system is
     * down or otherwise unable to complete the operation in time.
     */
    @SerialName("error")
    val error: ResponseError? = null,

    /**
     * Whether a jailbreak attempt was detected in the prompt.
     */
    @SerialName("jailbreak")
    val jailbreak: ContentFilterDetectionResult? = null
)
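
A minimal usage sketch, not part of the change itself: it decodes an illustrative prompt-filter annotation into the class above using kotlinx.serialization. The payload shape (nested "filtered"/"severity" and "filtered"/"detected" objects) is assumed to follow the Azure content-filter REST schema, and the sketch assumes the sibling types in this package (ContentFilterResult, ContentFilterDetectionResult) map those fields; the values are made up for the example, not captured service output.

package com.aallam.openai.azure.api.filtering

import kotlinx.serialization.json.Json

fun main() {
    // Lenient decoder so any extra annotation fields added by the service are ignored.
    val json = Json { ignoreUnknownKeys = true }

    // Illustrative payload shaped like a single prompt-filter annotation;
    // field values are hypothetical.
    val payload = """
        {
          "sexual":    { "filtered": false, "severity": "safe" },
          "violence":  { "filtered": false, "severity": "safe" },
          "hate":      { "filtered": false, "severity": "safe" },
          "self_harm": { "filtered": false, "severity": "safe" },
          "jailbreak": { "filtered": false, "detected": false }
        }
    """.trimIndent()

    val details = json.decodeFromString(
        ContentFilterResultDetailsForPrompt.serializer(),
        payload
    )

    // Every property is nullable, so categories the service omits simply stay null.
    if (details.error != null) {
        println("Content filtering did not complete: ${details.error}")
    } else {
        println("Jailbreak annotation present: ${details.jailbreak != null}")
    }
}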