From ab83747acd75497b35115c794a38048bfd55036a Mon Sep 17 00:00:00 2001
From: chenhan
Date: Sun, 10 Nov 2024 20:29:24 +0800
Subject: [PATCH 1/2] feat(moderation): support new request format

---
 moderation.go      |  94 +++++++++++++++++--
 moderation_test.go | 222 ++++++++++++++++++++++++++++++++++-----------
 2 files changed, 257 insertions(+), 59 deletions(-)

diff --git a/moderation.go b/moderation.go
index a0e09c0e..4b8f12f4 100644
--- a/moderation.go
+++ b/moderation.go
@@ -26,6 +26,13 @@ var (
 	ErrModerationInvalidModel = errors.New("this model is not supported with moderation, please use text-moderation-stable or text-moderation-latest instead") //nolint:lll
 )
 
+type ModerationItemType string
+
+const (
+	ModerationItemTypeText     ModerationItemType = "text"
+	ModerationItemTypeImageURL ModerationItemType = "image_url"
+)
+
 var validModerationModel = map[string]struct{}{
 	ModerationOmniLatest:   {},
 	ModerationOmni20240926: {},
@@ -39,11 +46,63 @@ type ModerationRequest struct {
 	Model string `json:"model,omitempty"`
 }
 
+func (m ModerationRequest) Convert() ModerationRequestV2 {
+	return ModerationRequestV2{
+		Input: m.Input,
+		Model: m.Model,
+	}
+}
+
+type ModerationStrArrayRequest struct {
+	Input []string `json:"input,omitempty"`
+	Model string   `json:"model,omitempty"`
+}
+
+func (m ModerationStrArrayRequest) Convert() ModerationRequestV2 {
+	return ModerationRequestV2{
+		Input: m.Input,
+		Model: m.Model,
+	}
+}
+
+type ModerationArrayRequest struct {
+	Input []ModerationRequestItem `json:"input,omitempty"`
+	Model string                  `json:"model,omitempty"`
+}
+
+func (m ModerationArrayRequest) Convert() ModerationRequestV2 {
+	return ModerationRequestV2{
+		Input: m.Input,
+		Model: m.Model,
+	}
+}
+
+type ModerationRequestItem struct {
+	Type ModerationItemType `json:"type"`
+
+	ImageURL ModerationImageURL `json:"image_url,omitempty"`
+	Text     string             `json:"text,omitempty"`
+}
+
+type ModerationImageURL struct {
+	URL string `json:"url,omitempty"`
+}
+
+type ModerationRequestV2 struct {
+	Input any    `json:"input,omitempty"`
+	Model string `json:"model,omitempty"`
+}
+
+type ModerationRequestConverter interface {
+	Convert() ModerationRequestV2
+}
+
 // Result represents one of possible moderation results.
 type Result struct {
-	Categories     ResultCategories     `json:"categories"`
-	CategoryScores ResultCategoryScores `json:"category_scores"`
-	Flagged        bool                 `json:"flagged"`
+	Categories                ResultCategories         `json:"categories"`
+	CategoryScores            ResultCategoryScores     `json:"category_scores"`
+	Flagged                   bool                     `json:"flagged"`
+	CategoryAppliedInputTypes CategoryAppliedInputType `json:"category_applied_input_types"`
 }
 
 // ResultCategories represents Categories of Result.
@@ -59,6 +118,8 @@ type ResultCategories struct {
 	SexualMinors     bool `json:"sexual/minors"`
 	Violence         bool `json:"violence"`
 	ViolenceGraphic  bool `json:"violence/graphic"`
+	Illicit          bool `json:"illicit"`
+	IllicitViolent   bool `json:"illicit/violent"`
 }
 
 // ResultCategoryScores represents CategoryScores of Result.
@@ -74,6 +135,24 @@ type ResultCategoryScores struct {
 	SexualMinors     float32 `json:"sexual/minors"`
 	Violence         float32 `json:"violence"`
 	ViolenceGraphic  float32 `json:"violence/graphic"`
+	Illicit          float32 `json:"illicit"`
+	IllicitViolent   float32 `json:"illicit/violent"`
+}
+
+type CategoryAppliedInputType struct {
+	Harassment            []ModerationItemType `json:"harassment"`
+	HarassmentThreatening []ModerationItemType `json:"harassment/threatening"`
+	Sexual                []ModerationItemType `json:"sexual"`
+	Hate                  []ModerationItemType `json:"hate"`
+	HateThreatening       []ModerationItemType `json:"hate/threatening"`
+	Illicit               []ModerationItemType `json:"illicit"`
+	IllicitViolent        []ModerationItemType `json:"illicit/violent"`
+	SelfHarmIntent        []ModerationItemType `json:"self-harm/intent"`
+	SelfHarmInstructions  []ModerationItemType `json:"self-harm/instructions"`
+	SelfHarm              []ModerationItemType `json:"self-harm"`
+	SexualMinors          []ModerationItemType `json:"sexual/minors"`
+	Violence              []ModerationItemType `json:"violence"`
+	ViolenceGraphic       []ModerationItemType `json:"violence/graphic"`
 }
 
 // ModerationResponse represents a response structure for moderation API.
@@ -87,15 +166,18 @@ type ModerationResponse struct {
 
 // Moderations — perform a moderation api call over a string.
 // Input can be an array or slice but a string will reduce the complexity.
-func (c *Client) Moderations(ctx context.Context, request ModerationRequest) (response ModerationResponse, err error) {
-	if _, ok := validModerationModel[request.Model]; len(request.Model) > 0 && !ok {
+func (c *Client) Moderations(ctx context.Context,
+	request ModerationRequestConverter) (response ModerationResponse, err error) {
+	realRequest := request.Convert()
+
+	if _, ok := validModerationModel[realRequest.Model]; len(realRequest.Model) > 0 && !ok {
 		err = ErrModerationInvalidModel
 		return
 	}
 	req, err := c.newRequest(
 		ctx,
 		http.MethodPost,
-		c.fullURL("/moderations", withModel(request.Model)),
+		c.fullURL("/moderations", withModel(realRequest.Model)),
 		withBody(&request),
 	)
 	if err != nil {
diff --git a/moderation_test.go b/moderation_test.go
index a97f25bc..40f33606 100644
--- a/moderation_test.go
+++ b/moderation_test.go
@@ -19,12 +19,42 @@ import (
 func TestModerations(t *testing.T) {
 	client, server, teardown := setupOpenAITestServer()
 	defer teardown()
+
 	server.RegisterHandler("/v1/moderations", handleModerationEndpoint)
-	_, err := client.Moderations(context.Background(), openai.ModerationRequest{
-		Model: openai.ModerationTextStable,
-		Input: "I want to kill them.",
-	})
-	checks.NoError(t, err, "Moderation error")
+
+	var requestInputs = []openai.ModerationRequestConverter{
+		openai.ModerationRequest{
+			Model: openai.ModerationTextStable,
+			Input: "I want to kill them.",
+		},
+		openai.ModerationStrArrayRequest{
+			Input: []string{
+				"I want to kill them.",
+				"Hello World",
+			},
+			Model: openai.ModerationTextStable,
+		},
+		openai.ModerationArrayRequest{
+			Input: []openai.ModerationRequestItem{
+				{
+					Type: openai.ModerationItemTypeText,
+					Text: "I want to kill them.",
+				},
+				{
+					Type: openai.ModerationItemTypeImageURL,
+					ImageURL: openai.ModerationImageURL{
+						URL: "https://cdn.openai.com/API/images/guides/image_variation_original.webp",
+					},
+				},
+			},
+			Model: openai.ModerationOmniLatest,
+		},
+	}
+
+	for _, input := range requestInputs {
+		_, err := client.Moderations(context.Background(), input)
+		checks.NoError(t, err, "Moderation error")
+	}
 }
 
 // TestModerationsWithIncorrectModel Tests passing valid and invalid models to moderations endpoint.
@@ -73,83 +103,169 @@ func handleModerationEndpoint(w http.ResponseWriter, r *http.Request) {
 	if r.Method != "POST" {
 		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
 	}
-	var moderationReq openai.ModerationRequest
+	var moderationReq openai.ModerationArrayRequest
 	if moderationReq, err = getModerationBody(r); err != nil {
 		http.Error(w, "could not read request", http.StatusInternalServerError)
 		return
 	}
 
-	resCat := openai.ResultCategories{}
-	resCatScore := openai.ResultCategoryScores{}
-	switch {
-	case strings.Contains(moderationReq.Input, "hate"):
-		resCat = openai.ResultCategories{Hate: true}
-		resCatScore = openai.ResultCategoryScores{Hate: 1}
+	res := openai.ModerationResponse{
+		ID:    strconv.Itoa(int(time.Now().Unix())),
+		Model: moderationReq.Model,
+	}
 
-	case strings.Contains(moderationReq.Input, "hate more"):
-		resCat = openai.ResultCategories{HateThreatening: true}
-		resCatScore = openai.ResultCategoryScores{HateThreatening: 1}
+	for i := range moderationReq.Input {
+		var (
+			resCat        = openai.ResultCategories{}
+			resCatScore   = openai.ResultCategoryScores{}
+			resCatApplied = openai.CategoryAppliedInputType{}
+		)
 
-	case strings.Contains(moderationReq.Input, "harass"):
-		resCat = openai.ResultCategories{Harassment: true}
-		resCatScore = openai.ResultCategoryScores{Harassment: 1}
+		switch {
+		case strings.Contains(moderationReq.Input[i].Text, "hate"):
+			resCat = openai.ResultCategories{Hate: true}
+			resCatScore = openai.ResultCategoryScores{Hate: 1}
+			resCatApplied = openai.CategoryAppliedInputType{
+				Hate: []openai.ModerationItemType{openai.ModerationItemTypeText},
+			}
 
-	case strings.Contains(moderationReq.Input, "harass hard"):
-		resCat = openai.ResultCategories{Harassment: true}
-		resCatScore = openai.ResultCategoryScores{HarassmentThreatening: 1}
+		case strings.Contains(moderationReq.Input[i].Text, "hate more"):
+			resCat = openai.ResultCategories{HateThreatening: true}
+			resCatScore = openai.ResultCategoryScores{HateThreatening: 1}
+			resCatApplied = openai.CategoryAppliedInputType{
+				HateThreatening: []openai.ModerationItemType{openai.ModerationItemTypeText},
+			}
 
-	case strings.Contains(moderationReq.Input, "suicide"):
-		resCat = openai.ResultCategories{SelfHarm: true}
-		resCatScore = openai.ResultCategoryScores{SelfHarm: 1}
+		case strings.Contains(moderationReq.Input[i].Text, "harass"):
+			resCat = openai.ResultCategories{Harassment: true}
+			resCatScore = openai.ResultCategoryScores{Harassment: 1}
+			resCatApplied = openai.CategoryAppliedInputType{
+				Harassment: []openai.ModerationItemType{openai.ModerationItemTypeText},
+			}
 
-	case strings.Contains(moderationReq.Input, "wanna suicide"):
-		resCat = openai.ResultCategories{SelfHarmIntent: true}
-		resCatScore = openai.ResultCategoryScores{SelfHarm: 1}
+		case strings.Contains(moderationReq.Input[i].Text, "harass hard"):
+			resCat = openai.ResultCategories{Harassment: true}
+			resCatScore = openai.ResultCategoryScores{HarassmentThreatening: 1}
+			resCatApplied = openai.CategoryAppliedInputType{
+				HarassmentThreatening: []openai.ModerationItemType{openai.ModerationItemTypeText},
+			}
 
-	case strings.Contains(moderationReq.Input, "drink bleach"):
-		resCat = openai.ResultCategories{SelfHarmInstructions: true}
-		resCatScore = openai.ResultCategoryScores{SelfHarmInstructions: 1}
+		case strings.Contains(moderationReq.Input[i].Text, "suicide"):
+			resCat = openai.ResultCategories{SelfHarm: true}
+			resCatScore = openai.ResultCategoryScores{SelfHarm: 1}
+			resCatApplied = openai.CategoryAppliedInputType{
+				SelfHarm: []openai.ModerationItemType{openai.ModerationItemTypeText},
+			}
 
-	case strings.Contains(moderationReq.Input, "porn"):
-		resCat = openai.ResultCategories{Sexual: true}
-		resCatScore = openai.ResultCategoryScores{Sexual: 1}
+		case strings.Contains(moderationReq.Input[i].Text, "wanna suicide"):
+			resCat = openai.ResultCategories{SelfHarmIntent: true}
+			resCatScore = openai.ResultCategoryScores{SelfHarm: 1}
+			resCatApplied = openai.CategoryAppliedInputType{
+				SelfHarmIntent: []openai.ModerationItemType{openai.ModerationItemTypeText},
+			}
 
-	case strings.Contains(moderationReq.Input, "child porn"):
-		resCat = openai.ResultCategories{SexualMinors: true}
-		resCatScore = openai.ResultCategoryScores{SexualMinors: 1}
+		case strings.Contains(moderationReq.Input[i].Text, "drink bleach"):
+			resCat = openai.ResultCategories{SelfHarmInstructions: true}
+			resCatScore = openai.ResultCategoryScores{SelfHarmInstructions: 1}
+			resCatApplied = openai.CategoryAppliedInputType{
+				SelfHarmInstructions: []openai.ModerationItemType{openai.ModerationItemTypeText},
+			}
 
-	case strings.Contains(moderationReq.Input, "kill"):
-		resCat = openai.ResultCategories{Violence: true}
-		resCatScore = openai.ResultCategoryScores{Violence: 1}
+		case strings.Contains(moderationReq.Input[i].Text, "porn"):
+			resCat = openai.ResultCategories{Sexual: true}
+			resCatScore = openai.ResultCategoryScores{Sexual: 1}
+			resCatApplied = openai.CategoryAppliedInputType{
+				Sexual: []openai.ModerationItemType{openai.ModerationItemTypeText},
+			}
 
-	case strings.Contains(moderationReq.Input, "corpse"):
-		resCat = openai.ResultCategories{ViolenceGraphic: true}
-		resCatScore = openai.ResultCategoryScores{ViolenceGraphic: 1}
-	}
+		case strings.Contains(moderationReq.Input[i].Text, "child porn"):
+			resCat = openai.ResultCategories{SexualMinors: true}
+			resCatScore = openai.ResultCategoryScores{SexualMinors: 1}
+			resCatApplied = openai.CategoryAppliedInputType{
+				SexualMinors: []openai.ModerationItemType{openai.ModerationItemTypeText},
+			}
 
-	result := openai.Result{Categories: resCat, CategoryScores: resCatScore, Flagged: true}
+		case strings.Contains(moderationReq.Input[i].Text, "kill"):
+			resCat = openai.ResultCategories{Violence: true}
+			resCatScore = openai.ResultCategoryScores{Violence: 1}
+			resCatApplied = openai.CategoryAppliedInputType{
+				Violence: []openai.ModerationItemType{openai.ModerationItemTypeText},
+			}
 
-	res := openai.ModerationResponse{
-		ID:    strconv.Itoa(int(time.Now().Unix())),
-		Model: moderationReq.Model,
+		case strings.Contains(moderationReq.Input[i].Text, "corpse"):
+			resCat = openai.ResultCategories{ViolenceGraphic: true}
+			resCatScore = openai.ResultCategoryScores{ViolenceGraphic: 1}
+			resCatApplied = openai.CategoryAppliedInputType{
+				ViolenceGraphic: []openai.ModerationItemType{openai.ModerationItemTypeText},
+			}
+
+		case strings.Contains(moderationReq.Input[i].Text, "how to shoplift"):
+			resCat = openai.ResultCategories{Illicit: true}
+			resCatScore = openai.ResultCategoryScores{Illicit: 1}
+			resCatApplied = openai.CategoryAppliedInputType{
+				Illicit: []openai.ModerationItemType{openai.ModerationItemTypeText},
+			}
+
+		case strings.Contains(moderationReq.Input[i].Text, "how to buy gun"):
+			resCat = openai.ResultCategories{IllicitViolent: true}
+			resCatScore = openai.ResultCategoryScores{IllicitViolent: 1}
+			resCatApplied = openai.CategoryAppliedInputType{
+				IllicitViolent: []openai.ModerationItemType{openai.ModerationItemTypeText},
+			}
+		}
+
+		result := openai.Result{
+			Categories:                resCat,
+			CategoryScores:            resCatScore,
+			Flagged:                   true,
+			CategoryAppliedInputTypes: resCatApplied,
+		}
+		res.Results = append(res.Results, result)
 	}
-	res.Results = append(res.Results, result)
 
 	resBytes, _ = json.Marshal(res)
 	fmt.Fprintln(w, string(resBytes))
 }
 
 // getModerationBody Returns the body of the request to do a moderation.
-func getModerationBody(r *http.Request) (openai.ModerationRequest, error) {
-	moderation := openai.ModerationRequest{}
+func getModerationBody(r *http.Request) (openai.ModerationArrayRequest, error) {
+	var (
+		moderation             = openai.ModerationRequest{}
+		strArrayInput          = openai.ModerationStrArrayRequest{}
+		moderationArrayRequest = openai.ModerationArrayRequest{}
+	)
 	// read the request body
 	reqBody, err := io.ReadAll(r.Body)
 	if err != nil {
-		return openai.ModerationRequest{}, err
+		return openai.ModerationArrayRequest{}, err
 	}
 	err = json.Unmarshal(reqBody, &moderation)
+	if err == nil {
+		return openai.ModerationArrayRequest{
+			Input: []openai.ModerationRequestItem{
+				{
+					Type: openai.ModerationItemTypeText,
+					Text: moderation.Input,
+				},
+			},
+			Model: moderation.Model,
+		}, nil
+	}
+	err = json.Unmarshal(reqBody, &strArrayInput)
+	if err == nil {
+		moderationArrayRequest.Model = strArrayInput.Model
+		for i := range strArrayInput.Input {
+			moderationArrayRequest.Input = append(moderationArrayRequest.Input, openai.ModerationRequestItem{
+				Type: openai.ModerationItemTypeText,
+				Text: strArrayInput.Input[i],
+			})
+		}
+		return moderationArrayRequest, nil
+	}
+	err = json.Unmarshal(reqBody, &moderationArrayRequest)
 	if err != nil {
-		return openai.ModerationRequest{}, err
+		return openai.ModerationArrayRequest{}, err
 	}
-	return moderation, nil
+
+	return moderationArrayRequest, nil
 }

From afd0628c83fe9b931480f28a7ec69f48b10e4afb Mon Sep 17 00:00:00 2001
From: chenhan
Date: Sun, 15 Dec 2024 19:26:55 +0800
Subject: [PATCH 2/2] test(moderation): add testcase for ModerationItemTypeImageURL

---
 moderation_test.go | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/moderation_test.go b/moderation_test.go
index 40f33606..0dd0b7e5 100644
--- a/moderation_test.go
+++ b/moderation_test.go
@@ -49,6 +49,17 @@ func TestModerations(t *testing.T) {
 				},
 			},
 			Model: openai.ModerationOmniLatest,
 		},
+		openai.ModerationArrayRequest{
+			Input: []openai.ModerationRequestItem{
+				{
+					Type: openai.ModerationItemTypeImageURL,
+					ImageURL: openai.ModerationImageURL{
+						URL: "https://cdn.openai.com/API/images/harass.png",
+					},
+				},
+			},
+			Model: openai.ModerationOmniLatest,
+		},
 	}
 
 	for _, input := range requestInputs {
@@ -212,6 +223,13 @@ func handleModerationEndpoint(w http.ResponseWriter, r *http.Request) {
 			resCatApplied = openai.CategoryAppliedInputType{
 				IllicitViolent: []openai.ModerationItemType{openai.ModerationItemTypeText},
 			}
+		case moderationReq.Input[i].Type == openai.ModerationItemTypeImageURL &&
+			moderationReq.Input[i].ImageURL.URL == "https://cdn.openai.com/API/images/harass.png":
+			resCat = openai.ResultCategories{Harassment: true}
+			resCatScore = openai.ResultCategoryScores{Harassment: 1}
+			resCatApplied = openai.CategoryAppliedInputType{
+				Harassment: []openai.ModerationItemType{openai.ModerationItemTypeImageURL},
+			}
 		}
 
 		result := openai.Result{
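
For reviewers, a minimal caller-side sketch of the request shapes introduced by this series. This is illustrative only and not part of the patches: it assumes the usual github.com/sashabaranov/go-openai import path, an API key in OPENAI_API_KEY, and a placeholder image URL.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

	// The existing single-string form keeps working because ModerationRequest
	// satisfies the new ModerationRequestConverter interface.
	_, err := client.Moderations(context.Background(), openai.ModerationRequest{
		Model: openai.ModerationTextStable,
		Input: "I want to kill them.",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Mixed text and image input uses ModerationArrayRequest with an omni model.
	resp, err := client.Moderations(context.Background(), openai.ModerationArrayRequest{
		Model: openai.ModerationOmniLatest,
		Input: []openai.ModerationRequestItem{
			{Type: openai.ModerationItemTypeText, Text: "some text to check"},
			{
				Type:     openai.ModerationItemTypeImageURL,
				ImageURL: openai.ModerationImageURL{URL: "https://example.com/image.png"},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, result := range resp.Results {
		// Per-result flag plus which input types triggered the violence category.
		fmt.Println(result.Flagged, result.CategoryAppliedInputTypes.Violence)
	}
}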