From b144cd1dc762aa7f1b1b313506d324c4f06eaa96 Mon Sep 17 00:00:00 2001 From: Steve Willoughby Date: Sun, 25 Feb 2024 15:48:00 -0800 Subject: [PATCH 01/38] added Dockerfile for example server --- v3/examples/server/Dockerfile | 51 +++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 v3/examples/server/Dockerfile diff --git a/v3/examples/server/Dockerfile b/v3/examples/server/Dockerfile new file mode 100644 index 000000000..42c4a0d13 --- /dev/null +++ b/v3/examples/server/Dockerfile @@ -0,0 +1,51 @@ +# If it is more convenient for you to run an instrumented test server in a Docker +# container, you can use this Dockerfile to build an image for that purpose. +# +# To build this image, have this Dockerfile in the current directory and run: +# docker build -t go-agent-test . +# +# To run a test, run the following: +# docker run -e NEW_RELIC_LICENSE_KEY="YOUR_KEY_HERE" -p 127.0.0.1:8000:8000 go-agent-test +# then drive traffic to it on localhost port 8000 +# +# This running application will write debugging logs showing all interaction +# with the collector on its standard output. +# +# The following HTTP endpoints can be accessed on port 8000 to invoke different +# instrumented server features: +# / +# /add_attribute +# /add_span_attribute +# /async +# /background +# /background_log +# /browser +# /custom_event +# /custommetric +# /external +# /ignore +# /log +# /message +# /mysql +# /notice_error +# /notice_error_with_attributes +# /notice_expected_error +# /roundtripper +# /segments +# /set_name +# /version +# +FROM golang:1.22 +MAINTAINER Steve Willoughby +WORKDIR /go +RUN git clone https://github.com/newrelic/go-agent +WORKDIR /go/go-agent/v3 +RUN go mod tidy +WORKDIR /go/go-agent/v3/examples/server +RUN go mod tidy +RUN go build +EXPOSE 8000 +CMD ["/go/go-agent/v3/examples/server/server"] +# +# END +# From eb118e8f14dcf52519f2c25093a1de467309e1db Mon Sep 17 00:00:00 2001 From: profawxhawk Date: Mon, 4 Mar 2024 01:51:46 +0530 Subject: [PATCH 02/38] add nil error check in wrap function --- v3/integrations/nrpkgerrors/nrkpgerrors_test.go | 2 ++ v3/integrations/nrpkgerrors/nrpkgerrors.go | 13 ++++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/v3/integrations/nrpkgerrors/nrkpgerrors_test.go b/v3/integrations/nrpkgerrors/nrkpgerrors_test.go index 17d1d62c0..3742bf33b 100644 --- a/v3/integrations/nrpkgerrors/nrkpgerrors_test.go +++ b/v3/integrations/nrpkgerrors/nrkpgerrors_test.go @@ -57,6 +57,7 @@ func TestWrappedStackTrace(t *testing.T) { {Error: theta(basicError{}), ExpectTopFrame: ""}, {Error: basicNRError(basicError{}), ExpectTopFrame: ""}, {Error: withAttributes(basicError{}), ExpectTopFrame: "", ExpectAttributes: map[string]interface{}{"testAttribute": 1, "foo": 2}}, + {Error: nil, ExpectTopFrame: ""}, } for idx, tc := range testcases { @@ -117,6 +118,7 @@ func TestWrappedErrorClass(t *testing.T) { {Error: alpha(basicError{}), ExpectClass: "nrpkgerrors.basicError"}, {Error: wrapWithClass(basicError{}, "zip"), ExpectClass: "zip"}, {Error: alpha(wrapWithClass(basicError{}, "zip")), ExpectClass: "nrpkgerrors.basicError"}, + {Error: nil, ExpectClass: "*errors.fundamental"}, } for idx, tc := range testcases { diff --git a/v3/integrations/nrpkgerrors/nrpkgerrors.go b/v3/integrations/nrpkgerrors/nrpkgerrors.go index 65af40e4b..76d22d536 100644 --- a/v3/integrations/nrpkgerrors/nrpkgerrors.go +++ b/v3/integrations/nrpkgerrors/nrpkgerrors.go @@ -5,7 +5,6 @@ // // This package improves the class and stack-trace fields of pkg/error errors // 
when they are recorded with Transaction.NoticeError. -// package nrpkgerrors import ( @@ -76,10 +75,22 @@ func errorClass(e error) string { return fmt.Sprintf("%T", cause) } +var ( + errNilError = errors.New("nil error") +) + // Wrap wraps a pkg/errors error so that when noticed by // newrelic.Transaction.NoticeError it gives an improved stacktrace and class // type. func Wrap(e error) error { + if e == nil { + return newrelic.Error{ + Message: errNilError.Error(), + Class: errorClass(errNilError), + Stack: stackTrace(errNilError), + } + } + attributes := make(map[string]interface{}) switch error := e.(type) { case newrelic.Error: From 43f0d5eb40ea7f1fefe76d181871eec9ed1e11bd Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 5 Mar 2024 14:08:24 -0500 Subject: [PATCH 03/38] Update CHANGELOG.md fix EOL policy link --- CHANGELOG.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 740cafe07..a9b75e1f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ ### Support statement We use the latest version of the Go language. At minimum, you should be using no version of Go older than what is supported by the Go team themselves. -See the [Go agent EOL Policy](/docs/apm/agents/go-agent/get-started/go-agent-eol-policy) for details about supported versions of the Go agent and third-party components. +See the [Go agent EOL Policy](https://docs.newrelic.com/docs/apm/agents/go-agent/get-started/go-agent-eol-policy/) for details about supported versions of the Go agent and third-party components. ## 3.29.1 ### Added @@ -19,7 +19,7 @@ See the [Go agent EOL Policy](/docs/apm/agents/go-agent/get-started/go-agent-eol ### Support statement We use the latest version of the Go language. At minimum, you should be using no version of Go older than what is supported by the Go team themselves. -See the [Go agent EOL Policy](/docs/apm/agents/go-agent/get-started/go-agent-eol-policy) for details about supported versions of the Go agent and third-party components. +See the [Go agent EOL Policy](https://docs.newrelic.com/docs/apm/agents/go-agent/get-started/go-agent-eol-policy/) for details about supported versions of the Go agent and third-party components. ## 3.29.0 ### Added @@ -31,7 +31,7 @@ See the [Go agent EOL Policy](/docs/apm/agents/go-agent/get-started/go-agent-eol ### Support statement We use the latest version of the Go language. At minimum, you should be using no version of Go older than what is supported by the Go team themselves. -See the [Go agent EOL Policy](/docs/apm/agents/go-agent/get-started/go-agent-eol-policy) for details about supported versions of the Go agent and third-party components. +See the [Go agent EOL Policy](https://docs.newrelic.com/docs/apm/agents/go-agent/get-started/go-agent-eol-policy/) for details about supported versions of the Go agent and third-party components. ## 3.28.1 ### Added @@ -43,7 +43,7 @@ Fixed an issue where `nil` `Request.Body` could be set to non-`nil` `request.Bod More Secure URL Redaction ### Support statement We use the latest version of the Go language. At minimum, you should be using no version of Go older than what is supported by the Go team themselves. -See the [Go agent EOL Policy](/docs/apm/agents/go-agent/get-started/go-agent-eol-policy) for details about supported versions of the Go agent and third-party components. 
+See the [Go agent EOL Policy](https://docs.newrelic.com/docs/apm/agents/go-agent/get-started/go-agent-eol-policy/) for details about supported versions of the Go agent and third-party components. ## 3.28.0 ### Fixed @@ -57,7 +57,7 @@ See the [Go agent EOL Policy](/docs/apm/agents/go-agent/get-started/go-agent-eol We use the latest version of the Go language. At minimum, you should be using no version of Go older than what is supported by the Go team themselves. -See the [Go agent EOL Policy](/docs/apm/agents/go-agent/get-started/go-agent-eol-policy) for details about supported versions of the Go agent and third-party components. +See the [Go agent EOL Policy](https://docs.newrelic.com/docs/apm/agents/go-agent/get-started/go-agent-eol-policy/) for details about supported versions of the Go agent and third-party components. ## 3.27.0 From e4e5c45022b71ea0ca366449472ea82e64411972 Mon Sep 17 00:00:00 2001 From: Bharath Kumar Thulasidoss Date: Wed, 6 Mar 2024 17:14:17 +0530 Subject: [PATCH 04/38] Update v3/integrations/nrpkgerrors/nrpkgerrors.go Co-authored-by: Emilio Garcia --- v3/integrations/nrpkgerrors/nrpkgerrors.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v3/integrations/nrpkgerrors/nrpkgerrors.go b/v3/integrations/nrpkgerrors/nrpkgerrors.go index 76d22d536..e332ffac8 100644 --- a/v3/integrations/nrpkgerrors/nrpkgerrors.go +++ b/v3/integrations/nrpkgerrors/nrpkgerrors.go @@ -76,7 +76,7 @@ func errorClass(e error) string { } var ( - errNilError = errors.New("nil error") + errNilError = errors.New("nil") ) // Wrap wraps a pkg/errors error so that when noticed by From 3ca0b8af771d21dbae8747d9da284148d7aa2bcc Mon Sep 17 00:00:00 2001 From: mirackara Date: Tue, 19 Mar 2024 09:01:02 -0500 Subject: [PATCH 05/38] Updated host to collector --- v3/internal/utilization/addresses.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/v3/internal/utilization/addresses.go b/v3/internal/utilization/addresses.go index 791b52726..aa2e7268d 100644 --- a/v3/internal/utilization/addresses.go +++ b/v3/internal/utilization/addresses.go @@ -47,8 +47,8 @@ func nonlocalIPAddressesByInterface() (map[string][]string, error) { // * The UDP connection interface is more likely to contain unique external IPs. 
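The change above relies on a classic trick: "dialing" a UDP address transmits no packets, but it does force the OS to commit to a route, so the connection's local address reveals the host's outbound IP. A minimal standalone sketch of the technique (any reachable host:port pair would do; the collector address here simply mirrors the patched code):

	package main

	import (
		"fmt"
		"net"
	)

	func main() {
		// No packet is sent for a UDP "connection"; the OS only selects
		// the interface and source IP it would use for this destination.
		conn, err := net.Dial("udp", "collector.newrelic.com:10002")
		if err != nil {
			panic(err)
		}
		defer conn.Close()
		// The chosen local address is the host's outbound IP.
		fmt.Println(conn.LocalAddr().(*net.UDPAddr).IP)
	}
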
func utilizationIPs() ([]string, error) { // Port choice designed to match - // https://source.datanerd.us/java-agent/java_agent/blob/master/newrelic-agent/src/main/java/com/newrelic/agent/config/Hostname.java#L110 - conn, err := net.Dial("udp", "newrelic.com:10002") + // https://github.com/newrelic/newrelic-java-agent/blob/main/newrelic-agent/src/main/java/com/newrelic/agent/config/Hostname.java#L120 + conn, err := net.Dial("udp", "collector.newrelic.com:10002") if err != nil { return nil, err } From 4da434473e63ed62213a8ad6f413be68e20ac4a2 Mon Sep 17 00:00:00 2001 From: Mirac Kara <55501260+mirackara@users.noreply.github.com> Date: Tue, 19 Mar 2024 11:31:10 -0500 Subject: [PATCH 06/38] OpenAI Integration (#860) * OpenAI integration --- .github/workflows/ci.yaml | 1 + .../chatcompletion/chatcompletion_example.go | 64 ++ .../chatcompletionfeedback.go | 68 ++ .../chatcompletionstreaming.go | 83 +++ .../examples/embeddings/embeddings_example.go | 59 ++ v3/integrations/nropenai/go.mod | 21 + v3/integrations/nropenai/nropenai.go | 491 +++++++++++++ v3/integrations/nropenai/nropenai_test.go | 684 ++++++++++++++++++ v3/newrelic/application.go | 27 +- v3/newrelic/config.go | 14 + v3/newrelic/config_options.go | 16 + v3/newrelic/config_test.go | 18 + v3/newrelic/internal_test.go | 20 + 13 files changed, 1565 insertions(+), 1 deletion(-) create mode 100644 v3/integrations/nropenai/examples/chatcompletion/chatcompletion_example.go create mode 100644 v3/integrations/nropenai/examples/chatcompletionfeedback/chatcompletionfeedback.go create mode 100644 v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go create mode 100644 v3/integrations/nropenai/examples/embeddings/embeddings_example.go create mode 100644 v3/integrations/nropenai/go.mod create mode 100644 v3/integrations/nropenai/nropenai.go create mode 100644 v3/integrations/nropenai/nropenai_test.go diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 9fd0c4ad3..efaae3ba6 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -66,6 +66,7 @@ jobs: - dirs: v3/integrations/nrmongo - dirs: v3/integrations/nrgraphqlgo,v3/integrations/nrgraphqlgo/example - dirs: v3/integrations/nrmssql + - dirs: v3/integrations/nropenai steps: - name: Checkout Code uses: actions/checkout@v2 diff --git a/v3/integrations/nropenai/examples/chatcompletion/chatcompletion_example.go b/v3/integrations/nropenai/examples/chatcompletion/chatcompletion_example.go new file mode 100644 index 000000000..6aeef8da1 --- /dev/null +++ b/v3/integrations/nropenai/examples/chatcompletion/chatcompletion_example.go @@ -0,0 +1,64 @@ +package main + +import ( + "fmt" + "os" + "time" + + "github.com/newrelic/go-agent/v3/integrations/nropenai" + "github.com/newrelic/go-agent/v3/newrelic" + openai "github.com/sashabaranov/go-openai" +) + +func main() { + // Start New Relic Application + app, err := newrelic.NewApplication( + newrelic.ConfigAppName("Basic OpenAI App"), + newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), + newrelic.ConfigDebugLogger(os.Stdout), + // Enable AI Monitoring + // NOTE - If High Security Mode is enabled, AI Monitoring will always be disabled + newrelic.ConfigAIMonitoringEnabled(true), + ) + if nil != err { + panic(err) + } + app.WaitForConnection(10 * time.Second) + + // OpenAI Config - Additionally, NRDefaultAzureConfig(apiKey, baseURL string) can be used for Azure + cfg := nropenai.NRDefaultConfig(os.Getenv("OPEN_AI_API_KEY")) + + // Create OpenAI Client - Additionally, NRNewClient(authToken 
string) can be used + client := nropenai.NRNewClientWithConfig(cfg) + + // Add any custom attributes + // NOTE: Attributes must start with "llm.", otherwise they will be ignored + client.AddCustomAttributes(map[string]interface{}{ + "llm.foo": "bar", + "llm.pi": 3.14, + }) + + // GPT Request + req := openai.ChatCompletionRequest{ + Model: openai.GPT3Dot5Turbo, + Temperature: 0.7, + MaxTokens: 150, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleUser, + Content: "What is 8*5", + }, + }, + } + // NRCreateChatCompletion returns a wrapped version of openai.ChatCompletionResponse + resp, err := nropenai.NRCreateChatCompletion(client, req, app) + + if err != nil { + panic(err) + } + + fmt.Println(resp.ChatCompletionResponse.Choices[0].Message.Content) + + // Shutdown Application + app.Shutdown(5 * time.Second) +} diff --git a/v3/integrations/nropenai/examples/chatcompletionfeedback/chatcompletionfeedback.go b/v3/integrations/nropenai/examples/chatcompletionfeedback/chatcompletionfeedback.go new file mode 100644 index 000000000..24eba417d --- /dev/null +++ b/v3/integrations/nropenai/examples/chatcompletionfeedback/chatcompletionfeedback.go @@ -0,0 +1,68 @@ +package main + +import ( + "fmt" + "os" + "time" + + "github.com/newrelic/go-agent/v3/integrations/nropenai" + "github.com/newrelic/go-agent/v3/newrelic" + openai "github.com/sashabaranov/go-openai" +) + +// Simulates feedback being sent to New Relic. Feedback on a chat completion requires +// having access to the ChatCompletionResponseWrapper which is returned by the NRCreateChatCompletion function. +func SendFeedback(app *newrelic.Application, resp nropenai.ChatCompletionResponseWrapper) { + trace_id := resp.TraceID + rating := "5" + category := "informative" + message := "The response was concise yet thorough." 
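+	// Arbitrary key/value metadata may accompany the feedback; it is passed
+	// through to RecordLLMFeedbackEvent along with the rating, category, and
+	// message above.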
+ customMetadata := map[string]interface{}{ + "foo": "bar", + "pi": 3.14, + } + + app.RecordLLMFeedbackEvent(trace_id, rating, category, message, customMetadata) +} + +func main() { + // Start New Relic Application + app, err := newrelic.NewApplication( + newrelic.ConfigAppName("Basic OpenAI App"), + newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), + newrelic.ConfigDebugLogger(os.Stdout), + newrelic.ConfigAIMonitoringEnabled(true), + ) + if nil != err { + panic(err) + } + app.WaitForConnection(10 * time.Second) + + // OpenAI Config - Additionally, NRDefaultAzureConfig(apiKey, baseURL string) can be used for Azure + cfg := nropenai.NRDefaultConfig(os.Getenv("OPEN_AI_API_KEY")) + client := nropenai.NRNewClientWithConfig(cfg) + // GPT Request + req := openai.ChatCompletionRequest{ + Model: openai.GPT3Dot5Turbo, + Temperature: 0.7, + MaxTokens: 150, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleUser, + Content: "What is 8*5", + }, + }, + } + // NRCreateChatCompletion returns a wrapped version of openai.ChatCompletionResponse + resp, err := nropenai.NRCreateChatCompletion(client, req, app) + + if err != nil { + panic(err) + } + // Print the contents of the message + fmt.Println("Message Response: ", resp.ChatCompletionResponse.Choices[0].Message.Content) + SendFeedback(app, resp) + + // Shutdown Application + app.Shutdown(5 * time.Second) +} diff --git a/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go b/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go new file mode 100644 index 000000000..4745aae3b --- /dev/null +++ b/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go @@ -0,0 +1,83 @@ +package main + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "time" + + "github.com/newrelic/go-agent/v3/integrations/nropenai" + "github.com/newrelic/go-agent/v3/newrelic" + openai "github.com/sashabaranov/go-openai" +) + +func main() { + // Start New Relic Application + app, err := newrelic.NewApplication( + newrelic.ConfigAppName("Basic OpenAI App"), + newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), + newrelic.ConfigDebugLogger(os.Stdout), + // Enable AI Monitoring + // NOTE - If High Security Mode is enabled, AI Monitoring will always be disabled + newrelic.ConfigAIMonitoringEnabled(true), + ) + if nil != err { + panic(err) + } + app.WaitForConnection(10 * time.Second) + + // OpenAI Config - Additionally, NRDefaultAzureConfig(apiKey, baseURL string) can be used for Azure + cfg := nropenai.NRDefaultConfig(os.Getenv("OPEN_AI_API_KEY")) + + // Create OpenAI Client - Additionally, NRNewClient(authToken string) can be used + client := nropenai.NRNewClientWithConfig(cfg) + + // Add any custom attributes + // NOTE: Attributes must start with "llm.", otherwise they will be ignored + client.AddCustomAttributes(map[string]interface{}{ + "llm.foo": "bar", + "llm.pi": 3.14, + }) + + // GPT Request + req := openai.ChatCompletionRequest{ + Model: openai.GPT3Dot5Turbo, + Temperature: 0.7, + MaxTokens: 1500, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleUser, + Content: "Say this is a test", + }, + }, + Stream: true, + } + ctx := context.Background() + + stream, err := nropenai.NRCreateChatCompletionStream(client, ctx, req, app) + + if err != nil { + panic(err) + } + defer stream.Close() + + fmt.Printf("Stream response: ") + for { + var response openai.ChatCompletionStreamResponse + response, err = stream.Recv() + if 
errors.Is(err, io.EOF) { + fmt.Println("\nStream finished") + break + } + if err != nil { + fmt.Printf("\nStream error: %v\n", err) + return + } + + fmt.Printf(response.Choices[0].Delta.Content) + } + // Shutdown Application + app.Shutdown(5 * time.Second) +} diff --git a/v3/integrations/nropenai/examples/embeddings/embeddings_example.go b/v3/integrations/nropenai/examples/embeddings/embeddings_example.go new file mode 100644 index 000000000..ff50d7428 --- /dev/null +++ b/v3/integrations/nropenai/examples/embeddings/embeddings_example.go @@ -0,0 +1,59 @@ +package main + +import ( + "fmt" + "os" + "time" + + "github.com/newrelic/go-agent/v3/integrations/nropenai" + "github.com/newrelic/go-agent/v3/newrelic" + openai "github.com/sashabaranov/go-openai" +) + +func main() { + // Start New Relic Application + app, err := newrelic.NewApplication( + newrelic.ConfigAppName("Basic OpenAI App"), + newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), + newrelic.ConfigDebugLogger(os.Stdout), + // Enable AI Monitoring + newrelic.ConfigAIMonitoringEnabled(true), + ) + if nil != err { + panic(err) + } + app.WaitForConnection(10 * time.Second) + + // OpenAI Config - Additionally, NRDefaultAzureConfig(apiKey, baseURL string) can be used for Azure + cfg := nropenai.NRDefaultConfig(os.Getenv("OPEN_AI_API_KEY")) + + // Create OpenAI Client - Additionally, NRNewClient(authToken string) can be used + client := nropenai.NRNewClientWithConfig(cfg) + + // Add any custom attributes + // NOTE: Attributes must start with "llm.", otherwise they will be ignored + client.CustomAttributes = map[string]interface{}{ + "llm.foo": "bar", + "llm.pi": 3.14, + } + + fmt.Println("Creating Embedding Request...") + // Create Embeddings + embeddingReq := openai.EmbeddingRequest{ + Input: []string{ + "The food was delicious and the waiter", + "Other examples of embedding request", + }, + Model: openai.AdaEmbeddingV2, + EncodingFormat: openai.EmbeddingEncodingFormatFloat, + } + resp, err := nropenai.NRCreateEmbedding(client, embeddingReq, app) + if err != nil { + panic(err) + } + + fmt.Println("Embedding Created!") + fmt.Println(resp.Usage.PromptTokens) + // Shutdown Application + app.Shutdown(5 * time.Second) +} diff --git a/v3/integrations/nropenai/go.mod b/v3/integrations/nropenai/go.mod new file mode 100644 index 000000000..338ad0aef --- /dev/null +++ b/v3/integrations/nropenai/go.mod @@ -0,0 +1,21 @@ +module github.com/newrelic/go-agent/v3/integrations/nropenai + +go 1.21.0 + +require ( + github.com/google/uuid v1.6.0 + github.com/newrelic/go-agent/v3 v3.30.0 + github.com/sashabaranov/go-openai v1.20.2 +) + +require ( + github.com/golang/protobuf v1.5.3 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/grpc v1.56.3 // indirect + google.golang.org/protobuf v1.30.0 // indirect +) + +replace github.com/newrelic/go-agent/v3 => ../.. \ No newline at end of file diff --git a/v3/integrations/nropenai/nropenai.go b/v3/integrations/nropenai/nropenai.go new file mode 100644 index 000000000..33054cec9 --- /dev/null +++ b/v3/integrations/nropenai/nropenai.go @@ -0,0 +1,491 @@ +// Copyright 2020 New Relic Corporation. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package nropenai + +import ( + "context" + "errors" + "reflect" + "runtime/debug" + "strings" + "time" + + "github.com/google/uuid" + "github.com/newrelic/go-agent/v3/internal" + "github.com/newrelic/go-agent/v3/newrelic" + "github.com/sashabaranov/go-openai" +) + +func init() { + // Get current go-openai version + info, ok := debug.ReadBuildInfo() + if info != nil && ok { + for _, module := range info.Deps { + if module != nil && strings.Contains(module.Path, "go-openai") { + internal.TrackUsage("Go", "ML", "OpenAI", module.Version) + return + } + } + } + + internal.TrackUsage("Go", "ML", "OpenAI", "unknown") + +} + +var ( + errAIMonitoringDisabled = errors.New("AI Monitoring is set to disabled or High Security Mode is enabled. Please enable AI Monitoring and ensure High Security Mode is disabled") +) + +type OpenAIClient interface { + CreateChatCompletion(ctx context.Context, request openai.ChatCompletionRequest) (response openai.ChatCompletionResponse, err error) + CreateChatCompletionStream(ctx context.Context, request openai.ChatCompletionRequest) (stream *openai.ChatCompletionStream, err error) + CreateEmbeddings(ctx context.Context, conv openai.EmbeddingRequestConverter) (res openai.EmbeddingResponse, err error) +} + +// Wrapper for OpenAI Configuration +type ConfigWrapper struct { + Config *openai.ClientConfig + LicenseKeyLastFour string +} + +// Wrapper for OpenAI Client with Custom Attributes that can be set for all LLM Events +type ClientWrapper struct { + Client OpenAIClient + LicenseKeyLastFour string + // Set of Custom Attributes that get tied to all LLM Events + CustomAttributes map[string]interface{} +} + +func FormatAPIKey(apiKey string) string { + return "sk-" + apiKey[len(apiKey)-4:] +} + +// Default Config +func NRDefaultConfig(authToken string) *ConfigWrapper { + cfg := openai.DefaultConfig(authToken) + return &ConfigWrapper{ + Config: &cfg, + LicenseKeyLastFour: FormatAPIKey(authToken), + } +} + +// Azure Config +func NRDefaultAzureConfig(apiKey, baseURL string) *ConfigWrapper { + cfg := openai.DefaultAzureConfig(apiKey, baseURL) + return &ConfigWrapper{ + Config: &cfg, + LicenseKeyLastFour: FormatAPIKey(apiKey), + } +} + +// Call to Create Client Wrapper +func NRNewClient(authToken string) *ClientWrapper { + client := openai.NewClient(authToken) + return &ClientWrapper{ + Client: client, + LicenseKeyLastFour: FormatAPIKey(authToken), + } +} + +// NewClientWithConfig creates new OpenAI API client for specified config. 
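+//
+// A typical construction, mirroring the bundled examples, looks like:
+//
+//	cfg := nropenai.NRDefaultConfig(os.Getenv("OPEN_AI_API_KEY"))
+//	client := nropenai.NRNewClientWithConfig(cfg)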
+func NRNewClientWithConfig(config *ConfigWrapper) *ClientWrapper {
+	client := openai.NewClientWithConfig(*config.Config)
+	return &ClientWrapper{
+		Client:             client,
+		LicenseKeyLastFour: config.LicenseKeyLastFour,
+	}
+}
+
+// AddCustomAttributes adds custom attributes to the ClientWrapper. Only
+// attributes whose keys are prefixed with "llm." are kept; all others are
+// silently ignored.
+func (cw *ClientWrapper) AddCustomAttributes(attributes map[string]interface{}) {
+	if cw.CustomAttributes == nil {
+		cw.CustomAttributes = make(map[string]interface{})
+	}
+
+	for key, value := range attributes {
+		if strings.HasPrefix(key, "llm.") {
+			cw.CustomAttributes[key] = value
+		}
+	}
+}
+
+// AppendCustomAttributesToEvent merges the wrapper's custom attributes into
+// the given event data map and returns the map.
+func AppendCustomAttributesToEvent(cw *ClientWrapper, data map[string]interface{}) map[string]interface{} {
+	for k, v := range cw.CustomAttributes {
+		data[k] = v
+	}
+	return data
+}
+
+// GetInput returns the first element of a slice or array input; if multiple
+// messages are sent, only the first message is used for the "content" field.
+func GetInput(any interface{}) any {
+	v := reflect.ValueOf(any)
+	if v.Kind() == reflect.Array || v.Kind() == reflect.Slice {
+		if v.Len() > 0 {
+			// Return the first element
+			return v.Index(0).Interface()
+		}
+		// Input passed in is empty
+		return ""
+	}
+	return any
+}
+
+// ChatCompletionResponseWrapper wraps the ChatCompletionResponse returned from
+// NRCreateChatCompletion. It also includes the TraceID of the transaction for
+// linking a chat response with its feedback.
+type ChatCompletionResponseWrapper struct {
+	ChatCompletionResponse openai.ChatCompletionResponse
+	TraceID                string
+}
+
+// ChatCompletionStreamWrapper wraps the ChatCompletionStream returned from
+// NRCreateChatCompletionStream.
+type ChatCompletionStreamWrapper struct {
+	stream *openai.ChatCompletionStream
+	txn    *newrelic.Transaction
+}
+
+// Recv calls the underlying stream's Recv method and passes its response and
+// any error through.
+func (w *ChatCompletionStreamWrapper) Recv() (openai.ChatCompletionStreamResponse, error) {
+	response, err := w.stream.Recv()
+	if err != nil {
+		return response, err
+	}
+
+	return response, nil
+}
+
+// Close closes the underlying stream.
+func (w *ChatCompletionStreamWrapper) Close() {
+	w.stream.Close()
+}
+
+// NRCreateChatCompletionSummary captures the request and response data for a chat completion request and records a custom event in New Relic. 
It also captures the completion messages +// With a call to NRCreateChatCompletionMessage +func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Application, cw *ClientWrapper, req openai.ChatCompletionRequest) ChatCompletionResponseWrapper { + // Get App Config for setting App Name Attribute + appConfig, configErr := app.Config() + if !configErr { + appConfig.AppName = "Unknown" + } + uuid := uuid.New() + spanID := txn.GetTraceMetadata().SpanID + traceID := txn.GetTraceMetadata().TraceID + transactionID := traceID[:16] + + ChatCompletionSummaryData := map[string]interface{}{} + + // Start span + chatCompletionSpan := txn.StartSegment("Llm/completion/OpenAI/CreateChatCompletion") + // Track Total time taken for the chat completion or embedding call to complete in milliseconds + start := time.Now() + resp, err := cw.Client.CreateChatCompletion( + context.Background(), + req, + ) + duration := time.Since(start).Milliseconds() + chatCompletionSpan.End() + if err != nil { + ChatCompletionSummaryData["error"] = true + // notice error with custom attributes + txn.NoticeError(newrelic.Error{ + Message: err.Error(), + Class: "OpenAIError", + Attributes: map[string]interface{}{ + "http.status": resp.Header().Get("Status"), + "error.code": resp.Header().Get("Error-Code"), + "completion_id": uuid.String(), + }, + }) + } + + // ratelimitLimitTokensUsageBased, ratelimitResetTokensUsageBased, and ratelimitRemainingTokensUsageBased are not in the response + // Request Headers + ChatCompletionSummaryData["request.temperature"] = req.Temperature + ChatCompletionSummaryData["request.max_tokens"] = req.MaxTokens + ChatCompletionSummaryData["request.model"] = req.Model + ChatCompletionSummaryData["model"] = req.Model + ChatCompletionSummaryData["duration"] = duration + + // Response Data + ChatCompletionSummaryData["response.number_of_messages"] = len(resp.Choices) + ChatCompletionSummaryData["response.model"] = resp.Model + ChatCompletionSummaryData["request_id"] = resp.ID + ChatCompletionSummaryData["response.organization"] = resp.Header().Get("Openai-Organization") + ChatCompletionSummaryData["response.number_of_messages"] = len(resp.Choices) + ChatCompletionSummaryData["response.usage.total_tokens"] = resp.Usage.TotalTokens + ChatCompletionSummaryData["response.usage.prompt_tokens"] = resp.Usage.PromptTokens + ChatCompletionSummaryData["response.usage.completion_tokens"] = resp.Usage.CompletionTokens + if len(resp.Choices) > 0 { + finishReason, err := resp.Choices[0].FinishReason.MarshalJSON() + if err != nil { + ChatCompletionSummaryData["error"] = true + txn.NoticeError(newrelic.Error{ + Message: err.Error(), + Class: "OpenAIError", + }) + } else { + ChatCompletionSummaryData["response.choices.finish_reason"] = string(finishReason) + } + } + + // Response Headers + ChatCompletionSummaryData["response.headers.llmVersion"] = resp.Header().Get("Openai-Version") + ChatCompletionSummaryData["response.headers.ratelimitLimitRequests"] = resp.Header().Get("X-Ratelimit-Limit-Requests") + ChatCompletionSummaryData["response.headers.ratelimitLimitTokens"] = resp.Header().Get("X-Ratelimit-Limit-Tokens") + ChatCompletionSummaryData["response.headers.ratelimitResetTokens"] = resp.Header().Get("X-Ratelimit-Reset-Tokens") + ChatCompletionSummaryData["response.headers.ratelimitResetRequests"] = resp.Header().Get("X-Ratelimit-Reset-Requests") + ChatCompletionSummaryData["response.headers.ratelimitRemainingTokens"] = resp.Header().Get("X-Ratelimit-Remaining-Tokens") + 
ChatCompletionSummaryData["response.headers.ratelimitRemainingRequests"] = resp.Header().Get("X-Ratelimit-Remaining-Requests") + + // New Relic Attributes + ChatCompletionSummaryData["id"] = uuid.String() + ChatCompletionSummaryData["span_id"] = spanID + ChatCompletionSummaryData["transaction_id"] = transactionID + ChatCompletionSummaryData["trace_id"] = traceID + ChatCompletionSummaryData["api_key_last_four_digits"] = cw.LicenseKeyLastFour + ChatCompletionSummaryData["vendor"] = "OpenAI" + ChatCompletionSummaryData["ingest_source"] = "Go" + ChatCompletionSummaryData["appName"] = appConfig.AppName + + // Record any custom attributes if they exist + ChatCompletionSummaryData = AppendCustomAttributesToEvent(cw, ChatCompletionSummaryData) + + // Record Custom Event + app.RecordCustomEvent("LlmChatCompletionSummary", ChatCompletionSummaryData) + + // Capture completion messages + NRCreateChatCompletionMessage(txn, app, resp, uuid, cw) + txn.End() + + return ChatCompletionResponseWrapper{ + ChatCompletionResponse: resp, + TraceID: traceID, + } +} + +// NRCreateChatCompletionMessage captures the completion messages and records a custom event in New Relic for each message +func NRCreateChatCompletionMessage(txn *newrelic.Transaction, app *newrelic.Application, resp openai.ChatCompletionResponse, uuid uuid.UUID, cw *ClientWrapper) { + spanID := txn.GetTraceMetadata().SpanID + traceID := txn.GetTraceMetadata().TraceID + transactionID := traceID[:16] + appCfg, configErr := app.Config() + if !configErr { + appCfg.AppName = "Unknown" + } + chatCompletionMessageSpan := txn.StartSegment("Llm/completion/OpenAI/CreateChatCompletionMessage") + for i, choice := range resp.Choices { + ChatCompletionMessageData := map[string]interface{}{} + // if the response doesn't have an ID, use the UUID from the summary + if resp.ID == "" { + ChatCompletionMessageData["id"] = uuid.String() + } else { + ChatCompletionMessageData["id"] = resp.ID + } + + // Response Data + ChatCompletionMessageData["response.model"] = resp.Model + + if appCfg.AIMonitoring.RecordContent.Enabled { + ChatCompletionMessageData["content"] = choice.Message.Content + } + + ChatCompletionMessageData["role"] = choice.Message.Role + + // Request Headers + ChatCompletionMessageData["request_id"] = resp.Header().Get("X-Request-Id") + + // New Relic Attributes + ChatCompletionMessageData["sequence"] = i + ChatCompletionMessageData["vendor"] = "openai" + ChatCompletionMessageData["ingest_source"] = "go" + ChatCompletionMessageData["span_id"] = spanID + ChatCompletionMessageData["trace_id"] = traceID + ChatCompletionMessageData["transaction_id"] = transactionID + // TO:DO completion_id set in CompletionSummary which is a UUID generated by the agent to identify the event + // TO:DO - llm.conversation_id + + // If custom attributes are set, add them to the data + ChatCompletionMessageData = AppendCustomAttributesToEvent(cw, ChatCompletionMessageData) + + // Record Custom Event for each message + app.RecordCustomEvent("LlmChatCompletionMessage", ChatCompletionMessageData) + + } + + chatCompletionMessageSpan.End() +} + +// NRCreateChatCompletion is a wrapper for the OpenAI CreateChatCompletion method. 
+// If AI Monitoring is disabled, the wrapped function will still call the OpenAI CreateChatCompletion method and return the response with no New Relic instrumentation +func NRCreateChatCompletion(cw *ClientWrapper, req openai.ChatCompletionRequest, app *newrelic.Application) (ChatCompletionResponseWrapper, error) { + config, cfgErr := app.Config() + if !cfgErr { + config.AppName = "Unknown" + } + resp := ChatCompletionResponseWrapper{} + // If AI Monitoring is disabled, do not start a transaction but still perform the request + if !config.AIMonitoring.Enabled { + chatresp, err := cw.Client.CreateChatCompletion(context.Background(), req) + resp.ChatCompletionResponse = chatresp + if err != nil { + + return resp, err + } + return resp, errAIMonitoringDisabled + } + // Start NR Transaction + txn := app.StartTransaction("OpenAIChatCompletion") + resp = NRCreateChatCompletionSummary(txn, app, cw, req) + + return resp, nil +} + +// NRCreateEmbedding is a wrapper for the OpenAI CreateEmbedding method. +// If AI Monitoring is disabled, the wrapped function will still call the OpenAI CreateEmbedding method and return the response with no New Relic instrumentation +func NRCreateEmbedding(cw *ClientWrapper, req openai.EmbeddingRequest, app *newrelic.Application) (openai.EmbeddingResponse, error) { + config, cfgErr := app.Config() + if !cfgErr { + config.AppName = "Unknown" + } + + resp := openai.EmbeddingResponse{} + + // If AI Monitoring is disabled, do not start a transaction but still perform the request + if !config.AIMonitoring.Enabled { + resp, err := cw.Client.CreateEmbeddings(context.Background(), req) + if err != nil { + + return resp, err + } + return resp, errAIMonitoringDisabled + } + + // Start NR Transaction + txn := app.StartTransaction("OpenAIEmbedding") + + spanID := txn.GetTraceMetadata().SpanID + traceID := txn.GetTraceMetadata().TraceID + transactionID := traceID[:16] + EmbeddingsData := map[string]interface{}{} + uuid := uuid.New() + + embeddingSpan := txn.StartSegment("Llm/completion/OpenAI/CreateEmbedding") + start := time.Now() + resp, err := cw.Client.CreateEmbeddings(context.Background(), req) + duration := time.Since(start).Milliseconds() + embeddingSpan.End() + + if err != nil { + EmbeddingsData["error"] = true + txn.NoticeError(newrelic.Error{ + Message: err.Error(), + Class: "OpenAIError", + Attributes: map[string]interface{}{ + "http.status": resp.Header().Get("Status"), + "error.code": resp.Header().Get("Error-Code"), + "embedding_id": uuid.String(), + }, + }) + } + + // Request Data + if config.AIMonitoring.RecordContent.Enabled { + EmbeddingsData["input"] = GetInput(req.Input) + } + EmbeddingsData["api_key_last_four_digits"] = cw.LicenseKeyLastFour + EmbeddingsData["request.model"] = string(req.Model) + EmbeddingsData["duration"] = duration + + // Response Data + EmbeddingsData["response.model"] = string(resp.Model) + EmbeddingsData["response.usage.total_tokens"] = resp.Usage.TotalTokens + EmbeddingsData["response.usage.prompt_tokens"] = resp.Usage.PromptTokens + + // Response Headers + EmbeddingsData["response.organization"] = resp.Header().Get("Openai-Organization") + EmbeddingsData["response.headers.llmVersion"] = resp.Header().Get("Openai-Version") + EmbeddingsData["response.headers.ratelimitLimitRequests"] = resp.Header().Get("X-Ratelimit-Limit-Requests") + EmbeddingsData["response.headers.ratelimitLimitTokens"] = resp.Header().Get("X-Ratelimit-Limit-Tokens") + EmbeddingsData["response.headers.ratelimitResetTokens"] = 
resp.Header().Get("X-Ratelimit-Reset-Tokens") + EmbeddingsData["response.headers.ratelimitResetRequests"] = resp.Header().Get("X-Ratelimit-Reset-Requests") + EmbeddingsData["response.headers.ratelimitRemainingTokens"] = resp.Header().Get("X-Ratelimit-Remaining-Tokens") + EmbeddingsData["response.headers.ratelimitRemainingRequests"] = resp.Header().Get("X-Ratelimit-Remaining-Requests") + + EmbeddingsData = AppendCustomAttributesToEvent(cw, EmbeddingsData) + + // New Relic Attributes + EmbeddingsData["id"] = uuid.String() + EmbeddingsData["vendor"] = "OpenAI" + EmbeddingsData["ingest_source"] = "Go" + EmbeddingsData["span_id"] = spanID + EmbeddingsData["transaction_id"] = transactionID + EmbeddingsData["trace_id"] = traceID + + app.RecordCustomEvent("LlmEmbedding", EmbeddingsData) + txn.End() + return resp, nil +} + +func NRCreateChatCompletionStream(cw *ClientWrapper, ctx context.Context, req openai.ChatCompletionRequest, app *newrelic.Application) (*ChatCompletionStreamWrapper, error) { + config, cfgErr := app.Config() + if !cfgErr { + config.AppName = "Unknown" + } + + // If AI Monitoring OR AIMonitoring.Streaming is disabled, do not start a transaction but still perform the request + if !config.AIMonitoring.Enabled || !config.AIMonitoring.Streaming.Enabled { + stream, err := cw.Client.CreateChatCompletionStream(ctx, req) + if err != nil { + + return &ChatCompletionStreamWrapper{stream: stream}, err + } + return &ChatCompletionStreamWrapper{stream: stream}, errAIMonitoringDisabled + } + + txn := app.StartTransaction("OpenAIChatCompletionStream") + spanID := txn.GetTraceMetadata().SpanID + traceID := txn.GetTraceMetadata().TraceID + transactionID := traceID[:16] + StreamingData := map[string]interface{}{} + uuid := uuid.New() + + streamSpan := txn.StartSegment("Llm/completion/OpenAI/stream") + start := time.Now() + stream, err := cw.Client.CreateChatCompletionStream(ctx, req) + duration := time.Since(start).Milliseconds() + streamSpan.End() + + if err != nil { + StreamingData["error"] = true + txn.NoticeError(newrelic.Error{ + Message: err.Error(), + Class: "OpenAIError", + }) + txn.End() + return nil, err + } + + // Request Data + StreamingData["api_key_last_four_digits"] = cw.LicenseKeyLastFour + StreamingData["request.model"] = string(req.Model) + StreamingData["request.temperature"] = req.Temperature + StreamingData["request.max_tokens"] = req.MaxTokens + StreamingData["model"] = req.Model + + StreamingData["duration"] = duration + + // New Relic Attributes + StreamingData["id"] = uuid.String() + StreamingData["span_id"] = spanID + StreamingData["transaction_id"] = transactionID + StreamingData["trace_id"] = traceID + StreamingData["api_key_last_four_digits"] = cw.LicenseKeyLastFour + StreamingData["vendor"] = "OpenAI" + StreamingData["ingest_source"] = "Go" + StreamingData["appName"] = config.AppName + app.RecordCustomEvent("LlmChatCompletionSummary", StreamingData) + txn.End() + return &ChatCompletionStreamWrapper{stream: stream, txn: txn}, nil + +} diff --git a/v3/integrations/nropenai/nropenai_test.go b/v3/integrations/nropenai/nropenai_test.go new file mode 100644 index 000000000..6648de26f --- /dev/null +++ b/v3/integrations/nropenai/nropenai_test.go @@ -0,0 +1,684 @@ +package nropenai + +import ( + "context" + "errors" + "net/http" + "testing" + + "github.com/newrelic/go-agent/v3/internal" + "github.com/newrelic/go-agent/v3/internal/integrationsupport" + "github.com/newrelic/go-agent/v3/newrelic" + "github.com/sashabaranov/go-openai" +) + +type MockOpenAIClient struct { + 
MockCreateChatCompletionResp openai.ChatCompletionResponse + MockCreateEmbeddingsResp openai.EmbeddingResponse + MockCreateChatCompletionStream *openai.ChatCompletionStream + MockCreateChatCompletionErr error +} + +// Mock CreateChatCompletion function that returns a mock response +func (m *MockOpenAIClient) CreateChatCompletion(ctx context.Context, req openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error) { + + MockResponse := openai.ChatCompletionResponse{ + ID: "chatcmpl-123", + Object: "chat.completion", + Created: 1677652288, + Model: openai.GPT3Dot5Turbo, + SystemFingerprint: "fp_44709d6fcb", + Usage: openai.Usage{ + PromptTokens: 9, + CompletionTokens: 12, + TotalTokens: 21, + }, + Choices: []openai.ChatCompletionChoice{ + { + Index: 0, + Message: openai.ChatCompletionMessage{ + Role: openai.ChatMessageRoleAssistant, + Content: "\n\nHello there, how may I assist you today?", + }, + }, + }, + } + hdrs := http.Header{} + hdrs.Add("X-Request-Id", "chatcmpl-123") + hdrs.Add("ratelimit-limit-tokens", "100") + hdrs.Add("Openai-Version", "2020-10-01") + hdrs.Add("X-Ratelimit-Limit-Requests", "10000") + hdrs.Add("X-Ratelimit-Limit-Tokens", "100") + hdrs.Add("X-Ratelimit-Reset-Tokens", "100") + hdrs.Add("X-Ratelimit-Reset-Requests", "10000") + hdrs.Add("X-Ratelimit-Remaining-Tokens", "100") + hdrs.Add("X-Ratelimit-Remaining-Requests", "10000") + hdrs.Add("Openai-Organization", "user-123") + + if req.Messages[0].Content == "testError" { + mockRespErr := openai.ChatCompletionResponse{} + hdrs.Add("Status", "404") + hdrs.Add("Error-Code", "404") + mockRespErr.SetHeader(hdrs) + return mockRespErr, errors.New("test error") + } + MockResponse.SetHeader(hdrs) + + return MockResponse, m.MockCreateChatCompletionErr +} + +func (m *MockOpenAIClient) CreateEmbeddings(ctx context.Context, conv openai.EmbeddingRequestConverter) (res openai.EmbeddingResponse, err error) { + MockResponse := openai.EmbeddingResponse{ + Model: openai.AdaEmbeddingV2, + Usage: openai.Usage{ + PromptTokens: 9, + CompletionTokens: 12, + TotalTokens: 21, + }, + Data: []openai.Embedding{ + { + Embedding: []float32{0.1, 0.2, 0.3}, + }, + }, + } + hdrs := http.Header{} + hdrs.Add("X-Request-Id", "chatcmpl-123") + hdrs.Add("ratelimit-limit-tokens", "100") + hdrs.Add("Openai-Version", "2020-10-01") + hdrs.Add("X-Ratelimit-Limit-Requests", "10000") + hdrs.Add("X-Ratelimit-Limit-Tokens", "100") + hdrs.Add("X-Ratelimit-Reset-Tokens", "100") + hdrs.Add("X-Ratelimit-Reset-Requests", "10000") + hdrs.Add("X-Ratelimit-Remaining-Tokens", "100") + hdrs.Add("X-Ratelimit-Remaining-Requests", "10000") + hdrs.Add("Openai-Organization", "user-123") + cv := conv.Convert() + if cv.Input == "testError" { + mockRespErr := openai.EmbeddingResponse{} + hdrs.Add("Status", "404") + hdrs.Add("Error-Code", "404") + mockRespErr.SetHeader(hdrs) + return mockRespErr, errors.New("test error") + } + + MockResponse.SetHeader(hdrs) + + return MockResponse, m.MockCreateChatCompletionErr +} + +func (m *MockOpenAIClient) CreateChatCompletionStream(ctx context.Context, request openai.ChatCompletionRequest) (stream *openai.ChatCompletionStream, err error) { + if request.Messages[0].Content == "testError" { + return m.MockCreateChatCompletionStream, errors.New("test error") + } + return m.MockCreateChatCompletionStream, m.MockCreateChatCompletionErr +} + +func TestFormatAPIKey(t *testing.T) { + dummyAPIKey := "sk-12345678900abcdefghijklmnop" + formattedKey := FormatAPIKey(dummyAPIKey) + if formattedKey != "sk-mnop" { + t.Errorf("Formatted API key is 
incorrect: expected: %s actual: %s", "sk-mnop", formattedKey) + + } +} +func TestDefaultConfig(t *testing.T) { + dummyAPIKey := "sk-12345678900abcdefghijklmnop" + cfg := NRDefaultConfig(dummyAPIKey) + // Default Values + if cfg.LicenseKeyLastFour != "sk-mnop" { + t.Errorf("API Key is incorrect: expected: %s actual: %s", "sk-mnop", cfg.LicenseKeyLastFour) + } + if cfg.Config.OrgID != "" { + t.Errorf("OrgID is incorrect: expected: %s actual: %s", "", cfg.Config.OrgID) + } + // Default Value set by openai package + if cfg.Config.APIType != openai.APITypeOpenAI { + t.Errorf("API Type is incorrect: expected: %s actual: %s", openai.APITypeOpenAI, cfg.Config.APIType) + } +} + +func TestDefaultConfigAzure(t *testing.T) { + dummyAPIKey := "sk-12345678900abcdefghijklmnop" + baseURL := "https://azure-base-url.com" + cfg := NRDefaultAzureConfig(dummyAPIKey, baseURL) + // Default Values + if cfg.LicenseKeyLastFour != "sk-mnop" { + t.Errorf("API Key is incorrect: expected: %s actual: %s", "sk-mnop", cfg.LicenseKeyLastFour) + } + if cfg.Config.BaseURL != baseURL { + t.Errorf("baseURL is incorrect: expected: %s actual: %s", baseURL, cfg.Config.BaseURL) + } + // Default Value set by openai package + if cfg.Config.APIType != openai.APITypeAzure { + t.Errorf("API Type is incorrect: expected: %s actual: %s", openai.APITypeAzure, cfg.Config.APIType) + } +} + +func TestNRNewClient(t *testing.T) { + dummyAPIKey := "sk-12345678900abcdefghijklmnop" + client := NRNewClient(dummyAPIKey) + if client.LicenseKeyLastFour != "sk-mnop" { + t.Errorf("API Key is incorrect: expected: %s actual: %s", "sk-mnop", client.LicenseKeyLastFour) + } +} + +func TestNRNewClientWithConfigs(t *testing.T) { + // Regular Config + dummyAPIKey := "sk-12345678900abcdefghijklmnop" + cfg := NRDefaultConfig(dummyAPIKey) + client := NRNewClientWithConfig(cfg) + if client.LicenseKeyLastFour != "sk-mnop" { + t.Errorf("API Key is incorrect: expected: %s actual: %s", "sk-mnop", client.LicenseKeyLastFour) + } + // Azure Config + baseURL := "https://azure-base-url.com" + azureCfg := NRDefaultAzureConfig(dummyAPIKey, baseURL) + azureClient := NRNewClientWithConfig(azureCfg) + if azureClient.LicenseKeyLastFour != "sk-mnop" { + t.Errorf("API Key is incorrect: expected: %s actual: %s", "sk-mnop", azureClient.LicenseKeyLastFour) + } + if azureCfg.Config.BaseURL != baseURL { + t.Errorf("baseURL is incorrect: expected: %s actual: %s", baseURL, azureCfg.Config.BaseURL) + } + // Default Value set by openai package + if azureCfg.Config.APIType != openai.APITypeAzure { + t.Errorf("API Type is incorrect: expected: %s actual: %s", openai.APITypeAzure, azureCfg.Config.APIType) + } +} + +func TestAddCustomAttributes(t *testing.T) { + client := NRNewClient("sk-12345678900abcdefghijklmnop") + client.AddCustomAttributes(map[string]interface{}{ + "llm.foo": "bar", + }) + if client.CustomAttributes["llm.foo"] != "bar" { + t.Errorf("Custom attribute is incorrect: expected: %s actual: %s", "bar", client.CustomAttributes["llm.foo"]) + } +} +func TestAddCustomAttributesIncorrectPrefix(t *testing.T) { + client := NRNewClient("sk-12345678900abcdefghijklmnop") + client.AddCustomAttributes(map[string]interface{}{ + "msdwmdoawd.foo": "bar", + }) + if len(client.CustomAttributes) != 0 { + t.Errorf("Custom attribute is incorrect: expected: %d actual: %d", 0, len(client.CustomAttributes)) + } +} + +func TestNRCreateChatCompletion(t *testing.T) { + mockClient := &MockOpenAIClient{} + cw := &ClientWrapper{ + Client: mockClient, + LicenseKeyLastFour: "sk-mnop", + } + req := 
openai.ChatCompletionRequest{ + Model: openai.GPT3Dot5Turbo, + Temperature: 0, + MaxTokens: 150, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleUser, + Content: "What is 8*5", + }, + }, + } + app := integrationsupport.NewTestApp(nil, newrelic.ConfigAIMonitoringEnabled(true)) + resp, err := NRCreateChatCompletion(cw, req, app.Application) + if err != nil { + t.Error(err) + } + if resp.ChatCompletionResponse.Choices[0].Message.Content != "\n\nHello there, how may I assist you today?" { + t.Errorf("Chat completion response is incorrect: expected: %s actual: %s", "\n\nHello there, how may I assist you today?", resp.ChatCompletionResponse.Choices[0].Message.Content) + } + app.ExpectCustomEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "type": "LlmChatCompletionSummary", + "timestamp": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{ + "ingest_source": "Go", + "vendor": "OpenAI", + "model": "gpt-3.5-turbo", + "id": internal.MatchAnything, + "transaction_id": internal.MatchAnything, + "trace_id": internal.MatchAnything, + "span_id": internal.MatchAnything, + "appName": "my app", + "duration": 0, + "response.choices.finish_reason": internal.MatchAnything, + "request.temperature": 0, + "api_key_last_four_digits": "sk-mnop", + "request_id": "chatcmpl-123", + "request.model": "gpt-3.5-turbo", + "request.max_tokens": 150, + "response.number_of_messages": 1, + "response.headers.llmVersion": "2020-10-01", + "response.organization": "user-123", + "response.usage.completion_tokens": 12, + "response.model": "gpt-3.5-turbo", + "response.usage.total_tokens": 21, + "response.usage.prompt_tokens": 9, + "response.headers.ratelimitRemainingTokens": "100", + "response.headers.ratelimitRemainingRequests": "10000", + "response.headers.ratelimitResetTokens": "100", + "response.headers.ratelimitResetRequests": "10000", + "response.headers.ratelimitLimitTokens": "100", + "response.headers.ratelimitLimitRequests": "10000", + }, + }, + { + Intrinsics: map[string]interface{}{ + "type": "LlmChatCompletionMessage", + "timestamp": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{ + "trace_id": internal.MatchAnything, + "transaction_id": internal.MatchAnything, + "span_id": internal.MatchAnything, + "id": "chatcmpl-123", + "sequence": 0, + "role": "assistant", + "content": "\n\nHello there, how may I assist you today?", + "request_id": "chatcmpl-123", + "vendor": "openai", + "ingest_source": "go", + "response.model": "gpt-3.5-turbo", + }, + AgentAttributes: map[string]interface{}{}, + }, + }) + +} + +func TestNRCreateChatCompletionAIMonitoringNotEnabled(t *testing.T) { + mockClient := &MockOpenAIClient{} + cw := &ClientWrapper{ + Client: mockClient, + LicenseKeyLastFour: "sk-mnop", + } + req := openai.ChatCompletionRequest{ + Model: openai.GPT3Dot5Turbo, + Temperature: 0, + MaxTokens: 150, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleUser, + Content: "What is 8*5", + }, + }, + } + app := integrationsupport.NewTestApp(nil) + resp, err := NRCreateChatCompletion(cw, req, app.Application) + if err != errAIMonitoringDisabled { + t.Error(err) + } + // If AI Monitoring is disabled, no events should be sent, but a response from OpenAI should still be returned + if resp.ChatCompletionResponse.Choices[0].Message.Content != "\n\nHello there, how may I assist you today?" 
{ + t.Errorf("Chat completion response is incorrect: expected: %s actual: %s", "\n\nHello there, how may I assist you today?", resp.ChatCompletionResponse.Choices[0].Message.Content) + } + app.ExpectCustomEvents(t, []internal.WantEvent{}) + +} + +func TestNRCreateChatCompletionError(t *testing.T) { + mockClient := &MockOpenAIClient{} + cw := &ClientWrapper{ + Client: mockClient, + LicenseKeyLastFour: "sk-mnop", + } + req := openai.ChatCompletionRequest{ + Model: openai.GPT3Dot5Turbo, + Temperature: 0, + MaxTokens: 150, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleUser, + Content: "testError", + }, + }, + } + app := integrationsupport.NewTestApp(nil, newrelic.ConfigAIMonitoringEnabled(true)) + _, err := NRCreateChatCompletion(cw, req, app.Application) + if err != nil { + t.Error(err) + } + app.ExpectCustomEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "type": "LlmChatCompletionSummary", + "timestamp": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{ + "error": true, + "ingest_source": "Go", + "vendor": "OpenAI", + "model": "gpt-3.5-turbo", + "id": internal.MatchAnything, + "transaction_id": internal.MatchAnything, + "trace_id": internal.MatchAnything, + "span_id": internal.MatchAnything, + "appName": "my app", + "duration": 0, + "request.temperature": 0, + "api_key_last_four_digits": "sk-mnop", + "request_id": "", + "request.model": "gpt-3.5-turbo", + "request.max_tokens": 150, + "response.number_of_messages": 0, + "response.headers.llmVersion": "2020-10-01", + "response.organization": "user-123", + "response.usage.completion_tokens": 0, + "response.model": "", + "response.usage.total_tokens": 0, + "response.usage.prompt_tokens": 0, + "response.headers.ratelimitRemainingTokens": "100", + "response.headers.ratelimitRemainingRequests": "10000", + "response.headers.ratelimitResetTokens": "100", + "response.headers.ratelimitResetRequests": "10000", + "response.headers.ratelimitLimitTokens": "100", + "response.headers.ratelimitLimitRequests": "10000", + }, + }, + }) + app.ExpectErrorEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "type": "TransactionError", + "transactionName": "OtherTransaction/Go/OpenAIChatCompletion", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + "error.class": "OpenAIError", + "error.message": "test error", + }, + UserAttributes: map[string]interface{}{ + "error.code": "404", + "http.status": "404", + "completion_id": internal.MatchAnything, + }, + }}) +} +func TestNRCreateEmbedding(t *testing.T) { + mockClient := &MockOpenAIClient{} + cw := &ClientWrapper{ + Client: mockClient, + LicenseKeyLastFour: "sk-mnop", + } + embeddingReq := openai.EmbeddingRequest{ + Input: []string{ + "The food was delicious and the waiter", + "Other examples of embedding request", + }, + Model: openai.AdaEmbeddingV2, + EncodingFormat: openai.EmbeddingEncodingFormatFloat, + } + + app := integrationsupport.NewTestApp(nil, newrelic.ConfigAIMonitoringEnabled(true)) + + _, err := NRCreateEmbedding(cw, embeddingReq, app.Application) + if err != nil { + t.Error(err) + } + app.ExpectCustomEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "type": "LlmEmbedding", + "timestamp": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{ + "ingest_source": "Go", + "vendor": "OpenAI", + "id": internal.MatchAnything, + "transaction_id": internal.MatchAnything, + 
"trace_id": internal.MatchAnything, + "span_id": internal.MatchAnything, + "duration": 0, + "api_key_last_four_digits": "sk-mnop", + "request.model": "text-embedding-ada-002", + "response.headers.llmVersion": "2020-10-01", + "response.organization": "user-123", + "response.model": "text-embedding-ada-002", + "response.usage.total_tokens": 21, + "response.usage.prompt_tokens": 9, + "input": "The food was delicious and the waiter", + "response.headers.ratelimitRemainingTokens": "100", + "response.headers.ratelimitRemainingRequests": "10000", + "response.headers.ratelimitResetTokens": "100", + "response.headers.ratelimitResetRequests": "10000", + "response.headers.ratelimitLimitTokens": "100", + "response.headers.ratelimitLimitRequests": "10000", + }, + }, + }) + +} + +func TestNRCreateEmbeddingAIMonitoringNotEnabled(t *testing.T) { + mockClient := &MockOpenAIClient{} + cw := &ClientWrapper{ + Client: mockClient, + LicenseKeyLastFour: "sk-mnop", + } + embeddingReq := openai.EmbeddingRequest{ + Input: []string{ + "The food was delicious and the waiter", + "Other examples of embedding request", + }, + Model: openai.AdaEmbeddingV2, + EncodingFormat: openai.EmbeddingEncodingFormatFloat, + } + + app := integrationsupport.NewTestApp(nil) + + resp, err := NRCreateEmbedding(cw, embeddingReq, app.Application) + if err != errAIMonitoringDisabled { + t.Error(err) + } + // If AI Monitoring is disabled, no events should be sent, but a response from OpenAI should still be returned + app.ExpectCustomEvents(t, []internal.WantEvent{}) + if resp.Data[0].Embedding[0] != 0.1 { + t.Errorf("Embedding response is incorrect: expected: %f actual: %f", 0.1, resp.Data[0].Embedding[0]) + } + +} +func TestNRCreateEmbeddingError(t *testing.T) { + mockClient := &MockOpenAIClient{} + cw := &ClientWrapper{ + Client: mockClient, + LicenseKeyLastFour: "sk-mnop", + } + embeddingReq := openai.EmbeddingRequest{ + Input: "testError", + Model: openai.AdaEmbeddingV2, + EncodingFormat: openai.EmbeddingEncodingFormatFloat, + } + + app := integrationsupport.NewTestApp(nil, newrelic.ConfigAIMonitoringEnabled(true)) + + _, err := NRCreateEmbedding(cw, embeddingReq, app.Application) + if err != nil { + t.Error(err) + } + + app.ExpectCustomEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "type": "LlmEmbedding", + "timestamp": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{ + "ingest_source": "Go", + "vendor": "OpenAI", + "id": internal.MatchAnything, + "transaction_id": internal.MatchAnything, + "trace_id": internal.MatchAnything, + "span_id": internal.MatchAnything, + "duration": 0, + "api_key_last_four_digits": "sk-mnop", + "request.model": "text-embedding-ada-002", + "response.headers.llmVersion": "2020-10-01", + "response.organization": "user-123", + "error": true, + "response.model": "", + "response.usage.total_tokens": 0, + "response.usage.prompt_tokens": 0, + "input": "testError", + "response.headers.ratelimitRemainingTokens": "100", + "response.headers.ratelimitRemainingRequests": "10000", + "response.headers.ratelimitResetTokens": "100", + "response.headers.ratelimitResetRequests": "10000", + "response.headers.ratelimitLimitTokens": "100", + "response.headers.ratelimitLimitRequests": "10000", + }, + }, + }) + + app.ExpectErrorEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "type": "TransactionError", + "transactionName": "OtherTransaction/Go/OpenAIEmbedding", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": 
internal.MatchAnything, + "traceId": internal.MatchAnything, + "error.class": "OpenAIError", + "error.message": "test error", + }, + UserAttributes: map[string]interface{}{ + "error.code": "404", + "http.status": "404", + "embedding_id": internal.MatchAnything, + }, + }}) +} + +func TestNRCreateStream(t *testing.T) { + mockClient := &MockOpenAIClient{} + cw := &ClientWrapper{ + Client: mockClient, + LicenseKeyLastFour: "sk-mnop", + } + req := openai.ChatCompletionRequest{ + Model: openai.GPT3Dot5Turbo, + Temperature: 0, + MaxTokens: 1500, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleUser, + Content: "Say this is a test", + }, + }, + Stream: true, + } + app := integrationsupport.NewTestApp(nil, newrelic.ConfigAIMonitoringEnabled(true)) + _, err := NRCreateChatCompletionStream(cw, context.Background(), req, app.Application) + if err != nil { + t.Error(err) + } + app.ExpectCustomEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "type": "LlmChatCompletionSummary", + "timestamp": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{ + "ingest_source": "Go", + "vendor": "OpenAI", + "model": "gpt-3.5-turbo", + "id": internal.MatchAnything, + "transaction_id": internal.MatchAnything, + "trace_id": internal.MatchAnything, + "span_id": internal.MatchAnything, + "appName": "my app", + "duration": 0, + "request.temperature": 0, + "api_key_last_four_digits": "sk-mnop", + "request.max_tokens": 1500, + "request.model": "gpt-3.5-turbo", + }, + }, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "type": "Transaction", + "name": "OtherTransaction/Go/OpenAIChatCompletionStream", + "timestamp": internal.MatchAnything, + "traceId": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "guid": internal.MatchAnything, + }, + }, + }) +} + +func TestNRCreateStreamAIMonitoringNotEnabled(t *testing.T) { + mockClient := &MockOpenAIClient{} + cw := &ClientWrapper{ + Client: mockClient, + LicenseKeyLastFour: "sk-mnop", + } + req := openai.ChatCompletionRequest{ + Model: openai.GPT3Dot5Turbo, + Temperature: 0, + MaxTokens: 1500, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleUser, + Content: "Say this is a test", + }, + }, + Stream: true, + } + app := integrationsupport.NewTestApp(nil) + _, err := NRCreateChatCompletionStream(cw, context.Background(), req, app.Application) + if err != errAIMonitoringDisabled { + t.Error(err) + } + app.ExpectCustomEvents(t, []internal.WantEvent{}) + app.ExpectTxnEvents(t, []internal.WantEvent{}) +} + +func TestNRCreateStreamError(t *testing.T) { + mockClient := &MockOpenAIClient{} + cw := &ClientWrapper{ + Client: mockClient, + LicenseKeyLastFour: "sk-mnop", + } + req := openai.ChatCompletionRequest{ + Model: openai.GPT3Dot5Turbo, + Temperature: 0, + MaxTokens: 1500, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleUser, + Content: "testError", + }, + }, + Stream: true, + } + app := integrationsupport.NewTestApp(nil, newrelic.ConfigAIMonitoringEnabled(true)) + _, err := NRCreateChatCompletionStream(cw, context.Background(), req, app.Application) + if err.Error() != "test error" { + t.Error(err) + } + + app.ExpectErrorEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "type": "TransactionError", + "transactionName": "OtherTransaction/Go/OpenAIChatCompletionStream", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + 
"sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + "error.class": "OpenAIError", + "error.message": "test error", + }, + }}) + +} diff --git a/v3/newrelic/application.go b/v3/newrelic/application.go index 0249c738f..f7bdf0058 100644 --- a/v3/newrelic/application.go +++ b/v3/newrelic/application.go @@ -48,6 +48,32 @@ func (app *Application) RecordCustomEvent(eventType string, params map[string]in } } +// RecordLlmFeedbackEvent adds a LLM Feedback event. +// An error is logged if eventType or params is invalid. +func (app *Application) RecordLLMFeedbackEvent(trace_id string, rating any, category string, message string, metadata map[string]interface{}) { + if app == nil || app.app == nil { + return + } + CustomEventData := map[string]interface{}{ + "trace_id": trace_id, + "rating": rating, + "category": category, + "message": message, + "ingest_source": "Go", + } + for k, v := range metadata { + CustomEventData[k] = v + } + // if rating is an int or string, record the event + err := app.app.RecordCustomEvent("LlmFeedbackMessage", CustomEventData) + if err != nil { + app.app.Error("unable to record custom event", map[string]interface{}{ + "event-type": "LlmFeedbackMessage", + "reason": err.Error(), + }) + } +} + // RecordCustomMetric records a custom metric. The metric name you // provide will be prefixed by "Custom/". Custom metrics are not // currently supported in serverless mode. @@ -136,7 +162,6 @@ func (app *Application) Shutdown(timeout time.Duration) { // a boolean true value is returned as the second return value. If it is // false, then the Config data returned is the standard default configuration. // This usually occurs if the Application is not yet fully initialized. -// func (app *Application) Config() (Config, bool) { if app == nil || app.app == nil { return defaultConfig(), false diff --git a/v3/newrelic/config.go b/v3/newrelic/config.go index 5d79bf8fa..d0461ca1c 100644 --- a/v3/newrelic/config.go +++ b/v3/newrelic/config.go @@ -235,6 +235,17 @@ type Config struct { DynoNamePrefixesToShorten []string } + // AIMonitoring controls the behavior of AI monitoring features. + AIMonitoring struct { + Enabled bool + // Indicates whether streams will be instrumented + Streaming struct { + Enabled bool + } + RecordContent struct { + Enabled bool + } + } // CrossApplicationTracer controls behavior relating to cross application // tracing (CAT). In the case where CrossApplicationTracer and // DistributedTracer are both enabled, DistributedTracer takes precedence. 
@@ -667,6 +678,9 @@ func defaultConfig() Config { c.Heroku.UseDynoNames = true c.Heroku.DynoNamePrefixesToShorten = []string{"scheduler", "run"} + c.AIMonitoring.Enabled = false + c.AIMonitoring.Streaming.Enabled = true + c.AIMonitoring.RecordContent.Enabled = true c.InfiniteTracing.TraceObserver.Port = 443 c.InfiniteTracing.SpanEvents.QueueSize = 10000 diff --git a/v3/newrelic/config_options.go b/v3/newrelic/config_options.go index 5b9261e17..91ba00b59 100644 --- a/v3/newrelic/config_options.go +++ b/v3/newrelic/config_options.go @@ -236,6 +236,22 @@ func ConfigAppLogDecoratingEnabled(enabled bool) ConfigOption { } } +func ConfigAIMonitoringEnabled(enabled bool) ConfigOption { + return func(cfg *Config) { + if enabled && !cfg.HighSecurity { + cfg.AIMonitoring.Enabled = true + } else { + cfg.AIMonitoring.Enabled = false + } + } +} + +func ConfigAIMonitoringRecordContentEnabled(enabled bool) ConfigOption { + return func(cfg *Config) { + cfg.AIMonitoring.RecordContent.Enabled = enabled + } +} + // ConfigAppLogMetricsEnabled enables or disables the collection of metrics // data for logs seen by an instrumented logging framework // default: true diff --git a/v3/newrelic/config_test.go b/v3/newrelic/config_test.go index 37eb88159..9400a4e20 100644 --- a/v3/newrelic/config_test.go +++ b/v3/newrelic/config_test.go @@ -130,6 +130,15 @@ func TestCopyConfigReferenceFieldsPresent(t *testing.T) { "agent_version":"0.2.2", "host":"my-hostname", "settings":{ + "AIMonitoring": { + "Enabled": false, + "RecordContent": { + "Enabled": true + }, + "Streaming": { + "Enabled": true + } + }, "AppName":"my appname", "ApplicationLogging": { "Enabled": true, @@ -326,6 +335,15 @@ func TestCopyConfigReferenceFieldsAbsent(t *testing.T) { "agent_version":"0.2.2", "host":"my-hostname", "settings":{ + "AIMonitoring": { + "Enabled": false, + "RecordContent": { + "Enabled": true + }, + "Streaming": { + "Enabled": true + } + }, "AppName":"my appname", "ApplicationLogging": { "Enabled": true, diff --git a/v3/newrelic/internal_test.go b/v3/newrelic/internal_test.go index 994ce2c2b..a1deb7878 100644 --- a/v3/newrelic/internal_test.go +++ b/v3/newrelic/internal_test.go @@ -289,6 +289,26 @@ func testApp(replyfn func(*internal.ConnectReply), cfgfn func(*Config), t testin } } +func TestRecordLLMFeedbackEventSuccess(t *testing.T) { + app := testApp(nil, nil, t) + app.RecordLLMFeedbackEvent("traceid", "5", "informative", "message", validParams) + app.expectNoLoggedErrors(t) + app.ExpectCustomEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "type": "LlmFeedbackMessage", + "timestamp": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{ + "trace_id": "traceid", + "rating": "5", + "category": "informative", + "message": "message", + "ingest_source": "Go", + "zip": 1, + "zap": 2, + }, + }}) +} func TestRecordCustomEventSuccess(t *testing.T) { app := testApp(nil, nil, t) app.RecordCustomEvent("myType", validParams) From 9ab7142870dc8200ea5bacf4a38edcfc289569c8 Mon Sep 17 00:00:00 2001 From: Steve Willoughby Date: Wed, 20 Mar 2024 08:05:37 -0700 Subject: [PATCH 07/38] wip --- v3/integrations/nrawssdk-v2/LICENSE.txt | 206 ------- v3/integrations/nrawssdk-v2/README.md | 10 - v3/integrations/nrawssdk-v2/example/main.go | 67 --- v3/integrations/nrawssdk-v2/go.mod | 18 - v3/integrations/nrawssdk-v2/nrawssdk.go | 144 ----- v3/integrations/nrawssdk-v2/nrawssdk_test.go | 580 ------------------- v3/newrelic/application.go | 20 + v3/newrelic/config.go | 16 + v3/newrelic/config_options.go | 42 ++ 9 files 
changed, 78 insertions(+), 1025 deletions(-) delete mode 100644 v3/integrations/nrawssdk-v2/LICENSE.txt delete mode 100644 v3/integrations/nrawssdk-v2/README.md delete mode 100644 v3/integrations/nrawssdk-v2/example/main.go delete mode 100644 v3/integrations/nrawssdk-v2/go.mod delete mode 100644 v3/integrations/nrawssdk-v2/nrawssdk.go delete mode 100644 v3/integrations/nrawssdk-v2/nrawssdk_test.go diff --git a/v3/integrations/nrawssdk-v2/LICENSE.txt b/v3/integrations/nrawssdk-v2/LICENSE.txt deleted file mode 100644 index cee548c2d..000000000 --- a/v3/integrations/nrawssdk-v2/LICENSE.txt +++ /dev/null @@ -1,206 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -Versions 3.8.0 and above for this project are licensed under Apache 2.0. For -prior versions of this project, please see the LICENCE.txt file in the root -directory of that version for more information. diff --git a/v3/integrations/nrawssdk-v2/README.md b/v3/integrations/nrawssdk-v2/README.md deleted file mode 100644 index fe4869102..000000000 --- a/v3/integrations/nrawssdk-v2/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# v3/integrations/nrawssdk-v2 [![GoDoc](https://godoc.org/github.com/newrelic/go-agent/v3/integrations/nrawssdk-v2?status.svg)](https://godoc.org/github.com/newrelic/go-agent/v3/integrations/nrawssdk-v2) - -Package `nrawssdk` instruments https://github.com/aws/aws-sdk-go-v2 requests. - -```go -import "github.com/newrelic/go-agent/v3/integrations/nrawssdk-v2" -``` - -For more information, see -[godocs](https://godoc.org/github.com/newrelic/go-agent/v3/integrations/nrawssdk-v2). diff --git a/v3/integrations/nrawssdk-v2/example/main.go b/v3/integrations/nrawssdk-v2/example/main.go deleted file mode 100644 index 3f740302e..000000000 --- a/v3/integrations/nrawssdk-v2/example/main.go +++ /dev/null @@ -1,67 +0,0 @@ -package main - -import ( - "context" - "fmt" - "log" - "os" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/s3" - nraws "github.com/newrelic/go-agent/v3/integrations/nrawssdk-v2" - "github.com/newrelic/go-agent/v3/newrelic" -) - -func main() { - - // Create a New Relic application. This will look for your license key in an - // environment variable called NEW_RELIC_LICENSE_KEY. This example turns on - // Distributed Tracing, but that's not required. - app, err := newrelic.NewApplication( - newrelic.ConfigFromEnvironment(), - newrelic.ConfigAppName("Example App"), - newrelic.ConfigInfoLogger(os.Stdout), - newrelic.ConfigDistributedTracerEnabled(true), - ) - if nil != err { - fmt.Println(err) - os.Exit(1) - } - - // For demo purposes only. Don't use the app.WaitForConnection call in - // production unless this is a very short-lived process and the caller - // doesn't block or exit if there's an error. 
- app.WaitForConnection(5 * time.Second) - - // Start recording a New Relic transaction - txn := app.StartTransaction("My sample transaction") - - ctx := context.Background() - awsConfig, err := config.LoadDefaultConfig(ctx) - if err != nil { - log.Fatal(err) - } - - // Instrument all new AWS clients with New Relic - nraws.AppendMiddlewares(&awsConfig.APIOptions, nil) - - s3Client := s3.NewFromConfig(awsConfig) - output, err := s3Client.ListBuckets(ctx, nil) - if err != nil { - log.Fatal(err) - } - - for _, object := range output.Buckets { - log.Printf("Bucket name is %s\n", aws.ToString(object.Name)) - } - - // End the New Relic transaction - txn.End() - - // Force all the harvests and shutdown. Like the app.WaitForConnection call - // above, this is for the purposes of this demo only and can be safely - // removed for longer-running processes. - app.Shutdown(10 * time.Second) -} diff --git a/v3/integrations/nrawssdk-v2/go.mod b/v3/integrations/nrawssdk-v2/go.mod deleted file mode 100644 index 1597772e1..000000000 --- a/v3/integrations/nrawssdk-v2/go.mod +++ /dev/null @@ -1,18 +0,0 @@ -module github.com/newrelic/go-agent/v3/integrations/nrawssdk-v2 - -// As of May 2021, the aws-sdk-go-v2 go.mod file uses 1.15: -// https://github.com/aws/aws-sdk-go-v2/blob/master/go.mod -go 1.19 - -require ( - github.com/aws/aws-sdk-go-v2 v1.16.15 - github.com/aws/aws-sdk-go-v2/config v1.17.6 - github.com/aws/aws-sdk-go-v2/service/dynamodb v1.17.0 - github.com/aws/aws-sdk-go-v2/service/lambda v1.24.5 - github.com/aws/aws-sdk-go-v2/service/s3 v1.27.10 - github.com/aws/smithy-go v1.13.3 - github.com/newrelic/go-agent/v3 v3.30.0 -) - - -replace github.com/newrelic/go-agent/v3 => ../.. diff --git a/v3/integrations/nrawssdk-v2/nrawssdk.go b/v3/integrations/nrawssdk-v2/nrawssdk.go deleted file mode 100644 index 8ff3a8ab6..000000000 --- a/v3/integrations/nrawssdk-v2/nrawssdk.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package nrawssdk instruments requests made by the -// https://github.com/aws/aws-sdk-go-v2 library. -// -// For most operations, external segments and spans are automatically created -// for display in the New Relic UI on the External services section. For -// DynamoDB operations, datastore segements and spans are created and will be -// displayed on the Databases page. All operations will also be displayed on -// transaction traces and distributed traces. -// -// To use this integration, simply apply the AppendMiddlewares fuction to the apiOptions in -// your AWS Config object before performing any AWS operations. See -// example/main.go for a working sample. 
-package nrawssdk - -import ( - "context" - "strconv" - - awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" - smithymiddle "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "github.com/newrelic/go-agent/v3/internal/integrationsupport" - "github.com/newrelic/go-agent/v3/newrelic" -) - -type nrMiddleware struct { - txn *newrelic.Transaction -} - -type endable interface{ End() } - -// See https://aws.github.io/aws-sdk-go-v2/docs/middleware/ for a description of -// AWS SDK V2 middleware. -func (m nrMiddleware) deserializeMiddleware(stack *smithymiddle.Stack) error { - return stack.Deserialize.Add(smithymiddle.DeserializeMiddlewareFunc("NRDeserializeMiddleware", func( - ctx context.Context, in smithymiddle.DeserializeInput, next smithymiddle.DeserializeHandler) ( - out smithymiddle.DeserializeOutput, metadata smithymiddle.Metadata, err error) { - - txn := m.txn - if txn == nil { - txn = newrelic.FromContext(ctx) - } - - smithyRequest := in.Request.(*smithyhttp.Request) - - // The actual http.Request is inside the smithyhttp.Request - httpRequest := smithyRequest.Request - serviceName := awsmiddle.GetServiceID(ctx) - operation := awsmiddle.GetOperationName(ctx) - region := awsmiddle.GetRegion(ctx) - - var segment endable - // Service name capitalization is different for v1 and v2. - if serviceName == "dynamodb" || serviceName == "DynamoDB" { - segment = &newrelic.DatastoreSegment{ - Product: newrelic.DatastoreDynamoDB, - Collection: "", // AWS SDK V2 doesn't expose TableName - Operation: operation, - ParameterizedQuery: "", - QueryParameters: nil, - Host: httpRequest.URL.Host, - PortPathOrID: httpRequest.URL.Port(), - DatabaseName: "", - StartTime: txn.StartSegmentNow(), - } - } else { - segment = newrelic.StartExternalSegment(txn, httpRequest) - } - - // Hand off execution to other middlewares and then perform the request - out, metadata, err = next.HandleDeserialize(ctx, in) - - // After the request - response, ok := out.RawResponse.(*smithyhttp.Response) - - if ok { - // Set additional span attributes - integrationsupport.AddAgentSpanAttribute(txn, - newrelic.AttributeResponseCode, strconv.Itoa(response.StatusCode)) - integrationsupport.AddAgentSpanAttribute(txn, - newrelic.SpanAttributeAWSOperation, operation) - integrationsupport.AddAgentSpanAttribute(txn, - newrelic.SpanAttributeAWSRegion, region) - requestID, ok := awsmiddle.GetRequestIDMetadata(metadata) - if ok { - integrationsupport.AddAgentSpanAttribute(txn, - newrelic.AttributeAWSRequestID, requestID) - } - } - segment.End() - return out, metadata, err - }), - smithymiddle.Before) -} - -// AppendMiddlewares inserts New Relic middleware in the given `apiOptions` for -// the AWS SDK V2 for Go. It must be called only once per AWS configuration. -// -// If `txn` is provided as nil, the New Relic transaction will be retrieved -// using `newrelic.FromContext`. -// -// Additional attributes will be added to transaction trace segments and span -// events: aws.region, aws.requestId, and aws.operation. In addition, -// http.statusCode will be added to span events. -// -// To see segments and spans for all AWS invocations, call AppendMiddlewares -// with the AWS Config `apiOptions` and provide nil for `txn`. For example: -// -// awsConfig, err := config.LoadDefaultConfig(ctx) -// if err != nil { -// log.Fatal(err) -// } -// nraws.AppendMiddlewares(&awsConfig.APIOptions, nil) -// -// If do not want the transaction to be retrived from the context, you can -// explicitly set `txn`. 
For example: -// -// awsConfig, err := config.LoadDefaultConfig(ctx) -// if err != nil { -// log.Fatal(err) -// } -// -// ... -// -// txn := loadNewRelicTransaction() -// nraws.AppendMiddlewares(&awsConfig.APIOptions, txn) -func AppendMiddlewares(apiOptions *[]func(*smithymiddle.Stack) error, txn *newrelic.Transaction) { - m := nrMiddleware{txn: txn} - *apiOptions = append(*apiOptions, m.deserializeMiddleware) -} diff --git a/v3/integrations/nrawssdk-v2/nrawssdk_test.go b/v3/integrations/nrawssdk-v2/nrawssdk_test.go deleted file mode 100644 index 79b1f389a..000000000 --- a/v3/integrations/nrawssdk-v2/nrawssdk_test.go +++ /dev/null @@ -1,580 +0,0 @@ -// Copyright 2020 New Relic Corporation. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package nrawssdk - -import ( - "bytes" - "context" - "errors" - "io/ioutil" - "net/http" - "testing" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/dynamodb" - "github.com/aws/aws-sdk-go-v2/service/lambda" - "github.com/aws/aws-sdk-go-v2/service/lambda/types" - "github.com/newrelic/go-agent/v3/internal" - "github.com/newrelic/go-agent/v3/internal/integrationsupport" - "github.com/newrelic/go-agent/v3/newrelic" -) - -func testApp() integrationsupport.ExpectApp { - return integrationsupport.NewTestApp(integrationsupport.SampleEverythingReplyFn, integrationsupport.DTEnabledCfgFn, newrelic.ConfigCodeLevelMetricsEnabled(false)) -} - -type fakeTransport struct{} - -func (t fakeTransport) RoundTrip(r *http.Request) (*http.Response, error) { - return &http.Response{ - Status: "200 OK", - StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - Header: http.Header{ - "X-Amzn-Requestid": []string{requestID}, - }, - }, nil -} - -type fakeCredsWithoutContext struct{} - -func (c fakeCredsWithoutContext) Retrieve() (aws.Credentials, error) { - return aws.Credentials{}, nil -} - -type fakeCredsWithContext struct{} - -func (c fakeCredsWithContext) Retrieve(ctx context.Context) (aws.Credentials, error) { - return aws.Credentials{}, nil -} - -var fakeCreds = func() interface{} { - var c interface{} = fakeCredsWithoutContext{} - if _, ok := c.(aws.CredentialsProvider); ok { - return c - } - return fakeCredsWithContext{} -}() - -func newConfig(ctx context.Context, txn *newrelic.Transaction) aws.Config { - cfg, _ := config.LoadDefaultConfig(ctx) - cfg.Credentials = fakeCreds.(aws.CredentialsProvider) - cfg.Region = awsRegion - cfg.HTTPClient = &http.Client{ - Transport: &fakeTransport{}, - } - - AppendMiddlewares(&cfg.APIOptions, txn) - - return cfg -} - -const ( - requestID = "testing request id" - txnName = "aws-txn" - awsRegion = "us-west-2" -) - -var ( - genericSpan = internal.WantEvent{ - Intrinsics: map[string]interface{}{ - "name": "OtherTransaction/Go/" + txnName, - "transaction.name": "OtherTransaction/Go/" + txnName, - "sampled": true, - "category": "generic", - "priority": internal.MatchAnything, - "guid": internal.MatchAnything, - "transactionId": internal.MatchAnything, - "nr.entryPoint": true, - "traceId": internal.MatchAnything, - }, - UserAttributes: map[string]interface{}{}, - AgentAttributes: map[string]interface{}{}, - } - externalSpan = internal.WantEvent{ - Intrinsics: map[string]interface{}{ - "name": "External/lambda.us-west-2.amazonaws.com/http/POST", - "sampled": true, - "category": "http", - "priority": internal.MatchAnything, - "guid": internal.MatchAnything, - "transactionId": internal.MatchAnything, - 
"traceId": internal.MatchAnything, - "parentId": internal.MatchAnything, - "component": "http", - "span.kind": "client", - }, - UserAttributes: map[string]interface{}{}, - AgentAttributes: map[string]interface{}{ - "aws.operation": "Invoke", - "aws.region": awsRegion, - "aws.requestId": requestID, - "http.method": "POST", - "http.url": "https://lambda.us-west-2.amazonaws.com/2015-03-31/functions/non-existent-function/invocations", - "http.statusCode": "200", - }, - } - externalSpanNoRequestID = internal.WantEvent{ - Intrinsics: map[string]interface{}{ - "name": "External/lambda.us-west-2.amazonaws.com/http/POST", - "sampled": true, - "category": "http", - "priority": internal.MatchAnything, - "guid": internal.MatchAnything, - "transactionId": internal.MatchAnything, - "traceId": internal.MatchAnything, - "parentId": internal.MatchAnything, - "component": "http", - "span.kind": "client", - }, - UserAttributes: map[string]interface{}{}, - AgentAttributes: map[string]interface{}{ - "aws.operation": "Invoke", - "aws.region": awsRegion, - "http.method": "POST", - "http.url": "https://lambda.us-west-2.amazonaws.com/2015-03-31/functions/non-existent-function/invocations", - "http.statusCode": "200", - }, - } - datastoreSpan = internal.WantEvent{ - Intrinsics: map[string]interface{}{ - "name": "Datastore/operation/DynamoDB/DescribeTable", - "sampled": true, - "category": "datastore", - "priority": internal.MatchAnything, - "guid": internal.MatchAnything, - "transactionId": internal.MatchAnything, - "traceId": internal.MatchAnything, - "parentId": internal.MatchAnything, - "component": "DynamoDB", - "span.kind": "client", - }, - UserAttributes: map[string]interface{}{}, - AgentAttributes: map[string]interface{}{ - "aws.operation": "DescribeTable", - "aws.region": awsRegion, - "aws.requestId": requestID, - "db.statement": "'DescribeTable' on 'unknown' using 'DynamoDB'", - "peer.address": "dynamodb.us-west-2.amazonaws.com:unknown", - "peer.hostname": "dynamodb.us-west-2.amazonaws.com", - "http.statusCode": "200", - }, - } - txnMetrics = []internal.WantMetric{ - {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, - {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, - {Name: "OtherTransaction/Go/" + txnName, Scope: "", Forced: true, Data: nil}, - {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, - {Name: "OtherTransactionTotalTime/Go/" + txnName, Scope: "", Forced: false, Data: nil}, - {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, - } - externalMetrics = append(txnMetrics, []internal.WantMetric{ - {Name: "External/all", Scope: "", Forced: true, Data: nil}, - {Name: "External/allOther", Scope: "", Forced: true, Data: nil}, - {Name: "External/lambda.us-west-2.amazonaws.com/all", Scope: "", Forced: false, Data: nil}, - {Name: "External/lambda.us-west-2.amazonaws.com/http/POST", Scope: "OtherTransaction/Go/" + txnName, Forced: false, Data: nil}, - }...) 
- datastoreMetrics = append(txnMetrics, []internal.WantMetric{ - {Name: "Datastore/DynamoDB/all", Scope: "", Forced: true, Data: nil}, - {Name: "Datastore/DynamoDB/allOther", Scope: "", Forced: true, Data: nil}, - {Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, - {Name: "Datastore/allOther", Scope: "", Forced: true, Data: nil}, - {Name: "Datastore/instance/DynamoDB/dynamodb.us-west-2.amazonaws.com/unknown", Scope: "", Forced: false, Data: nil}, - {Name: "Datastore/operation/DynamoDB/DescribeTable", Scope: "", Forced: false, Data: nil}, - {Name: "Datastore/operation/DynamoDB/DescribeTable", Scope: "OtherTransaction/Go/aws-txn", Forced: false, Data: nil}, - }...) -) - -type testTableEntry struct { - Name string - - BuildContext func(txn *newrelic.Transaction) context.Context - BuildConfig func(ctx context.Context, txn *newrelic.Transaction) aws.Config -} - -func runTestTable(t *testing.T, table []*testTableEntry, executeEntry func(t *testing.T, entry *testTableEntry)) { - for _, entry := range table { - entry := entry // Pin range variable - - t.Run(entry.Name, func(t *testing.T) { - executeEntry(t, entry) - }) - } -} - -func TestInstrumentRequestExternal(t *testing.T) { - runTestTable(t, - []*testTableEntry{ - { - Name: "with manually set transaction", - - BuildContext: func(txn *newrelic.Transaction) context.Context { - return context.Background() - }, - BuildConfig: newConfig, - }, - { - Name: "with transaction set in context", - - BuildContext: func(txn *newrelic.Transaction) context.Context { - return newrelic.NewContext(context.Background(), txn) - }, - BuildConfig: func(ctx context.Context, txn *newrelic.Transaction) aws.Config { - return newConfig(ctx, nil) // Set txn to nil to ensure transaction is retrieved from the context - }, - }, - }, - - func(t *testing.T, entry *testTableEntry) { - app := testApp() - txn := app.StartTransaction(txnName) - ctx := entry.BuildContext(txn) - - client := lambda.NewFromConfig(entry.BuildConfig(ctx, txn)) - - input := &lambda.InvokeInput{ - ClientContext: aws.String("MyApp"), - FunctionName: aws.String("non-existent-function"), - InvocationType: types.InvocationTypeRequestResponse, - LogType: types.LogTypeTail, - Payload: []byte("{}"), - } - - _, err := client.Invoke(ctx, input) - if err != nil { - t.Error(err) - } - - txn.End() - - app.ExpectMetrics(t, externalMetrics) - app.ExpectSpanEvents(t, []internal.WantEvent{ - externalSpan, genericSpan}) - }, - ) -} - -func TestInstrumentRequestDatastore(t *testing.T) { - runTestTable(t, - []*testTableEntry{ - { - Name: "with manually set transaction", - - BuildContext: func(txn *newrelic.Transaction) context.Context { - return context.Background() - }, - BuildConfig: newConfig, - }, - { - Name: "with transaction set in context", - - BuildContext: func(txn *newrelic.Transaction) context.Context { - return newrelic.NewContext(context.Background(), txn) - }, - BuildConfig: func(ctx context.Context, txn *newrelic.Transaction) aws.Config { - return newConfig(ctx, nil) // Set txn to nil to ensure transaction is retrieved from the context - }, - }, - }, - - func(t *testing.T, entry *testTableEntry) { - app := testApp() - txn := app.StartTransaction(txnName) - ctx := entry.BuildContext(txn) - - client := dynamodb.NewFromConfig(entry.BuildConfig(ctx, txn)) - - input := &dynamodb.DescribeTableInput{ - TableName: aws.String("thebesttable"), - } - - _, err := client.DescribeTable(ctx, input) - if err != nil { - t.Error(err) - } - - txn.End() - - app.ExpectMetrics(t, datastoreMetrics) - 
app.ExpectSpanEvents(t, []internal.WantEvent{ - datastoreSpan, genericSpan}) - }, - ) -} - -type firstFailingTransport struct { - failing bool -} - -func (t *firstFailingTransport) RoundTrip(r *http.Request) (*http.Response, error) { - if t.failing { - t.failing = false - return nil, errors.New("Oops this failed") - } - - return &http.Response{ - Status: "200 OK", - StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - Header: http.Header{ - "X-Amzn-Requestid": []string{requestID}, - }, - }, nil -} - -func TestRetrySend(t *testing.T) { - runTestTable(t, - []*testTableEntry{ - { - Name: "with manually set transaction", - - BuildContext: func(txn *newrelic.Transaction) context.Context { - return context.Background() - }, - BuildConfig: newConfig, - }, - { - Name: "with transaction set in context", - - BuildContext: func(txn *newrelic.Transaction) context.Context { - return newrelic.NewContext(context.Background(), txn) - }, - BuildConfig: func(ctx context.Context, txn *newrelic.Transaction) aws.Config { - return newConfig(ctx, nil) // Set txn to nil to ensure transaction is retrieved from the context - }, - }, - }, - - func(t *testing.T, entry *testTableEntry) { - app := testApp() - txn := app.StartTransaction(txnName) - ctx := entry.BuildContext(txn) - - cfg := entry.BuildConfig(ctx, txn) - - cfg.HTTPClient = &http.Client{ - Transport: &firstFailingTransport{failing: true}, - } - - customRetry := retry.NewStandard(func(o *retry.StandardOptions) { - o.MaxAttempts = 2 - }) - client := lambda.NewFromConfig(cfg, func(o *lambda.Options) { - o.Retryer = customRetry - }) - - input := &lambda.InvokeInput{ - ClientContext: aws.String("MyApp"), - FunctionName: aws.String("non-existent-function"), - InvocationType: types.InvocationTypeRequestResponse, - LogType: types.LogTypeTail, - Payload: []byte("{}"), - } - - _, err := client.Invoke(ctx, input) - if err != nil { - t.Error(err) - } - - txn.End() - - app.ExpectMetrics(t, externalMetrics) - - app.ExpectSpanEvents(t, []internal.WantEvent{ - { - Intrinsics: map[string]interface{}{ - "name": "External/lambda.us-west-2.amazonaws.com/http/POST", - "sampled": true, - "category": "http", - "priority": internal.MatchAnything, - "guid": internal.MatchAnything, - "transactionId": internal.MatchAnything, - "traceId": internal.MatchAnything, - "parentId": internal.MatchAnything, - "component": "http", - "span.kind": "client", - }, - UserAttributes: map[string]interface{}{}, - AgentAttributes: map[string]interface{}{ - "aws.operation": "Invoke", - "aws.region": awsRegion, - "http.method": "POST", - "http.url": "https://lambda.us-west-2.amazonaws.com/2015-03-31/functions/non-existent-function/invocations", - "http.statusCode": "0", - }, - }, { - Intrinsics: map[string]interface{}{ - "name": "External/lambda.us-west-2.amazonaws.com/http/POST", - "sampled": true, - "category": "http", - "priority": internal.MatchAnything, - "guid": internal.MatchAnything, - "transactionId": internal.MatchAnything, - "traceId": internal.MatchAnything, - "parentId": internal.MatchAnything, - "component": "http", - "span.kind": "client", - }, - UserAttributes: map[string]interface{}{}, - AgentAttributes: map[string]interface{}{ - "aws.operation": "Invoke", - "aws.region": awsRegion, - "aws.requestId": requestID, - "http.method": "POST", - "http.url": "https://lambda.us-west-2.amazonaws.com/2015-03-31/functions/non-existent-function/invocations", - "http.statusCode": "200", - }, - }, { - Intrinsics: map[string]interface{}{ - "name": "OtherTransaction/Go/" + txnName, 
- "transaction.name": "OtherTransaction/Go/" + txnName, - "sampled": true, - "category": "generic", - "priority": internal.MatchAnything, - "guid": internal.MatchAnything, - "transactionId": internal.MatchAnything, - "nr.entryPoint": true, - "traceId": internal.MatchAnything, - }, - UserAttributes: map[string]interface{}{}, - AgentAttributes: map[string]interface{}{}, - }}) - }, - ) -} - -func TestRequestSentTwice(t *testing.T) { - runTestTable(t, - []*testTableEntry{ - { - Name: "with manually set transaction", - - BuildContext: func(txn *newrelic.Transaction) context.Context { - return context.Background() - }, - BuildConfig: newConfig, - }, - { - Name: "with transaction set in context", - - BuildContext: func(txn *newrelic.Transaction) context.Context { - return newrelic.NewContext(context.Background(), txn) - }, - BuildConfig: func(ctx context.Context, txn *newrelic.Transaction) aws.Config { - return newConfig(ctx, nil) // Set txn to nil to ensure transaction is retrieved from the context - }, - }, - }, - - func(t *testing.T, entry *testTableEntry) { - app := testApp() - txn := app.StartTransaction(txnName) - ctx := entry.BuildContext(txn) - - client := lambda.NewFromConfig(entry.BuildConfig(ctx, txn)) - - input := &lambda.InvokeInput{ - ClientContext: aws.String("MyApp"), - FunctionName: aws.String("non-existent-function"), - InvocationType: types.InvocationTypeRequestResponse, - LogType: types.LogTypeTail, - Payload: []byte("{}"), - } - - _, firstErr := client.Invoke(ctx, input) - if firstErr != nil { - t.Error(firstErr) - } - - _, secondErr := client.Invoke(ctx, input) - if secondErr != nil { - t.Error(secondErr) - } - - txn.End() - - app.ExpectMetrics(t, []internal.WantMetric{ - {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, - {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, - {Name: "External/all", Scope: "", Forced: true, Data: []float64{2}}, - {Name: "External/allOther", Scope: "", Forced: true, Data: []float64{2}}, - {Name: "External/lambda.us-west-2.amazonaws.com/all", Scope: "", Forced: false, Data: []float64{2}}, - {Name: "External/lambda.us-west-2.amazonaws.com/http/POST", Scope: "OtherTransaction/Go/" + txnName, Forced: false, Data: []float64{2}}, - {Name: "OtherTransaction/Go/" + txnName, Scope: "", Forced: true, Data: nil}, - {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, - {Name: "OtherTransactionTotalTime/Go/" + txnName, Scope: "", Forced: false, Data: nil}, - {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, - }) - app.ExpectSpanEvents(t, []internal.WantEvent{ - externalSpan, externalSpan, genericSpan}) - }, - ) -} - -type noRequestIDTransport struct{} - -func (t *noRequestIDTransport) RoundTrip(r *http.Request) (*http.Response, error) { - return &http.Response{ - Status: "200 OK", - StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil -} - -func TestNoRequestIDFound(t *testing.T) { - runTestTable(t, - []*testTableEntry{ - { - Name: "with manually set transaction", - - BuildContext: func(txn *newrelic.Transaction) context.Context { - return context.Background() - }, - BuildConfig: newConfig, - }, - { - Name: "with transaction set in context", - - BuildContext: func(txn *newrelic.Transaction) context.Context { - return newrelic.NewContext(context.Background(), txn) - }, - BuildConfig: func(ctx context.Context, txn *newrelic.Transaction) aws.Config { - return newConfig(ctx, nil) // Set txn 
to nil to ensure transaction is retrieved from the context - }, - }, - }, - - func(t *testing.T, entry *testTableEntry) { - app := testApp() - txn := app.StartTransaction(txnName) - ctx := entry.BuildContext(txn) - - cfg := entry.BuildConfig(ctx, txn) - cfg.HTTPClient = &http.Client{ - Transport: &noRequestIDTransport{}, - } - client := lambda.NewFromConfig(cfg) - - input := &lambda.InvokeInput{ - ClientContext: aws.String("MyApp"), - FunctionName: aws.String("non-existent-function"), - InvocationType: types.InvocationTypeRequestResponse, - LogType: types.LogTypeTail, - Payload: []byte("{}"), - } - _, err := client.Invoke(ctx, input) - if err != nil { - t.Error(err) - } - - txn.End() - - app.ExpectMetrics(t, externalMetrics) - app.ExpectSpanEvents(t, []internal.WantEvent{ - externalSpanNoRequestID, genericSpan}) - }, - ) -} diff --git a/v3/newrelic/application.go b/v3/newrelic/application.go index 0249c738f..0f4ddb4f3 100644 --- a/v3/newrelic/application.go +++ b/v3/newrelic/application.go @@ -6,6 +6,8 @@ package newrelic import ( "os" "time" + + "golang.org/x/exp/slices" ) // Application represents your application. All methods on Application are nil @@ -15,6 +17,24 @@ type Application struct { app *app } +// IsAIMonitoringEnabled returns true if monitoring for the specified mode of the named integration is enabled. +func (app *Application) IsAIMonitoringEnabled(integration string, streaming bool) bool { + if app == nil || app.app == nil || app.app.run == nil { + return false + } + aiconf := app.app.run.Config.AIMonitoring + if !aiconf.Enabled { + return false + } + if aiconf.IncludeOnly != nil && integration != "" && !slices.Contains(aiconf.IncludeOnly, integration) { + return false + } + if streaming && !aiconf.Streaming { + return false + } + return true +} + // StartTransaction begins a Transaction with the given name. func (app *Application) StartTransaction(name string, opts ...TraceOption) *Transaction { if app == nil { diff --git a/v3/newrelic/config.go b/v3/newrelic/config.go index 5d79bf8fa..db5d6b2ee 100644 --- a/v3/newrelic/config.go +++ b/v3/newrelic/config.go @@ -450,6 +450,17 @@ type Config struct { } // Security is used to post security configuration on UI. Security interface{} `json:"Security,omitempty"` + + // AIMonitoring controls instrumentation and reporting of AI model invocations and feedback. + AIMonitoring struct { + // Enabled controls whether any AI instrumentation is to be performed. + Enabled bool + // Streaming enables streaming mode instrumentation. + Streaming bool + // IncludeOnly is the list of specific integrations to enable. If empty, all integrations which + // were compiled into the application will be active whenever Enabled is true. + IncludeOnly []string + } } // CodeLevelMetricsScope is a bit-encoded value. 
Each such value describes @@ -679,6 +690,11 @@ func defaultConfig() Config { // Module Dependency Metrics c.ModuleDependencyMetrics.Enabled = true c.ModuleDependencyMetrics.RedactIgnoredPrefixes = true + + // AI Monitoring + c.AIMonitoring.Enabled = false + c.AIMonitoring.Streaming = false + c.AIMonitoring.IncludeOnly = nil return c } diff --git a/v3/newrelic/config_options.go b/v3/newrelic/config_options.go index 5b9261e17..83f515c8c 100644 --- a/v3/newrelic/config_options.go +++ b/v3/newrelic/config_options.go @@ -60,6 +60,40 @@ func ConfigDistributedTracerReservoirLimit(limit int) ConfigOption { return func(cfg *Config) { cfg.DistributedTracer.ReservoirLimit = limit } } +// ConfigAIMonitoringEnabled turns on or off the collection of AI Monitoring metrics. +func ConfigAIMonitoringEnabled(enabled bool) ConfigOption { + return func(cfg *Config) { + cfg.AIMonitoring.Enabled = enabled + } +} + +// ConfigAIMonitoringStreaming turns on or off the collection of AI Monitoring streaming mode metrics. +func ConfigAIMonitoringStreaming(enabled bool) ConfigOption { + return func(cfg *Config) { + cfg.AIMonitoring.Streaming = enabled + } +} + +// ConfigAIMonitoringIncludeOnly sets the list of specific AI integrations to enable, if not the entire list. +// A nil slice of integration names means not to restrict which ones are enabled. +func ConfigAIMonitoringIncludeOnly(integrations []string) ConfigOption { + return func(cfg *Config) { + cfg.AIMonitoring.IncludeOnly = integrations + } +} + +const ( + AIMonitoringBedrock = "nrawsbedrock" +) + +// ConfigAIMonitoringIncludeOnlyString is like ConfigAIMonitoringIncludeOnly except that it takes a single +// comma-separated list of names. +func ConfigAIMonitoringIncludeOnlyString(integrations string) ConfigOption { + return func(cfg *Config) { + cfg.AIMonitoring.IncludeOnly = strings.Split(integrations, ",") + } +} + // ConfigCodeLevelMetricsEnabled turns on or off the collection of code // level metrics entirely. func ConfigCodeLevelMetricsEnabled(enabled bool) ConfigOption { @@ -363,6 +397,9 @@ func ConfigDebugLogger(w io.Writer) ConfigOption { // NEW_RELIC_APPLICATION_LOGGING_METRICS_ENABLED sets ApplicationLogging.Metrics.Enabled. Set to false to disable the collection of application log metrics. // NEW_RELIC_APPLICATION_LOGGING_LOCAL_DECORATING_ENABLED sets ApplicationLogging.LocalDecoration.Enabled. Set to true to enable local log decoration. // NEW_RELIC_APPLICATION_LOGGING_FORWARDING_MAX_SAMPLES_STORED sets ApplicationLogging.LogForwarding.Limit. Set to 0 to prevent captured logs from being forwarded. +// NEW_RELIC_AI_MONITORING_ENABLED sets AIMonitoring.Enabled +// NEW_RELIC_AI_MONITORING_STREAMING sets AIMonitoring.Streaming +// NEW_RELIC_AI_MONITORING_INCLUDE_ONLY sets AIMonitoring.IncludeOnly // // This function is strict and will assign Config.Error if any of the // environment variables cannot be parsed. 
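
The include-only filter composes with the enable and streaming switches, and IsAIMonitoringEnabled (added to Application above) is how an individual integration consults the result. A sketch under the API as defined in this patch (the application setup is illustrative):

```go
app, err := newrelic.NewApplication(
	newrelic.ConfigAppName("my app"),
	newrelic.ConfigFromEnvironment(), // also honors the NEW_RELIC_AI_MONITORING_* variables
	newrelic.ConfigAIMonitoringEnabled(true),
	newrelic.ConfigAIMonitoringStreaming(true),
	// Restrict AI instrumentation to the Bedrock integration.
	newrelic.ConfigAIMonitoringIncludeOnly([]string{newrelic.AIMonitoringBedrock}),
)
if err != nil {
	log.Fatal(err)
}

// Inside an integration: instrument only when this integration is
// selected, and instrument streaming calls only when streaming mode is on.
if app.IsAIMonitoringEnabled(newrelic.AIMonitoringBedrock, true) {
	// install streaming instrumentation here
}
```

The same selection can be made without code changes by setting NEW_RELIC_AI_MONITORING_ENABLED=true and NEW_RELIC_AI_MONITORING_INCLUDE_ONLY=nrawsbedrock in the environment.
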
@@ -426,6 +463,11 @@ func configFromEnvironment(getenv func(string) string) ConfigOption { assignInt(&cfg.ApplicationLogging.Forwarding.MaxSamplesStored, "NEW_RELIC_APPLICATION_LOGGING_FORWARDING_MAX_SAMPLES_STORED") assignBool(&cfg.ApplicationLogging.Metrics.Enabled, "NEW_RELIC_APPLICATION_LOGGING_METRICS_ENABLED") assignBool(&cfg.ApplicationLogging.LocalDecorating.Enabled, "NEW_RELIC_APPLICATION_LOGGING_LOCAL_DECORATING_ENABLED") + assignBool(&cfg.AIMonitoring.Enabled, "NEW_RELIC_AI_MONITORING_ENABLED") + assignBool(&cfg.AIMonitoring.Streaming, "NEW_RELIC_AI_MONITORING_STREAMING") + if env := getenv("NEW_RELIC_AI_MONITORING_INCLUDE_ONLY"); env != "" { + cfg.AIMonitoring.IncludeOnly = strings.Split(env, ",") + } if env := getenv("NEW_RELIC_LABELS"); env != "" { if labels := getLabels(getenv("NEW_RELIC_LABELS")); len(labels) > 0 { From fcaddba29c816e5c9c481c546a534f4159fa9dda Mon Sep 17 00:00:00 2001 From: Steve Willoughby Date: Thu, 21 Mar 2024 04:24:47 -0700 Subject: [PATCH 08/38] wip bedrock --- v3/go.mod | 8 + v3/integrations/nrawsbedrock/LICENSE.txt | 206 ++++++++ v3/integrations/nrawsbedrock/README.md | 12 + v3/integrations/nrawsbedrock/example/main.go | 89 ++++ v3/integrations/nrawsbedrock/go.mod | 36 ++ v3/integrations/nrawsbedrock/nrawsbedrock.go | 519 +++++++++++++++++++ v3/newrelic/application.go | 43 +- v3/newrelic/config.go | 16 - v3/newrelic/config_options.go | 49 +- v3/newrelic/internal_app.go | 3 + 10 files changed, 927 insertions(+), 54 deletions(-) create mode 100644 v3/integrations/nrawsbedrock/LICENSE.txt create mode 100644 v3/integrations/nrawsbedrock/README.md create mode 100644 v3/integrations/nrawsbedrock/example/main.go create mode 100644 v3/integrations/nrawsbedrock/go.mod create mode 100644 v3/integrations/nrawsbedrock/nrawsbedrock.go diff --git a/v3/go.mod b/v3/go.mod index fda43a82c..d98016abe 100644 --- a/v3/go.mod +++ b/v3/go.mod @@ -4,9 +4,17 @@ go 1.19 require ( github.com/golang/protobuf v1.5.3 + golang.org/x/exp v0.0.0-20240318143956-a85f2c67cd81 google.golang.org/grpc v1.56.3 ) +require ( + golang.org/x/net v0.9.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/protobuf v1.30.0 // indirect +) retract v3.22.0 // release process error corrected in v3.22.1 diff --git a/v3/integrations/nrawsbedrock/LICENSE.txt b/v3/integrations/nrawsbedrock/LICENSE.txt new file mode 100644 index 000000000..cee548c2d --- /dev/null +++ b/v3/integrations/nrawsbedrock/LICENSE.txt @@ -0,0 +1,206 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +Versions 3.8.0 and above for this project are licensed under Apache 2.0. For +prior versions of this project, please see the LICENCE.txt file in the root +directory of that version for more information. 
diff --git a/v3/integrations/nrawsbedrock/README.md b/v3/integrations/nrawsbedrock/README.md new file mode 100644 index 000000000..a1038aea7 --- /dev/null +++ b/v3/integrations/nrawsbedrock/README.md @@ -0,0 +1,12 @@ +# v3/integrations/nrawsbedrock [![GoDoc](https://godoc.org/github.com/newrelic/go-agent/v3/integrations/nrawsbedrock?status.svg)](https://godoc.org/github.com/newrelic/go-agent/v3/integrations/nrawsbedrock) + +Package `nrawsbedrock` instruments https://github.com/aws/aws-sdk-go-v2/service/bedrockruntime requests. + +This integration works independently of the `nrawssdk-v2` integration, which instruments AWS middleware components generally, while this one instruments Bedrock AI model invocations specifically and in detail. + +```go +import "github.com/newrelic/go-agent/v3/integrations/nrawsbedrock" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/v3/integrations/nrawsbedrock). diff --git a/v3/integrations/nrawsbedrock/example/main.go b/v3/integrations/nrawsbedrock/example/main.go new file mode 100644 index 000000000..813ec3568 --- /dev/null +++ b/v3/integrations/nrawsbedrock/example/main.go @@ -0,0 +1,89 @@ +// +// Example Bedrock client application with New Relic instrumentation +// +package main + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/bedrock" + "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" + "github.com/newrelic/go-agent/v3/integrations/nrawsbedrock" + "github.com/newrelic/go-agent/v3/newrelic" +) + +const region = "us-east-2" + +func main() { + sdkConfig, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) + if err != nil { + panic(err) + } + bedrockClient := bedrock.NewFromConfig(sdkConfig) + result, err := bedrockClient.ListFoundationModels(context.TODO(), &bedrock.ListFoundationModelsInput{}) + if err != nil { + panic(err) + } + if len(result.ModelSummaries) == 0 { + fmt.Println("no models found") + } + for _, modelSummary := range result.ModelSummaries { + fmt.Printf("Name: %30s | Provider: %20s | ID: %s\n", *modelSummary.ModelName, *modelSummary.ProviderName, *modelSummary.ModelId) + } + + // Create a New Relic application. This will look for your license key in an + // environment variable called NEW_RELIC_LICENSE_KEY. This example turns on + // Distributed Tracing, but that's not required. + app, err := newrelic.NewApplication( + newrelic.ConfigFromEnvironment(), + newrelic.ConfigAppName("Example App"), + newrelic.ConfigDebugLogger(os.Stdout), + newrelic.ConfigDistributedTracerEnabled(true), + newrelic.ConfigAIMonitoringEnabled(true), + ) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + // For demo purposes only. Don't use the app.WaitForConnection call in + // production unless this is a very short-lived process and the caller + // doesn't block or exit if there's an error. 
+ app.WaitForConnection(5 * time.Second) + + // Start recording a New Relic transaction + txn := app.StartTransaction("My sample transaction") + + model := "amazon.titan-text-lite-v1" + //model := "amazon.titan-embed-g1-text-02" + //model := "amazon.titan-text-express-v1" + brc := bedrockruntime.NewFromConfig(sdkConfig) + //output, err := brc.InvokeModel(context.Background(), &bedrockruntime.InvokeModelInput{ + output, err := nrawsbedrock.InvokeModel(app, brc, context.Background(), &bedrockruntime.InvokeModelInput{ + Body: []byte(`{ + "inputText": "What is your quest?", + "textGenerationConfig": { + "temperature": 0.5, + "maxTokenCount": 100 + } + }`), + ModelId: &model, + }) + if err != nil { + fmt.Printf("error: %v\n", err) + } else { + fmt.Printf("%v\n", output) + } + + // End the New Relic transaction + txn.End() + + // Force all the harvests and shutdown. Like the app.WaitForConnection call + // above, this is for the purposes of this demo only and can be safely + // removed for longer-running processes. + app.Shutdown(10 * time.Second) +} diff --git a/v3/integrations/nrawsbedrock/go.mod b/v3/integrations/nrawsbedrock/go.mod new file mode 100644 index 000000000..6d69a27b5 --- /dev/null +++ b/v3/integrations/nrawsbedrock/go.mod @@ -0,0 +1,36 @@ +module github.com/newrelic/go-agent/v3/integrations/nrawsbedrock + +go 1.19 + +require ( + github.com/aws/aws-sdk-go-v2/config v1.27.4 + github.com/aws/aws-sdk-go-v2/service/bedrock v1.7.3 + github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.7.1 + github.com/google/uuid v1.3.0 + github.com/newrelic/go-agent/v3 v3.30.0 +) + +require ( + github.com/aws/aws-sdk-go-v2 v1.26.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.4 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 // indirect + github.com/aws/smithy-go v1.20.1 // indirect + github.com/golang/protobuf v1.5.3 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/grpc v1.56.3 // indirect + google.golang.org/protobuf v1.30.0 // indirect +) + +replace github.com/newrelic/go-agent/v3 => ../.. diff --git a/v3/integrations/nrawsbedrock/nrawsbedrock.go b/v3/integrations/nrawsbedrock/nrawsbedrock.go new file mode 100644 index 000000000..c1d80a5e1 --- /dev/null +++ b/v3/integrations/nrawsbedrock/nrawsbedrock.go @@ -0,0 +1,519 @@ +// Copyright New Relic, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package nrawsbedrock instruments AI model invocation requests made by the
+// https://github.com/aws/aws-sdk-go-v2/service/bedrockruntime library.
+//
+// To use this integration, enable the New Relic AIMonitoring configuration options
+// in your application, import this integration, and use the model invocation calls
+// from this library in place of the corresponding ones from the AWS Bedrock
+// runtime library.
+//
+// See example/main.go for a working sample.
+package nrawsbedrock
+
+import (
+	"context"
+	"encoding/json"
+	"runtime/debug"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
+	"github.com/google/uuid"
+	"github.com/newrelic/go-agent/v3/internal"
+	"github.com/newrelic/go-agent/v3/internal/integrationsupport"
+	"github.com/newrelic/go-agent/v3/newrelic"
+)
+
+var reportStreamingDisabled func()
+
+func init() {
+	reportStreamingDisabled = sync.OnceFunc(func() {
+		internal.TrackUsage("Go", "ML", "Streaming", "Disabled")
+	})
+
+	// Get the version of the AWS Bedrock library we're using
+	info, ok := debug.ReadBuildInfo()
+	if info != nil && ok {
+		for _, module := range info.Deps {
+			if module != nil && strings.Contains(module.Path, "/aws/aws-sdk-go-v2/service/bedrockruntime") {
+				if len(module.Version) > 1 && module.Version[0] == 'v' {
+					internal.TrackUsage("Go", "ML", "Bedrock", module.Version[1:])
+				} else {
+					internal.TrackUsage("Go", "ML", "Bedrock", module.Version)
+				}
+				return
+			}
+		}
+	}
+	internal.TrackUsage("Go", "ML", "Bedrock", "unknown")
+}
+
+// isEnabled determines whether AI Monitoring is enabled in the app's options.
+// It returns two boolean values: whether AI Monitoring is enabled at all, and
+// whether recording of prompt and response content is enabled. If streaming
+// instrumentation was requested but streaming is disabled, both values are
+// false. As a side effect, it reports the Go/ML/Streaming/Disabled
+// supportability metric the first time it finds streaming disabled; we can't
+// report that any earlier because the application must be created and its
+// options loaded before we know whether that metric applies.
+func isEnabled(app *newrelic.Application, streaming bool) (bool, bool) {
+	if app == nil {
+		return false, false
+	}
+	config, _ := app.Config()
+	if !config.AIMonitoring.Streaming.Enabled {
+		if reportStreamingDisabled != nil {
+			reportStreamingDisabled()
+		}
+		if streaming {
+			// we asked for streaming but it's not enabled
+			return false, false
+		}
+	}
+
+	return config.AIMonitoring.Enabled, config.AIMonitoring.RecordContent.Enabled
+}
+
+// ResponseStream tracks the model invocation throughout its lifetime until all stream events
+// are processed.
+type ResponseStream struct {
+	// The request parameters that started the invocation
+	ctx    context.Context
+	app    *newrelic.Application
+	client *bedrockruntime.Client
+	params *bedrockruntime.InvokeModelInput
+
+	// The model output
+	response *bedrockruntime.InvokeModelWithResponseStreamOutput
+}
+
+//
+// InvokeModelWithResponseStream works as InvokeModel does, but the response is returned as a stream.
+// In summary, given a bedrockruntime.Client b, where you would normally call the AWS method
+//	b.InvokeModelWithResponseStream(c, p, f...)
+// You instead invoke the New Relic InvokeModelWithResponseStream function as:
+//	nrbedrock.InvokeModelWithResponseStream(app, b, c, p, f...)
+// where app is your New Relic Application value.
+//
+// Either start a transaction on your own and add it to the context c passed into this function, or
+// a transaction will be started for you that lasts only for the duration of the model invocation.
+//
+// You may elect to have our function collect the stream's output for you, instrumenting the operations
+// along the way. To do this, pass a callback function with the invocation signature
+//	callback(app *newrelic.Application, txn *newrelic.Transaction, ctx context.Context, part []byte) error
+// This will be called for every event read from the response stream, allowing your application
+// to collect and use that streamed data as it arrives. If your callback function returns a non-nil error
+// value, the stream reading will terminate immediately.
+//
+// Alternatively, if a nil value is passed for the callback function, we will assume that you want to do all
+// the stream reading yourself. In that case you will need to make calls to the following functions as you
+// read data in order for the instrumentation to function correctly.
+//
+//	RecordStreamResponseEvent(app, txn, ctx, part)
+//		for every part read from the output stream.
+//
+//	CompleteStreamResponse(app, txn, ctx)
+//		after you have finished reading all of the stream data. This finishes up the
+//		instrumentation for the model invocation.
+//
+/*
+func InvokeModelWithResponseStream(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, params *bedrockruntime.InvokeModelInput, optFns ...func(*bedrockruntime.Options)) (ResponseStream, error) {
+}
+
+func (s *ResponseStream) RecordEvent(data []byte) error {}
+func (s *ResponseStream) Close() error {}
+
+func InvokeModelWithResponseStream(app *newrelic.Application, callback func(*newrelic.Application, *newrelic.Transaction, context.Context, []byte) error, brc *bedrockruntime.Client, ctx context.Context, params *bedrockruntime.InvokeModelInput, optFns ...func(*bedrockruntime.Options)) (*bedrockruntime.InvokeModelWithResponseStreamOutput, error) {
+}
+
+func RecordStreamResponseEvent() {}
+func CompleteStreamResponse() {}
+*/
+
+//
+// InvokeModel provides an instrumented interface through which to call the AWS Bedrock InvokeModel function.
+// Where you would normally invoke the InvokeModel method on a bedrockruntime.Client value b from AWS as:
+//	b.InvokeModel(c, p, f...)
+// You instead invoke the New Relic InvokeModel function as:
+//	nrbedrock.InvokeModel(app, b, c, p, f...)
+// where app is the New Relic Application value returned from NewApplication when you started
+// your application. If you start a transaction and add it to the passed context value c in the above
+// invocation, the instrumentation will be recorded on that transaction, including a segment for the Bedrock
+// call itself. If you don't, a new transaction will be started for you, which will be terminated when the
+// InvokeModel function exits.
+//
+// If the transaction is unable to be created or used, the Bedrock call will be made anyway, without instrumentation.
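+//
+// A minimal usage sketch (the model ID and request body below are illustrative,
+// mirroring example/main.go in this integration; app, brc, and ctx are assumed
+// to be set up as shown there):
+//
+//	brc := bedrockruntime.NewFromConfig(sdkConfig)
+//	modelID := "amazon.titan-text-lite-v1"
+//	output, err := nrawsbedrock.InvokeModel(app, brc, ctx, &bedrockruntime.InvokeModelInput{
+//		Body:    []byte(`{"inputText": "What is your quest?"}`),
+//		ModelId: &modelID,
+//	})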
+//
+func InvokeModel(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, params *bedrockruntime.InvokeModelInput, optFns ...func(*bedrockruntime.Options)) (*bedrockruntime.InvokeModelOutput, error) {
+	var txn *newrelic.Transaction // the transaction to record in, or nil if we aren't instrumenting this time
+
+	// spanID := txn.GetTraceMetadata().SpanID
+	// traceID := txn.GetTraceMetadata().TraceID
+	// transactionID := traceID[:16]
+	// uuid := uuid.New()
+
+	// params: .Body []byte .ModelId *string .Accept *string ("application/json") .ContentType *string ("application/json")
+	// output: .Body []byte .ContentType *string .ResultMetadata middleware.Metadata (.Get(key any)->any|nil, .Has(key any)->bool, .Set(key, value any)
+	aiEnabled, recordContentEnabled := isEnabled(app, false)
+	if aiEnabled {
+		txn = newrelic.FromContext(ctx)
+		if txn == nil {
+			if txn = app.StartTransaction("InvokeModel"); txn != nil {
+				defer txn.End()
+			}
+		}
+	}
+
+	var embedding bool
+	idKey := "completion_id"
+
+	if txn != nil {
+		integrationsupport.AddAgentAttribute(txn, "llm", "", true)
+		if params.ModelId != nil {
+			if embedding = strings.Contains(*params.ModelId, "embed"); embedding {
+				defer txn.StartSegment("Llm/embedding/Bedrock/InvokeModel").End()
+				idKey = "embedding_id"
+			} else {
+				defer txn.StartSegment("Llm/completion/Bedrock/InvokeModel").End()
+			}
+		} else {
+			// we don't have a model!
+			txn = nil
+		}
+	}
+
+	start := time.Now()
+	output, err := brc.InvokeModel(ctx, params, optFns...)
+	duration := time.Since(start).Milliseconds()
+
+	if txn != nil {
+		md := txn.GetTraceMetadata()
+		uuid := uuid.New()
+		meta := map[string]any{
+			"id":             uuid.String(),
+			"span_id":        md.SpanID,
+			"trace_id":       md.TraceID,
+			"request.model":  *params.ModelId,
+			"response.model": *params.ModelId,
+			"vendor":         "bedrock",
+			"ingest_source":  "Go",
+			"duration":       duration,
+		}
+
+		if err != nil {
+			txn.NoticeError(newrelic.Error{
+				Message: err.Error(),
+				Class:   "BedrockError",
+				Attributes: map[string]any{
+					idKey: uuid.String(),
+				},
+			})
+			meta["error"] = true
+		}
+
+		// go fishing in the request and response JSON strings to find values we want to
+		// record with our instrumentation
+
+		var requestData, responseData map[string]any
+		var inputString string
+
+		if params.Body != nil && json.Unmarshal(params.Body, &requestData) == nil {
+			if recordContentEnabled {
+				if s, ok := requestData["inputText"]; ok {
+					inputString, _ = s.(string)
+				} else if s, ok := requestData["prompt"]; ok {
+					inputString, _ = s.(string)
+				} else if ss, ok := requestData["texts"]; ok {
+					// json.Unmarshal decodes a JSON array as []any, never as
+					// []string, so collect the string elements one by one.
+					if slist, ok := ss.([]any); ok {
+						texts := make([]string, 0, len(slist))
+						for _, t := range slist {
+							if ts, ok := t.(string); ok {
+								texts = append(texts, ts)
+							}
+						}
+						inputString = strings.Join(texts, ",")
+					}
+				}
+			}
+
+			if cfg, ok := requestData["textGenerationConfig"]; ok {
+				if cfgMap, ok := cfg.(map[string]any); ok {
+					if t, ok := cfgMap["temperature"]; ok {
+						meta["request.temperature"] = t
+					}
+					if m, ok := cfgMap["maxTokenCount"]; ok {
+						meta["request.max_tokens"] = m
+					}
+				}
+			} else if t, ok := requestData["temperature"]; ok {
+				meta["request.temperature"] = t
+			}
+			if m, ok := requestData["max_tokens_to_sample"]; ok {
+				meta["request.max_tokens"] = m
+			} else if m, ok := requestData["max_tokens"]; ok {
+				meta["request.max_tokens"] = m
+			} else if m, ok := requestData["maxTokens"]; ok {
+				meta["request.max_tokens"] = m
+			} else if m, ok := requestData["max_gen_len"]; ok {
+				meta["request.max_tokens"] = m
+			}
+		}
+
+		var stopReason string
+		if output != nil && output.Body != nil {
+			if json.Unmarshal(output.Body, &responseData) == nil {
+				if recordContentEnabled && inputString == "" {
+					if s, ok := responseData["prompt"]; ok {
+						inputString, _ = s.(string)
+					}
+				}
+				if id, ok := responseData["id"]; ok {
+					meta["request_id"] = id
+				}
+
+				// As above, json.Unmarshal decodes JSON arrays of objects as
+				// []any holding map[string]any values, so assert each element
+				// type individually.
+				if s, ok := responseData["stop_reason"]; ok {
+					stopReason, _ = s.(string)
+				} else if rs, ok := responseData["results"]; ok {
+					if crs, ok := rs.([]any); ok {
+						for _, crv := range crs {
+							if crvMap, ok := crv.(map[string]any); ok {
+								if reason, ok := crvMap["completionReason"]; ok {
+									stopReason, _ = reason.(string)
+									break
+								}
+							}
+						}
+					}
+				}
+				if stopReason == "" {
+					if rs, ok := responseData["outputs"]; ok {
+						if crs, ok := rs.([]any); ok {
+							for _, crv := range crs {
+								if crvMap, ok := crv.(map[string]any); ok {
+									if reason, ok := crvMap["stop_reason"]; ok {
+										stopReason, _ = reason.(string)
+										break
+									}
+								}
+							}
+						}
+					}
+				}
+				if stopReason == "" {
+					if rs, ok := responseData["generations"]; ok {
+						if crs, ok := rs.([]any); ok {
+							for _, crv := range crs {
+								if crvMap, ok := crv.(map[string]any); ok {
+									if reason, ok := crvMap["finish_reason"]; ok {
+										stopReason, _ = reason.(string)
+										break
+									}
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+
+		/* ESC "llm.*" */
+		var userCount int
+		if app.HasLLMTokenCountCallback() {
+			userCount, _ = app.InvokeLLMTokenCountCallback(*params.ModelId, inputString)
+		}
+
+		if embedding {
+			if userCount > 0 {
+				meta["token_count"] = userCount
+			}
+			if inputString != "" {
+				meta["input"] = inputString
+			}
+			app.RecordCustomEvent("LlmEmbedding", meta)
+		} else {
+			if stopReason != "" {
+				meta["response.choices.finish_reason"] = stopReason
+			}
+			app.RecordCustomEvent("LlmChatCompletionSummary", meta)
+			delete(meta, "duration")
+			if userCount > 0 {
+				meta["token_count"] = userCount
+			}
+			if inputString != "" {
+				meta["content"] = inputString
+			}
+			/*
+				C "role"          // role of creator
+				C "sequence"      // 0..
+				C "completion_id" // id of S event that this is connected to
+				C "is_response"   // if result of chat completion and not input message
+			*/
+			app.RecordCustomEvent("LlmChatCompletionMessage", meta)
+		}
+	}
+	// Propagate the SDK's error (if any) to the caller, just as the
+	// uninstrumented InvokeModel call would.
+	return output, err
+}
+
+/***
+We support:
+	Anthropic Claude
+		anthropic.claude-v2
+		anthropic.claude-v2:1
+		anthropic.claude-3-sonnet-...
+		anthropic.claude-3-haiku-...
+		anthropic.claude-instant-v1
+	Amazon Titan
+		amazon.titan-text-express-v1
+		amazon.titan-text-lite-v1
+E		amazon.titan-embed-text-v1
+	Meta Llama 2
+		meta.llama2-13b-chat-v1
+		meta.llama2-70b-chat-v1
+	Cohere Command
+		cohere.command-text-v14
+		cohere.command-light-text-v14
+E		cohere.embed-english-v3
+E		cohere.embed-multilingual-v3
+		texts:[string] embeddings:[1024 floats]
+		input_type:s => id:s
+		truncate:s response_type:s
+		texts:[s]
+	AI21 Labs Jurassic
+		ai21.j2-mid-v1
+		ai21.j2-ultra-v1
+
+only text-based models
+send LLM events as custom events ONLY when there is a transaction active
+attrs limited to 4095 normally but LLM events are an exception to this. NO limits.
+MAY limit other but MUST leave these unlimited:
+	LlmChatCompletionMessage event, attr content
+	LlmEmbedding event, attr input
+
+Events recorded:
+	LlmEmbedding (creation of an embedding)
+		id			UUID we generate
+		request_id		from response headers usually
+		span_id			GUID assoc'd with active span
+		trace_id		current trace ID
+		input			input to the embedding creation call
+		request.model		model name e.g. gpt-3.5-turbo
+		response.model		model name returned in response
+		response.organization	org ID returned in response or headers
+		token_count		value from LLMTokenCountCallback or omitted
+		vendor			"bedrock"
+		ingest_source		"Go"
+		duration		total time taken for chat completion in ms
+		error			true if error occurred or omitted
+		llm.			**custom**
+		response.headers.	**response**
+	LlmChatCompletionSummary (high-level data about creation of chat completion including request, response, and call info)
+		id			UUID we generate
+		request_id		from response headers usually
+		span_id			GUID assoc'd with active span
+		trace_id		current trace ID
+		request.temperature	how random/deterministic output should be
+		request.max_tokens	max #tokens that can be generated
+		request.model		model name e.g. gpt-3.5-turbo
+		response.model		model name returned in response
+		response.number_of_messages	number of msgs comprising completion
+		response.choices.finish_reason	reason model stopped (e.g. "stop")
+		vendor			"bedrock"
+		ingest_source		"Go"
+		duration		total time taken for chat completion in ms
+		error			true if error occurred or omitted
+		llm.			**custom**
+		response.headers.	**response**
+
+	LlmChatCompletionMessage (each message sent/received from chat completion call)
+		id			UUID we generate OR - returned by LLM
+		request_id		from response headers usually
+		span_id			GUID assoc'd with active span
+		trace_id		current trace ID
+		??request.model		model name e.g. gpt-3.5-turbo
+		response.model		model name returned in response
+		vendor			"bedrock"
+		ingest_source		"Go"
+		content			content of msg
+		role			role of msg creator
+		sequence		index (0..) w/each msg including prompt and responses
+		completion_id		ID of LlmChatCompletionSummary event that this event is connected to
+		is_response		true if msg is result of completion, not input msg OR omitted
+		token_count		value from LLMTokenCountCallback or omitted
+		llm.			**custom**
+
+response.model = request.model if we don't get a response.model
+custom attributes to LLM events have llm. prefix and this should be retained
+llm.conversation_id
+
+**custom**
+user may add custom attributes to txn but we MUST strip out all that don't start with
+"llm."
+we recommend adding llm.conversation_id since that has UI implications
+
+**response**
+Capture response header values and add them as attributes to LLMEmbedding and
+LLMChatCompletionSummary events as "response.headers." if present,
+omit any that are not present.
+
+OpenAI: llmVersion, ratelimitLimitRequests, ratelimitResetTokens, ratelimitLimitTokens,
+ratelimitRemainingTokens, ratelimitRemainingRequests, ratelimitLimitTokensUsageBased,
+ratelimitResetTokensUsageBased, ratelimitRemainingTokensUsageBased
+Bedrock: ??
+
+MUST add "llm: True" as agent attr to any txn that contains instrumented LLM functions.
+MUST be sent to txn events attr dest (DST_TRANSACTION_EVENTS). OMIT if there are no
+LLM events in the txn.
+
+MUST create span for each LLM embedding and chat completion call. MUST only be created
+if there is a txn. MUST name them "Llm/completion|embedding/Bedrock/invoke_model|create|etc"
+
+Errors -> notice_error
+	http.statusCode, error.code (exception), error.param (exception), completion_id, embedding_id
+	STILL create LlmChatCompletionSummary and LlmEmbedding events in error context
+	with all attrs that can be captured, plus set error=true.
+
+Supportability Metric
+X	Supportability/Go/Bedrock/
+X	Supportability/Go/ML/Streaming/Disabled	if !ai_monitoring.streaming.enabled
+
+Config
+	ai_monitoring.enabled
+	ai_monitoring.streaming.enabled
+	ai_monitoring.record_content.enabled
+		If false, suppress
+			LlmChatCompletionMessage.content
+			LlmEmbedding.input
+			LlmTool.input
+			LlmTool.output
+			LlmVectorSearch.request.query
+			LlmVectorSearchResult.page_content
+
+Feedback
+	tracked on trace ID
+	API: getCurrentTraceID() or something to get the ID of the current active trace
+	OR use pre-existing getLinkingMetadata to pull from map of returned data values
+	**this means DT must be enabled to use feedback
+
+	API: RecordLLMFeedbackEvent() -> custom event which includes end user feedback data
+	API: LLMTokenCountCallback() to get the token count
+		pass model name (string), content of message/prompt (string)
+		receive integer count value -> token_count attr in LlmChatCompletionMessage or
+		LlmEmbedding event UNLESS value <= 0, in which case ignore it.
+	API: function to register the callback function, allowed to replace with a new one
+		at any time.
+
+New models mistral.mistral-7b-instruct-v0:2, mistral.mixtral-8x7b-instruct-v0:1 support?
+	-> body looks like {
+		'prompt': <prompt>,
+		'max_tokens': <int>,
+		'temperature': <float>
+	}
+
+openai response headers include these but not always since they aren't always present
+	ratelimitLimitTokensUsageBased
+	ratelimitResetTokensUsageBased
+	ratelimitRemainingTokensUsageBased
+***/
diff --git a/v3/newrelic/application.go b/v3/newrelic/application.go
index 1132dd7b6..d1ec4994c 100644
--- a/v3/newrelic/application.go
+++ b/v3/newrelic/application.go
@@ -6,8 +6,6 @@ package newrelic
 import (
 	"os"
 	"time"
-
-	"golang.org/x/exp/slices"
 )
 
 // Application represents your application. All methods on Application are nil
@@ -17,6 +15,7 @@ type Application struct {
 	app *app
 }
 
+/*
 // IsAIMonitoringEnabled returns true if monitoring for the specified mode of the named integration is enabled.
 func (app *Application) IsAIMonitoringEnabled(integration string, streaming bool) bool {
 	if app == nil || app.app == nil || app.app.run == nil {
@@ -34,6 +33,7 @@ func (app *Application) IsAIMonitoringEnabled(integration string, streaming bool
 	}
 	return true
 }
+*/
 
 // StartTransaction begins a Transaction with the given name.
 func (app *Application) StartTransaction(name string, opts ...TraceOption) *Transaction {
@@ -68,7 +68,7 @@ func (app *Application) RecordCustomEvent(eventType string, params map[string]in
 	}
 }
 
-// RecordLlmFeedbackEvent adds a LLM Feedback event.
+// RecordLLMFeedbackEvent adds an LLM Feedback event.
 // An error is logged if eventType or params is invalid.
 func (app *Application) RecordLLMFeedbackEvent(trace_id string, rating any, category string, message string, metadata map[string]interface{}) {
 	if app == nil || app.app == nil {
@@ -94,6 +94,43 @@ func (app *Application) RecordLLMFeedbackEvent(trace_id string, rating any, cate
 	}
 }
 
+// InvokeLLMTokenCountCallback invokes the function registered previously as the callback
+// function to compute token counts to report for LLM transactions, if any. If there is
+// no current callback function, this simply returns a zero count and a false boolean value.
+// Otherwise, it returns the value returned by the callback and a true value.
+//
+// Although there's no harm in calling this method to invoke your callback function,
+// there is no need (or particular benefit) in doing so. This is called as needed internally
+// by the AI Monitoring integrations.
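+//
+// A short sketch of the callback mechanism (the whitespace-based count below is
+// only an illustrative stand-in for a real tokenizer; SetLLMTokenCountCallback
+// is defined later in this file):
+//
+//	app.SetLLMTokenCountCallback(func(model, content string) int {
+//		return len(strings.Fields(content)) // crude approximation
+//	})
+//	count, ok := app.InvokeLLMTokenCountCallback("amazon.titan-text-lite-v1", "some prompt text")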
+func (app *Application) InvokeLLMTokenCountCallback(model, content string) (int, bool) {
+	if app == nil || app.app == nil || app.app.llmTokenCountCallback == nil {
+		return 0, false
+	}
+	return app.app.llmTokenCountCallback(model, content), true
+}
+
+// HasLLMTokenCountCallback returns true if there is currently a registered callback function
+// or false otherwise.
+func (app *Application) HasLLMTokenCountCallback() bool {
+	return app != nil && app.app != nil && app.app.llmTokenCountCallback != nil
+}
+
+// SetLLMTokenCountCallback registers a callback function which will be used by the AI Monitoring
+// integration packages in cases where they are unable to determine the token counts directly.
+// You may call SetLLMTokenCountCallback multiple times. If you do, each call registers a new
+// callback function which replaces the previous one. Calling SetLLMTokenCountCallback(nil) removes
+// the callback function entirely.
+//
+// Your callback function will be passed two string parameters: model name and content. It must
+// return a single integer value which is the number of tokens to report. If it returns a value less
+// than or equal to zero, no token count report will be made (which includes the case where your
+// callback function was unable to determine the token count).
+func (app *Application) SetLLMTokenCountCallback(callbackFunction func(string, string) int) {
+	if app != nil && app.app != nil {
+		app.app.llmTokenCountCallback = callbackFunction
+	}
+}
+
 // RecordCustomMetric records a custom metric. The metric name you
 // provide will be prefixed by "Custom/". Custom metrics are not
 // currently supported in serverless mode.
diff --git a/v3/newrelic/config.go b/v3/newrelic/config.go
index 6382e6c8d..d0461ca1c 100644
--- a/v3/newrelic/config.go
+++ b/v3/newrelic/config.go
@@ -461,17 +461,6 @@ type Config struct {
 	}
 	// Security is used to post security configuration on UI.
 	Security interface{} `json:"Security,omitempty"`
-
-	// AIMonitoring controls instrumentation and reporting of AI model invocations and feedback.
-	AIMonitoring struct {
-		// Enabled controls whether any AI instrumentation is to be performed.
-		Enabled bool
-		// Streaming enables streaming mode instrumentation.
-		Streaming bool
-		// IncludeOnly is the list of specific integrations to enable. If empty, all integrations which
-		// were compiled into the application will be active whenever Enabled is true.
-		IncludeOnly []string
-	}
 }
 
 // CodeLevelMetricsScope is a bit-encoded value. Each such value describes
@@ -704,11 +693,6 @@ func defaultConfig() Config {
 	// Module Dependency Metrics
 	c.ModuleDependencyMetrics.Enabled = true
 	c.ModuleDependencyMetrics.RedactIgnoredPrefixes = true
-
-	// AI Monitoring
-	c.AIMonitoring.Enabled = false
-	c.AIMonitoring.Streaming = false
-	c.AIMonitoring.IncludeOnly = nil
 	return c
 }
 
diff --git a/v3/newrelic/config_options.go b/v3/newrelic/config_options.go
index 4b1d1e8ab..875afa494 100644
--- a/v3/newrelic/config_options.go
+++ b/v3/newrelic/config_options.go
@@ -60,37 +60,10 @@ func ConfigDistributedTracerReservoirLimit(limit int) ConfigOption {
 	return func(cfg *Config) { cfg.DistributedTracer.ReservoirLimit = limit }
 }
 
-// ConfigAIMonitoringEnabled turns on or off the collection of AI Monitoring metrics.
-func ConfigAIMonitoringEnabled(enabled bool) ConfigOption {
-	return func(cfg *Config) {
-		cfg.AIMonitoring.Enabled = enabled
-	}
-}
-
-// ConfigAIMonitoringStreaming turns on or off the collection of AI Monitoring streaming mode metrics.
-func ConfigAIMonitoringStreaming(enabled bool) ConfigOption {
-	return func(cfg *Config) {
-		cfg.AIMonitoring.Streaming = enabled
-	}
-}
-
-// ConfigAIMonitoringIncludeOnly sets the list of specific AI integrations to enable, if not the entire list.
-// A nil slice of integration names means not to restrict which ones are enabled.
-func ConfigAIMonitoringIncludeOnly(integrations []string) ConfigOption {
-	return func(cfg *Config) {
-		cfg.AIMonitoring.IncludeOnly = integrations
-	}
-}
-
-const (
-	AIMonitoringBedrock = "nrawsbedrock"
-)
-
-// ConfigAIMonitoringIncludeOnlyString is like ConfigAIMonitoringIncludeOnly except that it takes a single
-// comma-separated list of names.
-func ConfigAIMonitoringIncludeOnlyString(integrations string) ConfigOption {
+// ConfigAIMonitoringStreamingEnabled turns on or off the collection of AI Monitoring streaming mode metrics.
+func ConfigAIMonitoringStreamingEnabled(enabled bool) ConfigOption {
 	return func(cfg *Config) {
-		cfg.AIMonitoring.IncludeOnly = strings.Split(integrations, ",")
+		cfg.AIMonitoring.Streaming.Enabled = enabled
 	}
 }
 
@@ -270,6 +243,9 @@ func ConfigAppLogDecoratingEnabled(enabled bool) ConfigOption {
 	}
 }
 
+// ConfigAIMonitoringEnabled enables or disables the collection of AI Monitoring event data.
+// Note that if HighSecurity is enabled, AI Monitoring will automatically be disabled. In this
+// case you MUST enable HighSecurity BEFORE calling ConfigAIMonitoringEnabled.
 func ConfigAIMonitoringEnabled(enabled bool) ConfigOption {
 	return func(cfg *Config) {
 		if enabled && !cfg.HighSecurity {
@@ -280,6 +256,8 @@ func ConfigAIMonitoringEnabled(enabled bool) ConfigOption {
 	}
 }
 
+// ConfigAIMonitoringRecordContentEnabled enables or disables the collection of the prompt and
+// response data along with other AI event metadata.
 func ConfigAIMonitoringRecordContentEnabled(enabled bool) ConfigOption {
 	return func(cfg *Config) {
 		cfg.AIMonitoring.RecordContent.Enabled = enabled
@@ -414,8 +392,8 @@ func ConfigDebugLogger(w io.Writer) ConfigOption {
 // NEW_RELIC_APPLICATION_LOGGING_LOCAL_DECORATING_ENABLED sets ApplicationLogging.LocalDecorating.Enabled. Set to true to enable local log decoration.
 // NEW_RELIC_APPLICATION_LOGGING_FORWARDING_MAX_SAMPLES_STORED sets ApplicationLogging.Forwarding.MaxSamplesStored. Set to 0 to prevent captured logs from being forwarded.
 // NEW_RELIC_AI_MONITORING_ENABLED sets AIMonitoring.Enabled
-// NEW_RELIC_AI_MONITORING_STREAMING sets AIMonitoring.Streaming
-// NEW_RELIC_AI_MONITORING_INCLUDE_ONLY sets AIMonitoring.IncludeOnly
+// NEW_RELIC_AI_MONITORING_STREAMING_ENABLED sets AIMonitoring.Streaming.Enabled
+// NEW_RELIC_AI_MONITORING_RECORD_CONTENT_ENABLED sets AIMonitoring.RecordContent.Enabled
 //
 // This function is strict and will assign Config.Error if any of the
 // environment variables cannot be parsed.
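+//
+// For example (a sketch; the values shown are illustrative):
+//
+//	export NEW_RELIC_AI_MONITORING_ENABLED=true
+//	export NEW_RELIC_AI_MONITORING_STREAMING_ENABLED=true
+//	export NEW_RELIC_AI_MONITORING_RECORD_CONTENT_ENABLED=false
+//
+// and then, in the application, pick those settings up with:
+//
+//	app, err := newrelic.NewApplication(newrelic.ConfigFromEnvironment())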
@@ -480,9 +458,10 @@ func configFromEnvironment(getenv func(string) string) ConfigOption { assignBool(&cfg.ApplicationLogging.Metrics.Enabled, "NEW_RELIC_APPLICATION_LOGGING_METRICS_ENABLED") assignBool(&cfg.ApplicationLogging.LocalDecorating.Enabled, "NEW_RELIC_APPLICATION_LOGGING_LOCAL_DECORATING_ENABLED") assignBool(&cfg.AIMonitoring.Enabled, "NEW_RELIC_AI_MONITORING_ENABLED") - assignBool(&cfg.AIMonitoring.Streaming, "NEW_RELIC_AI_MONITORING_STREAMING") - if env := getenv("NEW_RELIC_AI_MONITORING_INCLUDE_ONLY"); env != "" { - cfg.AIMonitoring.IncludeOnly = strings.Split(env, ",") + assignBool(&cfg.AIMonitoring.Streaming.Enabled, "NEW_RELIC_AI_MONITORING_STREAMING_ENABLED") + assignBool(&cfg.AIMonitoring.RecordContent.Enabled, "NEW_RELIC_AI_MONITORING_RECORD_CONTENT_ENABLED") + if cfg.HighSecurity { + cfg.AIMonitoring.Enabled = false } if env := getenv("NEW_RELIC_LABELS"); env != "" { diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index 3b0d55f39..00e510347 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -63,6 +63,9 @@ type app struct { // (disconnect, license exception, shutdown). err error + // registered callback functions + llmTokenCountCallback func(string, string) int + serverless *serverlessHarvest } From f7bdaf867836a1e359589dc9b27ccbd4e3229109 Mon Sep 17 00:00:00 2001 From: Steve Willoughby Date: Thu, 21 Mar 2024 04:26:31 -0700 Subject: [PATCH 09/38] fixed nrawssdk-v2 --- v3/integrations/nrawssdk-v2/LICENSE.txt | 206 +++++++ v3/integrations/nrawssdk-v2/README.md | 10 + v3/integrations/nrawssdk-v2/example/main.go | 67 +++ v3/integrations/nrawssdk-v2/go.mod | 18 + v3/integrations/nrawssdk-v2/nrawssdk.go | 144 +++++ v3/integrations/nrawssdk-v2/nrawssdk_test.go | 580 +++++++++++++++++++ 6 files changed, 1025 insertions(+) create mode 100644 v3/integrations/nrawssdk-v2/LICENSE.txt create mode 100644 v3/integrations/nrawssdk-v2/README.md create mode 100644 v3/integrations/nrawssdk-v2/example/main.go create mode 100644 v3/integrations/nrawssdk-v2/go.mod create mode 100644 v3/integrations/nrawssdk-v2/nrawssdk.go create mode 100644 v3/integrations/nrawssdk-v2/nrawssdk_test.go diff --git a/v3/integrations/nrawssdk-v2/LICENSE.txt b/v3/integrations/nrawssdk-v2/LICENSE.txt new file mode 100644 index 000000000..cee548c2d --- /dev/null +++ b/v3/integrations/nrawssdk-v2/LICENSE.txt @@ -0,0 +1,206 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +Versions 3.8.0 and above for this project are licensed under Apache 2.0. For +prior versions of this project, please see the LICENCE.txt file in the root +directory of that version for more information. 
diff --git a/v3/integrations/nrawssdk-v2/README.md b/v3/integrations/nrawssdk-v2/README.md new file mode 100644 index 000000000..fe4869102 --- /dev/null +++ b/v3/integrations/nrawssdk-v2/README.md @@ -0,0 +1,10 @@ +# v3/integrations/nrawssdk-v2 [![GoDoc](https://godoc.org/github.com/newrelic/go-agent/v3/integrations/nrawssdk-v2?status.svg)](https://godoc.org/github.com/newrelic/go-agent/v3/integrations/nrawssdk-v2) + +Package `nrawssdk` instruments https://github.com/aws/aws-sdk-go-v2 requests. + +```go +import "github.com/newrelic/go-agent/v3/integrations/nrawssdk-v2" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/v3/integrations/nrawssdk-v2). diff --git a/v3/integrations/nrawssdk-v2/example/main.go b/v3/integrations/nrawssdk-v2/example/main.go new file mode 100644 index 000000000..3f740302e --- /dev/null +++ b/v3/integrations/nrawssdk-v2/example/main.go @@ -0,0 +1,67 @@ +package main + +import ( + "context" + "fmt" + "log" + "os" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/s3" + nraws "github.com/newrelic/go-agent/v3/integrations/nrawssdk-v2" + "github.com/newrelic/go-agent/v3/newrelic" +) + +func main() { + + // Create a New Relic application. This will look for your license key in an + // environment variable called NEW_RELIC_LICENSE_KEY. This example turns on + // Distributed Tracing, but that's not required. + app, err := newrelic.NewApplication( + newrelic.ConfigFromEnvironment(), + newrelic.ConfigAppName("Example App"), + newrelic.ConfigInfoLogger(os.Stdout), + newrelic.ConfigDistributedTracerEnabled(true), + ) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + // For demo purposes only. Don't use the app.WaitForConnection call in + // production unless this is a very short-lived process and the caller + // doesn't block or exit if there's an error. + app.WaitForConnection(5 * time.Second) + + // Start recording a New Relic transaction + txn := app.StartTransaction("My sample transaction") + + ctx := context.Background() + awsConfig, err := config.LoadDefaultConfig(ctx) + if err != nil { + log.Fatal(err) + } + + // Instrument all new AWS clients with New Relic + nraws.AppendMiddlewares(&awsConfig.APIOptions, nil) + + s3Client := s3.NewFromConfig(awsConfig) + output, err := s3Client.ListBuckets(ctx, nil) + if err != nil { + log.Fatal(err) + } + + for _, object := range output.Buckets { + log.Printf("Bucket name is %s\n", aws.ToString(object.Name)) + } + + // End the New Relic transaction + txn.End() + + // Force all the harvests and shutdown. Like the app.WaitForConnection call + // above, this is for the purposes of this demo only and can be safely + // removed for longer-running processes. 
+	app.Shutdown(10 * time.Second)
+}
diff --git a/v3/integrations/nrawssdk-v2/go.mod b/v3/integrations/nrawssdk-v2/go.mod
new file mode 100644
index 000000000..1597772e1
--- /dev/null
+++ b/v3/integrations/nrawssdk-v2/go.mod
@@ -0,0 +1,18 @@
+module github.com/newrelic/go-agent/v3/integrations/nrawssdk-v2
+
+// As of May 2021, the aws-sdk-go-v2 go.mod file uses 1.15:
+// https://github.com/aws/aws-sdk-go-v2/blob/master/go.mod
+go 1.19
+
+require (
+	github.com/aws/aws-sdk-go-v2 v1.16.15
+	github.com/aws/aws-sdk-go-v2/config v1.17.6
+	github.com/aws/aws-sdk-go-v2/service/dynamodb v1.17.0
+	github.com/aws/aws-sdk-go-v2/service/lambda v1.24.5
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.27.10
+	github.com/aws/smithy-go v1.13.3
+	github.com/newrelic/go-agent/v3 v3.30.0
+)
+
+replace github.com/newrelic/go-agent/v3 => ../..
diff --git a/v3/integrations/nrawssdk-v2/nrawssdk.go b/v3/integrations/nrawssdk-v2/nrawssdk.go
new file mode 100644
index 000000000..8ff3a8ab6
--- /dev/null
+++ b/v3/integrations/nrawssdk-v2/nrawssdk.go
@@ -0,0 +1,144 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package nrawssdk instruments requests made by the
+// https://github.com/aws/aws-sdk-go-v2 library.
+//
+// For most operations, external segments and spans are automatically created
+// for display in the New Relic UI on the External services section. For
+// DynamoDB operations, datastore segments and spans are created and will be
+// displayed on the Databases page. All operations will also be displayed on
+// transaction traces and distributed traces.
+//
+// To use this integration, simply apply the AppendMiddlewares function to the apiOptions in
+// your AWS Config object before performing any AWS operations. See
+// example/main.go for a working sample.
+package nrawssdk
+
+import (
+	"context"
+	"strconv"
+
+	awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	smithymiddle "github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"github.com/newrelic/go-agent/v3/internal/integrationsupport"
+	"github.com/newrelic/go-agent/v3/newrelic"
+)
+
+type nrMiddleware struct {
+	txn *newrelic.Transaction
+}
+
+type endable interface{ End() }
+
+// See https://aws.github.io/aws-sdk-go-v2/docs/middleware/ for a description of
+// AWS SDK V2 middleware.
+func (m nrMiddleware) deserializeMiddleware(stack *smithymiddle.Stack) error {
+	return stack.Deserialize.Add(smithymiddle.DeserializeMiddlewareFunc("NRDeserializeMiddleware", func(
+		ctx context.Context, in smithymiddle.DeserializeInput, next smithymiddle.DeserializeHandler) (
+		out smithymiddle.DeserializeOutput, metadata smithymiddle.Metadata, err error) {
+
+		txn := m.txn
+		if txn == nil {
+			txn = newrelic.FromContext(ctx)
+		}
+
+		smithyRequest := in.Request.(*smithyhttp.Request)
+
+		// The actual http.Request is inside the smithyhttp.Request
+		httpRequest := smithyRequest.Request
+		serviceName := awsmiddle.GetServiceID(ctx)
+		operation := awsmiddle.GetOperationName(ctx)
+		region := awsmiddle.GetRegion(ctx)
+
+		var segment endable
+		// Service name capitalization is different for v1 and v2.
+		if serviceName == "dynamodb" || serviceName == "DynamoDB" {
+			segment = &newrelic.DatastoreSegment{
+				Product:            newrelic.DatastoreDynamoDB,
+				Collection:         "", // AWS SDK V2 doesn't expose TableName
+				Operation:          operation,
+				ParameterizedQuery: "",
+				QueryParameters:    nil,
+				Host:               httpRequest.URL.Host,
+				PortPathOrID:       httpRequest.URL.Port(),
+				DatabaseName:       "",
+				StartTime:          txn.StartSegmentNow(),
+			}
+		} else {
+			segment = newrelic.StartExternalSegment(txn, httpRequest)
+		}
+
+		// Hand off execution to other middlewares and then perform the request
+		out, metadata, err = next.HandleDeserialize(ctx, in)
+
+		// After the request
+		response, ok := out.RawResponse.(*smithyhttp.Response)
+
+		if ok {
+			// Set additional span attributes
+			integrationsupport.AddAgentSpanAttribute(txn,
+				newrelic.AttributeResponseCode, strconv.Itoa(response.StatusCode))
+			integrationsupport.AddAgentSpanAttribute(txn,
+				newrelic.SpanAttributeAWSOperation, operation)
+			integrationsupport.AddAgentSpanAttribute(txn,
+				newrelic.SpanAttributeAWSRegion, region)
+			requestID, ok := awsmiddle.GetRequestIDMetadata(metadata)
+			if ok {
+				integrationsupport.AddAgentSpanAttribute(txn,
+					newrelic.AttributeAWSRequestID, requestID)
+			}
+		}
+		segment.End()
+		return out, metadata, err
+	}),
+		smithymiddle.Before)
+}
+
+// AppendMiddlewares inserts New Relic middleware in the given `apiOptions` for
+// the AWS SDK V2 for Go. It must be called only once per AWS configuration.
+//
+// If `txn` is provided as nil, the New Relic transaction will be retrieved
+// using `newrelic.FromContext`.
+//
+// Additional attributes will be added to transaction trace segments and span
+// events: aws.region, aws.requestId, and aws.operation. In addition,
+// http.statusCode will be added to span events.
+//
+// To see segments and spans for all AWS invocations, call AppendMiddlewares
+// with the AWS Config `apiOptions` and provide nil for `txn`. For example:
+//
+//	awsConfig, err := config.LoadDefaultConfig(ctx)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	nraws.AppendMiddlewares(&awsConfig.APIOptions, nil)
+//
+// If you do not want the transaction to be retrieved from the context, you can
+// explicitly set `txn`. For example:
+//
+//	awsConfig, err := config.LoadDefaultConfig(ctx)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//
+// ...
+// +// txn := loadNewRelicTransaction() +// nraws.AppendMiddlewares(&awsConfig.APIOptions, txn) +func AppendMiddlewares(apiOptions *[]func(*smithymiddle.Stack) error, txn *newrelic.Transaction) { + m := nrMiddleware{txn: txn} + *apiOptions = append(*apiOptions, m.deserializeMiddleware) +} diff --git a/v3/integrations/nrawssdk-v2/nrawssdk_test.go b/v3/integrations/nrawssdk-v2/nrawssdk_test.go new file mode 100644 index 000000000..79b1f389a --- /dev/null +++ b/v3/integrations/nrawssdk-v2/nrawssdk_test.go @@ -0,0 +1,580 @@ +// Copyright 2020 New Relic Corporation. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package nrawssdk + +import ( + "bytes" + "context" + "errors" + "io/ioutil" + "net/http" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/lambda" + "github.com/aws/aws-sdk-go-v2/service/lambda/types" + "github.com/newrelic/go-agent/v3/internal" + "github.com/newrelic/go-agent/v3/internal/integrationsupport" + "github.com/newrelic/go-agent/v3/newrelic" +) + +func testApp() integrationsupport.ExpectApp { + return integrationsupport.NewTestApp(integrationsupport.SampleEverythingReplyFn, integrationsupport.DTEnabledCfgFn, newrelic.ConfigCodeLevelMetricsEnabled(false)) +} + +type fakeTransport struct{} + +func (t fakeTransport) RoundTrip(r *http.Request) (*http.Response, error) { + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + Header: http.Header{ + "X-Amzn-Requestid": []string{requestID}, + }, + }, nil +} + +type fakeCredsWithoutContext struct{} + +func (c fakeCredsWithoutContext) Retrieve() (aws.Credentials, error) { + return aws.Credentials{}, nil +} + +type fakeCredsWithContext struct{} + +func (c fakeCredsWithContext) Retrieve(ctx context.Context) (aws.Credentials, error) { + return aws.Credentials{}, nil +} + +var fakeCreds = func() interface{} { + var c interface{} = fakeCredsWithoutContext{} + if _, ok := c.(aws.CredentialsProvider); ok { + return c + } + return fakeCredsWithContext{} +}() + +func newConfig(ctx context.Context, txn *newrelic.Transaction) aws.Config { + cfg, _ := config.LoadDefaultConfig(ctx) + cfg.Credentials = fakeCreds.(aws.CredentialsProvider) + cfg.Region = awsRegion + cfg.HTTPClient = &http.Client{ + Transport: &fakeTransport{}, + } + + AppendMiddlewares(&cfg.APIOptions, txn) + + return cfg +} + +const ( + requestID = "testing request id" + txnName = "aws-txn" + awsRegion = "us-west-2" +) + +var ( + genericSpan = internal.WantEvent{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/" + txnName, + "transaction.name": "OtherTransaction/Go/" + txnName, + "sampled": true, + "category": "generic", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "nr.entryPoint": true, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + } + externalSpan = internal.WantEvent{ + Intrinsics: map[string]interface{}{ + "name": "External/lambda.us-west-2.amazonaws.com/http/POST", + "sampled": true, + "category": "http", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "traceId": internal.MatchAnything, + "parentId": internal.MatchAnything, + "component": "http", + "span.kind": "client", + }, + 
UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.operation": "Invoke", + "aws.region": awsRegion, + "aws.requestId": requestID, + "http.method": "POST", + "http.url": "https://lambda.us-west-2.amazonaws.com/2015-03-31/functions/non-existent-function/invocations", + "http.statusCode": "200", + }, + } + externalSpanNoRequestID = internal.WantEvent{ + Intrinsics: map[string]interface{}{ + "name": "External/lambda.us-west-2.amazonaws.com/http/POST", + "sampled": true, + "category": "http", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "traceId": internal.MatchAnything, + "parentId": internal.MatchAnything, + "component": "http", + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.operation": "Invoke", + "aws.region": awsRegion, + "http.method": "POST", + "http.url": "https://lambda.us-west-2.amazonaws.com/2015-03-31/functions/non-existent-function/invocations", + "http.statusCode": "200", + }, + } + datastoreSpan = internal.WantEvent{ + Intrinsics: map[string]interface{}{ + "name": "Datastore/operation/DynamoDB/DescribeTable", + "sampled": true, + "category": "datastore", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "traceId": internal.MatchAnything, + "parentId": internal.MatchAnything, + "component": "DynamoDB", + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.operation": "DescribeTable", + "aws.region": awsRegion, + "aws.requestId": requestID, + "db.statement": "'DescribeTable' on 'unknown' using 'DynamoDB'", + "peer.address": "dynamodb.us-west-2.amazonaws.com:unknown", + "peer.hostname": "dynamodb.us-west-2.amazonaws.com", + "http.statusCode": "200", + }, + } + txnMetrics = []internal.WantMetric{ + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransaction/Go/" + txnName, Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/" + txnName, Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + } + externalMetrics = append(txnMetrics, []internal.WantMetric{ + {Name: "External/all", Scope: "", Forced: true, Data: nil}, + {Name: "External/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "External/lambda.us-west-2.amazonaws.com/all", Scope: "", Forced: false, Data: nil}, + {Name: "External/lambda.us-west-2.amazonaws.com/http/POST", Scope: "OtherTransaction/Go/" + txnName, Forced: false, Data: nil}, + }...) 
+ datastoreMetrics = append(txnMetrics, []internal.WantMetric{ + {Name: "Datastore/DynamoDB/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/DynamoDB/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/instance/DynamoDB/dynamodb.us-west-2.amazonaws.com/unknown", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/operation/DynamoDB/DescribeTable", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/operation/DynamoDB/DescribeTable", Scope: "OtherTransaction/Go/aws-txn", Forced: false, Data: nil}, + }...) +) + +type testTableEntry struct { + Name string + + BuildContext func(txn *newrelic.Transaction) context.Context + BuildConfig func(ctx context.Context, txn *newrelic.Transaction) aws.Config +} + +func runTestTable(t *testing.T, table []*testTableEntry, executeEntry func(t *testing.T, entry *testTableEntry)) { + for _, entry := range table { + entry := entry // Pin range variable + + t.Run(entry.Name, func(t *testing.T) { + executeEntry(t, entry) + }) + } +} + +func TestInstrumentRequestExternal(t *testing.T) { + runTestTable(t, + []*testTableEntry{ + { + Name: "with manually set transaction", + + BuildContext: func(txn *newrelic.Transaction) context.Context { + return context.Background() + }, + BuildConfig: newConfig, + }, + { + Name: "with transaction set in context", + + BuildContext: func(txn *newrelic.Transaction) context.Context { + return newrelic.NewContext(context.Background(), txn) + }, + BuildConfig: func(ctx context.Context, txn *newrelic.Transaction) aws.Config { + return newConfig(ctx, nil) // Set txn to nil to ensure transaction is retrieved from the context + }, + }, + }, + + func(t *testing.T, entry *testTableEntry) { + app := testApp() + txn := app.StartTransaction(txnName) + ctx := entry.BuildContext(txn) + + client := lambda.NewFromConfig(entry.BuildConfig(ctx, txn)) + + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: types.InvocationTypeRequestResponse, + LogType: types.LogTypeTail, + Payload: []byte("{}"), + } + + _, err := client.Invoke(ctx, input) + if err != nil { + t.Error(err) + } + + txn.End() + + app.ExpectMetrics(t, externalMetrics) + app.ExpectSpanEvents(t, []internal.WantEvent{ + externalSpan, genericSpan}) + }, + ) +} + +func TestInstrumentRequestDatastore(t *testing.T) { + runTestTable(t, + []*testTableEntry{ + { + Name: "with manually set transaction", + + BuildContext: func(txn *newrelic.Transaction) context.Context { + return context.Background() + }, + BuildConfig: newConfig, + }, + { + Name: "with transaction set in context", + + BuildContext: func(txn *newrelic.Transaction) context.Context { + return newrelic.NewContext(context.Background(), txn) + }, + BuildConfig: func(ctx context.Context, txn *newrelic.Transaction) aws.Config { + return newConfig(ctx, nil) // Set txn to nil to ensure transaction is retrieved from the context + }, + }, + }, + + func(t *testing.T, entry *testTableEntry) { + app := testApp() + txn := app.StartTransaction(txnName) + ctx := entry.BuildContext(txn) + + client := dynamodb.NewFromConfig(entry.BuildConfig(ctx, txn)) + + input := &dynamodb.DescribeTableInput{ + TableName: aws.String("thebesttable"), + } + + _, err := client.DescribeTable(ctx, input) + if err != nil { + t.Error(err) + } + + txn.End() + + app.ExpectMetrics(t, datastoreMetrics) + 
app.ExpectSpanEvents(t, []internal.WantEvent{ + datastoreSpan, genericSpan}) + }, + ) +} + +type firstFailingTransport struct { + failing bool +} + +func (t *firstFailingTransport) RoundTrip(r *http.Request) (*http.Response, error) { + if t.failing { + t.failing = false + return nil, errors.New("Oops this failed") + } + + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + Header: http.Header{ + "X-Amzn-Requestid": []string{requestID}, + }, + }, nil +} + +func TestRetrySend(t *testing.T) { + runTestTable(t, + []*testTableEntry{ + { + Name: "with manually set transaction", + + BuildContext: func(txn *newrelic.Transaction) context.Context { + return context.Background() + }, + BuildConfig: newConfig, + }, + { + Name: "with transaction set in context", + + BuildContext: func(txn *newrelic.Transaction) context.Context { + return newrelic.NewContext(context.Background(), txn) + }, + BuildConfig: func(ctx context.Context, txn *newrelic.Transaction) aws.Config { + return newConfig(ctx, nil) // Set txn to nil to ensure transaction is retrieved from the context + }, + }, + }, + + func(t *testing.T, entry *testTableEntry) { + app := testApp() + txn := app.StartTransaction(txnName) + ctx := entry.BuildContext(txn) + + cfg := entry.BuildConfig(ctx, txn) + + cfg.HTTPClient = &http.Client{ + Transport: &firstFailingTransport{failing: true}, + } + + customRetry := retry.NewStandard(func(o *retry.StandardOptions) { + o.MaxAttempts = 2 + }) + client := lambda.NewFromConfig(cfg, func(o *lambda.Options) { + o.Retryer = customRetry + }) + + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: types.InvocationTypeRequestResponse, + LogType: types.LogTypeTail, + Payload: []byte("{}"), + } + + _, err := client.Invoke(ctx, input) + if err != nil { + t.Error(err) + } + + txn.End() + + app.ExpectMetrics(t, externalMetrics) + + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "name": "External/lambda.us-west-2.amazonaws.com/http/POST", + "sampled": true, + "category": "http", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "traceId": internal.MatchAnything, + "parentId": internal.MatchAnything, + "component": "http", + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.operation": "Invoke", + "aws.region": awsRegion, + "http.method": "POST", + "http.url": "https://lambda.us-west-2.amazonaws.com/2015-03-31/functions/non-existent-function/invocations", + "http.statusCode": "0", + }, + }, { + Intrinsics: map[string]interface{}{ + "name": "External/lambda.us-west-2.amazonaws.com/http/POST", + "sampled": true, + "category": "http", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "traceId": internal.MatchAnything, + "parentId": internal.MatchAnything, + "component": "http", + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.operation": "Invoke", + "aws.region": awsRegion, + "aws.requestId": requestID, + "http.method": "POST", + "http.url": "https://lambda.us-west-2.amazonaws.com/2015-03-31/functions/non-existent-function/invocations", + "http.statusCode": "200", + }, + }, { + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/" + txnName, 
+ "transaction.name": "OtherTransaction/Go/" + txnName, + "sampled": true, + "category": "generic", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "nr.entryPoint": true, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }}) + }, + ) +} + +func TestRequestSentTwice(t *testing.T) { + runTestTable(t, + []*testTableEntry{ + { + Name: "with manually set transaction", + + BuildContext: func(txn *newrelic.Transaction) context.Context { + return context.Background() + }, + BuildConfig: newConfig, + }, + { + Name: "with transaction set in context", + + BuildContext: func(txn *newrelic.Transaction) context.Context { + return newrelic.NewContext(context.Background(), txn) + }, + BuildConfig: func(ctx context.Context, txn *newrelic.Transaction) aws.Config { + return newConfig(ctx, nil) // Set txn to nil to ensure transaction is retrieved from the context + }, + }, + }, + + func(t *testing.T, entry *testTableEntry) { + app := testApp() + txn := app.StartTransaction(txnName) + ctx := entry.BuildContext(txn) + + client := lambda.NewFromConfig(entry.BuildConfig(ctx, txn)) + + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: types.InvocationTypeRequestResponse, + LogType: types.LogTypeTail, + Payload: []byte("{}"), + } + + _, firstErr := client.Invoke(ctx, input) + if firstErr != nil { + t.Error(firstErr) + } + + _, secondErr := client.Invoke(ctx, input) + if secondErr != nil { + t.Error(secondErr) + } + + txn.End() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "External/all", Scope: "", Forced: true, Data: []float64{2}}, + {Name: "External/allOther", Scope: "", Forced: true, Data: []float64{2}}, + {Name: "External/lambda.us-west-2.amazonaws.com/all", Scope: "", Forced: false, Data: []float64{2}}, + {Name: "External/lambda.us-west-2.amazonaws.com/http/POST", Scope: "OtherTransaction/Go/" + txnName, Forced: false, Data: []float64{2}}, + {Name: "OtherTransaction/Go/" + txnName, Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/" + txnName, Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + externalSpan, externalSpan, genericSpan}) + }, + ) +} + +type noRequestIDTransport struct{} + +func (t *noRequestIDTransport) RoundTrip(r *http.Request) (*http.Response, error) { + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil +} + +func TestNoRequestIDFound(t *testing.T) { + runTestTable(t, + []*testTableEntry{ + { + Name: "with manually set transaction", + + BuildContext: func(txn *newrelic.Transaction) context.Context { + return context.Background() + }, + BuildConfig: newConfig, + }, + { + Name: "with transaction set in context", + + BuildContext: func(txn *newrelic.Transaction) context.Context { + return newrelic.NewContext(context.Background(), txn) + }, + BuildConfig: func(ctx context.Context, txn *newrelic.Transaction) aws.Config { + return newConfig(ctx, nil) // Set txn 
to nil to ensure transaction is retrieved from the context + }, + }, + }, + + func(t *testing.T, entry *testTableEntry) { + app := testApp() + txn := app.StartTransaction(txnName) + ctx := entry.BuildContext(txn) + + cfg := entry.BuildConfig(ctx, txn) + cfg.HTTPClient = &http.Client{ + Transport: &noRequestIDTransport{}, + } + client := lambda.NewFromConfig(cfg) + + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: types.InvocationTypeRequestResponse, + LogType: types.LogTypeTail, + Payload: []byte("{}"), + } + _, err := client.Invoke(ctx, input) + if err != nil { + t.Error(err) + } + + txn.End() + + app.ExpectMetrics(t, externalMetrics) + app.ExpectSpanEvents(t, []internal.WantEvent{ + externalSpanNoRequestID, genericSpan}) + }, + ) +} From 0c2f107a1f2e000ca35eebaf74630420a3ae1d45 Mon Sep 17 00:00:00 2001 From: Steve Willoughby Date: Thu, 21 Mar 2024 04:33:34 -0700 Subject: [PATCH 10/38] fixed high security vs. ai monitoring --- v3/newrelic/config_options.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/v3/newrelic/config_options.go b/v3/newrelic/config_options.go index 875afa494..082b46d83 100644 --- a/v3/newrelic/config_options.go +++ b/v3/newrelic/config_options.go @@ -244,15 +244,9 @@ func ConfigAppLogDecoratingEnabled(enabled bool) ConfigOption { } // ConfigAIMonitoringEnabled enables or disables the collection of AI Monitoring event data. -// Note that if HighSecurity is enabled, AI Monitoring will automatically be disabled. In this -// case you MUST enable HighSecurity BEFORE calling ConfigAIMonitoringEnabled. func ConfigAIMonitoringEnabled(enabled bool) ConfigOption { return func(cfg *Config) { - if enabled && !cfg.HighSecurity { - cfg.AIMonitoring.Enabled = true - } else { - cfg.AIMonitoring.Enabled = false - } + cfg.AIMonitoring.Enabled = enabled } } @@ -460,9 +454,6 @@ func configFromEnvironment(getenv func(string) string) ConfigOption { assignBool(&cfg.AIMonitoring.Enabled, "NEW_RELIC_AI_MONITORING_ENABLED") assignBool(&cfg.AIMonitoring.Streaming.Enabled, "NEW_RELIC_AI_MONITORING_STREAMING_ENABLED") assignBool(&cfg.AIMonitoring.RecordContent.Enabled, "NEW_RELIC_AI_MONITORING_RECORD_CONTENT_ENABLED") - if cfg.HighSecurity { - cfg.AIMonitoring.Enabled = false - } if env := getenv("NEW_RELIC_LABELS"); env != "" { if labels := getLabels(getenv("NEW_RELIC_LABELS")); len(labels) > 0 { From 75d681e107075178e8d0d04f1c3de7014f5cc1b0 Mon Sep 17 00:00:00 2001 From: Steve Willoughby Date: Thu, 21 Mar 2024 12:25:35 -0700 Subject: [PATCH 11/38] tweaks after testing --- v3/integrations/nrawsbedrock/example/main.go | 11 +- v3/integrations/nrawsbedrock/nrawsbedrock.go | 130 ++++++++++++++----- 2 files changed, 104 insertions(+), 37 deletions(-) diff --git a/v3/integrations/nrawsbedrock/example/main.go b/v3/integrations/nrawsbedrock/example/main.go index 813ec3568..12b6d5e4b 100644 --- a/v3/integrations/nrawsbedrock/example/main.go +++ b/v3/integrations/nrawsbedrock/example/main.go @@ -16,7 +16,7 @@ import ( "github.com/newrelic/go-agent/v3/newrelic" ) -const region = "us-east-2" +const region = "us-east-1" func main() { sdkConfig, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region)) @@ -32,7 +32,7 @@ func main() { fmt.Println("no models found") } for _, modelSummary := range result.ModelSummaries { - fmt.Printf("Name: %30s | Provider: %20s | ID: %s\n", *modelSummary.ModelName, *modelSummary.ProviderName, *modelSummary.ModelId) + fmt.Printf("Name: %-30s 
| Provider: %-20s | ID: %s\n", *modelSummary.ModelName, *modelSummary.ProviderName, *modelSummary.ModelId) } // Create a New Relic application. This will look for your license key in an @@ -58,17 +58,22 @@ func main() { // Start recording a New Relic transaction txn := app.StartTransaction("My sample transaction") + contentType := "application/json" model := "amazon.titan-text-lite-v1" //model := "amazon.titan-embed-g1-text-02" //model := "amazon.titan-text-express-v1" brc := bedrockruntime.NewFromConfig(sdkConfig) //output, err := brc.InvokeModel(context.Background(), &bedrockruntime.InvokeModelInput{ output, err := nrawsbedrock.InvokeModel(app, brc, context.Background(), &bedrockruntime.InvokeModelInput{ + ContentType: &contentType, + Accept: &contentType, Body: []byte(`{ "inputText": "What is your quest?", "textGenerationConfig": { "temperature": 0.5, - "maxTokenCount": 100 + "maxTokenCount": 100, + "stopSequences": [], + "topP": 1 } }`), ModelId: &model, diff --git a/v3/integrations/nrawsbedrock/nrawsbedrock.go b/v3/integrations/nrawsbedrock/nrawsbedrock.go index c1d80a5e1..7ddd5e2b6 100644 --- a/v3/integrations/nrawsbedrock/nrawsbedrock.go +++ b/v3/integrations/nrawsbedrock/nrawsbedrock.go @@ -59,7 +59,7 @@ func init() { } } } - internal.TrackUsage("Go", "ML", "Bedrock", "unknown") + internal.TrackUsage("Go", "ML", "Bedrock", "0.0.0") } // @@ -159,15 +159,22 @@ func CompleteStreamResponse() {} // If the transaction is unable to be created or used, the Bedrock call will be made anyway, without instrumentation. // func InvokeModel(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, params *bedrockruntime.InvokeModelInput, optFns ...func(*bedrockruntime.Options)) (*bedrockruntime.InvokeModelOutput, error) { - var txn *newrelic.Transaction // the transaction to record in, or nil if we aren't instrumenting this time + return InvokeModelWithAttributes(app, brc, ctx, params, nil, optFns...) +} - // spanID := txn.GetTraceMetadata().SpanID - // traceID := txn.GetTraceMetadata().TraceID - // transactionID := traceID[:16] - // uuid := uuid.New() +// +// InvokeModelWithAttributes is identical to InvokeModel except for the addition of the attrs parameter, which is a +// map of strings to values of any type. This map holds any custom attributes you wish to add to the reported metrics +// relating to this model invocation. +// +// Each key in the attrs map must begin with "llm."; if any of them do not, "llm." is automatically prepended to +// the attribute key before the metrics are sent out. +// +// We recommend including at least "llm.conversation_id" in your attributes. 
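+//
+// For example (a sketch only; the llm.conversation_id key is the one recommended
+// above, while the other attribute name and values here are purely illustrative):
+//
+//	output, err := nrawsbedrock.InvokeModelWithAttributes(app, brc, ctx, params,
+//		map[string]any{
+//			"llm.conversation_id": "session-42",
+//			"flavor":              "vanilla", // will be reported as llm.flavor
+//		})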
+// +func InvokeModelWithAttributes(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, params *bedrockruntime.InvokeModelInput, attrs map[string]any, optFns ...func(*bedrockruntime.Options)) (*bedrockruntime.InvokeModelOutput, error) { + var txn *newrelic.Transaction // the transaction to record in, or nil if we aren't instrumenting this time - // params: .Body []byte .ModelId *string .Accept *string ("application/json") .ContentType *string ("application/json") - // output: .Body []byte .ContentType *string .ResultMetadata middleware.Metadata (.Get(key any)->any|nil, .Has(key any)->bool, .Set(key, value any) aiEnabled, recordContentEnabled := isEnabled(app, false) if aiEnabled { txn = newrelic.FromContext(ctx) @@ -229,7 +236,7 @@ func InvokeModel(app *newrelic.Application, brc *bedrockruntime.Client, ctx cont // record with our instrumentation var requestData, responseData map[string]any - var inputString string + var inputString, outputString string if params.Body != nil && json.Unmarshal(params.Body, &requestData) == nil { if recordContentEnabled { @@ -281,47 +288,85 @@ func InvokeModel(app *newrelic.Application, brc *bedrockruntime.Client, ctx cont if s, ok := responseData["stop_reason"]; ok { stopReason, _ = s.(string) - } else if rs, ok := responseData["results"]; ok { + } + + if out, ok := responseData["completion"]; ok { + outputString, _ = out.(string) + } + + // TODO only collecting last entry of these result sets + if rs, ok := responseData["results"]; ok { if crs, ok := rs.([]map[string]any); ok { for _, crv := range crs { if reason, ok := crv["completionReason"]; ok { stopReason, _ = reason.(string) - break + } + if out, ok := crv["outputText"]; ok { + outputString, _ = out.(string) } } } } - if stopReason == "" { - if rs, ok := responseData["outputs"]; ok { - if crs, ok := rs.([]map[string]any); ok { - for _, crv := range crs { - if reason, ok := crv["stop_reason"]; ok { - stopReason, _ = reason.(string) - break + if rs, ok := responseData["completions"]; ok { + if crs, ok := rs.([]map[string]any); ok { + for _, crv := range crs { + if cdata, ok := crv["data"]; ok { + if cdatamap, ok := cdata.(map[string]string); ok { + if out, ok := cdatamap["text"]; ok { + outputString = out + } } } } } } - if stopReason == "" { - if rs, ok := responseData["generations"]; ok { - if crs, ok := rs.([]map[string]any); ok { - for _, crv := range crs { - if reason, ok := crv["finish_reason"]; ok { - stopReason, _ = reason.(string) - break - } + if rs, ok := responseData["outputs"]; ok { + if crs, ok := rs.([]map[string]any); ok { + for _, crv := range crs { + if reason, ok := crv["stop_reason"]; ok { + stopReason, _ = reason.(string) + break + } + if out, ok := crv["text"]; ok { + outputString, _ = out.(string) } } } } + if rs, ok := responseData["generations"]; ok { + if crs, ok := rs.([]map[string]any); ok { + for _, crv := range crs { + if reason, ok := crv["finish_reason"]; ok { + stopReason, _ = reason.(string) + } + if out, ok := crv["text"]; ok { + outputString, _ = out.(string) + } + } + } + } + if outputString == "" { + if out, ok := responseData["generation"]; ok { + outputString, _ = out.(string) + } + } } } - /* ESC "llm.*" */ - var userCount int + if attrs != nil { + for k, v := range attrs { + if strings.HasPrefix(k, "llm.") { + meta[k] = v + } else { + meta["llm."+k] = v + } + } + } + + var userCount, outputCount int if app.HasLLMTokenCountCallback() { userCount, _ = app.InvokeLLMTokenCountCallback(*params.ModelId, inputString) + outputCount, _ = 
app.InvokeLLMTokenCountCallback(*params.ModelId, outputString) } if embedding { @@ -336,6 +381,7 @@ func InvokeModel(app *newrelic.Application, brc *bedrockruntime.Client, ctx cont if stopReason != "" { meta["response.choices.finish_reason"] = stopReason } + meta["response.number_of_messages"] = 2 app.RecordCustomEvent("LlmChatCompletionSummary", meta) delete(meta, "duration") if userCount > 0 { @@ -344,12 +390,28 @@ func InvokeModel(app *newrelic.Application, brc *bedrockruntime.Client, ctx cont if inputString != "" { meta["content"] = inputString } - /* - C "role" //role of creator - C "sequence" //0.. - C "completion_id" // id of S event that this is connected to - C "is_response" // if result of chat completion and not input message - */ + + // move the id field from the summary to completion_id in the messages + meta["completion_id"] = meta["id"] + delete(meta, "id") + delete(meta, "response.number_of_messages") + meta["sequence"] = 0 + meta["role"] = "user" + app.RecordCustomEvent("LlmChatCompletionMessage", meta) + + meta["sequence"] = 1 + meta["role"] = "assistant" + meta["is_response"] = true + if outputString != "" { + meta["content"] = outputString + } else { + delete(meta, "content") + } + if outputCount > 0 { + meta["token_count"] = outputCount + } else { + delete(meta, "token_count") + } app.RecordCustomEvent("LlmChatCompletionMessage", meta) } } From b6c1693b331e6a0970032a9eb619ef87da2f938b Mon Sep 17 00:00:00 2001 From: Steve Willoughby Date: Thu, 21 Mar 2024 13:37:17 -0700 Subject: [PATCH 12/38] stubbed out revised streaming functions --- v3/integrations/nrawsbedrock/nrawsbedrock.go | 102 ++++++++++++++----- 1 file changed, 75 insertions(+), 27 deletions(-) diff --git a/v3/integrations/nrawsbedrock/nrawsbedrock.go b/v3/integrations/nrawsbedrock/nrawsbedrock.go index 7ddd5e2b6..6ebda1394 100644 --- a/v3/integrations/nrawsbedrock/nrawsbedrock.go +++ b/v3/integrations/nrawsbedrock/nrawsbedrock.go @@ -26,6 +26,7 @@ package nrawsbedrock import ( "context" "encoding/json" + "fmt" "runtime/debug" "strings" "sync" @@ -98,51 +99,98 @@ type ResponseStream struct { params *bedrockruntime.InvokeModelInput // The model output - response *bedrockruntime.InvokeModelWithResponseStreamOutput + Response *bedrockruntime.InvokeModelWithResponseStreamOutput } // -// InvokeModelWithResponseStream works as InvokeModel does, but the response is returned as a stream. -// In summary, given a bedrockruntime.Client b, where you would normally call the AWS method -// b.InvokeModelWithResponseStream(c, p, f...) +// InvokeModelWithResponseStream invokes a model but unlike the InvokeModel method, the data returned +// is a stream of multiple events instead of a single response value. +// This function is the analogue of the bedrockruntime library InvokeModelWithResponseStream function, +// so that, given a bedrockruntime.Client b, where you would normally call the AWS method +// response, err := b.InvokeModelWithResponseStream(c, p, f...) // You instead invoke the New Relic InvokeModelWithResponseStream function as: -// nrbedrock.InvokeModelWithResponseStream(app, b, c, p, f...) +// rstream, err := nrbedrock.InvokeModelWithResponseStream(app, b, c, p, f...) // where app is your New Relic Application value. 
// +// If using the bedrockruntime library directly, you would then process the response stream value +// (the response variable in the above example), iterating over the provided channel where the stream +// data appears until it is exhausted, and then calling Close() on the stream (see the bedrock API +// documentation for details). +// +// When using the New Relic nrawsbedrock integration, this response value is available as +// rstream.Response. You would perform the same operations as you would directly with the bedrock API +// once you have that value. +// Since this means control has passed back to your code for processing of the stream data, you need to +// add instrumentation calls to your processing code: +// rstream.RecordEvent(content) // for each event received from the stream +// rstream.Close() // when you are finished and are going to close the stream +// +// However, see ProcessModelWithResponseStream for an easier alternative. +// // Either start a transaction on your own and add it to the context c passed into this function, or // a transaction will be started for you that lasts only for the duration of the model invocation. // -// You may elect to have our function collect the stream's output for you, instrumenting the operations -// along the way. To do this, pass a callback function with the invocation signature -// callback(app *newrelic.Application, txn *newrelic.Transaction, ctx context.Context, part []byte) error -// This will be called for every event read from the response stream, allowing your application -// to collect and use that streamed data as it arrives. If your callback function returns a non-nil error -// value, the stream reading will terminate immediately. +func InvokeModelWithResponseStream(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, params *bedrockruntime.InvokeModelInput, optFns ...func(*bedrockruntime.Options)) (ResponseStream, error) { + return InvokeModelWithResponseStreamAttributes(app, brc, ctx, params, nil, optFns...) +} + +// +// InvokeModelWithResponseStreamAttributes is identical to InvokeModelWithResponseStream except that +// it adds the attrs parameter, which is a +// map of strings to values of any type. This map holds any custom attributes you wish to add to the reported metrics +// relating to this model invocation. +// +// Each key in the attrs map must begin with "llm."; if any of them do not, "llm." is automatically prepended to +// the attribute key before the metrics are sent out. // -// Alternatively, if a nil value is passed for the callback function, we will assume that you want to do all -// the stream reading yourself. In that case you will need to make calls to the following functions as you -// read data in order for the instrumentation to function correctly. +// We recommend including at least "llm.conversation_id" in your attributes. // -// RecordStreamResponseEvent(app, txn, ctx, part) -// for every part read from the output stream. +func InvokeModelWithResponseStreamAttributes(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, params *bedrockruntime.InvokeModelInput, attrs map[string]any, optFns ...func(*bedrockruntime.Options)) (ResponseStream, error) { + return ResponseStream{}, fmt.Errorf("not implemented") +} + // -// CompleteStreamResponse(app, txn, ctx) -// after reading all of the stream data you will be reading. This finishes up the instrumentation -// for the model invocation. 
+//
+// RecordEvent records a single stream event as read from the data stream started by InvokeModelWithResponseStream.
+//
+func (s *ResponseStream) RecordEvent(data []byte) error {
+	return fmt.Errorf("not implemented")
+}
+
+//
+// Close finishes up the instrumentation for a response stream.
+//
+func (s *ResponseStream) Close() error {
+	return fmt.Errorf("not implemented")
+}
+
-func InvokeModelWithResponseStream(app *newrelic.Application, callback func(*newrelic.Application, *newrelic.Transaction, context.Context, []byte) error, brc *bedrockruntime.Client, ctx context.Context, params *bedrockruntime.InvokeModelInput, optFns ...func(*bedrockruntime.Options)) (*bedrockruntime.InvokeModelWithResponseStreamOutput, error) {
+//
+// ProcessModelWithResponseStream works just like InvokeModelWithResponseStream, except that
+// it handles all the stream processing automatically for you. For each event received from
+// the response stream, it will invoke the callback function you pass into the function call
+// so that your application can act on the response data. When the stream is complete, the
+// ProcessModelWithResponseStream call will return.
+//
+// If your callback function returns an error, the processing of the response stream will
+// terminate at that point.
+//
+func ProcessModelWithResponseStream(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, callback func([]byte) error, params *bedrockruntime.InvokeModelInput, optFns ...func(*bedrockruntime.Options)) error {
+	return ProcessModelWithResponseStreamAttributes(app, brc, ctx, callback, params, nil, optFns...)
+}
 
-func RecordStreamResponseEvent() {}
-func CompleteStreamResponse() {}
-*/
+//
+// ProcessModelWithResponseStreamAttributes is identical to ProcessModelWithResponseStream except that
+// it adds the attrs parameter, which is a
+// map of strings to values of any type. This map holds any custom attributes you wish to add to the reported metrics
+// relating to this model invocation.
+//
+// Each key in the attrs map must begin with "llm."; if any of them do not, "llm." is automatically prepended to
+// the attribute key before the metrics are sent out.
+//
+// We recommend including at least "llm.conversation_id" in your attributes.
+//
+func ProcessModelWithResponseStreamAttributes(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, callback func([]byte) error, params *bedrockruntime.InvokeModelInput, attrs map[string]any, optFns ...func(*bedrockruntime.Options)) error {
+	return fmt.Errorf("not implemented")
+}
 
 //
 // InvokeModel provides an instrumented interface through which to call the AWS Bedrock InvokeModel function. 
From c411bcd813876eb0568ea9fedfbb09ac5bd2cfae Mon Sep 17 00:00:00 2001 From: Steve Willoughby Date: Fri, 22 Mar 2024 23:45:47 -0700 Subject: [PATCH 13/38] refined json data payload search algorithm --- v3/integrations/nrawsbedrock/example/main.go | 135 +++++- v3/integrations/nrawsbedrock/go.mod | 8 +- v3/integrations/nrawsbedrock/nrawsbedrock.go | 467 +++++++++++++------ 3 files changed, 448 insertions(+), 162 deletions(-) diff --git a/v3/integrations/nrawsbedrock/example/main.go b/v3/integrations/nrawsbedrock/example/main.go index 12b6d5e4b..ce7c5ea1c 100644 --- a/v3/integrations/nrawsbedrock/example/main.go +++ b/v3/integrations/nrawsbedrock/example/main.go @@ -9,9 +9,11 @@ import ( "os" "time" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/bedrock" "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" + "github.com/aws/aws-sdk-go-v2/service/bedrockruntime/types" "github.com/newrelic/go-agent/v3/integrations/nrawsbedrock" "github.com/newrelic/go-agent/v3/newrelic" ) @@ -23,17 +25,6 @@ func main() { if err != nil { panic(err) } - bedrockClient := bedrock.NewFromConfig(sdkConfig) - result, err := bedrockClient.ListFoundationModels(context.TODO(), &bedrock.ListFoundationModelsInput{}) - if err != nil { - panic(err) - } - if len(result.ModelSummaries) == 0 { - fmt.Println("no models found") - } - for _, modelSummary := range result.ModelSummaries { - fmt.Printf("Name: %-30s | Provider: %-20s | ID: %s\n", *modelSummary.ModelName, *modelSummary.ProviderName, *modelSummary.ModelId) - } // Create a New Relic application. This will look for your license key in an // environment variable called NEW_RELIC_LICENSE_KEY. This example turns on @@ -55,16 +46,43 @@ func main() { // doesn't block or exit if there's an error. app.WaitForConnection(5 * time.Second) + listModels(sdkConfig) + + brc := bedrockruntime.NewFromConfig(sdkConfig) + simpleChatCompletion(app, brc) + //processedChatCompletionStream(app, brc) + manualChatCompletionStream(app, brc) + + app.Shutdown(10 * time.Second) +} + +func listModels(sdkConfig aws.Config) { + bedrockClient := bedrock.NewFromConfig(sdkConfig) + result, err := bedrockClient.ListFoundationModels(context.TODO(), &bedrock.ListFoundationModelsInput{}) + if err != nil { + panic(err) + } + if len(result.ModelSummaries) == 0 { + fmt.Println("no models found") + } + for _, modelSummary := range result.ModelSummaries { + fmt.Printf("Name: %-30s | Provider: %-20s | ID: %s\n", *modelSummary.ModelName, *modelSummary.ProviderName, *modelSummary.ModelId) + } +} + +func simpleChatCompletion(app *newrelic.Application, brc *bedrockruntime.Client) { // Start recording a New Relic transaction - txn := app.StartTransaction("My sample transaction") + txn := app.StartTransaction("demo-chat-completion") contentType := "application/json" model := "amazon.titan-text-lite-v1" - //model := "amazon.titan-embed-g1-text-02" - //model := "amazon.titan-text-express-v1" - brc := bedrockruntime.NewFromConfig(sdkConfig) - //output, err := brc.InvokeModel(context.Background(), &bedrockruntime.InvokeModelInput{ - output, err := nrawsbedrock.InvokeModel(app, brc, context.Background(), &bedrockruntime.InvokeModelInput{ + // + // without nrawsbedrock instrumentation, the call to invoke the model would be: + // output, err := brc.InvokeModel(context.Background(), &bedrockruntime.InvokeModelInput{ + // ... 
+	// })
+	//
+	output, err := nrawsbedrock.InvokeModel(app, brc, newrelic.NewContext(context.Background(), txn), &bedrockruntime.InvokeModelInput{
 		ContentType: &contentType,
 		Accept:      &contentType,
 		Body: []byte(`{
@@ -78,17 +96,84 @@ func main() {
 		}`),
 		ModelId: &model,
 	})
+
+	txn.End()
+
 	if err != nil {
 		fmt.Printf("error: %v\n", err)
-	} else {
-		fmt.Printf("%v\n", output)
+		return
 	}
 
-	// End the New Relic transaction
-	txn.End()
+	fmt.Printf("Result: %v\n", string(output.Body))
+
+}
+
+//
+// This example shows a stream invocation where we let the nrawsbedrock integration retrieve
+// all the stream output for us.
+//
+func processedChatCompletionStream(app *newrelic.Application, brc *bedrockruntime.Client) {
+	contentType := "application/json"
+	model := "anthropic.claude-v2"
+
+	err := nrawsbedrock.ProcessModelWithResponseStreamAttributes(app, brc, context.Background(), func(data []byte) error {
+		fmt.Printf(">>> Received %s\n", string(data))
+		return nil
+	}, &bedrockruntime.InvokeModelInput{
+		ModelId:     &model,
+		ContentType: &contentType,
+		Accept:      &contentType,
+		Body: []byte(`{
+			"prompt": "Human: Tell me a story.\n\nAssistant:",
+			"max_tokens_to_sample": 200,
+			"temperature": 0.5,
+			"stop_sequences": ["\n\nAssistant:"]
+		}`),
+	}, map[string]any{
+		"llm.what_is_this": "processed stream invocation",
+	})
+
+	if err != nil {
+		fmt.Printf("ERROR processing model: %v\n", err)
+	}
+}
+
+//
+// This example shows a stream invocation where we manually process the retrieval
+// of the stream output.
+//
+func manualChatCompletionStream(app *newrelic.Application, brc *bedrockruntime.Client) {
+	contentType := "application/json"
+	model := "anthropic.claude-v2"
+
+	output, err := nrawsbedrock.InvokeModelWithResponseStreamAttributes(app, brc, context.Background(), &bedrockruntime.InvokeModelInput{
+		ModelId:     &model,
+		ContentType: &contentType,
+		Accept:      &contentType,
+		Body: []byte(`{
+			"prompt": "Human: Tell me a story.\n\nAssistant:",
+			"max_tokens_to_sample": 200,
+			"temperature": 0.5
+		}`),
+	}, map[string]any{
+		"llm.what_is_this": "manual stream invocation",
+	})
+
+	if err != nil {
+		fmt.Printf("ERROR processing model: %v\n", err)
+		return
+	}
+
+	stream := output.Response.GetStream()
+	for event := range stream.Events() {
+		switch v := event.(type) {
+		case *types.ResponseStreamMemberChunk:
+			fmt.Println("=====[event received]=====")
+			fmt.Println(string(v.Value.Bytes))
+			output.RecordEvent(v.Value.Bytes)
+		default:
+			fmt.Println("=====[unknown value received]=====")
+		}
+	}
+	output.Close()
+	stream.Close()
+}
diff --git a/v3/integrations/nrawsbedrock/go.mod b/v3/integrations/nrawsbedrock/go.mod
index 6d69a27b5..ea8a78835 100644
--- a/v3/integrations/nrawsbedrock/go.mod
+++ b/v3/integrations/nrawsbedrock/go.mod
@@ -3,6 +3,7 @@ module github.com/newrelic/go-agent/v3/integrations/nrawsbedrock
 go 1.19
 
 require (
+	github.com/aws/aws-sdk-go-v2 v1.26.0
 	github.com/aws/aws-sdk-go-v2/config v1.27.4
 	github.com/aws/aws-sdk-go-v2/service/bedrock v1.7.3
 	github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.7.1
@@ -11,7 +12,6 @@ require (
 )
 
 require (
-	github.com/aws/aws-sdk-go-v2 v1.26.0 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1 // indirect
 	github.com/aws/aws-sdk-go-v2/credentials v1.17.4 // indirect
 	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 // indirect
@@ 
-25,9 +25,9 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 // indirect github.com/aws/smithy-go v1.20.1 // indirect github.com/golang/protobuf v1.5.3 // indirect - golang.org/x/net v0.9.0 // indirect - golang.org/x/sys v0.7.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/grpc v1.56.3 // indirect google.golang.org/protobuf v1.30.0 // indirect diff --git a/v3/integrations/nrawsbedrock/nrawsbedrock.go b/v3/integrations/nrawsbedrock/nrawsbedrock.go index 6ebda1394..1d0c1bc58 100644 --- a/v3/integrations/nrawsbedrock/nrawsbedrock.go +++ b/v3/integrations/nrawsbedrock/nrawsbedrock.go @@ -60,7 +60,7 @@ func init() { } } } - internal.TrackUsage("Go", "ML", "Bedrock", "0.0.0") + internal.TrackUsage("Go", "ML", "Bedrock", "unknown") } // @@ -71,6 +71,11 @@ func init() { // we need to initialize the app and load options before we know if that one // gets sent, we have to wait until later on to report that. // +// streaming indicates if you're asking if it's ok to instrument streaming calls. +// The return values are two booleans: the first indicates if AI instrumentation +// is enabled at all, the second tells if it is permitted to record request and +// response data (as opposed to just metadata). +// func isEnabled(app *newrelic.Application, streaming bool) (bool, bool) { if app == nil { return false, false @@ -93,15 +98,27 @@ func isEnabled(app *newrelic.Application, streaming bool) (bool, bool) { // are processed. type ResponseStream struct { // The request parameters that started the invocation - ctx context.Context - app *newrelic.Application - client *bedrockruntime.Client - params *bedrockruntime.InvokeModelInput + ctx context.Context // + app *newrelic.Application // + client *bedrockruntime.Client // + params *bedrockruntime.InvokeModelInput // // The model output Response *bedrockruntime.InvokeModelWithResponseStreamOutput } +type modelResultList struct { + output string + completionReason string + tokenCount int +} + +type modelInputList struct { + input string + role string + tokenCount int +} + // // InvokeModelWithResponseStream invokes a model but unlike the InvokeModel method, the data returned // is a stream of multiple events instead of a single response value. 
@@ -280,190 +297,342 @@ func InvokeModelWithAttributes(app *newrelic.Application, brc *bedrockruntime.Cl meta["error"] = true } - // go fishing in the request and response JSON strings to find values we want to - // record with our instrumentation - - var requestData, responseData map[string]any - var inputString, outputString string - - if params.Body != nil && json.Unmarshal(params.Body, &requestData) == nil { - if recordContentEnabled { - if s, ok := requestData["inputText"]; ok { - inputString, _ = s.(string) - } else if s, ok := requestData["prompt"]; ok { - inputString, _ = s.(string) - } else if ss, ok := requestData["texts"]; ok { - if slist, ok := ss.([]string); ok { - inputString = strings.Join(slist, ",") - } + var modelInput, modelOutput []byte + if params != nil && params.Body != nil { + modelInput = params.Body + } + if output != nil && output.Body != nil { + modelOutput = output.Body + } + + inputs, outputs, systemMessage := parseModelData(app, *params.ModelId, meta, modelInput, modelOutput, attrs) + // To be more runtime efficient, we don't copy the maps or rebuild them for each kind of message. + // Instead, we build one map with most of the attributes common to all messages and then adjust as needed + // when reporting out each metric. + + if embedding { + for _, theInput := range inputs { + if theInput.tokenCount > 0 { + meta["token_count"] = theInput.tokenCount + } else { + delete(meta, "token_count") + } + if recordContentEnabled && theInput.input != "" { + meta["input"] = theInput.input + } else { + delete(meta, "input") + } + app.RecordCustomEvent("LlmEmbedding", meta) + } + } else { + messageQty := len(inputs) + len(outputs) + messageSeq := 0 + if systemMessage != "" { + messageQty++ + } + + meta["response.number_of_messages"] = messageQty + app.RecordCustomEvent("LlmChatCompletionSummary", meta) + delete(meta, "duration") + meta["completion_id"] = meta["id"] + delete(meta, "id") + delete(meta, "response.number_of_messages") + + if systemMessage != "" { + meta["sequence"] = messageSeq + messageSeq++ + meta["role"] = "system" + if recordContentEnabled { + meta["content"] = systemMessage } + app.RecordCustomEvent("LlmChatCompletionMessage", meta) } - if cfg, ok := requestData["textGenerationConfig"]; ok { - if cfgMap, ok := cfg.(map[string]any); ok { - if t, ok := cfgMap["temperature"]; ok { - meta["request.temperature"] = t + maxIterations := len(inputs) + if maxIterations < len(outputs) { + maxIterations = len(outputs) + } + for i := 0; i < maxIterations; i++ { + if i < len(inputs) { + meta["sequence"] = messageSeq + messageSeq++ + if inputs[i].tokenCount > 0 { + meta["token_count"] = inputs[i].tokenCount + } else { + delete(meta, "token_count") } - if m, ok := cfgMap["maxTokenCount"]; ok { - meta["request.max_tokens"] = m + if recordContentEnabled { + meta["content"] = inputs[i].input + } else { + delete(meta, "content") } + delete(meta, "is_response") + delete(meta, "response.choices.finish_reason") + meta["role"] = "user" + app.RecordCustomEvent("LlmChatCompletionMessage", meta) + } + if i < len(outputs) { + meta["sequence"] = messageSeq + messageSeq++ + if outputs[i].tokenCount > 0 { + meta["token_count"] = outputs[i].tokenCount + } else { + delete(meta, "token_count") + } + if recordContentEnabled { + meta["content"] = outputs[i].output + } else { + delete(meta, "content") + } + meta["role"] = "assistant" + meta["is_response"] = true + if outputs[i].completionReason != "" { + meta["response.choices.finish_reason"] = outputs[i].completionReason + } else { + 
delete(meta, "response.choices.finish_reason") + } + app.RecordCustomEvent("LlmChatCompletionMessage", meta) } - } else if t, ok := requestData["temperature"]; ok { - meta["request.temperature"] = t - } - if m, ok := requestData["max_tokens_to_sample"]; ok { - meta["request.max_tokens"] = m - } else if m, ok := requestData["max_tokens"]; ok { - meta["request.max_tokens"] = m - } else if m, ok := requestData["maxTokens"]; ok { - meta["request.max_tokens"] = m - } else if m, ok := requestData["max_gen_len"]; ok { - meta["request.max_tokens"] = m } } + } + return output, nil +} - var stopReason string - if output != nil && output.Body != nil { - if json.Unmarshal(output.Body, &responseData) == nil { - if recordContentEnabled && inputString == "" { - if s, ok := responseData["prompt"]; ok { - inputString, _ = s.(string) +func parseModelData(app *newrelic.Application, modelID string, meta map[string]any, modelInput, modelOutput []byte, attrs map[string]any) ([]modelInputList, []modelResultList, string) { + inputs := []modelInputList{} + outputs := []modelResultList{} + + // Go fishing in the request and response JSON strings to find values we want to + // record with our instrumentation. Since each model can define its own set of + // expected input and output data formats, we either have to specifically define + // model-specific templates or try to heuristically find our values in the places + // we'd expect given the existing patterns shown in the model set we have today. + // + // This implementation takes the latter approach so as to be as flexible as possible + // and have a good chance to find the data we're looking for even in new models + // that follow the same general pattern as those models that came before them. + // + // Thanks to the fact that the input and output can be a JSON data structure + // of literally anything, there's a lot of type assertion shenanigans going on + // below, as we unmarshal the JSON into a map[string]any at the top level, and + // then explore the "any" values on the way down, asserting them to be the actual + // expected types as needed. 
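+	//
+	// As an illustration (these payloads are representative samples only, not
+	// tied to any one specific model), a request body such as
+	//
+	//	{"prompt": "Hello", "temperature": 0.5, "max_tokens": 100}
+	//
+	// yields a single user input ("Hello") plus request.temperature and
+	// request.max_tokens metadata, while a response body such as
+	//
+	//	{"results": [{"outputText": "Hi there!", "completionReason": "FINISHED"}]}
+	//
+	// yields a single output with its completion reason attached.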
+
+	var requestData, responseData map[string]any
+	var systemMessage string
+
+	if modelInput != nil && json.Unmarshal(modelInput, &requestData) == nil {
+		// if the input contains a messages list, we have multiple messages to record
+		if rs, ok := requestData["messages"]; ok {
+			if rss, ok := rs.([]any); ok {
+				for _, em := range rss {
+					if eachMessage, ok := em.(map[string]any); ok {
+						var role string
+						if r, ok := eachMessage["role"]; ok {
+							role, _ = r.(string)
+						}
+						if cs, ok := eachMessage["content"]; ok {
+							if css, ok := cs.([]any); ok {
+								for _, ec := range css {
+									if eachContent, ok := ec.(map[string]any); ok {
+										if ty, ok := eachContent["type"]; ok {
+											if typ, ok := ty.(string); ok && typ == "text" {
+												if txt, ok := eachContent["text"]; ok {
+													if txts, ok := txt.(string); ok {
+														inputs = append(inputs, modelInputList{input: txts, role: role})
+													}
+												}
+											}
+										}
+									}
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+		if sys, ok := requestData["system"]; ok {
+			systemMessage, _ = sys.(string)
+		}
+
+		// otherwise, look for what the single or multiple prompt input is called
+		var inputString string
+		if s, ok := requestData["inputText"]; ok {
+			inputString, _ = s.(string)
+		} else if s, ok := requestData["prompt"]; ok {
+			inputString, _ = s.(string)
+		} else if ss, ok := requestData["texts"]; ok {
+			// the unmarshalled JSON array arrives as []any, not []string,
+			// so each element must be asserted to string individually
+			if slist, ok := ss.([]any); ok {
+				for _, inp := range slist {
+					if inpStr, ok := inp.(string); ok {
+						inputs = append(inputs, modelInputList{input: inpStr, role: "user"})
+					}
+				}
+			}
+		}
+		if inputString != "" {
+			inputs = append(inputs, modelInputList{input: inputString, role: "user"})
+		}
+
+		if cfg, ok := requestData["textGenerationConfig"]; ok {
+			if cfgMap, ok := cfg.(map[string]any); ok {
+				if t, ok := cfgMap["temperature"]; ok {
+					meta["request.temperature"] = t
+				}
+				if m, ok := cfgMap["maxTokenCount"]; ok {
+					meta["request.max_tokens"] = m
+				}
+			}
+		} else if t, ok := requestData["temperature"]; ok {
+			meta["request.temperature"] = t
+		}
+		if m, ok := requestData["max_tokens_to_sample"]; ok {
+			meta["request.max_tokens"] = m
+		} else if m, ok := requestData["max_tokens"]; ok {
+			meta["request.max_tokens"] = m
+		} else if m, ok := requestData["maxTokens"]; ok {
+			meta["request.max_tokens"] = m
+		} else if m, ok := requestData["max_gen_len"]; ok {
+			meta["request.max_tokens"] = m
+		}
+	}
+
+	var stopReason string
+	var outputString string
+	if modelOutput != nil {
+		if json.Unmarshal(modelOutput, &responseData) == nil {
+			if len(inputs) == 0 {
+				if s, ok := responseData["prompt"]; ok {
+					if inpStr, ok := s.(string); ok {
+						inputs = append(inputs, modelInputList{input: inpStr, role: "user"})
+					}
+				}
+			}
+			if id, ok := responseData["id"]; ok {
+				meta["request_id"] = id
+			}
+
+			if s, ok := responseData["stop_reason"]; ok {
+				stopReason, _ = s.(string)
+			}
+
+			if out, ok := responseData["completion"]; ok {
+				outputString, _ = out.(string)
+			}
+
+			if rs, ok := responseData["results"]; ok {
+				if crs, ok := rs.([]any); ok {
+					for _, crv := range crs {
+						if crvv, ok := crv.(map[string]any); ok {
+							var stopR, outputS string
+							if reason, ok := crvv["completionReason"]; ok {
+								stopR, _ = 
reason.(string) } - if out, ok := crv["outputText"]; ok { - outputString, _ = out.(string) + if out, ok := crvv["outputText"]; ok { + outputS, _ = out.(string) + outputs = append(outputs, modelResultList{output: outputS, completionReason: stopR}) } } } } - if rs, ok := responseData["completions"]; ok { - if crs, ok := rs.([]map[string]any); ok { - for _, crv := range crs { + } + //modelResultList{output: completionReason:} + if rs, ok := responseData["completions"]; ok { + if crs, ok := rs.([]any); ok { + for _, crsv := range crs { + if crv, ok := crsv.(map[string]any); ok { + var outputR string + + if cdata, ok := crv["finishReason"]; ok { + if cdatamap, ok := cdata.(map[string]any); ok { + if reason, ok := cdatamap["reason"]; ok { + outputR, _ = reason.(string) + } + } + } if cdata, ok := crv["data"]; ok { - if cdatamap, ok := cdata.(map[string]string); ok { + if cdatamap, ok := cdata.(map[string]any); ok { if out, ok := cdatamap["text"]; ok { - outputString = out + if outS, ok := out.(string); ok { + outputs = append(outputs, modelResultList{output: outS, completionReason: outputR}) + } } } } } } } - if rs, ok := responseData["outputs"]; ok { - if crs, ok := rs.([]map[string]any); ok { - for _, crv := range crs { + } + if rs, ok := responseData["outputs"]; ok { + if crs, ok := rs.([]any); ok { + for _, crvv := range crs { + if crv, ok := crvv.(map[string]any); ok { + var stopR string if reason, ok := crv["stop_reason"]; ok { - stopReason, _ = reason.(string) - break + stopR, _ = reason.(string) } if out, ok := crv["text"]; ok { - outputString, _ = out.(string) + if outS, ok := out.(string); ok { + outputs = append(outputs, modelResultList{output: outS, completionReason: stopR}) + } } } } } - if rs, ok := responseData["generations"]; ok { - if crs, ok := rs.([]map[string]any); ok { - for _, crv := range crs { + } + if rs, ok := responseData["generations"]; ok { + if crs, ok := rs.([]any); ok { + for _, crvv := range crs { + if crv, ok := crvv.(map[string]any); ok { + var stopR string if reason, ok := crv["finish_reason"]; ok { - stopReason, _ = reason.(string) + stopR, _ = reason.(string) } if out, ok := crv["text"]; ok { - outputString, _ = out.(string) + if outS, ok := out.(string); ok { + outputs = append(outputs, modelResultList{output: outS, completionReason: stopR}) + } } } } } - if outputString == "" { - if out, ok := responseData["generation"]; ok { - outputString, _ = out.(string) - } - } } - } - - if attrs != nil { - for k, v := range attrs { - if strings.HasPrefix(k, "llm.") { - meta[k] = v - } else { - meta["llm."+k] = v + if outputString == "" { + if out, ok := responseData["generation"]; ok { + outputString, _ = out.(string) } } - } - var userCount, outputCount int - if app.HasLLMTokenCountCallback() { - userCount, _ = app.InvokeLLMTokenCountCallback(*params.ModelId, inputString) - outputCount, _ = app.InvokeLLMTokenCountCallback(*params.ModelId, outputString) + if outputString != "" { + outputs = append(outputs, modelResultList{output: outputString, completionReason: stopReason}) + } } + } - if embedding { - if userCount > 0 { - meta["token_count"] = userCount - } - if inputString != "" { - meta["input"] = inputString - } - app.RecordCustomEvent("LlmEmbedding", meta) - } else { - if stopReason != "" { - meta["response.choices.finish_reason"] = stopReason - } - meta["response.number_of_messages"] = 2 - app.RecordCustomEvent("LlmChatCompletionSummary", meta) - delete(meta, "duration") - if userCount > 0 { - meta["token_count"] = userCount - } - if inputString != "" { - 
meta["content"] = inputString + if attrs != nil { + for k, v := range attrs { + if strings.HasPrefix(k, "llm.") { + meta[k] = v + } else { + meta["llm."+k] = v } + } + } - // move the id field from the summary to completion_id in the messages - meta["completion_id"] = meta["id"] - delete(meta, "id") - delete(meta, "response.number_of_messages") - meta["sequence"] = 0 - meta["role"] = "user" - app.RecordCustomEvent("LlmChatCompletionMessage", meta) - - meta["sequence"] = 1 - meta["role"] = "assistant" - meta["is_response"] = true - if outputString != "" { - meta["content"] = outputString - } else { - delete(meta, "content") + if app.HasLLMTokenCountCallback() { + for i, _ := range inputs { + if inputs[i].input != "" { + inputs[i].tokenCount, _ = app.InvokeLLMTokenCountCallback(modelID, inputs[i].input) } - if outputCount > 0 { - meta["token_count"] = outputCount - } else { - delete(meta, "token_count") + } + for i, _ := range outputs { + if outputs[i].output != "" { + outputs[i].tokenCount, _ = app.InvokeLLMTokenCountCallback(modelID, outputs[i].output) } - app.RecordCustomEvent("LlmChatCompletionMessage", meta) } } - return output, nil + + return inputs, outputs, systemMessage } /*** @@ -626,4 +795,36 @@ openai response headers include these but not always since they aren't always pr ratelimitLimitTokensUsageBased ratelimitResetTokensUsageBased ratelimitRemainingTokensUsageBased + + + ModelResultList + Output + CompletionReason + TokenCount + ModelInputList + Role + Input + +amazon titan + out: + results[] outputText, completionReason + stream: + chunk/bytes/index, outputText, completionReason +Claude + in: + messages[] role, content[] type='text', text + system: "system message" + out: + content[] type="text", text + stop_reason +Cohere: + out: + generations[] finish_reason, id, text, index? 
+ id + prompt +Mistral + out: + outputs[] text, stop_reason + + ***/ From 6c7afcfddf187e01064803684d160f36b63cfcb6 Mon Sep 17 00:00:00 2001 From: Steve Willoughby Date: Fri, 22 Mar 2024 23:46:40 -0700 Subject: [PATCH 14/38] removed stripping of "v" from library version number --- v3/integrations/nrawsbedrock/nrawsbedrock.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/v3/integrations/nrawsbedrock/nrawsbedrock.go b/v3/integrations/nrawsbedrock/nrawsbedrock.go index 1d0c1bc58..f2bd23bf5 100644 --- a/v3/integrations/nrawsbedrock/nrawsbedrock.go +++ b/v3/integrations/nrawsbedrock/nrawsbedrock.go @@ -51,11 +51,7 @@ func init() { if info != nil && ok { for _, module := range info.Deps { if module != nil && strings.Contains(module.Path, "/aws/aws-sdk-go-v2/service/bedrockruntime") { - if len(module.Version) > 1 && module.Version[0] == 'v' { - internal.TrackUsage("Go", "ML", "Bedrock", module.Version[1:]) - } else { - internal.TrackUsage("Go", "ML", "Bedrock", module.Version) - } + internal.TrackUsage("Go", "ML", "Bedrock", module.Version) return } } From 28c40bd7cdb0c4b9fe83b2f5e178a1fb512c28a8 Mon Sep 17 00:00:00 2001 From: Steve Willoughby Date: Sun, 24 Mar 2024 19:59:41 -0700 Subject: [PATCH 15/38] streaming in place --- v3/integrations/nrawsbedrock/example/main.go | 25 +- v3/integrations/nrawsbedrock/nrawsbedrock.go | 249 +++++++++++++++++-- 2 files changed, 247 insertions(+), 27 deletions(-) diff --git a/v3/integrations/nrawsbedrock/example/main.go b/v3/integrations/nrawsbedrock/example/main.go index ce7c5ea1c..5cd73dc37 100644 --- a/v3/integrations/nrawsbedrock/example/main.go +++ b/v3/integrations/nrawsbedrock/example/main.go @@ -32,7 +32,8 @@ func main() { app, err := newrelic.NewApplication( newrelic.ConfigFromEnvironment(), newrelic.ConfigAppName("Example App"), - newrelic.ConfigDebugLogger(os.Stdout), + // newrelic.ConfigDebugLogger(os.Stdout), + newrelic.ConfigInfoLogger(os.Stdout), newrelic.ConfigDistributedTracerEnabled(true), newrelic.ConfigAIMonitoringEnabled(true), ) @@ -50,7 +51,7 @@ func main() { brc := bedrockruntime.NewFromConfig(sdkConfig) simpleChatCompletion(app, brc) - //processedChatCompletionStream(app, brc) + processedChatCompletionStream(app, brc) manualChatCompletionStream(app, brc) app.Shutdown(10 * time.Second) @@ -118,15 +119,14 @@ func processedChatCompletionStream(app *newrelic.Application, brc *bedrockruntim err := nrawsbedrock.ProcessModelWithResponseStreamAttributes(app, brc, context.Background(), func(data []byte) error { fmt.Printf(">>> Received %s\n", string(data)) return nil - }, &bedrockruntime.InvokeModelInput{ + }, &bedrockruntime.InvokeModelWithResponseStreamInput{ ModelId: &model, ContentType: &contentType, Accept: &contentType, Body: []byte(`{ - "Prompt": "Human: Tell me a story.\n\nAssistant:", - "MaxTokensToSample": 200, - "Temperature": 0.5, - "StopSequences": ["\n\nAssistant:"] + "prompt": "Human: Tell me a story.\n\nAssistant:", + "max_tokens_to_sample": 200, + "temperature": 0.5 }`), }, map[string]any{ "llm.what_is_this": "processed stream invocation", @@ -145,7 +145,7 @@ func manualChatCompletionStream(app *newrelic.Application, brc *bedrockruntime.C contentType := "application/json" model := "anthropic.claude-v2" - output, err := nrawsbedrock.InvokeModelWithResponseStream(app, brc, context.Background(), &bedrockruntime.InvokeModelInput{ + output, err := nrawsbedrock.InvokeModelWithResponseStreamAttributes(app, brc, context.Background(), &bedrockruntime.InvokeModelWithResponseStreamInput{ ModelId: &model, 
ContentType: &contentType, Accept: &contentType, @@ -153,10 +153,11 @@ func manualChatCompletionStream(app *newrelic.Application, brc *bedrockruntime.C "prompt": "Human: Tell me a story.\n\nAssistant:", "max_tokens_to_sample": 200, "temperature": 0.5 - }, map[string]any{ - "llm.what_is_this": "manual stream invocation", - }`), - }) + }`)}, + map[string]any{ + "llm.what_is_this": "manual chat completion stream", + }, + ) if err != nil { fmt.Printf("ERROR processing model: %v\n", err) diff --git a/v3/integrations/nrawsbedrock/nrawsbedrock.go b/v3/integrations/nrawsbedrock/nrawsbedrock.go index f2bd23bf5..ce8c3ebc2 100644 --- a/v3/integrations/nrawsbedrock/nrawsbedrock.go +++ b/v3/integrations/nrawsbedrock/nrawsbedrock.go @@ -15,10 +15,26 @@ // Package nrawsbedrock instruments AI model invocation requests made by the // https://github.com/aws/aws-sdk-go-v2/service/bedrockruntime library. // +// Specifically, this provides instrumentation for the InvokeModel and InvokeModelWithResponseStream +// bedrock client API library functions. +// // To use this integration, enable the New Relic AIMonitoring configuration options // in your application, import this integration, and use the model invocation calls // from this library in place of the corresponding ones from the AWS Bedrock -// runtime library. +// runtime library, as documented below. +// +// The relevant configuration options are passed to the NewApplication function and include +// ConfigAIMonitoringEnabled(true), // enable (or disable if false) this integration +// ConfigAIMonitoringStreamingEnabled(true), // enable instrumentation of streaming invocations +// ConfigAIMonitoringRecordContentEnabled(true), // include input/output data in instrumentation +// +// Or, if ConfigFromEnvironment() is included in your configuration options, the above configuration +// options may be specified using these environment variables, respectively: +// NEW_RELIC_AI_MONITORING_ENABLED=true +// NEW_RELIC_AI_MONITORING_STREAMING_ENABLED=true +// NEW_RELIC_AI_MONITORING_RECORD_CONTENT_ENABLED=true +// The values for these variables may be any form accepted by strconv.ParseBool (e.g., 1, t, T, true, TRUE, True, +// 0, f, F, false, FALSE, or False). // // See example/main.go for a working sample. package nrawsbedrock @@ -26,20 +42,24 @@ package nrawsbedrock import ( "context" "encoding/json" - "fmt" + "errors" "runtime/debug" "strings" "sync" "time" "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" + "github.com/aws/aws-sdk-go-v2/service/bedrockruntime/types" "github.com/google/uuid" "github.com/newrelic/go-agent/v3/internal" "github.com/newrelic/go-agent/v3/internal/integrationsupport" "github.com/newrelic/go-agent/v3/newrelic" ) -var reportStreamingDisabled func() +var ( + reportStreamingDisabled func() + ErrMissingResponseData = errors.New("missing response data") +) func init() { reportStreamingDisabled = sync.OnceFunc(func() { @@ -94,10 +114,17 @@ func isEnabled(app *newrelic.Application, streaming bool) (bool, bool) { // are processed. 
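+//
+// A minimal sketch of the manual usage pattern (error handling elided; see
+// manualChatCompletionStream in example/main.go for a complete version):
+//
+//	rs, _ := nrawsbedrock.InvokeModelWithResponseStream(app, brc, ctx, params)
+//	stream := rs.Response.GetStream()
+//	for event := range stream.Events() {
+//		if chunk, ok := event.(*types.ResponseStreamMemberChunk); ok {
+//			rs.RecordEvent(chunk.Value.Bytes)
+//		}
+//	}
+//	stream.Close()
+//	rs.Close()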
type ResponseStream struct { // The request parameters that started the invocation - ctx context.Context // - app *newrelic.Application // - client *bedrockruntime.Client // - params *bedrockruntime.InvokeModelInput // + ctx context.Context + app *newrelic.Application + client *bedrockruntime.Client + params *bedrockruntime.InvokeModelWithResponseStreamInput + attrs map[string]any + recordContentEnabled bool + closeTxn bool + txn *newrelic.Transaction + seg *newrelic.Segment + completionID string + seq int // The model output Response *bedrockruntime.InvokeModelWithResponseStreamOutput @@ -143,7 +170,7 @@ type modelInputList struct { // Either start a transaction on your own and add it to the context c passed into this function, or // a transaction will be started for you that lasts only for the duration of the model invocation. // -func InvokeModelWithResponseStream(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, params *bedrockruntime.InvokeModelInput, optFns ...func(*bedrockruntime.Options)) (ResponseStream, error) { +func InvokeModelWithResponseStream(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, params *bedrockruntime.InvokeModelWithResponseStreamInput, optFns ...func(*bedrockruntime.Options)) (ResponseStream, error) { return InvokeModelWithResponseStreamAttributes(app, brc, ctx, params, nil, optFns...) } @@ -158,22 +185,185 @@ func InvokeModelWithResponseStream(app *newrelic.Application, brc *bedrockruntim // // We recommend including at least "llm.conversation_id" in your attributes. // -func InvokeModelWithResponseStreamAttributes(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, params *bedrockruntime.InvokeModelInput, attrs map[string]any, optFns ...func(*bedrockruntime.Options)) (ResponseStream, error) { - return ResponseStream{}, fmt.Errorf("not implemented") +func InvokeModelWithResponseStreamAttributes(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, params *bedrockruntime.InvokeModelWithResponseStreamInput, attrs map[string]any, optFns ...func(*bedrockruntime.Options)) (ResponseStream, error) { + var aiEnabled bool + var err error + + resp := ResponseStream{ + ctx: ctx, + app: app, + client: brc, + params: params, + attrs: attrs, + } + + aiEnabled, resp.recordContentEnabled = isEnabled(app, true) + if aiEnabled { + resp.txn = newrelic.FromContext(ctx) + if resp.txn == nil { + resp.txn = app.StartTransaction("InvokeModelWithResponseStream") + resp.closeTxn = true + } + } + + if resp.txn != nil { + integrationsupport.AddAgentAttribute(resp.txn, "llm", "", true) + if params.ModelId != nil { + resp.seg = resp.txn.StartSegment("Llm/completion/Bedrock/InvokeModelWithResponseStream") + } else { + // we don't have a model! + resp.txn = nil + } + } + + start := time.Now() + resp.Response, err = brc.InvokeModelWithResponseStream(ctx, params, optFns...) 
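+	// The elapsed wall-clock time of the call above, in milliseconds, is
+	// reported as the "duration" attribute on the LlmChatCompletionSummary event.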
+ duration := time.Since(start).Milliseconds() + + if resp.txn != nil { + md := resp.txn.GetTraceMetadata() + resp.completionID = uuid.New().String() + meta := map[string]any{ + "id": resp.completionID, + "span_id": md.SpanID, + "trace_id": md.TraceID, + "request.model": *params.ModelId, + "response.model": *params.ModelId, + "vendor": "bedrock", + "ingest_source": "Go", + "duration": duration, + } + + if err != nil { + resp.txn.NoticeError(newrelic.Error{ + Message: err.Error(), + Class: "BedrockError", + Attributes: map[string]any{ + "completion_id": resp.completionID, + }, + }) + meta["error"] = true + } + + var modelInput []byte + if params != nil && params.Body != nil { + modelInput = params.Body + } + + inputs, outputs, systemMessage := parseModelData(app, *params.ModelId, meta, modelInput, nil, attrs) + // To be more runtime efficient, we don't copy the maps or rebuild them for each kind of message. + // Instead, we build one map with most of the attributes common to all messages and then adjust as needed + // when reporting out each metric. + + app.RecordCustomEvent("LlmChatCompletionSummary", meta) + delete(meta, "duration") + meta["completion_id"] = meta["id"] + delete(meta, "id") + + if systemMessage != "" { + meta["sequence"] = resp.seq + resp.seq++ + meta["role"] = "system" + if resp.recordContentEnabled { + meta["content"] = systemMessage + } + app.RecordCustomEvent("LlmChatCompletionMessage", meta) + } + + meta["role"] = "user" + for _, msg := range inputs { + meta["sequence"] = resp.seq + resp.seq++ + if msg.tokenCount > 0 { + meta["token_count"] = msg.tokenCount + } else { + delete(meta, "token_count") + } + if resp.recordContentEnabled { + meta["content"] = msg.input + } else { + delete(meta, "content") + } + app.RecordCustomEvent("LlmChatCompletionMessage", meta) + } + for _, msg := range outputs { + meta["sequence"] = resp.seq + resp.seq++ + if msg.tokenCount > 0 { + meta["token_count"] = msg.tokenCount + } else { + delete(meta, "token_count") + } + if resp.recordContentEnabled { + meta["content"] = msg.output + } else { + delete(meta, "content") + } + app.RecordCustomEvent("LlmChatCompletionMessage", meta) + } + } + return resp, nil } // // RecordEvent records a single stream event as read from the data stream started by InvokeModelWithStreamResponse. // func (s *ResponseStream) RecordEvent(data []byte) error { - return fmt.Errorf("not implemented") + if s == nil || s.txn == nil || s.app == nil { + return nil + } + if s.params == nil || s.params.ModelId == nil { + return ErrMissingResponseData + } + + md := s.txn.GetTraceMetadata() + + meta := map[string]any{ + "completion_id": s.completionID, + "span_id": md.SpanID, + "trace_id": md.TraceID, + "request.model": *s.params.ModelId, + "response.model": *s.params.ModelId, + "vendor": "bedrock", + "ingest_source": "Go", + "role": "assistant", + } + + _, outputs, _ := parseModelData(s.app, *s.params.ModelId, meta, s.params.Body, data, s.attrs) + + for _, msg := range outputs { + meta["sequence"] = s.seq + s.seq++ + if msg.tokenCount > 0 { + meta["token_count"] = msg.tokenCount + } else { + delete(meta, "token_count") + } + if s.recordContentEnabled { + meta["content"] = msg.output + } else { + delete(meta, "content") + } + s.app.RecordCustomEvent("LlmChatCompletionMessage", meta) + } + return nil } // // Close finishes up the instrumentation for a response stream. 
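+// Close should be called exactly once, after the last stream event has been
+// received, so that the method's segment is ended and, if the transaction was
+// started automatically rather than supplied by the caller, that transaction
+// is ended as well.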
// func (s *ResponseStream) Close() error { - return fmt.Errorf("not implemented") + if s == nil || s.txn == nil { + return nil + } + + if s.seg != nil { + s.seg.End() + } + if s.closeTxn { + s.txn.End() + } + return nil } // @@ -186,7 +376,7 @@ func (s *ResponseStream) Close() error { // If your callback function returns an error, the processing of the response stream will // terminate at that point. // -func ProcessModelWithResponseStream(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, callback func([]byte) error, params *bedrockruntime.InvokeModelInput, optFns ...func(*bedrockruntime.Options)) error { +func ProcessModelWithResponseStream(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, callback func([]byte) error, params *bedrockruntime.InvokeModelWithResponseStreamInput, optFns ...func(*bedrockruntime.Options)) error { return ProcessModelWithResponseStreamAttributes(app, brc, ctx, callback, params, nil, optFns...) } @@ -201,8 +391,37 @@ func ProcessModelWithResponseStream(app *newrelic.Application, brc *bedrockrunti // // We recommend including at least "llm.conversation_id" in your attributes. // -func ProcessModelWithResponseStreamAttributes(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, callback func([]byte) error, params *bedrockruntime.InvokeModelInput, attrs map[string]any, optFns ...func(*bedrockruntime.Options)) error { - return fmt.Errorf("not implemented") +func ProcessModelWithResponseStreamAttributes(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, callback func([]byte) error, params *bedrockruntime.InvokeModelWithResponseStreamInput, attrs map[string]any, optFns ...func(*bedrockruntime.Options)) error { + var err error + var userErr error + + response, err := InvokeModelWithResponseStreamAttributes(app, brc, ctx, params, attrs, optFns...) + if err != nil { + return err + } + if response.Response == nil { + return response.Close() + } + + stream := response.Response.GetStream() + defer func() { + err = stream.Close() + }() + + for event := range stream.Events() { + if v, ok := event.(*types.ResponseStreamMemberChunk); ok { + if userErr = callback(v.Value.Bytes); userErr != nil { + break + } + response.RecordEvent(v.Value.Bytes) + } + } + + err = response.Close() + if userErr != nil { + return userErr + } + return err } // From 9f0c74570fea66ef769c80ea2a51290ee7f7ed79 Mon Sep 17 00:00:00 2001 From: Steve Willoughby Date: Sun, 24 Mar 2024 20:42:47 -0700 Subject: [PATCH 16/38] removed attribute size limits and high security block for LLM custom events --- v3/newrelic/attributes_from_internal.go | 30 +++++++++++++++++++++++++ v3/newrelic/custom_event.go | 26 +++++++++++++++++++++ v3/newrelic/internal_app.go | 22 ++++++++++++------ 3 files changed, 71 insertions(+), 7 deletions(-) diff --git a/v3/newrelic/attributes_from_internal.go b/v3/newrelic/attributes_from_internal.go index 64012b93b..6b9b31394 100644 --- a/v3/newrelic/attributes_from_internal.go +++ b/v3/newrelic/attributes_from_internal.go @@ -382,6 +382,36 @@ func validateUserAttribute(key string, val interface{}) (interface{}, error) { return val, nil } +// validateUserAttributeUnlimitedSize validates a user attribute without truncating string values. 
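+// Unlike validateUserAttribute, string values are passed through at whatever
+// length they arrive; attribute keys longer than attributeKeyLengthLimit are
+// still dropped rather than truncated.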
+func validateUserAttributeUnlimitedSize(key string, val interface{}) (interface{}, error) { + switch v := val.(type) { + case string, bool, + uint8, uint16, uint32, uint64, int8, int16, int32, int64, + uint, int, uintptr: + case float32: + if err := validateFloat(float64(v), key); err != nil { + return nil, err + } + case float64: + if err := validateFloat(v, key); err != nil { + return nil, err + } + default: + return nil, errInvalidAttributeType{ + key: key, + val: val, + } + } + + // Attributes whose keys are excessively long are dropped rather than + // truncated to avoid worrying about the application of configuration to + // truncated values or performing the truncation after configuration. + if len(key) > attributeKeyLengthLimit { + return nil, invalidAttributeKeyErr{key: key} + } + return val, nil +} + func validateFloat(v float64, key string) error { if math.IsInf(v, 0) || math.IsNaN(v) { return invalidFloatAttrValue{ diff --git a/v3/newrelic/custom_event.go b/v3/newrelic/custom_event.go index a1aacb8f3..80aa08812 100644 --- a/v3/newrelic/custom_event.go +++ b/v3/newrelic/custom_event.go @@ -100,6 +100,32 @@ func createCustomEvent(eventType string, params map[string]interface{}, now time }, nil } +// CreateCustomEventUnlimitedSize creates a custom event without restricting string value length. +func createCustomEventUnlimitedSize(eventType string, params map[string]interface{}, now time.Time) (*customEvent, error) { + if err := eventTypeValidate(eventType); err != nil { + return nil, err + } + + if len(params) > customEventAttributeLimit { + return nil, errNumAttributes + } + + truncatedParams := make(map[string]interface{}) + for key, val := range params { + val, err := validateUserAttributeUnlimitedSize(key, val) + if err != nil { + return nil, err + } + truncatedParams[key] = val + } + + return &customEvent{ + eventType: eventType, + timestamp: now, + truncatedParams: truncatedParams, + }, nil +} + // MergeIntoHarvest implements Harvestable. func (e *customEvent) MergeIntoHarvest(h *harvest) { h.CustomEvents.Add(e) diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index 00e510347..4e30629cb 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -545,18 +545,26 @@ var ( // RecordCustomEvent implements newrelic.Application's RecordCustomEvent. 
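+// Events with the LLM event types (LlmEmbedding, LlmChatCompletionSummary, and
+// LlmChatCompletionMessage) are created without the usual attribute value size
+// limits; all other event types go through the standard validation path.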
func (app *app) RecordCustomEvent(eventType string, params map[string]interface{}) error { + var event *customEvent + var e error + if nil == app { return nil } - if app.config.Config.HighSecurity { - return errHighSecurityEnabled - } - if !app.config.CustomInsightsEvents.Enabled { - return errCustomEventsDisabled - } + if eventType == "LlmEmbedding" || eventType == "LlmChatCompletionSummary" || eventType == "LlmChatCompletionMessage" { + event, e = createCustomEventUnlimitedSize(eventType, params, time.Now()) + } else { + if app.config.Config.HighSecurity { + return errHighSecurityEnabled + } - event, e := createCustomEvent(eventType, params, time.Now()) + if !app.config.CustomInsightsEvents.Enabled { + return errCustomEventsDisabled + } + + event, e := createCustomEvent(eventType, params, time.Now()) + } if nil != e { return e } From 05755513b5050d69517efa83e43b644fd69f3038 Mon Sep 17 00:00:00 2001 From: Steve Willoughby Date: Mon, 25 Mar 2024 07:59:25 -0700 Subject: [PATCH 17/38] backed out high security check for llm custom events --- v3/newrelic/internal_app.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index 4e30629cb..b85365a86 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -552,18 +552,18 @@ func (app *app) RecordCustomEvent(eventType string, params map[string]interface{ return nil } + if app.config.Config.HighSecurity { + return errHighSecurityEnabled + } + + if !app.config.CustomInsightsEvents.Enabled { + return errCustomEventsDisabled + } + if eventType == "LlmEmbedding" || eventType == "LlmChatCompletionSummary" || eventType == "LlmChatCompletionMessage" { event, e = createCustomEventUnlimitedSize(eventType, params, time.Now()) } else { - if app.config.Config.HighSecurity { - return errHighSecurityEnabled - } - - if !app.config.CustomInsightsEvents.Enabled { - return errCustomEventsDisabled - } - - event, e := createCustomEvent(eventType, params, time.Now()) + event, e = createCustomEvent(eventType, params, time.Now()) } if nil != e { return e From 67c0a9263e30ec73964c8ced245a81fa3d5802bb Mon Sep 17 00:00:00 2001 From: Mirac Kara <55501260+mirackara@users.noreply.github.com> Date: Mon, 25 Mar 2024 11:17:46 -0500 Subject: [PATCH 18/38] OpenAI - Added Support for Token Callback / Bug Fixes (#875) * Bug Fixes for OpenAI --- .../chatcompletion/chatcompletion_example.go | 38 ++++- .../chatcompletionfeedback.go | 2 +- .../examples/embeddings/embeddings_example.go | 25 ++++ v3/integrations/nropenai/go.mod | 4 +- v3/integrations/nropenai/nropenai.go | 124 ++++++++++++---- v3/integrations/nropenai/nropenai_test.go | 138 ++++++++---------- 6 files changed, 220 insertions(+), 111 deletions(-) diff --git a/v3/integrations/nropenai/examples/chatcompletion/chatcompletion_example.go b/v3/integrations/nropenai/examples/chatcompletion/chatcompletion_example.go index 6aeef8da1..e87b2f9bf 100644 --- a/v3/integrations/nropenai/examples/chatcompletion/chatcompletion_example.go +++ b/v3/integrations/nropenai/examples/chatcompletion/chatcompletion_example.go @@ -7,6 +7,7 @@ import ( "github.com/newrelic/go-agent/v3/integrations/nropenai" "github.com/newrelic/go-agent/v3/newrelic" + "github.com/pkoukk/tiktoken-go" openai "github.com/sashabaranov/go-openai" ) @@ -25,6 +26,34 @@ func main() { } app.WaitForConnection(10 * time.Second) + // SetLLMTokenCountCallback allows for custom token counting, if left unset and if newrelic.ConfigAIMonitoringRecordContentEnabled() + // is 
disabled, no token counts will be reported + app.SetLLMTokenCountCallback(func(modelName string, content string) int { + var tokensPerMessage, tokensPerName int + switch modelName { + case "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-16k-0613", + "gpt-4-0314", + "gpt-4-32k-0314", + "gpt-4-0613", + "gpt-4-32k-0613": + tokensPerMessage = 3 + tokensPerName = 1 + case "gpt-3.5-turbo-0301": + tokensPerMessage = 4 + tokensPerName = -1 + } + + tkm, err := tiktoken.EncodingForModel(modelName) + if err != nil { + fmt.Println("error getting tokens", err) + return 0 + } + token := tkm.Encode(content, nil, nil) + totalTokens := len(token) + tokensPerMessage + tokensPerName + return totalTokens + }) + // OpenAI Config - Additionally, NRDefaultAzureConfig(apiKey, baseURL string) can be used for Azure cfg := nropenai.NRDefaultConfig(os.Getenv("OPEN_AI_API_KEY")) @@ -40,13 +69,13 @@ func main() { // GPT Request req := openai.ChatCompletionRequest{ - Model: openai.GPT3Dot5Turbo, + Model: openai.GPT4, Temperature: 0.7, MaxTokens: 150, Messages: []openai.ChatCompletionMessage{ { Role: openai.ChatMessageRoleUser, - Content: "What is 8*5", + Content: "What is Observability in Software Engineering?", }, }, } @@ -56,8 +85,9 @@ func main() { if err != nil { panic(err) } - - fmt.Println(resp.ChatCompletionResponse.Choices[0].Message.Content) + if len(resp.ChatCompletionResponse.Choices) == 0 { + fmt.Println("No choices returned") + } // Shutdown Application app.Shutdown(5 * time.Second) diff --git a/v3/integrations/nropenai/examples/chatcompletionfeedback/chatcompletionfeedback.go b/v3/integrations/nropenai/examples/chatcompletionfeedback/chatcompletionfeedback.go index 24eba417d..21caea010 100644 --- a/v3/integrations/nropenai/examples/chatcompletionfeedback/chatcompletionfeedback.go +++ b/v3/integrations/nropenai/examples/chatcompletionfeedback/chatcompletionfeedback.go @@ -49,7 +49,7 @@ func main() { Messages: []openai.ChatCompletionMessage{ { Role: openai.ChatMessageRoleUser, - Content: "What is 8*5", + Content: "What is observability in software engineering?", }, }, } diff --git a/v3/integrations/nropenai/examples/embeddings/embeddings_example.go b/v3/integrations/nropenai/examples/embeddings/embeddings_example.go index ff50d7428..421e4bd6a 100644 --- a/v3/integrations/nropenai/examples/embeddings/embeddings_example.go +++ b/v3/integrations/nropenai/examples/embeddings/embeddings_example.go @@ -7,6 +7,7 @@ import ( "github.com/newrelic/go-agent/v3/integrations/nropenai" "github.com/newrelic/go-agent/v3/newrelic" + "github.com/pkoukk/tiktoken-go" openai "github.com/sashabaranov/go-openai" ) @@ -23,7 +24,31 @@ func main() { panic(err) } app.WaitForConnection(10 * time.Second) + app.SetLLMTokenCountCallback(func(modelName string, content string) int { + var tokensPerMessage, tokensPerName int + switch modelName { + case "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-16k-0613", + "gpt-4-0314", + "gpt-4-32k-0314", + "gpt-4-0613", + "gpt-4-32k-0613": + tokensPerMessage = 3 + tokensPerName = 1 + case "gpt-3.5-turbo-0301": + tokensPerMessage = 4 + tokensPerName = -1 + } + tkm, err := tiktoken.EncodingForModel(modelName) + if err != nil { + fmt.Println("error getting tokens", err) + return 0 + } + token := tkm.Encode(content, nil, nil) + totalTokens := len(token) + tokensPerMessage + tokensPerName + return totalTokens + }) // OpenAI Config - Additionally, NRDefaultAzureConfig(apiKey, baseURL string) can be used for Azure cfg := nropenai.NRDefaultConfig(os.Getenv("OPEN_AI_API_KEY")) diff --git a/v3/integrations/nropenai/go.mod 
b/v3/integrations/nropenai/go.mod index 338ad0aef..a565f429e 100644 --- a/v3/integrations/nropenai/go.mod +++ b/v3/integrations/nropenai/go.mod @@ -5,10 +5,12 @@ go 1.21.0 require ( github.com/google/uuid v1.6.0 github.com/newrelic/go-agent/v3 v3.30.0 + github.com/pkoukk/tiktoken-go v0.1.6 github.com/sashabaranov/go-openai v1.20.2 ) require ( + github.com/dlclark/regexp2 v1.10.0 // indirect github.com/golang/protobuf v1.5.3 // indirect golang.org/x/net v0.9.0 // indirect golang.org/x/sys v0.7.0 // indirect @@ -18,4 +20,4 @@ require ( google.golang.org/protobuf v1.30.0 // indirect ) -replace github.com/newrelic/go-agent/v3 => ../.. \ No newline at end of file +replace github.com/newrelic/go-agent/v3 => ../.. diff --git a/v3/integrations/nropenai/nropenai.go b/v3/integrations/nropenai/nropenai.go index 33054cec9..8549780c1 100644 --- a/v3/integrations/nropenai/nropenai.go +++ b/v3/integrations/nropenai/nropenai.go @@ -6,29 +6,38 @@ package nropenai import ( "context" "errors" + "fmt" "reflect" "runtime/debug" "strings" + "sync" "time" "github.com/google/uuid" "github.com/newrelic/go-agent/v3/internal" + "github.com/newrelic/go-agent/v3/internal/integrationsupport" "github.com/newrelic/go-agent/v3/newrelic" "github.com/sashabaranov/go-openai" ) +var reportStreamingDisabled func() + func init() { + reportStreamingDisabled = sync.OnceFunc(func() { + internal.TrackUsage("Go", "ML", "Streaming", "Disabled") + }) // Get current go-openai version info, ok := debug.ReadBuildInfo() if info != nil && ok { for _, module := range info.Deps { if module != nil && strings.Contains(module.Path, "go-openai") { + internal.TrackUsage("Go", "ML", "OpenAI", module.Version) + return } } } - internal.TrackUsage("Go", "ML", "OpenAI", "unknown") } @@ -171,11 +180,15 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl uuid := uuid.New() spanID := txn.GetTraceMetadata().SpanID traceID := txn.GetTraceMetadata().TraceID - transactionID := traceID[:16] ChatCompletionSummaryData := map[string]interface{}{} - + if !appConfig.AIMonitoring.Streaming.Enabled { + if reportStreamingDisabled != nil { + reportStreamingDisabled() + } + } // Start span + integrationsupport.AddAgentAttribute(txn, "llm", "", true) chatCompletionSpan := txn.StartSegment("Llm/completion/OpenAI/CreateChatCompletion") // Track Total time taken for the chat completion or embedding call to complete in milliseconds start := time.Now() @@ -192,8 +205,6 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl Message: err.Error(), Class: "OpenAIError", Attributes: map[string]interface{}{ - "http.status": resp.Header().Get("Status"), - "error.code": resp.Header().Get("Error-Code"), "completion_id": uuid.String(), }, }) @@ -208,14 +219,11 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl ChatCompletionSummaryData["duration"] = duration // Response Data - ChatCompletionSummaryData["response.number_of_messages"] = len(resp.Choices) + ChatCompletionSummaryData["response.number_of_messages"] = len(resp.Choices) + len(req.Messages) ChatCompletionSummaryData["response.model"] = resp.Model ChatCompletionSummaryData["request_id"] = resp.ID ChatCompletionSummaryData["response.organization"] = resp.Header().Get("Openai-Organization") - ChatCompletionSummaryData["response.number_of_messages"] = len(resp.Choices) - ChatCompletionSummaryData["response.usage.total_tokens"] = resp.Usage.TotalTokens - ChatCompletionSummaryData["response.usage.prompt_tokens"] = resp.Usage.PromptTokens - 
ChatCompletionSummaryData["response.usage.completion_tokens"] = resp.Usage.CompletionTokens + if len(resp.Choices) > 0 { finishReason, err := resp.Choices[0].FinishReason.MarshalJSON() if err != nil { @@ -241,7 +249,6 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl // New Relic Attributes ChatCompletionSummaryData["id"] = uuid.String() ChatCompletionSummaryData["span_id"] = spanID - ChatCompletionSummaryData["transaction_id"] = transactionID ChatCompletionSummaryData["trace_id"] = traceID ChatCompletionSummaryData["api_key_last_four_digits"] = cw.LicenseKeyLastFour ChatCompletionSummaryData["vendor"] = "OpenAI" @@ -254,6 +261,8 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl // Record Custom Event app.RecordCustomEvent("LlmChatCompletionSummary", ChatCompletionSummaryData) + // Capture request message + NRCreateChatCompletionMessageInput(txn, app, req, uuid, cw) // Capture completion messages NRCreateChatCompletionMessage(txn, app, resp, uuid, cw) txn.End() @@ -263,22 +272,64 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl TraceID: traceID, } } +func NRCreateChatCompletionMessageInput(txn *newrelic.Transaction, app *newrelic.Application, req openai.ChatCompletionRequest, uuid uuid.UUID, cw *ClientWrapper) { + spanID := txn.GetTraceMetadata().SpanID + traceID := txn.GetTraceMetadata().TraceID + appCfg, configErr := app.Config() + if !configErr { + appCfg.AppName = "Unknown" + } + integrationsupport.AddAgentAttribute(txn, "llm", "", true) + chatCompletionMessageSpan := txn.StartSegment("Llm/completion/OpenAI/CreateChatCompletionMessage") + + ChatCompletionMessageData := map[string]interface{}{} + // if the response doesn't have an ID, use the UUID from the summary + ChatCompletionMessageData["id"] = uuid.String() + "-0" + + // Response Data + ChatCompletionMessageData["response.model"] = req.Model + + if appCfg.AIMonitoring.RecordContent.Enabled { + ChatCompletionMessageData["content"] = req.Messages[0].Content + } + + ChatCompletionMessageData["role"] = req.Messages[0].Role + + // New Relic Attributes + ChatCompletionMessageData["sequence"] = 0 + ChatCompletionMessageData["vendor"] = "openai" + ChatCompletionMessageData["ingest_source"] = "go" + ChatCompletionMessageData["span_id"] = spanID + ChatCompletionMessageData["trace_id"] = traceID + contentTokens, contentCounted := app.InvokeLLMTokenCountCallback(req.Model, req.Messages[0].Content) + + if contentCounted { + ChatCompletionMessageData["token_count"] = contentTokens + } + + // If custom attributes are set, add them to the data + ChatCompletionMessageData = AppendCustomAttributesToEvent(cw, ChatCompletionMessageData) + chatCompletionMessageSpan.End() + // Record Custom Event for each message + app.RecordCustomEvent("LlmChatCompletionMessage", ChatCompletionMessageData) + +} // NRCreateChatCompletionMessage captures the completion messages and records a custom event in New Relic for each message func NRCreateChatCompletionMessage(txn *newrelic.Transaction, app *newrelic.Application, resp openai.ChatCompletionResponse, uuid uuid.UUID, cw *ClientWrapper) { spanID := txn.GetTraceMetadata().SpanID traceID := txn.GetTraceMetadata().TraceID - transactionID := traceID[:16] appCfg, configErr := app.Config() if !configErr { appCfg.AppName = "Unknown" } + integrationsupport.AddAgentAttribute(txn, "llm", "", true) chatCompletionMessageSpan := txn.StartSegment("Llm/completion/OpenAI/CreateChatCompletionMessage") for i, choice := range 
resp.Choices { ChatCompletionMessageData := map[string]interface{}{} // if the response doesn't have an ID, use the UUID from the summary if resp.ID == "" { - ChatCompletionMessageData["id"] = uuid.String() + ChatCompletionMessageData["id"] = uuid.String() + "-" + fmt.Sprint(i+1) } else { ChatCompletionMessageData["id"] = resp.ID } @@ -296,14 +347,15 @@ func NRCreateChatCompletionMessage(txn *newrelic.Transaction, app *newrelic.Appl ChatCompletionMessageData["request_id"] = resp.Header().Get("X-Request-Id") // New Relic Attributes - ChatCompletionMessageData["sequence"] = i + ChatCompletionMessageData["sequence"] = i + 1 ChatCompletionMessageData["vendor"] = "openai" ChatCompletionMessageData["ingest_source"] = "go" ChatCompletionMessageData["span_id"] = spanID ChatCompletionMessageData["trace_id"] = traceID - ChatCompletionMessageData["transaction_id"] = transactionID - // TO:DO completion_id set in CompletionSummary which is a UUID generated by the agent to identify the event - // TO:DO - llm.conversation_id + tokenCount, tokensCounted := TokenCountingHelper(app, choice.Message, resp.Model) + if tokensCounted { + ChatCompletionMessageData["token_count"] = tokenCount + } // If custom attributes are set, add them to the data ChatCompletionMessageData = AppendCustomAttributesToEvent(cw, ChatCompletionMessageData) @@ -316,6 +368,16 @@ func NRCreateChatCompletionMessage(txn *newrelic.Transaction, app *newrelic.Appl chatCompletionMessageSpan.End() } +func TokenCountingHelper(app *newrelic.Application, message openai.ChatCompletionMessage, model string) (numTokens int, tokensCounted bool) { + + contentTokens, contentCounted := app.InvokeLLMTokenCountCallback(model, message.Content) + roleTokens, roleCounted := app.InvokeLLMTokenCountCallback(model, message.Role) + messageTokens, messageCounted := app.InvokeLLMTokenCountCallback(model, message.Name) + numTokens += contentTokens + roleTokens + messageTokens + + return numTokens, (contentCounted && roleCounted && messageCounted) +} + // NRCreateChatCompletion is a wrapper for the OpenAI CreateChatCompletion method. 
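+// In addition to invoking the model, it records an LlmChatCompletionSummary event
+// along with LlmChatCompletionMessage events for the request prompt and for each
+// response choice.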
// If AI Monitoring is disabled, the wrapped function will still call the OpenAI CreateChatCompletion method and return the response with no New Relic instrumentation func NRCreateChatCompletion(cw *ClientWrapper, req openai.ChatCompletionRequest, app *newrelic.Application) (ChatCompletionResponseWrapper, error) { @@ -323,6 +385,7 @@ func NRCreateChatCompletion(cw *ClientWrapper, req openai.ChatCompletionRequest, if !cfgErr { config.AppName = "Unknown" } + resp := ChatCompletionResponseWrapper{} // If AI Monitoring is disabled, do not start a transaction but still perform the request if !config.AIMonitoring.Enabled { @@ -366,11 +429,11 @@ func NRCreateEmbedding(cw *ClientWrapper, req openai.EmbeddingRequest, app *newr spanID := txn.GetTraceMetadata().SpanID traceID := txn.GetTraceMetadata().TraceID - transactionID := traceID[:16] EmbeddingsData := map[string]interface{}{} uuid := uuid.New() + integrationsupport.AddAgentAttribute(txn, "llm", "", true) - embeddingSpan := txn.StartSegment("Llm/completion/OpenAI/CreateEmbedding") + embeddingSpan := txn.StartSegment("Llm/embedding/OpenAI/CreateEmbedding") start := time.Now() resp, err := cw.Client.CreateEmbeddings(context.Background(), req) duration := time.Since(start).Milliseconds() @@ -382,8 +445,6 @@ func NRCreateEmbedding(cw *ClientWrapper, req openai.EmbeddingRequest, app *newr Message: err.Error(), Class: "OpenAIError", Attributes: map[string]interface{}{ - "http.status": resp.Header().Get("Status"), - "error.code": resp.Header().Get("Error-Code"), "embedding_id": uuid.String(), }, }) @@ -393,14 +454,20 @@ func NRCreateEmbedding(cw *ClientWrapper, req openai.EmbeddingRequest, app *newr if config.AIMonitoring.RecordContent.Enabled { EmbeddingsData["input"] = GetInput(req.Input) } + + EmbeddingsData["request_id"] = resp.Header().Get("X-Request-Id") EmbeddingsData["api_key_last_four_digits"] = cw.LicenseKeyLastFour EmbeddingsData["request.model"] = string(req.Model) EmbeddingsData["duration"] = duration // Response Data EmbeddingsData["response.model"] = string(resp.Model) - EmbeddingsData["response.usage.total_tokens"] = resp.Usage.TotalTokens - EmbeddingsData["response.usage.prompt_tokens"] = resp.Usage.PromptTokens + // cast input as string + input := GetInput(req.Input).(string) + tokenCount, tokensCounted := app.InvokeLLMTokenCountCallback(string(resp.Model), input) + if tokensCounted { + EmbeddingsData["token_count"] = tokenCount + } // Response Headers EmbeddingsData["response.organization"] = resp.Header().Get("Openai-Organization") @@ -419,7 +486,6 @@ func NRCreateEmbedding(cw *ClientWrapper, req openai.EmbeddingRequest, app *newr EmbeddingsData["vendor"] = "OpenAI" EmbeddingsData["ingest_source"] = "Go" EmbeddingsData["span_id"] = spanID - EmbeddingsData["transaction_id"] = transactionID EmbeddingsData["trace_id"] = traceID app.RecordCustomEvent("LlmEmbedding", EmbeddingsData) @@ -432,7 +498,11 @@ func NRCreateChatCompletionStream(cw *ClientWrapper, ctx context.Context, req op if !cfgErr { config.AppName = "Unknown" } - + if !config.AIMonitoring.Streaming.Enabled { + if reportStreamingDisabled != nil { + reportStreamingDisabled() + } + } // If AI Monitoring OR AIMonitoring.Streaming is disabled, do not start a transaction but still perform the request if !config.AIMonitoring.Enabled || !config.AIMonitoring.Streaming.Enabled { stream, err := cw.Client.CreateChatCompletionStream(ctx, req) @@ -446,10 +516,9 @@ func NRCreateChatCompletionStream(cw *ClientWrapper, ctx context.Context, req op txn := 
app.StartTransaction("OpenAIChatCompletionStream") spanID := txn.GetTraceMetadata().SpanID traceID := txn.GetTraceMetadata().TraceID - transactionID := traceID[:16] StreamingData := map[string]interface{}{} uuid := uuid.New() - + integrationsupport.AddAgentAttribute(txn, "llm", "", true) streamSpan := txn.StartSegment("Llm/completion/OpenAI/stream") start := time.Now() stream, err := cw.Client.CreateChatCompletionStream(ctx, req) @@ -478,7 +547,6 @@ func NRCreateChatCompletionStream(cw *ClientWrapper, ctx context.Context, req op // New Relic Attributes StreamingData["id"] = uuid.String() StreamingData["span_id"] = spanID - StreamingData["transaction_id"] = transactionID StreamingData["trace_id"] = traceID StreamingData["api_key_last_four_digits"] = cw.LicenseKeyLastFour StreamingData["vendor"] = "OpenAI" diff --git a/v3/integrations/nropenai/nropenai_test.go b/v3/integrations/nropenai/nropenai_test.go index 6648de26f..68498eaff 100644 --- a/v3/integrations/nropenai/nropenai_test.go +++ b/v3/integrations/nropenai/nropenai_test.go @@ -237,28 +237,24 @@ func TestNRCreateChatCompletion(t *testing.T) { "timestamp": internal.MatchAnything, }, UserAttributes: map[string]interface{}{ - "ingest_source": "Go", - "vendor": "OpenAI", - "model": "gpt-3.5-turbo", - "id": internal.MatchAnything, - "transaction_id": internal.MatchAnything, - "trace_id": internal.MatchAnything, - "span_id": internal.MatchAnything, - "appName": "my app", - "duration": 0, - "response.choices.finish_reason": internal.MatchAnything, - "request.temperature": 0, - "api_key_last_four_digits": "sk-mnop", - "request_id": "chatcmpl-123", - "request.model": "gpt-3.5-turbo", - "request.max_tokens": 150, - "response.number_of_messages": 1, - "response.headers.llmVersion": "2020-10-01", - "response.organization": "user-123", - "response.usage.completion_tokens": 12, - "response.model": "gpt-3.5-turbo", - "response.usage.total_tokens": 21, - "response.usage.prompt_tokens": 9, + "ingest_source": "Go", + "vendor": "OpenAI", + "model": "gpt-3.5-turbo", + "id": internal.MatchAnything, + "trace_id": internal.MatchAnything, + "span_id": internal.MatchAnything, + "appName": "my app", + "duration": 0, + "response.choices.finish_reason": internal.MatchAnything, + "request.temperature": 0, + "api_key_last_four_digits": "sk-mnop", + "request_id": "chatcmpl-123", + "request.model": "gpt-3.5-turbo", + "request.max_tokens": 150, + "response.number_of_messages": 1, + "response.headers.llmVersion": "2020-10-01", + "response.organization": "user-123", + "response.model": "gpt-3.5-turbo", "response.headers.ratelimitRemainingTokens": "100", "response.headers.ratelimitRemainingRequests": "10000", "response.headers.ratelimitResetTokens": "100", @@ -274,7 +270,6 @@ func TestNRCreateChatCompletion(t *testing.T) { }, UserAttributes: map[string]interface{}{ "trace_id": internal.MatchAnything, - "transaction_id": internal.MatchAnything, "span_id": internal.MatchAnything, "id": "chatcmpl-123", "sequence": 0, @@ -350,28 +345,24 @@ func TestNRCreateChatCompletionError(t *testing.T) { "timestamp": internal.MatchAnything, }, UserAttributes: map[string]interface{}{ - "error": true, - "ingest_source": "Go", - "vendor": "OpenAI", - "model": "gpt-3.5-turbo", - "id": internal.MatchAnything, - "transaction_id": internal.MatchAnything, - "trace_id": internal.MatchAnything, - "span_id": internal.MatchAnything, - "appName": "my app", - "duration": 0, - "request.temperature": 0, - "api_key_last_four_digits": "sk-mnop", - "request_id": "", - "request.model": "gpt-3.5-turbo", - 
"request.max_tokens": 150, - "response.number_of_messages": 0, - "response.headers.llmVersion": "2020-10-01", - "response.organization": "user-123", - "response.usage.completion_tokens": 0, - "response.model": "", - "response.usage.total_tokens": 0, - "response.usage.prompt_tokens": 0, + "error": true, + "ingest_source": "Go", + "vendor": "OpenAI", + "model": "gpt-3.5-turbo", + "id": internal.MatchAnything, + "trace_id": internal.MatchAnything, + "span_id": internal.MatchAnything, + "appName": "my app", + "duration": 0, + "request.temperature": 0, + "api_key_last_four_digits": "sk-mnop", + "request_id": "", + "request.model": "gpt-3.5-turbo", + "request.max_tokens": 150, + "response.number_of_messages": 0, + "response.headers.llmVersion": "2020-10-01", + "response.organization": "user-123", + "response.model": "", "response.headers.ratelimitRemainingTokens": "100", "response.headers.ratelimitRemainingRequests": "10000", "response.headers.ratelimitResetTokens": "100", @@ -428,21 +419,18 @@ func TestNRCreateEmbedding(t *testing.T) { "timestamp": internal.MatchAnything, }, UserAttributes: map[string]interface{}{ - "ingest_source": "Go", - "vendor": "OpenAI", - "id": internal.MatchAnything, - "transaction_id": internal.MatchAnything, - "trace_id": internal.MatchAnything, - "span_id": internal.MatchAnything, - "duration": 0, - "api_key_last_four_digits": "sk-mnop", - "request.model": "text-embedding-ada-002", - "response.headers.llmVersion": "2020-10-01", - "response.organization": "user-123", - "response.model": "text-embedding-ada-002", - "response.usage.total_tokens": 21, - "response.usage.prompt_tokens": 9, - "input": "The food was delicious and the waiter", + "ingest_source": "Go", + "vendor": "OpenAI", + "id": internal.MatchAnything, + "trace_id": internal.MatchAnything, + "span_id": internal.MatchAnything, + "duration": 0, + "api_key_last_four_digits": "sk-mnop", + "request.model": "text-embedding-ada-002", + "response.headers.llmVersion": "2020-10-01", + "response.organization": "user-123", + "response.model": "text-embedding-ada-002", + "input": "The food was delicious and the waiter", "response.headers.ratelimitRemainingTokens": "100", "response.headers.ratelimitRemainingRequests": "10000", "response.headers.ratelimitResetTokens": "100", @@ -509,22 +497,19 @@ func TestNRCreateEmbeddingError(t *testing.T) { "timestamp": internal.MatchAnything, }, UserAttributes: map[string]interface{}{ - "ingest_source": "Go", - "vendor": "OpenAI", - "id": internal.MatchAnything, - "transaction_id": internal.MatchAnything, - "trace_id": internal.MatchAnything, - "span_id": internal.MatchAnything, - "duration": 0, - "api_key_last_four_digits": "sk-mnop", - "request.model": "text-embedding-ada-002", - "response.headers.llmVersion": "2020-10-01", - "response.organization": "user-123", - "error": true, - "response.model": "", - "response.usage.total_tokens": 0, - "response.usage.prompt_tokens": 0, - "input": "testError", + "ingest_source": "Go", + "vendor": "OpenAI", + "id": internal.MatchAnything, + "trace_id": internal.MatchAnything, + "span_id": internal.MatchAnything, + "duration": 0, + "api_key_last_four_digits": "sk-mnop", + "request.model": "text-embedding-ada-002", + "response.headers.llmVersion": "2020-10-01", + "response.organization": "user-123", + "error": true, + "response.model": "", + "input": "testError", "response.headers.ratelimitRemainingTokens": "100", "response.headers.ratelimitRemainingRequests": "10000", "response.headers.ratelimitResetTokens": "100", @@ -589,7 +574,6 @@ func 
TestNRCreateStream(t *testing.T) { "vendor": "OpenAI", "model": "gpt-3.5-turbo", "id": internal.MatchAnything, - "transaction_id": internal.MatchAnything, "trace_id": internal.MatchAnything, "span_id": internal.MatchAnything, "appName": "my app", From a3a13d8b1c0c28d2da3bf02af99052a161c6803a Mon Sep 17 00:00:00 2001 From: Steve Willoughby Date: Mon, 25 Mar 2024 12:16:54 -0700 Subject: [PATCH 19/38] fixes missing "llm" transaction attr and handling of bedrock streams --- v3/integrations/nrawsbedrock/example/main.go | 84 ++++++++- v3/integrations/nrawsbedrock/nrawsbedrock.go | 182 +++++++++---------- v3/newrelic/attributes.go | 2 + v3/newrelic/attributes_from_internal.go | 1 + 4 files changed, 173 insertions(+), 96 deletions(-) diff --git a/v3/integrations/nrawsbedrock/example/main.go b/v3/integrations/nrawsbedrock/example/main.go index 5cd73dc37..f767a42ee 100644 --- a/v3/integrations/nrawsbedrock/example/main.go +++ b/v3/integrations/nrawsbedrock/example/main.go @@ -31,11 +31,12 @@ func main() { // Distributed Tracing, but that's not required. app, err := newrelic.NewApplication( newrelic.ConfigFromEnvironment(), - newrelic.ConfigAppName("Example App"), - // newrelic.ConfigDebugLogger(os.Stdout), - newrelic.ConfigInfoLogger(os.Stdout), + newrelic.ConfigAppName("Example Bedrock App"), + newrelic.ConfigDebugLogger(os.Stdout), + //newrelic.ConfigInfoLogger(os.Stdout), newrelic.ConfigDistributedTracerEnabled(true), newrelic.ConfigAIMonitoringEnabled(true), + newrelic.ConfigAIMonitoringRecordContentEnabled(true), ) if nil != err { fmt.Println(err) @@ -50,6 +51,8 @@ func main() { listModels(sdkConfig) brc := bedrockruntime.NewFromConfig(sdkConfig) + simpleEmbedding(app, brc) + simpleChatCompletionError(app, brc) simpleChatCompletion(app, brc) processedChatCompletionStream(app, brc) manualChatCompletionStream(app, brc) @@ -58,6 +61,7 @@ func main() { } func listModels(sdkConfig aws.Config) { + fmt.Println("================================================== MODELS") bedrockClient := bedrock.NewFromConfig(sdkConfig) result, err := bedrockClient.ListFoundationModels(context.TODO(), &bedrock.ListFoundationModelsInput{}) if err != nil { @@ -71,7 +75,72 @@ func listModels(sdkConfig aws.Config) { } } +func simpleChatCompletionError(app *newrelic.Application, brc *bedrockruntime.Client) { + fmt.Println("================================================== CHAT COMPLETION WITH ERROR") + // Start recording a New Relic transaction + txn := app.StartTransaction("demo-chat-completion-error") + + contentType := "application/json" + model := "amazon.titan-text-lite-v1" + // + // without nrawsbedrock instrumentation, the call to invoke the model would be: + // output, err := brc.InvokeModel(context.Background(), &bedrockruntime.InvokeModelInput{ + // ... 
+ // }) + // + _, err := nrawsbedrock.InvokeModel(app, brc, newrelic.NewContext(context.Background(), txn), &bedrockruntime.InvokeModelInput{ + ContentType: &contentType, + Accept: &contentType, + Body: []byte(`{ + "inputTexxt": "What is your quest?", + "textGenerationConfig": { + "temperature": 0.5, + "maxTokenCount": 100, + "stopSequences": [], + "topP": 1 + } + }`), + ModelId: &model, + }) + + txn.End() + + if err != nil { + fmt.Printf("error: %v\n", err) + } +} + +func simpleEmbedding(app *newrelic.Application, brc *bedrockruntime.Client) { + fmt.Println("================================================== EMBEDDING") + // Start recording a New Relic transaction + contentType := "application/json" + model := "amazon.titan-embed-text-v1" + // + // without nrawsbedrock instrumentation, the call to invoke the model would be: + // output, err := brc.InvokeModel(context.Background(), &bedrockruntime.InvokeModelInput{ + // ... + // }) + // + output, err := nrawsbedrock.InvokeModel(app, brc, context.Background(), &bedrockruntime.InvokeModelInput{ + ContentType: &contentType, + Accept: &contentType, + Body: []byte(`{ + "inputText": "What is your quest?" + }`), + ModelId: &model, + }) + + if err != nil { + fmt.Printf("error: %v\n", err) + } + + if output != nil { + fmt.Printf("Result: %v\n", string(output.Body)) + } +} + func simpleChatCompletion(app *newrelic.Application, brc *bedrockruntime.Client) { + fmt.Println("================================================== COMPLETION") // Start recording a New Relic transaction txn := app.StartTransaction("demo-chat-completion") @@ -83,6 +152,7 @@ func simpleChatCompletion(app *newrelic.Application, brc *bedrockruntime.Client) // ... // }) // + app.SetLLMTokenCountCallback(func(model, data string) int { return 42 }) output, err := nrawsbedrock.InvokeModel(app, brc, newrelic.NewContext(context.Background(), txn), &bedrockruntime.InvokeModelInput{ ContentType: &contentType, Accept: &contentType, @@ -99,13 +169,15 @@ func simpleChatCompletion(app *newrelic.Application, brc *bedrockruntime.Client) }) txn.End() + app.SetLLMTokenCountCallback(nil) if err != nil { fmt.Printf("error: %v\n", err) } - fmt.Printf("Result: %v\n", string(output.Body)) - + if output != nil { + fmt.Printf("Result: %v\n", string(output.Body)) + } } // @@ -113,6 +185,7 @@ func simpleChatCompletion(app *newrelic.Application, brc *bedrockruntime.Client) // all the stream output for us. // func processedChatCompletionStream(app *newrelic.Application, brc *bedrockruntime.Client) { + fmt.Println("================================================== STREAM (PROCESSED)") contentType := "application/json" model := "anthropic.claude-v2" @@ -142,6 +215,7 @@ func processedChatCompletionStream(app *newrelic.Application, brc *bedrockruntim // of the stream output. 
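+// Here the caller is responsible for reading the SDK's event stream, passing
+// each received chunk to RecordEvent, and calling Close when done.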
// func manualChatCompletionStream(app *newrelic.Application, brc *bedrockruntime.Client) { + fmt.Println("================================================== STREAM (MANUAL)") contentType := "application/json" model := "anthropic.claude-v2" diff --git a/v3/integrations/nrawsbedrock/nrawsbedrock.go b/v3/integrations/nrawsbedrock/nrawsbedrock.go index ce8c3ebc2..99114a5f1 100644 --- a/v3/integrations/nrawsbedrock/nrawsbedrock.go +++ b/v3/integrations/nrawsbedrock/nrawsbedrock.go @@ -119,12 +119,15 @@ type ResponseStream struct { client *bedrockruntime.Client params *bedrockruntime.InvokeModelWithResponseStreamInput attrs map[string]any + meta map[string]any recordContentEnabled bool closeTxn bool txn *newrelic.Transaction seg *newrelic.Segment completionID string seq int + output strings.Builder + stopReason string // The model output Response *bedrockruntime.InvokeModelWithResponseStreamOutput @@ -192,6 +195,7 @@ func InvokeModelWithResponseStreamAttributes(app *newrelic.Application, brc *bed resp := ResponseStream{ ctx: ctx, app: app, + meta: map[string]any{}, client: brc, params: params, attrs: attrs, @@ -223,7 +227,7 @@ func InvokeModelWithResponseStreamAttributes(app *newrelic.Application, brc *bed if resp.txn != nil { md := resp.txn.GetTraceMetadata() resp.completionID = uuid.New().String() - meta := map[string]any{ + resp.meta = map[string]any{ "id": resp.completionID, "span_id": md.SpanID, "trace_id": md.TraceID, @@ -242,66 +246,10 @@ func InvokeModelWithResponseStreamAttributes(app *newrelic.Application, brc *bed "completion_id": resp.completionID, }, }) - meta["error"] = true - } - - var modelInput []byte - if params != nil && params.Body != nil { - modelInput = params.Body - } - - inputs, outputs, systemMessage := parseModelData(app, *params.ModelId, meta, modelInput, nil, attrs) - // To be more runtime efficient, we don't copy the maps or rebuild them for each kind of message. - // Instead, we build one map with most of the attributes common to all messages and then adjust as needed - // when reporting out each metric. 
- - app.RecordCustomEvent("LlmChatCompletionSummary", meta) - delete(meta, "duration") - meta["completion_id"] = meta["id"] - delete(meta, "id") - - if systemMessage != "" { - meta["sequence"] = resp.seq - resp.seq++ - meta["role"] = "system" - if resp.recordContentEnabled { - meta["content"] = systemMessage - } - app.RecordCustomEvent("LlmChatCompletionMessage", meta) - } - - meta["role"] = "user" - for _, msg := range inputs { - meta["sequence"] = resp.seq - resp.seq++ - if msg.tokenCount > 0 { - meta["token_count"] = msg.tokenCount - } else { - delete(meta, "token_count") - } - if resp.recordContentEnabled { - meta["content"] = msg.input - } else { - delete(meta, "content") - } - app.RecordCustomEvent("LlmChatCompletionMessage", meta) - } - for _, msg := range outputs { - meta["sequence"] = resp.seq - resp.seq++ - if msg.tokenCount > 0 { - meta["token_count"] = msg.tokenCount - } else { - delete(meta, "token_count") - } - if resp.recordContentEnabled { - meta["content"] = msg.output - } else { - delete(meta, "content") - } - app.RecordCustomEvent("LlmChatCompletionMessage", meta) + resp.meta["error"] = true } } + return resp, nil } @@ -312,50 +260,101 @@ func (s *ResponseStream) RecordEvent(data []byte) error { if s == nil || s.txn == nil || s.app == nil { return nil } - if s.params == nil || s.params.ModelId == nil { + if s.params == nil || s.params.ModelId == nil || s.meta == nil { + return ErrMissingResponseData + } + + _, outputs, _ := parseModelData(s.app, *s.params.ModelId, s.meta, s.params.Body, data, s.attrs, false) + for _, msg := range outputs { + s.output.WriteString(msg.output) + if msg.completionReason != "" { + s.stopReason = msg.completionReason + } + } + return nil +} + +// +// Close finishes up the instrumentation for a response stream. +// +func (s *ResponseStream) Close() error { + if s == nil || s.app == nil || s.txn == nil { + return nil + } + if s.params == nil || s.params.ModelId == nil || s.meta == nil { return ErrMissingResponseData } - md := s.txn.GetTraceMetadata() - - meta := map[string]any{ - "completion_id": s.completionID, - "span_id": md.SpanID, - "trace_id": md.TraceID, - "request.model": *s.params.ModelId, - "response.model": *s.params.ModelId, - "vendor": "bedrock", - "ingest_source": "Go", - "role": "assistant", + var modelInput []byte + modelOutput := s.output.String() + if s.params != nil && s.params.Body != nil { + modelInput = s.params.Body } - _, outputs, _ := parseModelData(s.app, *s.params.ModelId, meta, s.params.Body, data, s.attrs) + inputs, _, systemMessage := parseModelData(s.app, *s.params.ModelId, s.meta, modelInput, nil, s.attrs, true) + // To be more runtime efficient, we don't copy the maps or rebuild them for each kind of message. + // Instead, we build one map with most of the attributes common to all messages and then adjust as needed + // when reporting out each metric. 
- for _, msg := range outputs { - meta["sequence"] = s.seq + otherQty := 0 + if systemMessage != "" { + otherQty++ + } + if modelOutput != "" { + otherQty++ + } + + if s.stopReason != "" { + s.meta["response.choices.finish_reason"] = s.stopReason + } + s.meta["response.number_of_messages"] = len(inputs) + otherQty + + s.app.RecordCustomEvent("LlmChatCompletionSummary", s.meta) + delete(s.meta, "duration") + s.meta["completion_id"] = s.meta["id"] + delete(s.meta, "id") + + if systemMessage != "" { + s.meta["sequence"] = s.seq + s.seq++ + s.meta["role"] = "system" + if s.recordContentEnabled { + s.meta["content"] = systemMessage + } + s.app.RecordCustomEvent("LlmChatCompletionMessage", s.meta) + } + + s.meta["role"] = "user" + for _, msg := range inputs { + s.meta["sequence"] = s.seq s.seq++ if msg.tokenCount > 0 { - meta["token_count"] = msg.tokenCount + s.meta["token_count"] = msg.tokenCount } else { - delete(meta, "token_count") + delete(s.meta, "token_count") } if s.recordContentEnabled { - meta["content"] = msg.output + s.meta["content"] = msg.input } else { - delete(meta, "content") + delete(s.meta, "content") } - s.app.RecordCustomEvent("LlmChatCompletionMessage", meta) + s.app.RecordCustomEvent("LlmChatCompletionMessage", s.meta) } - return nil -} -// -// Close finishes up the instrumentation for a response stream. -// -func (s *ResponseStream) Close() error { - if s == nil || s.txn == nil { - return nil + if s.app.HasLLMTokenCountCallback() { + if tc, _ := s.app.InvokeLLMTokenCountCallback(*s.params.ModelId, modelOutput); tc > 0 { + s.meta["token_count"] = tc + } + } + s.meta["role"] = "assistant" + s.meta["sequence"] = s.seq + s.seq++ + if s.recordContentEnabled { + s.meta["content"] = modelOutput + } else { + delete(s.meta, "content") } + s.app.RecordCustomEvent("LlmChatCompletionMessage", s.meta) if s.seg != nil { s.seg.End() @@ -454,6 +453,7 @@ func InvokeModel(app *newrelic.Application, brc *bedrockruntime.Client, ctx cont // func InvokeModelWithAttributes(app *newrelic.Application, brc *bedrockruntime.Client, ctx context.Context, params *bedrockruntime.InvokeModelInput, attrs map[string]any, optFns ...func(*bedrockruntime.Options)) (*bedrockruntime.InvokeModelOutput, error) { var txn *newrelic.Transaction // the transaction to record in, or nil if we aren't instrumenting this time + var err error aiEnabled, recordContentEnabled := isEnabled(app, false) if aiEnabled { @@ -520,7 +520,7 @@ func InvokeModelWithAttributes(app *newrelic.Application, brc *bedrockruntime.Cl modelOutput = output.Body } - inputs, outputs, systemMessage := parseModelData(app, *params.ModelId, meta, modelInput, modelOutput, attrs) + inputs, outputs, systemMessage := parseModelData(app, *params.ModelId, meta, modelInput, modelOutput, attrs, true) // To be more runtime efficient, we don't copy the maps or rebuild them for each kind of message. // Instead, we build one map with most of the attributes common to all messages and then adjust as needed // when reporting out each metric. 
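//
// For reference, a consumer of the response-stream instrumentation reworked
// above might look like the following sketch. RecordEvent accumulates each
// chunk of model output (and the stop reason), and Close then reports the
// LlmChatCompletionSummary and LlmChatCompletionMessage events. This sketch
// assumes the AWS SDK v2 event-stream types, where "types" is
// github.com/aws/aws-sdk-go-v2/service/bedrockruntime/types; error handling
// is abbreviated:
//
//	rs, err := nrawsbedrock.InvokeModelWithResponseStream(app, brc, ctx, params)
//	if err != nil {
//		return err
//	}
//	for event := range rs.Response.GetStream().Events() {
//		if chunk, ok := event.(*types.ResponseStreamMemberChunk); ok {
//			rs.RecordEvent(chunk.Value.Bytes) // accumulate one chunk of output
//		}
//	}
//	rs.Close() // report the accumulated summary and message events
//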
@@ -611,10 +611,10 @@ func InvokeModelWithAttributes(app *newrelic.Application, brc *bedrockruntime.Cl } } } - return output, nil + return output, err } -func parseModelData(app *newrelic.Application, modelID string, meta map[string]any, modelInput, modelOutput []byte, attrs map[string]any) ([]modelInputList, []modelResultList, string) { +func parseModelData(app *newrelic.Application, modelID string, meta map[string]any, modelInput, modelOutput []byte, attrs map[string]any, countTokens bool) ([]modelInputList, []modelResultList, string) { inputs := []modelInputList{} outputs := []modelResultList{} @@ -834,7 +834,7 @@ func parseModelData(app *newrelic.Application, modelID string, meta map[string]a } } - if app.HasLLMTokenCountCallback() { + if countTokens && app.HasLLMTokenCountCallback() { for i, _ := range inputs { if inputs[i].input != "" { inputs[i].tokenCount, _ = app.InvokeLLMTokenCountCallback(modelID, inputs[i].input) diff --git a/v3/newrelic/attributes.go b/v3/newrelic/attributes.go index 0221b5795..2fd2f8d09 100644 --- a/v3/newrelic/attributes.go +++ b/v3/newrelic/attributes.go @@ -56,6 +56,8 @@ const ( AttributeErrorGroupName = "error.group.name" // AttributeUserID tracks the user a transaction and its child events are impacting AttributeUserID = "enduser.id" + // AttributeLLM tracks LLM transactions + AttributeLLM = "llm" ) // Attributes destined for Errors and Transaction Traces: diff --git a/v3/newrelic/attributes_from_internal.go b/v3/newrelic/attributes_from_internal.go index 6b9b31394..fca46d9cf 100644 --- a/v3/newrelic/attributes_from_internal.go +++ b/v3/newrelic/attributes_from_internal.go @@ -58,6 +58,7 @@ var ( AttributeCodeFilepath: usualDests, AttributeCodeLineno: usualDests, AttributeUserID: usualDests, + AttributeLLM: usualDests, // Span specific attributes SpanAttributeDBStatement: usualDests, From a3dbcec3fccbeb9a77b1846f912ddbed6ebda5fd Mon Sep 17 00:00:00 2001 From: mirackara Date: Mon, 25 Mar 2024 14:58:14 -0500 Subject: [PATCH 20/38] openai changes --- v3/integrations/nropenai/nropenai.go | 105 +++++++++++++++++++--- v3/integrations/nropenai/nropenai_test.go | 93 +++++++++++-------- 2 files changed, 151 insertions(+), 47 deletions(-) diff --git a/v3/integrations/nropenai/nropenai.go b/v3/integrations/nropenai/nropenai.go index 8549780c1..0a1cf1434 100644 --- a/v3/integrations/nropenai/nropenai.go +++ b/v3/integrations/nropenai/nropenai.go @@ -149,23 +149,49 @@ type ChatCompletionResponseWrapper struct { // Wrapper for ChatCompletionStream that is returned from NRCreateChatCompletionStream type ChatCompletionStreamWrapper struct { - stream *openai.ChatCompletionStream - txn *newrelic.Transaction + app *newrelic.Application + stream *openai.ChatCompletionStream + streamResp openai.ChatCompletionResponse + responseStr string + uuid string + txn *newrelic.Transaction + cw *ClientWrapper + role string + model string + StreamingData map[string]interface{} + isRoleAdded bool + TraceID string } // Wrapper for Recv() method that calls the underlying stream's Recv() method func (w *ChatCompletionStreamWrapper) Recv() (openai.ChatCompletionStreamResponse, error) { response, err := w.stream.Recv() - if err != nil { return response, err } + if !w.isRoleAdded && (response.Choices[0].Delta.Role == "assistant" || response.Choices[0].Delta.Role == "user" || response.Choices[0].Delta.Role == "system") { + w.isRoleAdded = true + w.role = response.Choices[0].Delta.Role + + } + if response.Choices[0].FinishReason != "stop" { + w.responseStr += response.Choices[0].Delta.Content + 
w.streamResp.ID = response.ID + w.streamResp.Model = response.Model + w.model = response.Model + } return response, nil } func (w *ChatCompletionStreamWrapper) Close() { + w.StreamingData["response.model"] = w.model + w.app.RecordCustomEvent("LlmChatCompletionSummary", w.StreamingData) + + NRCreateChatCompletionMessageStream(w.app, uuid.MustParse(w.uuid), w, w.cw) + + w.txn.End() w.stream.Close() } @@ -188,7 +214,8 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl } } // Start span - integrationsupport.AddAgentAttribute(txn, "llm", "", true) + txn.AddAttribute("llm", true) + chatCompletionSpan := txn.StartSegment("Llm/completion/OpenAI/CreateChatCompletion") // Track Total time taken for the chat completion or embedding call to complete in milliseconds start := time.Now() @@ -272,6 +299,52 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl TraceID: traceID, } } + +func NRCreateChatCompletionMessageStream(app *newrelic.Application, uuid uuid.UUID, sw *ChatCompletionStreamWrapper, cw *ClientWrapper) { + + spanID := sw.txn.GetTraceMetadata().SpanID + traceID := sw.txn.GetTraceMetadata().TraceID + + appCfg, configErr := app.Config() + if !configErr { + appCfg.AppName = "Unknown" + } + integrationsupport.AddAgentAttribute(sw.txn, "llm", "", true) + chatCompletionMessageSpan := sw.txn.StartSegment("Llm/completion/OpenAI/CreateChatCompletionMessageStream") + + ChatCompletionMessageData := map[string]interface{}{} + // if the response doesn't have an ID, use the UUID from the summary + + ChatCompletionMessageData["id"] = sw.streamResp.ID + + // Response Data + ChatCompletionMessageData["request.model"] = sw.model + + if appCfg.AIMonitoring.RecordContent.Enabled { + ChatCompletionMessageData["content"] = sw.responseStr + } + + ChatCompletionMessageData["role"] = sw.role + + // New Relic Attributes + ChatCompletionMessageData["sequence"] = 1 + ChatCompletionMessageData["vendor"] = "OpenAI" + ChatCompletionMessageData["ingest_source"] = "Go" + ChatCompletionMessageData["span_id"] = spanID + ChatCompletionMessageData["trace_id"] = traceID + contentTokens, contentCounted := app.InvokeLLMTokenCountCallback(sw.model, sw.responseStr) + if contentCounted { + ChatCompletionMessageData["token_count"] = contentTokens + } + + // If custom attributes are set, add them to the data + ChatCompletionMessageData = AppendCustomAttributesToEvent(cw, ChatCompletionMessageData) + chatCompletionMessageSpan.End() + // Record Custom Event for each message + app.RecordCustomEvent("LlmChatCompletionMessage", ChatCompletionMessageData) + +} + func NRCreateChatCompletionMessageInput(txn *newrelic.Transaction, app *newrelic.Application, req openai.ChatCompletionRequest, uuid uuid.UUID, cw *ClientWrapper) { spanID := txn.GetTraceMetadata().SpanID traceID := txn.GetTraceMetadata().TraceID @@ -297,8 +370,8 @@ func NRCreateChatCompletionMessageInput(txn *newrelic.Transaction, app *newrelic // New Relic Attributes ChatCompletionMessageData["sequence"] = 0 - ChatCompletionMessageData["vendor"] = "openai" - ChatCompletionMessageData["ingest_source"] = "go" + ChatCompletionMessageData["vendor"] = "OpenAI" + ChatCompletionMessageData["ingest_source"] = "Go" ChatCompletionMessageData["span_id"] = spanID ChatCompletionMessageData["trace_id"] = traceID contentTokens, contentCounted := app.InvokeLLMTokenCountCallback(req.Model, req.Messages[0].Content) @@ -348,8 +421,8 @@ func NRCreateChatCompletionMessage(txn *newrelic.Transaction, app *newrelic.Appl // New Relic Attributes 
ChatCompletionMessageData["sequence"] = i + 1 - ChatCompletionMessageData["vendor"] = "openai" - ChatCompletionMessageData["ingest_source"] = "go" + ChatCompletionMessageData["vendor"] = "OpenAI" + ChatCompletionMessageData["ingest_source"] = "Go" ChatCompletionMessageData["span_id"] = spanID ChatCompletionMessageData["trace_id"] = traceID tokenCount, tokensCounted := TokenCountingHelper(app, choice.Message, resp.Model) @@ -378,6 +451,16 @@ func TokenCountingHelper(app *newrelic.Application, message openai.ChatCompletio return numTokens, (contentCounted && roleCounted && messageCounted) } +func TokenCountingHelperStream(app *newrelic.Application, model string, content string, role string, messageName string) (numTokens int, tokensCounted bool) { + + contentTokens, contentCounted := app.InvokeLLMTokenCountCallback(model, content) + roleTokens, roleCounted := app.InvokeLLMTokenCountCallback(model, role) + messageTokens, messageCounted := app.InvokeLLMTokenCountCallback(model, messageName) + numTokens += contentTokens + roleTokens + messageTokens + + return numTokens, (contentCounted && roleCounted && messageCounted) +} + // NRCreateChatCompletion is a wrapper for the OpenAI CreateChatCompletion method. // If AI Monitoring is disabled, the wrapped function will still call the OpenAI CreateChatCompletion method and return the response with no New Relic instrumentation func NRCreateChatCompletion(cw *ClientWrapper, req openai.ChatCompletionRequest, app *newrelic.Application) (ChatCompletionResponseWrapper, error) { @@ -552,8 +635,8 @@ func NRCreateChatCompletionStream(cw *ClientWrapper, ctx context.Context, req op StreamingData["vendor"] = "OpenAI" StreamingData["ingest_source"] = "Go" StreamingData["appName"] = config.AppName - app.RecordCustomEvent("LlmChatCompletionSummary", StreamingData) - txn.End() - return &ChatCompletionStreamWrapper{stream: stream, txn: txn}, nil + + NRCreateChatCompletionMessageInput(txn, app, req, uuid, cw) + return &ChatCompletionStreamWrapper{app: app, stream: stream, txn: txn, uuid: uuid.String(), cw: cw, StreamingData: StreamingData, TraceID: traceID}, nil } diff --git a/v3/integrations/nropenai/nropenai_test.go b/v3/integrations/nropenai/nropenai_test.go index 68498eaff..76c762b6a 100644 --- a/v3/integrations/nropenai/nropenai_test.go +++ b/v3/integrations/nropenai/nropenai_test.go @@ -251,7 +251,7 @@ func TestNRCreateChatCompletion(t *testing.T) { "request_id": "chatcmpl-123", "request.model": "gpt-3.5-turbo", "request.max_tokens": 150, - "response.number_of_messages": 1, + "response.number_of_messages": 2, "response.headers.llmVersion": "2020-10-01", "response.organization": "user-123", "response.model": "gpt-3.5-turbo", @@ -271,13 +271,31 @@ func TestNRCreateChatCompletion(t *testing.T) { UserAttributes: map[string]interface{}{ "trace_id": internal.MatchAnything, "span_id": internal.MatchAnything, - "id": "chatcmpl-123", + "id": internal.MatchAnything, "sequence": 0, + "role": "user", + "content": "What is 8*5", + "vendor": "OpenAI", + "ingest_source": "Go", + "response.model": "gpt-3.5-turbo", + }, + AgentAttributes: map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "type": "LlmChatCompletionMessage", + "timestamp": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{ + "trace_id": internal.MatchAnything, + "span_id": internal.MatchAnything, + "id": "chatcmpl-123", + "sequence": 1, "role": "assistant", "content": "\n\nHello there, how may I assist you today?", "request_id": "chatcmpl-123", - "vendor": "openai", - 
"ingest_source": "go", + "vendor": "OpenAI", + "ingest_source": "Go", "response.model": "gpt-3.5-turbo", }, AgentAttributes: map[string]interface{}{}, @@ -359,7 +377,7 @@ func TestNRCreateChatCompletionError(t *testing.T) { "request_id": "", "request.model": "gpt-3.5-turbo", "request.max_tokens": 150, - "response.number_of_messages": 0, + "response.number_of_messages": 1, "response.headers.llmVersion": "2020-10-01", "response.organization": "user-123", "response.model": "", @@ -371,6 +389,23 @@ func TestNRCreateChatCompletionError(t *testing.T) { "response.headers.ratelimitLimitRequests": "10000", }, }, + { + Intrinsics: map[string]interface{}{ + "type": "LlmChatCompletionMessage", + "timestamp": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{ + "ingest_source": "Go", + "vendor": "OpenAI", + "id": internal.MatchAnything, + "trace_id": internal.MatchAnything, + "span_id": internal.MatchAnything, + "content": "testError", + "role": "user", + "response.model": "gpt-3.5-turbo", + "sequence": 0, + }, + }, }) app.ExpectErrorEvents(t, []internal.WantEvent{ { @@ -385,11 +420,11 @@ func TestNRCreateChatCompletionError(t *testing.T) { "error.message": "test error", }, UserAttributes: map[string]interface{}{ - "error.code": "404", - "http.status": "404", "completion_id": internal.MatchAnything, + "llm": true, }, - }}) + }, + }) } func TestNRCreateEmbedding(t *testing.T) { mockClient := &MockOpenAIClient{} @@ -425,6 +460,7 @@ func TestNRCreateEmbedding(t *testing.T) { "trace_id": internal.MatchAnything, "span_id": internal.MatchAnything, "duration": 0, + "request_id": "chatcmpl-123", "api_key_last_four_digits": "sk-mnop", "request.model": "text-embedding-ada-002", "response.headers.llmVersion": "2020-10-01", @@ -504,6 +540,7 @@ func TestNRCreateEmbeddingError(t *testing.T) { "span_id": internal.MatchAnything, "duration": 0, "api_key_last_four_digits": "sk-mnop", + "request_id": "chatcmpl-123", "request.model": "text-embedding-ada-002", "response.headers.llmVersion": "2020-10-01", "response.organization": "user-123", @@ -533,8 +570,6 @@ func TestNRCreateEmbeddingError(t *testing.T) { "error.message": "test error", }, UserAttributes: map[string]interface{}{ - "error.code": "404", - "http.status": "404", "embedding_id": internal.MatchAnything, }, }}) @@ -566,36 +601,21 @@ func TestNRCreateStream(t *testing.T) { app.ExpectCustomEvents(t, []internal.WantEvent{ { Intrinsics: map[string]interface{}{ - "type": "LlmChatCompletionSummary", + "type": "LlmChatCompletionMessage", "timestamp": internal.MatchAnything, }, UserAttributes: map[string]interface{}{ - "ingest_source": "Go", - "vendor": "OpenAI", - "model": "gpt-3.5-turbo", - "id": internal.MatchAnything, - "trace_id": internal.MatchAnything, - "span_id": internal.MatchAnything, - "appName": "my app", - "duration": 0, - "request.temperature": 0, - "api_key_last_four_digits": "sk-mnop", - "request.max_tokens": 1500, - "request.model": "gpt-3.5-turbo", - }, - }, - }) - app.ExpectTxnEvents(t, []internal.WantEvent{ - { - Intrinsics: map[string]interface{}{ - "type": "Transaction", - "name": "OtherTransaction/Go/OpenAIChatCompletionStream", - "timestamp": internal.MatchAnything, - "traceId": internal.MatchAnything, - "priority": internal.MatchAnything, - "sampled": internal.MatchAnything, - "guid": internal.MatchAnything, + "trace_id": internal.MatchAnything, + "span_id": internal.MatchAnything, + "id": internal.MatchAnything, + "sequence": 0, + "role": "user", + "content": "Say this is a test", + "vendor": "OpenAI", + "ingest_source": 
"Go", + "response.model": "gpt-3.5-turbo", }, + AgentAttributes: map[string]interface{}{}, }, }) } @@ -625,6 +645,7 @@ func TestNRCreateStreamAIMonitoringNotEnabled(t *testing.T) { } app.ExpectCustomEvents(t, []internal.WantEvent{}) app.ExpectTxnEvents(t, []internal.WantEvent{}) + } func TestNRCreateStreamError(t *testing.T) { From 13475968633e91419a5bbf0aee1ace55c14406a5 Mon Sep 17 00:00:00 2001 From: mirackara Date: Mon, 25 Mar 2024 15:01:34 -0500 Subject: [PATCH 21/38] Close stream after response is finished --- .../chatcompletionstreaming/chatcompletionstreaming.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go b/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go index 4745aae3b..e4235930f 100644 --- a/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go +++ b/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go @@ -61,8 +61,6 @@ func main() { if err != nil { panic(err) } - defer stream.Close() - fmt.Printf("Stream response: ") for { var response openai.ChatCompletionStreamResponse @@ -78,6 +76,7 @@ func main() { fmt.Printf(response.Choices[0].Delta.Content) } + stream.Close() // Shutdown Application app.Shutdown(5 * time.Second) } From 7c7f73834fbf6023f0805780e0911d4499456aaa Mon Sep 17 00:00:00 2001 From: mirackara Date: Mon, 25 Mar 2024 15:04:54 -0500 Subject: [PATCH 22/38] added feedback to streaming example --- .../chatcompletionstreaming.go | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go b/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go index e4235930f..50f73ec13 100644 --- a/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go +++ b/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go @@ -13,12 +13,26 @@ import ( openai "github.com/sashabaranov/go-openai" ) +// Simulates feedback being sent to New Relic. Feedback on a chat completion requires +// having access to the ChatCompletionResponseWrapper which is returned by the NRCreateChatCompletion function. +func SendFeedback(app *newrelic.Application, resp nropenai.ChatCompletionStreamWrapper) { + trace_id := resp.TraceID + rating := "5" + category := "informative" + message := "The response was concise yet thorough." 
+ customMetadata := map[string]interface{}{ + "foo": "bar", + "pi": 3.14, + } + + app.RecordLLMFeedbackEvent(trace_id, rating, category, message, customMetadata) +} + func main() { // Start New Relic Application app, err := newrelic.NewApplication( newrelic.ConfigAppName("Basic OpenAI App"), newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), - newrelic.ConfigDebugLogger(os.Stdout), // Enable AI Monitoring // NOTE - If High Security Mode is enabled, AI Monitoring will always be disabled newrelic.ConfigAIMonitoringEnabled(true), @@ -49,7 +63,7 @@ func main() { Messages: []openai.ChatCompletionMessage{ { Role: openai.ChatMessageRoleUser, - Content: "Say this is a test", + Content: "What is observability in software engineering?", }, }, Stream: true, @@ -59,6 +73,7 @@ func main() { stream, err := nropenai.NRCreateChatCompletionStream(client, ctx, req, app) if err != nil { + panic(err) } fmt.Printf("Stream response: ") @@ -77,6 +92,7 @@ func main() { fmt.Printf(response.Choices[0].Delta.Content) } stream.Close() + SendFeedback(app, *stream) // Shutdown Application app.Shutdown(5 * time.Second) } From 95cd5b8ea36fb14af7f21b5757acf9302e09f899 Mon Sep 17 00:00:00 2001 From: mirackara Date: Mon, 25 Mar 2024 15:12:53 -0500 Subject: [PATCH 23/38] token callback extra check --- v3/integrations/nropenai/nropenai.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/v3/integrations/nropenai/nropenai.go b/v3/integrations/nropenai/nropenai.go index 0a1cf1434..36b262586 100644 --- a/v3/integrations/nropenai/nropenai.go +++ b/v3/integrations/nropenai/nropenai.go @@ -333,8 +333,10 @@ func NRCreateChatCompletionMessageStream(app *newrelic.Application, uuid uuid.UU ChatCompletionMessageData["span_id"] = spanID ChatCompletionMessageData["trace_id"] = traceID contentTokens, contentCounted := app.InvokeLLMTokenCountCallback(sw.model, sw.responseStr) - if contentCounted { - ChatCompletionMessageData["token_count"] = contentTokens + roleTokens, roleCounted := app.InvokeLLMTokenCountCallback(sw.model, sw.role) + + if (contentCounted && roleCounted) && app.HasLLMTokenCountCallback() { + ChatCompletionMessageData["token_count"] = contentTokens + roleTokens } // If custom attributes are set, add them to the data @@ -376,7 +378,7 @@ func NRCreateChatCompletionMessageInput(txn *newrelic.Transaction, app *newrelic ChatCompletionMessageData["trace_id"] = traceID contentTokens, contentCounted := app.InvokeLLMTokenCountCallback(req.Model, req.Messages[0].Content) - if contentCounted { + if contentCounted && app.HasLLMTokenCountCallback() { ChatCompletionMessageData["token_count"] = contentTokens } @@ -548,7 +550,8 @@ func NRCreateEmbedding(cw *ClientWrapper, req openai.EmbeddingRequest, app *newr // cast input as string input := GetInput(req.Input).(string) tokenCount, tokensCounted := app.InvokeLLMTokenCountCallback(string(resp.Model), input) - if tokensCounted { + + if tokensCounted && app.HasLLMTokenCountCallback() { EmbeddingsData["token_count"] = tokenCount } From 8bd6b0bb365b1f3d65e3ace9ace707864ef1b16a Mon Sep 17 00:00:00 2001 From: mirackara Date: Mon, 25 Mar 2024 15:47:13 -0500 Subject: [PATCH 24/38] Corrected token count issue and added token callback for streaming --- .../chatcompletionstreaming.go | 29 ++++++++++++++++- v3/integrations/nropenai/nropenai.go | 31 +++++++++---------- 2 files changed, 42 insertions(+), 18 deletions(-) diff --git a/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go 
b/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go index 50f73ec13..f43ddba70 100644 --- a/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go +++ b/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go @@ -10,6 +10,7 @@ import ( "github.com/newrelic/go-agent/v3/integrations/nropenai" "github.com/newrelic/go-agent/v3/newrelic" + "github.com/pkoukk/tiktoken-go" openai "github.com/sashabaranov/go-openai" ) @@ -41,7 +42,33 @@ func main() { panic(err) } app.WaitForConnection(10 * time.Second) + // SetLLMTokenCountCallback allows for custom token counting, if left unset and if newrelic.ConfigAIMonitoringRecordContentEnabled() + // is disabled, no token counts will be reported + app.SetLLMTokenCountCallback(func(modelName string, content string) int { + var tokensPerMessage, tokensPerName int + switch modelName { + case "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-16k-0613", + "gpt-4-0314", + "gpt-4-32k-0314", + "gpt-4-0613", + "gpt-4-32k-0613": + tokensPerMessage = 3 + tokensPerName = 1 + case "gpt-3.5-turbo-0301": + tokensPerMessage = 4 + tokensPerName = -1 + } + tkm, err := tiktoken.EncodingForModel(modelName) + if err != nil { + fmt.Println("error getting tokens", err) + return 0 + } + token := tkm.Encode(content, nil, nil) + totalTokens := len(token) + tokensPerMessage + tokensPerName + return totalTokens + }) // OpenAI Config - Additionally, NRDefaultAzureConfig(apiKey, baseURL string) can be used for Azure cfg := nropenai.NRDefaultConfig(os.Getenv("OPEN_AI_API_KEY")) @@ -57,7 +84,7 @@ func main() { // GPT Request req := openai.ChatCompletionRequest{ - Model: openai.GPT3Dot5Turbo, + Model: openai.GPT4, Temperature: 0.7, MaxTokens: 1500, Messages: []openai.ChatCompletionMessage{ diff --git a/v3/integrations/nropenai/nropenai.go b/v3/integrations/nropenai/nropenai.go index 36b262586..8c2b6c248 100644 --- a/v3/integrations/nropenai/nropenai.go +++ b/v3/integrations/nropenai/nropenai.go @@ -332,11 +332,15 @@ func NRCreateChatCompletionMessageStream(app *newrelic.Application, uuid uuid.UU ChatCompletionMessageData["ingest_source"] = "Go" ChatCompletionMessageData["span_id"] = spanID ChatCompletionMessageData["trace_id"] = traceID - contentTokens, contentCounted := app.InvokeLLMTokenCountCallback(sw.model, sw.responseStr) - roleTokens, roleCounted := app.InvokeLLMTokenCountCallback(sw.model, sw.role) - - if (contentCounted && roleCounted) && app.HasLLMTokenCountCallback() { - ChatCompletionMessageData["token_count"] = contentTokens + roleTokens + tmpMessage := openai.ChatCompletionMessage{ + Content: sw.responseStr, + Role: sw.role, + // Name is not provided in the stream response, so we don't include it in token counting + Name: "", + } + tokenCount, tokensCounted := TokenCountingHelper(app, tmpMessage, sw.model) + if tokensCounted { + ChatCompletionMessageData["token_count"] = tokenCount } // If custom attributes are set, add them to the data @@ -444,23 +448,16 @@ func NRCreateChatCompletionMessage(txn *newrelic.Transaction, app *newrelic.Appl } func TokenCountingHelper(app *newrelic.Application, message openai.ChatCompletionMessage, model string) (numTokens int, tokensCounted bool) { - contentTokens, contentCounted := app.InvokeLLMTokenCountCallback(model, message.Content) roleTokens, roleCounted := app.InvokeLLMTokenCountCallback(model, message.Role) - messageTokens, messageCounted := app.InvokeLLMTokenCountCallback(model, message.Name) - numTokens += contentTokens + roleTokens + messageTokens + var 
messageTokens int
+	if message.Name != "" {
+		messageTokens, _ = app.InvokeLLMTokenCountCallback(model, message.Name)
 
-	return numTokens, (contentCounted && roleCounted && messageCounted)
-}
-
-func TokenCountingHelperStream(app *newrelic.Application, model string, content string, role string, messageName string) (numTokens int, tokensCounted bool) {
-
-	contentTokens, contentCounted := app.InvokeLLMTokenCountCallback(model, content)
-	roleTokens, roleCounted := app.InvokeLLMTokenCountCallback(model, role)
-	messageTokens, messageCounted := app.InvokeLLMTokenCountCallback(model, messageName)
+	}
 	numTokens += contentTokens + roleTokens + messageTokens
 
-	return numTokens, (contentCounted && roleCounted && messageCounted)
+	return numTokens, (contentCounted && roleCounted)
 }
 
 // NRCreateChatCompletion is a wrapper for the OpenAI CreateChatCompletion method.

From addbe9477c6c4e55125824a0410f3894d46f3964 Mon Sep 17 00:00:00 2001
From: Steve Willoughby
Date: Tue, 26 Mar 2024 17:08:05 -0700
Subject: [PATCH 25/38] changelog, bump version

---
 CHANGELOG.md           | 31 +++++++++++++++++++++++++++++++
 v3/newrelic/version.go |  2 +-
 2 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a9b75e1f1..619b051cc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,34 @@
+## 3.31.0
+### Added
+ * Integration packages to instrument AI model invocations (see below).
+ * New package nrawsbedrock v1.0.0 introduced to instrument Amazon Bedrock Runtime Client API `InvokeModel` and `InvokeModelWithResponseStream` calls.
+ * New package nropenai v1.0.0 introduced to instrument OpenAI model invocations.
+
+### AI Monitoring Configuration
+New configuration options are available specific to [AI monitoring](https://docs.newrelic.com/docs/ai-monitoring/intro-to-ai-monitoring/). These settings include:
+ * `AIMonitoring.Enabled`, configured via `ConfigAIMonitoringEnabled(`_bool_`)` [default `false`]
+ * `AIMonitoring.Streaming.Enabled`, configured via `ConfigAIMonitoringStreamingEnabled(`_bool_`)` [default `true`]
+ * `AIMonitoring.RecordContent.Enabled`, configured via `ConfigAIMonitoringRecordContentEnabled(`_bool_`)` [default `true`]
+
+### AI Monitoring Public API Methods
+Two new AI monitoring related public API methods have been added, as methods of the `newrelic.Application` value returned by `newrelic.NewApplication`:
+ * [app.RecordLLMFeedbackEvent](https://pkg.go.dev/github.com/newrelic/go-agent/v3/newrelic#Application.RecordLLMFeedbackEvent)
+ * [app.SetLLMTokenCountCallback](https://pkg.go.dev/github.com/newrelic/go-agent/v3/newrelic#Application.SetLLMTokenCountCallback)
+
+### AI Monitoring
+New Relic AI monitoring is the industry’s first APM solution that provides end-to-end visibility for AI Large Language Model (LLM) applications. With AI monitoring, users can monitor, alert, and debug AI-powered applications for reliability, latency, performance, security and cost. AI monitoring also enables AI/LLM-specific insights (metrics, events, logs and traces) which can easily integrate to build advanced guardrails for enterprise security, privacy and compliance.
+
+AI monitoring offers custom-built insights and tracing for the complete lifecycle of an LLM’s prompts and responses, from raw user input to repaired/polished responses. AI monitoring provides built-in integrations with popular LLMs and components of the AI development stack. This release provides instrumentation for [OpenAI](https://pkg.go.dev/github.com/newrelic/go-agent/v3/integrations/nropenai)
+and [Bedrock](https://pkg.go.dev/github.com/newrelic/go-agent/v3/integrations/nrawsbedrock).
+
+When AI monitoring is enabled with `ConfigAIMonitoringEnabled(true)`, the agent will now capture AI LLM related data. This data will be visible under a new APM tab called AI Responses. See our [AI Monitoring documentation](https://docs.newrelic.com/docs/ai-monitoring/intro-to-ai-monitoring/) for more details.
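+
+As a minimal sketch, an application might enable these settings as shown below
+(the application name and license key handling are illustrative only):
+
+```go
+app, err := newrelic.NewApplication(
+	newrelic.ConfigAppName("Basic OpenAI App"),
+	newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")),
+	newrelic.ConfigAIMonitoringEnabled(true),
+	newrelic.ConfigAIMonitoringStreamingEnabled(true),
+	newrelic.ConfigAIMonitoringRecordContentEnabled(true),
+)
+if err != nil {
+	panic(err) // or handle the startup error as appropriate
+}
+```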
+
+### Support statement
+We use the latest version of the Go language. At minimum, you should be using no version of Go older than what is supported by the Go team themselves.
+See the [Go agent EOL Policy](https://docs.newrelic.com/docs/apm/agents/go-agent/get-started/go-agent-eol-policy/) for details about supported versions of the Go agent and third-party components.
+
+
+
 ## 3.30.0
 ### Added
 * Updated the dependency on nrsecurityagent to 1.0.0.
diff --git a/v3/newrelic/version.go b/v3/newrelic/version.go
index 5e1658b1c..4a34de8eb 100644
--- a/v3/newrelic/version.go
+++ b/v3/newrelic/version.go
@@ -11,7 +11,7 @@ import (
 
 const (
 	// Version is the full string version of this Go Agent.
-	Version = "3.30.0"
+	Version = "3.31.0"
 )
 
 var (

From 3ecd852971067fc9b73aee0c1dd00f158e1fb7e0 Mon Sep 17 00:00:00 2001
From: Mirac Kara <55501260+mirackara@users.noreply.github.com>
Date: Wed, 27 Mar 2024 12:02:57 -0500
Subject: [PATCH 26/38] Openai bug squash (#884)

* OpenAI Patches

---
 .../chatcompletionstreaming.go            |   1 +
 v3/integrations/nropenai/nropenai.go      | 452 +++++++++---------
 v3/integrations/nropenai/nropenai_test.go | 147 +++---
 3 files changed, 296 insertions(+), 304 deletions(-)

diff --git a/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go b/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go
index f43ddba70..4b0eb7265 100644
--- a/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go
+++ b/v3/integrations/nropenai/examples/chatcompletionstreaming/chatcompletionstreaming.go
@@ -34,6 +34,7 @@ func main() {
 	app, err := newrelic.NewApplication(
 		newrelic.ConfigAppName("Basic OpenAI App"),
 		newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")),
+		newrelic.ConfigDebugLogger(os.Stdout),
 		// Enable AI Monitoring
 		// NOTE - If High Security Mode is enabled, AI Monitoring will always be disabled
 		newrelic.ConfigAIMonitoringEnabled(true),
diff --git a/v3/integrations/nropenai/nropenai.go b/v3/integrations/nropenai/nropenai.go
index 8c2b6c248..da927b039 100644
--- a/v3/integrations/nropenai/nropenai.go
+++ b/v3/integrations/nropenai/nropenai.go
@@ -6,7 +6,6 @@ package nropenai
 import (
 	"context"
 	"errors"
-	"fmt"
 	"reflect"
 	"runtime/debug"
 	"strings"
@@ -54,28 +53,48 @@ type OpenAIClient interface {
 
 // Wrapper for OpenAI Configuration
 type ConfigWrapper struct {
-	Config             *openai.ClientConfig
-	LicenseKeyLastFour string
+	Config *openai.ClientConfig
 }
 
 // Wrapper for OpenAI Client with Custom Attributes that can be set for all LLM Events
 type ClientWrapper struct {
-	Client             OpenAIClient
-	LicenseKeyLastFour string
+	Client OpenAIClient
 	// Set of Custom Attributes that get tied to all LLM Events
 	CustomAttributes map[string]interface{}
 }
 
-func FormatAPIKey(apiKey string) string {
-	return "sk-" + apiKey[len(apiKey)-4:]
+// Wrapper for ChatCompletionResponse that is returned from NRCreateChatCompletion.
It also includes the TraceID of the transaction for linking a chat response with it's feedback +type ChatCompletionResponseWrapper struct { + ChatCompletionResponse openai.ChatCompletionResponse + TraceID string +} + +// Wrapper for ChatCompletionStream that is returned from NRCreateChatCompletionStream +// Contains attributes that get populated during the streaming process +type ChatCompletionStreamWrapper struct { + app *newrelic.Application + span *newrelic.Segment // active span + stream *openai.ChatCompletionStream + streamResp openai.ChatCompletionResponse + txn *newrelic.Transaction + cw *ClientWrapper + role string + model string + responseStr string + uuid string + finishReason string + StreamingData map[string]interface{} + isRoleAdded bool + TraceID string + isError bool + sequence int } // Default Config func NRDefaultConfig(authToken string) *ConfigWrapper { cfg := openai.DefaultConfig(authToken) return &ConfigWrapper{ - Config: &cfg, - LicenseKeyLastFour: FormatAPIKey(authToken), + Config: &cfg, } } @@ -83,8 +102,7 @@ func NRDefaultConfig(authToken string) *ConfigWrapper { func NRDefaultAzureConfig(apiKey, baseURL string) *ConfigWrapper { cfg := openai.DefaultAzureConfig(apiKey, baseURL) return &ConfigWrapper{ - Config: &cfg, - LicenseKeyLastFour: FormatAPIKey(apiKey), + Config: &cfg, } } @@ -92,8 +110,7 @@ func NRDefaultAzureConfig(apiKey, baseURL string) *ConfigWrapper { func NRNewClient(authToken string) *ClientWrapper { client := openai.NewClient(authToken) return &ClientWrapper{ - Client: client, - LicenseKeyLastFour: FormatAPIKey(authToken), + Client: client, } } @@ -101,8 +118,7 @@ func NRNewClient(authToken string) *ClientWrapper { func NRNewClientWithConfig(config *ConfigWrapper) *ClientWrapper { client := openai.NewClientWithConfig(*config.Config) return &ClientWrapper{ - Client: client, - LicenseKeyLastFour: config.LicenseKeyLastFour, + Client: client, } } @@ -141,28 +157,6 @@ func GetInput(any interface{}) any { } -// Wrapper for ChatCompletionResponse that is returned from NRCreateChatCompletion. 
It also includes the TraceID of the transaction for linking a chat response with it's feedback -type ChatCompletionResponseWrapper struct { - ChatCompletionResponse openai.ChatCompletionResponse - TraceID string -} - -// Wrapper for ChatCompletionStream that is returned from NRCreateChatCompletionStream -type ChatCompletionStreamWrapper struct { - app *newrelic.Application - stream *openai.ChatCompletionStream - streamResp openai.ChatCompletionResponse - responseStr string - uuid string - txn *newrelic.Transaction - cw *ClientWrapper - role string - model string - StreamingData map[string]interface{} - isRoleAdded bool - TraceID string -} - // Wrapper for Recv() method that calls the underlying stream's Recv() method func (w *ChatCompletionStreamWrapper) Recv() (openai.ChatCompletionStreamResponse, error) { response, err := w.stream.Recv() @@ -180,6 +174,11 @@ func (w *ChatCompletionStreamWrapper) Recv() (openai.ChatCompletionStreamRespons w.streamResp.Model = response.Model w.model = response.Model } + finishReason, finishReasonErr := response.Choices[0].FinishReason.MarshalJSON() + if finishReasonErr != nil { + w.isError = true + } + w.finishReason = string(finishReason) return response, nil @@ -187,9 +186,15 @@ func (w *ChatCompletionStreamWrapper) Recv() (openai.ChatCompletionStreamRespons func (w *ChatCompletionStreamWrapper) Close() { w.StreamingData["response.model"] = w.model - w.app.RecordCustomEvent("LlmChatCompletionSummary", w.StreamingData) + NRCreateChatCompletionMessageStream(w.app, uuid.MustParse(w.uuid), w, w.cw, w.sequence) + if w.isError { + w.StreamingData["error"] = true + } else { + w.StreamingData["response.choices.finish_reason"] = w.finishReason + } - NRCreateChatCompletionMessageStream(w.app, uuid.MustParse(w.uuid), w, w.cw) + w.span.End() + w.app.RecordCustomEvent("LlmChatCompletionSummary", w.StreamingData) w.txn.End() w.stream.Close() @@ -198,11 +203,15 @@ func (w *ChatCompletionStreamWrapper) Close() { // NRCreateChatCompletionSummary captures the request and response data for a chat completion request and records a custom event in New Relic. 
It also captures the completion messages // With a call to NRCreateChatCompletionMessage func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Application, cw *ClientWrapper, req openai.ChatCompletionRequest) ChatCompletionResponseWrapper { + // Start span + txn.AddAttribute("llm", true) + + chatCompletionSpan := txn.StartSegment("Llm/completion/OpenAI/CreateChatCompletion") + // Track Total time taken for the chat completion or embedding call to complete in milliseconds + // Get App Config for setting App Name Attribute - appConfig, configErr := app.Config() - if !configErr { - appConfig.AppName = "Unknown" - } + appConfig, _ := app.Config() + uuid := uuid.New() spanID := txn.GetTraceMetadata().SpanID traceID := txn.GetTraceMetadata().TraceID @@ -213,18 +222,12 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl reportStreamingDisabled() } } - // Start span - txn.AddAttribute("llm", true) - - chatCompletionSpan := txn.StartSegment("Llm/completion/OpenAI/CreateChatCompletion") - // Track Total time taken for the chat completion or embedding call to complete in milliseconds start := time.Now() resp, err := cw.Client.CreateChatCompletion( context.Background(), req, ) duration := time.Since(start).Milliseconds() - chatCompletionSpan.End() if err != nil { ChatCompletionSummaryData["error"] = true // notice error with custom attributes @@ -237,7 +240,6 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl }) } - // ratelimitLimitTokensUsageBased, ratelimitResetTokensUsageBased, and ratelimitRemainingTokensUsageBased are not in the response // Request Headers ChatCompletionSummaryData["request.temperature"] = req.Temperature ChatCompletionSummaryData["request.max_tokens"] = req.MaxTokens @@ -277,21 +279,19 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl ChatCompletionSummaryData["id"] = uuid.String() ChatCompletionSummaryData["span_id"] = spanID ChatCompletionSummaryData["trace_id"] = traceID - ChatCompletionSummaryData["api_key_last_four_digits"] = cw.LicenseKeyLastFour - ChatCompletionSummaryData["vendor"] = "OpenAI" + ChatCompletionSummaryData["vendor"] = "openai" ChatCompletionSummaryData["ingest_source"] = "Go" - ChatCompletionSummaryData["appName"] = appConfig.AppName - // Record any custom attributes if they exist ChatCompletionSummaryData = AppendCustomAttributesToEvent(cw, ChatCompletionSummaryData) // Record Custom Event app.RecordCustomEvent("LlmChatCompletionSummary", ChatCompletionSummaryData) - - // Capture request message - NRCreateChatCompletionMessageInput(txn, app, req, uuid, cw) + // Capture request message, returns a sequence of the messages already sent in the request. 
We will use that during the response message counting + sequence := NRCreateChatCompletionMessageInput(txn, app, req, uuid, cw) // Capture completion messages - NRCreateChatCompletionMessage(txn, app, resp, uuid, cw) + NRCreateChatCompletionMessage(txn, app, resp, uuid, cw, sequence) + chatCompletionSpan.End() + txn.End() return ChatCompletionResponseWrapper{ @@ -300,115 +300,67 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl } } -func NRCreateChatCompletionMessageStream(app *newrelic.Application, uuid uuid.UUID, sw *ChatCompletionStreamWrapper, cw *ClientWrapper) { +// Captures initial request messages and records a custom event in New Relic for each message +func NRCreateChatCompletionMessageInput(txn *newrelic.Transaction, app *newrelic.Application, req openai.ChatCompletionRequest, inputuuid uuid.UUID, cw *ClientWrapper) int { + sequence := 0 + for i, message := range req.Messages { + spanID := txn.GetTraceMetadata().SpanID + traceID := txn.GetTraceMetadata().TraceID - spanID := sw.txn.GetTraceMetadata().SpanID - traceID := sw.txn.GetTraceMetadata().TraceID + appCfg, _ := app.Config() + newUUID := uuid.New() + newID := newUUID.String() + integrationsupport.AddAgentAttribute(txn, "llm", "", true) - appCfg, configErr := app.Config() - if !configErr { - appCfg.AppName = "Unknown" - } - integrationsupport.AddAgentAttribute(sw.txn, "llm", "", true) - chatCompletionMessageSpan := sw.txn.StartSegment("Llm/completion/OpenAI/CreateChatCompletionMessageStream") + ChatCompletionMessageData := map[string]interface{}{} + // if the response doesn't have an ID, use the UUID from the summary + ChatCompletionMessageData["id"] = newID - ChatCompletionMessageData := map[string]interface{}{} - // if the response doesn't have an ID, use the UUID from the summary + // Response Data + ChatCompletionMessageData["response.model"] = req.Model - ChatCompletionMessageData["id"] = sw.streamResp.ID + if appCfg.AIMonitoring.RecordContent.Enabled { + ChatCompletionMessageData["content"] = message.Content + } - // Response Data - ChatCompletionMessageData["request.model"] = sw.model + ChatCompletionMessageData["role"] = message.Role + ChatCompletionMessageData["completion_id"] = inputuuid.String() - if appCfg.AIMonitoring.RecordContent.Enabled { - ChatCompletionMessageData["content"] = sw.responseStr - } + // New Relic Attributes + ChatCompletionMessageData["sequence"] = i + ChatCompletionMessageData["vendor"] = "openai" + ChatCompletionMessageData["ingest_source"] = "Go" + ChatCompletionMessageData["span_id"] = spanID + ChatCompletionMessageData["trace_id"] = traceID + contentTokens, contentCounted := app.InvokeLLMTokenCountCallback(req.Model, message.Content) - ChatCompletionMessageData["role"] = sw.role + if contentCounted && app.HasLLMTokenCountCallback() { + ChatCompletionMessageData["token_count"] = contentTokens + } - // New Relic Attributes - ChatCompletionMessageData["sequence"] = 1 - ChatCompletionMessageData["vendor"] = "OpenAI" - ChatCompletionMessageData["ingest_source"] = "Go" - ChatCompletionMessageData["span_id"] = spanID - ChatCompletionMessageData["trace_id"] = traceID - tmpMessage := openai.ChatCompletionMessage{ - Content: sw.responseStr, - Role: sw.role, - // Name is not provided in the stream response, so we don't include it in token counting - Name: "", - } - tokenCount, tokensCounted := TokenCountingHelper(app, tmpMessage, sw.model) - if tokensCounted { - ChatCompletionMessageData["token_count"] = tokenCount + // If custom attributes are set, add them to the 
data + ChatCompletionMessageData = AppendCustomAttributesToEvent(cw, ChatCompletionMessageData) + // Record Custom Event for each message + app.RecordCustomEvent("LlmChatCompletionMessage", ChatCompletionMessageData) + sequence = i } - - // If custom attributes are set, add them to the data - ChatCompletionMessageData = AppendCustomAttributesToEvent(cw, ChatCompletionMessageData) - chatCompletionMessageSpan.End() - // Record Custom Event for each message - app.RecordCustomEvent("LlmChatCompletionMessage", ChatCompletionMessageData) + return sequence } -func NRCreateChatCompletionMessageInput(txn *newrelic.Transaction, app *newrelic.Application, req openai.ChatCompletionRequest, uuid uuid.UUID, cw *ClientWrapper) { +// NRCreateChatCompletionMessage captures the completion response messages and records a custom event in New Relic for each message +func NRCreateChatCompletionMessage(txn *newrelic.Transaction, app *newrelic.Application, resp openai.ChatCompletionResponse, uuid uuid.UUID, cw *ClientWrapper, sequence int) { spanID := txn.GetTraceMetadata().SpanID traceID := txn.GetTraceMetadata().TraceID - appCfg, configErr := app.Config() - if !configErr { - appCfg.AppName = "Unknown" - } - integrationsupport.AddAgentAttribute(txn, "llm", "", true) - chatCompletionMessageSpan := txn.StartSegment("Llm/completion/OpenAI/CreateChatCompletionMessage") - - ChatCompletionMessageData := map[string]interface{}{} - // if the response doesn't have an ID, use the UUID from the summary - ChatCompletionMessageData["id"] = uuid.String() + "-0" - - // Response Data - ChatCompletionMessageData["response.model"] = req.Model - - if appCfg.AIMonitoring.RecordContent.Enabled { - ChatCompletionMessageData["content"] = req.Messages[0].Content - } - - ChatCompletionMessageData["role"] = req.Messages[0].Role - - // New Relic Attributes - ChatCompletionMessageData["sequence"] = 0 - ChatCompletionMessageData["vendor"] = "OpenAI" - ChatCompletionMessageData["ingest_source"] = "Go" - ChatCompletionMessageData["span_id"] = spanID - ChatCompletionMessageData["trace_id"] = traceID - contentTokens, contentCounted := app.InvokeLLMTokenCountCallback(req.Model, req.Messages[0].Content) - - if contentCounted && app.HasLLMTokenCountCallback() { - ChatCompletionMessageData["token_count"] = contentTokens - } - - // If custom attributes are set, add them to the data - ChatCompletionMessageData = AppendCustomAttributesToEvent(cw, ChatCompletionMessageData) - chatCompletionMessageSpan.End() - // Record Custom Event for each message - app.RecordCustomEvent("LlmChatCompletionMessage", ChatCompletionMessageData) + appCfg, _ := app.Config() -} - -// NRCreateChatCompletionMessage captures the completion messages and records a custom event in New Relic for each message -func NRCreateChatCompletionMessage(txn *newrelic.Transaction, app *newrelic.Application, resp openai.ChatCompletionResponse, uuid uuid.UUID, cw *ClientWrapper) { - spanID := txn.GetTraceMetadata().SpanID - traceID := txn.GetTraceMetadata().TraceID - appCfg, configErr := app.Config() - if !configErr { - appCfg.AppName = "Unknown" - } integrationsupport.AddAgentAttribute(txn, "llm", "", true) - chatCompletionMessageSpan := txn.StartSegment("Llm/completion/OpenAI/CreateChatCompletionMessage") + sequence += 1 for i, choice := range resp.Choices { ChatCompletionMessageData := map[string]interface{}{} // if the response doesn't have an ID, use the UUID from the summary if resp.ID == "" { - ChatCompletionMessageData["id"] = uuid.String() + "-" + fmt.Sprint(i+1) + 
ChatCompletionMessageData["id"] = uuid.String() } else { ChatCompletionMessageData["id"] = resp.ID } @@ -420,14 +372,16 @@ func NRCreateChatCompletionMessage(txn *newrelic.Transaction, app *newrelic.Appl ChatCompletionMessageData["content"] = choice.Message.Content } + ChatCompletionMessageData["completion_id"] = uuid.String() ChatCompletionMessageData["role"] = choice.Message.Role // Request Headers ChatCompletionMessageData["request_id"] = resp.Header().Get("X-Request-Id") // New Relic Attributes - ChatCompletionMessageData["sequence"] = i + 1 - ChatCompletionMessageData["vendor"] = "OpenAI" + ChatCompletionMessageData["is_response"] = true + ChatCompletionMessageData["sequence"] = sequence + i + ChatCompletionMessageData["vendor"] = "openai" ChatCompletionMessageData["ingest_source"] = "Go" ChatCompletionMessageData["span_id"] = spanID ChatCompletionMessageData["trace_id"] = traceID @@ -443,10 +397,59 @@ func NRCreateChatCompletionMessage(txn *newrelic.Transaction, app *newrelic.Appl app.RecordCustomEvent("LlmChatCompletionMessage", ChatCompletionMessageData) } +} + +func NRCreateChatCompletionMessageStream(app *newrelic.Application, uuid uuid.UUID, sw *ChatCompletionStreamWrapper, cw *ClientWrapper, sequence int) { + + spanID := sw.txn.GetTraceMetadata().SpanID + traceID := sw.txn.GetTraceMetadata().TraceID + + appCfg, _ := app.Config() + + integrationsupport.AddAgentAttribute(sw.txn, "llm", "", true) + + ChatCompletionMessageData := map[string]interface{}{} + // if the response doesn't have an ID, use the UUID from the summary + + ChatCompletionMessageData["id"] = sw.streamResp.ID + + // Response Data + ChatCompletionMessageData["request.model"] = sw.model + + if appCfg.AIMonitoring.RecordContent.Enabled { + ChatCompletionMessageData["content"] = sw.responseStr + } + + ChatCompletionMessageData["role"] = sw.role + ChatCompletionMessageData["is_response"] = true + + // New Relic Attributes + ChatCompletionMessageData["sequence"] = sequence + 1 + ChatCompletionMessageData["vendor"] = "openai" + ChatCompletionMessageData["ingest_source"] = "Go" + ChatCompletionMessageData["completion_id"] = uuid.String() + ChatCompletionMessageData["span_id"] = spanID + ChatCompletionMessageData["trace_id"] = traceID + tmpMessage := openai.ChatCompletionMessage{ + Content: sw.responseStr, + Role: sw.role, + // Name is not provided in the stream response, so we don't include it in token counting + Name: "", + } + tokenCount, tokensCounted := TokenCountingHelper(app, tmpMessage, sw.model) + if tokensCounted { + ChatCompletionMessageData["token_count"] = tokenCount + } + + // If custom attributes are set, add them to the data + ChatCompletionMessageData = AppendCustomAttributesToEvent(cw, ChatCompletionMessageData) + // Record Custom Event for each message + app.RecordCustomEvent("LlmChatCompletionMessage", ChatCompletionMessageData) - chatCompletionMessageSpan.End() } +// Calculates tokens using the LLmTokenCountCallback +// In order to calculate total tokens of a message, we need to factor in the Content, Role, and Name (if it exists) func TokenCountingHelper(app *newrelic.Application, message openai.ChatCompletionMessage, model string) (numTokens int, tokensCounted bool) { contentTokens, contentCounted := app.InvokeLLMTokenCountCallback(model, message.Content) roleTokens, roleCounted := app.InvokeLLMTokenCountCallback(model, message.Role) @@ -460,13 +463,79 @@ func TokenCountingHelper(app *newrelic.Application, message openai.ChatCompletio return numTokens, (contentCounted && roleCounted) } +func 
NRCreateChatCompletionStream(cw *ClientWrapper, ctx context.Context, req openai.ChatCompletionRequest, app *newrelic.Application) (*ChatCompletionStreamWrapper, error) { + txn := app.StartTransaction("OpenAIChatCompletionStream") + + config, _ := app.Config() + + if !config.AIMonitoring.Streaming.Enabled { + if reportStreamingDisabled != nil { + reportStreamingDisabled() + } + } + // If AI Monitoring OR AIMonitoring.Streaming is disabled, do not start a transaction but still perform the request + if !config.AIMonitoring.Enabled || !config.AIMonitoring.Streaming.Enabled { + stream, err := cw.Client.CreateChatCompletionStream(ctx, req) + if err != nil { + return &ChatCompletionStreamWrapper{stream: stream}, err + } + return &ChatCompletionStreamWrapper{stream: stream}, errAIMonitoringDisabled + } + + streamSpan := txn.StartSegment("Llm/completion/OpenAI/CreateChatCompletion") + + spanID := txn.GetTraceMetadata().SpanID + traceID := txn.GetTraceMetadata().TraceID + StreamingData := map[string]interface{}{} + uuid := uuid.New() + integrationsupport.AddAgentAttribute(txn, "llm", "", true) + start := time.Now() + stream, err := cw.Client.CreateChatCompletionStream(ctx, req) + duration := time.Since(start).Milliseconds() + + if err != nil { + StreamingData["error"] = true + txn.NoticeError(newrelic.Error{ + Message: err.Error(), + Class: "OpenAIError", + }) + txn.End() + return nil, err + } + + // Request Data + StreamingData["request.model"] = string(req.Model) + StreamingData["request.temperature"] = req.Temperature + StreamingData["request.max_tokens"] = req.MaxTokens + StreamingData["model"] = req.Model + + StreamingData["duration"] = duration + + // New Relic Attributes + StreamingData["id"] = uuid.String() + StreamingData["span_id"] = spanID + StreamingData["trace_id"] = traceID + StreamingData["vendor"] = "openai" + StreamingData["ingest_source"] = "Go" + + sequence := NRCreateChatCompletionMessageInput(txn, app, req, uuid, cw) + return &ChatCompletionStreamWrapper{ + app: app, + stream: stream, + txn: txn, + span: streamSpan, + uuid: uuid.String(), + cw: cw, + StreamingData: StreamingData, + TraceID: traceID, + sequence: sequence}, nil + +} + // NRCreateChatCompletion is a wrapper for the OpenAI CreateChatCompletion method. // If AI Monitoring is disabled, the wrapped function will still call the OpenAI CreateChatCompletion method and return the response with no New Relic instrumentation func NRCreateChatCompletion(cw *ClientWrapper, req openai.ChatCompletionRequest, app *newrelic.Application) (ChatCompletionResponseWrapper, error) { - config, cfgErr := app.Config() - if !cfgErr { - config.AppName = "Unknown" - } + config, _ := app.Config() resp := ChatCompletionResponseWrapper{} // If AI Monitoring is disabled, do not start a transaction but still perform the request @@ -489,10 +558,7 @@ func NRCreateChatCompletion(cw *ClientWrapper, req openai.ChatCompletionRequest, // NRCreateEmbedding is a wrapper for the OpenAI CreateEmbedding method. 
// If AI Monitoring is disabled, the wrapped function will still call the OpenAI CreateEmbedding method and return the response with no New Relic instrumentation func NRCreateEmbedding(cw *ClientWrapper, req openai.EmbeddingRequest, app *newrelic.Application) (openai.EmbeddingResponse, error) { - config, cfgErr := app.Config() - if !cfgErr { - config.AppName = "Unknown" - } + config, _ := app.Config() resp := openai.EmbeddingResponse{} @@ -508,6 +574,7 @@ func NRCreateEmbedding(cw *ClientWrapper, req openai.EmbeddingRequest, app *newr // Start NR Transaction txn := app.StartTransaction("OpenAIEmbedding") + embeddingSpan := txn.StartSegment("Llm/embedding/OpenAI/CreateEmbedding") spanID := txn.GetTraceMetadata().SpanID traceID := txn.GetTraceMetadata().TraceID @@ -515,7 +582,6 @@ func NRCreateEmbedding(cw *ClientWrapper, req openai.EmbeddingRequest, app *newr uuid := uuid.New() integrationsupport.AddAgentAttribute(txn, "llm", "", true) - embeddingSpan := txn.StartSegment("Llm/embedding/OpenAI/CreateEmbedding") start := time.Now() resp, err := cw.Client.CreateEmbeddings(context.Background(), req) duration := time.Since(start).Milliseconds() @@ -538,7 +604,6 @@ func NRCreateEmbedding(cw *ClientWrapper, req openai.EmbeddingRequest, app *newr } EmbeddingsData["request_id"] = resp.Header().Get("X-Request-Id") - EmbeddingsData["api_key_last_four_digits"] = cw.LicenseKeyLastFour EmbeddingsData["request.model"] = string(req.Model) EmbeddingsData["duration"] = duration @@ -566,7 +631,7 @@ func NRCreateEmbedding(cw *ClientWrapper, req openai.EmbeddingRequest, app *newr // New Relic Attributes EmbeddingsData["id"] = uuid.String() - EmbeddingsData["vendor"] = "OpenAI" + EmbeddingsData["vendor"] = "openai" EmbeddingsData["ingest_source"] = "Go" EmbeddingsData["span_id"] = spanID EmbeddingsData["trace_id"] = traceID @@ -575,68 +640,3 @@ func NRCreateEmbedding(cw *ClientWrapper, req openai.EmbeddingRequest, app *newr txn.End() return resp, nil } - -func NRCreateChatCompletionStream(cw *ClientWrapper, ctx context.Context, req openai.ChatCompletionRequest, app *newrelic.Application) (*ChatCompletionStreamWrapper, error) { - config, cfgErr := app.Config() - if !cfgErr { - config.AppName = "Unknown" - } - if !config.AIMonitoring.Streaming.Enabled { - if reportStreamingDisabled != nil { - reportStreamingDisabled() - } - } - // If AI Monitoring OR AIMonitoring.Streaming is disabled, do not start a transaction but still perform the request - if !config.AIMonitoring.Enabled || !config.AIMonitoring.Streaming.Enabled { - stream, err := cw.Client.CreateChatCompletionStream(ctx, req) - if err != nil { - - return &ChatCompletionStreamWrapper{stream: stream}, err - } - return &ChatCompletionStreamWrapper{stream: stream}, errAIMonitoringDisabled - } - - txn := app.StartTransaction("OpenAIChatCompletionStream") - spanID := txn.GetTraceMetadata().SpanID - traceID := txn.GetTraceMetadata().TraceID - StreamingData := map[string]interface{}{} - uuid := uuid.New() - integrationsupport.AddAgentAttribute(txn, "llm", "", true) - streamSpan := txn.StartSegment("Llm/completion/OpenAI/stream") - start := time.Now() - stream, err := cw.Client.CreateChatCompletionStream(ctx, req) - duration := time.Since(start).Milliseconds() - streamSpan.End() - - if err != nil { - StreamingData["error"] = true - txn.NoticeError(newrelic.Error{ - Message: err.Error(), - Class: "OpenAIError", - }) - txn.End() - return nil, err - } - - // Request Data - StreamingData["api_key_last_four_digits"] = cw.LicenseKeyLastFour - StreamingData["request.model"] 
= string(req.Model) - StreamingData["request.temperature"] = req.Temperature - StreamingData["request.max_tokens"] = req.MaxTokens - StreamingData["model"] = req.Model - - StreamingData["duration"] = duration - - // New Relic Attributes - StreamingData["id"] = uuid.String() - StreamingData["span_id"] = spanID - StreamingData["trace_id"] = traceID - StreamingData["api_key_last_four_digits"] = cw.LicenseKeyLastFour - StreamingData["vendor"] = "OpenAI" - StreamingData["ingest_source"] = "Go" - StreamingData["appName"] = config.AppName - - NRCreateChatCompletionMessageInput(txn, app, req, uuid, cw) - return &ChatCompletionStreamWrapper{app: app, stream: stream, txn: txn, uuid: uuid.String(), cw: cw, StreamingData: StreamingData, TraceID: traceID}, nil - -} diff --git a/v3/integrations/nropenai/nropenai_test.go b/v3/integrations/nropenai/nropenai_test.go index 76c762b6a..e845e9892 100644 --- a/v3/integrations/nropenai/nropenai_test.go +++ b/v3/integrations/nropenai/nropenai_test.go @@ -6,6 +6,7 @@ import ( "net/http" "testing" + "github.com/google/uuid" "github.com/newrelic/go-agent/v3/internal" "github.com/newrelic/go-agent/v3/internal/integrationsupport" "github.com/newrelic/go-agent/v3/newrelic" @@ -113,21 +114,10 @@ func (m *MockOpenAIClient) CreateChatCompletionStream(ctx context.Context, reque return m.MockCreateChatCompletionStream, m.MockCreateChatCompletionErr } -func TestFormatAPIKey(t *testing.T) { - dummyAPIKey := "sk-12345678900abcdefghijklmnop" - formattedKey := FormatAPIKey(dummyAPIKey) - if formattedKey != "sk-mnop" { - t.Errorf("Formatted API key is incorrect: expected: %s actual: %s", "sk-mnop", formattedKey) - - } -} func TestDefaultConfig(t *testing.T) { dummyAPIKey := "sk-12345678900abcdefghijklmnop" cfg := NRDefaultConfig(dummyAPIKey) // Default Values - if cfg.LicenseKeyLastFour != "sk-mnop" { - t.Errorf("API Key is incorrect: expected: %s actual: %s", "sk-mnop", cfg.LicenseKeyLastFour) - } if cfg.Config.OrgID != "" { t.Errorf("OrgID is incorrect: expected: %s actual: %s", "", cfg.Config.OrgID) } @@ -142,9 +132,6 @@ func TestDefaultConfigAzure(t *testing.T) { baseURL := "https://azure-base-url.com" cfg := NRDefaultAzureConfig(dummyAPIKey, baseURL) // Default Values - if cfg.LicenseKeyLastFour != "sk-mnop" { - t.Errorf("API Key is incorrect: expected: %s actual: %s", "sk-mnop", cfg.LicenseKeyLastFour) - } if cfg.Config.BaseURL != baseURL { t.Errorf("baseURL is incorrect: expected: %s actual: %s", baseURL, cfg.Config.BaseURL) } @@ -154,38 +141,6 @@ func TestDefaultConfigAzure(t *testing.T) { } } -func TestNRNewClient(t *testing.T) { - dummyAPIKey := "sk-12345678900abcdefghijklmnop" - client := NRNewClient(dummyAPIKey) - if client.LicenseKeyLastFour != "sk-mnop" { - t.Errorf("API Key is incorrect: expected: %s actual: %s", "sk-mnop", client.LicenseKeyLastFour) - } -} - -func TestNRNewClientWithConfigs(t *testing.T) { - // Regular Config - dummyAPIKey := "sk-12345678900abcdefghijklmnop" - cfg := NRDefaultConfig(dummyAPIKey) - client := NRNewClientWithConfig(cfg) - if client.LicenseKeyLastFour != "sk-mnop" { - t.Errorf("API Key is incorrect: expected: %s actual: %s", "sk-mnop", client.LicenseKeyLastFour) - } - // Azure Config - baseURL := "https://azure-base-url.com" - azureCfg := NRDefaultAzureConfig(dummyAPIKey, baseURL) - azureClient := NRNewClientWithConfig(azureCfg) - if azureClient.LicenseKeyLastFour != "sk-mnop" { - t.Errorf("API Key is incorrect: expected: %s actual: %s", "sk-mnop", azureClient.LicenseKeyLastFour) - } - if azureCfg.Config.BaseURL != baseURL { - 
t.Errorf("baseURL is incorrect: expected: %s actual: %s", baseURL, azureCfg.Config.BaseURL) - } - // Default Value set by openai package - if azureCfg.Config.APIType != openai.APITypeAzure { - t.Errorf("API Type is incorrect: expected: %s actual: %s", openai.APITypeAzure, azureCfg.Config.APIType) - } -} - func TestAddCustomAttributes(t *testing.T) { client := NRNewClient("sk-12345678900abcdefghijklmnop") client.AddCustomAttributes(map[string]interface{}{ @@ -208,8 +163,7 @@ func TestAddCustomAttributesIncorrectPrefix(t *testing.T) { func TestNRCreateChatCompletion(t *testing.T) { mockClient := &MockOpenAIClient{} cw := &ClientWrapper{ - Client: mockClient, - LicenseKeyLastFour: "sk-mnop", + Client: mockClient, } req := openai.ChatCompletionRequest{ Model: openai.GPT3Dot5Turbo, @@ -238,16 +192,14 @@ func TestNRCreateChatCompletion(t *testing.T) { }, UserAttributes: map[string]interface{}{ "ingest_source": "Go", - "vendor": "OpenAI", + "vendor": "openai", "model": "gpt-3.5-turbo", "id": internal.MatchAnything, "trace_id": internal.MatchAnything, "span_id": internal.MatchAnything, - "appName": "my app", "duration": 0, "response.choices.finish_reason": internal.MatchAnything, "request.temperature": 0, - "api_key_last_four_digits": "sk-mnop", "request_id": "chatcmpl-123", "request.model": "gpt-3.5-turbo", "request.max_tokens": 150, @@ -269,13 +221,14 @@ func TestNRCreateChatCompletion(t *testing.T) { "timestamp": internal.MatchAnything, }, UserAttributes: map[string]interface{}{ + "completion_id": internal.MatchAnything, "trace_id": internal.MatchAnything, "span_id": internal.MatchAnything, "id": internal.MatchAnything, "sequence": 0, "role": "user", "content": "What is 8*5", - "vendor": "OpenAI", + "vendor": "openai", "ingest_source": "Go", "response.model": "gpt-3.5-turbo", }, @@ -289,13 +242,15 @@ func TestNRCreateChatCompletion(t *testing.T) { UserAttributes: map[string]interface{}{ "trace_id": internal.MatchAnything, "span_id": internal.MatchAnything, + "completion_id": internal.MatchAnything, "id": "chatcmpl-123", "sequence": 1, "role": "assistant", "content": "\n\nHello there, how may I assist you today?", "request_id": "chatcmpl-123", - "vendor": "OpenAI", + "vendor": "openai", "ingest_source": "Go", + "is_response": true, "response.model": "gpt-3.5-turbo", }, AgentAttributes: map[string]interface{}{}, @@ -307,8 +262,7 @@ func TestNRCreateChatCompletion(t *testing.T) { func TestNRCreateChatCompletionAIMonitoringNotEnabled(t *testing.T) { mockClient := &MockOpenAIClient{} cw := &ClientWrapper{ - Client: mockClient, - LicenseKeyLastFour: "sk-mnop", + Client: mockClient, } req := openai.ChatCompletionRequest{ Model: openai.GPT3Dot5Turbo, @@ -337,8 +291,7 @@ func TestNRCreateChatCompletionAIMonitoringNotEnabled(t *testing.T) { func TestNRCreateChatCompletionError(t *testing.T) { mockClient := &MockOpenAIClient{} cw := &ClientWrapper{ - Client: mockClient, - LicenseKeyLastFour: "sk-mnop", + Client: mockClient, } req := openai.ChatCompletionRequest{ Model: openai.GPT3Dot5Turbo, @@ -365,15 +318,13 @@ func TestNRCreateChatCompletionError(t *testing.T) { UserAttributes: map[string]interface{}{ "error": true, "ingest_source": "Go", - "vendor": "OpenAI", + "vendor": "openai", "model": "gpt-3.5-turbo", "id": internal.MatchAnything, "trace_id": internal.MatchAnything, "span_id": internal.MatchAnything, - "appName": "my app", "duration": 0, "request.temperature": 0, - "api_key_last_four_digits": "sk-mnop", "request_id": "", "request.model": "gpt-3.5-turbo", "request.max_tokens": 150, @@ -395,8 +346,9 
@@ func TestNRCreateChatCompletionError(t *testing.T) { "timestamp": internal.MatchAnything, }, UserAttributes: map[string]interface{}{ + "completion_id": internal.MatchAnything, "ingest_source": "Go", - "vendor": "OpenAI", + "vendor": "openai", "id": internal.MatchAnything, "trace_id": internal.MatchAnything, "span_id": internal.MatchAnything, @@ -429,8 +381,7 @@ func TestNRCreateChatCompletionError(t *testing.T) { func TestNRCreateEmbedding(t *testing.T) { mockClient := &MockOpenAIClient{} cw := &ClientWrapper{ - Client: mockClient, - LicenseKeyLastFour: "sk-mnop", + Client: mockClient, } embeddingReq := openai.EmbeddingRequest{ Input: []string{ @@ -455,13 +406,12 @@ func TestNRCreateEmbedding(t *testing.T) { }, UserAttributes: map[string]interface{}{ "ingest_source": "Go", - "vendor": "OpenAI", + "vendor": "openai", "id": internal.MatchAnything, "trace_id": internal.MatchAnything, "span_id": internal.MatchAnything, "duration": 0, "request_id": "chatcmpl-123", - "api_key_last_four_digits": "sk-mnop", "request.model": "text-embedding-ada-002", "response.headers.llmVersion": "2020-10-01", "response.organization": "user-123", @@ -482,8 +432,7 @@ func TestNRCreateEmbedding(t *testing.T) { func TestNRCreateEmbeddingAIMonitoringNotEnabled(t *testing.T) { mockClient := &MockOpenAIClient{} cw := &ClientWrapper{ - Client: mockClient, - LicenseKeyLastFour: "sk-mnop", + Client: mockClient, } embeddingReq := openai.EmbeddingRequest{ Input: []string{ @@ -510,8 +459,7 @@ func TestNRCreateEmbeddingAIMonitoringNotEnabled(t *testing.T) { func TestNRCreateEmbeddingError(t *testing.T) { mockClient := &MockOpenAIClient{} cw := &ClientWrapper{ - Client: mockClient, - LicenseKeyLastFour: "sk-mnop", + Client: mockClient, } embeddingReq := openai.EmbeddingRequest{ Input: "testError", @@ -534,12 +482,11 @@ func TestNRCreateEmbeddingError(t *testing.T) { }, UserAttributes: map[string]interface{}{ "ingest_source": "Go", - "vendor": "OpenAI", + "vendor": "openai", "id": internal.MatchAnything, "trace_id": internal.MatchAnything, "span_id": internal.MatchAnything, "duration": 0, - "api_key_last_four_digits": "sk-mnop", "request_id": "chatcmpl-123", "request.model": "text-embedding-ada-002", "response.headers.llmVersion": "2020-10-01", @@ -575,11 +522,56 @@ func TestNRCreateEmbeddingError(t *testing.T) { }}) } +func TestNRCreateChatCompletionMessageStream(t *testing.T) { + mockStreamWrapper := ChatCompletionStreamWrapper{} + mockClient := &MockOpenAIClient{} + cw := &ClientWrapper{ + Client: mockClient, + } + + app := integrationsupport.NewTestApp(nil, newrelic.ConfigAIMonitoringEnabled(true)) + txn := app.StartTransaction("NRCreateChatCompletionMessageStream") + uuid := uuid.New() + mockStreamWrapper.txn = txn + mockStreamWrapper.finishReason = "stop" + mockStreamWrapper.uuid = uuid.String() + mockStreamWrapper.isError = false + mockStreamWrapper.responseStr = "Hello there, how may I assist you today?" 
+	mockStreamWrapper.role = openai.ChatMessageRoleAssistant
+	mockStreamWrapper.model = "gpt-3.5-turbo"
+	mockStreamWrapper.sequence = 1
+
+	NRCreateChatCompletionMessageStream(app.Application, uuid, &mockStreamWrapper, cw, 1)
+	txn.End()
+
+	app.ExpectCustomEvents(t, []internal.WantEvent{
+		{
+			Intrinsics: map[string]interface{}{
+				"type":      "LlmChatCompletionMessage",
+				"timestamp": internal.MatchAnything,
+			},
+			UserAttributes: map[string]interface{}{
+				"completion_id": internal.MatchAnything,
+				"trace_id":      internal.MatchAnything,
+				"span_id":       internal.MatchAnything,
+				"id":            internal.MatchAnything,
+				"sequence":      2,
+				"role":          "assistant",
+				"content":       "Hello there, how may I assist you today?",
+				"vendor":        "openai",
+				"ingest_source": "Go",
+				"request.model": "gpt-3.5-turbo",
+				"is_response":   true,
+			},
+			AgentAttributes: map[string]interface{}{},
+		},
+	})
+
+}
 func TestNRCreateStream(t *testing.T) {
 	mockClient := &MockOpenAIClient{}
 	cw := &ClientWrapper{
-		Client:             mockClient,
-		LicenseKeyLastFour: "sk-mnop",
+		Client: mockClient,
 	}
 	req := openai.ChatCompletionRequest{
 		Model: openai.GPT3Dot5Turbo,
@@ -605,13 +597,14 @@ func TestNRCreateStream(t *testing.T) {
 				"timestamp": internal.MatchAnything,
 			},
 			UserAttributes: map[string]interface{}{
+				"completion_id": internal.MatchAnything,
 				"trace_id":      internal.MatchAnything,
 				"span_id":       internal.MatchAnything,
 				"id":            internal.MatchAnything,
 				"sequence":      0,
 				"role":          "user",
 				"content":       "Say this is a test",
-				"vendor":        "OpenAI",
+				"vendor":        "openai",
 				"ingest_source": "Go",
 				"response.model": "gpt-3.5-turbo",
 			},
@@ -623,8 +616,7 @@ func TestNRCreateStreamAIMonitoringNotEnabled(t *testing.T) {
 	mockClient := &MockOpenAIClient{}
 	cw := &ClientWrapper{
-		Client:             mockClient,
-		LicenseKeyLastFour: "sk-mnop",
+		Client: mockClient,
 	}
 	req := openai.ChatCompletionRequest{
 		Model: openai.GPT3Dot5Turbo,
@@ -651,8 +643,7 @@ func TestNRCreateStreamError(t *testing.T) {
 	mockClient := &MockOpenAIClient{}
 	cw := &ClientWrapper{
-		Client:             mockClient,
-		LicenseKeyLastFour: "sk-mnop",
+		Client: mockClient,
 	}
 	req := openai.ChatCompletionRequest{
 		Model: openai.GPT3Dot5Turbo,

From 171a65bc8dd060801e6a98986d37ed8809bfb588 Mon Sep 17 00:00:00 2001
From: Steve Willoughby
Date: Wed, 27 Mar 2024 10:44:37 -0700
Subject: [PATCH 27/38] updated release notes

---
 CHANGELOG.md | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 619b051cc..472b040db 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,14 @@
 ## 3.31.0
 ### Added
 * Integration packages to instrument AI model invocations (see below).
-  * New package nrawsbedrock v1.0.0 introduced to instrument calls to Amazon Bedrock Runtime Client API `InvokeModel` and `InvokeModelWithResponseStream` calls.
-  * New package nropenai v1.0.0 introduced to instrument OpenAI model invocations.
+  * New package nrawsbedrock v1.0.0 introduced to instrument calls to Amazon Bedrock Runtime Client API `InvokeModel` and `InvokeModelWithResponseStream` calls. Also provides a simple one-step method that performs streaming invocations and harvests the response stream data for you.
+  * New package nropenai v1.0.0 introduced to instrument calls to OpenAI using `NRCreateChatCompletion`, `NRCreateChatCompletionStream`, and `NRCreateEmbedding` calls.
+
+### Fixed
+ * `.Ignore` was not ignoring the transaction. Fixes [Issue #845](https://github.com/newrelic/go-agent/issues/845). 
+ * Added nil error check in wrap function. Fixes [Issue #862](https://github.com/newrelic/go-agent/issues/862).
+ * `WrapBackgroundCore` background logger was not sending logs to New Relic. Fixes [Issue #859](https://github.com/newrelic/go-agent/issues/859).
+ * Corrected pgx5 integration example which caused a race condition. Thanks to @WillAbides! Fixes [Issue #855](https://github.com/newrelic/go-agent/issues/855).
 
 ### AI Monitoring Configuration
 New configuration options are available specific to [AI monitoring](). These settings include:

From cde46de104c77ed91194c7852b13a3d52b94f357 Mon Sep 17 00:00:00 2001
From: Steve Willoughby
Date: Wed, 27 Mar 2024 10:46:32 -0700
Subject: [PATCH 28/38] updated release notes

---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 472b040db..8253bde68 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,7 +11,7 @@
  * Corrected pgx5 integration example which caused a race condition. Thanks to @WillAbides! Fixes [Issue #855](https://github.com/newrelic/go-agent/issues/855).
 
 ### AI Monitoring Configuration
-New configuration options are available specific to [AI monitoring](). These settings include:
+New configuration options are available specific to AI monitoring. These settings include:
 * `AIMonitoring.Enabled`, configured via `ConfigAIMonitoringEnabled(`_bool_`)` [default `false`]
 * `AIMonitoring.Streaming.Enabled`, configured via `ConfigAIMonitoringStreamingEnabled(`_bool_`)` [default `true`]
 * `AIMonitoring.Content.Enabled`, configured via `ConfigAIMonitoringContentEnabled(`_bool_`)` [default `true`]

From 2fc1cc8c64c14cb79e4ac1c590767281759bf797 Mon Sep 17 00:00:00 2001
From: Steve Willoughby
Date: Wed, 27 Mar 2024 10:52:07 -0700
Subject: [PATCH 29/38] updated release notes

---
 CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8253bde68..434c3e13d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,10 @@
  * Added nil error check in wrap function. Fixes [Issue #862](https://github.com/newrelic/go-agent/issues/862).
  * `WrapBackgroundCore` background logger was not sending logs to New Relic. Fixes [Issue #859](https://github.com/newrelic/go-agent/issues/859).
  * Corrected pgx5 integration example which caused a race condition. Thanks to @WillAbides! Fixes [Issue #855](https://github.com/newrelic/go-agent/issues/855).
+ * Updated third-party library versions due to reported security or other supportability issues:
+   * `github.com/jackc/pgx/v5` to 5.5.4 in `nrpgx5` integration
+   * `google.golang.org/protobuf` to 1.33.0 in `nrmicro` and `nrgrpc` integrations
+   * `github.com/jackc/pgx/v4` to 4.18.2 in `nrpgx` integration
 
 ### AI Monitoring Configuration
 New configuration options are available specific to AI monitoring. 
These settings include: From 8bfcbfc7fd0f09f4952707c0758e16f5a31c0f27 Mon Sep 17 00:00:00 2001 From: Mirac Kara <55501260+mirackara@users.noreply.github.com> Date: Wed, 27 Mar 2024 13:17:36 -0500 Subject: [PATCH 30/38] Add request model to chat completion message (#885) --- v3/integrations/nropenai/nropenai.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/v3/integrations/nropenai/nropenai.go b/v3/integrations/nropenai/nropenai.go index da927b039..aed9902d9 100644 --- a/v3/integrations/nropenai/nropenai.go +++ b/v3/integrations/nropenai/nropenai.go @@ -289,7 +289,7 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl // Capture request message, returns a sequence of the messages already sent in the request. We will use that during the response message counting sequence := NRCreateChatCompletionMessageInput(txn, app, req, uuid, cw) // Capture completion messages - NRCreateChatCompletionMessage(txn, app, resp, uuid, cw, sequence) + NRCreateChatCompletionMessage(txn, app, resp, uuid, cw, sequence, req) chatCompletionSpan.End() txn.End() @@ -349,7 +349,7 @@ func NRCreateChatCompletionMessageInput(txn *newrelic.Transaction, app *newrelic } // NRCreateChatCompletionMessage captures the completion response messages and records a custom event in New Relic for each message -func NRCreateChatCompletionMessage(txn *newrelic.Transaction, app *newrelic.Application, resp openai.ChatCompletionResponse, uuid uuid.UUID, cw *ClientWrapper, sequence int) { +func NRCreateChatCompletionMessage(txn *newrelic.Transaction, app *newrelic.Application, resp openai.ChatCompletionResponse, uuid uuid.UUID, cw *ClientWrapper, sequence int, req openai.ChatCompletionRequest) { spanID := txn.GetTraceMetadata().SpanID traceID := txn.GetTraceMetadata().TraceID appCfg, _ := app.Config() @@ -365,6 +365,9 @@ func NRCreateChatCompletionMessage(txn *newrelic.Transaction, app *newrelic.Appl ChatCompletionMessageData["id"] = resp.ID } + // Request Data + ChatCompletionMessageData["request.model"] = req.Model + // Response Data ChatCompletionMessageData["response.model"] = resp.Model From 49c08a8dd0f914b8297a912b7890db5ce0009027 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Mar 2024 14:05:47 -0500 Subject: [PATCH 31/38] Bump github.com/jackc/pgx/v4 in /v3/integrations/nrpgx (#866) Bumps [github.com/jackc/pgx/v4](https://github.com/jackc/pgx) from 4.13.0 to 4.18.2. - [Changelog](https://github.com/jackc/pgx/blob/v4.18.2/CHANGELOG.md) - [Commits](https://github.com/jackc/pgx/compare/v4.13.0...v4.18.2) --- updated-dependencies: - dependency-name: github.com/jackc/pgx/v4 dependency-type: direct:production ... 
Signed-off-by: dependabot[bot]
Co-authored-by: Steve Willoughby <76975199+nr-swilloughby@users.noreply.github.com>
Co-authored-by: Emilio Garcia
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 v3/integrations/nrpgx/go.mod | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/v3/integrations/nrpgx/go.mod b/v3/integrations/nrpgx/go.mod
index 9bf8b686a..3528d3478 100644
--- a/v3/integrations/nrpgx/go.mod
+++ b/v3/integrations/nrpgx/go.mod
@@ -4,9 +4,28 @@ go 1.19
 
 require (
 	github.com/jackc/pgx v3.6.2+incompatible
-	github.com/jackc/pgx/v4 v4.13.0
+	github.com/jackc/pgx/v4 v4.18.2
 	github.com/newrelic/go-agent/v3 v3.30.0
 )
 
+require (
+	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
+	github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
+	github.com/jackc/pgconn v1.14.3 // indirect
+	github.com/jackc/pgio v1.0.0 // indirect
+	github.com/jackc/pgpassfile v1.0.0 // indirect
+	github.com/jackc/pgproto3/v2 v2.3.3 // indirect
+	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
+	github.com/jackc/pgtype v1.14.0 // indirect
+	github.com/pkg/errors v0.8.1 // indirect
+	golang.org/x/crypto v0.20.0 // indirect
+	golang.org/x/net v0.21.0 // indirect
+	golang.org/x/sys v0.17.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
+	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
+	google.golang.org/grpc v1.56.3 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
+)
 
 replace github.com/newrelic/go-agent/v3 => ../..

From dbc2a312b212034e2cf0cd70222e121a9b45bf00 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 27 Mar 2024 14:06:00 -0500
Subject: [PATCH 32/38] Bump github.com/jackc/pgx/v5 from 5.0.3 to 5.5.4 in /v3/integrations/nrpgx5 (#870)

* Bump github.com/jackc/pgx/v5 in /v3/integrations/nrpgx5

Bumps [github.com/jackc/pgx/v5](https://github.com/jackc/pgx) from 5.0.3 to 5.5.4.
- [Changelog](https://github.com/jackc/pgx/blob/master/CHANGELOG.md)
- [Commits](https://github.com/jackc/pgx/compare/v5.0.3...v5.5.4)

---
updated-dependencies:
- dependency-name: github.com/jackc/pgx/v5
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot]

* Removed indirect dependencies

---------

Signed-off-by: dependabot[bot]
Co-authored-by: Steve Willoughby <76975199+nr-swilloughby@users.noreply.github.com>
Co-authored-by: Emilio Garcia
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Mirac Kara <55501260+mirackara@users.noreply.github.com>
---
 v3/integrations/nrpgx5/go.mod | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/v3/integrations/nrpgx5/go.mod b/v3/integrations/nrpgx5/go.mod
index 554db5aaa..b740e3455 100644
--- a/v3/integrations/nrpgx5/go.mod
+++ b/v3/integrations/nrpgx5/go.mod
@@ -4,10 +4,9 @@ go 1.19
 
 require (
 	github.com/egon12/pgsnap v0.0.0-20221022154027-2847f0124ed8
-	github.com/jackc/pgx/v5 v5.0.3
+	github.com/jackc/pgx/v5 v5.5.4
 	github.com/newrelic/go-agent/v3 v3.30.0
-	github.com/stretchr/testify v1.8.0
+	github.com/stretchr/testify v1.8.1
 )
-
 replace github.com/newrelic/go-agent/v3 => ../..
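The two dependabot patches above only move nrpgx and nrpgx5 to newer pgx releases, but it helps to see how the bumped nrpgx5 integration is wired in: it hooks into pgx v5 through the connection config's Tracer field, per that integration's documented API. A minimal sketch, not part of the patch series; the DSN, app name, and license key are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jackc/pgx/v5"
	"github.com/newrelic/go-agent/v3/integrations/nrpgx5"
	"github.com/newrelic/go-agent/v3/newrelic"
)

func main() {
	app, err := newrelic.NewApplication(
		newrelic.ConfigAppName("nrpgx5 example"),             // placeholder app name
		newrelic.ConfigLicense("YOUR_NEW_RELIC_LICENSE_KEY"), // placeholder license key
	)
	if err != nil {
		log.Fatal(err)
	}

	// Attach the New Relic tracer so each query is recorded as a
	// Postgres datastore segment.
	cfg, err := pgx.ParseConfig("postgres://user:pass@localhost:5432/mydb") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	cfg.Tracer = nrpgx5.NewTracer()

	conn, err := pgx.ConnectConfig(context.Background(), cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(context.Background())

	// Segments are only recorded when the context carries a transaction.
	txn := app.StartTransaction("pgx5-query")
	ctx := newrelic.NewContext(context.Background(), txn)

	var one int
	if err := conn.QueryRow(ctx, "SELECT 1").Scan(&one); err != nil {
		log.Fatal(err)
	}
	fmt.Println(one)
	txn.End()
}

Because the tracer is attached once at config time, version bumps like the ones above stay drop-in: no query call sites change when pgx moves from 5.0.3 to 5.5.4.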
From 819313c1b642ff1804e84734aa8e8de29c4a7868 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Mar 2024 14:06:13 -0500 Subject: [PATCH 33/38] Bump google.golang.org/protobuf in /v3/integrations/nrgrpc (#868) Bumps google.golang.org/protobuf from 1.30.0 to 1.33.0. --- updated-dependencies: - dependency-name: google.golang.org/protobuf dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: Steve Willoughby <76975199+nr-swilloughby@users.noreply.github.com> Co-authored-by: Emilio Garcia Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- v3/integrations/nrgrpc/go.mod | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/v3/integrations/nrgrpc/go.mod b/v3/integrations/nrgrpc/go.mod index e6f3e5c75..e36c53b53 100644 --- a/v3/integrations/nrgrpc/go.mod +++ b/v3/integrations/nrgrpc/go.mod @@ -10,9 +10,21 @@ require ( github.com/newrelic/go-agent/v3/integrations/nrsecurityagent v1.1.0 // v1.15.0 is the earliest version of grpc using modules. google.golang.org/grpc v1.56.3 - google.golang.org/protobuf v1.30.0 + google.golang.org/protobuf v1.33.0 ) +require ( + github.com/dlclark/regexp2 v1.9.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/k2io/hookingo v1.0.5 // indirect + github.com/newrelic/csec-go-agent v1.0.0 // indirect + golang.org/x/arch v0.4.0 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) replace github.com/newrelic/go-agent/v3/integrations/nrsecurityagent => ../../integrations/nrsecurityagent From 742f90f66bee55d53eb39df53d6314b821b28ba0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Mar 2024 14:06:26 -0500 Subject: [PATCH 34/38] Bump google.golang.org/protobuf from 1.32.0 to 1.33.0 in /v3/integrations/nrmicro (#869) * Bump google.golang.org/protobuf in /v3/integrations/nrmicro Bumps google.golang.org/protobuf from 1.32.0 to 1.33.0. --- updated-dependencies: - dependency-name: google.golang.org/protobuf dependency-type: direct:production ... 
Signed-off-by: dependabot[bot]

* Removed indirect dependencies

---------

Signed-off-by: dependabot[bot]
Co-authored-by: Steve Willoughby <76975199+nr-swilloughby@users.noreply.github.com>
Co-authored-by: Emilio Garcia
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Mirac Kara <55501260+mirackara@users.noreply.github.com>
---
 v3/integrations/nrmicro/go.mod | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/v3/integrations/nrmicro/go.mod b/v3/integrations/nrmicro/go.mod
index e47bea1e0..d4d29decc 100644
--- a/v3/integrations/nrmicro/go.mod
+++ b/v3/integrations/nrmicro/go.mod
@@ -5,10 +5,10 @@ module github.com/newrelic/go-agent/v3/integrations/nrmicro
 go 1.19
 
 require (
-	github.com/golang/protobuf v1.5.3
+	github.com/golang/protobuf v1.5.4
 	github.com/micro/go-micro v1.8.0
 	github.com/newrelic/go-agent/v3 v3.30.0
-	google.golang.org/protobuf v1.32.0
+	google.golang.org/protobuf v1.33.0
 )

From b6e465e9daa5d4e0b06aaff41e7a7bb7fab9179b Mon Sep 17 00:00:00 2001
From: Mirac Kara <55501260+mirackara@users.noreply.github.com>
Date: Wed, 27 Mar 2024 14:13:23 -0500
Subject: [PATCH 35/38] Fix finish reason string formatting in NRCreateChatCompletionSummary (#886)

---
 v3/integrations/nropenai/nropenai.go | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/v3/integrations/nropenai/nropenai.go b/v3/integrations/nropenai/nropenai.go
index aed9902d9..b858aa10b 100644
--- a/v3/integrations/nropenai/nropenai.go
+++ b/v3/integrations/nropenai/nropenai.go
@@ -255,6 +255,7 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl
 
 	if len(resp.Choices) > 0 {
 		finishReason, err := resp.Choices[0].FinishReason.MarshalJSON()
+
 		if err != nil {
 			ChatCompletionSummaryData["error"] = true
 			txn.NoticeError(newrelic.Error{
@@ -262,7 +263,16 @@ func NRCreateChatCompletionSummary(txn *newrelic.Transaction, app *newrelic.Appl
 				Class:   "OpenAIError",
 			})
 		} else {
-			ChatCompletionSummaryData["response.choices.finish_reason"] = string(finishReason)
+			// strip quotes from the finish reason before setting it
+			s := string(finishReason)
+			if len(s) > 0 && s[0] == '"' {
+				s = s[1:]
+			}
+			if len(s) > 0 && s[len(s)-1] == '"' {
+				s = s[:len(s)-1]
+			}
+
+			ChatCompletionSummaryData["response.choices.finish_reason"] = s
 		}
 	}

From 471c1f3546766ba7607e98e58195ebb0d1afc4ae Mon Sep 17 00:00:00 2001
From: Steve Willoughby
Date: Wed, 27 Mar 2024 12:15:28 -0700
Subject: [PATCH 36/38] updated release notes

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 434c3e13d..90083c249 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,7 @@
 * Integration packages to instrument AI model invocations (see below).
   * New package nrawsbedrock v1.0.0 introduced to instrument calls to Amazon Bedrock Runtime Client API `InvokeModel` and `InvokeModelWithResponseStream` calls. Also provides a simple one-step method that performs streaming invocations and harvests the response stream data for you.
   * New package nropenai v1.0.0 introduced to instrument calls to OpenAI using `NRCreateChatCompletion`, `NRCreateChatCompletionStream`, and `NRCreateEmbedding` calls.
+  * Dockerfile in the `examples/server` sample app that makes it easy to build a containerized, ready-to-run sample app when that simplifies testing.
 
 ### Fixed
  * `.Ignore` was not ignoring the transaction. Fixes [Issue #845](https://github.com/newrelic/go-agent/issues/845).
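Taken together, the release notes above describe an opt-in feature: `AIMonitoring.Enabled` defaults to `false`, so the nropenai wrappers only emit Llm events once it is switched on. A minimal end-to-end sketch, not part of the patch series, assuming the `ChatCompletionResponseWrapper` exposes the raw response as its `ChatCompletionResponse` field; the app name, license key, and prompt are placeholders:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/newrelic/go-agent/v3/integrations/nropenai"
	"github.com/newrelic/go-agent/v3/newrelic"
	openai "github.com/sashabaranov/go-openai"
)

func main() {
	app, err := newrelic.NewApplication(
		newrelic.ConfigAppName("LLM example"),                // placeholder app name
		newrelic.ConfigLicense("YOUR_NEW_RELIC_LICENSE_KEY"), // placeholder license key
		// AI monitoring defaults to false, so it must be enabled explicitly.
		newrelic.ConfigAIMonitoringEnabled(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	app.WaitForConnection(10 * time.Second)

	client := nropenai.NRNewClient("YOUR_OPENAI_API_KEY") // placeholder key
	req := openai.ChatCompletionRequest{
		Model:     openai.GPT3Dot5Turbo,
		MaxTokens: 150,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "What is observability?"},
		},
	}

	// NRCreateChatCompletion records the LlmChatCompletionSummary and
	// LlmChatCompletionMessage events exercised by the tests above.
	resp, err := nropenai.NRCreateChatCompletion(client, req, app)
	if err != nil {
		log.Fatal(err)
	}
	if len(resp.ChatCompletionResponse.Choices) > 0 {
		fmt.Println(resp.ChatCompletionResponse.Choices[0].Message.Content)
	}
	app.Shutdown(10 * time.Second)
}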
From 1a002039d5b8934f96d55b4e0e06d8e57701471c Mon Sep 17 00:00:00 2001 From: Mirac Kara <55501260+mirackara@users.noreply.github.com> Date: Wed, 27 Mar 2024 14:30:23 -0500 Subject: [PATCH 37/38] Add request.model attribute to NRCreateChatCompletion test (#887) --- v3/integrations/nropenai/nropenai_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/v3/integrations/nropenai/nropenai_test.go b/v3/integrations/nropenai/nropenai_test.go index e845e9892..3fad3e7db 100644 --- a/v3/integrations/nropenai/nropenai_test.go +++ b/v3/integrations/nropenai/nropenai_test.go @@ -252,6 +252,7 @@ func TestNRCreateChatCompletion(t *testing.T) { "ingest_source": "Go", "is_response": true, "response.model": "gpt-3.5-turbo", + "request.model": "gpt-3.5-turbo", }, AgentAttributes: map[string]interface{}{}, }, From 27d840e5ad75e296f202f45c9b74b17eaea3336d Mon Sep 17 00:00:00 2001 From: Steve Willoughby Date: Wed, 27 Mar 2024 12:44:01 -0700 Subject: [PATCH 38/38] staged for release --- v3/go.mod | 7 ------ .../logcontext-v2/logWriter/go.mod | 2 +- v3/integrations/logcontext-v2/nrlogrus/go.mod | 2 +- v3/integrations/logcontext-v2/nrslog/go.mod | 3 ++- v3/integrations/logcontext-v2/nrwriter/go.mod | 2 +- v3/integrations/logcontext-v2/nrzap/go.mod | 2 +- .../logcontext-v2/nrzerolog/go.mod | 2 +- .../logcontext-v2/zerologWriter/go.mod | 2 +- .../logcontext/nrlogrusplugin/go.mod | 2 +- v3/integrations/nramqp/go.mod | 2 +- v3/integrations/nrawsbedrock/go.mod | 23 +------------------ v3/integrations/nrawssdk-v1/go.mod | 2 +- v3/integrations/nrawssdk-v2/go.mod | 2 +- v3/integrations/nrb3/go.mod | 2 +- v3/integrations/nrecho-v3/go.mod | 2 +- v3/integrations/nrecho-v4/go.mod | 2 +- v3/integrations/nrelasticsearch-v7/go.mod | 2 +- .../examples/client-fasthttp/go.mod | 2 +- .../examples/server-fasthttp/go.mod | 2 +- v3/integrations/nrfasthttp/go.mod | 2 +- v3/integrations/nrgin/go.mod | 2 +- v3/integrations/nrgorilla/go.mod | 2 +- v3/integrations/nrgraphgophers/go.mod | 2 +- v3/integrations/nrgraphqlgo/example/go.mod | 2 +- v3/integrations/nrgraphqlgo/go.mod | 2 +- v3/integrations/nrgrpc/go.mod | 14 +---------- v3/integrations/nrhttprouter/go.mod | 2 +- v3/integrations/nrlambda/go.mod | 2 +- v3/integrations/nrlogrus/go.mod | 2 +- v3/integrations/nrlogxi/go.mod | 2 +- v3/integrations/nrmicro/go.mod | 2 +- v3/integrations/nrmongo/go.mod | 2 +- v3/integrations/nrmssql/go.mod | 2 +- v3/integrations/nrmysql/go.mod | 2 +- v3/integrations/nrnats/go.mod | 2 +- v3/integrations/nrnats/test/go.mod | 2 +- v3/integrations/nropenai/go.mod | 14 ++--------- v3/integrations/nrpgx/example/sqlx/go.mod | 2 +- v3/integrations/nrpgx/go.mod | 21 +---------------- v3/integrations/nrpgx5/go.mod | 3 ++- v3/integrations/nrpkgerrors/go.mod | 2 +- v3/integrations/nrpq/example/sqlx/go.mod | 2 +- v3/integrations/nrpq/go.mod | 2 +- v3/integrations/nrredis-v7/go.mod | 2 +- v3/integrations/nrredis-v8/go.mod | 2 +- v3/integrations/nrredis-v9/go.mod | 2 +- v3/integrations/nrsarama/go.mod | 2 +- v3/integrations/nrsecurityagent/go.mod | 2 +- v3/integrations/nrsnowflake/go.mod | 2 +- v3/integrations/nrsqlite3/go.mod | 2 +- v3/integrations/nrstan/examples/go.mod | 2 +- v3/integrations/nrstan/go.mod | 2 +- v3/integrations/nrstan/test/go.mod | 2 +- v3/integrations/nrzap/go.mod | 2 +- 54 files changed, 56 insertions(+), 123 deletions(-) diff --git a/v3/go.mod b/v3/go.mod index d98016abe..c2d68c2a3 100644 --- a/v3/go.mod +++ b/v3/go.mod @@ -8,13 +8,6 @@ require ( google.golang.org/grpc v1.56.3 ) -require ( - golang.org/x/net v0.9.0 // indirect - golang.org/x/sys 
v0.7.0 // indirect - golang.org/x/text v0.9.0 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect - google.golang.org/protobuf v1.30.0 // indirect -) retract v3.22.0 // release process error corrected in v3.22.1 diff --git a/v3/integrations/logcontext-v2/logWriter/go.mod b/v3/integrations/logcontext-v2/logWriter/go.mod index ee0a9d49a..f61edf225 100644 --- a/v3/integrations/logcontext-v2/logWriter/go.mod +++ b/v3/integrations/logcontext-v2/logWriter/go.mod @@ -3,7 +3,7 @@ module github.com/newrelic/go-agent/v3/integrations/logcontext-v2/logWriter go 1.19 require ( - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrwriter v1.0.0 ) diff --git a/v3/integrations/logcontext-v2/nrlogrus/go.mod b/v3/integrations/logcontext-v2/nrlogrus/go.mod index 98ed0571f..fb1f34055 100644 --- a/v3/integrations/logcontext-v2/nrlogrus/go.mod +++ b/v3/integrations/logcontext-v2/nrlogrus/go.mod @@ -3,7 +3,7 @@ module github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrlogrus go 1.19 require ( - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 github.com/sirupsen/logrus v1.8.1 ) diff --git a/v3/integrations/logcontext-v2/nrslog/go.mod b/v3/integrations/logcontext-v2/nrslog/go.mod index c88288932..4cdd8758c 100644 --- a/v3/integrations/logcontext-v2/nrslog/go.mod +++ b/v3/integrations/logcontext-v2/nrslog/go.mod @@ -2,6 +2,7 @@ module github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrslog go 1.19 -require github.com/newrelic/go-agent/v3 v3.30.0 +require github.com/newrelic/go-agent/v3 v3.31.0 + replace github.com/newrelic/go-agent/v3 => ../../.. diff --git a/v3/integrations/logcontext-v2/nrwriter/go.mod b/v3/integrations/logcontext-v2/nrwriter/go.mod index ddaa12e0b..b0e8dfa37 100644 --- a/v3/integrations/logcontext-v2/nrwriter/go.mod +++ b/v3/integrations/logcontext-v2/nrwriter/go.mod @@ -2,7 +2,7 @@ module github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrwriter go 1.19 -require github.com/newrelic/go-agent/v3 v3.30.0 +require github.com/newrelic/go-agent/v3 v3.31.0 replace github.com/newrelic/go-agent/v3 => ../../.. 
diff --git a/v3/integrations/logcontext-v2/nrzap/go.mod b/v3/integrations/logcontext-v2/nrzap/go.mod index 745aba1fb..8eb005634 100644 --- a/v3/integrations/logcontext-v2/nrzap/go.mod +++ b/v3/integrations/logcontext-v2/nrzap/go.mod @@ -3,7 +3,7 @@ module github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzap go 1.19 require ( - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 go.uber.org/zap v1.24.0 ) diff --git a/v3/integrations/logcontext-v2/nrzerolog/go.mod b/v3/integrations/logcontext-v2/nrzerolog/go.mod index 87e64586c..b54d7233a 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/go.mod +++ b/v3/integrations/logcontext-v2/nrzerolog/go.mod @@ -3,7 +3,7 @@ module github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog go 1.19 require ( - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 github.com/rs/zerolog v1.26.1 ) diff --git a/v3/integrations/logcontext-v2/zerologWriter/go.mod b/v3/integrations/logcontext-v2/zerologWriter/go.mod index dab236cba..c7c423c18 100644 --- a/v3/integrations/logcontext-v2/zerologWriter/go.mod +++ b/v3/integrations/logcontext-v2/zerologWriter/go.mod @@ -3,7 +3,7 @@ module github.com/newrelic/go-agent/v3/integrations/logcontext-v2/zerologWriter go 1.19 require ( - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrwriter v1.0.0 github.com/rs/zerolog v1.27.0 ) diff --git a/v3/integrations/logcontext/nrlogrusplugin/go.mod b/v3/integrations/logcontext/nrlogrusplugin/go.mod index 443798ee8..358ca628b 100644 --- a/v3/integrations/logcontext/nrlogrusplugin/go.mod +++ b/v3/integrations/logcontext/nrlogrusplugin/go.mod @@ -5,7 +5,7 @@ module github.com/newrelic/go-agent/v3/integrations/logcontext/nrlogrusplugin go 1.19 require ( - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 // v1.4.0 is required for for the log.WithContext. github.com/sirupsen/logrus v1.4.0 ) diff --git a/v3/integrations/nramqp/go.mod b/v3/integrations/nramqp/go.mod index 260518338..b41a912b2 100644 --- a/v3/integrations/nramqp/go.mod +++ b/v3/integrations/nramqp/go.mod @@ -3,7 +3,7 @@ module github.com/newrelic/go-agent/v3/integrations/nramqp go 1.19 require ( - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 github.com/rabbitmq/amqp091-go v1.9.0 ) replace github.com/newrelic/go-agent/v3 => ../.. 
diff --git a/v3/integrations/nrawsbedrock/go.mod b/v3/integrations/nrawsbedrock/go.mod index ea8a78835..cab2c2de9 100644 --- a/v3/integrations/nrawsbedrock/go.mod +++ b/v3/integrations/nrawsbedrock/go.mod @@ -8,29 +8,8 @@ require ( github.com/aws/aws-sdk-go-v2/service/bedrock v1.7.3 github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.7.1 github.com/google/uuid v1.3.0 - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 ) -require ( - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.4 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 // indirect - github.com/aws/smithy-go v1.20.1 // indirect - github.com/golang/protobuf v1.5.3 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect - google.golang.org/grpc v1.56.3 // indirect - google.golang.org/protobuf v1.30.0 // indirect -) replace github.com/newrelic/go-agent/v3 => ../.. diff --git a/v3/integrations/nrawssdk-v1/go.mod b/v3/integrations/nrawssdk-v1/go.mod index 4837d9ab6..8f224f3d9 100644 --- a/v3/integrations/nrawssdk-v1/go.mod +++ b/v3/integrations/nrawssdk-v1/go.mod @@ -8,7 +8,7 @@ go 1.19 require ( // v1.15.0 is the first aws-sdk-go version with module support. github.com/aws/aws-sdk-go v1.34.0 - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 ) diff --git a/v3/integrations/nrawssdk-v2/go.mod b/v3/integrations/nrawssdk-v2/go.mod index 1597772e1..b04fac799 100644 --- a/v3/integrations/nrawssdk-v2/go.mod +++ b/v3/integrations/nrawssdk-v2/go.mod @@ -11,7 +11,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/lambda v1.24.5 github.com/aws/aws-sdk-go-v2/service/s3 v1.27.10 github.com/aws/smithy-go v1.13.3 - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 ) diff --git a/v3/integrations/nrb3/go.mod b/v3/integrations/nrb3/go.mod index 3c15e47e2..ba44d1ea6 100644 --- a/v3/integrations/nrb3/go.mod +++ b/v3/integrations/nrb3/go.mod @@ -2,7 +2,7 @@ module github.com/newrelic/go-agent/v3/integrations/nrb3 go 1.19 -require github.com/newrelic/go-agent/v3 v3.30.0 +require github.com/newrelic/go-agent/v3 v3.31.0 replace github.com/newrelic/go-agent/v3 => ../.. diff --git a/v3/integrations/nrecho-v3/go.mod b/v3/integrations/nrecho-v3/go.mod index d8758722d..76d71b4d9 100644 --- a/v3/integrations/nrecho-v3/go.mod +++ b/v3/integrations/nrecho-v3/go.mod @@ -8,7 +8,7 @@ require ( // v3.1.0 is the earliest v3 version of Echo that works with modules due // to the github.com/rsc/letsencrypt import of v3.0.0. 
github.com/labstack/echo v3.1.0+incompatible - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 ) diff --git a/v3/integrations/nrecho-v4/go.mod b/v3/integrations/nrecho-v4/go.mod index 176fdbc3e..fde8e8d4c 100644 --- a/v3/integrations/nrecho-v4/go.mod +++ b/v3/integrations/nrecho-v4/go.mod @@ -6,7 +6,7 @@ go 1.19 require ( github.com/labstack/echo/v4 v4.9.0 - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 ) diff --git a/v3/integrations/nrelasticsearch-v7/go.mod b/v3/integrations/nrelasticsearch-v7/go.mod index 7db1722de..109f83fcc 100644 --- a/v3/integrations/nrelasticsearch-v7/go.mod +++ b/v3/integrations/nrelasticsearch-v7/go.mod @@ -6,7 +6,7 @@ go 1.19 require ( github.com/elastic/go-elasticsearch/v7 v7.17.0 - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 ) diff --git a/v3/integrations/nrfasthttp/examples/client-fasthttp/go.mod b/v3/integrations/nrfasthttp/examples/client-fasthttp/go.mod index a99a17b73..f83576a4a 100644 --- a/v3/integrations/nrfasthttp/examples/client-fasthttp/go.mod +++ b/v3/integrations/nrfasthttp/examples/client-fasthttp/go.mod @@ -3,7 +3,7 @@ module client-example go 1.19 require ( - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 github.com/newrelic/go-agent/v3/integrations/nrfasthttp v1.0.0 github.com/valyala/fasthttp v1.49.0 ) diff --git a/v3/integrations/nrfasthttp/examples/server-fasthttp/go.mod b/v3/integrations/nrfasthttp/examples/server-fasthttp/go.mod index f24af19bc..ffe9afaee 100644 --- a/v3/integrations/nrfasthttp/examples/server-fasthttp/go.mod +++ b/v3/integrations/nrfasthttp/examples/server-fasthttp/go.mod @@ -3,7 +3,7 @@ module server-example go 1.19 require ( - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 github.com/newrelic/go-agent/v3/integrations/nrfasthttp v1.0.0 github.com/valyala/fasthttp v1.49.0 ) diff --git a/v3/integrations/nrfasthttp/go.mod b/v3/integrations/nrfasthttp/go.mod index 2968bff93..084624aa6 100644 --- a/v3/integrations/nrfasthttp/go.mod +++ b/v3/integrations/nrfasthttp/go.mod @@ -3,7 +3,7 @@ module github.com/newrelic/go-agent/v3/integrations/nrfasthttp go 1.19 require ( - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 github.com/valyala/fasthttp v1.49.0 ) diff --git a/v3/integrations/nrgin/go.mod b/v3/integrations/nrgin/go.mod index 9ee324ae9..151d5d70a 100644 --- a/v3/integrations/nrgin/go.mod +++ b/v3/integrations/nrgin/go.mod @@ -6,7 +6,7 @@ go 1.19 require ( github.com/gin-gonic/gin v1.9.1 - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 ) diff --git a/v3/integrations/nrgorilla/go.mod b/v3/integrations/nrgorilla/go.mod index 38ec4acd8..3f90d6488 100644 --- a/v3/integrations/nrgorilla/go.mod +++ b/v3/integrations/nrgorilla/go.mod @@ -7,7 +7,7 @@ go 1.19 require ( // v1.7.0 is the earliest version of Gorilla using modules. github.com/gorilla/mux v1.7.0 - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 ) diff --git a/v3/integrations/nrgraphgophers/go.mod b/v3/integrations/nrgraphgophers/go.mod index e57bb3496..d31aa17fe 100644 --- a/v3/integrations/nrgraphgophers/go.mod +++ b/v3/integrations/nrgraphgophers/go.mod @@ -7,7 +7,7 @@ go 1.19 require ( // graphql-go has no tagged releases as of Jan 2020. 
github.com/graph-gophers/graphql-go v1.3.0 - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 ) diff --git a/v3/integrations/nrgraphqlgo/example/go.mod b/v3/integrations/nrgraphqlgo/example/go.mod index 721e46a44..6658d1500 100644 --- a/v3/integrations/nrgraphqlgo/example/go.mod +++ b/v3/integrations/nrgraphqlgo/example/go.mod @@ -5,7 +5,7 @@ go 1.19 require ( github.com/graphql-go/graphql v0.8.1 github.com/graphql-go/graphql-go-handler v0.2.3 - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 github.com/newrelic/go-agent/v3/integrations/nrgraphqlgo v1.0.0 ) diff --git a/v3/integrations/nrgraphqlgo/go.mod b/v3/integrations/nrgraphqlgo/go.mod index 220749a52..f812ba0de 100644 --- a/v3/integrations/nrgraphqlgo/go.mod +++ b/v3/integrations/nrgraphqlgo/go.mod @@ -4,7 +4,7 @@ go 1.19 require ( github.com/graphql-go/graphql v0.8.1 - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 ) diff --git a/v3/integrations/nrgrpc/go.mod b/v3/integrations/nrgrpc/go.mod index e36c53b53..bd71b2b31 100644 --- a/v3/integrations/nrgrpc/go.mod +++ b/v3/integrations/nrgrpc/go.mod @@ -6,25 +6,13 @@ require ( // protobuf v1.3.0 is the earliest version using modules, we use v1.3.1 // because all dependencies were removed in this version. github.com/golang/protobuf v1.5.3 - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 github.com/newrelic/go-agent/v3/integrations/nrsecurityagent v1.1.0 // v1.15.0 is the earliest version of grpc using modules. google.golang.org/grpc v1.56.3 google.golang.org/protobuf v1.33.0 ) -require ( - github.com/dlclark/regexp2 v1.9.0 // indirect - github.com/gorilla/websocket v1.5.0 // indirect - github.com/k2io/hookingo v1.0.5 // indirect - github.com/newrelic/csec-go-agent v1.0.0 // indirect - golang.org/x/arch v0.4.0 // indirect - golang.org/x/net v0.9.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/text v0.9.0 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect -) replace github.com/newrelic/go-agent/v3/integrations/nrsecurityagent => ../../integrations/nrsecurityagent diff --git a/v3/integrations/nrhttprouter/go.mod b/v3/integrations/nrhttprouter/go.mod index ba2d3b551..231bd7667 100644 --- a/v3/integrations/nrhttprouter/go.mod +++ b/v3/integrations/nrhttprouter/go.mod @@ -7,7 +7,7 @@ go 1.19 require ( // v1.3.0 is the earliest version of httprouter using modules. 
github.com/julienschmidt/httprouter v1.3.0 - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 ) diff --git a/v3/integrations/nrlambda/go.mod b/v3/integrations/nrlambda/go.mod index 08f1c5d5e..e4ffb607b 100644 --- a/v3/integrations/nrlambda/go.mod +++ b/v3/integrations/nrlambda/go.mod @@ -4,7 +4,7 @@ go 1.19 require ( github.com/aws/aws-lambda-go v1.41.0 - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 ) diff --git a/v3/integrations/nrlogrus/go.mod b/v3/integrations/nrlogrus/go.mod index 8e40e94bb..dddba8e82 100644 --- a/v3/integrations/nrlogrus/go.mod +++ b/v3/integrations/nrlogrus/go.mod @@ -5,7 +5,7 @@ module github.com/newrelic/go-agent/v3/integrations/nrlogrus go 1.19 require ( - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrlogrus v1.0.0 // v1.1.0 is required for the Logger.GetLevel method, and is the earliest // version of logrus using modules. diff --git a/v3/integrations/nrlogxi/go.mod b/v3/integrations/nrlogxi/go.mod index 4996ebf02..118d30cb4 100644 --- a/v3/integrations/nrlogxi/go.mod +++ b/v3/integrations/nrlogxi/go.mod @@ -7,7 +7,7 @@ go 1.19 require ( // 'v1', at commit aebf8a7d67ab, is the only logxi release. github.com/mgutz/logxi v0.0.0-20161027140823-aebf8a7d67ab - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 ) diff --git a/v3/integrations/nrmicro/go.mod b/v3/integrations/nrmicro/go.mod index d4d29decc..d345b6d57 100644 --- a/v3/integrations/nrmicro/go.mod +++ b/v3/integrations/nrmicro/go.mod @@ -7,7 +7,7 @@ go 1.19 require ( github.com/golang/protobuf v1.5.4 github.com/micro/go-micro v1.8.0 - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 google.golang.org/protobuf v1.33.0 ) diff --git a/v3/integrations/nrmongo/go.mod b/v3/integrations/nrmongo/go.mod index c20c3adba..23103f360 100644 --- a/v3/integrations/nrmongo/go.mod +++ b/v3/integrations/nrmongo/go.mod @@ -5,7 +5,7 @@ module github.com/newrelic/go-agent/v3/integrations/nrmongo go 1.19 require ( - github.com/newrelic/go-agent/v3 v3.30.0 + github.com/newrelic/go-agent/v3 v3.31.0 // mongo-driver does not support modules as of Nov 2019. 
 	go.mongodb.org/mongo-driver v1.10.2
 )
diff --git a/v3/integrations/nrmssql/go.mod b/v3/integrations/nrmssql/go.mod
index 142e82f53..77fb20cb9 100644
--- a/v3/integrations/nrmssql/go.mod
+++ b/v3/integrations/nrmssql/go.mod
@@ -4,7 +4,7 @@ go 1.19
 
 require (
 	github.com/microsoft/go-mssqldb v0.19.0
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 )
diff --git a/v3/integrations/nrmysql/go.mod b/v3/integrations/nrmysql/go.mod
index bc7c39e99..674a4ec68 100644
--- a/v3/integrations/nrmysql/go.mod
+++ b/v3/integrations/nrmysql/go.mod
@@ -7,7 +7,7 @@ require (
 	// v1.5.0 is the first mysql version to support gomod
 	github.com/go-sql-driver/mysql v1.6.0
 	// v3.3.0 includes the new location of ParseQuery
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 )
diff --git a/v3/integrations/nrnats/go.mod b/v3/integrations/nrnats/go.mod
index 4f88ca955..380cdba69 100644
--- a/v3/integrations/nrnats/go.mod
+++ b/v3/integrations/nrnats/go.mod
@@ -7,7 +7,7 @@ go 1.19
 
 require (
 	github.com/nats-io/nats-server v1.4.1
 	github.com/nats-io/nats.go v1.28.0
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 )
diff --git a/v3/integrations/nrnats/test/go.mod b/v3/integrations/nrnats/test/go.mod
index fde2c4dd5..d390cbb67 100644
--- a/v3/integrations/nrnats/test/go.mod
+++ b/v3/integrations/nrnats/test/go.mod
@@ -8,7 +8,7 @@ replace github.com/newrelic/go-agent/v3/integrations/nrnats v1.0.0 => ../
 
 require (
 	github.com/nats-io/nats-server v1.4.1
 	github.com/nats-io/nats.go v1.17.0
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 	github.com/newrelic/go-agent/v3/integrations/nrnats v1.0.0
 )
diff --git a/v3/integrations/nropenai/go.mod b/v3/integrations/nropenai/go.mod
index a565f429e..a434d09a9 100644
--- a/v3/integrations/nropenai/go.mod
+++ b/v3/integrations/nropenai/go.mod
@@ -1,23 +1,13 @@
 module github.com/newrelic/go-agent/v3/integrations/nropenai
 
-go 1.21.0
+go 1.19
 
 require (
 	github.com/google/uuid v1.6.0
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 	github.com/pkoukk/tiktoken-go v0.1.6
 	github.com/sashabaranov/go-openai v1.20.2
 )
 
-require (
-	github.com/dlclark/regexp2 v1.10.0 // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
-	golang.org/x/net v0.9.0 // indirect
-	golang.org/x/sys v0.7.0 // indirect
-	golang.org/x/text v0.9.0 // indirect
-	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
-	google.golang.org/grpc v1.56.3 // indirect
-	google.golang.org/protobuf v1.30.0 // indirect
-)
 
 replace github.com/newrelic/go-agent/v3 => ../..
diff --git a/v3/integrations/nrpgx/example/sqlx/go.mod b/v3/integrations/nrpgx/example/sqlx/go.mod
index e45d6ce4a..797b84d3e 100644
--- a/v3/integrations/nrpgx/example/sqlx/go.mod
+++ b/v3/integrations/nrpgx/example/sqlx/go.mod
@@ -4,7 +4,7 @@ module github.com/newrelic/go-agent/v3/integrations/nrpgx/example/sqlx
 go 1.19
 require (
 	github.com/jmoiron/sqlx v1.2.0
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 	github.com/newrelic/go-agent/v3/integrations/nrpgx v0.0.0
 )
 replace github.com/newrelic/go-agent/v3/integrations/nrpgx => ../../
diff --git a/v3/integrations/nrpgx/go.mod b/v3/integrations/nrpgx/go.mod
index 3528d3478..821dd13c5 100644
--- a/v3/integrations/nrpgx/go.mod
+++ b/v3/integrations/nrpgx/go.mod
@@ -5,27 +5,8 @@ go 1.19
 require (
 	github.com/jackc/pgx v3.6.2+incompatible
 	github.com/jackc/pgx/v4 v4.18.2
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 )
 
-require (
-	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
-	github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
-	github.com/jackc/pgconn v1.14.3 // indirect
-	github.com/jackc/pgio v1.0.0 // indirect
-	github.com/jackc/pgpassfile v1.0.0 // indirect
-	github.com/jackc/pgproto3/v2 v2.3.3 // indirect
-	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
-	github.com/jackc/pgtype v1.14.0 // indirect
-	github.com/pkg/errors v0.8.1 // indirect
-	golang.org/x/crypto v0.20.0 // indirect
-	golang.org/x/net v0.21.0 // indirect
-	golang.org/x/sys v0.17.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
-	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
-	google.golang.org/grpc v1.56.3 // indirect
-	google.golang.org/protobuf v1.30.0 // indirect
-)
 
 replace github.com/newrelic/go-agent/v3 => ../..
diff --git a/v3/integrations/nrpgx5/go.mod b/v3/integrations/nrpgx5/go.mod
index b740e3455..9914ca701 100644
--- a/v3/integrations/nrpgx5/go.mod
+++ b/v3/integrations/nrpgx5/go.mod
@@ -5,8 +5,9 @@ go 1.19
 require (
 	github.com/egon12/pgsnap v0.0.0-20221022154027-2847f0124ed8
 	github.com/jackc/pgx/v5 v5.5.4
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 	github.com/stretchr/testify v1.8.1
 )
 
+
 replace github.com/newrelic/go-agent/v3 => ../..
diff --git a/v3/integrations/nrpkgerrors/go.mod b/v3/integrations/nrpkgerrors/go.mod
index e70ff4b6e..7830f0065 100644
--- a/v3/integrations/nrpkgerrors/go.mod
+++ b/v3/integrations/nrpkgerrors/go.mod
@@ -5,7 +5,7 @@ module github.com/newrelic/go-agent/v3/integrations/nrpkgerrors
 go 1.19
 
 require (
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 	// v0.8.0 was the last release in 2016, and when
 	// major development on pkg/errors stopped.
 	github.com/pkg/errors v0.8.0
diff --git a/v3/integrations/nrpq/example/sqlx/go.mod b/v3/integrations/nrpq/example/sqlx/go.mod
index 922e4fd1a..43604e893 100644
--- a/v3/integrations/nrpq/example/sqlx/go.mod
+++ b/v3/integrations/nrpq/example/sqlx/go.mod
@@ -5,7 +5,7 @@ go 1.19
 require (
 	github.com/jmoiron/sqlx v1.2.0
 	github.com/lib/pq v1.1.0
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 	github.com/newrelic/go-agent/v3/integrations/nrpq v0.0.0
 )
 replace github.com/newrelic/go-agent/v3/integrations/nrpq => ../../
diff --git a/v3/integrations/nrpq/go.mod b/v3/integrations/nrpq/go.mod
index cae881021..f2c639f4d 100644
--- a/v3/integrations/nrpq/go.mod
+++ b/v3/integrations/nrpq/go.mod
@@ -6,7 +6,7 @@ require (
 	// NewConnector dsn parsing tests expect v1.1.0 error return behavior.
 	github.com/lib/pq v1.1.0
 	// v3.3.0 includes the new location of ParseQuery
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 )
diff --git a/v3/integrations/nrredis-v7/go.mod b/v3/integrations/nrredis-v7/go.mod
index aece5ff8a..9879a1878 100644
--- a/v3/integrations/nrredis-v7/go.mod
+++ b/v3/integrations/nrredis-v7/go.mod
@@ -5,7 +5,7 @@ go 1.19
 
 require (
 	github.com/go-redis/redis/v7 v7.0.0-beta.5
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 )
diff --git a/v3/integrations/nrredis-v8/go.mod b/v3/integrations/nrredis-v8/go.mod
index 25725d257..30b446fdb 100644
--- a/v3/integrations/nrredis-v8/go.mod
+++ b/v3/integrations/nrredis-v8/go.mod
@@ -5,7 +5,7 @@ go 1.19
 
 require (
 	github.com/go-redis/redis/v8 v8.4.0
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 )
diff --git a/v3/integrations/nrredis-v9/go.mod b/v3/integrations/nrredis-v9/go.mod
index 740764936..bd52c7fb3 100644
--- a/v3/integrations/nrredis-v9/go.mod
+++ b/v3/integrations/nrredis-v9/go.mod
@@ -4,7 +4,7 @@ module github.com/newrelic/go-agent/v3/integrations/nrredis-v9
 go 1.19
 
 require (
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 	github.com/redis/go-redis/v9 v9.0.2
 )
diff --git a/v3/integrations/nrsarama/go.mod b/v3/integrations/nrsarama/go.mod
index fc0dfabfc..b3fe4825e 100644
--- a/v3/integrations/nrsarama/go.mod
+++ b/v3/integrations/nrsarama/go.mod
@@ -4,7 +4,7 @@ go 1.19
 
 require (
 	github.com/Shopify/sarama v1.38.1
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 	github.com/stretchr/testify v1.8.1
 )
diff --git a/v3/integrations/nrsecurityagent/go.mod b/v3/integrations/nrsecurityagent/go.mod
index 979a8dd7b..8e419fe8d 100644
--- a/v3/integrations/nrsecurityagent/go.mod
+++ b/v3/integrations/nrsecurityagent/go.mod
@@ -4,7 +4,7 @@ go 1.19
 
 require (
 	github.com/newrelic/csec-go-agent v1.0.0
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 	github.com/newrelic/go-agent/v3/integrations/nrsqlite3 v1.2.0
 	gopkg.in/yaml.v2 v2.4.0
 )
diff --git a/v3/integrations/nrsnowflake/go.mod b/v3/integrations/nrsnowflake/go.mod
index a92e5d9c0..3b58181da 100644
--- a/v3/integrations/nrsnowflake/go.mod
+++ b/v3/integrations/nrsnowflake/go.mod
@@ -3,7 +3,7 @@ module github.com/newrelic/go-agent/v3/integrations/nrsnowflake
 go 1.19
 
 require (
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 	github.com/snowflakedb/gosnowflake v1.6.19
 )
diff --git a/v3/integrations/nrsqlite3/go.mod b/v3/integrations/nrsqlite3/go.mod
index df13a4ea1..c930c0569 100644
--- a/v3/integrations/nrsqlite3/go.mod
+++ b/v3/integrations/nrsqlite3/go.mod
@@ -7,7 +7,7 @@ go 1.19
 require (
 	github.com/mattn/go-sqlite3 v1.0.0
 	// v3.3.0 includes the new location of ParseQuery
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 )
diff --git a/v3/integrations/nrstan/examples/go.mod b/v3/integrations/nrstan/examples/go.mod
index dc9012afd..a698c1873 100644
--- a/v3/integrations/nrstan/examples/go.mod
+++ b/v3/integrations/nrstan/examples/go.mod
@@ -3,7 +3,7 @@ module github.com/newrelic/go-agent/v3/integrations/nrstan/examples
 go 1.19
 require (
 	github.com/nats-io/stan.go v0.5.0
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 	github.com/newrelic/go-agent/v3/integrations/nrnats v0.0.0
 	github.com/newrelic/go-agent/v3/integrations/nrstan v0.0.0
 )
diff --git a/v3/integrations/nrstan/go.mod b/v3/integrations/nrstan/go.mod
index 41756b99c..af124b900 100644
--- a/v3/integrations/nrstan/go.mod
+++ b/v3/integrations/nrstan/go.mod
@@ -6,7 +6,7 @@ go 1.19
 
 require (
 	github.com/nats-io/stan.go v0.10.4
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 )
diff --git a/v3/integrations/nrstan/test/go.mod b/v3/integrations/nrstan/test/go.mod
index ab595bd27..2c6f47a70 100644
--- a/v3/integrations/nrstan/test/go.mod
+++ b/v3/integrations/nrstan/test/go.mod
@@ -7,7 +7,7 @@ go 1.19
 require (
 	github.com/nats-io/nats-streaming-server v0.25.6
 	github.com/nats-io/stan.go v0.10.4
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 	github.com/newrelic/go-agent/v3/integrations/nrstan v0.0.0
 )
diff --git a/v3/integrations/nrzap/go.mod b/v3/integrations/nrzap/go.mod
index 13cb5af27..e4bf61761 100644
--- a/v3/integrations/nrzap/go.mod
+++ b/v3/integrations/nrzap/go.mod
@@ -5,7 +5,7 @@ module github.com/newrelic/go-agent/v3/integrations/nrzap
 go 1.19
 
 require (
-	github.com/newrelic/go-agent/v3 v3.30.0
+	github.com/newrelic/go-agent/v3 v3.31.0
 	// v1.12.0 is the earliest version of zap using modules.
 	go.uber.org/zap v1.12.0
 )