feat: Added support for account level governance of AI Monitoring #2326

Merged · 6 commits · Jul 9, 2024

Changes from all commits

14 changes: 10 additions & 4 deletions lib/config/index.js
@@ -306,6 +306,12 @@ Config.prototype._fromServer = function _fromServer(params, key) {
case 'high_security':
break

// interpret AI Monitoring account setting
case 'collect_ai':
this._disableOption(params.collect_ai, 'ai_monitoring')
this.emit('change', this)
break

// always accept these settings
case 'cross_process_id':
case 'encoding_key':
@@ -1071,10 +1077,10 @@ function setFromEnv({ config, key, envVar, formatter, paths }) {
/**
* Recursively visit the nodes of the config definition and look for environment variable names, overriding any configuration values that are found.
*
* @param {object} [config=this] The current level of the configuration object.
* @param {object} [data=configDefinition] The current level of the config definition object.
* @param {Array} [paths=[]] keeps track of the nested path to properly derive the env var
* @param {number} [objectKeys=1] indicator of how many keys exist in current node to know when to remove current node after all keys are processed
* @param {object} [config] The current level of the configuration object.
* @param {object} [data] The current level of the config definition object.
* @param {Array} [paths] keeps track of the nested path to properly derive the env var
* @param {number} [objectKeys] indicator of how many keys exist in current node to know when to remove current node after all keys are processed
*/
Config.prototype._fromEnvironment = function _fromEnvironment(
config = this,
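For orientation, here is a minimal, self-contained sketch of what the new `collect_ai` branch does. It assumes `_disableOption` simply forces the local setting off when the server sends `false`; the `Config` class below is an illustrative stand-in, not the agent's real implementation.

```js
// Hypothetical stand-in illustrating the server-side `collect_ai` flow.
const { EventEmitter } = require('events')

class Config extends EventEmitter {
  constructor() {
    super()
    this.ai_monitoring = { enabled: true }
  }

  // Assumption: a server value of `false` forces the local setting off;
  // anything else leaves the local configuration untouched.
  _disableOption(serverValue, settingName) {
    if (serverValue === false) {
      this[settingName].enabled = false
    }
  }

  _fromServer(params, key) {
    if (key === 'collect_ai') {
      this._disableOption(params.collect_ai, 'ai_monitoring')
      this.emit('change', this)
    }
  }
}

const config = new Config()
config.on('change', (c) => {
  console.log('ai_monitoring.enabled is now', c.ai_monitoring.enabled)
})
config._fromServer({ collect_ai: false }, 'collect_ai')
// => ai_monitoring.enabled is now false
```

The `change` event matters because instrumentation is wired up long before the server responds; listeners, together with the record-time checks in the files below, are what let an account-level `collect_ai: false` take effect after startup.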
25 changes: 23 additions & 2 deletions lib/instrumentation/aws-sdk/v3/bedrock.js
@@ -85,8 +85,21 @@ function addLlmMeta({ agent, segment }) {
* @param {BedrockCommand} params.bedrockCommand parsed input
* @param {Error|null} params.err error from request if exists
* @param params.bedrockResponse
* @param params.shim
*/
function recordChatCompletionMessages({ agent, segment, bedrockCommand, bedrockResponse, err }) {
function recordChatCompletionMessages({
agent,
shim,
segment,
bedrockCommand,
bedrockResponse,
err
}) {
if (shouldSkipInstrumentation(agent.config) === true) {
shim.logger.debug('skipping sending of ai data')
return
}

const summary = new LlmChatCompletionSummary({
agent,
bedrockResponse,
@@ -133,12 +146,18 @@ function recordChatCompletionMessages({ agent, segment, bedrockCommand, bedrockR
*
* @param {object} params function params
* @param {object} params.agent instance of agent
* @param {object} params.shim current shim instance
* @param {object} params.segment active segment
* @param {BedrockCommand} params.bedrockCommand parsed input
* @param {Error|null} params.err error from request if exists
* @param params.bedrockResponse
*/
function recordEmbeddingMessage({ agent, segment, bedrockCommand, bedrockResponse, err }) {
function recordEmbeddingMessage({ agent, shim, segment, bedrockCommand, bedrockResponse, err }) {
if (shouldSkipInstrumentation(agent.config) === true) {
shim.logger.debug('skipping sending of ai data')
return
}

const embedding = new LlmEmbedding({
agent,
segment,
@@ -239,6 +258,7 @@ function handleResponse({ shim, err, response, segment, bedrockCommand, modelTy
if (modelType === 'completion') {
recordChatCompletionMessages({
agent,
shim,
segment,
bedrockCommand,
bedrockResponse,
@@ -247,6 +267,7 @@
} else if (modelType === 'embedding') {
recordEmbeddingMessage({
agent,
shim,
segment,
bedrockCommand,
bedrockResponse,
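Both Bedrock recorders now re-check the setting at record time instead of relying only on the check made when the instrumentation was registered. A simplified stand-in for `shouldSkipInstrumentation` (the real helper ships with the agent's instrumentation utilities and may apply additional conditions, such as feature flags):

```js
// Simplified stand-in: skip recording whenever AI Monitoring is off.
function shouldSkipInstrumentation(config) {
  return config?.ai_monitoring?.enabled !== true
}
```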
14 changes: 13 additions & 1 deletion lib/instrumentation/langchain/runnable.js
@@ -16,13 +16,14 @@ const LlmErrorMessage = require('../../llm-events/error-message')
const { DESTINATIONS } = require('../../config/attribute-filter')
const { langchainRunId } = require('../../symbols')
const { RecorderSpec } = require('../../shim/specs')
const { shouldSkipInstrumentation } = require('./common')

module.exports = function initialize(shim, langchain) {
const { agent, pkgVersion } = shim

if (common.shouldSkipInstrumentation(agent.config)) {
shim.logger.debug(
'langchain instrumentation is disabled. To enable set `config.ai_monitoring.enabled` to true'
)
return
}
@@ -186,6 +187,15 @@ function wrapNextHandler({ shim, output, segment, request, metadata, tags }) {
function recordChatCompletionEvents({ segment, messages, events, metadata, tags, err, shim }) {
const { pkgVersion, agent } = shim
segment.end()

if (shouldSkipInstrumentation(shim.agent.config) === true) {
// We need this check inside the wrapper because it is possible for monitoring
// to be disabled at the account level. In such a case, the value is set
// after the instrumentation has been initialized.
shim.logger.debug('skipping sending of ai data')
return
}

const completionSummary = new LangChainCompletionSummary({
agent,
messages,
@@ -198,6 +208,7 @@

common.recordEvent({
agent,
shim,
type: 'LlmChatCompletionSummary',
pkgVersion,
msg: completionSummary
Expand Down Expand Up @@ -266,6 +277,7 @@ function recordCompletions({ events, completionSummary, agent, segment, shim })

common.recordEvent({
agent,
shim,
type: 'LlmChatCompletionMessage',
pkgVersion: shim.pkgVersion,
msg: completionMsg
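`shim` is also threaded into `common.recordEvent` so the shared helper can log when it drops an event. A hedged sketch of what that helper plausibly looks like after this change; the aggregator call mirrors how the agent records custom events, but exact details may differ:

```js
// Illustrative sketch of the shared helper, not the agent's exact code.
// `pkgVersion` is accepted to match the call sites above; the real helper
// presumably uses it for supportability metrics.
function recordEvent({ agent, shim, type, pkgVersion, msg }) {
  if (shouldSkipInstrumentation(agent.config) === true) {
    shim.logger.debug('skipping sending of ai data')
    return
  }
  // LLM events are recorded as custom events on the agent (assumed API).
  agent.customEventAggregator.add([{ type, timestamp: Date.now() }, msg])
}
```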
11 changes: 10 additions & 1 deletion lib/instrumentation/langchain/tools.js
@@ -35,6 +35,15 @@ module.exports = function initialize(shim, tools) {
const metadata = mergeMetadata(instanceMeta, paramsMeta)
const tags = mergeTags(instanceTags, paramsTags)
segment.end()

if (shouldSkipInstrumentation(shim.agent.config) === true) {
// We need this check inside the wrapper because it is possible for monitoring
// to be disabled at the account level. In such a case, the value is set
// after the instrumentation has been initialized.
shim.logger.debug('skipping sending of ai data')
return
}

const toolEvent = new LangChainTool({
agent,
description,
@@ -47,7 +56,7 @@ module.exports = function initialize(shim, tools) {
segment,
error: err != null
})
recordEvent({ agent, type: 'LlmTool', pkgVersion, msg: toolEvent })
recordEvent({ agent, shim, type: 'LlmTool', pkgVersion, msg: toolEvent })

if (err) {
agent.errors.add(
18 changes: 14 additions & 4 deletions lib/instrumentation/langchain/vectorstore.js
@@ -23,11 +23,12 @@ const LlmErrorMessage = require('../../llm-events/error-message')
* @param {number} params.k vector search top k
* @param {object} params.output vector search documents
* @param {Agent} params.agent NR agent instance
* @param {Shim} params.shim current shim instance
* @param {TraceSegment} params.segment active segment from vector search
* @param {string} params.pkgVersion langchain version
* @param {err} params.err if it exists
* @param {Error} params.err if it exists
*/
function recordVectorSearch({ request, k, output, agent, segment, pkgVersion, err }) {
function recordVectorSearch({ request, k, output, agent, shim, segment, pkgVersion, err }) {
const vectorSearch = new LangChainVectorSearch({
agent,
segment,
@@ -37,7 +38,7 @@ function recordVectorSearch({ request, k, output, agent, segment, pkgVersion, er
error: err !== null
})

recordEvent({ agent, type: 'LlmVectorSearch', pkgVersion, msg: vectorSearch })
recordEvent({ agent, shim, type: 'LlmVectorSearch', pkgVersion, msg: vectorSearch })

output.forEach((document, sequence) => {
const vectorSearchResult = new LangChainVectorSearchResult({
@@ -51,6 +52,7 @@ function recordVectorSearch({ request, k, output, agent, segment, pkgVersion, er

recordEvent({
agent,
shim,
type: 'LlmVectorSearchResult',
pkgVersion,
msg: vectorSearchResult
@@ -97,7 +99,15 @@ module.exports = function initialize(shim, vectorstores) {
}

segment.end()
recordVectorSearch({ request, k, output, agent, segment, pkgVersion, err })
if (shouldSkipInstrumentation(shim.agent.config) === true) {
// We need this check inside the wrapper because it is possible for monitoring
// to be disabled at the account level. In such a case, the value is set
// after the instrumentation has been initialized.
shim.logger.debug('skipping sending of ai data')
return
}

recordVectorSearch({ request, k, output, agent, shim, segment, pkgVersion, err })

segment.transaction.trace.attributes.addAttribute(DESTINATIONS.TRANS_EVENT, 'llm', true)
}
23 changes: 21 additions & 2 deletions lib/instrumentation/openai.js
@@ -99,12 +99,18 @@ function addLlmMeta({ agent, segment }) {
*
* @param {object} params input params
* @param {Agent} params.agent NR agent instance
* @param {Shim} params.shim the current shim instance
* @param {TraceSegment} params.segment active segment from chat completion
* @param {object} params.request chat completion params
* @param {object} params.response chat completion response
* @param {boolean} [params.err] err if it exists
*/
function recordChatCompletionMessages({ agent, segment, request, response, err }) {
function recordChatCompletionMessages({ agent, shim, segment, request, response, err }) {
if (shouldSkipInstrumentation(agent.config, shim) === true) {
shim.logger.debug('skipping sending of ai data')
return
}

if (!response) {
// If we get an error, it is possible that `response = null`.
// In that case, we define it to be an empty object.
@@ -195,6 +201,7 @@ function instrumentStream({ agent, shim, request, response, segment }) {

recordChatCompletionMessages({
agent: shim.agent,
shim,
segment,
request,
response: chunk,
Expand All @@ -205,6 +212,7 @@ function instrumentStream({ agent, shim, request, response, segment }) {
})
}

/* eslint-disable sonarjs/cognitive-complexity */
module.exports = function initialize(agent, openai, moduleName, shim) {
if (shouldSkipInstrumentation(agent.config, shim)) {
shim.logger.debug(
@@ -268,6 +276,7 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
} else {
recordChatCompletionMessages({
agent,
shim,
segment,
request,
response,
@@ -301,10 +310,20 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
// In that case, we define it to be an empty object.
response = {}
}

segment.end()
if (shouldSkipInstrumentation(shim.agent.config, shim) === true) {
// We need this check inside the wrapper because it is possible for monitoring
// to be disabled at the account level. In such a case, the value is set
// after the instrumentation has been initialized.
shim.logger.debug('skipping sending of ai data')
return
}

response.headers = segment[openAiHeaders]
// explicitly end segment to get consistent duration
// for both LLM events and the segment
segment.end()

const embedding = new LlmEmbedding({
agent,
segment,
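In the embedding path, `segment.end()` now runs before the `LlmEmbedding` event is constructed, per the new comment, so the event and the segment report the same duration. A tiny stand-in (names illustrative) showing why the ordering matters:

```js
// Stand-in segment: its duration is only fixed once end() is called.
class Segment {
  constructor(name) {
    this.name = name
    this.start = process.hrtime.bigint()
    this.durationMs = null
  }

  end() {
    this.durationMs = Number(process.hrtime.bigint() - this.start) / 1e6
  }
}

const segment = new Segment('Llm/embedding/OpenAI/create')
// ... the awaited OpenAI call would happen here ...
segment.end() // end first: the event below sees the same, final duration
const event = { segmentName: segment.name, duration: segment.durationMs }
console.log(event)
```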
9 changes: 9 additions & 0 deletions test/unit/config/config-server-side.test.js
@@ -161,6 +161,15 @@ tap.test('when receiving server-side configuration', (t) => {
t.end()
})

t.test('should disable ai monitoring', (t) => {
config.ai_monitoring.enabled = true
t.equal(config.ai_monitoring.enabled, true)
config.onConnect({ collect_ai: false })
t.equal(config.ai_monitoring.enabled, false)

t.end()
})

t.test('should configure cross application tracing', (t) => {
config.cross_application_tracer.enabled = true

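A hypothetical companion test, not part of this diff, covering the opposite direction; it assumes `_disableOption` only reacts to an explicit `false` from the server:

```js
t.test('should not disable ai monitoring when collect_ai is true', (t) => {
  config.ai_monitoring.enabled = true
  config.onConnect({ collect_ai: true })
  t.equal(config.ai_monitoring.enabled, true)

  t.end()
})
```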
2 changes: 1 addition & 1 deletion test/unit/instrumentation/langchain/runnables.test.js
@@ -46,7 +46,7 @@ test('langchain/core/runnables unit tests', (t) => {
t.equal(shim.logger.debug.callCount, 1, 'should log 1 debug messages')
t.equal(
shim.logger.debug.args[0][0],
'langchain instrumentation is disabled. To enable set `config.ai_monitoring.enabled` to true'
)
const isWrapped = shim.isWrapped(MockRunnable.RunnableSequence.prototype.invoke)
t.equal(isWrapped, false, 'should not wrap runnable invoke')