Commit 7069335

feat: Added support for account level governance of AI Monitoring (#2326)
1 parent f8e2e8e commit 7069335

8 files changed: +101, -15 lines

lib/config/index.js

Lines changed: 10 additions & 4 deletions
@@ -306,6 +306,12 @@ Config.prototype._fromServer = function _fromServer(params, key) {
     case 'high_security':
       break

+    // interpret AI Monitoring account setting
+    case 'collect_ai':
+      this._disableOption(params.collect_ai, 'ai_monitoring')
+      this.emit('change', this)
+      break
+
     // always accept these settings
     case 'cross_process_id':
     case 'encoding_key':
@@ -1071,10 +1077,10 @@ function setFromEnv({ config, key, envVar, formatter, paths }) {
 /**
  * Recursively visit the nodes of the config definition and look for environment variable names, overriding any configuration values that are found.
  *
- * @param {object} [config=this] The current level of the configuration object.
- * @param {object} [data=configDefinition] The current level of the config definition object.
- * @param {Array} [paths=[]] keeps track of the nested path to properly derive the env var
- * @param {number} [objectKeys=1] indicator of how many keys exist in current node to know when to remove current node after all keys are processed
+ * @param {object} [config] The current level of the configuration object.
+ * @param {object} [data] The current level of the config definition object.
+ * @param {Array} [paths] keeps track of the nested path to properly derive the env var
+ * @param {number} [objectKeys] indicator of how many keys exist in current node to know when to remove current node after all keys are processed
  */
 Config.prototype._fromEnvironment = function _fromEnvironment(
   config = this,
lib/instrumentation/aws-sdk/v3/bedrock.js

Lines changed: 23 additions & 2 deletions
@@ -85,8 +85,21 @@ function addLlmMeta({ agent, segment }) {
  * @param {BedrockCommand} params.bedrockCommand parsed input
  * @param {Error|null} params.err error from request if exists
  * @param params.bedrockResponse
+ * @param params.shim
  */
-function recordChatCompletionMessages({ agent, segment, bedrockCommand, bedrockResponse, err }) {
+function recordChatCompletionMessages({
+  agent,
+  shim,
+  segment,
+  bedrockCommand,
+  bedrockResponse,
+  err
+}) {
+  if (shouldSkipInstrumentation(agent.config) === true) {
+    shim.logger.debug('skipping sending of ai data')
+    return
+  }
+
   const summary = new LlmChatCompletionSummary({
     agent,
     bedrockResponse,
@@ -133,12 +146,18 @@ function recordChatCompletionMessages({ agent, segment, bedrockCommand, bedrockR
  *
  * @param {object} params function params
  * @param {object} params.agent instance of agent
+ * @param {object} params.shim current shim instance
  * @param {object} params.segment active segment
  * @param {BedrockCommand} params.bedrockCommand parsed input
  * @param {Error|null} params.err error from request if exists
  * @param params.bedrockResponse
  */
-function recordEmbeddingMessage({ agent, segment, bedrockCommand, bedrockResponse, err }) {
+function recordEmbeddingMessage({ agent, shim, segment, bedrockCommand, bedrockResponse, err }) {
+  if (shouldSkipInstrumentation(agent.config) === true) {
+    shim.logger.debug('skipping sending of ai data')
+    return
+  }
+
   const embedding = new LlmEmbedding({
     agent,
     segment,
@@ -239,6 +258,7 @@ function handleResponse({ shim, err, response, segment, bedrockCommand, modelTyp
   if (modelType === 'completion') {
     recordChatCompletionMessages({
       agent,
+      shim,
       segment,
       bedrockCommand,
       bedrockResponse,
@@ -247,6 +267,7 @@ function handleResponse({ shim, err, response, segment, bedrockCommand, modelTyp
   } else if (modelType === 'embedding') {
     recordEmbeddingMessage({
       agent,
+      shim,
       segment,
       bedrockCommand,
       bedrockResponse,
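
The shouldSkipInstrumentation guard is imported from elsewhere and is not part of this diff. Given that it is called here with the config alone, a plausible shape is sketched below; this is an assumption for illustration, not the helper's verbatim body:

// Assumed shape of the guard: AI data is skipped whenever AI Monitoring is
// off, which after this commit can also be forced by the account-level
// `collect_ai` server setting.
function shouldSkipInstrumentation(config) {
  return config?.ai_monitoring?.enabled !== true
}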

lib/instrumentation/langchain/runnable.js

Lines changed: 13 additions & 1 deletion
@@ -16,13 +16,14 @@ const LlmErrorMessage = require('../../llm-events/error-message')
 const { DESTINATIONS } = require('../../config/attribute-filter')
 const { langchainRunId } = require('../../symbols')
 const { RecorderSpec } = require('../../shim/specs')
+const { shouldSkipInstrumentation } = require('./common')

 module.exports = function initialize(shim, langchain) {
   const { agent, pkgVersion } = shim

   if (common.shouldSkipInstrumentation(agent.config)) {
     shim.logger.debug(
-    'langchain instrumentation is disabled. To enable set `config.ai_monitoring.enabled` to true'
+      'langchain instrumentation is disabled. To enable set `config.ai_monitoring.enabled` to true'
     )
     return
   }
@@ -186,6 +187,15 @@ function wrapNextHandler({ shim, output, segment, request, metadata, tags }) {
 function recordChatCompletionEvents({ segment, messages, events, metadata, tags, err, shim }) {
   const { pkgVersion, agent } = shim
   segment.end()
+
+  if (shouldSkipInstrumentation(shim.agent.config) === true) {
+    // We need this check inside the wrapper because it is possible for monitoring
+    // to be disabled at the account level. In such a case, the value is set
+    // after the instrumentation has been initialized.
+    shim.logger.debug('skipping sending of ai data')
+    return
+  }
+
   const completionSummary = new LangChainCompletionSummary({
     agent,
     messages,
@@ -198,6 +208,7 @@ function recordChatCompletionEvents({ segment, messages, events, metadata, tags,

   common.recordEvent({
     agent,
+    shim,
     type: 'LlmChatCompletionSummary',
     pkgVersion,
     msg: completionSummary
@@ -266,6 +277,7 @@ function recordCompletions({ events, completionSummary, agent, segment, shim })

   common.recordEvent({
     agent,
+    shim,
     type: 'LlmChatCompletionMessage',
     pkgVersion: shim.pkgVersion,
     msg: completionMsg
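
The inline comment captures the ordering problem this commit solves: initialize runs once at module load, while the account-level flag only arrives when the agent connects. A self-contained sketch of that timeline (all names hypothetical, not agent code):

// Hypothetical timeline: why the guard must run per call, not at initialize time.
const config = { ai_monitoring: { enabled: true } }

function initialize() {
  // Runs once, while AI Monitoring is still enabled, so the wrapper is installed.
  return function wrappedInvoke() {
    if (config.ai_monitoring.enabled !== true) {
      return 'skipping sending of ai data' // a late flip is caught on every call
    }
    return 'recording LlmChatCompletionSummary'
  }
}

const invoke = initialize()
console.log(invoke()) // recording LlmChatCompletionSummary

// Later, server-side configuration arrives with collect_ai: false:
config.ai_monitoring.enabled = false
console.log(invoke()) // skipping sending of ai data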

lib/instrumentation/langchain/tools.js

Lines changed: 10 additions & 1 deletion
@@ -35,6 +35,15 @@ module.exports = function initialize(shim, tools) {
   const metadata = mergeMetadata(instanceMeta, paramsMeta)
   const tags = mergeTags(instanceTags, paramsTags)
   segment.end()
+
+  if (shouldSkipInstrumentation(shim.agent.config) === true) {
+    // We need this check inside the wrapper because it is possible for monitoring
+    // to be disabled at the account level. In such a case, the value is set
+    // after the instrumentation has been initialized.
+    shim.logger.debug('skipping sending of ai data')
+    return
+  }
+
   const toolEvent = new LangChainTool({
     agent,
     description,
@@ -47,7 +56,7 @@ module.exports = function initialize(shim, tools) {
     segment,
     error: err != null
   })
-  recordEvent({ agent, type: 'LlmTool', pkgVersion, msg: toolEvent })
+  recordEvent({ agent, shim, type: 'LlmTool', pkgVersion, msg: toolEvent })

   if (err) {
     agent.errors.add(

lib/instrumentation/langchain/vectorstore.js

Lines changed: 14 additions & 4 deletions
@@ -23,11 +23,12 @@ const LlmErrorMessage = require('../../llm-events/error-message')
  * @param {number} params.k vector search top k
  * @param {object} params.output vector search documents
  * @param {Agent} params.agent NR agent instance
+ * @param {Shim} params.shim current shim instance
  * @param {TraceSegment} params.segment active segment from vector search
  * @param {string} params.pkgVersion langchain version
- * @param {err} params.err if it exists
+ * @param {Error} params.err if it exists
  */
-function recordVectorSearch({ request, k, output, agent, segment, pkgVersion, err }) {
+function recordVectorSearch({ request, k, output, agent, shim, segment, pkgVersion, err }) {
   const vectorSearch = new LangChainVectorSearch({
     agent,
     segment,
@@ -37,7 +38,7 @@ function recordVectorSearch({ request, k, output, agent, segment, pkgVersion, er
     error: err !== null
   })

-  recordEvent({ agent, type: 'LlmVectorSearch', pkgVersion, msg: vectorSearch })
+  recordEvent({ agent, shim, type: 'LlmVectorSearch', pkgVersion, msg: vectorSearch })

   output.forEach((document, sequence) => {
     const vectorSearchResult = new LangChainVectorSearchResult({
@@ -51,6 +52,7 @@ function recordVectorSearch({ request, k, output, agent, segment, pkgVersion, er

     recordEvent({
       agent,
+      shim,
       type: 'LlmVectorSearchResult',
       pkgVersion,
       msg: vectorSearchResult
@@ -97,7 +99,15 @@ module.exports = function initialize(shim, vectorstores) {
     }

     segment.end()
-    recordVectorSearch({ request, k, output, agent, segment, pkgVersion, err })
+    if (shouldSkipInstrumentation(shim.agent.config) === true) {
+      // We need this check inside the wrapper because it is possible for monitoring
+      // to be disabled at the account level. In such a case, the value is set
+      // after the instrumentation has been initialized.
+      shim.logger.debug('skipping sending of ai data')
+      return
+    }
+
+    recordVectorSearch({ request, k, output, agent, shim, segment, pkgVersion, err })

     segment.transaction.trace.attributes.addAttribute(DESTINATIONS.TRANS_EVENT, 'llm', true)
   }
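
Across runnable.js, tools.js, and this file, the commit threads shim into every recordEvent call. That helper lives in langchain/common.js and its body is not shown in this diff; presumably it now applies the same guard itself and needs shim.logger for the debug line. A sketch under that assumption:

// Assumed update to common.recordEvent (the helper is not part of this diff).
function recordEvent({ agent, shim, type, pkgVersion, msg }) {
  if (shouldSkipInstrumentation(agent.config) === true) {
    shim.logger.debug('skipping sending of ai data')
    return
  }
  // ...otherwise serialize `msg` and enqueue it as a custom event of `type`,
  // tagged with the instrumented package's `pkgVersion`.
}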

lib/instrumentation/openai.js

Lines changed: 21 additions & 2 deletions
@@ -99,12 +99,18 @@ function addLlmMeta({ agent, segment }) {
  *
  * @param {object} params input params
  * @param {Agent} params.agent NR agent instance
+ * @param {Shim} params.shim the current shim instance
  * @param {TraceSegment} params.segment active segment from chat completion
  * @param {object} params.request chat completion params
  * @param {object} params.response chat completion response
  * @param {boolean} [params.err] err if it exists
  */
-function recordChatCompletionMessages({ agent, segment, request, response, err }) {
+function recordChatCompletionMessages({ agent, shim, segment, request, response, err }) {
+  if (shouldSkipInstrumentation(agent.config, shim) === true) {
+    shim.logger.debug('skipping sending of ai data')
+    return
+  }
+
   if (!response) {
     // If we get an error, it is possible that `response = null`.
     // In that case, we define it to be an empty object.
@@ -195,6 +201,7 @@ function instrumentStream({ agent, shim, request, response, segment }) {

       recordChatCompletionMessages({
         agent: shim.agent,
+        shim,
         segment,
         request,
         response: chunk,
@@ -205,6 +212,7 @@ function instrumentStream({ agent, shim, request, response, segment }) {
   })
 }

+/* eslint-disable sonarjs/cognitive-complexity */
 module.exports = function initialize(agent, openai, moduleName, shim) {
   if (shouldSkipInstrumentation(agent.config, shim)) {
     shim.logger.debug(
@@ -268,6 +276,7 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
     } else {
       recordChatCompletionMessages({
         agent,
+        shim,
         segment,
         request,
         response,
@@ -301,10 +310,20 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
        // In that case, we define it to be an empty object.
        response = {}
      }
+
+      segment.end()
+      if (shouldSkipInstrumentation(shim.agent.config, shim) === true) {
+        // We need this check inside the wrapper because it is possible for monitoring
+        // to be disabled at the account level. In such a case, the value is set
+        // after the instrumentation has been initialized.
+        shim.logger.debug('skipping sending of ai data')
+        return
+      }
+
      response.headers = segment[openAiHeaders]
      // explicitly end segment to get consistent duration
      // for both LLM events and the segment
-      segment.end()
+
      const embedding = new LlmEmbedding({
        agent,
        segment,
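
Note the reordering in the embeddings handler above: segment.end() now runs before the guard, so the segment keeps a consistent duration whether or not the AI payload is sent; only the LlmEmbedding event is dropped. Condensed control flow (simplified from the hunk above; the wrapper name is hypothetical):

// Simplified view of the patched embeddings end-handler.
function onEmbeddingsEnd({ shim, segment, response }) {
  segment.end() // always end first, for a consistent segment duration
  if (shouldSkipInstrumentation(shim.agent.config, shim) === true) {
    shim.logger.debug('skipping sending of ai data')
    return // the segment survives; only the LlmEmbedding event is skipped
  }
  // ...attach response.headers from the segment and record the LlmEmbedding
}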

test/unit/config/config-server-side.test.js

Lines changed: 9 additions & 0 deletions
@@ -161,6 +161,15 @@ tap.test('when receiving server-side configuration', (t) => {
     t.end()
   })

+  t.test('should disable ai monitoring', (t) => {
+    config.ai_monitoring.enabled = true
+    t.equal(config.ai_monitoring.enabled, true)
+    config.onConnect({ collect_ai: false })
+    t.equal(config.ai_monitoring.enabled, false)
+
+    t.end()
+  })
+
   t.test('should configure cross application tracing', (t) => {
     config.cross_application_tracer.enabled = true

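One behavior the new test leaves implicit: _disableOption reads as a one-way switch, so a truthy collect_ai from the server should not re-enable monitoring that was disabled locally. A hypothetical companion assertion under that assumption (not part of this commit):

// Hypothetical companion test, assuming _disableOption never force-enables.
t.test('should not enable ai monitoring when disabled locally', (t) => {
  config.ai_monitoring.enabled = false
  config.onConnect({ collect_ai: true })
  t.equal(config.ai_monitoring.enabled, false)
  t.end()
})
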
test/unit/instrumentation/langchain/runnables.test.js

Lines changed: 1 addition & 1 deletion
@@ -46,7 +46,7 @@ test('langchain/core/runnables unit tests', (t) => {
     t.equal(shim.logger.debug.callCount, 1, 'should log 1 debug messages')
     t.equal(
       shim.logger.debug.args[0][0],
-    'langchain instrumentation is disabled. To enable set `config.ai_monitoring.enabled` to true'
+      'langchain instrumentation is disabled. To enable set `config.ai_monitoring.enabled` to true'
     )
     const isWrapped = shim.isWrapped(MockRunnable.RunnableSequence.prototype.invoke)
     t.equal(isWrapped, false, 'should not wrap runnable invoke')
