Skip to content

Commit 84c265f

Browse files
committed
feat: Update span name and op as per new conventions
1 parent 704af3c commit 84c265f

File tree

4 files changed

+294
-93
lines changed

4 files changed

+294
-93
lines changed

dev-packages/node-integration-tests/suites/tracing/vercelai/scenario.mjs

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import * as Sentry from '@sentry/node';
22
import { generateText } from 'ai';
33
import { MockLanguageModelV1 } from 'ai/test';
4+
import { z } from 'zod';
45

56
async function run() {
67
await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
@@ -30,6 +31,35 @@ async function run() {
3031
prompt: 'Where is the second span?',
3132
});
3233

34+
// This span should include tool calls and tool results
35+
await generateText({
36+
model: new MockLanguageModelV1({
37+
doGenerate: async () => ({
38+
rawCall: { rawPrompt: null, rawSettings: {} },
39+
finishReason: 'tool-calls',
40+
usage: { promptTokens: 15, completionTokens: 25 },
41+
text: 'Tool call completed!',
42+
toolCalls: [
43+
{
44+
toolCallType: 'function',
45+
toolCallId: 'call-1',
46+
toolName: 'getWeather',
47+
args: '{ "location": "San Francisco" }',
48+
},
49+
],
50+
}),
51+
}),
52+
tools: {
53+
getWeather: {
54+
parameters: z.object({ location: z.string() }),
55+
execute: async args => {
56+
return `Weather in ${args.location}: Sunny, 72°F`;
57+
},
58+
},
59+
},
60+
prompt: 'What is the weather in San Francisco?',
61+
});
62+
3363
// This span should not be captured because we've disabled telemetry
3464
await generateText({
3565
experimental_telemetry: { isEnabled: false },

dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts

Lines changed: 170 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -26,19 +26,19 @@ describe('Vercel AI integration', () => {
2626
'gen_ai.usage.output_tokens': 20,
2727
'gen_ai.usage.total_tokens': 30,
2828
'operation.name': 'ai.generateText',
29-
'sentry.op': 'ai.pipeline.generateText',
29+
'sentry.op': 'ai.pipeline.generate_text',
3030
'sentry.origin': 'auto.vercelai.otel',
3131
},
3232
description: 'generateText',
33-
op: 'ai.pipeline.generateText',
33+
op: 'ai.pipeline.generate_text',
3434
origin: 'auto.vercelai.otel',
3535
status: 'ok',
3636
}),
3737
// Second span - telemetry explicitly enabled but recordInputs/recordOutputs not set, so inputs/outputs should not be recorded when sendDefaultPii: false
3838
expect.objectContaining({
3939
data: {
4040
'sentry.origin': 'auto.vercelai.otel',
41-
'sentry.op': 'ai.run.doGenerate',
41+
'sentry.op': 'gen_ai.generate_text',
4242
'operation.name': 'ai.generateText.doGenerate',
4343
'ai.operationId': 'ai.generateText.doGenerate',
4444
'ai.model.provider': 'mock-provider',
@@ -59,8 +59,8 @@ describe('Vercel AI integration', () => {
5959
'gen_ai.response.model': 'mock-model-id',
6060
'gen_ai.usage.total_tokens': 30,
6161
},
62-
description: 'generateText.doGenerate',
63-
op: 'ai.run.doGenerate',
62+
description: 'generate_text mock-model-id',
63+
op: 'gen_ai.generate_text',
6464
origin: 'auto.vercelai.otel',
6565
status: 'ok',
6666
}),
@@ -83,19 +83,19 @@ describe('Vercel AI integration', () => {
8383
'gen_ai.usage.output_tokens': 20,
8484
'gen_ai.usage.total_tokens': 30,
8585
'operation.name': 'ai.generateText',
86-
'sentry.op': 'ai.pipeline.generateText',
86+
'sentry.op': 'ai.pipeline.generate_text',
8787
'sentry.origin': 'auto.vercelai.otel',
8888
},
8989
description: 'generateText',
90-
op: 'ai.pipeline.generateText',
90+
op: 'ai.pipeline.generate_text',
9191
origin: 'auto.vercelai.otel',
9292
status: 'ok',
9393
}),
9494
// Fourth span - doGenerate for explicitly enabled telemetry call
9595
expect.objectContaining({
9696
data: {
9797
'sentry.origin': 'auto.vercelai.otel',
98-
'sentry.op': 'ai.run.doGenerate',
98+
'sentry.op': 'gen_ai.generate_text',
9999
'operation.name': 'ai.generateText.doGenerate',
100100
'ai.operationId': 'ai.generateText.doGenerate',
101101
'ai.model.provider': 'mock-provider',
@@ -119,8 +119,77 @@ describe('Vercel AI integration', () => {
119119
'gen_ai.response.model': 'mock-model-id',
120120
'gen_ai.usage.total_tokens': 30,
121121
},
122-
description: 'generateText.doGenerate',
123-
op: 'ai.run.doGenerate',
122+
description: 'generate_text mock-model-id',
123+
op: 'gen_ai.generate_text',
124+
origin: 'auto.vercelai.otel',
125+
status: 'ok',
126+
}),
127+
// Fifth span - tool call generateText span
128+
expect.objectContaining({
129+
data: {
130+
'ai.model.id': 'mock-model-id',
131+
'ai.model.provider': 'mock-provider',
132+
'ai.operationId': 'ai.generateText',
133+
'ai.pipeline.name': 'generateText',
134+
'ai.response.finishReason': 'tool-calls',
135+
'ai.settings.maxRetries': 2,
136+
'ai.settings.maxSteps': 1,
137+
'ai.streaming': false,
138+
'gen_ai.response.model': 'mock-model-id',
139+
'gen_ai.usage.input_tokens': 15,
140+
'gen_ai.usage.output_tokens': 25,
141+
'gen_ai.usage.total_tokens': 40,
142+
'operation.name': 'ai.generateText',
143+
'sentry.op': 'ai.pipeline.generate_text',
144+
'sentry.origin': 'auto.vercelai.otel',
145+
},
146+
description: 'generateText',
147+
op: 'ai.pipeline.generate_text',
148+
origin: 'auto.vercelai.otel',
149+
status: 'ok',
150+
}),
151+
// Sixth span - tool call doGenerate span
152+
expect.objectContaining({
153+
data: {
154+
'ai.model.id': 'mock-model-id',
155+
'ai.model.provider': 'mock-provider',
156+
'ai.operationId': 'ai.generateText.doGenerate',
157+
'ai.pipeline.name': 'generateText.doGenerate',
158+
'ai.response.finishReason': 'tool-calls',
159+
'ai.response.id': expect.any(String),
160+
'ai.response.model': 'mock-model-id',
161+
'ai.response.timestamp': expect.any(String),
162+
'ai.settings.maxRetries': 2,
163+
'ai.streaming': false,
164+
'gen_ai.request.model': 'mock-model-id',
165+
'gen_ai.response.finish_reasons': ['tool-calls'],
166+
'gen_ai.response.id': expect.any(String),
167+
'gen_ai.response.model': 'mock-model-id',
168+
'gen_ai.system': 'mock-provider',
169+
'gen_ai.usage.input_tokens': 15,
170+
'gen_ai.usage.output_tokens': 25,
171+
'gen_ai.usage.total_tokens': 40,
172+
'operation.name': 'ai.generateText.doGenerate',
173+
'sentry.op': 'gen_ai.generate_text',
174+
'sentry.origin': 'auto.vercelai.otel',
175+
},
176+
description: 'generate_text mock-model-id',
177+
op: 'gen_ai.generate_text',
178+
origin: 'auto.vercelai.otel',
179+
status: 'ok',
180+
}),
181+
// Seventh span - tool call execution span
182+
expect.objectContaining({
183+
data: {
184+
'ai.operationId': 'ai.toolCall',
185+
'ai.toolCall.id': 'call-1',
186+
'ai.toolCall.name': 'getWeather',
187+
'operation.name': 'ai.toolCall',
188+
'sentry.op': 'gen_ai.execute_tool',
189+
'sentry.origin': 'auto.vercelai.otel',
190+
},
191+
description: 'execute_tool getWeather',
192+
op: 'gen_ai.execute_tool',
124193
origin: 'auto.vercelai.otel',
125194
status: 'ok',
126195
}),
@@ -149,11 +218,11 @@ describe('Vercel AI integration', () => {
149218
'gen_ai.usage.output_tokens': 20,
150219
'gen_ai.usage.total_tokens': 30,
151220
'operation.name': 'ai.generateText',
152-
'sentry.op': 'ai.pipeline.generateText',
221+
'sentry.op': 'ai.pipeline.generate_text',
153222
'sentry.origin': 'auto.vercelai.otel',
154223
},
155224
description: 'generateText',
156-
op: 'ai.pipeline.generateText',
225+
op: 'ai.pipeline.generate_text',
157226
origin: 'auto.vercelai.otel',
158227
status: 'ok',
159228
}),
@@ -182,11 +251,11 @@ describe('Vercel AI integration', () => {
182251
'gen_ai.usage.output_tokens': 20,
183252
'gen_ai.usage.total_tokens': 30,
184253
'operation.name': 'ai.generateText.doGenerate',
185-
'sentry.op': 'ai.run.doGenerate',
254+
'sentry.op': 'gen_ai.generate_text',
186255
'sentry.origin': 'auto.vercelai.otel',
187256
},
188-
description: 'generateText.doGenerate',
189-
op: 'ai.run.doGenerate',
257+
description: 'generate_text mock-model-id',
258+
op: 'gen_ai.generate_text',
190259
origin: 'auto.vercelai.otel',
191260
status: 'ok',
192261
}),
@@ -209,19 +278,19 @@ describe('Vercel AI integration', () => {
209278
'gen_ai.usage.output_tokens': 20,
210279
'gen_ai.usage.total_tokens': 30,
211280
'operation.name': 'ai.generateText',
212-
'sentry.op': 'ai.pipeline.generateText',
281+
'sentry.op': 'ai.pipeline.generate_text',
213282
'sentry.origin': 'auto.vercelai.otel',
214283
},
215284
description: 'generateText',
216-
op: 'ai.pipeline.generateText',
285+
op: 'ai.pipeline.generate_text',
217286
origin: 'auto.vercelai.otel',
218287
status: 'ok',
219288
}),
220289
// Fourth span - doGenerate for explicitly enabled telemetry call
221290
expect.objectContaining({
222291
data: {
223292
'sentry.origin': 'auto.vercelai.otel',
224-
'sentry.op': 'ai.run.doGenerate',
293+
'sentry.op': 'gen_ai.generate_text',
225294
'operation.name': 'ai.generateText.doGenerate',
226295
'ai.operationId': 'ai.generateText.doGenerate',
227296
'ai.model.provider': 'mock-provider',
@@ -245,8 +314,89 @@ describe('Vercel AI integration', () => {
245314
'gen_ai.response.model': 'mock-model-id',
246315
'gen_ai.usage.total_tokens': 30,
247316
},
248-
description: 'generateText.doGenerate',
249-
op: 'ai.run.doGenerate',
317+
description: 'generate_text mock-model-id',
318+
op: 'gen_ai.generate_text',
319+
origin: 'auto.vercelai.otel',
320+
status: 'ok',
321+
}),
322+
// Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true)
323+
expect.objectContaining({
324+
data: {
325+
'ai.model.id': 'mock-model-id',
326+
'ai.model.provider': 'mock-provider',
327+
'ai.operationId': 'ai.generateText',
328+
'ai.pipeline.name': 'generateText',
329+
'ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
330+
'ai.response.finishReason': 'tool-calls',
331+
'ai.response.text': 'Tool call completed!',
332+
'ai.response.toolCalls': expect.any(String),
333+
'ai.settings.maxRetries': 2,
334+
'ai.settings.maxSteps': 1,
335+
'ai.streaming': false,
336+
'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
337+
'gen_ai.response.model': 'mock-model-id',
338+
'gen_ai.usage.input_tokens': 15,
339+
'gen_ai.usage.output_tokens': 25,
340+
'gen_ai.usage.total_tokens': 40,
341+
'operation.name': 'ai.generateText',
342+
'sentry.op': 'ai.pipeline.generate_text',
343+
'sentry.origin': 'auto.vercelai.otel',
344+
},
345+
description: 'generateText',
346+
op: 'ai.pipeline.generate_text',
347+
origin: 'auto.vercelai.otel',
348+
status: 'ok',
349+
}),
350+
// Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true)
351+
expect.objectContaining({
352+
data: {
353+
'ai.model.id': 'mock-model-id',
354+
'ai.model.provider': 'mock-provider',
355+
'ai.operationId': 'ai.generateText.doGenerate',
356+
'ai.pipeline.name': 'generateText.doGenerate',
357+
'ai.prompt.format': expect.any(String),
358+
'ai.prompt.messages': expect.any(String),
359+
'ai.prompt.toolChoice': expect.any(String),
360+
'ai.prompt.tools': expect.any(Array),
361+
'ai.response.finishReason': 'tool-calls',
362+
'ai.response.id': expect.any(String),
363+
'ai.response.model': 'mock-model-id',
364+
'ai.response.text': 'Tool call completed!',
365+
'ai.response.timestamp': expect.any(String),
366+
'ai.response.toolCalls': expect.any(String),
367+
'ai.settings.maxRetries': 2,
368+
'ai.streaming': false,
369+
'gen_ai.request.model': 'mock-model-id',
370+
'gen_ai.response.finish_reasons': ['tool-calls'],
371+
'gen_ai.response.id': expect.any(String),
372+
'gen_ai.response.model': 'mock-model-id',
373+
'gen_ai.system': 'mock-provider',
374+
'gen_ai.usage.input_tokens': 15,
375+
'gen_ai.usage.output_tokens': 25,
376+
'gen_ai.usage.total_tokens': 40,
377+
'operation.name': 'ai.generateText.doGenerate',
378+
'sentry.op': 'gen_ai.generate_text',
379+
'sentry.origin': 'auto.vercelai.otel',
380+
},
381+
description: 'generate_text mock-model-id',
382+
op: 'gen_ai.generate_text',
383+
origin: 'auto.vercelai.otel',
384+
status: 'ok',
385+
}),
386+
// Seventh span - tool call execution span
387+
expect.objectContaining({
388+
data: {
389+
'ai.operationId': 'ai.toolCall',
390+
'ai.toolCall.args': expect.any(String),
391+
'ai.toolCall.id': 'call-1',
392+
'ai.toolCall.name': 'getWeather',
393+
'ai.toolCall.result': expect.any(String),
394+
'operation.name': 'ai.toolCall',
395+
'sentry.op': 'gen_ai.execute_tool',
396+
'sentry.origin': 'auto.vercelai.otel',
397+
},
398+
description: 'execute_tool getWeather',
399+
op: 'gen_ai.execute_tool',
250400
origin: 'auto.vercelai.otel',
251401
status: 'ok',
252402
}),

0 commit comments

Comments
 (0)