feat(node): Update Vercel AI span attributes #16580
Merged · 1 commit · Jun 16, 2025
@@ -35,14 +35,14 @@ test('should create AI spans with correct attributes', async ({ page }) => {
   expect(firstPipelineSpan?.data?.['ai.model.id']).toBe('mock-model-id');
   expect(firstPipelineSpan?.data?.['ai.model.provider']).toBe('mock-provider');
   expect(firstPipelineSpan?.data?.['ai.prompt']).toContain('Where is the first span?');
-  expect(firstPipelineSpan?.data?.['ai.response.text']).toBe('First span here!');
+  expect(firstPipelineSpan?.data?.['gen_ai.response.text']).toBe('First span here!');
   expect(firstPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(10);
   expect(firstPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(20); */

   // Second AI call - explicitly enabled telemetry
   const secondPipelineSpan = aiPipelineSpans[0];
   expect(secondPipelineSpan?.data?.['ai.prompt']).toContain('Where is the second span?');
-  expect(secondPipelineSpan?.data?.['ai.response.text']).toContain('Second span here!');
+  expect(secondPipelineSpan?.data?.['gen_ai.response.text']).toContain('Second span here!');

   // Third AI call - with tool calls
   /* const thirdPipelineSpan = aiPipelineSpans[2];
@@ -51,7 +51,7 @@ test('should create AI spans with correct attributes', async ({ page }) => {
   expect(thirdPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(25); */

   // Tool call span
   /* const toolSpan = toolCallSpans[0];
   expect(toolSpan?.data?.['ai.toolCall.name']).toBe('getWeather');
   expect(toolSpan?.data?.['ai.toolCall.id']).toBe('call-1');
   expect(toolSpan?.data?.['ai.toolCall.args']).toContain('San Francisco');
@@ -7,5 +7,5 @@ Sentry.init({
   tracesSampleRate: 1.0,
   sendDefaultPii: true,
   transport: loggingTransport,
-  integrations: [Sentry.vercelAIIntegration()],
+  integrations: [Sentry.vercelAIIntegration({ force: true })],
 });
@@ -6,5 +6,5 @@ Sentry.init({
   release: '1.0',
   tracesSampleRate: 1.0,
   transport: loggingTransport,
-  integrations: [Sentry.vercelAIIntegration()],
+  integrations: [Sentry.vercelAIIntegration({ force: true })],
 });
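
Both test apps now pass `force: true`. A reasonable reading (an assumption — the option's semantics are not shown in this diff) is that the integration otherwise activates only when the `ai` module is auto-detected, and forcing it guarantees the attribute-processing path under test always runs in these minimal apps. A sketch:

import * as Sentry from '@sentry/node';

Sentry.init({
  dsn: process.env.SENTRY_DSN,
  tracesSampleRate: 1.0,
  // Assumption: without `force`, the integration is only enabled when the
  // `ai` package is detected; `force: true` enables it unconditionally.
  integrations: [Sentry.vercelAIIntegration({ force: true })],
});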
30 changes: 15 additions & 15 deletions dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
@@ -73,7 +73,7 @@ describe('Vercel AI integration', () => {
   'ai.pipeline.name': 'generateText',
   'ai.prompt': '{"prompt":"Where is the second span?"}',
   'ai.response.finishReason': 'stop',
-  'ai.response.text': expect.any(String),
+  'gen_ai.response.text': expect.any(String),
   'ai.settings.maxRetries': 2,
   'ai.settings.maxSteps': 1,
   'ai.streaming': false,
@@ -108,10 +108,10 @@ describe('Vercel AI integration', () => {
   'ai.response.finishReason': 'stop',
   'ai.response.model': 'mock-model-id',
   'ai.response.id': expect.any(String),
-  'ai.response.text': expect.any(String),
+  'gen_ai.response.text': expect.any(String),
   'ai.response.timestamp': expect.any(String),
   'ai.prompt.format': expect.any(String),
-  'ai.prompt.messages': expect.any(String),
+  'gen_ai.request.messages': expect.any(String),
   'gen_ai.response.finish_reasons': ['stop'],
   'gen_ai.usage.input_tokens': 10,
   'gen_ai.usage.output_tokens': 20,
@@ -210,7 +210,7 @@ describe('Vercel AI integration', () => {
   'ai.pipeline.name': 'generateText',
   'ai.prompt': '{"prompt":"Where is the first span?"}',
   'ai.response.finishReason': 'stop',
-  'ai.response.text': 'First span here!',
+  'gen_ai.response.text': 'First span here!',
   'ai.settings.maxRetries': 2,
   'ai.settings.maxSteps': 1,
   'ai.streaming': false,
@@ -236,11 +236,11 @@ describe('Vercel AI integration', () => {
   'ai.operationId': 'ai.generateText.doGenerate',
   'ai.pipeline.name': 'generateText.doGenerate',
   'ai.prompt.format': 'prompt',
-  'ai.prompt.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
+  'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
   'ai.response.finishReason': 'stop',
   'ai.response.id': expect.any(String),
   'ai.response.model': 'mock-model-id',
-  'ai.response.text': 'First span here!',
+  'gen_ai.response.text': 'First span here!',
   'ai.response.timestamp': expect.any(String),
   'ai.settings.maxRetries': 2,
   'ai.streaming': false,
@@ -270,7 +270,7 @@ describe('Vercel AI integration', () => {
   'ai.pipeline.name': 'generateText',
   'ai.prompt': '{"prompt":"Where is the second span?"}',
   'ai.response.finishReason': 'stop',
-  'ai.response.text': expect.any(String),
+  'gen_ai.response.text': expect.any(String),
   'ai.settings.maxRetries': 2,
   'ai.settings.maxSteps': 1,
   'ai.streaming': false,
@@ -305,10 +305,10 @@ describe('Vercel AI integration', () => {
   'ai.response.finishReason': 'stop',
   'ai.response.model': 'mock-model-id',
   'ai.response.id': expect.any(String),
-  'ai.response.text': expect.any(String),
+  'gen_ai.response.text': expect.any(String),
   'ai.response.timestamp': expect.any(String),
   'ai.prompt.format': expect.any(String),
-  'ai.prompt.messages': expect.any(String),
+  'gen_ai.request.messages': expect.any(String),
   'gen_ai.response.finish_reasons': ['stop'],
   'gen_ai.usage.input_tokens': 10,
   'gen_ai.usage.output_tokens': 20,
@@ -330,8 +330,8 @@ describe('Vercel AI integration', () => {
   'ai.pipeline.name': 'generateText',
   'ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
   'ai.response.finishReason': 'tool-calls',
-  'ai.response.text': 'Tool call completed!',
-  'ai.response.toolCalls': expect.any(String),
+  'gen_ai.response.text': 'Tool call completed!',
+  'gen_ai.response.tool_calls': expect.any(String),
   'ai.settings.maxRetries': 2,
   'ai.settings.maxSteps': 1,
   'ai.streaming': false,
@@ -357,15 +357,15 @@ describe('Vercel AI integration', () => {
   'ai.operationId': 'ai.generateText.doGenerate',
   'ai.pipeline.name': 'generateText.doGenerate',
   'ai.prompt.format': expect.any(String),
-  'ai.prompt.messages': expect.any(String),
+  'gen_ai.request.messages': expect.any(String),
   'ai.prompt.toolChoice': expect.any(String),
-  'ai.prompt.tools': expect.any(Array),
+  'gen_ai.request.available_tools': expect.any(Array),
   'ai.response.finishReason': 'tool-calls',
   'ai.response.id': expect.any(String),
   'ai.response.model': 'mock-model-id',
-  'ai.response.text': 'Tool call completed!',
+  'gen_ai.response.text': 'Tool call completed!',
   'ai.response.timestamp': expect.any(String),
-  'ai.response.toolCalls': expect.any(String),
+  'gen_ai.response.tool_calls': expect.any(String),
   'ai.settings.maxRetries': 2,
   'ai.streaming': false,
   'gen_ai.request.model': 'mock-model-id',
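For reference, the four attribute renames exercised by these tests, collected in one place (a summary of the diff above, expressed as a plain TypeScript map):

// Old AI SDK keys and the standardized gen_ai keys they become
// (taken directly from the test expectations above).
const renamedAttributes: Record<string, string> = {
  'ai.prompt.messages': 'gen_ai.request.messages',
  'ai.prompt.tools': 'gen_ai.request.available_tools',
  'ai.response.text': 'gen_ai.response.text',
  'ai.response.toolCalls': 'gen_ai.response.tool_calls',
};
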
22 changes: 22 additions & 0 deletions packages/node/src/integrations/tracing/vercelai/index.ts
@@ -9,6 +9,10 @@ import {
   AI_MODEL_ID_ATTRIBUTE,
   AI_MODEL_PROVIDER_ATTRIBUTE,
   AI_PROMPT_ATTRIBUTE,
+  AI_PROMPT_MESSAGES_ATTRIBUTE,
+  AI_PROMPT_TOOLS_ATTRIBUTE,
+  AI_RESPONSE_TEXT_ATTRIBUTE,
+  AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
   AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE,
   AI_TOOL_CALL_ID_ATTRIBUTE,
   AI_TOOL_CALL_NAME_ATTRIBUTE,
@@ -193,6 +197,24 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
       attributes['gen_ai.usage.total_tokens'] =
         attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE];
     }
+
+    // Rename AI SDK attributes to standardized gen_ai attributes
+    if (attributes[AI_PROMPT_MESSAGES_ATTRIBUTE] != undefined) {
+      attributes['gen_ai.request.messages'] = attributes[AI_PROMPT_MESSAGES_ATTRIBUTE];
+      delete attributes[AI_PROMPT_MESSAGES_ATTRIBUTE];
+    }
+    if (attributes[AI_RESPONSE_TEXT_ATTRIBUTE] != undefined) {
+      attributes['gen_ai.response.text'] = attributes[AI_RESPONSE_TEXT_ATTRIBUTE];
+      delete attributes[AI_RESPONSE_TEXT_ATTRIBUTE];
+    }
+    if (attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE] != undefined) {
+      attributes['gen_ai.response.tool_calls'] = attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE];
+      delete attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE];
+    }
+    if (attributes[AI_PROMPT_TOOLS_ATTRIBUTE] != undefined) {

[Inline review comment on the line above — Member]

l: this is fine, but maybe we could even add a utility to make this easier to grok, e.g.:

function renameAttributeKey(attributes: SpanAttributes, oldKey: string, newKey: string) {
  if (attributes[oldKey] != null) {
    attributes[newKey] = attributes[oldKey];
    delete attributes[oldKey];
  }
}

// and then
renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text');
// etc...
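
Applied to this hunk's four renames, the suggested helper would read (a sketch extending the reviewer's suggestion, not code from this PR):

// Sketch only — the reviewer's helper applied to the renames added above.
renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, 'gen_ai.request.messages');
renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text');
renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls');
renameAttributeKey(attributes, AI_PROMPT_TOOLS_ATTRIBUTE, 'gen_ai.request.available_tools');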

+      attributes['gen_ai.request.available_tools'] = attributes[AI_PROMPT_TOOLS_ATTRIBUTE];
+      delete attributes[AI_PROMPT_TOOLS_ATTRIBUTE];
+    }
}
}
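
To make the processing concrete, here is a hypothetical span's attributes before and after this pass (values are illustrative; the total-tokens line comes from the context just above the renames):

// Hypothetical input attributes on a generateText.doGenerate span:
const before = {
  'ai.prompt.messages': '[{"role":"user","content":[{"type":"text","text":"Hi"}]}]',
  'ai.response.text': 'Hello!',
  'gen_ai.usage.input_tokens': 10,
  'gen_ai.usage.output_tokens': 20,
};

// After the handling shown above, the same span carries:
const after = {
  'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Hi"}]}]',
  'gen_ai.response.text': 'Hello!',
  'gen_ai.usage.input_tokens': 10,
  'gen_ai.usage.output_tokens': 20,
  'gen_ai.usage.total_tokens': 30, // input + output, computed earlier in the same function
};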
