diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/scenario-with-response.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/scenario-with-response.mjs
new file mode 100644
index 000000000000..f7c23351996a
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/scenario-with-response.mjs
@@ -0,0 +1,90 @@
+import * as Sentry from '@sentry/node';
+import express from 'express';
+import OpenAI from 'openai';
+
+function startMockServer() {
+  const app = express();
+  app.use(express.json());
+
+  app.post('/openai/chat/completions', (req, res) => {
+    const { model } = req.body;
+
+    res.set({
+      'x-request-id': 'req_withresponse_test',
+      'openai-organization': 'test-org',
+      'openai-processing-ms': '150',
+      'openai-version': '2020-10-01',
+    });
+
+    res.send({
+      id: 'chatcmpl-withresponse',
+      object: 'chat.completion',
+      created: 1677652288,
+      model: model,
+      choices: [
+        {
+          index: 0,
+          message: {
+            role: 'assistant',
+            content: 'Testing .withResponse() method!',
+          },
+          finish_reason: 'stop',
+        },
+      ],
+      usage: {
+        prompt_tokens: 8,
+        completion_tokens: 12,
+        total_tokens: 20,
+      },
+    });
+  });
+
+  return new Promise(resolve => {
+    const server = app.listen(0, () => {
+      resolve(server);
+    });
+  });
+}
+
+async function run() {
+  const server = await startMockServer();
+
+  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
+    const client = new OpenAI({
+      baseURL: `http://localhost:${server.address().port}/openai`,
+      apiKey: 'mock-api-key',
+    });
+
+    // Test 1: Verify .withResponse() method exists and can be called
+    const result = client.chat.completions.create({
+      model: 'gpt-4',
+      messages: [{ role: 'user', content: 'Test withResponse' }],
+    });
+
+    // Verify the method exists
+    if (typeof result.withResponse !== 'function') {
+      throw new Error('.withResponse() method does not exist');
+    }
+
+    // Call .withResponse() and verify the structure
+    const { data } = await result.withResponse();
+
+    if (!data) {
+      throw new Error('.withResponse() did not return data');
+    }
+
+    // Test 2: Verify regular await still works
+    const result2 = await client.chat.completions.create({
+      model: 'gpt-4',
+      messages: [{ role: 'user', content: 'Test regular await' }],
+    });
+
+    if (!result2 || result2.id !== 'chatcmpl-withresponse') {
+      throw new Error('Regular await failed');
+    }
+  });
+
+  server.close();
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts
index df432d292bba..fd7bb060aecb 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts
@@ -945,4 +945,42 @@ describe('OpenAI integration', () => {
      });
    },
  );
+
+  createEsmAndCjsTests(__dirname, 'scenario-with-response.mjs', 'instrument.mjs', (createRunner, test) => {
+    test('preserves .withResponse() method and works correctly', async () => {
+      await createRunner()
+        .ignore('event')
+        .expect({
+          transaction: {
+            transaction: 'main',
+            spans: expect.arrayContaining([
+              // First call using .withResponse()
+              expect.objectContaining({
+                data: expect.objectContaining({
+                  [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+                  [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
+                  [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-withresponse',
+                }),
+                description: 'chat gpt-4',
+                op: 'gen_ai.chat',
+                status: 'ok',
+              }),
+              // Second call using regular await
+              expect.objectContaining({
+                data: expect.objectContaining({
+                  [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+                  [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
+                  [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-withresponse',
+                }),
+                description: 'chat gpt-4',
+                op: 'gen_ai.chat',
+                status: 'ok',
+              }),
+            ]),
+          },
+        })
+        .start()
+        .completed();
+    });
+  });
 });
diff --git a/packages/core/src/tracing/openai/index.ts b/packages/core/src/tracing/openai/index.ts
index b0d26f92c36c..cde62b9a6496 100644
--- a/packages/core/src/tracing/openai/index.ts
+++ b/packages/core/src/tracing/openai/index.ts
@@ -4,6 +4,7 @@ import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes';
 import { SPAN_STATUS_ERROR } from '../../tracing';
 import { startSpan, startSpanManual } from '../../tracing/trace';
 import type { Span, SpanAttributeValue } from '../../types-hoist/span';
+import { isThenable } from '../../utils/is';
 import {
   GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE,
   GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
@@ -161,6 +162,74 @@ function addRequestAttributes(span: Span, params: Record<string, unknown>, operationName: string): void {
   }
 }
 
+/**
+ * Creates a wrapped version of .withResponse() that replaces the data field
+ * with the instrumented result while preserving metadata (response, request_id).
+ */
+function createWithResponseWrapper(
+  originalWithResponse: Promise<unknown>,
+  instrumentedPromise: Promise<unknown>,
+): Promise<unknown> {
+  return instrumentedPromise.then(async instrumentedResult => {
+    try {
+      const originalWrapper = await originalWithResponse;
+
+      // If it's a wrapper object with a data property, replace data with the instrumented result
+      if (originalWrapper && typeof originalWrapper === 'object' && 'data' in originalWrapper) {
+        return {
+          ...originalWrapper,
+          data: instrumentedResult,
+        };
+      }
+
+      // Otherwise return the instrumented result as-is
+      return instrumentedResult;
+    } catch (error) {
+      // If resolving the original wrapper fails, capture the error but still throw,
+      // so the failure stays visible to callers while being tracked in Sentry
+      captureException(error, {
+        mechanism: {
+          handled: false,
+          type: 'auto.ai.openai',
+        },
+      });
+      throw error;
+    }
+  });
+}
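+
+// Illustrative only: for a non-streaming chat completion, a resolved wrapper is
+// expected to look like (concrete values here are assumptions):
+//   { data: <instrumented ChatCompletion>, response: Response, request_id: 'req_abc123' }
+// so callers that destructure `data` still receive the span-annotated result.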
+
+/**
+ * Wraps a promise-like object to preserve additional methods (like .withResponse())
+ */
+function wrapPromiseWithMethods<T>(originalPromiseLike: T, instrumentedPromise: Promise<Awaited<T>>): T {
+  // If the original result is not thenable, just return the instrumented promise
+  if (!isThenable(originalPromiseLike)) {
+    return instrumentedPromise as T;
+  }
+
+  // Create a proxy that forwards Promise methods to instrumentedPromise
+  // and preserves additional methods from the original result
+  return new Proxy(originalPromiseLike as object, {
+    get(target: object, prop: string | symbol): unknown {
+      const useInstrumentedPromise = prop in Promise.prototype || prop === Symbol.toStringTag;
+      const source = useInstrumentedPromise ? instrumentedPromise : target;
+
+      const value = Reflect.get(source, prop) as unknown;
+
+      // Special handling for .withResponse() to preserve instrumentation;
+      // .withResponse() returns { data: T, response: Response, request_id: string }
+      if (prop === 'withResponse' && typeof value === 'function') {
+        return function wrappedWithResponse(this: unknown): unknown {
+          const originalWithResponse = (value as (...args: unknown[]) => Promise<unknown>).call(target);
+          return createWithResponseWrapper(originalWithResponse, instrumentedPromise);
+        };
+      }
+
+      return typeof value === 'function' ? value.bind(source) : value;
+    },
+  }) as T;
+}
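+
+// Usage sketch (assumed caller code; it mirrors the integration test scenario):
+//   const pending = client.chat.completions.create({ model: 'gpt-4', messages });
+//   const { data, response, request_id } = await pending.withResponse();
+// `pending` is the Proxy above, so a plain `await pending` also resolves through
+// instrumentedPromise and keeps the span attributes on the result.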
+
 /**
  * Instrument a method with Sentry spans
  * Following Sentry AI Agents Manual Instrumentation conventions
@@ -172,7 +241,7 @@ function instrumentMethod<T extends unknown[], R>(
   context: unknown,
   options: OpenAiOptions,
 ): (...args: T) => Promise<R> {
-  return async function instrumentedMethod(...args: T): Promise<R> {
+  return function instrumentedMethod(...args: T): Promise<R> {
     const requestAttributes = extractRequestAttributes(args, methodPath);
     const model = (requestAttributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] as string) || 'unknown';
     const operationName = getOperationName(methodPath);
@@ -180,77 +249,71 @@ function instrumentMethod<T extends unknown[], R>(
     const params = args[0] as Record<string, unknown> | undefined;
     const isStreamRequested = params && typeof params === 'object' && params.stream === true;
 
+    // Call the original method once up front so the result keeps all of its methods
+    const originalResult = originalMethod.apply(context, args);
+
+    const spanConfig = {
+      name: `${operationName} ${model}${isStreamRequested ? ' stream-response' : ''}`,
+      op: getSpanOperation(methodPath),
+      attributes: requestAttributes as Record<string, SpanAttributeValue>,
+    };
+
+    let instrumentedPromise: Promise<R>;
+
     if (isStreamRequested) {
       // For streaming responses, use manual span management to properly handle the async generator lifecycle
-      return startSpanManual(
-        {
-          name: `${operationName} ${model} stream-response`,
-          op: getSpanOperation(methodPath),
-          attributes: requestAttributes as Record<string, SpanAttributeValue>,
-        },
-        async (span: Span) => {
-          try {
-            if (options.recordInputs && params) {
-              addRequestAttributes(span, params, operationName);
-            }
-
-            const result = await originalMethod.apply(context, args);
-
-            return instrumentStream(
-              result as OpenAIStream<unknown>,
-              span,
-              options.recordOutputs ?? false,
-            ) as unknown as R;
-          } catch (error) {
-            // For streaming requests that fail before stream creation, we still want to record
-            // them as streaming requests but end the span gracefully
-            span.setStatus({ code: SPAN_STATUS_ERROR, message: 'internal_error' });
-            captureException(error, {
-              mechanism: {
-                handled: false,
-                type: 'auto.ai.openai.stream',
-                data: {
-                  function: methodPath,
-                },
-              },
-            });
-            span.end();
-            throw error;
-          }
-        },
-      );
+      instrumentedPromise = startSpanManual(spanConfig, async (span: Span) => {
+        try {
+          if (options.recordInputs && params) {
+            addRequestAttributes(span, params, operationName);
+          }
+
+          const result = await originalResult;
+
+          return instrumentStream(
+            result as OpenAIStream<unknown>,
+            span,
+            options.recordOutputs ?? false,
+          ) as unknown as R;
+        } catch (error) {
+          // For streaming requests that fail before stream creation, end the span gracefully
+          span.setStatus({ code: SPAN_STATUS_ERROR, message: 'internal_error' });
+          captureException(error, {
+            mechanism: {
+              handled: false,
+              type: 'auto.ai.openai.stream',
+              data: { function: methodPath },
+            },
+          });
+          span.end();
+          throw error;
+        }
+      });
     } else {
-      // Non-streaming responses
-      return startSpan(
-        {
-          name: `${operationName} ${model}`,
-          op: getSpanOperation(methodPath),
-          attributes: requestAttributes as Record<string, SpanAttributeValue>,
-        },
-        async (span: Span) => {
-          try {
-            if (options.recordInputs && params) {
-              addRequestAttributes(span, params, operationName);
-            }
-
-            const result = await originalMethod.apply(context, args);
-            addResponseAttributes(span, result, options.recordOutputs);
-            return result;
-          } catch (error) {
-            captureException(error, {
-              mechanism: {
-                handled: false,
-                type: 'auto.ai.openai',
-                data: {
-                  function: methodPath,
-                },
-              },
-            });
-            throw error;
-          }
-        },
-      );
+      // Non-streaming responses
+      instrumentedPromise = startSpan(spanConfig, async (span: Span) => {
+        try {
+          if (options.recordInputs && params) {
+            addRequestAttributes(span, params, operationName);
+          }
+
+          const result = await originalResult;
+          addResponseAttributes(span, result, options.recordOutputs);
+          return result;
+        } catch (error) {
+          captureException(error, {
+            mechanism: {
+              handled: false,
+              type: 'auto.ai.openai',
+              data: { function: methodPath },
+            },
+          });
+          throw error;
+        }
+      });
     }
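+
+    // Hand the caller a thenable that resolves through instrumentedPromise while
+    // the Proxy keeps SDK helpers such as .withResponse() reachable on the result.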
+    return wrapPromiseWithMethods(originalResult, instrumentedPromise);
   };
 }