diff --git a/packages/datadog-instrumentations/src/openai.js b/packages/datadog-instrumentations/src/openai.js index 10f43076a82..3f8020ec325 100644 --- a/packages/datadog-instrumentations/src/openai.js +++ b/packages/datadog-instrumentations/src/openai.js @@ -31,7 +31,21 @@ const V4_PACKAGE_SHIMS = [ file: 'resources/files', targetClass: 'Files', baseResource: 'files', - methods: ['create', 'del', 'list', 'retrieve'] + methods: ['create', 'list', 'retrieve'] + }, + { + file: 'resources/files', + targetClass: 'Files', + baseResource: 'files', + methods: ['del'], + versions: ['>=4.0.0 <5.0.0'] + }, + { + file: 'resources/files', + targetClass: 'Files', + baseResource: 'files', + methods: ['delete'], + versions: ['>=5'] }, { file: 'resources/files', @@ -78,7 +92,21 @@ const V4_PACKAGE_SHIMS = [ file: 'resources/models', targetClass: 'Models', baseResource: 'models', - methods: ['del', 'list', 'retrieve'] + methods: ['list', 'retrieve'] + }, + { + file: 'resources/models', + targetClass: 'Models', + baseResource: 'models', + methods: ['del'], + versions: ['>=4 <5'] + }, + { + file: 'resources/models', + targetClass: 'Models', + baseResource: 'models', + methods: ['delete'], + versions: ['>=5'] }, { file: 'resources/moderations', diff --git a/packages/datadog-plugin-openai/src/tracing.js b/packages/datadog-plugin-openai/src/tracing.js index 5e5939273bf..35ceba3c578 100644 --- a/packages/datadog-plugin-openai/src/tracing.js +++ b/packages/datadog-plugin-openai/src/tracing.js @@ -366,6 +366,7 @@ function normalizeMethodName (methodName) { case 'files.retrieve': return 'retrieveFile' case 'files.del': + case 'files.delete': return 'deleteFile' case 'files.retrieveContent': case 'files.content': @@ -410,6 +411,7 @@ function normalizeMethodName (methodName) { case 'models.retrieve': return 'retrieveModel' case 'models.del': + case 'models.delete': return 'deleteModel' default: return methodName @@ -954,6 +956,7 @@ function normalizeRequestPayload (methodName, args) { case 
'deleteFile': case 'files.del': + case 'files.delete': case 'retrieveFile': case 'files.retrieve': case 'downloadFile': @@ -974,6 +977,7 @@ function normalizeRequestPayload (methodName, args) { case 'fine-tune.retrieve': case 'deleteModel': case 'models.del': + case 'models.delete': case 'cancelFineTune': case 'fine_tuning.jobs.cancel': case 'fine-tune.cancel': diff --git a/packages/datadog-plugin-openai/test/index.spec.js b/packages/datadog-plugin-openai/test/index.spec.js index 9c0a7c10225..d74bd1b1d9d 100644 --- a/packages/datadog-plugin-openai/test/index.spec.js +++ b/packages/datadog-plugin-openai/test/index.spec.js @@ -4,7 +4,6 @@ const fs = require('fs') const Path = require('path') const { expect } = require('chai') const semver = require('semver') -const nock = require('nock') const sinon = require('sinon') const { spawn } = require('child_process') @@ -15,7 +14,9 @@ const Sampler = require('../../dd-trace/src/sampler') const tracerRequirePath = '../../dd-trace' -const { DD_MAJOR } = require('../../../version') +const { DD_MAJOR, NODE_MAJOR } = require('../../../version') + +const { startMockServer, stopMockServer } = require('./mock-server') describe('Plugin', () => { let openai @@ -25,38 +26,59 @@ describe('Plugin', () => { let realVersion let tracer + let mockServerPort + let globalFile + describe('openai', () => { - // TODO: Remove the range once we support openai 5 - withVersions('openai', 'openai', '<5.0.0', version => { + withVersions('openai', 'openai', version => { const moduleRequirePath = `../../../versions/openai@${version}` - before(() => { + before(async () => { + mockServerPort = await startMockServer() tracer = require(tracerRequirePath) return agent.load('openai') }) - after(() => { + after(async () => { + if (semver.satisfies(realVersion, '>=5.0.0') && NODE_MAJOR < 20) { + global.File = globalFile + } + + await stopMockServer() return agent.close({ ritmReset: false }) }) - beforeEach(() => { + beforeEach(async () => { clock = 
sinon.useFakeTimers() const requiredModule = require(moduleRequirePath) const module = requiredModule.get() realVersion = requiredModule.version() + if (semver.satisfies(realVersion, '>=5.0.0') && NODE_MAJOR < 20) { + /** + * resolves the following error for OpenAI v5 + * + * Error: `File` is not defined as a global, which is required for file uploads. + * Update to Node 20 LTS or newer, or set `globalThis.File` to `import('node:buffer').File`. + */ + globalFile = global.File + global.File = require('node:buffer').File + } + if (semver.satisfies(realVersion, '>=4.0.0')) { const OpenAI = module openai = new OpenAI({ - apiKey: 'sk-DATADOG-ACCEPTANCE-TESTS' + apiKey: 'sk-DATADOG-ACCEPTANCE-TESTS', + baseURL: `http://localhost:${mockServerPort}/v1` }) } else { const { Configuration, OpenAIApi } = module const configuration = new Configuration({ - apiKey: 'sk-DATADOG-ACCEPTANCE-TESTS' + apiKey: 'sk-DATADOG-ACCEPTANCE-TESTS', + basePath: `http://localhost:${mockServerPort}/v1` }) openai = new OpenAIApi(configuration) @@ -87,27 +109,11 @@ describe('Plugin', () => { }) describe('with error', () => { - let scope - - beforeEach(() => { - scope = nock('https://api.openai.com:443') - .get('/v1/models') - .reply(400, { - error: { - message: 'fake message', - type: 'fake type', - param: 'fake param', - code: null - } - }) - }) - - afterEach(() => { - nock.removeInterceptor(scope) - scope.done() - }) + it('should attach the error to the span', async function () { + if (semver.satisfies(realVersion, '3.0.0')) { + this.skip() + } - it('should attach the error to the span', async () => { const checkTraces = agent .assertSomeTraces(traces => { expect(traces[0][0]).to.have.property('error', 1) @@ -119,9 +125,15 @@ describe('Plugin', () => { try { if (semver.satisfies(realVersion, '>=4.0.0')) { - await openai.models.list() + await openai.chat.completions.create({ + model: 'gpt-4o', + messages: 5 // trigger an error + }) } else { - await openai.listModels() + await 
openai.createChatCompletion({ + model: 'gpt-4o', + messages: 5 // trigger an error + }) } } catch { // ignore, we expect an error @@ -147,27 +159,7 @@ describe('Plugin', () => { }) describe('maintains context', () => { - afterEach(() => { - nock.cleanAll() - }) - it('should maintain the context with a non-streamed call', async () => { - nock('https://api.openai.com:443') - .post('/v1/completions') - .reply(200, { - id: 'cmpl-7GWDlQbOrAYGmeFZtoRdOEjDXDexM', - object: 'text_completion', - created: 1684171461, - model: 'text-davinci-002', - choices: [{ - text: 'FOO BAR BAZ', - index: 0, - logprobs: null, - finish_reason: 'length' - }], - usage: { prompt_tokens: 3, completion_tokens: 16, total_tokens: 19 } - }) - await tracer.trace('outer', async (outerSpan) => { const params = { model: 'text-davinci-002', @@ -176,10 +168,10 @@ describe('Plugin', () => { if (semver.satisfies(realVersion, '>=4.0.0')) { const result = await openai.completions.create(params) - expect(result.id).to.eql('cmpl-7GWDlQbOrAYGmeFZtoRdOEjDXDexM') + expect(result.id).to.exist } else { const result = await openai.createCompletion(params) - expect(result.data.id).to.eql('cmpl-7GWDlQbOrAYGmeFZtoRdOEjDXDexM') + expect(result.data.id).to.exist } tracer.trace('child of outer', innerSpan => { @@ -190,26 +182,16 @@ describe('Plugin', () => { if (semver.intersects('>4.1.0', version)) { it('should maintain the context with a streamed call', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, function () { - return fs.createReadStream(Path.join(__dirname, 'streamed-responses/chat.completions.simple.txt')) - }, { - 'Content-Type': 'text/plain', - 'openai-organization': 'kill-9' - }) - await tracer.trace('outer', async (outerSpan) => { - const stream = await openai.chat.completions.create({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'Hello, OpenAI!', name: 'hunter2' }], - temperature: 0.5, + const stream = await openai.completions.create({ + model: 
'text-davinci-002', + prompt: 'Hello, world!', stream: true }) for await (const part of stream) { expect(part).to.have.property('choices') - expect(part.choices[0]).to.have.property('delta') + expect(part.choices[0]).to.have.property('text') } tracer.trace('child of outer', innerSpan => { @@ -221,43 +203,7 @@ describe('Plugin', () => { }) describe('create completion', () => { - afterEach(() => { - nock.cleanAll() - }) - it('makes a successful call', async () => { - nock('https://api.openai.com:443') - .post('/v1/completions') - .reply(200, { - id: 'cmpl-7GWDlQbOrAYGmeFZtoRdOEjDXDexM', - object: 'text_completion', - created: 1684171461, - model: 'text-davinci-002', - choices: [{ - text: 'FOO BAR BAZ', - index: 0, - logprobs: null, - finish_reason: 'length' - }], - usage: { prompt_tokens: 3, completion_tokens: 16, total_tokens: 19 } - }, [ - 'Date', 'Mon, 15 May 2023 17:24:22 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '349', - 'Connection', 'close', - 'openai-model', 'text-davinci-002', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '442', - 'openai-version', '2020-10-01', - 'x-ratelimit-limit-requests', '3000', - 'x-ratelimit-limit-tokens', '250000', - 'x-ratelimit-remaining-requests', '2999', - 'x-ratelimit-remaining-tokens', '249984', - 'x-ratelimit-reset-requests', '20ms', - 'x-ratelimit-reset-tokens', '3ms', - 'x-request-id', '7df89d8afe7bf24dc04e2c4dd4962d7f' - ]) - const checkTraces = agent .assertSomeTraces(traces => { expect(traces[0][0]).to.have.property('name', 'openai.request') @@ -272,16 +218,16 @@ describe('Plugin', () => { expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/completions') expect(traces[0][0].meta).to.have.property('component', 'openai') - expect(traces[0][0].meta).to.have.property('openai.api_base', 'https://api.openai.com/v1') - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') + expect(traces[0][0].meta).to.have.property('openai.api_base', 
`http://localhost:${mockServerPort}/v1`) + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.model', 'text-davinci-002') expect(traces[0][0].meta).to.have.property('openai.request.prompt', 'Hello, \\n\\nFriend\\t\\tHi') expect(traces[0][0].meta).to.have.property('openai.request.stop', 'time') expect(traces[0][0].meta).to.have.property('openai.request.suffix', 'foo') expect(traces[0][0].meta).to.have.property('openai.request.user', 'hunter2') - expect(traces[0][0].meta).to.have.property('openai.response.choices.0.finish_reason', 'length') + expect(traces[0][0].meta).to.have.property('openai.response.choices.0.finish_reason', 'stop') expect(traces[0][0].meta).to.have.property('openai.response.choices.0.logprobs', 'returned') - expect(traces[0][0].meta).to.have.property('openai.response.choices.0.text', 'FOO BAR BAZ') + expect(traces[0][0].meta).to.have.property('openai.response.choices.0.text', '\\n\\nHello, world!') expect(traces[0][0].meta).to.have.property('openai.response.model', 'text-davinci-002') expect(traces[0][0].meta).to.have.property('openai.user.api_key', 'sk-...ESTS') expect(traces[0][0].metrics).to.have.property('openai.request.best_of', 2) @@ -320,10 +266,10 @@ describe('Plugin', () => { if (semver.satisfies(realVersion, '>=4.0.0')) { const result = await openai.completions.create(params) - expect(result.id).to.eql('cmpl-7GWDlQbOrAYGmeFZtoRdOEjDXDexM') + expect(result.id).to.exist } else { const result = await openai.createCompletion(params) - expect(result.data.id).to.eql('cmpl-7GWDlQbOrAYGmeFZtoRdOEjDXDexM') + expect(result.data.id).to.exist } await checkTraces @@ -332,7 +278,7 @@ describe('Plugin', () => { const expectedTags = [ 'error:0', - 'org:kill-9', + 'org:datadog', 'endpoint:/v1/completions', 'model:text-davinci-002' ] @@ -355,89 +301,18 @@ describe('Plugin', () => { prompt: 'Hello, \n\nFriend\t\tHi', choices: [ { - text: 'FOO BAR BAZ', + text: 
'\n\nHello, world!', index: 0, logprobs: null, - finish_reason: 'length' + finish_reason: 'stop' } ] }) }) - - it('should not throw with empty response body', async () => { - nock('https://api.openai.com:443') - .post('/v1/completions') - .reply(200, {}, [ - 'Date', 'Mon, 15 May 2023 17:24:22 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '349', - 'Connection', 'close', - 'openai-model', 'text-davinci-002', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '442', - 'openai-version', '2020-10-01', - 'x-ratelimit-limit-requests', '3000', - 'x-ratelimit-limit-tokens', '250000', - 'x-ratelimit-remaining-requests', '2999', - 'x-ratelimit-remaining-tokens', '249984', - 'x-ratelimit-reset-requests', '20ms', - 'x-ratelimit-reset-tokens', '3ms', - 'x-request-id', '7df89d8afe7bf24dc04e2c4dd4962d7f' - ]) - - const checkTraces = agent - .assertSomeTraces(traces => { - expect(traces[0][0]).to.have.property('name', 'openai.request') - }) - - const params = { - model: 'text-davinci-002', - prompt: 'Hello, ', - suffix: 'foo' - } - - if (semver.satisfies(realVersion, '>=4.0.0')) { - await openai.completions.create(params) - } else { - await openai.createCompletion(params) - } - - await checkTraces - - clock.tick(10 * 1000) - }) }) - describe('create embedding with stream:true', () => { - after(() => { - nock.cleanAll() - }) - + describe('create embedding', () => { it('makes a successful call', async () => { - nock('https://api.openai.com:443') - .post('/v1/embeddings') - .reply(200, { - object: 'list', - data: [{ - object: 'embedding', - index: 0, - embedding: [-0.0034387498, -0.026400521] - }], - model: 'text-embedding-ada-002-v2', - usage: { - prompt_tokens: 2, - total_tokens: 2 - } - }, [ - 'Date', 'Mon, 15 May 2023 20:49:06 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '75', - 'access-control-allow-origin', '*', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '344', - 'openai-version', '2020-10-01' - ]) - const 
checkTraces = agent .assertSomeTraces(traces => { expect(traces[0][0]).to.have.property('name', 'openai.request') @@ -451,15 +326,16 @@ describe('Plugin', () => { expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/embeddings') expect(traces[0][0].meta).to.have.property('openai.request.method', 'POST') - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.input', 'Cat?') expect(traces[0][0].meta).to.have.property('openai.request.model', 'text-embedding-ada-002') expect(traces[0][0].meta).to.have.property('openai.request.user', 'hunter2') - expect(traces[0][0].meta).to.have.property('openai.response.model', 'text-embedding-ada-002-v2') + expect(traces[0][0].meta).to.have.property('openai.response.model', 'text-embedding-ada-002') expect(traces[0][0].metrics).to.have.property('openai.response.embeddings_count', 1) - expect(traces[0][0].metrics).to.have.property('openai.response.embedding.0.embedding_length', 2) - expect(traces[0][0].metrics).to.have.property('openai.response.usage.prompt_tokens', 2) - expect(traces[0][0].metrics).to.have.property('openai.response.usage.total_tokens', 2) + expect(traces[0][0].metrics).to.have.property('openai.response.embedding.0.embedding_length', 1536) + expect(traces[0][0].metrics).to.have.property('openai.response.usage.prompt_tokens', 1) + expect(traces[0][0].metrics).to.not.have.property('openai.response.usage.completion_tokens') + expect(traces[0][0].metrics).to.have.property('openai.response.usage.total_tokens', 1) }) const params = { @@ -471,10 +347,10 @@ describe('Plugin', () => { if (semver.satisfies(realVersion, '>=4.0.0')) { const result = await openai.embeddings.create(params) - expect(result.model).to.eql('text-embedding-ada-002-v2') + expect(result.model).to.eql('text-embedding-ada-002') } else { const result = await 
openai.createEmbedding(params) - expect(result.data.model).to.eql('text-embedding-ada-002-v2') + expect(result.data.model).to.eql('text-embedding-ada-002') } await checkTraces @@ -490,29 +366,6 @@ describe('Plugin', () => { it('makes a successful call with stream true', async () => { // Testing that adding stream:true to the params doesn't break the instrumentation - nock('https://api.openai.com:443') - .post('/v1/embeddings') - .reply(200, { - object: 'list', - data: [{ - object: 'embedding', - index: 0, - embedding: [-0.0034387498, -0.026400521] - }], - model: 'text-embedding-ada-002-v2', - usage: { - prompt_tokens: 2, - total_tokens: 2 - } - }, [ - 'Date', 'Mon, 15 May 2023 20:49:06 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '75', - 'access-control-allow-origin', '*', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '344', - 'openai-version', '2020-10-01' - ]) const checkTraces = agent .assertSomeTraces(traces => { @@ -527,15 +380,16 @@ describe('Plugin', () => { expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/embeddings') expect(traces[0][0].meta).to.have.property('openai.request.method', 'POST') - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.input', 'Cat?') expect(traces[0][0].meta).to.have.property('openai.request.model', 'text-embedding-ada-002') expect(traces[0][0].meta).to.have.property('openai.request.user', 'hunter2') - expect(traces[0][0].meta).to.have.property('openai.response.model', 'text-embedding-ada-002-v2') + expect(traces[0][0].meta).to.have.property('openai.response.model', 'text-embedding-ada-002') expect(traces[0][0].metrics).to.have.property('openai.response.embeddings_count', 1) - expect(traces[0][0].metrics).to.have.property('openai.response.embedding.0.embedding_length', 2) - 
expect(traces[0][0].metrics).to.have.property('openai.response.usage.prompt_tokens', 2) - expect(traces[0][0].metrics).to.have.property('openai.response.usage.total_tokens', 2) + expect(traces[0][0].metrics).to.have.property('openai.response.embedding.0.embedding_length', 1536) + expect(traces[0][0].metrics).to.have.property('openai.response.usage.prompt_tokens', 1) + expect(traces[0][0].metrics).to.not.have.property('openai.response.usage.completion_tokens') + expect(traces[0][0].metrics).to.have.property('openai.response.usage.total_tokens', 1) }) const params = { @@ -548,10 +402,10 @@ describe('Plugin', () => { if (semver.satisfies(realVersion, '>=4.0.0')) { const result = await openai.embeddings.create(params) - expect(result.model).to.eql('text-embedding-ada-002-v2') + expect(result.model).to.eql('text-embedding-ada-002') } else { const result = await openai.createEmbedding(params) - expect(result.data.model).to.eql('text-embedding-ada-002-v2') + expect(result.data.model).to.eql('text-embedding-ada-002') } await checkTraces @@ -566,127 +420,7 @@ describe('Plugin', () => { }) }) - describe('embedding with missing usages', () => { - afterEach(() => { - nock.cleanAll() - }) - - it('makes a successful call', async () => { - nock('https://api.openai.com:443') - .post('/v1/embeddings') - .reply(200, { - object: 'list', - data: [{ - object: 'embedding', - index: 0, - embedding: [-0.0034387498, -0.026400521] - }], - model: 'text-embedding-ada-002-v2', - usage: { - prompt_tokens: 0 - } - }, []) - - const checkTraces = agent - .assertSomeTraces(traces => { - expect(traces[0][0].metrics).to.have.property('openai.response.usage.prompt_tokens', 0) - expect(traces[0][0].metrics).to.not.have.property('openai.response.usage.completion_tokens') - expect(traces[0][0].metrics).to.not.have.property('openai.response.usage.total_tokens') - }) - - const params = { - model: 'text-embedding-ada-002', - input: '', - user: 'hunter2', - encoding_format: 'float' - } - - if 
(semver.satisfies(realVersion, '>=4.0.0')) { - const result = await openai.embeddings.create(params) - expect(result.model).to.eql('text-embedding-ada-002-v2') - } else { - const result = await openai.createEmbedding(params) - expect(result.data.model).to.eql('text-embedding-ada-002-v2') - } - - await checkTraces - - expect(metricStub).to.have.been.calledWith('openai.request.duration') // timing value not guaranteed - expect(metricStub).to.have.been.calledWith('openai.tokens.prompt') - expect(metricStub).to.not.have.been.calledWith('openai.tokens.completion') - expect(metricStub).to.not.have.been.calledWith('openai.tokens.total') - }) - }) - describe('list models', () => { - let scope - - before(() => { - scope = nock('https://api.openai.com:443') - .get('/v1/models') - .reply(200, { - object: 'list', - data: [ - { - id: 'whisper-1', - object: 'model', - created: 1677532384, - owned_by: 'openai-internal', - permission: [{ - id: 'modelperm-KlsZlfft3Gma8pI6A8rTnyjs', - object: 'model_permission', - created: 1683912666, - allow_create_engine: false, - allow_sampling: true, - allow_logprobs: true, - allow_search_indices: false, - allow_view: true, - allow_fine_tuning: false, - organization: '*', - group: null, - is_blocking: false - }], - root: 'whisper-1', - parent: null - }, - { - id: 'babbage', - object: 'model', - created: 1649358449, - owned_by: 'openai', - permission: [{ - id: 'modelperm-49FUp5v084tBB49tC4z8LPH5', - object: 'model_permission', - created: 1669085501, - allow_create_engine: false, - allow_sampling: true, - allow_logprobs: true, - allow_search_indices: false, - allow_view: true, - allow_fine_tuning: false, - organization: '*', - group: null, - is_blocking: false - }], - root: 'babbage', - parent: null - } - ] - }, [ - 'Date', 'Mon, 15 May 2023 23:26:42 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '63979', - 'Connection', 'close', - 'openai-version', '2020-10-01', - 'openai-processing-ms', '164' - ]) - }) - - after(() => { - 
nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -720,47 +454,6 @@ describe('Plugin', () => { }) describe('retrieve model', () => { - let scope - - before(() => { - scope = nock('https://api.openai.com:443') - .get('/v1/models/gpt-4') - .reply(200, { - id: 'gpt-4', - object: 'model', - created: 1678604602, - owned_by: 'openai', - permission: [{ - id: 'modelperm-ffiDrbtOGIZuczdJcFuOo2Mi', - object: 'model_permission', - created: 1684185078, - allow_create_engine: false, - allow_sampling: false, - allow_logprobs: false, - allow_search_indices: false, - allow_view: false, - allow_fine_tuning: false, - organization: '*', - group: null, - is_blocking: false - }], - root: 'gpt-4', - parent: 'stevebob' - }, [ - 'Date', 'Mon, 15 May 2023 23:41:40 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '548', - 'Connection', 'close', - 'openai-version', '2020-10-01', - 'openai-processing-ms', '27' - ]) - }) - - after(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -778,7 +471,7 @@ describe('Plugin', () => { // expect(traces[0][0].meta).to.have.property('openai.response.permission.group', null) expect(traces[0][0].meta).to.have.property('openai.request.id', 'gpt-4') expect(traces[0][0].meta).to.have.property('openai.response.owned_by', 'openai') - expect(traces[0][0].meta).to.have.property('openai.response.parent', 'stevebob') + expect(traces[0][0].meta).to.have.property('openai.response.parent', 'gpt-4') expect(traces[0][0].meta).to.have.property('openai.response.permission.id', 'modelperm-ffiDrbtOGIZuczdJcFuOo2Mi') expect(traces[0][0].meta).to.have.property('openai.response.permission.organization', '*') @@ -807,160 +500,106 @@ describe('Plugin', () => { }) }) - describe('create edit', () => { - let scope - - before(() => { - scope = 
nock('https://api.openai.com:443') - .post('/v1/edits') - .reply(200, { - object: 'edit', - created: 1684267309, - choices: [{ - text: 'What day of the week is it, Bob?\n', - index: 0 - }], - usage: { - prompt_tokens: 25, - completion_tokens: 28, - total_tokens: 53 + describe('delete model', () => { + it('makes a successful call', async () => { + const checkTraces = agent + .assertSomeTraces(traces => { + expect(traces[0][0]).to.have.property('name', 'openai.request') + expect(traces[0][0]).to.have.property('type', 'openai') + if (semver.satisfies(realVersion, '>=4.0.0') && DD_MAJOR < 6) { + expect(traces[0][0]).to.have.property('resource', 'models.del') + } else { + expect(traces[0][0]).to.have.property('resource', 'deleteModel') } - }, [ - 'Date', 'Tue, 16 May 2023 20:01:49 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '172', - 'Connection', 'close', - 'openai-model', 'text-davinci-edit:001', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '920', - 'openai-version', '2020-10-01', - 'x-ratelimit-limit-requests', '20', - 'x-ratelimit-remaining-requests', '19', - 'x-ratelimit-reset-requests', '3s', - 'x-request-id', 'aa28029fd9758334bcead67af867e8fc' - - ]) - }) + expect(traces[0][0]).to.have.property('error', 0) + expect(traces[0][0].meta).to.have.property('openai.request.method', 'DELETE') + expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/models/*') + + expect(traces[0][0].meta).to.have.property('openai.request.fine_tune_id', 'ft-10RCfqSvgyEcauomw7VpiYco') + expect(traces[0][0].metrics).to.have.property('openai.response.deleted', 1) + expect(traces[0][0].meta).to.have.property('openai.response.id', 'ft-10RCfqSvgyEcauomw7VpiYco') + }) + + if (semver.satisfies(realVersion, '>=4.0.0')) { + const operation = semver.satisfies(realVersion, '>=5') ? 
'delete' : 'del' + const result = await openai.models[operation]('ft-10RCfqSvgyEcauomw7VpiYco') + + expect(result.deleted).to.eql(true) + } else { + const result = await openai.deleteModel('ft-10RCfqSvgyEcauomw7VpiYco') - after(() => { - nock.removeInterceptor(scope) - scope.done() + expect(result.data.deleted).to.eql(true) + } + + await checkTraces }) + }) - if (semver.satisfies(realVersion, '<4.0.0')) { - // `edits.create` was deprecated and removed after 4.0.0 - it('makes a successful call', async () => { - const checkTraces = agent - .assertSomeTraces(traces => { - expect(traces[0][0]).to.have.property('name', 'openai.request') - expect(traces[0][0]).to.have.property('type', 'openai') - expect(traces[0][0]).to.have.property('resource', 'createEdit') - expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') - expect(traces[0][0].meta).to.have.property('openai.request.method', 'POST') - expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/edits') + describe('create edit', () => { + it('makes a successful call', async function () { + if (semver.satisfies(realVersion, '>=4.0.0')) { + // `edits.create` was deprecated and removed after 4.0.0 + this.skip() + } - expect(traces[0][0].meta).to.have.property('openai.request.input', 'What day of the wek is it?') - expect(traces[0][0].meta).to.have.property('openai.request.instruction', 'Fix the spelling mistakes') - expect(traces[0][0].meta).to.have.property('openai.request.model', 'text-davinci-edit-001') - expect(traces[0][0].meta).to.have.property('openai.request.user', 'hunter2') - expect(traces[0][0].meta).to.have.property('openai.response.choices.0.text', - 'What day of the week is it, Bob?\\n') - expect(traces[0][0].metrics).to.have.property('openai.request.n', 1) - expect(traces[0][0].metrics).to.have.property('openai.request.temperature', 1.00001) - 
expect(traces[0][0].metrics).to.have.property('openai.request.top_p', 0.999) - expect(traces[0][0].metrics).to.have.property('openai.response.choices_count', 1) - expect(traces[0][0].metrics).to.have.property('openai.response.created', 1684267309) - expect(traces[0][0].metrics).to.have.property('openai.response.usage.completion_tokens', 28) - expect(traces[0][0].metrics).to.have.property('openai.response.usage.prompt_tokens', 25) - expect(traces[0][0].metrics).to.have.property('openai.response.usage.total_tokens', 53) - }) + const checkTraces = agent + .assertSomeTraces(traces => { + expect(traces[0][0]).to.have.property('name', 'openai.request') + expect(traces[0][0]).to.have.property('type', 'openai') + expect(traces[0][0]).to.have.property('resource', 'createEdit') + expect(traces[0][0]).to.have.property('error', 0) + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') + expect(traces[0][0].meta).to.have.property('openai.request.method', 'POST') + expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/edits') - const result = await openai.createEdit({ - model: 'text-davinci-edit-001', - input: 'What day of the wek is it?', - instruction: 'Fix the spelling mistakes', - n: 1, - temperature: 1.00001, - top_p: 0.999, - user: 'hunter2' + expect(traces[0][0].meta).to.have.property('openai.request.input', 'What day of the wek is it?') + expect(traces[0][0].meta).to.have.property('openai.request.instruction', 'Fix the spelling mistakes') + expect(traces[0][0].meta).to.have.property('openai.request.model', 'text-davinci-edit-001') + expect(traces[0][0].meta).to.have.property('openai.request.user', 'hunter2') + expect(traces[0][0].meta).to.have.property('openai.response.choices.0.text', + 'Edited: What day of the wek is it?\\n') + expect(traces[0][0].metrics).to.have.property('openai.request.n', 1) + expect(traces[0][0].metrics).to.have.property('openai.request.temperature', 1.00001) + 
expect(traces[0][0].metrics).to.have.property('openai.request.top_p', 0.999) + expect(traces[0][0].metrics).to.have.property('openai.response.choices_count', 1) + expect(traces[0][0].metrics).to.have.property('openai.response.created', 1684267309) + expect(traces[0][0].metrics).to.have.property('openai.response.usage.completion_tokens', 28) + expect(traces[0][0].metrics).to.have.property('openai.response.usage.prompt_tokens', 25) + expect(traces[0][0].metrics).to.have.property('openai.response.usage.total_tokens', 53) }) - expect(result.data.choices[0].text).to.eql('What day of the week is it, Bob?\n') + const result = await openai.createEdit({ + model: 'text-davinci-edit-001', + input: 'What day of the wek is it?', + instruction: 'Fix the spelling mistakes', + n: 1, + temperature: 1.00001, + top_p: 0.999, + user: 'hunter2' + }) - clock.tick(10 * 1000) + expect(result.data.choices[0].text).to.exist - await checkTraces - - const expectedTags = [ - 'error:0', - 'org:kill-9', - 'endpoint:/v1/edits', - 'model:text-davinci-edit:001' - ] + clock.tick(10 * 1000) - expect(metricStub).to.be.calledWith('openai.ratelimit.requests', 20, 'g', expectedTags) - expect(metricStub).to.be.calledWith('openai.ratelimit.remaining.requests', 19, 'g', expectedTags) + await checkTraces - expect(externalLoggerStub).to.have.been.calledWith({ - status: 'info', - message: semver.satisfies(realVersion, '>=4.0.0') && DD_MAJOR < 6 - ? 'sampled edits.create' - : 'sampled createEdit', - input: 'What day of the wek is it?', - instruction: 'Fix the spelling mistakes', - choices: [{ - text: 'What day of the week is it, Bob?\n', - index: 0 - }] - }) + expect(externalLoggerStub).to.have.been.calledWith({ + status: 'info', + message: semver.satisfies(realVersion, '>=4.0.0') && DD_MAJOR < 6 + ? 
'sampled edits.create' + : 'sampled createEdit', + input: 'What day of the wek is it?', + instruction: 'Fix the spelling mistakes', + choices: [{ + text: 'Edited: What day of the wek is it?\n', + index: 0 + }] }) - } + }) }) describe('list files', () => { - let scope - - before(() => { - scope = nock('https://api.openai.com:443') - .get('/v1/files') - .reply(200, { - object: 'list', - data: [{ - object: 'file', - id: 'file-foofoofoo', - purpose: 'fine-tune-results', - filename: 'compiled_results.csv', - bytes: 3460, - created_at: 1684000162, - status: 'processed', - status_details: null - }, { - object: 'file', - id: 'file-barbarbar', - purpose: 'fine-tune-results', - filename: 'compiled_results.csv', - bytes: 13595, - created_at: 1684000508, - status: 'processed', - status_details: null - }] - }, [ - 'Date', 'Wed, 17 May 2023 21:34:04 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '25632', - 'Connection', 'close', - 'openai-version', '2020-10-01', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '660' - ]) - }) - - after(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -972,7 +611,7 @@ describe('Plugin', () => { expect(traces[0][0]).to.have.property('resource', 'listFiles') } expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/files') expect(traces[0][0].meta).to.have.property('openai.request.method', 'GET') @@ -983,12 +622,12 @@ describe('Plugin', () => { const result = await openai.files.list() expect(result.data.length).to.eql(2) - expect(result.data[0].id).to.eql('file-foofoofoo') + expect(result.data[0].id).to.exist } else { const result = await openai.listFiles() 
expect(result.data.data.length).to.eql(2) - expect(result.data.data[0].id).to.eql('file-foofoofoo') + expect(result.data.data[0].id).to.exist } await checkTraces @@ -996,36 +635,6 @@ describe('Plugin', () => { }) describe('create file', () => { - let scope - - before(() => { - scope = nock('https://api.openai.com:443') - .post('/v1/files') - .reply(200, { - object: 'file', - id: 'file-268aYWYhvxWwHb4nIzP9FHM6', - purpose: 'fine-tune', - filename: 'dave-hal.jsonl', - bytes: 356, - created_at: 1684362764, - status: 'uploaded', - status_details: 'foo' // dummy value for testing - }, [ - 'Date', 'Wed, 17 May 2023 22:32:44 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '216', - 'Connection', 'close', - 'openai-version', '2020-10-01', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '1021' - ]) - }) - - after(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -1037,7 +646,7 @@ describe('Plugin', () => { expect(traces[0][0]).to.have.property('resource', 'createFile') } expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/files') expect(traces[0][0].meta).to.have.property('openai.request.method', 'POST') @@ -1071,30 +680,6 @@ describe('Plugin', () => { }) describe('delete file', () => { - let scope - - before(() => { - scope = nock('https://api.openai.com:443') - .delete('/v1/files/file-268aYWYhvxWwHb4nIzP9FHM6') - .reply(200, { - object: 'file', - id: 'file-268aYWYhvxWwHb4nIzP9FHM6', - deleted: true - }, [ - 'Date', 'Wed, 17 May 2023 23:03:54 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '83', - 'Connection', 'close', - 'openai-version', '2020-10-01', - 'openai-organization', 
'kill-9' - ]) - }) - - after(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -1106,7 +691,7 @@ describe('Plugin', () => { expect(traces[0][0]).to.have.property('resource', 'deleteFile') } expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.method', 'DELETE') expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/files/*') @@ -1116,7 +701,8 @@ describe('Plugin', () => { }) if (semver.satisfies(realVersion, '>=4.0.0')) { - const result = await openai.files.del('file-268aYWYhvxWwHb4nIzP9FHM6') + const operation = semver.satisfies(realVersion, '>=5') ? 'delete' : 'del' + const result = await openai.files[operation]('file-268aYWYhvxWwHb4nIzP9FHM6') expect(result.deleted).to.eql(true) } else { @@ -1130,36 +716,6 @@ describe('Plugin', () => { }) describe('retrieve file', () => { - let scope - - before(() => { - scope = nock('https://api.openai.com:443') - .get('/v1/files/file-fIkEUgQPWnVXNKPJsr4pEWiz') - .reply(200, { - object: 'file', - id: 'file-fIkEUgQPWnVXNKPJsr4pEWiz', - purpose: 'fine-tune', - filename: 'dave-hal.jsonl', - bytes: 356, - created_at: 1684362764, - status: 'uploaded', - status_details: 'foo' // dummy value for testing - }, [ - 'Date', 'Wed, 17 May 2023 23:14:02 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '240', - 'Connection', 'close', - 'openai-version', '2020-10-01', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '18' - ]) - }) - - after(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -1171,7 +727,7 @@ describe('Plugin', () => { 
expect(traces[0][0]).to.have.property('resource', 'retrieveFile') } expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.method', 'GET') expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/files/*') @@ -1200,28 +756,6 @@ describe('Plugin', () => { }) describe('download file', () => { - let scope - - before(() => { - scope = nock('https://api.openai.com:443') - .get('/v1/files/file-t3k1gVSQDHrfZnPckzftlZ4A/content') - .reply(200, '{"prompt": "foo?", "completion": "bar."}\n{"prompt": "foofoo?", "completion": "barbar."}\n', [ - 'Date', 'Wed, 17 May 2023 23:26:01 GMT', - 'Content-Type', 'application/octet-stream', - 'Transfer-Encoding', 'chunked', - 'Connection', 'close', - 'content-disposition', 'attachment; filename="dave-hal.jsonl"', - 'openai-version', '2020-10-01', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '128' - ]) - }) - - after(() => { - nock.removeInterceptor(scope) - scope.done() - }) - // TODO: issues with content being async arraybuffer, how to compute byteLength before promise resolves? 
it('makes a successful call', async () => { const checkTraces = agent @@ -1236,7 +770,7 @@ describe('Plugin', () => { expect(traces[0][0]).to.have.property('resource', 'downloadFile') } expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.method', 'GET') expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/files/*/content') @@ -1272,75 +806,6 @@ describe('Plugin', () => { }) describe('create finetune', () => { - let scope - - beforeEach(() => { - const response = { - id: 'ft-10RCfqSvgyEcauomw7VpiYco', - created_at: 1684442489, - updated_at: 1684442489, - organization_id: 'org-COOLORG', - model: 'curie', - fine_tuned_model: 'huh', - status: 'pending', - result_files: [] - } - if (semver.satisfies(realVersion, '>=4.1.0')) { - response.object = 'fine_tuning.job' - response.hyperparameters = { - n_epochs: 5, - batch_size: 3, - prompt_loss_weight: 0.01, - learning_rate_multiplier: 0.1 - } - response.validation_file = null - response.training_file = 'file-t3k1gVSQDHrfZnPckzftlZ4A' - } else { - response.object = 'fine-tunes' - response.hyperparams = { - n_epochs: 5, - batch_size: 3, - prompt_loss_weight: 0.01, - learning_rate_multiplier: 0.1 - } - response.training_files = [{ - object: 'file', - id: 'file-t3k1gVSQDHrfZnPckzftlZ4A', - purpose: 'fine-tune', - filename: 'dave-hal.jsonl', - bytes: 356, - created_at: 1684365950, - status: 'processed', - status_details: null - }] - response.validation_files = [] - response.events = [{ - object: 'fine-tune-event', - level: 'info', - message: 'Created fine-tune: ft-10RCfqSvgyEcauomw7VpiYco', - created_at: 1684442489 - }] - } - - scope = nock('https://api.openai.com:443') - .post( - semver.satisfies(realVersion, '>=4.1.0') ? 
'/v1/fine_tuning/jobs' : '/v1/fine-tunes' - ) - .reply(200, response, [ - 'Date', 'Thu, 18 May 2023 20:41:30 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '898', - 'Connection', 'close', - 'openai-version', '2020-10-01', - 'openai-processing-ms', '116' - ]) - }) - - afterEach(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -1354,7 +819,7 @@ describe('Plugin', () => { expect(traces[0][0]).to.have.property('resource', 'createFineTune') } expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.id', 'org-COOLORG') // no name just id + expect(traces[0][0].meta).to.have.property('openai.organization.id', 'datadog') // no name just id expect(traces[0][0].meta).to.have.property('openai.request.method', 'POST') if (semver.satisfies(realVersion, '>=4.1.0')) { expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/fine_tuning/jobs') @@ -1458,154 +923,6 @@ describe('Plugin', () => { }) describe('retrieve finetune', () => { - let scope - - beforeEach(() => { - const response = { - id: 'ft-10RCfqSvgyEcauomw7VpiYco', - organization_id: 'org-COOLORG', - model: 'curie', - created_at: 1684442489, - updated_at: 1684442697, - status: 'succeeded', - fine_tuned_model: 'curie:ft-foo:deleteme-2023-05-18-20-44-56' - } - - if (semver.satisfies(realVersion, '>=4.1.0')) { - response.object = 'fine_tuning.job' - response.hyperparameters = { - n_epochs: 4, - batch_size: 3, - prompt_loss_weight: 0.01, - learning_rate_multiplier: 0.1 - } - response.result_files = [ - 'file-bJyf8TM0jeSZueBo4jpodZVQ' - ] - response.validation_file = null - response.training_file = 'file-t3k1gVSQDHrfZnPckzftlZ4A' - } else { - response.object = 'fine-tune' - response.hyperparams = { - n_epochs: 4, - batch_size: 3, - prompt_loss_weight: 0.01, - learning_rate_multiplier: 0.1 - } - response.result_files = [ - 
{ - object: 'file', - id: 'file-bJyf8TM0jeSZueBo4jpodZVQ', - purpose: 'fine-tune-results', - filename: 'compiled_results.csv', - bytes: 410, - created_at: 1684442697, - status: 'processed', - status_details: null - } - ] - response.validation_files = [] - response.training_files = [{ - object: 'file', - id: 'file-t3k1gVSQDHrfZnPckzftlZ4A', - purpose: 'fine-tune', - filename: 'dave-hal.jsonl', - bytes: 356, - created_at: 1684365950, - status: 'processed', - status_details: null - }] - response.events = [ - { - object: 'fine-tune-event', - level: 'info', - message: 'Created fine-tune: ft-10RCfqSvgyEcauomw7VpiYco', - created_at: 1684442489 - }, - { - object: 'fine-tune-event', - level: 'info', - message: 'Fine-tune costs $0.00', - created_at: 1684442612 - }, - { - object: 'fine-tune-event', - level: 'info', - message: 'Fine-tune enqueued. Queue number: 0', - created_at: 1684442612 - }, - { - object: 'fine-tune-event', - level: 'info', - message: 'Fine-tune started', - created_at: 1684442614 - }, - { - object: 'fine-tune-event', - level: 'info', - message: 'Completed epoch 1/4', - created_at: 1684442677 - }, - { - object: 'fine-tune-event', - level: 'info', - message: 'Completed epoch 2/4', - created_at: 1684442677 - }, - { - object: 'fine-tune-event', - level: 'info', - message: 'Completed epoch 3/4', - created_at: 1684442678 - }, - { - object: 'fine-tune-event', - level: 'info', - message: 'Completed epoch 4/4', - created_at: 1684442679 - }, - { - object: 'fine-tune-event', - level: 'info', - message: 'Uploaded model: curie:ft-foo:deleteme-2023-05-18-20-44-56', - created_at: 1684442696 - }, - { - object: 'fine-tune-event', - level: 'info', - message: 'Uploaded result file: file-bJyf8TM0jeSZueBo4jpodZVQ', - created_at: 1684442697 - }, - { - object: 'fine-tune-event', - level: 'info', - message: 'Fine-tune succeeded', - created_at: 1684442697 - } - ] - } - - scope = nock('https://api.openai.com:443') - .get( - semver.satisfies(realVersion, '>=4.1.0') - ? 
'/v1/fine_tuning/jobs/ft-10RCfqSvgyEcauomw7VpiYco' - : '/v1/fine-tunes/ft-10RCfqSvgyEcauomw7VpiYco' - ) - .reply(200, response, [ - 'Date', 'Thu, 18 May 2023 22:11:53 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '2727', - 'Connection', 'close', - 'openai-version', '2020-10-01', - 'openai-processing-ms', '51' - ]) - }) - - after(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -1619,7 +936,7 @@ describe('Plugin', () => { expect(traces[0][0]).to.have.property('resource', 'retrieveFineTune') } expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.id', 'org-COOLORG') // no name just id + expect(traces[0][0].meta).to.have.property('openai.organization.id', 'datadog') // no name just id expect(traces[0][0].meta).to.have.property('openai.request.method', 'GET') if (semver.satisfies(realVersion, '>=4.1.0')) { expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/fine_tuning/jobs/*') @@ -1671,87 +988,6 @@ describe('Plugin', () => { }) describe('list finetunes', () => { - let scope - - beforeEach(() => { - const response = { - object: 'list' - } - - if (semver.satisfies(realVersion, '>=4.1.0')) { - response.data = [{ - object: 'fine-tuning.jobs', - id: 'ft-10RCfqSvgyEcauomw7VpiYco', - hyperparameters: { - n_epochs: 4, - batch_size: 3, - prompt_loss_weight: 0.01, - learning_rate_multiplier: 0.1 - }, - created_at: 1684442489, - updated_at: 1684442697, - organization_id: 'org-COOLORG', - model: 'curie', - fine_tuned_model: 'curie:ft-foo:deleteme-2023-05-18-20-44-56', - result_files: [], - status: 'succeeded', - validation_file: null, - training_file: 'file-t3k1gVSQDHrfZnPckzftlZ4A' - }] - } else { - response.data = [{ - object: 'fine-tune', - id: 'ft-10RCfqSvgyEcauomw7VpiYco', - hyperparams: { - n_epochs: 4, - batch_size: 3, - prompt_loss_weight: 0.01, - 
learning_rate_multiplier: 0.1 - }, - organization_id: 'org-COOLORG', - model: 'curie', - training_files: [{ - object: 'file', - id: 'file-t3k1gVSQDHrfZnPckzftlZ4A', - purpose: 'fine-tune', - filename: 'dave-hal.jsonl', - bytes: 356, - created_at: 1684365950, - status: 'processed', - status_details: null - }], - validation_files: [], - result_files: [{ - object: 'file', - id: 'file-bJyf8TM0jeSZueBo4jpodZVQ', - purpose: 'fine-tune-results', - filename: 'compiled_results.csv', - bytes: 410, - created_at: 1684442697, - status: 'processed', - status_details: null - }], - created_at: 1684442489, - updated_at: 1684442697, - status: 'succeeded', - fine_tuned_model: 'curie:ft-foo:deleteme-2023-05-18-20-44-56' - }] - } - - scope = nock('https://api.openai.com:443') - .get( - semver.satisfies(realVersion, '>=4.1.0') - ? '/v1/fine_tuning/jobs' - : '/v1/fine-tunes' - ) - .reply(200, response) - }) - - afterEach(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -1794,99 +1030,6 @@ describe('Plugin', () => { }) describe('list finetune events', () => { - let scope - - beforeEach(() => { // beforeEach allows realVersion to be set first before nocking the call - const response = { - object: 'list', - data: [ - { - level: 'info', - message: 'Created fine-tune: ft-10RCfqSvgyEcauomw7VpiYco', - created_at: 1684442489 - }, - { - level: 'info', - message: 'Fine-tune costs $0.00', - created_at: 1684442612 - }, - { - level: 'info', - message: 'Fine-tune enqueued. 
Queue number: 0', - created_at: 1684442612 - }, - { - level: 'info', - message: 'Fine-tune started', - created_at: 1684442614 - }, - { - level: 'info', - message: 'Completed epoch 1/4', - created_at: 1684442677 - }, - { - level: 'info', - message: 'Completed epoch 2/4', - created_at: 1684442677 - }, - { - level: 'info', - message: 'Completed epoch 3/4', - created_at: 1684442678 - }, - { - level: 'info', - message: 'Completed epoch 4/4', - created_at: 1684442679 - }, - { - level: 'info', - message: 'Uploaded model: curie:ft-foo:deleteme-2023-05-18-20-44-56', - created_at: 1684442696 - }, - { - level: 'info', - message: 'Uploaded result file: file-bJyf8TM0jeSZueBo4jpodZVQ', - created_at: 1684442697 - }, - { - level: 'info', - message: 'Fine-tune succeeded', - created_at: 1684442697 - } - ] - } - - for (const event of response.data) { - if (semver.satisfies(realVersion, '>=4.1.0')) { - event.object = 'fine_tuning.job.event' - } else { - event.object = 'fine-tune-event' - } - } - - scope = nock('https://api.openai.com:443') - .get( - semver.satisfies(realVersion, '>=4.1.0') - ? 
'/v1/fine_tuning/jobs/ft-10RCfqSvgyEcauomw7VpiYco/events' - : '/v1/fine-tunes/ft-10RCfqSvgyEcauomw7VpiYco/events' - ) - .reply(200, response, [ - 'Date', 'Thu, 18 May 2023 22:47:17 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '1718', - 'Connection', 'close', - 'openai-version', '2020-10-01', - 'openai-processing-ms', '33' - ]) - }) - - afterEach(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -1929,147 +1072,7 @@ describe('Plugin', () => { }) }) - describe('delete model', () => { - let scope - - before(() => { - scope = nock('https://api.openai.com:443') - .delete('/v1/models/ft-10RCfqSvgyEcauomw7VpiYco') - .reply(200, { // guessing on response format here since my key lacks permissions - object: 'model', - id: 'ft-10RCfqSvgyEcauomw7VpiYco', - deleted: true - }, [ - 'Date', 'Thu, 18 May 2023 22:59:08 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '152', - 'Connection', 'close', - 'access-control-allow-origin', '*', - 'openai-version', '2020-10-01', - 'openai-processing-ms', '23' - ]) - }) - - after(() => { - nock.removeInterceptor(scope) - scope.done() - }) - - it('makes a successful call', async () => { - const checkTraces = agent - .assertSomeTraces(traces => { - expect(traces[0][0]).to.have.property('name', 'openai.request') - expect(traces[0][0]).to.have.property('type', 'openai') - if (semver.satisfies(realVersion, '>=4.0.0') && DD_MAJOR < 6) { - expect(traces[0][0]).to.have.property('resource', 'models.del') - } else { - expect(traces[0][0]).to.have.property('resource', 'deleteModel') - } - expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.request.method', 'DELETE') - expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/models/*') - - expect(traces[0][0].meta).to.have.property('openai.request.fine_tune_id', 
'ft-10RCfqSvgyEcauomw7VpiYco') - expect(traces[0][0].metrics).to.have.property('openai.response.deleted', 1) - expect(traces[0][0].meta).to.have.property('openai.response.id', 'ft-10RCfqSvgyEcauomw7VpiYco') - }) - - if (semver.satisfies(realVersion, '>=4.0.0')) { - const result = await openai.models.del('ft-10RCfqSvgyEcauomw7VpiYco') - - expect(result.deleted).to.eql(true) - } else { - const result = await openai.deleteModel('ft-10RCfqSvgyEcauomw7VpiYco') - - expect(result.data.deleted).to.eql(true) - } - - await checkTraces - }) - }) - describe('cancel finetune', () => { - let scope - - beforeEach(() => { - const response = { - id: 'ft-TVpNqwlvermMegfRVqSOyPyS', - organization_id: 'org-COOLORG', - model: 'curie', - created_at: 1684452102, - updated_at: 1684452103, - status: 'cancelled', - fine_tuned_model: 'idk' - } - - if (semver.satisfies(realVersion, '>=4.1.0')) { - response.object = 'fine-tuning.job' - response.hyperparameters = { - n_epochs: 4, - batch_size: 3, - prompt_loss_weight: 0.01, - learning_rate_multiplier: 0.1 - } - response.training_files = 'file-t3k1gVSQDHrfZnPckzftlZ4A' - response.validation_file = null - response.result_files = [] - } else { - response.object = 'fine-tune' - response.hyperparams = { - n_epochs: 4, - batch_size: 3, - prompt_loss_weight: 0.01, - learning_rate_multiplier: 0.1 - } - response.training_files = [{ - object: 'file', - id: 'file-t3k1gVSQDHrfZnPckzftlZ4A', - purpose: 'fine-tune', - filename: 'dave-hal.jsonl', - bytes: 356, - created_at: 1684365950, - status: 'processed', - status_details: null - }] - response.validation_files = [] - response.result_files = [] - response.events = [ - { - object: 'fine-tune-event', - level: 'info', - message: 'Created fine-tune: ft-TVpNqwlvermMegfRVqSOyPyS', - created_at: 1684452102 - }, - { - object: 'fine-tune-event', - level: 'info', - message: 'Fine-tune cancelled', - created_at: 1684452103 - } - ] - } - - scope = nock('https://api.openai.com:443') - .post( - 
semver.satisfies(realVersion, '>=4.1.0') - ? '/v1/fine_tuning/jobs/ft-TVpNqwlvermMegfRVqSOyPyS/cancel' - : '/v1/fine-tunes/ft-TVpNqwlvermMegfRVqSOyPyS/cancel' - ) - .reply(200, response, [ - 'Date', 'Thu, 18 May 2023 23:21:43 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '1042', - 'Connection', 'close', - 'openai-version', '2020-10-01', - 'openai-processing-ms', '78' - ]) - }) - - afterEach(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -2083,7 +1086,7 @@ describe('Plugin', () => { expect(traces[0][0]).to.have.property('resource', 'cancelFineTune') } expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.id', 'org-COOLORG') + expect(traces[0][0].meta).to.have.property('openai.organization.id', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.method', 'POST') if (semver.satisfies(realVersion, '>=4.1.0')) { expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/fine_tuning/jobs/*/cancel') @@ -2092,7 +1095,7 @@ describe('Plugin', () => { } expect(traces[0][0].meta).to.have.property('openai.request.fine_tune_id', 'ft-TVpNqwlvermMegfRVqSOyPyS') - expect(traces[0][0].meta).to.have.property('openai.response.fine_tuned_model', 'idk') + expect(traces[0][0].meta).to.have.property('openai.response.fine_tuned_model', 'model') expect(traces[0][0].meta).to.have.property('openai.response.id', 'ft-TVpNqwlvermMegfRVqSOyPyS') expect(traces[0][0].meta).to.have.property('openai.response.model', 'curie') expect(traces[0][0].meta).to.have.property('openai.response.status', 'cancelled') @@ -2135,152 +1138,82 @@ describe('Plugin', () => { }) }) - if (semver.intersects(version, '>=3.0.1')) { - describe('create moderation', () => { - let scope - - before(() => { - scope = nock('https://api.openai.com:443') - .post('/v1/moderations') - .reply(200, { - id: 
'modr-7HHZZZylF31ahuhmH279JrKbGTHCW', - model: 'text-moderation-001', - results: [{ - flagged: true, - categories: { - sexual: false, - hate: false, - violence: true, - 'self-harm': false, - 'sexual/minors': false, - 'hate/threatening': false, - 'violence/graphic': false - }, - category_scores: { - sexual: 0.0018438849, - hate: 0.069274776, - violence: 0.74101615, - 'self-harm': 0.008981651, - 'sexual/minors': 0.00070737937, - 'hate/threatening': 0.045174375, - 'violence/graphic': 0.019271193 - } - }] - }, [ - 'Date', 'Wed, 17 May 2023 19:58:01 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '450', - 'Connection', 'close', - 'openai-version', '2020-10-01', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '419' - ]) - }) - - after(() => { - nock.removeInterceptor(scope) - scope.done() - }) + describe('create moderation', () => { + it('makes a successful call', async function () { + if (semver.satisfies(realVersion, '<3.0.1')) { + this.skip() + } - it('makes a successful call', async () => { - const checkTraces = agent - .assertSomeTraces(traces => { - expect(traces[0][0]).to.have.property('name', 'openai.request') - expect(traces[0][0]).to.have.property('type', 'openai') - if (semver.satisfies(realVersion, '>=4.0.0') && DD_MAJOR < 6) { - expect(traces[0][0]).to.have.property('resource', 'moderations.create') - } else { - expect(traces[0][0]).to.have.property('resource', 'createModeration') - } - expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') - expect(traces[0][0].meta).to.have.property('openai.request.method', 'POST') - expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/moderations') - - expect(traces[0][0].meta).to.have.property('openai.request.input', 'I want to harm the robots') - expect(traces[0][0].meta).to.have.property('openai.request.model', 'text-moderation-stable') - 
expect(traces[0][0].meta).to.have.property('openai.response.id', 'modr-7HHZZZylF31ahuhmH279JrKbGTHCW') - expect(traces[0][0].meta).to.have.property('openai.response.model', 'text-moderation-001') - expect(traces[0][0].metrics).to.have.property('openai.response.categories.sexual', 0) - expect(traces[0][0].metrics).to.have.property('openai.response.categories.hate', 0) - expect(traces[0][0].metrics).to.have.property('openai.response.categories.violence', 1) - expect(traces[0][0].metrics).to.have.property('openai.response.categories.self-harm', 0) - expect(traces[0][0].metrics).to.have.property('openai.response.categories.sexual/minors', 0) - expect(traces[0][0].metrics).to.have.property('openai.response.categories.hate/threatening', 0) - expect(traces[0][0].metrics).to.have.property('openai.response.categories.violence/graphic', 0) - expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.hate', 0.069274776) - expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.violence', 0.74101615) - expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.sexual', 0.0018438849) - expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.hate', 0.069274776) - expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.violence', 0.74101615) - expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.self-harm', 0.008981651) - expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.sexual/minors', - 0.00070737937) - expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.hate/threatening', - 0.045174375) - expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.violence/graphic', - 0.019271193) - expect(traces[0][0].metrics).to.have.property('openai.response.flagged', 1) - }) + const checkTraces = agent + .assertSomeTraces(traces => { + expect(traces[0][0]).to.have.property('name', 
'openai.request') + expect(traces[0][0]).to.have.property('type', 'openai') + if (semver.satisfies(realVersion, '>=4.0.0') && DD_MAJOR < 6) { + expect(traces[0][0]).to.have.property('resource', 'moderations.create') + } else { + expect(traces[0][0]).to.have.property('resource', 'createModeration') + } + expect(traces[0][0]).to.have.property('error', 0) + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') + expect(traces[0][0].meta).to.have.property('openai.request.method', 'POST') + expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/moderations') + + expect(traces[0][0].meta).to.have.property('openai.request.input', 'I want to harm the robots') + expect(traces[0][0].meta).to.have.property('openai.request.model', 'text-moderation-stable') + expect(traces[0][0].meta).to.have.property('openai.response.id', 'modr-7HHZZZylF31ahuhmH279JrKbGTHCW') + expect(traces[0][0].meta).to.have.property('openai.response.model', 'text-moderation-001') + expect(traces[0][0].metrics).to.have.property('openai.response.categories.sexual', 0) + expect(traces[0][0].metrics).to.have.property('openai.response.categories.hate', 0) + expect(traces[0][0].metrics).to.have.property('openai.response.categories.violence', 1) + expect(traces[0][0].metrics).to.have.property('openai.response.categories.self-harm', 0) + expect(traces[0][0].metrics).to.have.property('openai.response.categories.sexual/minors', 0) + expect(traces[0][0].metrics).to.have.property('openai.response.categories.hate/threatening', 0) + expect(traces[0][0].metrics).to.have.property('openai.response.categories.violence/graphic', 0) + expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.hate', 0.069274776) + expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.violence', 0.74101615) + expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.sexual', 0.0018438849) + 
expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.hate', 0.069274776) + expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.violence', 0.74101615) + expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.self-harm', 0.008981651) + expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.sexual/minors', + 0.00070737937) + expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.hate/threatening', + 0.045174375) + expect(traces[0][0].metrics).to.have.property('openai.response.category_scores.violence/graphic', + 0.019271193) + expect(traces[0][0].metrics).to.have.property('openai.response.flagged', 1) + }) - if (semver.satisfies(realVersion, '>=4.0.0')) { - const result = await openai.moderations.create({ - input: 'I want to harm the robots', - model: 'text-moderation-stable' - }) + if (semver.satisfies(realVersion, '>=4.0.0')) { + const result = await openai.moderations.create({ + input: 'I want to harm the robots', + model: 'text-moderation-stable' + }) - expect(result.results[0].flagged).to.eql(true) - } else { - const result = await openai.createModeration({ - input: 'I want to harm the robots', - model: 'text-moderation-stable' - }) + expect(result.results[0].flagged).to.eql(true) + } else { + const result = await openai.createModeration({ + input: 'I want to harm the robots', + model: 'text-moderation-stable' + }) - expect(result.data.results[0].flagged).to.eql(true) - } + expect(result.data.results[0].flagged).to.eql(true) + } - await checkTraces + await checkTraces - expect(externalLoggerStub).to.have.been.calledWith({ - status: 'info', - message: semver.satisfies(realVersion, '>=4.0.0') && DD_MAJOR < 6 - ? 
'sampled moderations.create' - : 'sampled createModeration', - input: 'I want to harm the robots' - }) + expect(externalLoggerStub).to.have.been.calledWith({ + status: 'info', + message: semver.satisfies(realVersion, '>=4.0.0') && DD_MAJOR < 6 + ? 'sampled moderations.create' + : 'sampled createModeration', + input: 'I want to harm the robots' }) }) - } + }) if (semver.intersects(version, '>=3.1')) { describe('create image', () => { - let scope - - beforeEach(() => { - scope = nock('https://api.openai.com:443') - .post('/v1/images/generations') - .reply(200, { - created: 1684270747, - data: [{ - url: 'https://oaidalleapiprodscus.blob.core.windows.net/private/org-COOLORG/user-FOO/img-foo.png', - b64_json: 'foobar===' - }] - }, [ - 'Date', 'Tue, 16 May 2023 20:59:07 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '545', - 'Connection', 'close', - 'openai-version', '2020-10-01', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '5085' - ]) - }) - - afterEach(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call using a string prompt', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -2292,7 +1225,7 @@ describe('Plugin', () => { expect(traces[0][0]).to.have.property('resource', 'createImage') } expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.method', 'POST') expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/images/generations') @@ -2465,33 +1398,6 @@ describe('Plugin', () => { }) describe('create image edit', () => { - let scope - - before(() => { - scope = nock('https://api.openai.com:443') - .post('/v1/images/edits') - .reply(200, { - created: 1684850118, - data: [{ - url: 
'https://oaidalleapiprodscus.blob.core.windows.net/private/org-COOLORG/user-FOO/img-bar.png', - b64_json: 'fOoF0f=' - }] - }, [ - 'Date', 'Tue, 23 May 2023 13:55:18 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '549', - 'Connection', 'close', - 'openai-version', '2020-10-01', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '9901' - ]) - }) - - after(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -2503,7 +1409,7 @@ describe('Plugin', () => { expect(traces[0][0]).to.have.property('resource', 'createImageEdit') } expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.method', 'POST') expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/images/edits') @@ -2562,33 +1468,6 @@ describe('Plugin', () => { }) describe('create image variation', () => { - let scope - - before(() => { - scope = nock('https://api.openai.com:443') - .post('/v1/images/variations') - .reply(200, { - created: 1684853320, - data: [{ - url: 'https://oaidalleapiprodscus.blob.core.windows.net/private/org-COOLORG/user-FOO/img-soup.png', - b64_json: 'foo=' - }] - }, [ - 'Date', 'Tue, 23 May 2023 14:48:40 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '547', - 'Connection', 'close', - 'openai-version', '2020-10-01', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '8411' - ]) - }) - - after(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -2600,7 +1479,7 @@ describe('Plugin', () => { expect(traces[0][0]).to.have.property('resource', 'createImageVariation') } 
expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.method', 'POST') expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/images/variations') @@ -2648,47 +1527,6 @@ describe('Plugin', () => { if (semver.intersects('>=3.2.0', version)) { describe('create chat completion', () => { - let scope - - beforeEach(() => { - scope = nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, { - id: 'chatcmpl-7GaWqyMTD9BLmkmy8SxyjUGX3KSRN', - object: 'chat.completion', - created: 1684188020, - model: 'gpt-3.5-turbo-0301', - usage: { - prompt_tokens: 37, - completion_tokens: 10, - total_tokens: 47 - }, - choices: [{ - message: { - role: 'assistant', - content: "In that case, it's best to avoid peanut", - name: 'hunter2' - }, - finish_reason: 'length', - index: 0 - }] - }, [ - 'Date', 'Mon, 15 May 2023 22:00:21 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '327', - 'access-control-allow-origin', '*', - 'openai-model', 'gpt-3.5-turbo-0301', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '713', - 'openai-version', '2020-10-01' - ]) - }) - - afterEach(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -2700,7 +1538,7 @@ describe('Plugin', () => { expect(traces[0][0]).to.have.property('resource', 'createChatCompletion') } expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.method', 'POST') 
expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/chat/completions') @@ -2717,12 +1555,11 @@ describe('Plugin', () => { expect(traces[0][0].meta).to.have.property('openai.request.model', 'gpt-3.5-turbo') expect(traces[0][0].meta).to.have.property('openai.request.stop', 'time') expect(traces[0][0].meta).to.have.property('openai.request.user', 'hunter2') - expect(traces[0][0].meta).to.have.property('openai.response.choices.0.finish_reason', 'length') + expect(traces[0][0].meta).to.have.property('openai.response.choices.0.finish_reason', 'stop') expect(traces[0][0].meta).to.have.property('openai.response.choices.0.message.content', - "In that case, it's best to avoid peanut") + 'Hello, world!') expect(traces[0][0].meta).to.have.property('openai.response.choices.0.message.role', 'assistant') - expect(traces[0][0].meta).to.have.property('openai.response.choices.0.message.name', 'hunter2') - expect(traces[0][0].meta).to.have.property('openai.response.model', 'gpt-3.5-turbo-0301') + expect(traces[0][0].meta).to.have.property('openai.response.model', 'gpt-3.5-turbo') expect(traces[0][0].metrics).to.have.property('openai.request.logit_bias.1234', -1) expect(traces[0][0].metrics).to.have.property('openai.request.max_tokens', 10) expect(traces[0][0].metrics).to.have.property('openai.request.n', 3) @@ -2773,19 +1610,11 @@ describe('Plugin', () => { const result = await prom - expect(result.id).to.eql('chatcmpl-7GaWqyMTD9BLmkmy8SxyjUGX3KSRN') - expect(result.model).to.eql('gpt-3.5-turbo-0301') - expect(result.choices[0].message.role).to.eql('assistant') - expect(result.choices[0].message.content).to.eql('In that case, it\'s best to avoid peanut') - expect(result.choices[0].finish_reason).to.eql('length') + expect(result.id).to.exist } else { const result = await openai.createChatCompletion(params) - expect(result.data.id).to.eql('chatcmpl-7GaWqyMTD9BLmkmy8SxyjUGX3KSRN') - expect(result.data.model).to.eql('gpt-3.5-turbo-0301') - 
expect(result.data.choices[0].message.role).to.eql('assistant') - expect(result.data.choices[0].message.content).to.eql('In that case, it\'s best to avoid peanut') - expect(result.data.choices[0].finish_reason).to.eql('length') + expect(result.data.id).to.exist } await checkTraces @@ -2812,10 +1641,9 @@ describe('Plugin', () => { choices: [{ message: { role: 'assistant', - content: "In that case, it's best to avoid peanut", - name: 'hunter2' + content: 'Hello, world!' }, - finish_reason: 'length', + finish_reason: 'stop', index: 0 }] }) @@ -2882,17 +1710,11 @@ describe('Plugin', () => { if (semver.satisfies(realVersion, '>=4.0.0')) { const result = await openai.chat.completions.create(params) - expect(result.id).to.eql('chatcmpl-7GaWqyMTD9BLmkmy8SxyjUGX3KSRN') - expect(result.choices[0].message.role).to.eql('assistant') - expect(result.choices[0].message.content).to.eql('In that case, it\'s best to avoid peanut') - expect(result.choices[0].finish_reason).to.eql('length') + expect(result.id).to.exist } else { const result = await openai.createChatCompletion(params) - expect(result.data.id).to.eql('chatcmpl-7GaWqyMTD9BLmkmy8SxyjUGX3KSRN') - expect(result.data.choices[0].message.role).to.eql('assistant') - expect(result.data.choices[0].message.content).to.eql('In that case, it\'s best to avoid peanut') - expect(result.data.choices[0].finish_reason).to.eql('length') + expect(result.data.id).to.exist } await checkTraces @@ -2900,57 +1722,6 @@ describe('Plugin', () => { }) describe('create chat completion with tools', () => { - let scope - - beforeEach(() => { - scope = nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, { - id: 'chatcmpl-7GaWqyMTD9BLmkmy8SxyjUGX3KSRN', - object: 'chat.completion', - created: 1684188020, - model: 'gpt-3.5-turbo-0301', - usage: { - prompt_tokens: 37, - completion_tokens: 10, - total_tokens: 47 - }, - choices: [{ - message: { - role: 'assistant', - content: null, - name: 'hunter2', - tool_calls: [ - { - id: 
'tool-1', - type: 'function', - function: { - name: 'extract_fictional_info', - arguments: '{"name":"SpongeBob","origin":"Bikini Bottom"}' - } - } - ] - }, - finish_reason: 'tool_calls', - index: 0 - }] - }, [ - 'Date', 'Mon, 15 May 2023 22:00:21 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '327', - 'access-control-allow-origin', '*', - 'openai-model', 'gpt-3.5-turbo-0301', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '713', - 'openai-version', '2020-10-01' - ]) - }) - - afterEach(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('tags the tool calls successfully', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -2959,7 +1730,7 @@ describe('Plugin', () => { 'extract_fictional_info') expect(traces[0][0].meta) .to.have.property('openai.response.choices.0.message.tool_calls.0.function.arguments', - '{"name":"SpongeBob","origin":"Bikini Bottom"}') + '{"name":"some-value","origin":"some-value"}') expect(traces[0][0].meta).to.have.property('openai.response.choices.0.finish_reason', 'tool_calls') }) @@ -3023,11 +1794,10 @@ describe('Plugin', () => { type: 'function', function: { name: 'extract_fictional_info', - arguments: '{"name":"SpongeBob","origin":"Bikini Bottom"}' + arguments: '{"name":"some-value","origin":"some-value"}' } } - ], - name: 'hunter2' + ] }, finish_reason: 'tool_calls', index: 0 @@ -3037,46 +1807,6 @@ describe('Plugin', () => { }) describe('create transcription', () => { - let scope - - before(() => { - scope = nock('https://api.openai.com:443') - .post('/v1/audio/transcriptions') - .reply(200, { - task: 'transcribe', - language: 'english', - duration: 2.19, - segments: [{ - id: 0, - seek: 0, - start: 0, - end: 2, - text: ' Hello, friend.', - tokens: [50364, 2425, 11, 1277, 13, 50464], - temperature: 0.5, - avg_logprob: -0.7777707236153739, - compression_ratio: 0.6363636363636364, - no_speech_prob: 0.043891049921512604, - transient: false - }], - text: 'Hello, friend.' 
- }, [ - 'Date', 'Fri, 19 May 2023 03:19:49 GMT', - 'Content-Type', 'text/plain; charset=utf-8', - 'Content-Length', '15', - 'Connection', 'close', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '595', - 'openai-version', '2020-10-01' - ] - ) - }) - - after(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -3088,7 +1818,7 @@ describe('Plugin', () => { expect(traces[0][0]).to.have.property('resource', 'createTranscription') } expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/audio/transcriptions') expect(traces[0][0].meta).to.have.property('openai.request.filename', 'hello-friend.m4a') @@ -3116,7 +1846,7 @@ describe('Plugin', () => { }) // for OpenAI v4, the result is a stringified version of the JSON response - expect(typeof result).to.eql('string') + expect(result.text).to.eql('Hello, friend.') } else { const result = await openai.createTranscription( fs.createReadStream(Path.join(__dirname, '/hello-friend.m4a')), @@ -3144,45 +1874,6 @@ describe('Plugin', () => { }) describe('create translation', () => { - let scope - - before(() => { - scope = nock('https://api.openai.com:443') - .post('/v1/audio/translations') - .reply(200, { - task: 'translate', - language: 'english', - duration: 1.74, - segments: [{ - id: 0, - seek: 0, - start: 0, - end: 3, - text: ' Guten Tag!', - tokens: [50364, 42833, 11204, 0, 50514], - temperature: 0.5, - avg_logprob: -0.5626437266667684, - compression_ratio: 0.5555555555555556, - no_speech_prob: 0.01843200996518135, - transient: false - }], - text: 'Guten Tag!' 
- }, [ - 'Date', 'Fri, 19 May 2023 03:41:25 GMT', - 'Content-Type', 'application/json', - 'Content-Length', '334', - 'Connection', 'close', - 'openai-organization', 'kill-9', - 'openai-processing-ms', '520', - 'openai-version', '2020-10-01' - ]) - }) - - after(() => { - nock.removeInterceptor(scope) - scope.done() - }) - it('makes a successful call', async () => { const checkTraces = agent .assertSomeTraces(traces => { @@ -3194,7 +1885,7 @@ describe('Plugin', () => { expect(traces[0][0]).to.have.property('resource', 'createTranslation') } expect(traces[0][0]).to.have.property('error', 0) - expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9') + expect(traces[0][0].meta).to.have.property('openai.organization.name', 'datadog') expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/audio/translations') expect(traces[0][0].meta).to.have.property('openai.request.filename', 'guten-tag.m4a') @@ -3248,27 +1939,14 @@ describe('Plugin', () => { if (semver.intersects('>4.1.0', version)) { describe('streamed responses', () => { - afterEach(() => { - nock.cleanAll() - }) - it('makes a successful chat completion call', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, function () { - return fs.createReadStream(Path.join(__dirname, 'streamed-responses/chat.completions.simple.txt')) - }, { - 'Content-Type': 'text/plain', - 'openai-organization': 'kill-9' - }) - const checkTraces = agent .assertSomeTraces(traces => { const span = traces[0][0] expect(span).to.have.property('name', 'openai.request') expect(span).to.have.property('type', 'openai') expect(span).to.have.property('error', 0) - expect(span.meta).to.have.property('openai.organization.name', 'kill-9') + expect(span.meta).to.have.property('openai.organization.name', 'datadog') expect(span.meta).to.have.property('openai.request.method', 'POST') expect(span.meta).to.have.property('openai.request.endpoint', '/v1/chat/completions') 
expect(span.meta).to.have.property('openai.request.model', 'gpt-4o') @@ -3309,22 +1987,13 @@ describe('Plugin', () => { }) it('makes a successful chat completion call with empty stream', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, function () { - return fs.createReadStream(Path.join(__dirname, 'streamed-responses/chat.completions.empty.txt')) - }, { - 'Content-Type': 'text/plain', - 'openai-organization': 'kill-9' - }) - const checkTraces = agent .assertSomeTraces(traces => { const span = traces[0][0] expect(span).to.have.property('name', 'openai.request') expect(span).to.have.property('type', 'openai') expect(span).to.have.property('error', 0) - expect(span.meta).to.have.property('openai.organization.name', 'kill-9') + expect(span.meta).to.have.property('openai.organization.name', 'datadog') expect(span.meta).to.have.property('openai.request.method', 'POST') expect(span.meta).to.have.property('openai.request.endpoint', '/v1/chat/completions') expect(span.meta).to.have.property('openai.request.model', 'gpt-4o') @@ -3337,7 +2006,8 @@ describe('Plugin', () => { model: 'gpt-4o', messages: [{ role: 'user', content: 'Hello, OpenAI!', name: 'hunter2' }], temperature: 0.5, - stream: true + stream: true, + n: 0 }) for await (const part of stream) { @@ -3348,22 +2018,13 @@ describe('Plugin', () => { }) it('makes a successful chat completion call with multiple choices', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, function () { - return fs.createReadStream(Path.join(__dirname, 'streamed-responses/chat.completions.multiple.txt')) - }, { - 'Content-Type': 'text/plain', - 'openai-organization': 'kill-9' - }) - const checkTraces = agent .assertSomeTraces(traces => { const span = traces[0][0] expect(span).to.have.property('name', 'openai.request') expect(span).to.have.property('type', 'openai') expect(span).to.have.property('error', 0) - 
expect(span.meta).to.have.property('openai.organization.name', 'kill-9') + expect(span.meta).to.have.property('openai.organization.name', 'datadog') expect(span.meta).to.have.property('openai.request.method', 'POST') expect(span.meta).to.have.property('openai.request.endpoint', '/v1/chat/completions') expect(span.meta).to.have.property('openai.request.model', 'gpt-4') @@ -3414,15 +2075,6 @@ describe('Plugin', () => { }) it('makes a successful chat completion call with usage included', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, function () { - return fs.createReadStream(Path.join(__dirname, 'streamed-responses/chat.completions.simple.usage.txt')) - }, { - 'Content-Type': 'text/plain', - 'openai-organization': 'kill-9' - }) - const checkTraces = agent .assertSomeTraces(traces => { const span = traces[0][0] @@ -3451,9 +2103,9 @@ describe('Plugin', () => { const expectedTags = [ 'error:0', - 'org:kill-9', + 'org:datadog', 'endpoint:/v1/chat/completions', - 'model:gpt-3.5-turbo-0125' + 'model:gpt-3.5-turbo' ] expect(metricStub).to.have.been.calledWith('openai.tokens.prompt', 11, 'd', expectedTags) @@ -3462,15 +2114,6 @@ describe('Plugin', () => { }) it('makes a successful chat completion call without image_url usage computed', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, function () { - return fs.createReadStream(Path.join(__dirname, 'streamed-responses/chat.completions.simple.txt')) - }, { - 'Content-Type': 'text/plain', - 'openai-organization': 'kill-9' - }) - const checkTraces = agent .assertSomeTraces(traces => { const span = traces[0][0] @@ -3510,15 +2153,6 @@ describe('Plugin', () => { }) it('makes a successful completion call', async () => { - nock('https://api.openai.com:443') - .post('/v1/completions') - .reply(200, function () { - return fs.createReadStream(Path.join(__dirname, 'streamed-responses/completions.simple.txt')) - }, { - 'Content-Type': 
'text/plain', - 'openai-organization': 'kill-9' - }) - const checkTraces = agent .assertSomeTraces(traces => { const span = traces[0][0] @@ -3526,7 +2160,7 @@ describe('Plugin', () => { expect(span).to.have.property('name', 'openai.request') expect(span).to.have.property('type', 'openai') expect(span).to.have.property('error', 0) - expect(span.meta).to.have.property('openai.organization.name', 'kill-9') + expect(span.meta).to.have.property('openai.organization.name', 'datadog') expect(span.meta).to.have.property('openai.request.method', 'POST') expect(span.meta).to.have.property('openai.request.endpoint', '/v1/completions') expect(span.meta).to.have.property('openai.request.model', 'text-davinci-002') @@ -3562,15 +2196,6 @@ describe('Plugin', () => { }) it('makes a successful completion call with usage included', async () => { - nock('https://api.openai.com:443') - .post('/v1/completions') - .reply(200, function () { - return fs.createReadStream(Path.join(__dirname, 'streamed-responses/completions.simple.usage.txt')) - }, { - 'Content-Type': 'text/plain', - 'openai-organization': 'kill-9' - }) - const checkTraces = agent .assertSomeTraces(traces => { const span = traces[0][0] @@ -3599,7 +2224,7 @@ describe('Plugin', () => { const expectedTags = [ 'error:0', - 'org:kill-9', + 'org:datadog', 'endpoint:/v1/completions', 'model:gpt-3.5-turbo-instruct' ] @@ -3611,15 +2236,6 @@ describe('Plugin', () => { if (semver.intersects('>4.16.0', version)) { it('makes a successful chat completion call with tools', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, function () { - return fs.createReadStream(Path.join(__dirname, 'streamed-responses/chat.completions.tools.txt')) - }, { - 'Content-Type': 'text/plain', - 'openai-organization': 'kill-9' - }) - const checkTraces = agent .assertSomeTraces(traces => { const span = traces[0][0] @@ -3627,7 +2243,7 @@ describe('Plugin', () => { expect(span).to.have.property('name', 
'openai.request') expect(span).to.have.property('type', 'openai') expect(span).to.have.property('error', 0) - expect(span.meta).to.have.property('openai.organization.name', 'kill-9') + expect(span.meta).to.have.property('openai.organization.name', 'datadog') expect(span.meta).to.have.property('openai.request.method', 'POST') expect(span.meta).to.have.property('openai.request.endpoint', '/v1/chat/completions') expect(span.meta).to.have.property('openai.request.model', 'gpt-4') @@ -3680,17 +2296,6 @@ describe('Plugin', () => { }) it('makes a successful chat completion call with tools and content', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, function () { - return fs.createReadStream( - Path.join(__dirname, 'streamed-responses/chat.completions.tool.and.content.txt') - ) - }, { - 'Content-Type': 'text/plain', - 'openai-organization': 'kill-9' - }) - const checkTraces = agent .assertSomeTraces(traces => { const span = traces[0][0] @@ -3698,7 +2303,7 @@ describe('Plugin', () => { expect(span).to.have.property('name', 'openai.request') expect(span).to.have.property('type', 'openai') expect(span).to.have.property('error', 0) - expect(span.meta).to.have.property('openai.organization.name', 'kill-9') + expect(span.meta).to.have.property('openai.organization.name', 'datadog') expect(span.meta).to.have.property('openai.request.method', 'POST') expect(span.meta).to.have.property('openai.request.endpoint', '/v1/chat/completions') expect(span.meta).to.have.property('openai.request.model', 'gpt-4') @@ -3737,54 +2342,32 @@ describe('Plugin', () => { }) } - if (semver.intersects('>=4.59.0', version)) { - it('makes a successful call with the beta chat completions', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, { - id: 'chatcmpl-7GaWqyMTD9BLmkmy8SxyjUGX3KSRN', - object: 'chat.completion', - created: 1684188020, - model: 'gpt-4o', - usage: { - prompt_tokens: 37, - completion_tokens: 
10, - total_tokens: 47 - }, - choices: [ - { - message: { - role: 'assistant', - content: 'I am doing well, how about you?' - }, - finish_reason: 'stop', - index: 0 - } - ] - }) - - const checkTraces = agent - .assertSomeTraces(traces => { - const span = traces[0][0] - expect(span).to.have.property('name', 'openai.request') - }) + it('makes a successful call with the beta chat completions', async function () { + if (semver.satisfies(realVersion, '<4.59.0 || >=5.0.0')) { + this.skip() + } - const prom = openai.beta.chat.completions.parse({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'Hello, OpenAI!', name: 'hunter2' }], - temperature: 0.5, - stream: false + const checkTraces = agent + .assertSomeTraces(traces => { + const span = traces[0][0] + expect(span).to.have.property('name', 'openai.request') }) - expect(prom).to.have.property('withResponse') + const prom = openai.beta.chat.completions.parse({ + model: 'gpt-4o', + messages: [{ role: 'user', content: 'Hello, OpenAI!', name: 'hunter2' }], + temperature: 0.5, + stream: false + }) - const response = await prom + expect(prom).to.have.property('withResponse') - expect(response.choices[0].message.content).to.eql('I am doing well, how about you?') + const response = await prom - await checkTraces - }) - } + expect(response.choices[0].message.content).to.eql('Hello, world!') + + await checkTraces + }) }) }) }) diff --git a/packages/datadog-plugin-openai/test/mock-server.js b/packages/datadog-plugin-openai/test/mock-server.js new file mode 100644 index 00000000000..cb8dfba47a3 --- /dev/null +++ b/packages/datadog-plugin-openai/test/mock-server.js @@ -0,0 +1,926 @@ +'use strict' + +const useDebugLogs = process.env.DD_OPENAI_MOCK_SERVER_DEBUG_LOGS + +const express = require('express') +const fs = require('node:fs') +const path = require('node:path') +const app = express() + +app.use(express.json()) + +/** @type {import('http').Server} */ +let server + +/** @type {Set} */ +const connections = new Set() + 
+const debug = (...args) => { + if (useDebugLogs) { + // eslint-disable-next-line no-console + console.log(...args) + } +} + +app.post('/v1/completions', (req, res) => { + const { prompt, model, n = 1, stream = false, stream_options: streamOptions = {} } = req.body + + if (typeof prompt !== 'string') { + res.status(400).json({ + error: { + type: 'invalid_request_error' + } + }) + + return + } + + if (stream) { + // streamed responses are pre-recorded in a separate directory + res.setHeaders(new Map([ + ['Content-Type', 'text/plain'], + ['openai-model', model], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + + let file + if (streamOptions.include_usage) { + file = 'completions.simple.usage.txt' + } else { + file = 'completions.simple.txt' + } + + const filePath = path.join(__dirname, 'streamed-responses', file) + const readStream = fs.createReadStream(filePath) + + readStream.pipe(res) + + readStream.on('end', () => res.end()) + readStream.on('error', (err) => { + res.status(500).end('Error streaming file') + }) + + return + } + + const choices = [] + + for (let i = 0; i < n; i++) { + choices.push({ + text: '\n\nHello, world!', + index: i, + logprobs: null, + finish_reason: 'stop' + }) + } + + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-model', model], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'], + ['x-ratelimit-limit-requests', '3000'], + ['x-ratelimit-limit-tokens', '250000'], + ['x-ratelimit-remaining-requests', '2999'], + ['x-ratelimit-remaining-tokens', '249984'], + ['x-ratelimit-reset-requests', '20ms'], + ['x-ratelimit-reset-tokens', '3ms'] + ])) + .json({ + id: 'mock-completion-id', + object: 'text_completion', + created: Date.now(), + model, + choices, + usage: { + prompt_tokens: 3, + completion_tokens: 16, + total_tokens: 19 + } + }) +}) + +app.post('/v1/chat/completions', (req, res) => { + const { messages, model, tools, functions, stream = false, n = 1, 
 stream_options: streamOptions = {} } = req.body + + if (typeof messages !== 'object') { + res.status(400).json({ + error: { + type: 'invalid_request_error' + } + }) + + return + } + + if (stream) { + // streamed responses are pre-recorded in a separate directory + res.setHeaders(new Map([ + ['Content-Type', 'text/plain'], + ['openai-model', model], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + + let file + if (n > 1) { + file = 'chat.completions.multiple.txt' + } else if (n === 0) { + file = 'chat.completions.empty.txt' + } else if (streamOptions.include_usage) { + file = 'chat.completions.simple.usage.txt' + } else if (tools) { + file = tools.length ? 'chat.completions.tools.txt' : 'chat.completions.tool.and.content.txt' + } else { + file = 'chat.completions.simple.txt' + } + + const filePath = path.join(__dirname, 'streamed-responses', file) + const readStream = fs.createReadStream(filePath) + + readStream.pipe(res) + + readStream.on('end', () => res.end()) + readStream.on('error', (err) => { + res.status(500).end('Error streaming file') + }) + + return + } + + const response = { + id: 'mock-chat-completion-id', + object: 'chat.completion', + created: Date.now(), + model, + usage: { + prompt_tokens: 37, + completion_tokens: 10, + total_tokens: 47 + }, + choices: [{ + message: { + role: 'assistant', + content: 'Hello, world!'
+ }, + finish_reason: 'stop', + index: 0 + }] + } + + const toolsOrFunctions = tools || functions + + if (toolsOrFunctions) { + const toolCalls = [] + + for (let idx = 0; idx < toolsOrFunctions.length; idx++) { + const toolOrFunction = toolsOrFunctions[idx] + toolCalls.push({ + id: `tool-${idx + 1}`, + type: 'function', + function: { + name: toolOrFunction.function.name, + arguments: JSON.stringify( + Object + .keys(toolOrFunction.function.parameters.properties) + .reduce((acc, argName) => { + acc[argName] = 'some-value' + return acc + }, {})) + } + }) + } + + if (tools?.length) response.choices[0].message.tool_calls = toolCalls + if (functions?.length) response.choices[0].message.function_call = toolCalls[0].function + response.choices[0].finish_reason = 'tool_calls' + response.choices[0].message.content = null + } + + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-model', model], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json(response) +}) + +app.post('/v1/embeddings', (req, res) => { + const { model, input = '' } = req.body + + const inputTokens = input.split(' ').length + const usage = { + prompt_tokens: inputTokens, + total_tokens: inputTokens + } + + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-model', model], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + object: 'list', + data: [{ + object: 'embedding', + index: 0, + embedding: Array(1536).fill(0) + }], + model, + usage + }) +}) + +app.get('/v1/models', (req, res) => { + res.json({ + object: 'list', + data: [ + { + id: 'model-1', + object: 'model', + created: 1677532384, + owned_by: 'openai-internal', + permission: [{ + id: 'modelperm-KlsZlfft3Gma8pI6A8rTnyjs', + object: 'model_permission', + created: 1683912666, + allow_create_engine: false, + allow_sampling: true, + allow_logprobs: true, + allow_search_indices: false, + allow_view: true, + allow_fine_tuning: 
false, + organization: '*', + group: null, + is_blocking: false + }], + root: 'model-1', + parent: null + }, + { + id: 'model-2', + object: 'model', + created: 1649358449, + owned_by: 'openai', + permission: [{ + id: 'modelperm-49FUp5v084tBB49tC4z8LPH5', + object: 'model_permission', + created: 1669085501, + allow_create_engine: false, + allow_sampling: true, + allow_logprobs: true, + allow_search_indices: false, + allow_view: true, + allow_fine_tuning: false, + organization: '*', + group: null, + is_blocking: false + }], + root: 'model-2', + parent: null + } + ] + }) +}) + +app.get('/v1/models/:id', (req, res) => { + const { id } = req.params + + res.json({ + id, + object: 'model', + created: 1678604602, + owned_by: 'openai', + permission: [{ + id: 'modelperm-ffiDrbtOGIZuczdJcFuOo2Mi', + object: 'model_permission', + created: 1684185078, + allow_create_engine: false, + allow_sampling: false, + allow_logprobs: false, + allow_search_indices: false, + allow_view: false, + allow_fine_tuning: false, + organization: '*', + group: null, + is_blocking: false + }], + root: 'gpt-4', + parent: 'gpt-4' + }) +}) + +app.delete('/v1/models/:id', (req, res) => { + const { id } = req.params + res.json({ + object: 'model', + id, + deleted: true + }) +}) + +app.post('/v1/edits', (req, res) => { + const { input, model } = req.body + + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-model', model], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'], + ['x-ratelimit-limit-requests', '20'], + ['x-ratelimit-remaining-requests', '19'] + ])) + .json({ + object: 'edit', + created: 1684267309, + choices: [{ + text: `Edited: ${input}\n`, + index: 0 + }], + usage: { + prompt_tokens: 25, + completion_tokens: 28, + total_tokens: 53 + } + }) +}) + +app.get('/v1/files', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + 
object: 'list', + data: [{ + object: 'file', + id: 'file-foofoofoo', + purpose: 'fine-tune-results', + filename: 'compiled_results.csv', + bytes: 3460, + created_at: 1684000162, + status: 'processed', + status_details: null + }, { + object: 'file', + id: 'file-barbarbar', + purpose: 'fine-tune-results', + filename: 'compiled_results.csv', + bytes: 13595, + created_at: 1684000508, + status: 'processed', + status_details: null + }] + }) +}) + +app.post('/v1/files', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + object: 'file', + id: 'file-268aYWYhvxWwHb4nIzP9FHM6', + purpose: 'fine-tune', + filename: 'dave-hal.jsonl', + bytes: 356, + created_at: 1684362764, + status: 'uploaded', + status_details: 'foo' // dummy value for testing + }) +}) + +app.delete('/v1/files/:id', (req, res) => { + const { id } = req.params + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + object: 'file', + id, + deleted: true + }) +}) + +app.get('/v1/files/:id', (req, res) => { + const { id } = req.params + + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + object: 'file', + id, + purpose: 'fine-tune', + filename: 'dave-hal.jsonl', + bytes: 356, + created_at: 1684362764, + status: 'uploaded', + status_details: 'foo' // dummy value for testing + }) +}) + +app.get('/v1/files/:id/content', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 'text/octet-stream'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'], + ['content-disposition', 'attachment; filename="dave-hal.jsonl"'] + ])) + .send('{"prompt": "foo?", "completion": "bar."}\n{"prompt": "foofoo?", "completion": "barbar."}\n') +}) + 
+app.post('/v1/fine_tuning/jobs', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + object: 'fine-tunes', + id: 'ft-10RCfqSvgyEcauomw7VpiYco', + created_at: 1684442489, + updated_at: 1684442489, + organization_id: 'datadog', + model: 'curie', + fine_tuned_model: 'huh', + status: 'pending', + result_files: [], + hyperparameters: { + n_epochs: 5, + batch_size: 3, + prompt_loss_weight: 0.01, + learning_rate_multiplier: 0.1 + }, + validation_file: null, + training_file: 'file-t3k1gVSQDHrfZnPckzftlZ4A' + }) +}) + +app.post('/v1/fine-tunes', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + object: 'fine_tuning.job', + id: 'ft-10RCfqSvgyEcauomw7VpiYco', + created_at: 1684442489, + updated_at: 1684442489, + organization_id: 'datadog', + model: 'curie', + fine_tuned_model: 'huh', + status: 'pending', + result_files: [], + hyperparams: { + n_epochs: 5, + batch_size: 3, + prompt_loss_weight: 0.01, + learning_rate_multiplier: 0.1 + }, + validation_file: [], + training_files: [{ + object: 'file', + id: 'file-t3k1gVSQDHrfZnPckzftlZ4A', + purpose: 'fine-tune', + filename: 'dave-hal.jsonl', + bytes: 356, + created_at: 1684365950, + status: 'processed', + status_details: null + }], + events: [{ + object: 'fine-tune-event', + level: 'info', + message: 'Created fine-tune: ft-10RCfqSvgyEcauomw7VpiYco', + created_at: 1684442489 + }] + }) +}) + +app.get('/v1/fine_tuning/jobs/:id', (req, res) => { + const { id } = req.params + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + id, + object: 'fine-tuning.job', + organization_id: 'datadog', + model: 'curie', + created_at: 1684442489, + updated_at: 1684442697, + status: 'succeeded', + 
fine_tuned_model: 'curie:ft-foo:deleteme-2023-05-18-20-44-56', + hyperparameters: { + n_epochs: 4, + batch_size: 3, + prompt_loss_weight: 0.01, + learning_rate_multiplier: 0.1 + }, + result_files: [ + 'file-bJyf8TM0jeSZueBo4jpodZVQ' + ], + validation_files: null, + training_file: 'file-t3k1gVSQDHrfZnPckzftlZ4A' + }) +}) + +app.get('/v1/fine-tunes/:id', (req, res) => { + const { id } = req.params + + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + id, + object: 'fine-tune', + organization_id: 'datadog', + model: 'curie', + created_at: 1684442489, + updated_at: 1684442697, + status: 'succeeded', + fine_tuned_model: 'curie:ft-foo:deleteme-2023-05-18-20-44-56', + hyperparams: { + n_epochs: 4, + batch_size: 3, + prompt_loss_weight: 0.01, + learning_rate_multiplier: 0.1 + }, + result_files: [{}], + validation_files: [], + training_files: [{}], + events: Array(11).fill({}) + }) +}) + +app.get('/v1/fine_tuning/jobs', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + object: 'list', + data: [{ + object: 'fine-tuning.jobs', + id: 'ft-10RCfqSvgyEcauomw7VpiYco', + hyperparameters: { + n_epochs: 4, + batch_size: 3, + prompt_loss_weight: 0.01, + learning_rate_multiplier: 0.1 + }, + created_at: 1684442489, + updated_at: 1684442697, + organization_id: 'datadog', + model: 'curie', + fine_tuned_model: 'curie:ft-foo:deleteme-2023-05-18-20-44-56', + result_files: [], + status: 'succeeded', + validation_file: null, + training_file: 'file-t3k1gVSQDHrfZnPckzftlZ4A' + }] + }) +}) + +app.get('/v1/fine-tunes', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + object: 'list', + data: [{ + object: 'fine-tune', + id: 
'ft-10RCfqSvgyEcauomw7VpiYco', + hyperparams: { + n_epochs: 4, + batch_size: 3, + prompt_loss_weight: 0.01, + learning_rate_multiplier: 0.1 + }, + organization_id: 'datadog', + model: 'curie', + training_files: [{}], + validation_files: [], + result_files: [{}], + created_at: 1684442489, + updated_at: 1684442697, + status: 'succeeded', + fine_tuned_model: 'curie:ft-foo:deleteme-2023-05-18-20-44-56' + }] + }) +}) + +app.get('/v1/fine_tuning/jobs/:id/events', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + object: 'list', + data: Array(11).fill({ object: 'fine_tuning.job.event' }) + }) +}) + +app.get('/v1/fine-tunes/:id/events', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + object: 'list', + data: Array(11).fill({ object: 'fine-tune-event' }) + }) +}) + +app.post('/v1/fine_tuning/jobs/:id/cancel', (req, res) => { + const { id } = req.params + + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + id, + object: 'fine-tuning.job', + organization_id: 'datadog', + model: 'curie', + created_at: 1684452102, + updated_at: 1684452103, + status: 'cancelled', + fine_tuned_model: 'model', + hyperparameters: { + n_epochs: 4, + batch_size: 3, + prompt_loss_weight: 0.01, + learning_rate_multiplier: 0.1 + }, + result_files: [], + validation_files: null, + training_file: 'file-t3k1gVSQDHrfZnPckzftlZ4A' + }) +}) + +app.post('/v1/fine-tunes/:id/cancel', (req, res) => { + const { id } = req.params + + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + id, + object: 'fine-tune', + organization_id: 'datadog', + model: 
'curie', + created_at: 1684452102, + updated_at: 1684452103, + status: 'cancelled', + fine_tuned_model: 'model', + hyperparams: { + n_epochs: 4, + batch_size: 3, + prompt_loss_weight: 0.01, + learning_rate_multiplier: 0.1 + }, + training_files: [{ id: 'file-t3k1gVSQDHrfZnPckzftlZ4A' }], + result_files: [], + validation_files: [], + events: Array(2).fill({ object: 'fine-tune-event' }) + }) +}) + +app.post('/v1/moderations', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + id: 'modr-7HHZZZylF31ahuhmH279JrKbGTHCW', + model: 'text-moderation-001', + results: [{ + flagged: true, + categories: { + sexual: false, + hate: false, + violence: true, + 'self-harm': false, + 'sexual/minors': false, + 'hate/threatening': false, + 'violence/graphic': false + }, + category_scores: { + sexual: 0.0018438849, + hate: 0.069274776, + violence: 0.74101615, + 'self-harm': 0.008981651, + 'sexual/minors': 0.00070737937, + 'hate/threatening': 0.045174375, + 'violence/graphic': 0.019271193 + } + }] + }) +}) + +app.post('/v1/images/generations', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + created: 1684270747, + data: [{ + url: 'https://oaidalleapiprodscus.blob.core.windows.net/private/org-COOLORG/user-FOO/img-foo.png', + b64_json: 'foobar===' + }] + }) +}) + +app.post('/v1/images/edits', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + created: 1684850118, + data: [{ + url: 'https://oaidalleapiprodscus.blob.core.windows.net/private/org-COOLORG/user-FOO/img-bar.png', + b64_json: 'fOoF0f=' + }] + }) +}) + +app.post('/v1/images/variations', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 
'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + created: 1684853320, + data: [{ + url: 'https://oaidalleapiprodscus.blob.core.windows.net/private/org-COOLORG/user-FOO/img-soup.png', + b64_json: 'foo=' + }] + }) +}) + +app.post('/v1/audio/transcriptions', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + task: 'transcribe', + language: 'english', + duration: 2.19, + segments: [{ + id: 0, + seek: 0, + start: 0, + end: 2, + text: ' Hello, friend.', + tokens: [50364, 2425, 11, 1277, 13, 50464], + temperature: 0.5, + avg_logprob: -0.7777707236153739, + compression_ratio: 0.6363636363636364, + no_speech_prob: 0.043891049921512604, + transient: false + }], + text: 'Hello, friend.' + }) +}) + +app.post('/v1/audio/translations', (req, res) => { + res + .setHeaders(new Map([ + ['Content-Type', 'application/json'], + ['openai-organization', 'datadog'], + ['openai-version', '2023-10-01'] + ])) + .json({ + task: 'translate', + language: 'english', + duration: 1.74, + segments: [{ + id: 0, + seek: 0, + start: 0, + end: 3, + text: ' Guten Tag!', + tokens: [50364, 42833, 11204, 0, 50514], + temperature: 0.5, + avg_logprob: -0.5626437266667684, + compression_ratio: 0.5555555555555556, + no_speech_prob: 0.01843200996518135, + transient: false + }], + text: 'Guten Tag!' 
+ }) +}) + +/** + * Starts the mock OpenAI server + * @returns {Promise} The port the server is listening on + */ +function startMockServer () { + if (server) { + return Promise.resolve(server.address().port) + } + + return new Promise((resolve, reject) => { + server = app.listen(0, 'localhost', (err) => { + if (err) { + return reject(err) + } + server.on('connection', connection => { + connections.add(connection) + connection.on('close', () => { + connections.delete(connection) + }) + }) + + debug(`Mock OpenAI server started on http://localhost:${server.address().port}`) + resolve(server.address().port) + }) + }) +} + +/** + * Stops the mock OpenAI server + * @returns {Promise} + */ +function stopMockServer () { + for (const connection of connections) { + connection.destroy() + } + connections.clear() + + if (!server) return Promise.resolve() + + return new Promise((resolve, reject) => { + server.close((err) => { + debug('Mock OpenAI server closed') + if (err) { + return reject(err) + } + server = null + resolve() + }) + }) +} + +module.exports = { + startMockServer, + stopMockServer +} diff --git a/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.tool.and.content.txt b/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.tools.and.content.txt similarity index 100% rename from packages/datadog-plugin-openai/test/streamed-responses/chat.completions.tool.and.content.txt rename to packages/datadog-plugin-openai/test/streamed-responses/chat.completions.tools.and.content.txt diff --git a/packages/dd-trace/test/llmobs/plugins/openai/openaiv3.spec.js b/packages/dd-trace/test/llmobs/plugins/openai/openaiv3.spec.js index 144b2f76c33..daf211dc6d2 100644 --- a/packages/dd-trace/test/llmobs/plugins/openai/openaiv3.spec.js +++ b/packages/dd-trace/test/llmobs/plugins/openai/openaiv3.spec.js @@ -5,12 +5,13 @@ const Sampler = require('../../../../src/sampler') const { DogStatsDClient } = require('../../../../src/dogstatsd') const { 
NoopExternalLogger } = require('../../../../src/external-logger/src') -const nock = require('nock') const { expectedLLMObsLLMSpanEvent, deepEqualWithMockValues } = require('../../util') const chai = require('chai') const semver = require('semver') const LLMObsSpanWriter = require('../../../../src/llmobs/writers/spans') +const { startMockServer, stopMockServer } = require('../../../../../datadog-plugin-openai/test/mock-server') + const { expect } = chai chai.Assertion.addMethod('deepEqualWithMockValues', deepEqualWithMockValues) @@ -19,9 +20,10 @@ const satisfiesChatCompletion = version => semver.intersects('>=3.2.0', version) describe('integrations', () => { let openai + let mockServerPort describe('openai', () => { - before(() => { + before(async () => { sinon.stub(LLMObsSpanWriter.prototype, 'append') // reduce errors related to too many listeners @@ -33,6 +35,8 @@ describe('integrations', () => { LLMObsSpanWriter.prototype.append.reset() + mockServerPort = await startMockServer() + return agent.load('openai', {}, { llmobs: { mlApp: 'test', @@ -42,11 +46,11 @@ describe('integrations', () => { }) afterEach(() => { - nock.cleanAll() LLMObsSpanWriter.prototype.append.reset() }) - after(() => { + after(async () => { + await stopMockServer() require('../../../../../dd-trace').llmobs.disable() // unsubscribe from all events sinon.restore() return agent.close({ ritmReset: false, wipe: true }) @@ -62,26 +66,14 @@ describe('integrations', () => { const { Configuration, OpenAIApi } = module const configuration = new Configuration({ - apiKey: 'sk-DATADOG-ACCEPTANCE-TESTS' + apiKey: 'sk-DATADOG-ACCEPTANCE-TESTS', + basePath: `http://localhost:${mockServerPort}/v1` }) openai = new OpenAIApi(configuration) }) it('submits a completion span', async () => { - nock('https://api.openai.com:443') - .post('/v1/completions') - .reply(200, { - model: 'text-davinci-002', - choices: [{ - text: 'I am doing well, how about you?', - index: 0, - logprobs: null, - finish_reason: 'length' - 
}], - usage: { prompt_tokens: 3, completion_tokens: 16, total_tokens: 19 } - }, []) - const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0] @@ -94,7 +86,7 @@ describe('integrations', () => { { content: 'How are you?' } ], outputMessages: [ - { content: 'I am doing well, how about you?' } + { content: '\n\nHello, world!' } ], tokenMetrics: { input_tokens: 3, output_tokens: 16, total_tokens: 19 }, modelName: 'text-davinci-002', @@ -116,28 +108,6 @@ describe('integrations', () => { if (satisfiesChatCompletion(version)) { it('submits a chat completion span', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, { - id: 'chatcmpl-7GaWqyMTD9BLmkmy8SxyjUGX3KSRN', - object: 'chat.completion', - created: 1684188020, - model: 'gpt-3.5-turbo-0301', - usage: { - prompt_tokens: 37, - completion_tokens: 10, - total_tokens: 47 - }, - choices: [{ - message: { - role: 'assistant', - content: 'I am doing well, how about you?' - }, - finish_reason: 'length', - index: 0 - }] - }, []) - const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0] @@ -151,7 +121,7 @@ describe('integrations', () => { { role: 'user', content: 'How are you?' } ], outputMessages: [ - { role: 'assistant', content: 'I am doing well, how about you?' } + { role: 'assistant', content: 'Hello, world!' 
} ], tokenMetrics: { input_tokens: 37, output_tokens: 10, total_tokens: 47 }, modelName: 'gpt-3.5-turbo-0301', @@ -176,22 +146,6 @@ describe('integrations', () => { } it('submits an embedding span', async () => { - nock('https://api.openai.com:443') - .post('/v1/embeddings') - .reply(200, { - object: 'list', - data: [{ - object: 'embedding', - index: 0, - embedding: [-0.0034387498, -0.026400521] - }], - model: 'text-embedding-ada-002-v2', - usage: { - prompt_tokens: 2, - total_tokens: 2 - } - }, []) - const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0] @@ -203,7 +157,7 @@ describe('integrations', () => { inputDocuments: [ { text: 'Hello, world!' } ], - outputValue: '[1 embedding(s) returned with size 2]', + outputValue: '[1 embedding(s) returned with size 1536]', tokenMetrics: { input_tokens: 2, total_tokens: 2 }, modelName: 'text-embedding-ada-002-v2', modelProvider: 'openai', @@ -224,32 +178,6 @@ describe('integrations', () => { if (satisfiesChatCompletion(version)) { it('submits a chat completion span with functions', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, { - id: 'chatcmpl-7GaWqyMTD9BLmkmy8SxyjUGX3KSRN', - object: 'chat.completion', - created: 1684188020, - model: 'gpt-3.5-turbo-0301', - usage: { - prompt_tokens: 37, - completion_tokens: 10, - total_tokens: 47 - }, - choices: [{ - message: { - role: 'assistant', - content: 'THOUGHT: I will use the "extract_fictional_info" tool', - function_call: { - name: 'extract_fictional_info', - arguments: '{"name":"SpongeBob","origin":"Bikini Bottom"}' - } - }, - finish_reason: 'function_call', - index: 0 - }] - }, []) - const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0] @@ -263,13 +191,13 @@ describe('integrations', () => { inputMessages: [{ role: 'user', content: 'What 
is SpongeBob SquarePants\'s origin?' }], outputMessages: [{ role: 'assistant', - content: 'THOUGHT: I will use the "extract_fictional_info" tool', + content: '', tool_calls: [ { name: 'extract_fictional_info', arguments: { - name: 'SpongeBob', - origin: 'Bikini Bottom' + name: 'some-value', + origin: 'some-value' } } ] @@ -285,7 +213,20 @@ describe('integrations', () => { await openai.createChatCompletion({ model: 'gpt-3.5-turbo-0301', messages: [{ role: 'user', content: 'What is SpongeBob SquarePants\'s origin?' }], - functions: [{ type: 'function', functiin: { /* this doesn't matter */} }], + functions: [{ + type: 'function', + function: { + name: 'extract_fictional_info', + description: 'Get the fictional information from the body of the input text', + parameters: { + type: 'object', + properties: { + name: { type: 'string', description: 'Name of the character' }, + origin: { type: 'string', description: 'Where they live' } + } + } + } + }], function_call: 'auto' }) @@ -294,10 +235,6 @@ describe('integrations', () => { } it('submits a completion span with an error', async () => { - nock('https://api.openai.com:443') - .post('/v1/completions') - .reply(400, {}) - let error const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] @@ -325,7 +262,7 @@ describe('integrations', () => { try { await openai.createCompletion({ model: 'gpt-3.5-turbo', - prompt: 'Hello', + prompt: 5, // trigger the error max_tokens: 50 }) } catch (e) { @@ -337,10 +274,6 @@ describe('integrations', () => { if (satisfiesChatCompletion(version)) { it('submits a chat completion span with an error', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(400, {}) - let error const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] @@ -368,7 +301,7 @@ describe('integrations', () => { try { await openai.createChatCompletion({ model: 'gpt-3.5-turbo', - messages: [{ role: 'user', content: 'Hello' }], + messages: 5, // 
trigger the error max_tokens: 50 }) } catch (e) { diff --git a/packages/dd-trace/test/llmobs/plugins/openai/openaiv4.spec.js b/packages/dd-trace/test/llmobs/plugins/openai/openaiv4.spec.js index ccee47e8a94..dfb63de3d59 100644 --- a/packages/dd-trace/test/llmobs/plugins/openai/openaiv4.spec.js +++ b/packages/dd-trace/test/llmobs/plugins/openai/openaiv4.spec.js @@ -1,23 +1,21 @@ 'use strict' -const fs = require('fs') -const Path = require('path') const agent = require('../../../plugins/agent') const Sampler = require('../../../../src/sampler') const { DogStatsDClient } = require('../../../../src/dogstatsd') const { NoopExternalLogger } = require('../../../../src/external-logger/src') -const nock = require('nock') const { expectedLLMObsLLMSpanEvent, deepEqualWithMockValues } = require('../../util') const chai = require('chai') const semver = require('semver') const LLMObsSpanWriter = require('../../../../src/llmobs/writers/spans') +const { startMockServer, stopMockServer } = require('../../../../../datadog-plugin-openai/test/mock-server') const { expect } = chai -chai.Assertion.addMethod('deepEqualWithMockValues', deepEqualWithMockValues) +const { NODE_MAJOR } = require('../../../../../../version') -const baseOpenAITestsPath = '../../../../../datadog-plugin-openai/test/' +chai.Assertion.addMethod('deepEqualWithMockValues', deepEqualWithMockValues) const satisfiesTools = version => semver.intersects('>4.16.0', version) const satisfiesStream = version => semver.intersects('>4.1.0', version) @@ -26,9 +24,16 @@ describe('integrations', () => { let openai let azureOpenai let deepseekOpenai + let customOpenai + + let realVersion + let mockServerPort + let globalFile describe('openai', () => { - before(() => { + before(async () => { + mockServerPort = await startMockServer() + sinon.stub(LLMObsSpanWriter.prototype, 'append') // reduce errors related to too many listeners @@ -49,14 +54,18 @@ describe('integrations', () => { }) afterEach(() => { - nock.cleanAll() 
LLMObsSpanWriter.prototype.append.reset() }) after(() => { + stopMockServer() + + if (semver.satisfies(realVersion, '>=5.0.0') && NODE_MAJOR < 20) { + global.File = globalFile + } + sinon.restore() require('../../../../../dd-trace').llmobs.disable() // unsubscribe from all events - // delete require.cache[require.resolve('../../../../dd-trace')] return agent.close({ ritmReset: false, wipe: true }) }) @@ -67,9 +76,26 @@ describe('integrations', () => { beforeEach(() => { const requiredModule = require(moduleRequirePath) const module = requiredModule.get() + realVersion = requiredModule.version() + + if (semver.satisfies(realVersion, '>=5.0.0') && NODE_MAJOR < 20) { + /** + * resolves the following error for OpenAI v5 + * + * Error: `File` is not defined as a global, which is required for file uploads. + * Update to Node 20 LTS or newer, or set `globalThis.File` to `import('node:buffer').File`. + */ + globalFile = global.File + global.File = require('node:buffer').File + } const OpenAI = module + customOpenai = new OpenAI({ + apiKey: 'test', + baseURL: `http://localhost:${mockServerPort}/v1` + }) + openai = new OpenAI({ apiKey: 'test' }) @@ -96,19 +122,6 @@ describe('integrations', () => { }) it('submits a completion span', async () => { - nock('https://api.openai.com:443') - .post('/v1/completions') - .reply(200, { - model: 'text-davinci-002', - choices: [{ - text: 'I am doing well, how about you?', - index: 0, - logprobs: null, - finish_reason: 'length' - }], - usage: { prompt_tokens: 3, completion_tokens: 16, total_tokens: 19 } - }, []) - const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0] @@ -121,7 +134,7 @@ describe('integrations', () => { { content: 'How are you?' } ], outputMessages: [ - { content: 'I am doing well, how about you?' } + { content: '\n\nHello, world!' 
} ], tokenMetrics: { input_tokens: 3, output_tokens: 16, total_tokens: 19 }, modelName: 'text-davinci-002', @@ -133,7 +146,7 @@ describe('integrations', () => { expect(spanEvent).to.deepEqualWithMockValues(expected) }) - await openai.completions.create({ + await customOpenai.completions.create({ model: 'text-davinci-002', prompt: 'How are you?' }) @@ -142,28 +155,6 @@ describe('integrations', () => { }) it('submits a chat completion span', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, { - id: 'chatcmpl-7GaWqyMTD9BLmkmy8SxyjUGX3KSRN', - object: 'chat.completion', - created: 1684188020, - model: 'gpt-3.5-turbo-0301', - usage: { - prompt_tokens: 37, - completion_tokens: 10, - total_tokens: 47 - }, - choices: [{ - message: { - role: 'assistant', - content: 'I am doing well, how about you?' - }, - finish_reason: 'length', - index: 0 - }] - }, []) - const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0] @@ -177,7 +168,7 @@ describe('integrations', () => { { role: 'user', content: 'How are you?' } ], outputMessages: [ - { role: 'assistant', content: 'I am doing well, how about you?' } + { role: 'assistant', content: 'Hello, world!' 
} ], tokenMetrics: { input_tokens: 37, output_tokens: 10, total_tokens: 47 }, modelName: 'gpt-3.5-turbo-0301', @@ -189,7 +180,7 @@ describe('integrations', () => { expect(spanEvent).to.deepEqualWithMockValues(expected) }) - await openai.chat.completions.create({ + await customOpenai.chat.completions.create({ model: 'gpt-3.5-turbo-0301', messages: [ { role: 'system', content: 'You are a helpful assistant' }, @@ -201,22 +192,6 @@ describe('integrations', () => { }) it('submits an embedding span', async () => { - nock('https://api.openai.com:443') - .post('/v1/embeddings') - .reply(200, { - object: 'list', - data: [{ - object: 'embedding', - index: 0, - embedding: [-0.0034387498, -0.026400521] - }], - model: 'text-embedding-ada-002-v2', - usage: { - prompt_tokens: 2, - total_tokens: 2 - } - }, []) - const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0] @@ -228,7 +203,7 @@ describe('integrations', () => { inputDocuments: [ { text: 'Hello, world!' 
} ], - outputValue: '[1 embedding(s) returned with size 2]', + outputValue: '[1 embedding(s) returned with size 1536]', tokenMetrics: { input_tokens: 2, total_tokens: 2 }, modelName: 'text-embedding-ada-002-v2', modelProvider: 'openai', @@ -239,7 +214,7 @@ describe('integrations', () => { expect(spanEvent).to.deepEqualWithMockValues(expected) }) - await openai.embeddings.create({ + await customOpenai.embeddings.create({ model: 'text-embedding-ada-002-v2', input: 'Hello, world!', encoding_format: 'float' @@ -250,38 +225,6 @@ describe('integrations', () => { if (satisfiesTools(version)) { it('submits a chat completion span with tools', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, { - id: 'chatcmpl-7GaWqyMTD9BLmkmy8SxyjUGX3KSRN', - object: 'chat.completion', - created: 1684188020, - model: 'gpt-3.5-turbo-0301', - usage: { - prompt_tokens: 37, - completion_tokens: 10, - total_tokens: 47 - }, - choices: [{ - message: { - role: 'assistant', - content: 'THOUGHT: I will use the "extract_fictional_info" tool', - tool_calls: [ - { - id: 'tool-1', - type: 'function', - function: { - name: 'extract_fictional_info', - arguments: '{"name":"SpongeBob","origin":"Bikini Bottom"}' - } - } - ] - }, - finish_reason: 'tool_calls', - index: 0 - }] - }, []) - const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0] @@ -295,13 +238,13 @@ describe('integrations', () => { inputMessages: [{ role: 'user', content: 'What is SpongeBob SquarePants\'s origin?' 
}], outputMessages: [{ role: 'assistant', - content: 'THOUGHT: I will use the "extract_fictional_info" tool', + content: '', tool_calls: [ { name: 'extract_fictional_info', arguments: { - name: 'SpongeBob', - origin: 'Bikini Bottom' + name: 'some-value', + origin: 'some-value' }, tool_id: 'tool-1', type: 'function' @@ -316,10 +259,23 @@ describe('integrations', () => { expect(spanEvent).to.deepEqualWithMockValues(expected) }) - await openai.chat.completions.create({ + await customOpenai.chat.completions.create({ model: 'gpt-3.5-turbo-0301', messages: [{ role: 'user', content: 'What is SpongeBob SquarePants\'s origin?' }], - tools: [{ type: 'function', functiin: { /* this doesn't matter */} }], + tools: [{ + type: 'function', + function: { + name: 'extract_fictional_info', + description: 'Get the fictional information from the body of the input text', + parameters: { + type: 'object', + properties: { + name: { type: 'string', description: 'Name of the character' }, + origin: { type: 'string', description: 'Where they live' } + } + } + } + }], tool_choice: 'auto' }) @@ -329,17 +285,6 @@ describe('integrations', () => { if (satisfiesStream(version)) { it('submits a streamed completion span', async () => { - nock('https://api.openai.com:443') - .post('/v1/completions') - .reply(200, function () { - return fs.createReadStream(Path.join( - __dirname, baseOpenAITestsPath, 'streamed-responses/completions.simple.txt' - )) - }, { - 'Content-Type': 'text/plain', - 'openai-organization': 'kill-9' - }) - const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0] @@ -364,7 +309,7 @@ describe('integrations', () => { expect(spanEvent).to.deepEqualWithMockValues(expected) }) - const stream = await openai.completions.create({ + const stream = await customOpenai.completions.create({ model: 'text-davinci-002', prompt: 'Can you say this is a test?', temperature: 0.5, @@ -380,17 +325,6 @@ 
describe('integrations', () => { }) it('submits a streamed chat completion span', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, function () { - return fs.createReadStream(Path.join( - __dirname, baseOpenAITestsPath, 'streamed-responses/chat.completions.simple.txt' - )) - }, { - 'Content-Type': 'text/plain', - 'openai-organization': 'kill-9' - }) - const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0] @@ -415,7 +349,7 @@ describe('integrations', () => { expect(spanEvent).to.deepEqualWithMockValues(expected) }) - const stream = await openai.chat.completions.create({ + const stream = await customOpenai.chat.completions.create({ model: 'gpt-3.5-turbo-0301', messages: [{ role: 'user', content: 'Hello' }], stream: true @@ -431,17 +365,6 @@ describe('integrations', () => { if (satisfiesTools(version)) { it('submits a chat completion span with tools stream', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(200, function () { - return fs.createReadStream(Path.join( - __dirname, baseOpenAITestsPath, 'streamed-responses/chat.completions.tool.and.content.txt' - )) - }, { - 'Content-Type': 'text/plain', - 'openai-organization': 'kill-9' - }) - const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0] @@ -473,10 +396,10 @@ describe('integrations', () => { expect(spanEvent).to.deepEqualWithMockValues(expected) }) - const stream = await openai.chat.completions.create({ + const stream = await customOpenai.chat.completions.create({ model: 'gpt-3.5-turbo-0301', messages: [{ role: 'user', content: 'What function would you call to finish this?' 
}], - tools: [{ type: 'function', function: { /* this doesn't matter */ } }], + tools: [], // empty to trigger the correct scenario tool_choice: 'auto', stream: true }) @@ -492,10 +415,6 @@ describe('integrations', () => { } it('submits a completion span with an error', async () => { - nock('https://api.openai.com:443') - .post('/v1/completions') - .reply(400, {}) - let error const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] @@ -512,7 +431,7 @@ describe('integrations', () => { metadata: { max_tokens: 50 }, tags: { ml_app: 'test', language: 'javascript', integration: 'openai' }, error, - errorType: error.type || error.name, + errorType: 'Error', errorMessage: error.message, errorStack: error.stack }) @@ -523,7 +442,7 @@ describe('integrations', () => { try { await openai.completions.create({ model: 'gpt-3.5-turbo', - prompt: 'Hello', + prompt: 5, // trigger the error max_tokens: 50 }) } catch (e) { @@ -534,10 +453,6 @@ describe('integrations', () => { }) it('submits a chat completion span with an error', async () => { - nock('https://api.openai.com:443') - .post('/v1/chat/completions') - .reply(400, {}) - let error const checkSpan = agent.assertSomeTraces(traces => { const span = traces[0][0] @@ -554,7 +469,7 @@ describe('integrations', () => { metadata: { max_tokens: 50 }, tags: { ml_app: 'test', language: 'javascript', integration: 'openai' }, error, - errorType: error.type || error.name, + errorType: 'Error', errorMessage: error.message, errorStack: error.stack }) @@ -565,7 +480,7 @@ describe('integrations', () => { try { await openai.chat.completions.create({ model: 'gpt-3.5-turbo', - messages: [{ role: 'user', content: 'Hello' }], + messages: 5, // trigger the error max_tokens: 50 }) } catch (e) { @@ -576,19 +491,6 @@ describe('integrations', () => { }) it('submits an AzureOpenAI completion', async () => { - const isFromAzureOpenAIClass = azureOpenai.constructor.name === 'AzureOpenAI' - const postEndpoint = isFromAzureOpenAIClass - 
? '//openai/deployments/some-model/chat/completions' - : '/chat/completions' - const query = isFromAzureOpenAIClass - ? { 'api-version': '2024-05-01-preview' } - : {} - - nock('https://dd.openai.azure.com:443') - .post(postEndpoint) - .query(query) - .reply(200, {}) - const checkSpan = agent.assertSomeTraces(traces => { const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0] @@ -596,19 +498,19 @@ describe('integrations', () => { expect(spanEvent.meta).to.have.property('model_provider', 'azure_openai') }) - await azureOpenai.chat.completions.create({ - model: 'some-model', - messages: [] - }) + try { + await azureOpenai.chat.completions.create({ + model: 'some-model', + messages: [] + }) + } catch (e) { + // we expect an error here + } await checkSpan }) it('submits an DeepSeek completion', async () => { - nock('https://api.deepseek.com:443') - .post('/chat/completions') - .reply(200, {}) - const checkSpan = agent.assertSomeTraces(traces => { const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0] @@ -616,10 +518,14 @@ describe('integrations', () => { expect(spanEvent.meta).to.have.property('model_provider', 'deepseek') }) - await deepseekOpenai.chat.completions.create({ - model: 'some-model', - messages: [] - }) + try { + await deepseekOpenai.chat.completions.create({ + model: 'some-model', + messages: [] + }) + } catch (e) { + // we expect an error here + } await checkSpan }) diff --git a/yarn.lock b/yarn.lock index f4eb0ee3913..3b6864490bb 100644 --- a/yarn.lock +++ b/yarn.lock @@ -96,7 +96,7 @@ resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.26.5.tgz#18580d00c9934117ad719392c4f6585c9333cc35" integrity sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg== -"@babel/helper-string-parser@^7.25.9", "@babel/helper-string-parser@^7.27.1": +"@babel/helper-string-parser@^7.27.1": version "7.27.1" resolved 
"https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz#54da796097ab19ce67ed9f88b47bb2ec49367687" integrity sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA== @@ -119,7 +119,7 @@ "@babel/template" "^7.27.2" "@babel/types" "^7.27.3" -"@babel/parser@^7.26.10", "@babel/parser@^7.26.9", "@babel/parser@^7.27.2": +"@babel/parser@^7.26.10", "@babel/parser@^7.27.2": version "7.27.3" resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.27.3.tgz#1b7533f0d908ad2ac545c4d05cbe2fb6dc8cfaaf" integrity sha512-xyYxRj6+tLNDTWi0KCBcZ9V7yg3/lwL9DWh9Uwh/RIVlIfFidggcgxKX3GCXwCiswwcGRawBKbEg2LG/Y8eJhw== @@ -198,7 +198,7 @@ debug "^4.3.1" globals "^11.1.0" -"@babel/types@^7.25.9", "@babel/types@^7.26.10", "@babel/types@^7.26.9", "@babel/types@^7.27.1", "@babel/types@^7.27.3": +"@babel/types@^7.25.9", "@babel/types@^7.26.10", "@babel/types@^7.27.1", "@babel/types@^7.27.3": version "7.27.3" resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.27.3.tgz#c0257bedf33aad6aad1f406d35c44758321eb3ec" integrity sha512-Y1GkI4ktrtvmawoSq+4FCVHNryea6uR+qUQy0AGxLSsjCX0nVmkYQMBLHDkXZuo5hGx7eYdnIaslsdBFm7zbUw== @@ -3557,7 +3557,7 @@ pathval@^1.1.1: resolved "https://registry.yarnpkg.com/pathval/-/pathval-1.1.1.tgz#8534e77a77ce7ac5a2512ea21e0fdb8fcf6c3d8d" integrity sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ== -picocolors@^1.0.0, picocolors@^1.1.1: +picocolors@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==