Skip to content

Commit 52a3b3c

Browse files
committed
fix: add o3-pro and update pricing
1 parent 1d1d76d commit 52a3b3c

File tree

7 files changed

+182
-8
lines changed

7 files changed

+182
-8
lines changed

src/codegen/modelScape.js

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
// search for name: "Other models" in https://platform.openai.com/docs/models
2+
13
/**
24
* Converts a string with hyphens and dots into a valid JavaScript variable name.
35
* @param {string} name - The input string (e.g., 'gpt-4.1-mini').

src/model/o3-2025-04-16.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ import { GptEncoding } from '../GptEncoding.js'
55
export * from '../constants.js'
66
export * from '../specialTokens.js'
77
// prettier-ignore
8-
const api = GptEncoding.getEncodingApiForModel('o3-2025-04-16', () => bpeRanks, {name:"o3-2025-04-16",slug:"o3-2025-04-16",performance:5,latency:1,modalities:{input:["text","image"],output:["text"]},context_window:200000,max_output_tokens:100000,knowledge_cutoff:new Date(1717200000000),supported_features:["streaming","structured_outputs","file_search","function_calling","file_uploads","image_input","prompt_caching","evals","stored_completions"],supported_endpoints:["chat_completions","responses","batch"],reasoning_tokens:true,price_data:{main:{input:15,cached_output:7.5,output:60},batch:{input:7.5,output:30}}})
8+
const api = GptEncoding.getEncodingApiForModel('o3-2025-04-16', () => bpeRanks, {name:"o3-2025-04-16",slug:"o3-2025-04-16",performance:5,latency:1,modalities:{input:["text","image"],output:["text"]},context_window:200000,max_output_tokens:100000,knowledge_cutoff:new Date(1717200000000),supported_features:["streaming","structured_outputs","file_search","function_calling","file_uploads","image_input","prompt_caching","evals","stored_completions"],supported_endpoints:["chat_completions","responses","batch"],reasoning_tokens:true,price_data:{main:{input:2,cached_output:.5,output:8},batch:{input:1,output:4}}})
99
const {
1010
decode,
1111
decodeAsyncGenerator,

src/model/o3-pro-2025-06-10.ts

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
/* eslint-disable import/extensions */
2+
import bpeRanks from '../bpeRanks/o200k_base.js'
3+
import { GptEncoding } from '../GptEncoding.js'
4+
5+
export * from '../constants.js'
6+
export * from '../specialTokens.js'
7+
// prettier-ignore
8+
const api = GptEncoding.getEncodingApiForModel('o3-pro-2025-06-10', () => bpeRanks, {name:"o3-pro-2025-06-10",slug:"o3-pro-2025-06-10",performance:5,latency:1,modalities:{input:["text","image"],output:["text"]},context_window:200000,max_output_tokens:100000,knowledge_cutoff:new Date(1717200000000),supported_features:["structured_outputs","function_calling","image_input"],supported_endpoints:["responses","batch"],reasoning_tokens:true,price_data:{main:{input:20,output:80},batch:{input:10,output:40}}})
9+
const {
10+
decode,
11+
decodeAsyncGenerator,
12+
decodeGenerator,
13+
encode,
14+
encodeGenerator,
15+
isWithinTokenLimit,
16+
countTokens,
17+
encodeChat,
18+
encodeChatGenerator,
19+
vocabularySize,
20+
setMergeCacheSize,
21+
clearMergeCache,
22+
estimateCost,
23+
} = api
24+
export {
25+
clearMergeCache,
26+
countTokens,
27+
decode,
28+
decodeAsyncGenerator,
29+
decodeGenerator,
30+
encode,
31+
encodeChat,
32+
encodeChatGenerator,
33+
encodeGenerator,
34+
estimateCost,
35+
isWithinTokenLimit,
36+
setMergeCacheSize,
37+
vocabularySize,
38+
}
39+
// eslint-disable-next-line import/no-default-export
40+
export default api

src/model/o3-pro.ts

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
/* eslint-disable import/extensions */
2+
import bpeRanks from '../bpeRanks/o200k_base.js'
3+
import { GptEncoding } from '../GptEncoding.js'
4+
5+
export * from '../constants.js'
6+
export * from '../specialTokens.js'
7+
// prettier-ignore
8+
const api = GptEncoding.getEncodingApiForModel('o3-pro', () => bpeRanks, {name:"o3-pro-2025-06-10",slug:"o3-pro-2025-06-10",performance:5,latency:1,modalities:{input:["text","image"],output:["text"]},context_window:200000,max_output_tokens:100000,knowledge_cutoff:new Date(1717200000000),supported_features:["structured_outputs","function_calling","image_input"],supported_endpoints:["responses","batch"],reasoning_tokens:true,price_data:{main:{input:20,output:80},batch:{input:10,output:40}}})
9+
const {
10+
decode,
11+
decodeAsyncGenerator,
12+
decodeGenerator,
13+
encode,
14+
encodeGenerator,
15+
isWithinTokenLimit,
16+
countTokens,
17+
encodeChat,
18+
encodeChatGenerator,
19+
vocabularySize,
20+
setMergeCacheSize,
21+
clearMergeCache,
22+
estimateCost,
23+
} = api
24+
export {
25+
clearMergeCache,
26+
countTokens,
27+
decode,
28+
decodeAsyncGenerator,
29+
decodeGenerator,
30+
encode,
31+
encodeChat,
32+
encodeChatGenerator,
33+
encodeGenerator,
34+
estimateCost,
35+
isWithinTokenLimit,
36+
setMergeCacheSize,
37+
vocabularySize,
38+
}
39+
// eslint-disable-next-line import/no-default-export
40+
export default api

src/model/o3.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ import { GptEncoding } from '../GptEncoding.js'
55
export * from '../constants.js'
66
export * from '../specialTokens.js'
77
// prettier-ignore
8-
const api = GptEncoding.getEncodingApiForModel('o3', () => bpeRanks, {name:"o3-2025-04-16",slug:"o3-2025-04-16",performance:5,latency:1,modalities:{input:["text","image"],output:["text"]},context_window:200000,max_output_tokens:100000,knowledge_cutoff:new Date(1717200000000),supported_features:["streaming","structured_outputs","file_search","function_calling","file_uploads","image_input","prompt_caching","evals","stored_completions"],supported_endpoints:["chat_completions","responses","batch"],reasoning_tokens:true,price_data:{main:{input:15,cached_output:7.5,output:60},batch:{input:7.5,output:30}}})
8+
const api = GptEncoding.getEncodingApiForModel('o3', () => bpeRanks, {name:"o3-2025-04-16",slug:"o3-2025-04-16",performance:5,latency:1,modalities:{input:["text","image"],output:["text"]},context_window:200000,max_output_tokens:100000,knowledge_cutoff:new Date(1717200000000),supported_features:["streaming","structured_outputs","file_search","function_calling","file_uploads","image_input","prompt_caching","evals","stored_completions"],supported_endpoints:["chat_completions","responses","batch"],reasoning_tokens:true,price_data:{main:{input:2,cached_output:.5,output:8},batch:{input:1,output:4}}})
99
const {
1010
decode,
1111
decodeAsyncGenerator,

src/models.gen.ts

Lines changed: 97 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3343,6 +3343,98 @@ export {o3_mini_2025_01_31_spec as 'o3-mini-2025-01-31'}
33433343
// alias:
33443344
export { o3_mini_2025_01_31_spec as 'o3-mini' };
33453345

3346+
const o3_pro_config = {
3347+
name: 'o3-pro',
3348+
slug: 'o3-pro',
3349+
current_snapshot: 'o3-pro-2025-06-10',
3350+
tagline: 'Version of o3 with more compute for better responses',
3351+
description: 'The o-series of models are trained with reinforcement learning to think \nbefore they answer and perform complex reasoning. The o3-pro model uses more \ncompute to think harder and provide consistently better answers.\n\no3-pro is available in the [Responses API only](/docs/api-reference/responses)\nto enable support for multi-turn model interactions before responding to API \nrequests, and other advanced API features in the future. Since o3-pro is designed \nto tackle tough problems, some requests may take several minutes to finish. \nTo avoid timeouts, try using [background mode](/docs/guides/background).\n',
3352+
type: 'reasoning',
3353+
snapshots: [
3354+
'o3-pro-2025-06-10',
3355+
],
3356+
compare_prices: [
3357+
'o3',
3358+
'o3-mini',
3359+
],
3360+
supported_tools: [
3361+
'function_calling',
3362+
'file_search',
3363+
'image_generation',
3364+
'mcp',
3365+
],
3366+
rate_limits: {
3367+
tier_1: {
3368+
rpm: 500,
3369+
tpm: 3e4,
3370+
batch_queue_limit: 9e4,
3371+
},
3372+
tier_2: {
3373+
rpm: 5e3,
3374+
tpm: 45e4,
3375+
batch_queue_limit: 135e4,
3376+
},
3377+
tier_3: {
3378+
rpm: 5e3,
3379+
tpm: 8e5,
3380+
batch_queue_limit: 5e7,
3381+
},
3382+
tier_4: {
3383+
rpm: 1e4,
3384+
tpm: 2e6,
3385+
batch_queue_limit: 2e8,
3386+
},
3387+
tier_5: {
3388+
rpm: 1e4,
3389+
tpm: 3e7,
3390+
batch_queue_limit: 5e9,
3391+
},
3392+
},
3393+
} as const satisfies ModelConfig
3394+
3395+
const o3_pro_2025_06_10_spec = {
3396+
name: 'o3-pro-2025-06-10',
3397+
slug: 'o3-pro-2025-06-10',
3398+
performance: 5,
3399+
latency: 1,
3400+
modalities: {
3401+
input: [
3402+
'text',
3403+
'image',
3404+
],
3405+
output: [
3406+
'text',
3407+
],
3408+
},
3409+
context_window: 2e5,
3410+
max_output_tokens: 1e5,
3411+
knowledge_cutoff: new Date(17172e8),
3412+
supported_features: [
3413+
'structured_outputs',
3414+
'function_calling',
3415+
'image_input',
3416+
],
3417+
supported_endpoints: [
3418+
'responses',
3419+
'batch',
3420+
],
3421+
reasoning_tokens: true,
3422+
price_data: {
3423+
main: {
3424+
input: 20,
3425+
output: 80,
3426+
},
3427+
batch: {
3428+
input: 10,
3429+
output: 40,
3430+
},
3431+
},
3432+
} as const satisfies ModelSpec
3433+
export {o3_pro_2025_06_10_spec as 'o3-pro-2025-06-10'}
3434+
3435+
// alias:
3436+
export { o3_pro_2025_06_10_spec as 'o3-pro' };
3437+
33463438
const o3_config = {
33473439
name: 'o3',
33483440
slug: 'o3',
@@ -3429,13 +3521,13 @@ const o3_2025_04_16_spec = {
34293521
reasoning_tokens: true,
34303522
price_data: {
34313523
main: {
3432-
input: 15,
3433-
cached_output: 7.5,
3434-
output: 60,
3524+
input: 2,
3525+
cached_output: .5,
3526+
output: 8,
34353527
},
34363528
batch: {
3437-
input: 7.5,
3438-
output: 30,
3529+
input: 1,
3530+
output: 4,
34393531
},
34403532
},
34413533
} as const satisfies ModelSpec

src/modelsChatEnabled.gen.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
export const chatEnabledModels = ["chatgpt-4o-latest","codex-mini-latest","computer-use-preview","computer-use-preview-2025-03-11","gpt-3.5","gpt-3.5-0301","gpt-3.5-turbo","gpt-3.5-turbo-0125","gpt-3.5-turbo-0613","gpt-3.5-turbo-1106","gpt-3.5-turbo-16k-0613","gpt-3.5-turbo-instruct","gpt-4","gpt-4-0125-preview","gpt-4-0314","gpt-4-0613","gpt-4-1106-preview","gpt-4-1106-vision-preview","gpt-4-32k","gpt-4-turbo","gpt-4-turbo-2024-04-09","gpt-4-turbo-preview","gpt-4.1","gpt-4.1-2025-04-14","gpt-4.1-mini","gpt-4.1-mini-2025-04-14","gpt-4.1-nano","gpt-4.1-nano-2025-04-14","gpt-4.5-preview","gpt-4.5-preview-2025-02-27","gpt-4o","gpt-4o-2024-05-13","gpt-4o-2024-08-06","gpt-4o-2024-11-20","gpt-4o-audio-preview","gpt-4o-audio-preview-2024-10-01","gpt-4o-audio-preview-2024-12-17","gpt-4o-audio-preview-2025-06-03","gpt-4o-mini","gpt-4o-mini-2024-07-18","gpt-4o-mini-audio-preview","gpt-4o-mini-audio-preview-2024-12-17","gpt-4o-mini-search-preview","gpt-4o-mini-search-preview-2025-03-11","gpt-4o-search-preview","gpt-4o-search-preview-2025-03-11","o1","o1-2024-12-17","o1-mini","o1-mini-2024-09-12","o1-preview","o1-preview-2024-09-12","o1-pro","o1-pro-2025-03-19","o3","o3-2025-04-16","o3-mini","o3-mini-2025-01-31","o4-mini","o4-mini-2025-04-16"] as const;
1+
export const chatEnabledModels = ["chatgpt-4o-latest","codex-mini-latest","computer-use-preview","computer-use-preview-2025-03-11","gpt-3.5","gpt-3.5-0301","gpt-3.5-turbo","gpt-3.5-turbo-0125","gpt-3.5-turbo-0613","gpt-3.5-turbo-1106","gpt-3.5-turbo-16k-0613","gpt-3.5-turbo-instruct","gpt-4","gpt-4-0125-preview","gpt-4-0314","gpt-4-0613","gpt-4-1106-preview","gpt-4-1106-vision-preview","gpt-4-32k","gpt-4-turbo","gpt-4-turbo-2024-04-09","gpt-4-turbo-preview","gpt-4.1","gpt-4.1-2025-04-14","gpt-4.1-mini","gpt-4.1-mini-2025-04-14","gpt-4.1-nano","gpt-4.1-nano-2025-04-14","gpt-4.5-preview","gpt-4.5-preview-2025-02-27","gpt-4o","gpt-4o-2024-05-13","gpt-4o-2024-08-06","gpt-4o-2024-11-20","gpt-4o-audio-preview","gpt-4o-audio-preview-2024-10-01","gpt-4o-audio-preview-2024-12-17","gpt-4o-audio-preview-2025-06-03","gpt-4o-mini","gpt-4o-mini-2024-07-18","gpt-4o-mini-audio-preview","gpt-4o-mini-audio-preview-2024-12-17","gpt-4o-mini-search-preview","gpt-4o-mini-search-preview-2025-03-11","gpt-4o-search-preview","gpt-4o-search-preview-2025-03-11","o1","o1-2024-12-17","o1-mini","o1-mini-2024-09-12","o1-preview","o1-preview-2024-09-12","o1-pro","o1-pro-2025-03-19","o3","o3-2025-04-16","o3-mini","o3-mini-2025-01-31","o3-pro","o3-pro-2025-06-10","o4-mini","o4-mini-2025-04-16"] as const;

0 commit comments

Comments (0)