|
16 | 16 | "max_image_size_mb": "Maximum total size in MB for all images combined (capped at 40MB max for custom models)",
|
17 | 17 | "supports_temperature": "Whether the model accepts temperature parameter in API calls (set to false for O3/O4 reasoning models)",
|
18 | 18 | "temperature_constraint": "Type of temperature constraint: 'fixed' (fixed value), 'range' (continuous range), 'discrete' (specific values), or omit for default range",
|
| 19 | + "use_openai_response_api": "Set to true when the model must use the /responses endpoint (reasoning models like GPT-5 Pro). Leave false/omit for standard chat completions.", |
| 20 | + "default_reasoning_effort": "Default reasoning effort level for models that support it (e.g., 'low', 'medium', 'high'). Omit if not applicable.", |
19 | 21 | "description": "Human-readable description of the model",
|
20 | 22 | "intelligence_score": "1-20 human rating used as the primary signal for auto-mode model ordering"
|
21 | 23 | }
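The two added schema fields describe routing behaviour rather than model capabilities: `use_openai_response_api` selects the /responses endpoint instead of chat completions, and `default_reasoning_effort` supplies a fallback effort level. Below is a minimal Python sketch of how a caller might act on these fields; the `build_request` helper, the endpoint constants, and the payload shapes are illustrative assumptions, not the project's actual implementation.

```python
# Hypothetical sketch only: routing a request based on the two new config fields.
# The field names come from the schema above; everything else is illustrative.

CHAT_COMPLETIONS_URL = "https://api.openai.com/v1/chat/completions"  # assumed base URL
RESPONSES_URL = "https://api.openai.com/v1/responses"                # assumed base URL


def build_request(model_cfg: dict, prompt: str) -> tuple[str, dict]:
    """Pick the endpoint and payload shape implied by a custom-model entry."""
    payload = {"model": model_cfg["model_name"]}

    if model_cfg.get("use_openai_response_api"):
        # Reasoning models such as GPT-5 Pro are flagged to go through /responses.
        payload["input"] = prompt
        effort = model_cfg.get("default_reasoning_effort")
        if effort:
            payload["reasoning"] = {"effort": effort}
        return RESPONSES_URL, payload

    # Standard chat-completions path.
    payload["messages"] = [{"role": "user", "content": prompt}]
    if model_cfg.get("supports_temperature", True):
        payload["temperature"] = 0.7  # example default; real value is caller-supplied
    return CHAT_COMPLETIONS_URL, payload
```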
|
|
292 | 294 | "description": "GPT-5 (400K context, 128K output) - Advanced model with reasoning support",
|
293 | 295 | "intelligence_score": 16
|
294 | 296 | },
|
| 297 | + { |
| 298 | + "model_name": "openai/gpt-5-pro", |
| 299 | + "aliases": [ |
| 300 | + "gpt5pro" |
| 301 | + ], |
| 302 | + "context_window": 400000, |
| 303 | + "max_output_tokens": 272000, |
| 304 | + "supports_extended_thinking": true, |
| 305 | + "supports_json_mode": true, |
| 306 | + "supports_function_calling": true, |
| 307 | + "supports_images": true, |
| 308 | + "max_image_size_mb": 20.0, |
| 309 | + "supports_temperature": false, |
| 310 | + "temperature_constraint": "fixed", |
| 311 | + "use_openai_response_api": true, |
| 312 | + "default_reasoning_effort": "high", |
| 313 | + "description": "GPT-5 Pro - Advanced reasoning model with highest quality responses (text+image input, text output only)", |
| 314 | + "intelligence_score": 17 |
| 315 | + }, |
295 | 316 | {
|
296 | 317 | "model_name": "openai/gpt-5-codex",
|
297 | 318 | "aliases": [
|
|