@@ -50,7 +50,7 @@ class TestResponse(BaseModel):
         model="gpt-5-nano",
         messages=[{"role": "user", "content": "test"}],
         response_format=TestResponse,
-        max_completion_tokens=1000,
+        max_completion_tokens=2000,
     )

     assert response is not None
@@ -79,14 +79,14 @@ class TestResponse(BaseModel):
         model="gpt-5-nano",
         messages=[{"role": "user", "content": "word: first"}],
         response_format=TestResponse,
-        max_completion_tokens=1000,
+        max_completion_tokens=2000,
     )

     response2 = sync_client_maybe_wrapped.beta.chat.completions.parse(
         model="gpt-5-nano",
         messages=[{"role": "user", "content": "word: second"}],
         response_format=TestResponse,
-        max_completion_tokens=1000,
+        max_completion_tokens=2000,
     )

     assert response1 is not None
@@ -133,7 +133,7 @@ class TestResponse(BaseModel):
         model="invalid-model-name-that-does-not-exist",
         messages=[{"role": "user", "content": "word: test"}],
         response_format=TestResponse,
-        max_completion_tokens=1000,
+        max_completion_tokens=2000,
     )

     # Verify tracing when wrapped - should have exception recorded
@@ -155,7 +155,7 @@ class TestResponse(BaseModel):
         model="gpt-5-nano",
         messages=[{"role": "user", "content": "word: test"}],
         response_format=TestResponse,
-        max_completion_tokens=1000,
+        max_completion_tokens=2000,
     )

     assert response is not None
@@ -185,14 +185,14 @@ class TestResponse(BaseModel):
         model="gpt-5-nano",
         messages=[{"role": "user", "content": "word: first"}],
         response_format=TestResponse,
-        max_completion_tokens=1000,
+        max_completion_tokens=2000,
     )

     response2 = await async_client_maybe_wrapped.beta.chat.completions.parse(
         model="gpt-5-nano",
         messages=[{"role": "user", "content": "word: second"}],
         response_format=TestResponse,
-        max_completion_tokens=1000,
+        max_completion_tokens=2000,
     )

     assert response1 is not None
@@ -242,7 +242,7 @@ class TestResponse(BaseModel):
         model="invalid-model-name-that-does-not-exist",
         messages=[{"role": "user", "content": "word: test"}],
         response_format=TestResponse,
-        max_completion_tokens=1000,
+        max_completion_tokens=2000,
     )

     # Verify tracing when wrapped - should have exception recorded
@@ -270,14 +270,14 @@ class TestResponse(BaseModel):
         model="gpt-5-nano",
         messages=[{"role": "user", "content": "word: one"}],
         response_format=TestResponse,
-        max_completion_tokens=1000,
+        max_completion_tokens=2000,
     )

     response2 = client2.beta.chat.completions.parse(
         model="gpt-5-nano",
         messages=[{"role": "user", "content": "word: two"}],
         response_format=TestResponse,
-        max_completion_tokens=1000,
+        max_completion_tokens=2000,
     )

     assert response1 is not None
@@ -333,7 +333,7 @@ def broken_serialize(obj):
         model="gpt-5-nano",
         messages=[{"role": "user", "content": "word: test"}],
         response_format=TestResponse,
-        max_completion_tokens=1000,
+        max_completion_tokens=2000,
     )

     assert response is not None
@@ -369,14 +369,14 @@ class TestResponse(BaseModel):
         model="gpt-5-nano",
         messages=[{"role": "user", "content": "word: test"}],
         response_format=TestResponse,
-        max_completion_tokens=1000,
+        max_completion_tokens=2000,
     )

     wrapped_response = wrapped.beta.chat.completions.parse(
         model="gpt-5-nano",
         messages=[{"role": "user", "content": "word: test"}],
         response_format=TestResponse,
-        max_completion_tokens=1000,
+        max_completion_tokens=2000,
     )

     assert type(unwrapped_response) is type(wrapped_response)
@@ -419,7 +419,7 @@ def broken_set_attribute(span, key, value):
         model="gpt-5-nano",
         messages=[{"role": "user", "content": "word: test"}],
         response_format=TestResponse,
-        max_completion_tokens=1000,
+        max_completion_tokens=2000,
    )

     assert response is not None
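For reference, a minimal standalone sketch of the structured-output call these tests exercise, using the raised token budget. This is not the repository's test code: it assumes OPENAI_API_KEY is set in the environment, and the TestResponse schema here is a guess with a single "word" field inferred from the "word: ..." prompts above (the real schema lives in the test file).

# Minimal sketch, not the test suite's code.
from openai import OpenAI
from pydantic import BaseModel


class TestResponse(BaseModel):
    # Assumed field; inferred from the "word: ..." prompts in the tests above.
    word: str


client = OpenAI()
response = client.beta.chat.completions.parse(
    model="gpt-5-nano",
    messages=[{"role": "user", "content": "word: test"}],
    response_format=TestResponse,
    max_completion_tokens=2000,  # the budget this diff raises from 1000
)
# message.parsed is the TestResponse instance (or None if the output was truncated)
print(response.choices[0].message.parsed)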