Skip to content

Commit 527633a

Browse files
authored
Merge pull request #1538 from andreadimaio/main
Replace 'max_tokens' with 'max_completion_tokens' in watsonx.ai
2 parents 5a4303b + fb0514d commit 527633a

File tree

2 files changed

+7
-7
lines changed

2 files changed

+7
-7
lines changed

model-providers/watsonx/runtime/src/main/java/io/quarkiverse/langchain4j/watsonx/bean/TextChatParameters.java

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ public static TextChatToolChoiceTool of(String name) {
3434
private final Map<String, Integer> logitBias;
3535
private final Boolean logprobs;
3636
private final Integer topLogprobs;
37-
private final Integer maxTokens;
37+
private final Integer maxCompletionTokens;
3838
private final Integer n;
3939
private final Double presencePenalty;
4040
private final Integer seed;
@@ -50,7 +50,7 @@ public TextChatParameters(Builder builder) {
5050
this.logitBias = builder.logitBias;
5151
this.logprobs = builder.logprobs;
5252
this.topLogprobs = builder.topLogprobs;
53-
this.maxTokens = builder.maxTokens;
53+
this.maxCompletionTokens = builder.maxTokens;
5454
this.n = builder.n;
5555
this.presencePenalty = builder.presencePenalty;
5656
this.temperature = builder.temperature;
@@ -172,8 +172,8 @@ public Integer getTopLogprobs() {
172172
return topLogprobs;
173173
}
174174

175-
public Integer getMaxTokens() {
176-
return maxTokens;
175+
public Integer getMaxCompletionTokens() {
176+
return maxCompletionTokens;
177177
}
178178

179179
public Integer getN() {

model-providers/watsonx/runtime/src/main/java/io/quarkiverse/langchain4j/watsonx/runtime/config/ChatModelConfig.java

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -74,9 +74,9 @@ public interface ChatModelConfig {
7474
Optional<Integer> topLogprobs();
7575

7676
/**
77-
* Specifies the maximum number of tokens that can be generated in the chat completion.
78-
* <p>
79-
* The total number of tokens — including both input and output — must not exceed the model's context length.
77+
* The maximum number of tokens that can be generated in the chat completion. The total length of input tokens and generated
78+
* tokens is limited by the
79+
* model's context length. Set to 0 for the model's configured max generated tokens.
8080
*/
8181
@WithDefault("1024")
8282
Integer maxTokens();

0 commit comments

Comments (0)