You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Copy file name to clipboardExpand all lines: model-providers/watsonx/runtime/src/main/java/io/quarkiverse/langchain4j/watsonx/bean/TextChatParameters.java
+4-4Lines changed: 4 additions & 4 deletions
Original file line number
Diff line number
Diff line change
@@ -34,7 +34,7 @@ public static TextChatToolChoiceTool of(String name) {
34
34
private final Map<String, Integer> logitBias;
35
35
private final Boolean logprobs;
36
36
private final Integer topLogprobs;
37
-
private final Integer maxTokens;
37
+
private final Integer maxCompletionTokens;
38
38
private final Integer n;
39
39
private final Double presencePenalty;
40
40
private final Integer seed;
@@ -50,7 +50,7 @@ public TextChatParameters(Builder builder) {
50
50
this.logitBias = builder.logitBias;
51
51
this.logprobs = builder.logprobs;
52
52
this.topLogprobs = builder.topLogprobs;
53
-
this.maxTokens = builder.maxTokens;
53
+
this.maxCompletionTokens = builder.maxTokens;
54
54
this.n = builder.n;
55
55
this.presencePenalty = builder.presencePenalty;
56
56
this.temperature = builder.temperature;
@@ -172,8 +172,8 @@ public Integer getTopLogprobs() {
Copy file name to clipboardExpand all lines: model-providers/watsonx/runtime/src/main/java/io/quarkiverse/langchain4j/watsonx/runtime/config/ChatModelConfig.java
+3-3Lines changed: 3 additions & 3 deletions
Original file line number
Diff line number
Diff line change
@@ -74,9 +74,9 @@ public interface ChatModelConfig {
74
74
Optional<Integer> topLogprobs();
75
75
76
76
/**
77
-
* Specifies the maximum number of tokens that can be generated in the chat completion.
78
-
* <p>
79
-
* The total number of tokens — including both input and output — must not exceed the model's context length.
77
+
* The maximum number of tokens that can be generated in the chat completion. The total length of input tokens and generated
78
+
* tokens is limited by the
79
+
* model's context length. Set to 0 for the model's configured max generated tokens.
0 commit comments