Skip to content

Commit b3ce87b

Browse files
committed
Regenerate client
1 parent ab5e3e9 commit b3ce87b

File tree

128 files changed

+4495
-1341
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

128 files changed

+4495
-1341
lines changed

src/Elastic.Clients.Elasticsearch/_Generated/Api/Cluster/ClusterStatsResponse.g.cs

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,14 @@ namespace Elastic.Clients.Elasticsearch.Cluster;
2828

2929
public sealed partial class ClusterStatsResponse : ElasticsearchResponse
3030
{
31+
/// <summary>
32+
/// <para>
33+
/// Cross-cluster stats
34+
/// </para>
35+
/// </summary>
36+
[JsonInclude, JsonPropertyName("ccs")]
37+
public Elastic.Clients.Elasticsearch.Cluster.CCSStats Ccs { get; init; }
38+
3139
/// <summary>
3240
/// <para>
3341
/// Name of the cluster, based on the cluster name setting.

src/Elastic.Clients.Elasticsearch/_Generated/Api/IndexManagement/DeleteTemplateRequest.g.cs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@ public sealed partial class DeleteTemplateRequestParameters : RequestParameters
5252
/// <summary>
5353
/// <para>
5454
/// Delete a legacy index template.
55+
/// IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
5556
/// </para>
5657
/// </summary>
5758
public sealed partial class DeleteTemplateRequest : PlainRequest<DeleteTemplateRequestParameters>
@@ -90,6 +91,7 @@ public DeleteTemplateRequest(Elastic.Clients.Elasticsearch.Name name) : base(r =
9091
/// <summary>
9192
/// <para>
9293
/// Delete a legacy index template.
94+
/// IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
9395
/// </para>
9496
/// </summary>
9597
public sealed partial class DeleteTemplateRequestDescriptor : RequestDescriptor<DeleteTemplateRequestDescriptor, DeleteTemplateRequestParameters>

src/Elastic.Clients.Elasticsearch/_Generated/Api/IndexManagement/GetTemplateRequest.g.cs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ public sealed partial class GetTemplateRequestParameters : RequestParameters
5757

5858
/// <summary>
5959
/// <para>
60-
/// Get index templates.
60+
/// Get legacy index templates.
6161
/// Get information about one or more index templates.
6262
/// </para>
6363
/// <para>
@@ -110,7 +110,7 @@ public GetTemplateRequest(Elastic.Clients.Elasticsearch.Names? name) : base(r =>
110110

111111
/// <summary>
112112
/// <para>
113-
/// Get index templates.
113+
/// Get legacy index templates.
114114
/// Get information about one or more index templates.
115115
/// </para>
116116
/// <para>

src/Elastic.Clients.Elasticsearch/_Generated/Api/IndexManagement/PutTemplateRequest.g.cs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ public sealed partial class PutTemplateRequestParameters : RequestParameters
5757

5858
/// <summary>
5959
/// <para>
60-
/// Create or update an index template.
60+
/// Create or update a legacy index template.
6161
/// Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
6262
/// Elasticsearch applies templates to new indices based on an index pattern that matches the index name.
6363
/// </para>
@@ -185,7 +185,7 @@ public PutTemplateRequest(Elastic.Clients.Elasticsearch.Name name) : base(r => r
185185

186186
/// <summary>
187187
/// <para>
188-
/// Create or update an index template.
188+
/// Create or update a legacy index template.
189189
/// Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
190190
/// Elasticsearch applies templates to new indices based on an index pattern that matches the index name.
191191
/// </para>
@@ -426,7 +426,7 @@ protected override void Serialize(Utf8JsonWriter writer, JsonSerializerOptions o
426426

427427
/// <summary>
428428
/// <para>
429-
/// Create or update an index template.
429+
/// Create or update a legacy index template.
430430
/// Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
431431
/// Elasticsearch applies templates to new indices based on an index pattern that matches the index name.
432432
/// </para>

src/Elastic.Clients.Elasticsearch/_Generated/Api/Inference/ChatCompletionUnifiedRequest.g.cs

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,20 @@ public sealed partial class ChatCompletionUnifiedRequestParameters : RequestPara
4444
/// <para>
4545
/// Perform chat completion inference
4646
/// </para>
47+
/// <para>
48+
/// The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation.
49+
/// It only works with the <c>chat_completion</c> task type for <c>openai</c> and <c>elastic</c> inference services.
50+
/// </para>
51+
/// <para>
52+
/// IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
53+
/// For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
54+
/// </para>
55+
/// <para>
56+
/// NOTE: The <c>chat_completion</c> task type is only available within the _stream API and only supports streaming.
57+
/// The Chat completion inference API and the Stream inference API differ in their response structure and capabilities.
58+
/// The Chat completion inference API provides more comprehensive customization options through more fields and function calling support.
59+
/// If you use the <c>openai</c> service or the <c>elastic</c> service, use the Chat completion inference API.
60+
/// </para>
4761
/// </summary>
4862
public sealed partial class ChatCompletionUnifiedRequest : PlainRequest<ChatCompletionUnifiedRequestParameters>, ISelfSerializable
4963
{
@@ -79,6 +93,20 @@ void ISelfSerializable.Serialize(Utf8JsonWriter writer, JsonSerializerOptions op
7993
/// <para>
8094
/// Perform chat completion inference
8195
/// </para>
96+
/// <para>
97+
/// The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation.
98+
/// It only works with the <c>chat_completion</c> task type for <c>openai</c> and <c>elastic</c> inference services.
99+
/// </para>
100+
/// <para>
101+
/// IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
102+
/// For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
103+
/// </para>
104+
/// <para>
105+
/// NOTE: The <c>chat_completion</c> task type is only available within the _stream API and only supports streaming.
106+
/// The Chat completion inference API and the Stream inference API differ in their response structure and capabilities.
107+
/// The Chat completion inference API provides more comprehensive customization options through more fields and function calling support.
108+
/// If you use the <c>openai</c> service or the <c>elastic</c> service, use the Chat completion inference API.
109+
/// </para>
82110
/// </summary>
83111
public sealed partial class ChatCompletionUnifiedRequestDescriptor : RequestDescriptor<ChatCompletionUnifiedRequestDescriptor, ChatCompletionUnifiedRequestParameters>
84112
{

src/Elastic.Clients.Elasticsearch/_Generated/Api/Inference/PutAlibabacloudRequest.g.cs

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -41,13 +41,6 @@ public sealed partial class PutAlibabacloudRequestParameters : RequestParameters
4141
/// <para>
4242
/// Create an inference endpoint to perform an inference task with the <c>alibabacloud-ai-search</c> service.
4343
/// </para>
44-
/// <para>
45-
/// When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
46-
/// After creating the endpoint, wait for the model deployment to complete before using it.
47-
/// To verify the deployment status, use the get trained model statistics API.
48-
/// Look for <c>"state": "fully_allocated"</c> in the response and ensure that the <c>"allocation_count"</c> matches the <c>"target_allocation_count"</c>.
49-
/// Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
50-
/// </para>
5144
/// </summary>
5245
public sealed partial class PutAlibabacloudRequest : PlainRequest<PutAlibabacloudRequestParameters>
5346
{
@@ -104,13 +97,6 @@ public PutAlibabacloudRequest(Elastic.Clients.Elasticsearch.Inference.AlibabaClo
10497
/// <para>
10598
/// Create an inference endpoint to perform an inference task with the <c>alibabacloud-ai-search</c> service.
10699
/// </para>
107-
/// <para>
108-
/// When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
109-
/// After creating the endpoint, wait for the model deployment to complete before using it.
110-
/// To verify the deployment status, use the get trained model statistics API.
111-
/// Look for <c>"state": "fully_allocated"</c> in the response and ensure that the <c>"allocation_count"</c> matches the <c>"target_allocation_count"</c>.
112-
/// Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
113-
/// </para>
114100
/// </summary>
115101
public sealed partial class PutAlibabacloudRequestDescriptor : RequestDescriptor<PutAlibabacloudRequestDescriptor, PutAlibabacloudRequestParameters>
116102
{

src/Elastic.Clients.Elasticsearch/_Generated/Api/Inference/PutAmazonbedrockRequest.g.cs

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -45,13 +45,6 @@ public sealed partial class PutAmazonbedrockRequestParameters : RequestParameter
4545
/// info
4646
/// You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
4747
/// </para>
48-
/// <para>
49-
/// When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
50-
/// After creating the endpoint, wait for the model deployment to complete before using it.
51-
/// To verify the deployment status, use the get trained model statistics API.
52-
/// Look for <c>"state": "fully_allocated"</c> in the response and ensure that the <c>"allocation_count"</c> matches the <c>"target_allocation_count"</c>.
53-
/// Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
54-
/// </para>
5548
/// </summary>
5649
public sealed partial class PutAmazonbedrockRequest : PlainRequest<PutAmazonbedrockRequestParameters>
5750
{
@@ -112,13 +105,6 @@ public PutAmazonbedrockRequest(Elastic.Clients.Elasticsearch.Inference.AmazonBed
112105
/// info
113106
/// You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
114107
/// </para>
115-
/// <para>
116-
/// When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
117-
/// After creating the endpoint, wait for the model deployment to complete before using it.
118-
/// To verify the deployment status, use the get trained model statistics API.
119-
/// Look for <c>"state": "fully_allocated"</c> in the response and ensure that the <c>"allocation_count"</c> matches the <c>"target_allocation_count"</c>.
120-
/// Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
121-
/// </para>
122108
/// </summary>
123109
public sealed partial class PutAmazonbedrockRequestDescriptor : RequestDescriptor<PutAmazonbedrockRequestDescriptor, PutAmazonbedrockRequestParameters>
124110
{

src/Elastic.Clients.Elasticsearch/_Generated/Api/Inference/PutAnthropicRequest.g.cs

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -41,13 +41,6 @@ public sealed partial class PutAnthropicRequestParameters : RequestParameters
4141
/// <para>
4242
/// Create an inference endpoint to perform an inference task with the <c>anthropic</c> service.
4343
/// </para>
44-
/// <para>
45-
/// When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
46-
/// After creating the endpoint, wait for the model deployment to complete before using it.
47-
/// To verify the deployment status, use the get trained model statistics API.
48-
/// Look for <c>"state": "fully_allocated"</c> in the response and ensure that the <c>"allocation_count"</c> matches the <c>"target_allocation_count"</c>.
49-
/// Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
50-
/// </para>
5144
/// </summary>
5245
public sealed partial class PutAnthropicRequest : PlainRequest<PutAnthropicRequestParameters>
5346
{
@@ -104,13 +97,6 @@ public PutAnthropicRequest(Elastic.Clients.Elasticsearch.Inference.AnthropicTask
10497
/// <para>
10598
/// Create an inference endpoint to perform an inference task with the <c>anthropic</c> service.
10699
/// </para>
107-
/// <para>
108-
/// When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
109-
/// After creating the endpoint, wait for the model deployment to complete before using it.
110-
/// To verify the deployment status, use the get trained model statistics API.
111-
/// Look for <c>"state": "fully_allocated"</c> in the response and ensure that the <c>"allocation_count"</c> matches the <c>"target_allocation_count"</c>.
112-
/// Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
113-
/// </para>
114100
/// </summary>
115101
public sealed partial class PutAnthropicRequestDescriptor : RequestDescriptor<PutAnthropicRequestDescriptor, PutAnthropicRequestParameters>
116102
{

src/Elastic.Clients.Elasticsearch/_Generated/Api/Inference/PutAzureaistudioRequest.g.cs

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -41,13 +41,6 @@ public sealed partial class PutAzureaistudioRequestParameters : RequestParameter
4141
/// <para>
4242
/// Create an inference endpoint to perform an inference task with the <c>azureaistudio</c> service.
4343
/// </para>
44-
/// <para>
45-
/// When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
46-
/// After creating the endpoint, wait for the model deployment to complete before using it.
47-
/// To verify the deployment status, use the get trained model statistics API.
48-
/// Look for <c>"state": "fully_allocated"</c> in the response and ensure that the <c>"allocation_count"</c> matches the <c>"target_allocation_count"</c>.
49-
/// Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
50-
/// </para>
5144
/// </summary>
5245
public sealed partial class PutAzureaistudioRequest : PlainRequest<PutAzureaistudioRequestParameters>
5346
{
@@ -104,13 +97,6 @@ public PutAzureaistudioRequest(Elastic.Clients.Elasticsearch.Inference.AzureAiSt
10497
/// <para>
10598
/// Create an inference endpoint to perform an inference task with the <c>azureaistudio</c> service.
10699
/// </para>
107-
/// <para>
108-
/// When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
109-
/// After creating the endpoint, wait for the model deployment to complete before using it.
110-
/// To verify the deployment status, use the get trained model statistics API.
111-
/// Look for <c>"state": "fully_allocated"</c> in the response and ensure that the <c>"allocation_count"</c> matches the <c>"target_allocation_count"</c>.
112-
/// Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
113-
/// </para>
114100
/// </summary>
115101
public sealed partial class PutAzureaistudioRequestDescriptor : RequestDescriptor<PutAzureaistudioRequestDescriptor, PutAzureaistudioRequestParameters>
116102
{

src/Elastic.Clients.Elasticsearch/_Generated/Api/Inference/PutAzureopenaiRequest.g.cs

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -59,13 +59,6 @@ public sealed partial class PutAzureopenaiRequestParameters : RequestParameters
5959
/// <para>
6060
/// The list of embeddings models that you can choose from in your deployment can be found in the <a href="https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings">Azure models documentation</a>.
6161
/// </para>
62-
/// <para>
63-
/// When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
64-
/// After creating the endpoint, wait for the model deployment to complete before using it.
65-
/// To verify the deployment status, use the get trained model statistics API.
66-
/// Look for <c>"state": "fully_allocated"</c> in the response and ensure that the <c>"allocation_count"</c> matches the <c>"target_allocation_count"</c>.
67-
/// Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
68-
/// </para>
6962
/// </summary>
7063
public sealed partial class PutAzureopenaiRequest : PlainRequest<PutAzureopenaiRequestParameters>
7164
{
@@ -140,13 +133,6 @@ public PutAzureopenaiRequest(Elastic.Clients.Elasticsearch.Inference.AzureOpenAI
140133
/// <para>
141134
/// The list of embeddings models that you can choose from in your deployment can be found in the <a href="https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings">Azure models documentation</a>.
142135
/// </para>
143-
/// <para>
144-
/// When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
145-
/// After creating the endpoint, wait for the model deployment to complete before using it.
146-
/// To verify the deployment status, use the get trained model statistics API.
147-
/// Look for <c>"state": "fully_allocated"</c> in the response and ensure that the <c>"allocation_count"</c> matches the <c>"target_allocation_count"</c>.
148-
/// Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
149-
/// </para>
150136
/// </summary>
151137
public sealed partial class PutAzureopenaiRequestDescriptor : RequestDescriptor<PutAzureopenaiRequestDescriptor, PutAzureopenaiRequestParameters>
152138
{

src/Elastic.Clients.Elasticsearch/_Generated/Api/Inference/PutCohereRequest.g.cs

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -41,13 +41,6 @@ public sealed partial class PutCohereRequestParameters : RequestParameters
4141
/// <para>
4242
/// Create an inference endpoint to perform an inference task with the <c>cohere</c> service.
4343
/// </para>
44-
/// <para>
45-
/// When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
46-
/// After creating the endpoint, wait for the model deployment to complete before using it.
47-
/// To verify the deployment status, use the get trained model statistics API.
48-
/// Look for <c>"state": "fully_allocated"</c> in the response and ensure that the <c>"allocation_count"</c> matches the <c>"target_allocation_count"</c>.
49-
/// Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
50-
/// </para>
5144
/// </summary>
5245
public sealed partial class PutCohereRequest : PlainRequest<PutCohereRequestParameters>
5346
{
@@ -105,13 +98,6 @@ public PutCohereRequest(Elastic.Clients.Elasticsearch.Inference.CohereTaskType t
10598
/// <para>
10699
/// Create an inference endpoint to perform an inference task with the <c>cohere</c> service.
107100
/// </para>
108-
/// <para>
109-
/// When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
110-
/// After creating the endpoint, wait for the model deployment to complete before using it.
111-
/// To verify the deployment status, use the get trained model statistics API.
112-
/// Look for <c>"state": "fully_allocated"</c> in the response and ensure that the <c>"allocation_count"</c> matches the <c>"target_allocation_count"</c>.
113-
/// Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
114-
/// </para>
115101
/// </summary>
116102
public sealed partial class PutCohereRequestDescriptor : RequestDescriptor<PutCohereRequestDescriptor, PutCohereRequestParameters>
117103
{

0 commit comments

Comments (0)