From 9f28f4b30e86b4a1441e87fee701ff63cd0a6664 Mon Sep 17 00:00:00 2001 From: May Lee Date: Mon, 14 Jul 2025 13:12:49 -0400 Subject: [PATCH 01/23] add http client destination --- config/_default/menus/main.en.yaml | 21 ++++++---- .../destinations/http_client.md | 39 +++++++++++++++++++ .../environment_variables.md | 3 ++ .../update_existing_pipelines.md | 5 +++ .../destination_env_vars/http_client.md | 4 ++ .../destination_batching.en.md | 1 + 6 files changed, 65 insertions(+), 8 deletions(-) create mode 100644 content/en/observability_pipelines/destinations/http_client.md create mode 100644 layouts/shortcodes/observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client.md diff --git a/config/_default/menus/main.en.yaml b/config/_default/menus/main.en.yaml index 7fff3ca023007..13a9142856484 100644 --- a/config/_default/menus/main.en.yaml +++ b/config/_default/menus/main.en.yaml @@ -5456,46 +5456,51 @@ menu: url: /observability_pipelines/destinations/google_cloud_storage/ parent: observability_pipelines_destinations weight: 1109 + - name: HTTP Client + url: observability_pipelines/destinations/http_client/ + parent: observability_pipelines_destinations + identifier: observability_pipelines_http_client + weight: 1110 - name: Microsoft Sentinel identifier: observability_pipelines_microsoft_sentinel url: /observability_pipelines/destinations/microsoft_sentinel/ parent: observability_pipelines_destinations - weight: 1110 + weight: 1111 - name: New Relic identifier: observability_pipelines_new_relic url: /observability_pipelines/destinations/new_relic/ parent: observability_pipelines_destinations - weight: 1111 + weight: 1112 - name: OpenSearch url: observability_pipelines/destinations/opensearch parent: observability_pipelines_destinations identifier: observability_pipelines_opensearch - weight: 1112 + weight: 1113 - name: SentinelOne url: observability_pipelines/destinations/sentinelone parent: observability_pipelines_destinations identifier: observability_pipelines_sentinelone - weight: 1113 + weight: 1114 - name: Socket url: observability_pipelines/destinations/socket parent: observability_pipelines_destinations identifier: observability_pipelines_socket - weight: 1114 + weight: 1115 - name: Splunk HEC url: observability_pipelines/destinations/splunk_hec parent: observability_pipelines_destinations identifier: observability_pipelines_splunk_hec - weight: 1115 + weight: 1116 - name: Sumo Logic Hosted Collector url: observability_pipelines/destinations/sumo_logic_hosted_collector parent: observability_pipelines_destinations identifier: observability_pipelines_sumo_logic_hosted_collector - weight: 1116 + weight: 1117 - name: Syslog url: observability_pipelines/destinations/syslog parent: observability_pipelines_destinations identifier: observability_pipelines_syslog - weight: 1117 + weight: 1118 - name: Environment Variables url: observability_pipelines/environment_variables/ parent: observability_pipelines diff --git a/content/en/observability_pipelines/destinations/http_client.md b/content/en/observability_pipelines/destinations/http_client.md new file mode 100644 index 0000000000000..6586e04076dc0 --- /dev/null +++ b/content/en/observability_pipelines/destinations/http_client.md @@ -0,0 +1,39 @@ +--- +title: HTTP Client Destination +disable_toc: false +--- + +## Overview + +Use Observability Pipelines' HTTP Client destination to send logs to an HTTP client, such as a logging platform or SIEM. 
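+
+For example, before you configure the destination, you can check that your endpoint accepts JSON over HTTP POST. The URI and token below are placeholders for illustration, not values provided by Observability Pipelines:
+
+```shell
+# Hypothetical endpoint and bearer token, for illustration only
+curl -X POST "https://logs.example.com/ingest" \
+  -H "Authorization: Bearer ${TOKEN}" \
+  -H "Content-Type: application/json" \
+  -d '[{"message":"test event","service":"op-worker"}]'
+```
+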
+ +## Set up destination + +Set up the New Relic destination and its environment variables when you [set up a pipeline][1]. The information below is configured in the pipelines UI. + +1. Select your authorization strategy (**None**, **Basic**, or **Bearer**). +1. Select the encoder you want to use for the HTTP messages. +1. Optionally, toggle the switch to enable compression. If enabled: + 1. Select the compression algorithm you want to use. + 1. Select the compressions level you want to use. +1. Optionally, toggle the switch to enable TLS. If you enable TLS, the following certificate and key files are required. + - `Server Certificate Path`: The path to the certificate file that has been signed by your Certificate Authority (CA) Root File in DER or PEM (X.509). + - `CA Certificate Path`: The path to the certificate file that is your Certificate Authority (CA) Root File in DER or PEM (X.509). + - `Private Key Path`: The path to the `.key` private key file that belongs to your Server Certificate Path in DER or PEM (PKCS#8) format. + +## Set the environment variables + +{{% observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client %}} + +## How does the destination work + +### Event batching + +A batch of events is flushed when one of these parameters is met. See [event batching][2] for more information. + +| Max Events | Max Bytes | Timeout (seconds) | +|----------------|-----------------|---------------------| +| 1,000 | 1,000,000 | 1 | + +[1]: https://app.datadoghq.com/observability-pipelines +[2]: /observability_pipelines/destinations/#event-batching \ No newline at end of file diff --git a/content/en/observability_pipelines/environment_variables.md b/content/en/observability_pipelines/environment_variables.md index 0670c7b432cc9..77c1df28decff 100644 --- a/content/en/observability_pipelines/environment_variables.md +++ b/content/en/observability_pipelines/environment_variables.md @@ -95,6 +95,9 @@ Some Observability Pipelines components require setting up environment variables ### Elasticsearch {{% observability_pipelines/configure_existing_pipelines/destination_env_vars/elasticsearch %}} +### HTTP Client +{{% observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client %}} + ### Microsoft Sentinel {{% observability_pipelines/configure_existing_pipelines/destination_env_vars/microsoft_sentinel %}} diff --git a/content/en/observability_pipelines/update_existing_pipelines.md b/content/en/observability_pipelines/update_existing_pipelines.md index 2a3ad8db6b11d..9b74bd3deb928 100644 --- a/content/en/observability_pipelines/update_existing_pipelines.md +++ b/content/en/observability_pipelines/update_existing_pipelines.md @@ -135,6 +135,11 @@ On the Worker installation page: {{% observability_pipelines/configure_existing_pipelines/destination_env_vars/elasticsearch %}} +{{% /tab %}} +{{% tab "HTTP Client" %}} + +{{% observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client %}} + {{% /tab %}} {{% tab "Microsoft Sentinel" %}} diff --git a/layouts/shortcodes/observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client.md b/layouts/shortcodes/observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client.md new file mode 100644 index 0000000000000..40894ed322dc8 --- /dev/null +++ b/layouts/shortcodes/observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client.md @@ -0,0 +1,4 @@ +- HTTP/S client URI endpoint + - Stored as the environment 
variable `DD_OP_DESTINATION_HTTP_CLIENT_URI`. +- HTTP/S endpoint bearer token + - Stored as the environment variable `DD_OP_DESTINATION_HTTP_CLIENT_BEARER_TOKEN`. \ No newline at end of file diff --git a/layouts/shortcodes/observability_pipelines/destination_batching.en.md b/layouts/shortcodes/observability_pipelines/destination_batching.en.md index 6dad67419472e..f3da602dc29ad 100644 --- a/layouts/shortcodes/observability_pipelines/destination_batching.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_batching.en.md @@ -9,6 +9,7 @@ | Elasticsearch | None | 10,000,000 | 1 | | Google Chronicle | None | 1,000,000 | 15 | | Google Cloud Storage (Datadog Log Archives)| None | 100,000,000 | 900 | +| HTTP Client | 1000 | 1,000,000 | 1 | | Microsoft Sentinel | None | 10,000,000 | 1 | | New Relic | 100 | 1,000,000 | 1 | | OpenSearch | None | 10,000,000 | 1 | From b7abb225d19048715b610210eb7c3386585e3dc4 Mon Sep 17 00:00:00 2001 From: May Lee Date: Tue, 15 Jul 2025 11:24:58 -0400 Subject: [PATCH 02/23] update http client destination --- .../observability_pipelines/destinations/http_client.md | 6 +++--- .../destination_env_vars/http_client.md | 8 ++++++-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/content/en/observability_pipelines/destinations/http_client.md b/content/en/observability_pipelines/destinations/http_client.md index 6586e04076dc0..0f08cec1a4f6d 100644 --- a/content/en/observability_pipelines/destinations/http_client.md +++ b/content/en/observability_pipelines/destinations/http_client.md @@ -12,10 +12,10 @@ Use Observability Pipelines' HTTP Client destination to send logs to an HTTP cli Set up the New Relic destination and its environment variables when you [set up a pipeline][1]. The information below is configured in the pipelines UI. 1. Select your authorization strategy (**None**, **Basic**, or **Bearer**). -1. Select the encoder you want to use for the HTTP messages. +1. JSON is the only available encoder. 1. Optionally, toggle the switch to enable compression. If enabled: - 1. Select the compression algorithm you want to use. - 1. Select the compressions level you want to use. + 1. GZIP is the only available compression algorithm. + 1. Select the compression level you want to use. 1. Optionally, toggle the switch to enable TLS. If you enable TLS, the following certificate and key files are required. - `Server Certificate Path`: The path to the certificate file that has been signed by your Certificate Authority (CA) Root File in DER or PEM (X.509). - `CA Certificate Path`: The path to the certificate file that is your Certificate Authority (CA) Root File in DER or PEM (X.509). diff --git a/layouts/shortcodes/observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client.md b/layouts/shortcodes/observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client.md index 40894ed322dc8..cb13a539ae520 100644 --- a/layouts/shortcodes/observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client.md +++ b/layouts/shortcodes/observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client.md @@ -1,4 +1,8 @@ - HTTP/S client URI endpoint - Stored as the environment variable `DD_OP_DESTINATION_HTTP_CLIENT_URI`. -- HTTP/S endpoint bearer token - - Stored as the environment variable `DD_OP_DESTINATION_HTTP_CLIENT_BEARER_TOKEN`. \ No newline at end of file +- If you are using basic authentication: + - HTTP/S endpoint authentication username and password. 
+ - Stored as the environment variables: `DD_OP_DESTINATION_HTTP_CLIENT_USERNAME` and `DD_OP_DESTINATION_HTTP_CLIENT_PASSWORD`. +- If you are using bearer authentication: + - HTTP/S endpoint bearer token. + - Stored as the environment variable: `DD_OP_DESTINATION_HTTP_CLIENT_BEARER_TOKEN`. From 6a1cdeb5bc27c230c85499fa9774c163bf146488 Mon Sep 17 00:00:00 2001 From: May Lee Date: Tue, 15 Jul 2025 13:48:59 -0400 Subject: [PATCH 03/23] update gcs auth --- .../datadog_archives_google_cloud_storage.en.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md index 5ae6a277e7306..e98ea86fa442e 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md @@ -1,7 +1,8 @@
The Google Cloud Storage destination only supports Access Control Lists.
-1. Enter the name of the Google Cloud storage bucket you created earlier. -1. Enter the path to the credentials JSON file you downloaded [earlier](#create-a-service-account-to-allow-workers-to-write-to-the-bucket). +1. Enter the name of your Google Cloud storage bucket. If you configured Log Archives, it's the bucket you created earlier. +1. Optionally, enter the path to your credentials JSON file. If you configured Log Archives it's the credentials you downloaded [earlier](#create-a-service-account-to-allow-workers-to-write-to-the-bucket). + - You can also use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to provide the credential path. If you are using Google Kubernetes Engine (GKE), the `GOOGLE_APPLICATION_CREDENTIALS` is automatically set for you. The Worker uses standard Google authentication methods. See [Authentication methods at Google][10052] for more information. 1. Select the storage class for the created objects. 1. Select the access level of the created objects. 1. Optionally, enter in the prefix. @@ -9,4 +10,5 @@ - See [template syntax][10051] if you want to route logs to different object keys based on specific fields in your logs. 1. Optionally, click **Add Header** to add metadata. -[10051]: /observability_pipelines/destinations/#template-syntax \ No newline at end of file +[10051]: /observability_pipelines/destinations/#template-syntax +[10052]: https://cloud.google.com/docs/authentication#auth-flowchart \ No newline at end of file From 7d74fc7fc8a1c26930a20bd13118736c5b825038 Mon Sep 17 00:00:00 2001 From: May Lee Date: Tue, 15 Jul 2025 15:56:39 -0400 Subject: [PATCH 04/23] apply suggestions to gcs --- .../datadog_archives_google_cloud_storage.en.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md index e98ea86fa442e..1db54d49ac056 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md @@ -2,7 +2,8 @@ 1. Enter the name of your Google Cloud storage bucket. If you configured Log Archives, it's the bucket you created earlier. 1. Optionally, enter the path to your credentials JSON file. If you configured Log Archives it's the credentials you downloaded [earlier](#create-a-service-account-to-allow-workers-to-write-to-the-bucket). - - You can also use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to provide the credential path. If you are using Google Kubernetes Engine (GKE), the `GOOGLE_APPLICATION_CREDENTIALS` is automatically set for you. The Worker uses standard Google authentication methods. See [Authentication methods at Google][10052] for more information. + - You can also use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to provide the credential path. If you're using [workload identity][10053] on Google Kubernetes Engine (GKE), the `GOOGLE_APPLICATION_CREDENTIALS` is provided for you. + - The Worker uses standard [Google authentication methods][10052]. 1. Select the storage class for the created objects. 1. Select the access level of the created objects. 1. Optionally, enter in the prefix. @@ -11,4 +12,5 @@ 1. Optionally, click **Add Header** to add metadata. 
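+
+For reference, instead of entering a path in the UI, you can point the Worker at the credentials file with the standard `GOOGLE_APPLICATION_CREDENTIALS` environment variable described above. The file name below is a placeholder; `/var/lib/observability-pipelines-worker/config/` is the Worker's default configuration data directory:
+
+```shell
+# Hypothetical credentials file path, for illustration only
+export GOOGLE_APPLICATION_CREDENTIALS="/var/lib/observability-pipelines-worker/config/gcp-credentials.json"
+```
+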
[10051]: /observability_pipelines/destinations/#template-syntax -[10052]: https://cloud.google.com/docs/authentication#auth-flowchart \ No newline at end of file +[10052]: https://cloud.google.com/docs/authentication#auth-flowchart +[10053]: https://cloud.google.com/kubernetes-engine/docs/concepts/workload-identity \ No newline at end of file From ac017809aa917d6aaf7a645640bd4280aacbb9ee Mon Sep 17 00:00:00 2001 From: May Lee Date: Tue, 15 Jul 2025 16:01:41 -0400 Subject: [PATCH 05/23] update google chronicle --- .../destination_settings/chronicle.en.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/chronicle.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/chronicle.en.md index 546c2c064861d..8339e0eb982c7 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/chronicle.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/chronicle.en.md @@ -3,7 +3,9 @@ To authenticate the Observability Pipelines Worker for Google Chronicle, contact To set up the Worker's Google Chronicle destination: 1. Enter the customer ID for your Google Chronicle instance. -1. Enter the path to the credentials JSON file you downloaded earlier. +1. Optionally, enter the path to your credentials JSON file. If you configured Log Archives it's the credentials you downloaded [earlier](#create-a-service-account-to-allow-workers-to-write-to-the-bucket). + - You can also use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to provide the credential path. If you're using [workload identity][10004] on Google Kubernetes Engine (GKE), the `GOOGLE_APPLICATION_CREDENTIALS` is provided for you. + - The Worker uses standard [Google authentication methods][10005]. 1. Select **JSON** or **Raw** encoding in the dropdown menu. 1. Enter the log type. See [template syntax][10002] if you want to route logs to different log types based on specific fields in your logs. @@ -11,4 +13,6 @@ To set up the Worker's Google Chronicle destination: [10001]: https://cloud.google.com/chronicle/docs/reference/ingestion-api#getting_api_authentication_credentials [10002]: /observability_pipelines/destinations/#template-syntax -[10003]: https://cloud.google.com/chronicle/docs/ingestion/parser-list/supported-default-parsers#with-default-parser \ No newline at end of file +[10003]: https://cloud.google.com/chronicle/docs/ingestion/parser-list/supported-default-parsers#with-default-parser +[10004]:https://cloud.google.com/kubernetes-engine/docs/concepts/workload-identity +[10005]: https://cloud.google.com/docs/authentication#auth-flowchart \ No newline at end of file From 08ee092956ef86e5fa674eef298fcc648520728c Mon Sep 17 00:00:00 2001 From: May Lee Date: Wed, 23 Jul 2025 11:37:48 -0400 Subject: [PATCH 06/23] add quotas to archives --- .../observability_pipelines/processors/quota.en.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/layouts/shortcodes/observability_pipelines/processors/quota.en.md b/layouts/shortcodes/observability_pipelines/processors/quota.en.md index 844579387cbd2..b405136e2949c 100644 --- a/layouts/shortcodes/observability_pipelines/processors/quota.en.md +++ b/layouts/shortcodes/observability_pipelines/processors/quota.en.md @@ -19,6 +19,9 @@ To set up the quota processor: - Click **Download as CSV** for an example of how to structure the CSV. - Drag and drop your overrides CSV to upload it. 
You can also click **Browse** to select the file to upload it. See the [Overrides example](#overrides-example) for more information. b. Click **Add Field** if you want to add another partition. +1. In the **When quota is met** dropdown menu, select if you want to **drop events**, **keep events**, or **send events to overflow destination**, when the quota has been met. + 1. If you select **send events to overflow destination**, an overflow destination is added with the following cloud storage options: **Amazon S3**, **Azure Blob**, and **Google Cloud**. + 1. Select the cloud storage you want to send overflow logs to. See the setup instructions for your cloud storage: [Amazon S3][5002], [Azure Blog Storage][5003], or [Google Cloud Storage][5004]. #### Examples @@ -56,4 +59,7 @@ If you are partitioning by `service` and have two services: `a` and `b`, you can | `a` | Bytes | 5,000 | | `b` | Events | 50 | -[5001]: /monitors/types/metric/?tab=threshold \ No newline at end of file +[5001]: /monitors/types/metric/?tab=threshold +[5002]: /observability_pipelines/destinations/amazon_s3/ +[5003]: /observability_pipelines/destinations/azure_storage/ +[5004]: /observability_pipelines/destinations/google_cloud_storage/ \ No newline at end of file From d47f2f78939c65ccbf633f91c0e617a44b3a76c1 Mon Sep 17 00:00:00 2001 From: May Lee Date: Wed, 23 Jul 2025 12:01:54 -0400 Subject: [PATCH 07/23] add processor groups --- .../observability_pipelines/processors/_index.md | 8 +++++++- .../set_up_pipelines/_index.md | 14 ++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/content/en/observability_pipelines/processors/_index.md b/content/en/observability_pipelines/processors/_index.md index b420aa91e6a08..04c9d30c0266f 100644 --- a/content/en/observability_pipelines/processors/_index.md +++ b/content/en/observability_pipelines/processors/_index.md @@ -13,7 +13,13 @@ further_reading: Use Observability Pipelines' processors to parse, structure, and enrich your logs. When you create a pipeline in the UI, pre-selected processors are added to your processor group based on the selected template. You can add additional processors and delete any existing ones based on your processing needs. -Processor groups are executed from top to bottom. The order of the processors is important because logs are checked by each processor, but only logs that match the processor's filters are processed. To modify the order of the processors, use the drag handle on the top left corner of the processor you want to move. +You can organize your processors into logical groups to help you manage them. Each processor group has a Group Filter so that those processors are only applied to specific logs. For example, if you want the group processors to only process logs coming from `vpc`, then use the group filter `source:vpc`. You can also add filters for each individual processor. + +Processor groups and the processors within each group are executed from top to bottom. The order of the processors is important because logs are checked by each processor, but only logs that match the processor's filters are processed. To modify the order of the processors, use the drag handle on the top left corner of the processor you want to move. + +**Notes**: +- Adding multiple processor groups for a destination is only available for Worker versions 2.7 and later. +- There is a limit of 10 processor groups for a pipeline canvas. 
For example, if you have a dual ship pipeline, where there are two destinations and each destination has its own set of processor groups, the combined number of processor groups from both sets is limited to 10. Select a processor in the left navigation menu to see more information about it. diff --git a/content/en/observability_pipelines/set_up_pipelines/_index.md b/content/en/observability_pipelines/set_up_pipelines/_index.md index 08596d2f65786..eb0d8cc945f42 100644 --- a/content/en/observability_pipelines/set_up_pipelines/_index.md +++ b/content/en/observability_pipelines/set_up_pipelines/_index.md @@ -43,6 +43,20 @@ Set up your pipelines and its sources, processors, and destinations in the Obser 1. Select and set up your [source][1]. 1. Select and set up your [destinations][2]. 1. Set up your [processors][3]. + - If you want to copy a processor, click the copy icon for that processor and then use `command-v` to paste it. +1. If you want to add another group of processors for a destination: + 1. Click the plus sign (**+**) at the bottom of the existing processor group. + 1. Click the name of the processor group to update it. + 1. Optionally, enter a group filter. See [Filter Syntax](#filter-query-syntax) for more information. + 1. Click **Add** to add processors to the group. + 1. If you want to copy all the processors in a group and paste them into the same processor group or a different group: + 1. Click the three dots on the processor group. + 1. Select **Copy all processors**. + 1. Select the processor group you want to paste the processors into and then use `command-v` to paste them. + 1. You can toggle the switch to enable and disable the processor group and also each individual processor. +
**Notes**: +
- Adding additional processor groups for a destination is available for Worker versions 2.7 and later. +
- There is a limit of 10 processor groups for a pipeline canvas. 1. If you want to add another set of processors and destinations, click the plus sign (**+**) to the left of the processor group to add another set of processors and destinations to the source. - To delete a processor group, you need to delete all destinations linked to that processor group. When the last destination is deleted, the processor group is removed with it. 1. If you want to add an additional destination to a processor group, click the plus sign (**+**) to the right of the processor group. From b63ce7b77e2cf7bc8565b20a7dbcb4da6b581e37 Mon Sep 17 00:00:00 2001 From: May Lee Date: Wed, 23 Jul 2025 12:04:37 -0400 Subject: [PATCH 08/23] add note about processor groups in preview --- content/en/observability_pipelines/processors/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/en/observability_pipelines/processors/_index.md b/content/en/observability_pipelines/processors/_index.md index 04c9d30c0266f..e25ead850beea 100644 --- a/content/en/observability_pipelines/processors/_index.md +++ b/content/en/observability_pipelines/processors/_index.md @@ -18,7 +18,7 @@ You can organize your processors into logical groups to help you manage them. Ea Processor groups and the processors within each group are executed from top to bottom. The order of the processors is important because logs are checked by each processor, but only logs that match the processor's filters are processed. To modify the order of the processors, use the drag handle on the top left corner of the processor you want to move. **Notes**: -- Adding multiple processor groups for a destination is only available for Worker versions 2.7 and later. +- Adding multiple processor groups for a destination is in Preview and only available for Worker versions 2.7 and later. - There is a limit of 10 processor groups for a pipeline canvas. For example, if you have a dual ship pipeline, where there are two destinations and each destination has its own set of processor groups, the combined number of processor groups from both sets is limited to 10. Select a processor in the left navigation menu to see more information about it. From 1126cef4a414d870211e56f5a26afa4be0c601c7 Mon Sep 17 00:00:00 2001 From: May Lee Date: Wed, 23 Jul 2025 13:57:38 -0400 Subject: [PATCH 09/23] Apply suggestions from code review Co-authored-by: Rosa Trieu <107086888+rtrieu@users.noreply.github.com> --- .../observability_pipelines/destinations/http_client.md | 8 ++++---- content/en/observability_pipelines/processors/_index.md | 2 +- .../destination_env_vars/http_client.md | 2 +- .../observability_pipelines/processors/quota.en.md | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/content/en/observability_pipelines/destinations/http_client.md b/content/en/observability_pipelines/destinations/http_client.md index 0f08cec1a4f6d..7caf70af35a6b 100644 --- a/content/en/observability_pipelines/destinations/http_client.md +++ b/content/en/observability_pipelines/destinations/http_client.md @@ -9,14 +9,14 @@ Use Observability Pipelines' HTTP Client destination to send logs to an HTTP cli ## Set up destination -Set up the New Relic destination and its environment variables when you [set up a pipeline][1]. The information below is configured in the pipelines UI. +Set up the HTTP Client destination and its environment variables when you [set up a pipeline][1]. The information below is configured in the pipelines UI. 1. 
Select your authorization strategy (**None**, **Basic**, or **Bearer**). 1. JSON is the only available encoder. 1. Optionally, toggle the switch to enable compression. If enabled: 1. GZIP is the only available compression algorithm. 1. Select the compression level you want to use. -1. Optionally, toggle the switch to enable TLS. If you enable TLS, the following certificate and key files are required. +1. Optionally, toggle the switch to enable TLS. If you enable TLS, the following certificate and key files are required: - `Server Certificate Path`: The path to the certificate file that has been signed by your Certificate Authority (CA) Root File in DER or PEM (X.509). - `CA Certificate Path`: The path to the certificate file that is your Certificate Authority (CA) Root File in DER or PEM (X.509). - `Private Key Path`: The path to the `.key` private key file that belongs to your Server Certificate Path in DER or PEM (PKCS#8) format. @@ -25,11 +25,11 @@ Set up the New Relic destination and its environment variables when you [set up {{% observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client %}} -## How does the destination work +## How the destination works ### Event batching -A batch of events is flushed when one of these parameters is met. See [event batching][2] for more information. +A batch of events is flushed when one of these conditions occurs. See [event batching][2] for more information. | Max Events | Max Bytes | Timeout (seconds) | |----------------|-----------------|---------------------| diff --git a/content/en/observability_pipelines/processors/_index.md b/content/en/observability_pipelines/processors/_index.md index e25ead850beea..645e9f78dd3cb 100644 --- a/content/en/observability_pipelines/processors/_index.md +++ b/content/en/observability_pipelines/processors/_index.md @@ -15,7 +15,7 @@ Use Observability Pipelines' processors to parse, structure, and enrich your log You can organize your processors into logical groups to help you manage them. Each processor group has a Group Filter so that those processors are only applied to specific logs. For example, if you want the group processors to only process logs coming from `vpc`, then use the group filter `source:vpc`. You can also add filters for each individual processor. -Processor groups and the processors within each group are executed from top to bottom. The order of the processors is important because logs are checked by each processor, but only logs that match the processor's filters are processed. To modify the order of the processors, use the drag handle on the top left corner of the processor you want to move. +Processor groups and the processors within each group are executed from top to bottom. The order of the processors is important because logs are checked by each processor, but only logs that match the processor's filters are processed. To change the order of the processors, use the drag handle on the top left corner of the processor you want to move. **Notes**: - Adding multiple processor groups for a destination is in Preview and only available for Worker versions 2.7 and later. 
diff --git a/layouts/shortcodes/observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client.md b/layouts/shortcodes/observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client.md index cb13a539ae520..f71f51729e35f 100644 --- a/layouts/shortcodes/observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client.md +++ b/layouts/shortcodes/observability_pipelines/configure_existing_pipelines/destination_env_vars/http_client.md @@ -1,6 +1,6 @@ - HTTP/S client URI endpoint - Stored as the environment variable `DD_OP_DESTINATION_HTTP_CLIENT_URI`. -- If you are using basic authentication: +- If you are using basic authentication: - HTTP/S endpoint authentication username and password. - Stored as the environment variables: `DD_OP_DESTINATION_HTTP_CLIENT_USERNAME` and `DD_OP_DESTINATION_HTTP_CLIENT_PASSWORD`. - If you are using bearer authentication: diff --git a/layouts/shortcodes/observability_pipelines/processors/quota.en.md b/layouts/shortcodes/observability_pipelines/processors/quota.en.md index b405136e2949c..0baa35ed1e18b 100644 --- a/layouts/shortcodes/observability_pipelines/processors/quota.en.md +++ b/layouts/shortcodes/observability_pipelines/processors/quota.en.md @@ -21,7 +21,7 @@ To set up the quota processor: b. Click **Add Field** if you want to add another partition. 1. In the **When quota is met** dropdown menu, select if you want to **drop events**, **keep events**, or **send events to overflow destination**, when the quota has been met. 1. If you select **send events to overflow destination**, an overflow destination is added with the following cloud storage options: **Amazon S3**, **Azure Blob**, and **Google Cloud**. - 1. Select the cloud storage you want to send overflow logs to. See the setup instructions for your cloud storage: [Amazon S3][5002], [Azure Blog Storage][5003], or [Google Cloud Storage][5004]. + 1. Select the cloud storage you want to send overflow logs to. See the setup instructions for your cloud storage: [Amazon S3][5002], [Azure Blob Storage][5003], or [Google Cloud Storage][5004]. #### Examples From d0c85dcbb734af13e41ccc5135249e6916ebcb02 Mon Sep 17 00:00:00 2001 From: May Lee Date: Thu, 24 Jul 2025 12:21:47 -0400 Subject: [PATCH 10/23] update google pubsub --- .../prerequisites/google_pubsub.en.md | 7 +++++-- .../source_settings/google_pubsub.en.md | 8 ++++++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/layouts/shortcodes/observability_pipelines/prerequisites/google_pubsub.en.md b/layouts/shortcodes/observability_pipelines/prerequisites/google_pubsub.en.md index afb83661f7dda..68eaff433c2b7 100644 --- a/layouts/shortcodes/observability_pipelines/prerequisites/google_pubsub.en.md +++ b/layouts/shortcodes/observability_pipelines/prerequisites/google_pubsub.en.md @@ -1,6 +1,9 @@ To use Observability Pipelines' Google Pub/Sub source, you need the following information available: - The Google Pub/Sub source requires a Pub/Sub subscription. -- A Google Developer Service Account Credential to authenticate the Observability Pipelines Worker. Contact your Google Security Operations representative for a Google Developer Service Account Credential. This credential is a JSON file and must be placed under `DD_OP_DATA_DIR/config`. See [Getting API authentication credentials][10021] for more information. +- The Worker uses standard Google authentication methods. 
See [Authentication methods at Google][10022] for more information about choosing the authentication method for your use case. +- For the IAM role, use `roles/pubsub.subscriber`. See [Available Pub/Sub roles][10023] for more information. -[10021]: https://cloud.google.com/chronicle/docs/reference/ingestion-api#getting_api_authentication_credentials \ No newline at end of file +[10022]: https://cloud.google.com/docs/authentication#auth-flowchart + +[10023]: https://cloud.google.com/pubsub/docs/access-control#roles \ No newline at end of file diff --git a/layouts/shortcodes/observability_pipelines/source_settings/google_pubsub.en.md b/layouts/shortcodes/observability_pipelines/source_settings/google_pubsub.en.md index c532bee800220..0d931edc890c1 100644 --- a/layouts/shortcodes/observability_pipelines/source_settings/google_pubsub.en.md +++ b/layouts/shortcodes/observability_pipelines/source_settings/google_pubsub.en.md @@ -1,5 +1,7 @@ 1. Enter the name of the source project. -1. Enter the path to the Google Developer Service Account Credential JSON file. See [Prerequisites](#prerequisites) for more information. +1. If you have a credentials JSON file, enter the path to your credentials JSON file. Alternatively, you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to provide the credential path. + - If you're using [workload identity][10021] on Google Kubernetes Engine (GKE), the `GOOGLE_APPLICATION_CREDENTIALS` is provided for you. + - The Worker uses standard Google authentication methods. See [Authentication methods at Google][10022] for more information about choosing the authentication method for your use case. 1. Enter the subscription name. 1. Select the decoder you want to use (Bytes, GELF, JSON, syslog). 1. Optionally, toggle the switch to enable TLS. If you enable TLS, the following certificate and key files are required.
**Note**: All file paths are made relative to the configuration data directory, which is `/var/lib/observability-pipelines-worker/config/` by default. See [Advanced Configurations][10172] for more information. The file must be owned by the `observability-pipelines-worker group` and `observability-pipelines-worker` user, or at least readable by the group or user. @@ -8,4 +10,6 @@ - `Private Key Path`: The path to the `.key` private key file that belongs to your Server Certificate Path in DER or PEM (PKCS #8) format. [10172]: /observability_pipelines/advanced_configurations/ - \ No newline at end of file + +[10021]: https://cloud.google.com/kubernetes-engine/docs/concepts/workload-identity +[10022]: https://cloud.google.com/docs/authentication#auth-flowchart \ No newline at end of file From 9557df6c724689f355ace14174039b3aaa0796eb Mon Sep 17 00:00:00 2001 From: May Lee Date: Thu, 24 Jul 2025 12:26:13 -0400 Subject: [PATCH 11/23] small update google pubsub --- .../observability_pipelines/prerequisites/google_pubsub.en.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/layouts/shortcodes/observability_pipelines/prerequisites/google_pubsub.en.md b/layouts/shortcodes/observability_pipelines/prerequisites/google_pubsub.en.md index 68eaff433c2b7..0931c2f74bf1f 100644 --- a/layouts/shortcodes/observability_pipelines/prerequisites/google_pubsub.en.md +++ b/layouts/shortcodes/observability_pipelines/prerequisites/google_pubsub.en.md @@ -2,7 +2,7 @@ To use Observability Pipelines' Google Pub/Sub source, you need the following in - The Google Pub/Sub source requires a Pub/Sub subscription. - The Worker uses standard Google authentication methods. See [Authentication methods at Google][10022] for more information about choosing the authentication method for your use case. -- For the IAM role, use `roles/pubsub.subscriber`. See [Available Pub/Sub roles][10023] for more information. +- Use `roles/pubsub.subscriber` for the Pub/Sub IAM role. See [Available Pub/Sub roles][10023] for more information. [10022]: https://cloud.google.com/docs/authentication#auth-flowchart From ead38fc0c0e820d9d2cb69cda12e7ef94b84c395 Mon Sep 17 00:00:00 2001 From: May Lee Date: Thu, 24 Jul 2025 14:21:52 -0400 Subject: [PATCH 12/23] update gcs integrations --- .../destinations/google_chronicle.md | 5 ++++- .../destinations/google_cloud_storage.md | 7 +++++-- .../destination_settings/chronicle.en.md | 7 ++----- .../datadog_archives_google_cloud_storage.en.md | 6 ++---- .../source_settings/google_pubsub.en.md | 4 ++-- 5 files changed, 15 insertions(+), 14 deletions(-) diff --git a/content/en/observability_pipelines/destinations/google_chronicle.md b/content/en/observability_pipelines/destinations/google_chronicle.md index b0da57da77c7e..82beb1b5b6596 100644 --- a/content/en/observability_pipelines/destinations/google_chronicle.md +++ b/content/en/observability_pipelines/destinations/google_chronicle.md @@ -4,6 +4,8 @@ disable_toc: false --- Use Observability Pipelines' Google Chronicle destination to send logs to Google Chronicle. +The Observability Pipelines Worker uses standard Google authentication methods. See [Authentication methods at Google][3] for more information about choosing the authentication method for your use case. + ## Setup Set up the Google Chronicle destination and its environment variables when you [set up a pipeline][1]. The information below is configured in the pipelines UI. @@ -27,4 +29,5 @@ A batch of events is flushed when one of these parameters is met. 
See [event bat | None | 1,000,000 | 15 | [1]: https://app.datadoghq.com/observability-pipelines -[2]: /observability_pipelines/destinations/#event-batching \ No newline at end of file +[2]: /observability_pipelines/destinations/#event-batching +[3]: https://cloud.google.com/docs/authentication#auth-flowchart \ No newline at end of file diff --git a/content/en/observability_pipelines/destinations/google_cloud_storage.md b/content/en/observability_pipelines/destinations/google_cloud_storage.md index 112a12d31502d..6011fc9934d11 100644 --- a/content/en/observability_pipelines/destinations/google_cloud_storage.md +++ b/content/en/observability_pipelines/destinations/google_cloud_storage.md @@ -3,10 +3,12 @@ title: Google Cloud Storage Destination disable_toc: false --- -
The Google Cloud Storage destination only supports Access Control Lists.
+
For Worker versions 2.7 and later, the Google Cloud destination supports uniform bucket-level access. For Worker version older than 2.7, only Access Control Lists is supported.
Use the Google Cloud Storage destination to send your logs to a Google Cloud Storage bucket. If you want to send logs in Datadog-rehydratable format to Google Cloud Storage for [archiving][1] and [rehydration][2], you must [configure Log Archives](#configure-log-archives). If you want to send your logs directly to Google Cloud Storage, without converting them to Datadog-rehydratable format, skip to [Set up the destination for your pipeline](#set-up-the-destinations). +The Observability Pipelines Worker uses standard Google authentication methods. See [Authentication methods at Google][6] for more information about choosing the authentication method for your use case. + ## Configure Log Archives This step is only required if you want to send logs to Google Cloud Storage in Datadog-rehydratable format for [archiving][1] and [rehydration][2], and you don't already have a Datadog Log Archive configured for Observability Pipelines. If you already have a Datadog Log Archive configured or only want to send your logs directly to Google Cloud Storage, skip to [Set up the destination for your pipeline](#set-up-the-destinations). @@ -41,4 +43,5 @@ A batch of events is flushed when one of these parameters is met. See [event bat [2]: /logs/log_configuration/rehydrating/ [3]: /integrations/google_cloud_platform/#setup [4]: /observability_pipelines/archive_logs/ -[5]: /observability_pipelines/destinations/#event-batching \ No newline at end of file +[5]: /observability_pipelines/destinations/#event-batching +[6]: https://cloud.google.com/docs/authentication#auth-flowchart \ No newline at end of file diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/chronicle.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/chronicle.en.md index 8339e0eb982c7..065c352d6d7a7 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/chronicle.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/chronicle.en.md @@ -1,17 +1,14 @@ -To authenticate the Observability Pipelines Worker for Google Chronicle, contact your Google Security Operations representative for a Google Developer Service Account Credential. This credential is a JSON file and must be placed under `DD_OP_DATA_DIR/config`. See [Getting API authentication credential][10001] for more information. - To set up the Worker's Google Chronicle destination: 1. Enter the customer ID for your Google Chronicle instance. -1. Optionally, enter the path to your credentials JSON file. If you configured Log Archives it's the credentials you downloaded [earlier](#create-a-service-account-to-allow-workers-to-write-to-the-bucket). - - You can also use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to provide the credential path. If you're using [workload identity][10004] on Google Kubernetes Engine (GKE), the `GOOGLE_APPLICATION_CREDENTIALS` is provided for you. +1. If you have a credentials JSON file, enter the path to your credentials JSON file. The credentials file must be placed under `DD_OP_DATA_DIR/config`. Alternatively, you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to provide the credential path. + - If you're using [workload identity][10004] on Google Kubernetes Engine (GKE), the `GOOGLE_APPLICATION_CREDENTIALS` is provided for you. - The Worker uses standard [Google authentication methods][10005]. 1. Select **JSON** or **Raw** encoding in the dropdown menu. 1. Enter the log type. 
See [template syntax][10002] if you want to route logs to different log types based on specific fields in your logs. **Note**: Logs sent to the Google Chronicle destination must have ingestion labels. For example, if the logs are from a A10 load balancer, it must have the ingestion label `A10_LOAD_BALANCER`. See Google Cloud's [Support log types with a default parser][10003] for a list of available log types and their respective ingestion labels. -[10001]: https://cloud.google.com/chronicle/docs/reference/ingestion-api#getting_api_authentication_credentials [10002]: /observability_pipelines/destinations/#template-syntax [10003]: https://cloud.google.com/chronicle/docs/ingestion/parser-list/supported-default-parsers#with-default-parser [10004]:https://cloud.google.com/kubernetes-engine/docs/concepts/workload-identity diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md index 1db54d49ac056..ea085b09c2fdb 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md @@ -1,8 +1,6 @@ -
The Google Cloud Storage destination only supports Access Control Lists.
- 1. Enter the name of your Google Cloud storage bucket. If you configured Log Archives, it's the bucket you created earlier. -1. Optionally, enter the path to your credentials JSON file. If you configured Log Archives it's the credentials you downloaded [earlier](#create-a-service-account-to-allow-workers-to-write-to-the-bucket). - - You can also use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to provide the credential path. If you're using [workload identity][10053] on Google Kubernetes Engine (GKE), the `GOOGLE_APPLICATION_CREDENTIALS` is provided for you. +1. If you have a credentials JSON file, enter the path to your credentials JSON file. If you configured Log Archives it's the credentials you downloaded [earlier](#create-a-service-account-to-allow-workers-to-write-to-the-bucket). The credentials file must be placed under `DD_OP_DATA_DIR/config`. Alternatively, you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to provide the credential path. + - If you're using [workload identity][10053] on Google Kubernetes Engine (GKE), the `GOOGLE_APPLICATION_CREDENTIALS` is provided for you. - The Worker uses standard [Google authentication methods][10052]. 1. Select the storage class for the created objects. 1. Select the access level of the created objects. diff --git a/layouts/shortcodes/observability_pipelines/source_settings/google_pubsub.en.md b/layouts/shortcodes/observability_pipelines/source_settings/google_pubsub.en.md index 0d931edc890c1..e4e9b884b0ae2 100644 --- a/layouts/shortcodes/observability_pipelines/source_settings/google_pubsub.en.md +++ b/layouts/shortcodes/observability_pipelines/source_settings/google_pubsub.en.md @@ -1,7 +1,7 @@ 1. Enter the name of the source project. -1. If you have a credentials JSON file, enter the path to your credentials JSON file. Alternatively, you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to provide the credential path. +1. If you have a credentials JSON file, enter the path to your credentials JSON file. The credentials file must be placed under `DD_OP_DATA_DIR/config`. Alternatively, you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to provide the credential path. - If you're using [workload identity][10021] on Google Kubernetes Engine (GKE), the `GOOGLE_APPLICATION_CREDENTIALS` is provided for you. - - The Worker uses standard Google authentication methods. See [Authentication methods at Google][10022] for more information about choosing the authentication method for your use case. + - The Worker uses standard [Google authentication methods][10022]. 1. Enter the subscription name. 1. Select the decoder you want to use (Bytes, GELF, JSON, syslog). 1. Optionally, toggle the switch to enable TLS. If you enable TLS, the following certificate and key files are required.
**Note**: All file paths are made relative to the configuration data directory, which is `/var/lib/observability-pipelines-worker/config/` by default. See [Advanced Configurations][10172] for more information. The file must be owned by the `observability-pipelines-worker group` and `observability-pipelines-worker` user, or at least readable by the group or user. From 70e4dbc82b17f50e7bdfc4cc86129aaee6033f6b Mon Sep 17 00:00:00 2001 From: May Lee Date: Thu, 24 Jul 2025 14:55:56 -0400 Subject: [PATCH 13/23] update gcs --- .../destinations/google_cloud_storage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/en/observability_pipelines/destinations/google_cloud_storage.md b/content/en/observability_pipelines/destinations/google_cloud_storage.md index 6011fc9934d11..16211a1c49cc5 100644 --- a/content/en/observability_pipelines/destinations/google_cloud_storage.md +++ b/content/en/observability_pipelines/destinations/google_cloud_storage.md @@ -3,7 +3,7 @@ title: Google Cloud Storage Destination disable_toc: false --- -
For Worker versions 2.7 and later, the Google Cloud destination supports uniform bucket-level access. For Worker version older than 2.7, only Access Control Lists is supported.
+
For Worker versions 2.7 and later, the Google Cloud Storage destination supports uniform bucket-level access, which Google recommends.
For Worker versions older than 2.7, only Access Control Lists are supported.
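+
+For reference, you can enable uniform bucket-level access on an existing bucket with the gcloud CLI. The bucket name is a placeholder, for illustration only:
+
+```shell
+# Hypothetical bucket name, for illustration only
+gcloud storage buckets update gs://my-op-archive-bucket --uniform-bucket-level-access
+```
+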
Use the Google Cloud Storage destination to send your logs to a Google Cloud Storage bucket. If you want to send logs in Datadog-rehydratable format to Google Cloud Storage for [archiving][1] and [rehydration][2], you must [configure Log Archives](#configure-log-archives). If you want to send your logs directly to Google Cloud Storage, without converting them to Datadog-rehydratable format, skip to [Set up the destination for your pipeline](#set-up-the-destinations). From 8b5517f27976a7ed1821c549261d78c756cc79f5 Mon Sep 17 00:00:00 2001 From: May Lee Date: Thu, 24 Jul 2025 16:17:52 -0400 Subject: [PATCH 14/23] Apply suggestions from code review Co-authored-by: Bruce Guenter --- content/en/observability_pipelines/processors/_index.md | 2 +- content/en/observability_pipelines/set_up_pipelines/_index.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/content/en/observability_pipelines/processors/_index.md b/content/en/observability_pipelines/processors/_index.md index 645e9f78dd3cb..6adb899c3a015 100644 --- a/content/en/observability_pipelines/processors/_index.md +++ b/content/en/observability_pipelines/processors/_index.md @@ -18,7 +18,7 @@ You can organize your processors into logical groups to help you manage them. Ea Processor groups and the processors within each group are executed from top to bottom. The order of the processors is important because logs are checked by each processor, but only logs that match the processor's filters are processed. To change the order of the processors, use the drag handle on the top left corner of the processor you want to move. **Notes**: -- Adding multiple processor groups for a destination is in Preview and only available for Worker versions 2.7 and later. +- Configuring a pipeline with processor groups is only available for Worker versions 2.7 and later. - There is a limit of 10 processor groups for a pipeline canvas. For example, if you have a dual ship pipeline, where there are two destinations and each destination has its own set of processor groups, the combined number of processor groups from both sets is limited to 10. Select a processor in the left navigation menu to see more information about it. diff --git a/content/en/observability_pipelines/set_up_pipelines/_index.md b/content/en/observability_pipelines/set_up_pipelines/_index.md index eb0d8cc945f42..c712ade38f3a4 100644 --- a/content/en/observability_pipelines/set_up_pipelines/_index.md +++ b/content/en/observability_pipelines/set_up_pipelines/_index.md @@ -55,7 +55,7 @@ Set up your pipelines and its sources, processors, and destinations in the Obser 1. Select the processor group you want to paste the processors into and then use `command-v` to paste them. 1. You can toggle the switch to enable and disable the processor group and also each individual processor.
**Notes**: -
- Adding additional processor groups for a destination is available for Worker versions 2.7 and later. +
- Configuring a pipeline with processor groups is available for Worker versions 2.7 and later.
- There is a limit of 10 processor groups for a pipeline canvas. 1. If you want to add another set of processors and destinations, click the plus sign (**+**) to the left of the processor group to add another set of processors and destinations to the source. - To delete a processor group, you need to delete all destinations linked to that processor group. When the last destination is deleted, the processor group is removed with it. From 769931292f3a10e00733e4fe8e005f083207e031 Mon Sep 17 00:00:00 2001 From: May Lee Date: Thu, 24 Jul 2025 17:20:58 -0400 Subject: [PATCH 15/23] update gcs instructions --- .../google_cloud_storage/instructions.en.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/layouts/shortcodes/observability_pipelines/configure_log_archive/google_cloud_storage/instructions.en.md b/layouts/shortcodes/observability_pipelines/configure_log_archive/google_cloud_storage/instructions.en.md index c71794f2b18a9..adb1a5b14bdf6 100644 --- a/layouts/shortcodes/observability_pipelines/configure_log_archive/google_cloud_storage/instructions.en.md +++ b/layouts/shortcodes/observability_pipelines/configure_log_archive/google_cloud_storage/instructions.en.md @@ -11,7 +11,7 @@ 1. Create a Google Cloud Storage [service account][9092]. - Grant the Service Account permissions to your bucket with `Storage Admin` and `Storage Object Admin` permissions. - - Download the JSON service account key file. This is the credentials JSON file and must be placed under `DD_OP_DATA_DIR/config`. You reference this file when you set up the [Google Cloud Storage destination](#set-up-the-destinations) in the pipeline UI later on. + - If you want to authenticate with a credentials file, download the service account key file and place it under `DD_OP_DATA_DIR/config`. You reference this file when you set up the [Google Cloud Storage destination](#set-up-the-destinations) later on. 1. Follow these [instructions][9093] to create a service account key. Choose `json` for the key type. 
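+
+For reference, creating a JSON key with the gcloud CLI looks like the following sketch. The service account email and key file name are placeholders, for illustration only:
+
+```shell
+# Hypothetical service account, for illustration only
+gcloud iam service-accounts keys create gcp-credentials.json \
+  --iam-account="op-worker@my-project.iam.gserviceaccount.com"
+```
+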
### Connect the storage bucket to Datadog Log Archives From 1fe02a7bb7373ef1eb145a6658bf8ad596f5616b Mon Sep 17 00:00:00 2001 From: May Lee Date: Mon, 28 Jul 2025 12:15:05 -0400 Subject: [PATCH 16/23] add buffering options --- .../en/observability_pipelines/destinations/http_client.md | 3 +++ content/en/observability_pipelines/performance.md | 6 +++++- go.mod | 4 +++- .../destination_settings/amazon_opensearch.en.md | 3 +++ .../destination_settings/amazon_security_lake.md | 3 +++ .../destination_settings/chronicle.en.md | 3 +++ .../destination_settings/crowdstrike_ng_siem.md | 3 +++ .../destination_settings/datadog.en.md | 4 +++- .../destination_settings/datadog_archives_amazon_s3.en.md | 3 +++ .../datadog_archives_azure_storage.en.md | 5 ++++- .../datadog_archives_google_cloud_storage.en.md | 3 +++ .../destination_settings/elasticsearch.en.md | 5 ++++- .../destination_settings/microsoft_sentinel.md | 3 +++ .../destination_settings/new_relic.en.md | 5 ++++- .../destination_settings/opensearch.en.md | 5 ++++- .../destination_settings/sentinelone.md | 5 ++++- .../observability_pipelines/destination_settings/socket.md | 3 +++ .../destination_settings/splunk_hec.en.md | 3 +++ .../destination_settings/sumo_logic.en.md | 3 +++ .../destination_settings/syslog.en.md | 5 ++++- 20 files changed, 68 insertions(+), 9 deletions(-) diff --git a/content/en/observability_pipelines/destinations/http_client.md b/content/en/observability_pipelines/destinations/http_client.md index 7caf70af35a6b..7b26b5c194030 100644 --- a/content/en/observability_pipelines/destinations/http_client.md +++ b/content/en/observability_pipelines/destinations/http_client.md @@ -20,6 +20,9 @@ Set up the HTTP Client destination and its environment variables when you [set u - `Server Certificate Path`: The path to the certificate file that has been signed by your Certificate Authority (CA) Root File in DER or PEM (X.509). - `CA Certificate Path`: The path to the certificate file that is your Certificate Authority (CA) Root File in DER or PEM (X.509). - `Private Key Path`: The path to the `.key` private key file that belongs to your Server Certificate Path in DER or PEM (PKCS#8) format. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. ## Set the environment variables diff --git a/content/en/observability_pipelines/performance.md b/content/en/observability_pipelines/performance.md index 394b71b94e238..2d0e8acdd27bc 100644 --- a/content/en/observability_pipelines/performance.md +++ b/content/en/observability_pipelines/performance.md @@ -30,12 +30,16 @@ Backpressure determines if the system should slow down the consumption or accept ## In-memory buffering for components -All components in Observability Pipelines have a small in-memory buffer between them. The buffer is the channel that two components communicate over. It ensures that there is a small amount of space, typically 100 events, that can be used to send events even if the component on the receiving end is busy. This allows maximizing throughput when workloads are not entirely uniform. +All components in Observability Pipelines have a small in-memory buffer between them. In-memory buffering can also be configured for all Observability Pipelines destinations. The buffer is the channel that two components communicate over. 
It ensures that there is a small amount of space, typically 100 events, that can be used to send events even if the component on the receiving end is busy. This allows maximizing throughput when workloads are not entirely uniform.
 
 Buffering protects against temporary overloads or outages for a given workload. The buffering model prioritizes performance when handling an excess of events, an amount that is beyond what a destination can process, by using in-memory buffers on destinations. By default, a destination's buffer size is increased from 100 events to 500 events. The buffer capacity is increased because destinations are typically the primary source of backpressure in any given Observability Pipelines topology. They communicate with services over the network, where latency may be introduced or outages may temporarily occur.
 
 Observability Pipelines destinations' buffers are configured to block events, which means the Worker waits indefinitely to write to a buffer that is full. This is to make sure observability data is reliably processed in the order it was given. Additionally, as mentioned earlier, blocking induces backpressure and signals upstream components to slow down event acceptance or consumption. As a result, although the system retains all data, it accumulates at the edge.
 
+## Disk buffers
+
+Observability Pipelines destinations can be configured with disk buffers. When disk buffering is enabled for a destination, every event is first sent through the buffer and written to data files before the data is sent to the downstream integration. By default, data is not synchronized for every write, but is instead synchronized on an interval (500 milliseconds), which allows for high throughput with a reduced risk of data loss.
+
 ## Further reading
 
 {{< partial name="whats-next/whats-next.html" >}}
diff --git a/go.mod b/go.mod
index df4d82a2e04c4..7a7b7f2c75e3b 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,8 @@
 module documentation
 
-go 1.14
+go 1.24.4
+
+toolchain go1.24.5
 
 require (
 	github.com/DataDog/websites-modules v1.4.244 // indirect
diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/amazon_opensearch.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/amazon_opensearch.en.md
index f108fd5373bd4..868bb3b616fc7 100644
--- a/layouts/shortcodes/observability_pipelines/destination_settings/amazon_opensearch.en.md
+++ b/layouts/shortcodes/observability_pipelines/destination_settings/amazon_opensearch.en.md
@@ -1,4 +1,7 @@
 1. Optionally, enter the name of the Amazon OpenSearch index. See [template syntax][10051] if you want to route logs to different indexes based on specific fields in your logs.
 1. Select an authentication strategy, **Basic** or **AWS**. For **AWS**, enter the AWS region.
+1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled:
+    1. Select the buffer type you want to set (**Memory** or **Disk**).
+    1. Enter the buffer size and select the unit.
[10051]: /observability_pipelines/destinations/#template-syntax \ No newline at end of file diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/amazon_security_lake.md b/layouts/shortcodes/observability_pipelines/destination_settings/amazon_security_lake.md index baebf4b4c2bd3..449c132c3fc71 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/amazon_security_lake.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/amazon_security_lake.md @@ -8,6 +8,9 @@ - `Server Certificate Path`: The path to the certificate file that has been signed by your Certificate Authority (CA) Root File in DER or PEM (X.509). - `CA Certificate Path`: The path to the certificate file that is your Certificate Authority (CA) Root File in DER or PEM (X.509). - `Private Key Path`: The path to the `.key` private key file that belongs to your Server Certificate Path in DER or PEM (PKCS#8) format. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. **Notes**: - When you add the Amazon Security Lake destination, the OCSF processor is automatically added so that you can convert your logs to Parquet before they are sent to Amazon Security Lake. See [Remap to OCSF documentation][10081] for setup instructions. diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/chronicle.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/chronicle.en.md index 065c352d6d7a7..d4cac0b51f9d3 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/chronicle.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/chronicle.en.md @@ -6,6 +6,9 @@ To set up the Worker's Google Chronicle destination: - The Worker uses standard [Google authentication methods][10005]. 1. Select **JSON** or **Raw** encoding in the dropdown menu. 1. Enter the log type. See [template syntax][10002] if you want to route logs to different log types based on specific fields in your logs. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. **Note**: Logs sent to the Google Chronicle destination must have ingestion labels. For example, if the logs are from a A10 load balancer, it must have the ingestion label `A10_LOAD_BALANCER`. See Google Cloud's [Support log types with a default parser][10003] for a list of available log types and their respective ingestion labels. diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/crowdstrike_ng_siem.md b/layouts/shortcodes/observability_pipelines/destination_settings/crowdstrike_ng_siem.md index 11f381a20e7db..1795af3ef3248 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/crowdstrike_ng_siem.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/crowdstrike_ng_siem.md @@ -6,6 +6,9 @@ To use the CrowdStrike NG-SIEM destination, you need to set up a CrowdStrike dat - `Server Certificate Path`: The path to the certificate file that has been signed by your Certificate Authority (CA) Root File in DER or PEM (X.509). 
- `CA Certificate Path`: The path to the certificate file that is your Certificate Authority (CA) Root File in DER or PEM (X.509). - `Private Key Path`: The path to the `.key` private key file that belongs to your Server Certificate Path in DER or PEM (PKCS#8) format. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. [10171]: https://falcon.us-2.crowdstrike.com/documentation/page/bdded008/hec-http-event-connector-guide [10172]: /observability_pipelines/advanced_configurations/ diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/datadog.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/datadog.en.md index 06f7317387c25..10f8e2ee59133 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/datadog.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/datadog.en.md @@ -1,3 +1,5 @@
Observability Pipelines compresses logs with the zstd (level 1) algorithm.
-There are no configuration steps for your Datadog destination. \ No newline at end of file +Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. \ No newline at end of file diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_amazon_s3.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_amazon_s3.en.md index c35f80fbda616..99fba294f2500 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_amazon_s3.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_amazon_s3.en.md @@ -16,6 +16,9 @@ 1. Enter the ARN of the IAM role you want to assume. 1. Optionally, enter the assumed role session name and external ID. - **Note:** The [user or role you created earlier][10054] must have permission to assume this role so that the Worker can authenticate with AWS. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. #### Example destination and log archive setup diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_azure_storage.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_azure_storage.en.md index d9b9b65bd9395..ca99d1a91fc98 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_azure_storage.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_azure_storage.en.md @@ -1,6 +1,9 @@ 1. Enter the name of the Azure container you created earlier. -2. Optionally, enter a prefix. +1. Optionally, enter a prefix. - Prefixes are useful for partitioning objects. For example, you can use a prefix as an object key to store objects under a particular directory. If using a prefix for this purpose, it must end in `/` to act as a directory path; a trailing `/` is not automatically added. - See [template syntax][10051] if you want to route logs to different object keys based on specific fields in your logs. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. [10051]: /observability_pipelines/destinations/#template-syntax \ No newline at end of file diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md index ea085b09c2fdb..354c515081665 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/datadog_archives_google_cloud_storage.en.md @@ -8,6 +8,9 @@ - Prefixes are useful for partitioning objects. For example, you can use a prefix as an object key to store objects under a particular directory. 
If using a prefix for this purpose, it must end in `/` to act as a directory path; a trailing `/` is not automatically added. - See [template syntax][10051] if you want to route logs to different object keys based on specific fields in your logs. 1. Optionally, click **Add Header** to add metadata. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. [10051]: /observability_pipelines/destinations/#template-syntax [10052]: https://cloud.google.com/docs/authentication#auth-flowchart diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/elasticsearch.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/elasticsearch.en.md index f4adb00c7f029..34753a74d7371 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/elasticsearch.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/elasticsearch.en.md @@ -1,6 +1,9 @@ The following fields are optional: 1. Enter the name for the Elasticsearch index. See [template syntax][10051] if you want to route logs to different indexes based on specific fields in your logs. -2. Enter the Elasticsearch version. +1. Enter the Elasticsearch version. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. [10051]: /observability_pipelines/destinations/#template-syntax \ No newline at end of file diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/microsoft_sentinel.md b/layouts/shortcodes/observability_pipelines/destination_settings/microsoft_sentinel.md index 69afa53e03c92..6657207af6977 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/microsoft_sentinel.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/microsoft_sentinel.md @@ -33,6 +33,9 @@ To set up the Microsoft Sentinel destination, you need to create a Workspace in 1. On the Members page, select **User, group, or service principal**. 1. Click **Select Members** and search for the application you created in the app registration step. 1. Click **Review + Assign**. **Note**: It can take up to 10 minutes for the IAM change to take effect. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. The table below summarizes the Azure and Microsoft Sentinel information you need when you [set up the Observability Pipelines Microsoft Sentinel destination](#set-up-the-destination-in-observability-pipelines): diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/new_relic.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/new_relic.en.md index 3169356e3295c..a0b790f5292e4 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/new_relic.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/new_relic.en.md @@ -1 +1,4 @@ -Select the data center region (**US** or **EU**) of your New Relic account. \ No newline at end of file +1. 
Select the data center region (**US** or **EU**) of your New Relic account. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. \ No newline at end of file diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/opensearch.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/opensearch.en.md index 956acd7e2dd41..553bf7f5119d2 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/opensearch.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/opensearch.en.md @@ -1,3 +1,6 @@ -Optionally, enter the name of the OpenSearch index. See [template syntax][10051] if you want to route logs to different indexes based on specific fields in your logs. +1. Optionally, enter the name of the OpenSearch index. See [template syntax][10051] if you want to route logs to different indexes based on specific fields in your logs. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. [10051]: /observability_pipelines/destinations/#template-syntax \ No newline at end of file diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/sentinelone.md b/layouts/shortcodes/observability_pipelines/destination_settings/sentinelone.md index 80337a958a046..bedd429a578ac 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/sentinelone.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/sentinelone.md @@ -1 +1,4 @@ -Select your SentinelOne logs environment in the dropdown menu. \ No newline at end of file +1. Select your SentinelOne logs environment in the dropdown menu. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. \ No newline at end of file diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/socket.md b/layouts/shortcodes/observability_pipelines/destination_settings/socket.md index 86434de193a0a..769b957acac8e 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/socket.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/socket.md @@ -4,3 +4,6 @@ - `Server Certificate Path`: The path to the certificate file that has been signed by your Certificate Authority (CA) Root File in DER or PEM (X.509). - `CA Certificate Path`: The path to the certificate file that is your Certificate Authority (CA) Root File in DER or PEM (X.509). - `Private Key Path`: The path to the `.key` private key file that belongs to your Server Certificate Path in DER or PEM (PKCS#8) format. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. 
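The **Buffering Options** step repeated across these destination shortcodes relies on the blocking buffer model described in the `performance.md` change earlier in this patch. The following Go sketch is purely illustrative — it is not taken from the Worker's source, and the channel, capacity, and timings are stand-in assumptions — but it shows why a bounded 500-event buffer becomes a backpressure signal rather than a drop point: once the buffer is full, the producer's send blocks until the slower destination frees a slot.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A bounded channel stands in for the in-memory buffer between a
	// pipeline component and a destination (500 events by default).
	buffer := make(chan int, 500)
	done := make(chan struct{})

	// Slow "destination": drains more slowly than the source produces,
	// as if network latency or a brief outage were in play.
	go func() {
		for range buffer {
			time.Sleep(2 * time.Millisecond)
		}
		close(done)
	}()

	// "Source": the first 500 sends land in free buffer slots; after
	// that, each send blocks until the destination reads an event.
	// That blocking is the backpressure that tells upstream components
	// to slow down instead of dropping data.
	for i := 0; i < 1000; i++ {
		start := time.Now()
		buffer <- i
		if wait := time.Since(start); wait > time.Millisecond {
			fmt.Printf("event %d waited %v for buffer space\n", i, wait)
		}
	}
	close(buffer)
	<-done // let the destination drain the remaining events
}
```

Because a blocked buffer retains data instead of shedding it, events accumulate at the edge during an outage — the trade-off the **Disk** buffer type and the disk-buffer section in `performance.md` are meant to address.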
diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/splunk_hec.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/splunk_hec.en.md index bc6c439bb59b6..f263c2401af4b 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/splunk_hec.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/splunk_hec.en.md @@ -4,5 +4,8 @@ The following fields are optional: 1. Enter the name of the Splunk index you want your data in. This has to be an allowed index for your HEC. See [template syntax][10051] if you want to route logs to different indexes based on specific fields in your logs. 1. Select whether the timestamp should be auto-extracted. If set to `true`, Splunk extracts the timestamp from the message with the expected format of `yyyy-mm-dd hh:mm:ss`. 1. Optionally, set the `sourcetype` to override Splunk's default value, which is `httpevent` for HEC data. See [template syntax][10051] if you want to route logs to different source types based on specific fields in your logs. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. [10051]: /observability_pipelines/destinations/#template-syntax \ No newline at end of file diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/sumo_logic.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/sumo_logic.en.md index 92758abeff24e..08ad25bf0d8e9 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/sumo_logic.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/sumo_logic.en.md @@ -5,3 +5,6 @@ The following fields are optional: 1. Enter a **host name** to override the default `host` value configured for your Sumo Logic collector's source. 1. Enter a **category name** to override the default `category` value configured for your Sumo Logic collector's source. 1. Click **Add Header** to add any custom header fields and values. +1. Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled: + 1. Select the buffer type you want to set (**Memory** or **Disk**). + 1. Enter the buffer size and select the unit. diff --git a/layouts/shortcodes/observability_pipelines/destination_settings/syslog.en.md b/layouts/shortcodes/observability_pipelines/destination_settings/syslog.en.md index 4bc2deac68e7e..a1a2f875a6f79 100644 --- a/layouts/shortcodes/observability_pipelines/destination_settings/syslog.en.md +++ b/layouts/shortcodes/observability_pipelines/destination_settings/syslog.en.md @@ -19,4 +19,7 @@ The following destination settings are optional: - `Server Certificate Path`: The path to the certificate file that has been signed by your Certificate Authority (CA) Root File in DER or PEM (X.509). - `CA Certificate Path`: The path to the certificate file that is your Certificate Authority (CA) Root File in DER or PEM (X.509). - `Private Key Path`: The path to the `.key` private key file that belongs to your Server Certificate Path in DER or PEM (PKCS#8) format. -1. Enter the number of seconds to wait before sending TCP keepalive probes on an idle connection. \ No newline at end of file +1. Enter the number of seconds to wait before sending TCP keepalive probes on an idle connection. +1. 
Optionally, toggle the switch to enable **Buffering Options**. If left disabled, the maximum size for buffering is 500 events. If enabled:
+    1. Select the buffer type you want to set (**Memory** or **Disk**).
+    1. Enter the buffer size and select the unit.
\ No newline at end of file

From dfb1185dcd3fb868216b576f246711dda5bce0df Mon Sep 17 00:00:00 2001
From: May Lee
Date: Mon, 28 Jul 2025 12:17:16 -0400
Subject: [PATCH 17/23] revert go.mod go.sum

---
 go.mod | 8 +++-----
 go.sum | 8 ++++----
 2 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/go.mod b/go.mod
index 7a7b7f2c75e3b..7027d3bf6f7b4 100644
--- a/go.mod
+++ b/go.mod
@@ -1,12 +1,10 @@
 module documentation
 
-go 1.24.4
-
-toolchain go1.24.5
+go 1.14
 
 require (
-	github.com/DataDog/websites-modules v1.4.244 // indirect
-	github.com/DataDog/websites-sources v0.0.0-20250725130558-4b13439577aa // indirect
+	github.com/DataDog/websites-modules v1.4.246 // indirect
+	github.com/DataDog/websites-sources v0.0.0-20250606082516-6e5a26ff9d0e // indirect
 )
 
 // replace github.com/DataDog/websites-modules => /Users/lisiane.turlure/guac/websites-modules
diff --git a/go.sum b/go.sum
index 671e29aea34d1..0ca633f81b32d 100644
--- a/go.sum
+++ b/go.sum
@@ -1,4 +1,4 @@
-github.com/DataDog/websites-modules v1.4.244 h1:GbHeTzymRqqvTXwar24cHHui71vUMJmNwWDYQwYtxpg=
-github.com/DataDog/websites-modules v1.4.244/go.mod h1:CcQxAmCXoiFr3hNw6Q+1si65C3uOP1gB+7aX4S3h+CQ=
-github.com/DataDog/websites-sources v0.0.0-20250725130558-4b13439577aa h1:XWN3hFITXoC1B4pW5pwKYqlfuVCcyhZDAVAMs+1L7wo=
-github.com/DataDog/websites-sources v0.0.0-20250725130558-4b13439577aa/go.mod h1:DlDYkYoR/nhKYRur0+2jmGGc7ydk8Q0ybWkS6oaLVuo=
+github.com/DataDog/websites-modules v1.4.246 h1:vTRuSLbzwZyadOzBRtroYMxm+PNbhRajWmL5h9DJYmQ=
+github.com/DataDog/websites-modules v1.4.246/go.mod h1:CcQxAmCXoiFr3hNw6Q+1si65C3uOP1gB+7aX4S3h+CQ=
+github.com/DataDog/websites-sources v0.0.0-20250606082516-6e5a26ff9d0e h1:F4Khh33ikLmXpHrBaz4vgoBusJhjQGVn/iavRxGsXiU=
+github.com/DataDog/websites-sources v0.0.0-20250606082516-6e5a26ff9d0e/go.mod h1:RvGhXV0uQC6Ocs+n84QyL97kows6vg6VG5ZLQMHw4Fs=

From a95e41e08d104a54f671a75e30c82e97840c9b88 Mon Sep 17 00:00:00 2001
From: May Lee
Date: Mon, 28 Jul 2025 12:19:53 -0400
Subject: [PATCH 18/23] try revert for go files again

---
 go.mod | 8 +++++---
 go.sum | 8 ++++----
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/go.mod b/go.mod
index 7027d3bf6f7b4..7a7b7f2c75e3b 100644
--- a/go.mod
+++ b/go.mod
@@ -1,10 +1,12 @@
 module documentation
 
-go 1.14
+go 1.24.4
+
+toolchain go1.24.5
 
 require (
-	github.com/DataDog/websites-modules v1.4.246 // indirect
-	github.com/DataDog/websites-sources v0.0.0-20250606082516-6e5a26ff9d0e // indirect
+	github.com/DataDog/websites-modules v1.4.244 // indirect
+	github.com/DataDog/websites-sources v0.0.0-20250725130558-4b13439577aa // indirect
 )
 
 // replace github.com/DataDog/websites-modules => /Users/lisiane.turlure/guac/websites-modules
diff --git a/go.sum b/go.sum
index 0ca633f81b32d..671e29aea34d1 100644
--- a/go.sum
+++ b/go.sum
@@ -1,4 +1,4 @@
-github.com/DataDog/websites-modules v1.4.246 h1:vTRuSLbzwZyadOzBRtroYMxm+PNbhRajWmL5h9DJYmQ=
-github.com/DataDog/websites-modules v1.4.246/go.mod h1:CcQxAmCXoiFr3hNw6Q+1si65C3uOP1gB+7aX4S3h+CQ=
-github.com/DataDog/websites-sources v0.0.0-20250606082516-6e5a26ff9d0e h1:F4Khh33ikLmXpHrBaz4vgoBusJhjQGVn/iavRxGsXiU=
-github.com/DataDog/websites-sources v0.0.0-20250606082516-6e5a26ff9d0e/go.mod h1:RvGhXV0uQC6Ocs+n84QyL97kows6vg6VG5ZLQMHw4Fs=
+github.com/DataDog/websites-modules
v1.4.244 h1:GbHeTzymRqqvTXwar24cHHui71vUMJmNwWDYQwYtxpg=
+github.com/DataDog/websites-modules v1.4.244/go.mod h1:CcQxAmCXoiFr3hNw6Q+1si65C3uOP1gB+7aX4S3h+CQ=
+github.com/DataDog/websites-sources v0.0.0-20250725130558-4b13439577aa h1:XWN3hFITXoC1B4pW5pwKYqlfuVCcyhZDAVAMs+1L7wo=
+github.com/DataDog/websites-sources v0.0.0-20250725130558-4b13439577aa/go.mod h1:DlDYkYoR/nhKYRur0+2jmGGc7ydk8Q0ybWkS6oaLVuo=

From a1eb4cf24e1e8c62099d833190c89420cbad0517 Mon Sep 17 00:00:00 2001
From: May Lee
Date: Tue, 29 Jul 2025 11:17:25 -0400
Subject: [PATCH 19/23] update quota

---
 .../observability_pipelines/processors/quota.en.md | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/layouts/shortcodes/observability_pipelines/processors/quota.en.md b/layouts/shortcodes/observability_pipelines/processors/quota.en.md
index 0baa35ed1e18b..a074f2a427d87 100644
--- a/layouts/shortcodes/observability_pipelines/processors/quota.en.md
+++ b/layouts/shortcodes/observability_pipelines/processors/quota.en.md
@@ -1,6 +1,11 @@
-The quota processor measures the logging traffic for logs that match the filter you specify. When the configured daily quota is met inside the 24-hour rolling window, the processor can either drop additional logs or send an alert using a Datadog monitor. You can configure the processor to track the total volume or the total number of events. The pipeline uses the name of the quota to identify the quota across multiple Remote Configuration deployments of the Worker.
+The quota processor measures the logging traffic for logs that match the filter you specify. When the configured daily quota is met inside the 24-hour rolling window, the processor can either keep or drop additional logs, or send them to a storage bucket. You can configure the processor to track the total volume or the total number of events. For example, you can configure this processor to drop new logs or trigger an alert without dropping logs after the processor has received 10 million events from a certain service in the last 24 hours.
+
+You can also use field-based partitioning, such as `service`, `env`, or `status`. Each unique field value uses a separate quota bucket with its own daily quota limit. See [Partition example](#partition-example) for more information.
+
+**Notes**:
+- Each Worker can have up to 1000 buckets. If you need to increase the bucket limit, contact your account manager.
+- The pipeline uses the name of the quota to identify the quota across multiple Remote Configuration deployments of the Worker.
 
-As an example, you can configure this processor to drop new logs or trigger an alert without dropping logs after the processor has received 10 million events from a certain service in the last 24 hours.
 To set up the quota processor:
 1. Enter a name for the quota processor.
@@ -9,10 +14,7 @@ To set up the quota processor:
 - Logs that do not match the quota filter are sent to the next step of the pipeline.
 1. In the **Unit for quota** dropdown menu, select if you want to measure the quota by the number of `Events` or by the `Volume` in bytes.
 1. Set the daily quota limit and select the unit of magnitude for your desired quota.
-1. Check the **Drop events** checkbox if you want to drop all events when your quota is met. Leave it unchecked if you plan to set up a [monitor][5001] that sends an alert when the quota is met.
-    - If logs that match the quota filter are received after the daily quota has been met and the **Drop events** option is selected, then those logs are dropped.
In this case, only logs that did not match the filter query are sent to the next step in the pipeline.
-    - If logs that match the quota filter are received after the daily quota has been met and the **Drop events** option is not selected, then those logs and the logs that did not match the filter query are sent to the next step in the pipeline.
-1. Optional: Click **Add Field** if you want to set a quota on a specific service or region field.
+1. Optionally, click **Add Field** if you want to set a quota on a specific service or region field.
    a. Enter the field name you want to partition by. See the [Partition example](#partition-example) for more information.
       i. Select the **Ignore when missing** option if you want the quota applied only to events that match the partition. See the [Ignore when missing example](#example-for-the-ignore-when-missing-option) for more information.
       ii. Optional: Click **Overrides** if you want to set different quotas for the partitioned field.

From f8c894614bfd839752e2c67e4e7605bd43268d74 Mon Sep 17 00:00:00 2001
From: May Lee
Date: Tue, 29 Jul 2025 11:19:47 -0400
Subject: [PATCH 20/23] update quota

---
 .../shortcodes/observability_pipelines/processors/quota.en.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/layouts/shortcodes/observability_pipelines/processors/quota.en.md b/layouts/shortcodes/observability_pipelines/processors/quota.en.md
index a074f2a427d87..bf7025b69e84a 100644
--- a/layouts/shortcodes/observability_pipelines/processors/quota.en.md
+++ b/layouts/shortcodes/observability_pipelines/processors/quota.en.md
@@ -1,4 +1,4 @@
-The quota processor measures the logging traffic for logs that match the filter you specify. When the configured daily quota is met inside the 24-hour rolling window, the processor can either keep or drop additional logs, or send them to a storage bucket. You can configure the processor to track the total volume or the total number of events. For example, you can configure this processor to drop new logs or trigger an alert without dropping logs after the processor has received 10 million events from a certain service in the last 24 hours.
+The quota processor measures the logging traffic for logs that match the filter you specify. When the configured daily quota is met inside the 24-hour rolling window, the processor can either keep or drop additional logs, or send them to a storage bucket. For example, you can configure this processor to drop new logs or trigger an alert without dropping logs after the processor has received 10 million events from a certain service in the last 24 hours.
 
 You can also use field-based partitioning, such as `service`, `env`, or `status`. Each unique field value uses a separate quota bucket with its own daily quota limit. See [Partition example](#partition-example) for more information.

From c29af355f0a99ad60b5a8bb115202f2410fe7934 Mon Sep 17 00:00:00 2001
From: May Lee
Date: Wed, 30 Jul 2025 15:10:40 -0400
Subject: [PATCH 21/23] update processors

---
 .../en/observability_pipelines/processors/_index.md | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/content/en/observability_pipelines/processors/_index.md b/content/en/observability_pipelines/processors/_index.md
index 6adb899c3a015..aa608b69774a8 100644
--- a/content/en/observability_pipelines/processors/_index.md
+++ b/content/en/observability_pipelines/processors/_index.md
@@ -13,15 +13,17 @@ further_reading:
 Use Observability Pipelines' processors to parse, structure, and enrich your logs.
When you create a pipeline in the UI, pre-selected processors are added to your processor group based on the selected template. You can add additional processors and delete any existing ones based on your processing needs. +Select a processor in the left navigation menu to see more information about it. + +## Processor groups + +
Configuring a pipeline with processor groups is only available for Worker versions 2.7 and later.
+ You can organize your processors into logical groups to help you manage them. Each processor group has a Group Filter so that those processors are only applied to specific logs. For example, if you want the group processors to only process logs coming from `vpc`, then use the group filter `source:vpc`. You can also add filters for each individual processor. Processor groups and the processors within each group are executed from top to bottom. The order of the processors is important because logs are checked by each processor, but only logs that match the processor's filters are processed. To change the order of the processors, use the drag handle on the top left corner of the processor you want to move. -**Notes**: -- Configuring a pipeline with processor groups is only available for Worker versions 2.7 and later. -- There is a limit of 10 processor groups for a pipeline canvas. For example, if you have a dual ship pipeline, where there are two destinations and each destination has its own set of processor groups, the combined number of processor groups from both sets is limited to 10. - -Select a processor in the left navigation menu to see more information about it. +**Note**: There is a limit of 10 processor groups for a pipeline canvas. For example, if you have a dual ship pipeline, where there are two destinations and each destination has its own set of processor groups, the combined number of processor groups from both sets is limited to 10. {{% observability_pipelines/processors/filter_syntax %}} From a2257df70cc66c16ca6880b16c4ce376d8f7f15c Mon Sep 17 00:00:00 2001 From: May Lee Date: Wed, 30 Jul 2025 16:26:32 -0400 Subject: [PATCH 22/23] get rid of go.mod --- go.mod | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.mod b/go.mod index 7a7b7f2c75e3b..a55f067784900 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,6 @@ module documentation go 1.24.4 -toolchain go1.24.5 - require ( github.com/DataDog/websites-modules v1.4.244 // indirect github.com/DataDog/websites-sources v0.0.0-20250725130558-4b13439577aa // indirect From cbcf96aac49fd113fb069bd7c8526afea781f58d Mon Sep 17 00:00:00 2001 From: May Lee Date: Wed, 6 Aug 2025 12:07:13 -0400 Subject: [PATCH 23/23] revert processor groups updates --- .../observability_pipelines/processors/_index.md | 12 ++---------- .../set_up_pipelines/_index.md | 14 -------------- 2 files changed, 2 insertions(+), 24 deletions(-) diff --git a/content/en/observability_pipelines/processors/_index.md b/content/en/observability_pipelines/processors/_index.md index aa608b69774a8..b420aa91e6a08 100644 --- a/content/en/observability_pipelines/processors/_index.md +++ b/content/en/observability_pipelines/processors/_index.md @@ -13,17 +13,9 @@ further_reading: Use Observability Pipelines' processors to parse, structure, and enrich your logs. When you create a pipeline in the UI, pre-selected processors are added to your processor group based on the selected template. You can add additional processors and delete any existing ones based on your processing needs. -Select a processor in the left navigation menu to see more information about it. - -## Processor groups - -
Configuring a pipeline with processor groups is only available for Worker versions 2.7 and later.
+Processor groups are executed from top to bottom. The order of the processors is important because logs are checked by each processor, but only logs that match the processor's filters are processed. To modify the order of the processors, use the drag handle on the top left corner of the processor you want to move. -You can organize your processors into logical groups to help you manage them. Each processor group has a Group Filter so that those processors are only applied to specific logs. For example, if you want the group processors to only process logs coming from `vpc`, then use the group filter `source:vpc`. You can also add filters for each individual processor. - -Processor groups and the processors within each group are executed from top to bottom. The order of the processors is important because logs are checked by each processor, but only logs that match the processor's filters are processed. To change the order of the processors, use the drag handle on the top left corner of the processor you want to move. - -**Note**: There is a limit of 10 processor groups for a pipeline canvas. For example, if you have a dual ship pipeline, where there are two destinations and each destination has its own set of processor groups, the combined number of processor groups from both sets is limited to 10. +Select a processor in the left navigation menu to see more information about it. {{% observability_pipelines/processors/filter_syntax %}} diff --git a/content/en/observability_pipelines/set_up_pipelines/_index.md b/content/en/observability_pipelines/set_up_pipelines/_index.md index c712ade38f3a4..08596d2f65786 100644 --- a/content/en/observability_pipelines/set_up_pipelines/_index.md +++ b/content/en/observability_pipelines/set_up_pipelines/_index.md @@ -43,20 +43,6 @@ Set up your pipelines and its sources, processors, and destinations in the Obser 1. Select and set up your [source][1]. 1. Select and set up your [destinations][2]. 1. Set up your [processors][3]. - - If you want to copy a processor, click the copy icon for that processor and then use `command-v` to paste it. -1. If you want to add another group of processors for a destination: - 1. Click the plus sign (**+**) at the bottom of the existing processor group. - 1. Click the name of the processor group to update it. - 1. Optionally, enter a group filter. See [Filter Syntax](#filter-query-syntax) for more information. - 1. Click **Add** to add processors to the group. - 1. If you want to copy all the processors in a group and paste them into the same processor group or a different group: - 1. Click the three dots on the processor group. - 1. Select **Copy all processors**. - 1. Select the processor group you want to paste the processors into and then use `command-v` to paste them. - 1. You can toggle the switch to enable and disable the processor group and also each individual processor. -
**Notes**: -
- Configuring a pipeline with processor groups is available for Worker versions 2.7 and later. -
- There is a limit of 10 processor groups for a pipeline canvas.
 1. To add another set of processors and destinations to the source, click the plus sign (**+**) to the left of the processor group.
     - To delete a processor group, you need to delete all destinations linked to that processor group. When the last destination is deleted, the processor group is removed with it.
 1. To add an additional destination to a processor group, click the plus sign (**+**) to the right of the processor group.
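Finally, since the quota patches above (PATCH 19-20) describe the semantics in prose only — a daily limit evaluated over a 24-hour rolling window, with one bucket per unique partition-field value — here is a minimal Go sketch of that accounting model. It is a hypothetical simplification for illustration: the bucket structure, `service` values, and tiny limit are all invented, and the real processor additionally supports volume-based quotas, overrides, storage-bucket routing, and the 1,000-bucket-per-Worker limit, none of which is modeled here.

```go
package main

import (
	"fmt"
	"time"
)

// quotaBucket tracks counted events for one unique partition value
// (for example, one service) inside a rolling 24-hour window.
type quotaBucket struct {
	timestamps []time.Time
}

// allow reports whether an event fits under the daily event limit,
// evicting timestamps that have fallen out of the rolling window.
func (b *quotaBucket) allow(now time.Time, limit int) bool {
	cutoff := now.Add(-24 * time.Hour)
	i := 0
	for i < len(b.timestamps) && b.timestamps[i].Before(cutoff) {
		i++
	}
	b.timestamps = b.timestamps[i:]
	if len(b.timestamps) >= limit {
		return false // quota met: drop, keep, or reroute per configuration
	}
	b.timestamps = append(b.timestamps, now)
	return true
}

func main() {
	// Field-based partitioning: one bucket per unique value of the
	// partition field (here, a hypothetical `service` field).
	buckets := map[string]*quotaBucket{}
	const dailyLimit = 3 // tiny limit so the example overflows quickly

	for _, service := range []string{"checkout", "checkout", "payments", "checkout", "checkout"} {
		b, ok := buckets[service]
		if !ok {
			b = &quotaBucket{}
			buckets[service] = b
		}
		if b.allow(time.Now(), dailyLimit) {
			fmt.Printf("service=%s forwarded\n", service)
		} else {
			fmt.Printf("service=%s over quota\n", service)
		}
	}
}
```

With the limit set to 3, the fourth `checkout` event exceeds its bucket's quota while `payments` stays under its own — each unique field value is metered independently, which is why the partition feature needs its own per-Worker cap on bucket count.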