From 68e930ec51f5b151807ddccd1298f078aec121d3 Mon Sep 17 00:00:00 2001 From: cnp-autobot <85171364+cnp-autobot@users.noreply.github.com> Date: Fri, 23 May 2025 15:01:50 +0000 Subject: [PATCH] Sync EnterpriseDB/cloud-native-postgres product/pg4k/v1.22.10 --- .../1/applications.mdx | 2 + .../1/architecture.mdx | 11 +- .../docs/postgres_for_kubernetes/1/backup.mdx | 10 + .../1/backup_barmanobjectstore.mdx | 5 + .../1/backup_recovery.mdx | 12 +- .../1/backup_volumesnapshot.mdx | 2 + .../1/before_you_start.mdx | 2 + .../1/benchmarking.mdx | 4 +- .../postgres_for_kubernetes/1/bootstrap.mdx | 32 +- .../1/certificates.mdx | 2 + .../1/cluster_conf.mdx | 2 + .../1/cncf-projects/external-secrets.mdx | 267 + .../1/connection_pooling.mdx | 2 + .../1/container_images.mdx | 2 + .../postgres_for_kubernetes/1/controller.mdx | 2 + .../1/database_import.mdx | 6 +- .../1/declarative_hibernation.mdx | 4 +- .../1/declarative_role_management.mdx | 2 + .../postgres_for_kubernetes/1/failover.mdx | 2 + .../1/failure_modes.mdx | 2 + .../docs/postgres_for_kubernetes/1/faq.mdx | 2 + .../postgres_for_kubernetes/1/fencing.mdx | 2 + .../1/installation_upgrade.mdx | 8 +- .../1/instance_manager.mdx | 2 + .../1/kubectl-plugin.mdx | 30 +- .../1/kubernetes_upgrade.mdx | 2 + .../1/labels_annotations.mdx | 53 +- .../postgres_for_kubernetes/1/logging.mdx | 2 + .../postgres_for_kubernetes/1/monitoring.mdx | 118 +- .../postgres_for_kubernetes/1/networking.mdx | 2 + .../1/object_stores.mdx | 2 + .../postgres_for_kubernetes/1/openshift.mdx | 89 +- .../1/operator_capability_levels.mdx | 2 + .../1/operator_conf.mdx | 2 + .../1/pg4k.v1/v1.22.10.mdx | 5905 +++++++++++++++++ .../1/postgresql_conf.mdx | 12 +- .../postgres_for_kubernetes/1/quickstart.mdx | 60 +- .../postgres_for_kubernetes/1/recovery.mdx | 2 + .../1/replica_cluster.mdx | 11 +- .../postgres_for_kubernetes/1/replication.mdx | 2 + .../1/resource_management.mdx | 2 + .../1/rolling_update.mdx | 2 + .../postgres_for_kubernetes/1/samples.mdx | 2 + .../1/samples/cluster-example-full.yaml | 2 +- .../1/samples/k9s/plugins.yml | 16 +- .../samples/monitoring/kube-stack-config.yaml | 9 +- .../postgres_for_kubernetes/1/scheduling.mdx | 4 +- .../1/ssl_connections.mdx | 4 +- .../postgres_for_kubernetes/1/storage.mdx | 2 + .../postgres_for_kubernetes/1/tablespaces.mdx | 12 +- .../docs/postgres_for_kubernetes/1/tde.mdx | 68 +- .../1/troubleshooting.mdx | 6 +- .../postgres_for_kubernetes/1/use_cases.mdx | 2 + .../1/wal_archiving.mdx | 2 + 54 files changed, 6598 insertions(+), 216 deletions(-) create mode 100644 product_docs/docs/postgres_for_kubernetes/1/cncf-projects/external-secrets.mdx create mode 100644 product_docs/docs/postgres_for_kubernetes/1/pg4k.v1/v1.22.10.mdx diff --git a/product_docs/docs/postgres_for_kubernetes/1/applications.mdx b/product_docs/docs/postgres_for_kubernetes/1/applications.mdx index 84cf029a86c..32661da0a96 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/applications.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/applications.mdx @@ -3,6 +3,8 @@ title: 'Connecting from an application' originalFilePath: 'src/applications.md' --- + + Applications are supposed to work with the services created by EDB Postgres for Kubernetes in the same Kubernetes cluster. 
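To make this concrete, here is an illustrative sketch (not taken from the original page) of how an application pod can consume the credentials that the operator generates for a cluster named `cluster-example`; the application image name is a placeholder:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: webapp
spec:
  containers:
    - name: webapp
      image: my-app:latest  # placeholder application image
      env:
        # `cluster-example-app` is the Secret the operator creates for the
        # application user; its `uri` key holds a full connection string
        # pointing at the `cluster-example-rw` read/write service.
        - name: DATABASE_URL
          valueFrom:
            secretKeyRef:
              name: cluster-example-app
              key: uri
```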
diff --git a/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx index 55a96d5237b..cc6139de634 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx @@ -3,11 +3,10 @@ title: 'Architecture' originalFilePath: 'src/architecture.md' --- -!!! Hint - For a deeper understanding, we recommend reading our article on the CNCF - blog post titled ["Recommended Architectures for PostgreSQL in Kubernetes"](https://www.cncf.io/blog/2023/09/29/recommended-architectures-for-postgresql-in-kubernetes/), - which provides valuable insights into best practices and design - considerations for PostgreSQL deployments in Kubernetes. + + +This section covers the main architectural aspects you need to consider +when deploying PostgreSQL in Kubernetes. This documentation page provides an overview of the key architectural considerations for implementing a robust business continuity strategy when @@ -138,7 +137,7 @@ the [replica cluster feature](replica_cluster.md)). ![Example of a Kubernetes architecture with only 2 data centers](./images/k8s-architecture-2-az.png) !!! Hint - If you are at en early stage of your Kubernetes journey, please share this + If you are at an early stage of your Kubernetes journey, please share this document with your infrastructure team. The two data centers setup might be simply the result of a "lift-and-shift" transition to Kubernetes from a traditional bare-metal or VM based infrastructure, and the benefits diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup.mdx index 063a44186e9..2fc0c036e12 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/backup.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/backup.mdx @@ -3,6 +3,16 @@ title: 'Backup' originalFilePath: 'src/backup.md' --- + + +!!! Important + With version 1.21, backup and recovery capabilities in EDB Postgres for Kubernetes + have sensibly changed due to the introduction of native support for + [Kubernetes Volume Snapshots](backup_volumesnapshot.md). + Up to that point, backup and recovery were available only for object + stores. Please carefully read this section and the [recovery](recovery.md) + one if you have been a user of EDB Postgres for Kubernetes 1.15 through 1.20. + PostgreSQL natively provides first class backup and recovery capabilities based on file system level (physical) copy. These have been successfully used for more than 15 years in mission critical production databases, helping diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx index 29420b58970..48e751b0b76 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx @@ -3,6 +3,8 @@ title: 'Backup on object stores' originalFilePath: 'src/backup_barmanobjectstore.md' --- + + EDB Postgres for Kubernetes natively supports **online/hot backup** of PostgreSQL clusters through continuous physical backup and WAL archiving on an object store. This means that the database is always up (no downtime required) @@ -96,7 +98,10 @@ algorithms via `barman-cloud-backup` (for backups) and - bzip2 - gzip +- lz4 - snappy +- xz +- zstd The compression settings for backups and WALs are independent. 
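For example, a minimal sketch of the relevant part of a `Cluster` manifest, with an illustrative bucket path, a hypothetical `backup-creds` Secret, and arbitrary algorithm choices, showing that base backups and WALs can be compressed differently:

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  storage:
    size: 1Gi
  backup:
    barmanObjectStore:
      destinationPath: "s3://my-bucket/backups/"  # illustrative bucket
      s3Credentials:
        accessKeyId:
          name: backup-creds          # hypothetical Secret holding S3 keys
          key: ACCESS_KEY_ID
        secretAccessKey:
          name: backup-creds
          key: ACCESS_SECRET_KEY
      data:
        compression: bzip2   # compression algorithm for base backups
      wal:
        compression: snappy  # independent algorithm for archived WAL files
```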
See the [DataBackupConfiguration](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#DataBackupConfiguration) and diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup_recovery.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup_recovery.mdx index 1ee141ade78..2fbcc6d51b2 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/backup_recovery.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/backup_recovery.mdx @@ -3,4 +3,14 @@ title: 'Backup and Recovery' originalFilePath: 'src/backup_recovery.md' --- -[Backup](backup.md) and [recovery](recovery.md) are in two separate sections. + + +Until EDB Postgres for Kubernetes 1.20, this page used to contain both the backup and +recovery phases of a PostgreSQL cluster. The reason was that EDB Postgres for Kubernetes +supported only backup and recovery object stores. + +Version 1.21 introduces support for the Kubernetes `VolumeSnapshot` API, +providing more possibilities for the end user. + +As a result, [backup](backup.md) and [recovery](recovery.md) are now in two +separate sections. diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx index 3a085374575..c6ff4a60cbf 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx @@ -3,6 +3,8 @@ title: 'Backup on volume snapshots' originalFilePath: 'src/backup_volumesnapshot.md' --- + + !!! Warning As noted in the [backup document](backup.md), a cold snapshot explicitly set to target the primary will result in the primary being fenced for diff --git a/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx b/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx index b24598570b2..c651d02845a 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx @@ -3,6 +3,8 @@ title: 'Before You Start' originalFilePath: 'src/before_you_start.md' --- + + Before we get started, it is essential to go over some terminology that is specific to Kubernetes and PostgreSQL. diff --git a/product_docs/docs/postgres_for_kubernetes/1/benchmarking.mdx b/product_docs/docs/postgres_for_kubernetes/1/benchmarking.mdx index ceb73331d5e..ea29f56f8d1 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/benchmarking.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/benchmarking.mdx @@ -3,6 +3,8 @@ title: 'Benchmarking' originalFilePath: 'src/benchmarking.md' --- + + The CNP kubectl plugin provides an easy way for benchmarking a PostgreSQL deployment in Kubernetes using EDB Postgres for Kubernetes. Benchmarking is focused on two aspects: @@ -177,7 +179,7 @@ It will: 3. Create a fio deployment composed by a single Pod, which will run fio on the PVC, create graphs after completing the benchmark and start serving the generated files with a webserver. We use the - [`fio-tools`](https://github.com/wallnerryan/fio-tools`) image for that. + [`fio-tools`](https://github.com/wallnerryan/fio-tools) image for that. The Pod created by the deployment will be ready when it starts serving the results. 
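As a usage sketch (flag names as in recent versions of the plugin; run `kubectl cnp fio --help` to confirm in yours), the benchmark described above can be launched with a command along these lines, where the job name, namespace, storage class, and size are all illustrative:

```sh
# Create a 2Gi PVC from the "standard" storage class and deploy
# the fio Pod that runs the benchmark against it
kubectl cnp fio fio-benchmark \
  --namespace default \
  --storageClass standard \
  --pvcSize 2Gi
```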
You can forward the port of the pod created by the deployment diff --git a/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx b/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx index 3db31d60291..885e038c059 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx @@ -3,6 +3,8 @@ title: 'Bootstrap' originalFilePath: 'src/bootstrap.md' --- + + !!! Note When referring to "PostgreSQL cluster" in this section, the same concepts apply to both PostgreSQL and EDB Postgres Advanced Server, unless @@ -33,7 +35,7 @@ For more detailed information about this feature, please refer to the EDB Postgres for Kubernetes requires both the `postgres` user and database to always exist. Using the local Unix Domain Socket, it needs to connect as the `postgres` user to the `postgres` database via `peer` authentication in - order to perform administrative tasks on the cluster. + order to perform administrative tasks on the cluster. **DO NOT DELETE** the `postgres` user or the `postgres` database!!! !!! Info @@ -498,7 +500,23 @@ or a standby PostgreSQL server. It’s crucial to thoroughly review the requirements section below, as the pros and cons of PostgreSQL physical replication fully apply. -The primary use cases for this method include: +- Reporting and business intelligence clusters that need to be regenerated + periodically (daily, weekly) +- Test databases containing live data that require periodic regeneration + (daily, weekly, monthly) and anonymization +- Rapid spin-up of a standalone replica cluster +- Physical migrations of EDB Postgres for Kubernetes clusters to different namespaces or + Kubernetes clusters + +!!! Important + Avoid using this method, based on physical replication, to migrate an + existing PostgreSQL cluster outside of Kubernetes into EDB Postgres for Kubernetes, unless you + are completely certain that all [requirements](#requirements) are met and + the operation has been + thoroughly tested. The EDB Postgres for Kubernetes community does not endorse this approach + for such use cases, and recommends using logical import instead. It is + exceedingly rare that all requirements for physical replication are met in a + way that seamlessly works with EDB Postgres for Kubernetes. - Reporting and business intelligence clusters that need to be regenerated periodically (daily, weekly) @@ -621,7 +639,7 @@ file on the source PostgreSQL instance: host replication streaming_replica all md5 ``` -The following manifest creates a new PostgreSQL 17.4 cluster, +The following manifest creates a new PostgreSQL 17.5 cluster, called `target-db`, using the `pg_basebackup` bootstrap method to clone an external PostgreSQL cluster defined as `source-db` (in the `externalClusters` array). As you can see, the `source-db` @@ -636,7 +654,7 @@ metadata: name: target-db spec: instances: 3 - imageName: quay.io/enterprisedb/postgresql:17.4 + imageName: quay.io/enterprisedb/postgresql:17.5 bootstrap: pg_basebackup: @@ -656,7 +674,7 @@ spec: ``` All the requirements must be met for the clone operation to work, including -the same PostgreSQL version (in our case 17.4). +the same PostgreSQL version (in our case 17.5). #### TLS certificate authentication @@ -671,7 +689,7 @@ in the same Kubernetes cluster. This example can be easily adapted to cover an instance that resides outside the Kubernetes cluster. 
-The manifest defines a new PostgreSQL 17.4 cluster called `cluster-clone-tls`, +The manifest defines a new PostgreSQL 17.5 cluster called `cluster-clone-tls`, which is bootstrapped using the `pg_basebackup` method from the `cluster-example` external cluster. The host is identified by the read/write service in the same cluster, while the `streaming_replica` user is authenticated @@ -686,7 +704,7 @@ metadata: name: cluster-clone-tls spec: instances: 3 - imageName: quay.io/enterprisedb/postgresql:17.4 + imageName: quay.io/enterprisedb/postgresql:17.5 bootstrap: pg_basebackup: diff --git a/product_docs/docs/postgres_for_kubernetes/1/certificates.mdx b/product_docs/docs/postgres_for_kubernetes/1/certificates.mdx index e19687881f4..34df751a467 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/certificates.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/certificates.mdx @@ -3,6 +3,8 @@ title: 'Certificates' originalFilePath: 'src/certificates.md' --- + + EDB Postgres for Kubernetes was designed to natively support TLS certificates. To set up a cluster, the operator requires: diff --git a/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx b/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx index 8b550eb893d..6c104e5ecbf 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx @@ -3,6 +3,8 @@ title: 'Instance pod configuration' originalFilePath: 'src/cluster_conf.md' --- + + ## Projected volumes EDB Postgres for Kubernetes supports mounting custom files inside the Postgres pods through diff --git a/product_docs/docs/postgres_for_kubernetes/1/cncf-projects/external-secrets.mdx b/product_docs/docs/postgres_for_kubernetes/1/cncf-projects/external-secrets.mdx new file mode 100644 index 00000000000..4d7b3a1b355 --- /dev/null +++ b/product_docs/docs/postgres_for_kubernetes/1/cncf-projects/external-secrets.mdx @@ -0,0 +1,267 @@ +--- +title: 'External Secrets' +originalFilePath: 'src/cncf-projects/external-secrets.md' +--- + +[External Secrets](https://external-secrets.io/latest/) is a CNCF Sandbox +project, accepted in 2022 under the sponsorship of TAG Security. + +## About + +The **External Secrets Operator (ESO)** is a Kubernetes operator that enhances +secret management by decoupling the storage of secrets from Kubernetes itself. +It enables seamless synchronization between external secret management systems +and native Kubernetes `Secret` resources. + +ESO supports a wide range of backends, including: + +- [HashiCorp Vault](https://www.vaultproject.io/) +- [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/) +- [Google Secret Manager](https://cloud.google.com/secret-manager) +- [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/) +- [IBM Cloud Secrets Manager](https://www.ibm.com/cloud/secrets-manager) + +…and many more. For a full and up-to-date list of supported providers, refer to +the [official External Secrets documentation](https://external-secrets.io/latest/). 
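If you still need to install ESO itself, a common route (shown here as an assumption rather than a requirement of this page) is the official Helm chart:

```sh
# Add the External Secrets Operator chart repository and install it
# in its own namespace
helm repo add external-secrets https://charts.external-secrets.io
helm install external-secrets external-secrets/external-secrets \
  --namespace external-secrets --create-namespace
```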
+ +## Integration with PostgreSQL and EDB Postgres for Kubernetes + +When it comes to PostgreSQL databases, External Secrets integrates seamlessly +with [EDB Postgres for Kubernetes](https://cloudnative-pg.io/) in two major use cases: + +- **Automated password management:** ESO can handle the automatic generation + and rotation of database user passwords stored in Kubernetes `Secret` + resources, ensuring that applications running inside the cluster always have + access to up-to-date credentials. + +- **Cross-platform secret access:** It enables transparent synchronization of + those passwords with an external Key Management Service (KMS) via a + `SecretStore` resources. This allows applications and developers outside the + Kubernetes cluster—who may not have access to Kubernetes secrets—to retrieve + the database credentials directly from the external KMS. + +## Example: Automated Password Management with External Secrets + +Let’s walk through how to automatically rotate the password of the `app` user +every 24 hours in the `cluster-example` Postgres cluster from the +[quickstart guide](../quickstart.md#part-3-deploy-a-postgresql-cluster). + +!!! Important + Before proceeding, ensure that the `cluster-example` Postgres cluster is up + and running in your environment. + +By default, EDB Postgres for Kubernetes generates and manages a Kubernetes `Secret` named +`cluster-example-app`, which contains the credentials for the `app` user in the +`cluster-example` cluster. You can read more about this in the +[“Connecting from an application” section](../applications.md#secrets). + +With External Secrets, the goal is to: + +1. Define a `Password` generator that specifies how to generate the password. +2. Create an `ExternalSecret` resource that keeps the `cluster-example-app` + secret in sync by updating only the `password` and `pgpass` fields. + +### Creating the Password Generator + +The following example creates a +[`Password` generator](https://external-secrets.io/main/api/generator/password/) +resource named `pg-password-generator` in the `default` namespace. You can +customize the name and properties to suit your needs: + +```yaml +apiVersion: generators.external-secrets.io/v1alpha1 +kind: Password +metadata: + name: pg-password-generator +spec: + length: 42 + digits: 5 + symbols: 5 + symbolCharacters: "-_$@" + noUpper: false + allowRepeat: true +``` + +This specification defines the characteristics of the generated password, +including its length and the inclusion of digits, symbols, and uppercase +letters. + +### Creating the External Secret + +The example below creates an `ExternalSecret` resource named +`cluster-example-app-secret`, which refreshes the password every 24 hours. It +uses a `Merge` policy to update only the specified fields (`password`, `pgpass`, +`jdbc-uri` and `uri`) in the `cluster-example-app` secret. 
+ +```yaml +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: cluster-example-app-secret +spec: + refreshInterval: "24h" + target: + name: cluster-example-app + creationPolicy: Merge + template: + metadata: + labels: + k8s.enterprisedb.io/reload: "true" + data: + password: "{{ .password }}" + pgpass: "cluster-example-rw:5432:app:app:{{ .password }}" + jdbc-uri: "jdbc:postgresql://cluster-example-rw.default:5432/app?password={{ .password }}&user=app" + uri: "postgresql://app:{{ .password }}@cluster-example-rw.default:5432/app" + dataFrom: + - sourceRef: + generatorRef: + apiVersion: generators.external-secrets.io/v1alpha1 + kind: Password + name: pg-password-generator +``` + +The label `k8s.enterprisedb.io/reload: "true"` ensures that EDB Postgres for Kubernetes triggers a reload +of the user password in the database when the secret changes. + +### Verifying the Configuration + +To check that the `ExternalSecret` is correctly synchronizing: + +```sh +kubectl get es cluster-example-app-secret +``` + +To observe the password being refreshed in real time, temporarily reduce the +`refreshInterval` to `30s` and run the following command repeatedly: + +```sh +kubectl get secret cluster-example-app \ + -o jsonpath="{.data.password}" | base64 -d +``` + +You should see the password change every 30 seconds, confirming that the +rotation is working correctly. + +### There's More + +While the example above focuses on the default `cluster-example-app` secret +created by EDB Postgres for Kubernetes, the same approach can be extended to manage any +custom secrets or PostgreSQL users you create to regularly rotate their +password. + +## Example: Integration with an External KMS + +One of the most widely used Key Management Service (KMS) providers in the CNCF +ecosystem is [HashiCorp Vault](https://www.vaultproject.io/). Although Vault is +licensed under the Business Source License (BUSL), a fully compatible and +actively maintained open source alternative is available: [OpenBao](https://openbao.org/). +OpenBao supports all the same interfaces as HashiCorp Vault, making it a true +drop-in replacement. + +In this example, we'll demonstrate how to integrate EDB Postgres for Kubernetes, +External Secrets Operator, and HashiCorp Vault to automatically rotate +a PostgreSQL password and securely store it in Vault. + +!!! Important + This example assumes that HashiCorp Vault is already installed and properly + configured in your environment, and that your team has the necessary expertise + to operate it. There are various ways to deploy Vault, and detailing them is + outside the scope of EDB Postgres for Kubernetes. While it's possible to run Vault inside + Kubernetes, it is more commonly deployed externally. For detailed instructions, + consult the [HashiCorp Vault documentation](https://www.vaultproject.io/docs). + +Continuing from the previous example, we will now create the necessary +`SecretStore` and `PushSecret` resources to complete the integration with +Vault. + +### Creating the `SecretStore` + +In this example, we assume that HashiCorp Vault is accessible from within the +namespace at `http://vault.vault.svc:8200`, and that a Kubernetes `Secret` +named `vault-token` exists in the same namespace, containing the token used to +authenticate with Vault. 
+ +```yaml +apiVersion: external-secrets.io/v1beta1 +kind: SecretStore +metadata: + name: vault-backend +spec: + provider: + vault: + server: "http://vault.vault.svc:8200" + path: "secrets" + # Specifies the Vault KV secret engine version ("v1" or "v2"). + # Defaults to "v2" if not set. + version: "v2" + auth: + # References a Kubernetes Secret that contains the Vault token. + # See: https://www.vaultproject.io/docs/auth/token + tokenSecretRef: + name: "vault-token" + key: "token" +--- +apiVersion: v1 +kind: Secret +metadata: + name: vault-token +data: + token: aHZzLioqKioqKio= # hvs.******* +``` + +This configuration creates a `SecretStore` resource named `vault-backend`. + +!!! Important + This example uses basic token-based authentication, which is suitable for + testing API, and CLI use cases. While it is the default method enabled in + Vault, it is not recommended for production environments. For production, + consider using more secure authentication methods. + Refer to the [External Secrets Operator documentation](https://external-secrets.io/latest/provider/hashicorp-vault/) + for a full list of supported authentication mechanisms. + +!!! Info + HashiCorp Vault must have a KV secrets engine enabled at the `secrets` path + with version `v2`. If your Vault instance uses a different path or + version, be sure to update the `path` and `version` fields accordingly. + +### Creating the `PushSecret` + +The `PushSecret` resource is used to push a Kubernetes `Secret` to HashiCorp +Vault. In this simplified example, we'll push the credentials for the `app` +user of the sample cluster `cluster-example`. + +For more details on configuring `PushSecret`, refer to the +[External Secrets Operator documentation](https://external-secrets.io/latest/api/pushsecret/). + +```yaml +apiVersion: external-secrets.io/v1alpha1 +kind: PushSecret +metadata: + name: pushsecret-example +spec: + deletionPolicy: Delete + refreshInterval: 24h + secretStoreRefs: + - name: vault-backend + kind: SecretStore + selector: + secret: + name: cluster-example-app + data: + - match: + remoteRef: + remoteKey: cluster-example-app +``` + +In this example, the `PushSecret` resource instructs the External Secrets +Operator to push the Kubernetes `Secret` named `cluster-example-app` to +HashiCorp Vault (from the previous example). The `remoteKey` defines the name +under which the secret will be stored in Vault, using the `SecretStore` named +`vault-backend`. + +### Verifying the Configuration + +To verify that the `PushSecret` is functioning correctly, navigate to the +HashiCorp Vault UI. In the `kv` secrets engine at the path `secrets`, you +should find a secret named `cluster-example-app`, corresponding to the +`remoteKey` defined above. diff --git a/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx b/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx index c14f5b52717..dd91458902e 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx @@ -3,6 +3,8 @@ title: 'Connection pooling' originalFilePath: 'src/connection_pooling.md' --- + + EDB Postgres for Kubernetes provides native support for connection pooling with [PgBouncer](https://www.pgbouncer.org/), one of the most popular open source connection poolers for PostgreSQL, through the `Pooler` custom resource definition (CRD). 
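For orientation, here is a minimal `Pooler` sketch (instance count and PgBouncer parameters are illustrative) that places PgBouncer in front of the read/write endpoint of `cluster-example`:

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Pooler
metadata:
  name: pooler-example-rw
spec:
  cluster:
    name: cluster-example
  instances: 3        # number of PgBouncer pods
  type: rw            # attach to the read/write service
  pgbouncer:
    poolMode: session
    parameters:
      max_client_conn: "1000"
      default_pool_size: "10"
```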
diff --git a/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx b/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx index 43442a066c0..5add2e1bd0e 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx @@ -3,6 +3,8 @@ title: 'Container Image Requirements' originalFilePath: 'src/container_images.md' --- + + The EDB Postgres for Kubernetes operator for Kubernetes is designed to work with any compatible container image of PostgreSQL that complies with the following requirements: diff --git a/product_docs/docs/postgres_for_kubernetes/1/controller.mdx b/product_docs/docs/postgres_for_kubernetes/1/controller.mdx index f8b7ae9016e..4a4b65d6a39 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/controller.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/controller.mdx @@ -3,6 +3,8 @@ title: 'Custom Pod Controller' originalFilePath: 'src/controller.md' --- + + Kubernetes uses the [Controller pattern](https://kubernetes.io/docs/concepts/architecture/controller/) to align the current cluster state with the desired one. diff --git a/product_docs/docs/postgres_for_kubernetes/1/database_import.mdx b/product_docs/docs/postgres_for_kubernetes/1/database_import.mdx index d203baffb82..e950fb415f9 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/database_import.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/database_import.mdx @@ -3,6 +3,8 @@ title: 'Importing Postgres databases' originalFilePath: 'src/database_import.md' --- + + This section describes how to import one or more existing PostgreSQL databases inside a brand new EDB Postgres for Kubernetes cluster. @@ -20,8 +22,8 @@ As a result, the instructions in this section are suitable for both: - importing one or more databases from an existing PostgreSQL instance, even outside Kubernetes - importing the database from any PostgreSQL version to one that is either the - same or newer, enabling *major upgrades* of PostgreSQL (e.g. from version 11.x - to version 15.x) + same or newer, enabling *major upgrades* of PostgreSQL (e.g. from version 13.x + to version 17.x) !!! Warning When performing major upgrades of PostgreSQL you are responsible for making diff --git a/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx b/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx index e23788fb088..35e7793ca0e 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx @@ -3,6 +3,8 @@ title: 'Declarative hibernation' originalFilePath: 'src/declarative_hibernation.md' --- + + EDB Postgres for Kubernetes is designed to keep PostgreSQL clusters up, running and available anytime. 
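Hibernation is requested declaratively through an annotation on the `Cluster` resource; as a quick sketch, using the `cluster-example` cluster from these docs:

```sh
# Hibernate the cluster: Postgres pods are destroyed, PVCs are kept
kubectl annotate cluster cluster-example --overwrite \
  k8s.enterprisedb.io/hibernation=on

# Bring the cluster back later
kubectl annotate cluster cluster-example --overwrite \
  k8s.enterprisedb.io/hibernation=off
```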
@@ -61,7 +63,7 @@ $ kubectl cnp status Cluster Summary Name: cluster-example Namespace: default -PostgreSQL Image: quay.io/enterprisedb/postgresql:17.4 +PostgreSQL Image: quay.io/enterprisedb/postgresql:17.5 Primary instance: cluster-example-2 Status: Cluster in healthy state Instances: 3 diff --git a/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx b/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx index 7144a67eefa..e8a35fa031e 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx @@ -3,6 +3,8 @@ title: 'PostgreSQL Role Management' originalFilePath: 'src/declarative_role_management.md' --- + + From its inception, EDB Postgres for Kubernetes has managed the creation of specific roles required in PostgreSQL instances: diff --git a/product_docs/docs/postgres_for_kubernetes/1/failover.mdx b/product_docs/docs/postgres_for_kubernetes/1/failover.mdx index 73f92b0bffc..bb5fa67f5d6 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/failover.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/failover.mdx @@ -3,6 +3,8 @@ title: 'Automated failover' originalFilePath: 'src/failover.md' --- + + In the case of unexpected errors on the primary for longer than the `.spec.failoverDelay` (by default `0` seconds), the cluster will go into **failover mode**. This may happen, for example, when: diff --git a/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx b/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx index b8186905157..35010038306 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx @@ -3,6 +3,8 @@ title: 'Failure Modes' originalFilePath: 'src/failure_modes.md' --- + + !!! Note In previous versions of EDB Postgres for Kubernetes, this page included specific failure scenarios. Since these largely follow standard Kubernetes behavior, we have diff --git a/product_docs/docs/postgres_for_kubernetes/1/faq.mdx b/product_docs/docs/postgres_for_kubernetes/1/faq.mdx index ee91c65d1c4..f79a2b373c5 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/faq.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/faq.mdx @@ -3,6 +3,8 @@ title: 'Frequently Asked Questions (FAQ)' originalFilePath: 'src/faq.md' --- + + ## Running PostgreSQL in Kubernetes **Everyone knows that stateful workloads like PostgreSQL cannot run in diff --git a/product_docs/docs/postgres_for_kubernetes/1/fencing.mdx b/product_docs/docs/postgres_for_kubernetes/1/fencing.mdx index c35e05abb82..0ba6a957961 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/fencing.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/fencing.mdx @@ -3,6 +3,8 @@ title: 'Fencing' originalFilePath: 'src/fencing.md' --- + + Fencing in EDB Postgres for Kubernetes is the ultimate process of protecting the data in one, more, or even all instances of a PostgreSQL cluster when they appear to be malfunctioning. 
When an instance is fenced, the PostgreSQL server diff --git a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx index eafb229b16a..cf43678272f 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx @@ -3,6 +3,8 @@ title: 'Installation and upgrades' originalFilePath: 'src/installation_upgrade.md' --- + + !!! Seealso "OpenShift" For instructions on how to install Cloud Native PostgreSQL on Red Hat OpenShift Container Platform, please refer to the ["OpenShift"](openshift.md) @@ -79,14 +81,14 @@ through a YAML manifest applied via `kubectl`. There are two different manifests available depending on your subscription plan: -- Standard: The [latest standard operator manifest](https://get.enterprisedb.io/pg4k/pg4k-standard-1.25.1.yaml). -- Enterprise: The [latest enterprise operator manifest](https://get.enterprisedb.io/pg4k/pg4k-enterprise-1.25.1.yaml). +- Standard: The [latest standard operator manifest](https://get.enterprisedb.io/pg4k/pg4k-standard-1.22.10.yaml). +- Enterprise: The [latest enterprise operator manifest](https://get.enterprisedb.io/pg4k/pg4k-enterprise-1.22.10.yaml). You can install the manifest for the latest version of the operator by running: ```sh kubectl apply --server-side -f \ - https://get.enterprisedb.io/pg4k/pg4k-$EDB_SUBSCRIPTION_PLAN-1.25.1.yaml + https://get.enterprisedb.io/pg4k/pg4k-$EDB_SUBSCRIPTION_PLAN-1.22.10.yaml ``` You can verify that with: diff --git a/product_docs/docs/postgres_for_kubernetes/1/instance_manager.mdx b/product_docs/docs/postgres_for_kubernetes/1/instance_manager.mdx index b529f086de6..e6794153d6c 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/instance_manager.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/instance_manager.mdx @@ -3,6 +3,8 @@ title: 'Postgres instance manager' originalFilePath: 'src/instance_manager.md' --- + + EDB Postgres for Kubernetes does not rely on an external tool for failover management. It simply relies on the Kubernetes API server and a native key component called: the **Postgres instance manager**. diff --git a/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx index 517d6cb11ff..36929341495 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx @@ -35,11 +35,11 @@ them in your systems. #### Debian packages -For example, let's install the 1.25.0 release of the plugin, for an Intel based +For example, let's install the 1.22.8 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. ```sh -wget https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.25.0/kubectl-cnp_1.25.0_linux_x86_64.deb \ +wget https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.22.8/kubectl-cnp_1.22.8_linux_x86_64.deb \ --output-document kube-plugin.deb ``` @@ -50,17 +50,17 @@ $ sudo dpkg -i kube-plugin.deb Selecting previously unselected package cnp. (Reading database ... 6688 files and directories currently installed.) Preparing to unpack kube-plugin.deb ... -Unpacking kubectl-cnp (1.25.0) ... -Setting up kubectl-cnp (1.25.0) ... +Unpacking kubectl-cnp (1.22.8) ... +Setting up kubectl-cnp (1.22.8) ... 
``` #### RPM packages -As in the example for `.rpm` packages, let's install the 1.25.0 release for an +As in the example for `.rpm` packages, let's install the 1.22.8 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. ```sh -curl -L https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.25.0/kubectl-cnp_1.25.0_linux_x86_64.rpm \ +curl -L https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.22.8/kubectl-cnp_1.22.8_linux_x86_64.rpm \ --output kube-plugin.rpm ``` @@ -74,7 +74,7 @@ Dependencies resolved. Package Architecture Version Repository Size ==================================================================================================== Installing: - cnp x86_64 1.25.0-1 @commandline 20 M + cnp x86_64 1.22.8-1 @commandline 20 M Transaction Summary ==================================================================================================== @@ -265,9 +265,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/604DE38 Primary OK BestEffort 1.25.0 k8s-eu-worker -sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker2 -sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker +sandbox-1 0/604DE38 Primary OK BestEffort 1.22.8 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.22.8 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.22.8 k8s-eu-worker ``` If you require more detailed status information, use the `--verbose` option (or @@ -321,9 +321,9 @@ sandbox-primary primary 1 1 1 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/6053720 Primary OK BestEffort 1.25.0 k8s-eu-worker -sandbox-2 0/6053720 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker2 -sandbox-3 0/6053720 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker +sandbox-1 0/6053720 Primary OK BestEffort 1.22.8 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.22.8 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.22.8 k8s-eu-worker ``` With an additional `-v` (e.g. 
`kubectl cnp status sandbox -v -v`), you can @@ -546,12 +546,12 @@ Archive: report_operator_.zip ```output ====== Begin of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting EDB Postgres for Kubernetes Operator","version":"1.25.0","build":{"Version":"1.25.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting EDB Postgres for Kubernetes Operator","version":"1.22.8","build":{"Version":"1.22.8+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting EDB Postgres for Kubernetes Operator","version":"1.25.0","build":{"Version":"1.25.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting EDB Postgres for Kubernetes Operator","version":"1.22.8","build":{"Version":"1.22.8+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` diff --git a/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx b/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx index 63733dfd2fd..6ddb6efbc3a 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx @@ -3,6 +3,8 @@ title: 'Kubernetes Upgrade and Maintenance' originalFilePath: 'src/kubernetes_upgrade.md' --- + + Maintaining an up-to-date Kubernetes cluster is crucial for ensuring optimal performance and security, particularly for self-managed clusters, especially those running on bare metal infrastructure. Regular updates help address diff --git a/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx b/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx index 10155eb2d82..3b426d8c1c9 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx @@ -3,6 +3,8 @@ title: 'Labels and annotations' originalFilePath: 'src/labels_annotations.md' --- + + Resources in Kubernetes are organized in a flat structure, with no hierarchical information or relationship between them. However, such resources and objects can be linked together and put in relationship through *labels* and @@ -31,40 +33,45 @@ they're inherited by all resources created by it (including pods). ## Predefined labels -These predefined labels are managed by EDB Postgres for Kubernetes. +EDB Postgres for Kubernetes manages the following predefined labels: `k8s.enterprisedb.io/backupDate` -: The date of the backup in ISO 8601 format (`YYYYMMDD`) +: The date of the backup in ISO 8601 format (`YYYYMMDD`). + This label is available only on `VolumeSnapshot` resources. `k8s.enterprisedb.io/backupName` -: Backup identifier, available only on `Backup` and `VolumeSnapshot` - resources +: Backup identifier. + This label is available only on `VolumeSnapshot` resources. 
`k8s.enterprisedb.io/backupMonth` -: The year/month when a backup was taken +: The year/month when a backup was taken. + This label is available only on `VolumeSnapshot` resources. `k8s.enterprisedb.io/backupTimeline` -: The timeline of the instance when a backup was taken +: The timeline of the instance when a backup was taken. + This label is available only on `VolumeSnapshot` resources. `k8s.enterprisedb.io/backupYear` -: The year a backup was taken +: The year a backup was taken. + This label is available only on `VolumeSnapshot` resources. `k8s.enterprisedb.io/cluster` -: Name of the cluster +: Name of the cluster. `k8s.enterprisedb.io/immediateBackup` : Applied to a `Backup` resource if the backup is the first one created from - a `ScheduledBackup` object having `immediate` set to `true` + a `ScheduledBackup` object having `immediate` set to `true`. `k8s.enterprisedb.io/instanceName` : Name of the PostgreSQL instance (replaces the old and - deprecated `postgresql` label) + deprecated `postgresql` label). `k8s.enterprisedb.io/jobRole` : Role of the job (that is, `import`, `initdb`, `join`, ...) `k8s.enterprisedb.io/onlineBackup` -: Whether the backup is online (hot) or taken when Postgres is down (cold) +: Whether the backup is online (hot) or taken when Postgres is down (cold). + This label is available only on `VolumeSnapshot` resources. `postgresql` : deprecated, Name of the PostgreSQL instance. Use `k8s.enterprisedb.io/instanceName` @@ -72,13 +79,13 @@ instead `k8s.enterprisedb.io/podRole` : Distinguishes pods dedicated to pooler deployment from those used for - database instances + database instances. `k8s.enterprisedb.io/poolerName` -: Name of the PgBouncer pooler +: Name of the PgBouncer pooler. `k8s.enterprisedb.io/pvcRole` -: Purpose of the PVC, such as `PG_DATA` or `PG_WAL` +: Purpose of the PVC, such as `PG_DATA` or `PG_WAL`. `k8s.enterprisedb.io/reload` : Available on `ConfigMap` and `Secret` resources. When set to `true`, @@ -96,14 +103,14 @@ instead `k8s.enterprisedb.io/scheduled-backup` : When available, name of the `ScheduledBackup` resource that created a given - `Backup` object + `Backup` object. `k8s.enterprisedb.io/instanceRole` : Whether the instance running in a pod is a `primary` or a `replica`. ## Predefined annotations -These predefined annotations are managed by EDB Postgres for Kubernetes. +EDB Postgres for Kubernetes manages the following predefined annotations: `container.apparmor.security.beta.kubernetes.io/*` : Name of the AppArmor profile to apply to the named container. @@ -112,15 +119,18 @@ These predefined annotations are managed by EDB Postgres for Kubernetes. `k8s.enterprisedb.io/backupEndTime` : The time a backup ended. + This annotation is available only on `VolumeSnapshot` resources. `k8s.enterprisedb.io/backupEndWAL` : The WAL at the conclusion of a backup. + This annotation is available only on `VolumeSnapshot` resources. `k8s.enterprisedb.io/backupStartTime` : The time a backup started. `k8s.enterprisedb.io/backupStartWAL` : The WAL at the start of a backup. + This annotation is available only on `VolumeSnapshot` resources. `k8s.enterprisedb.io/coredumpFilter` : Filter to control the coredump of Postgres processes, expressed with a @@ -199,7 +209,7 @@ to the deployment specification, except for changes to `spec.instances`. : Contains the latest cluster `reload` time. `reload` is triggered by the user through a plugin. 
`k8s.enterprisedb.io/skipEmptyWalArchiveCheck`
: When set to `enabled` on a `Cluster` resource, the operator disables the check
  that ensures that the WAL archive is empty before writing data. Use at your
  own risk.

@@ -214,6 +224,15 @@ to the deployment specification, except for changes to `spec.instances`.

`k8s.enterprisedb.io/snapshotEndTime`
: The time a snapshot was marked as ready to use.

`k8s.enterprisedb.io/validation`
: When set to `disabled` on an EDB Postgres for Kubernetes-managed custom resource, the
  validation webhook allows all changes without restriction.

  **⚠️ Warning:** Disabling validation may permit unsafe or destructive
  operations. Use this setting with caution and at your own risk.

`kubectl.kubernetes.io/restartedAt`
: When available, the time of last requested restart of a Postgres cluster.

diff --git a/product_docs/docs/postgres_for_kubernetes/1/logging.mdx b/product_docs/docs/postgres_for_kubernetes/1/logging.mdx
index 6a2cdd3a3f1..3e53504c920 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/logging.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/logging.mdx
@@ -3,6 +3,8 @@ title: 'Logging'
originalFilePath: 'src/logging.md'
---

+

+
EDB Postgres for Kubernetes outputs logs in JSON format directly to standard output, including
PostgreSQL logs, without persisting them to storage for security reasons. This design facilitates seamless integration with most Kubernetes-compatible log
diff --git a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
index 429e33f743d..b586cb7e649 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
@@ -3,10 +3,12 @@ title: 'Monitoring'
originalFilePath: 'src/monitoring.md'
---

+

+
!!! Important
 Installing Prometheus and Grafana is beyond the scope of this project. We
 assume they are correctly installed in your system. However, for
- experimentation we provide instructions in
+ experimentation we provide instructions in
 [Part 4 of the Quickstart](quickstart.md#part-4-monitor-clusters-with-prometheus-and-grafana).

## Monitoring Instances

@@ -57,14 +59,13 @@ by specifying a list of one or more databases in the `target_databases` option.
 with Prometheus and Grafana, you can find a quick setup guide in
 [Part 4 of the quickstart](quickstart.md#part-4-monitor-clusters-with-prometheus-and-grafana)

-### Prometheus Operator example
+### Monitoring with the Prometheus operator

A specific PostgreSQL cluster can be monitored using the
-[Prometheus Operator's](https://github.com/prometheus-operator/prometheus-operator) resource
-[PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/v0.75.1/Documentation/api.md#podmonitor).
-
-A `PodMonitor` that correctly points to the Cluster can be automatically created by the operator by setting
-`.spec.monitoring.enablePodMonitor` to `true` in the Cluster resource itself (default: `false`).
+[Prometheus Operator's](https://github.com/prometheus-operator/prometheus-operator) resource
+[PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/v0.47.1/Documentation/api.md#podmonitor).
+A PodMonitor correctly pointing to a Cluster can be automatically created by the operator by setting
+`.spec.monitoring.enablePodMonitor` to `true` in the Cluster resource itself (default: false).

!!!
Important
 Any change to the `PodMonitor` created automatically will be overridden by the Operator at the next reconciliation
@@ -220,7 +221,7 @@ cnp_collector_up{cluster="cluster-example"} 1

# HELP cnp_collector_postgres_version Postgres version
# TYPE cnp_collector_postgres_version gauge
-cnp_collector_postgres_version{cluster="cluster-example",full="17.4"} 17.4
+cnp_collector_postgres_version{cluster="cluster-example",full="17.5"} 17.5

# HELP cnp_collector_last_failed_backup_timestamp The last failed backup as a unix timestamp
# TYPE cnp_collector_last_failed_backup_timestamp gauge
@@ -674,7 +675,7 @@ cnp_pg_replication_is_wal_receiver_up 0

### Default set of metrics

-The operator can be configured to automatically inject in a Cluster a set of
+The operator can be configured to automatically inject in a Cluster a set of
 monitoring queries defined in a ConfigMap or a Secret, inside the operator's
 namespace. You have to set the `MONITORING_QUERIES_CONFIGMAP` or
 `MONITORING_QUERIES_SECRET` key in the ["operator configuration"](operator_conf.md),
@@ -684,7 +685,7 @@ the operator will then use the content of the `queries` key.

Any change to the `queries` content will be immediately reflected on all the
deployed Clusters using it.

-The operator installation manifests come with a predefined ConfigMap,
+The operator installation manifests come with a predefined ConfigMap,
 called `postgresql-operator-default-monitoring`, to be used by all Clusters.
 `MONITORING_QUERIES_CONFIGMAP` is by default set to
 `postgresql-operator-default-monitoring` in the operator configuration.
@@ -705,7 +706,7 @@ EDB Postgres for Kubernetes is inspired by the PostgreSQL Prometheus Exporter, b
 presents some differences. In particular, the `cache_seconds` field is not
 implemented in EDB Postgres for Kubernetes' exporter.

-## Monitoring the operator
+## Monitoring the EDB Postgres for Kubernetes operator

The operator internally exposes [Prometheus](https://prometheus.io/) metrics
via HTTP on port 8080, named `metrics`.
@@ -715,17 +716,21 @@ via HTTP on port 8080, named `metrics`.
 the ["How to inspect the exported metrics"](#how-to-inspect-the-exported-metrics)
 section below.

-Currently, the operator exposes default `kubebuilder` metrics, see
-[kubebuilder documentation](https://book.kubebuilder.io/reference/metrics.html) for more details.
+Currently, the operator exposes default `kubebuilder` metrics. See
+[kubebuilder documentation](https://book.kubebuilder.io/reference/metrics.html)
+for more details.

-### Prometheus Operator example
+### Monitoring the operator with Prometheus

-The operator deployment can be monitored using the
-[Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) by defining the following
+The operator can be monitored using the
+[Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) by defining a
 [PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/v0.47.1/Documentation/api.md#podmonitor)
-resource:
+pointing to the operator pod(s), as follows (note it's applied in the same
+namespace as the operator):

```yaml
# The PodMonitor name and selector label below assume the default operator
# deployment; verify them against your installation.
kubectl -n postgresql-operator-system apply -f - <<EOF
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: postgresql-operator-controller-manager
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: cloud-native-postgresql
  podMetricsEndpoints:
    - port: metrics
EOF

# Alternatively, forward port 8080 of an operator pod
# (<operator-pod-name> is a placeholder):
kubectl -n postgresql-operator-system port-forward <operator-pod-name> 8080:8080
```

With port forwarding active, the metrics are easily viewable on a browser at
[`localhost:8080/metrics`](http://localhost:8080/metrics).
### Using curl

Create the `curl` pod with the following command:

```yaml
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: curl
spec:
  containers:
  - name: curl
    image: curlimages/curl  # any image providing curl works
    command: ['sleep', '3600']
EOF

# Inspect the metrics exported by an instance by connecting to TCP port
# 9187 of the target pod (<pod_ip> is a placeholder for the pod IP):
kubectl exec -ti curl -- curl -s <pod_ip>:9187/metrics
@@ -799,10 +830,10 @@
If you enabled TLS metrics, run instead:

kubectl exec -ti curl -- curl -sk https://${POD_IP}:9187/metrics
```

-In case you want to access the metrics of the operator, you need to point
+To access the metrics of the operator, you need to point
 to the pod where the operator is running, and use TCP port 8080 as target.

-At the end of the inspection, please make sure you delete the `curl` pod:
+When you're done inspecting metrics, please remember to delete the `curl` pod:

```shell
kubectl delete -f curl.yaml
@@ -829,15 +860,18 @@
section for context:

In addition, we provide the "raw" sources for the Prometheus alert rules in the
`alerts.yaml` file.

-The [Grafana dashboard](https://github.com/cloudnative-pg/grafana-dashboards/blob/main/charts/cluster/grafana-dashboard.json) has a dedicated repository now.
-
-Note that, for the configuration of `kube-prometheus-stack`, other fields and
-settings are available over what we provide in `kube-stack-config.yaml`.
+A Grafana dashboard for EDB Postgres for Kubernetes clusters and operator is kept in the
+dedicated repository [`cloudnative-pg/grafana-dashboards`](https://github.com/cloudnative-pg/grafana-dashboards/tree/main)
+as a dashboard JSON configuration:
+[`grafana-dashboard.json`](https://github.com/cloudnative-pg/grafana-dashboards/blob/main/charts/cluster/grafana-dashboard.json).
+The file can be downloaded and imported into Grafana
+(menus: Dashboard > New > Import).

-You can execute `helm show values prometheus-community/kube-prometheus-stack`
-to view them. For further information, please refer to the
+For a general reference on the settings available on `kube-prometheus-stack`,
+you can execute `helm show values prometheus-community/kube-prometheus-stack`.
+Please refer to the
 [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack)
 page for more detail.

## Monitoring on OpenShift

diff --git a/product_docs/docs/postgres_for_kubernetes/1/networking.mdx b/product_docs/docs/postgres_for_kubernetes/1/networking.mdx
index 0df3fb93072..cd3d58cdb79 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/networking.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/networking.mdx
@@ -3,6 +3,8 @@ title: 'Networking'
originalFilePath: 'src/networking.md'
---

+

+
EDB Postgres for Kubernetes assumes the underlying Kubernetes cluster has the
required connectivity already set up. Networking on Kubernetes is an important
and extended topic; please refer to
diff --git a/product_docs/docs/postgres_for_kubernetes/1/object_stores.mdx b/product_docs/docs/postgres_for_kubernetes/1/object_stores.mdx
index d74b8942080..c3d5a6aa790 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/object_stores.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/object_stores.mdx
@@ -3,6 +3,8 @@ title: 'Appendix A - Common object stores for backups'
originalFilePath: 'src/appendixes/object_stores.md'
---

+

+
You can store the [backup](backup.md) files in any service that is supported
by the Barman Cloud infrastructure.
That is: diff --git a/product_docs/docs/postgres_for_kubernetes/1/openshift.mdx b/product_docs/docs/postgres_for_kubernetes/1/openshift.mdx index 8edcb1ab7d2..44e83e6f969 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/openshift.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/openshift.mdx @@ -202,27 +202,27 @@ EDB Postgres for Kubernetes is distributed through the following [OLM channels](https://olm.operatorframework.io/docs/best-practices/channel-naming/), each serving a distinct purpose: -- `candidate`: this channel provides early access to the next potential `fast` - release. It includes the latest pre-release versions with new features and - fixes, but is considered experimental and not supported. Use this channel only - for testing and validation purposes—**not in production environments**. Versions in - `candidate` may not appear in other channels if no further updates are - recommended. - -- `fast`: designed for users who want timely access to the latest stable - features and patches. The head of the `fast` channel always points to the - latest **patch release** of the latest **minor release** of EDB Postgres for - Kubernetes. - -- `stable`: similar to `fast`, but restricted to the latest **minor release** - currently under EDB’s Long Term Support (LTS) policy. Designed for users who - require predictable updates and official support while benefiting from ongoing - stability and maintenance. - -- `stable-vX.Y`: tracks the latest patch release within a specific minor - version (e.g., `stable-v1.26`). These channels are ideal for environments - that require version pinning and predictable updates within a stable minor - release. +- `candidate`: this channel provides early access to the next potential `fast` + release. It includes the latest pre-release versions with new features and + fixes, but is considered experimental and not supported. Use this channel only + for testing and validation purposes—**not in production environments**. Versions in + `candidate` may not appear in other channels if no further updates are + recommended. + +- `fast`: designed for users who want timely access to the latest stable + features and patches. The head of the `fast` channel always points to the + latest **patch release** of the latest **minor release** of EDB Postgres for + Kubernetes. + +- `stable`: similar to `fast`, but restricted to the latest **minor release** + currently under EDB’s Long Term Support (LTS) policy. Designed for users who + require predictable updates and official support while benefiting from ongoing + stability and maintenance. + +- `stable-vX.Y`: tracks the latest patch release within a specific minor + version (e.g., `stable-v1.26`). These channels are ideal for environments + that require version pinning and predictable updates within a stable minor + release. The `fast` and `stable` channels may span multiple minor versions, whereas each `stable-vX.Y` channel is limited to patch updates within a specific minor @@ -517,25 +517,6 @@ suits your needs in terms of operating system and architecture: [OpenShift CLI documentation](https://docs.openshift.com/container-platform/4.16/cli_reference/openshift_cli/getting-started-cli.html) directly maintained by Red Hat. -## Upgrading the operator - -In order to upgrade your operator safely, you need to be in a `stable-vX.Y` channel, -or the `fast` channel if you want to follow the head of the development trunk of -EDB Postgres for Kubernetes. 
- -If you are currently in the `stable` channel, you need to either choose `fast` or -progressively move to the latest Long Term Supported release of EDB Postgres -for Kubernetes - currently `stable-v1.25`. - -!!! Important - We have made a change to the way conditions are represented in the status of - the operator in version 1.16.0, 1.15.2, and onward. This change could cause - an operator upgrade to hang on Openshift, if one of the old conditions are set - during the upgrade process, because of the way the Operator Lifecycle Manager - checks new CRDs against existing CRs. To avoid this issue, you need to upgrade - to version 1.15.5 first, which will automatically remove the offending - conditions from all the cluster CRs that prevent Openshift from upgrading. - ## Predefined RBAC objects EDB Postgres for Kubernetes comes with a predefined set of resources that play an @@ -883,32 +864,8 @@ rules: ## Pod Security Standards -EDB Postgres for Kubernetes on OpenShift supports the `restricted` and -`restricted-v2` SCC (`SecurityContextConstraints`), which vary depending on the -version of EDB Postgres for Kubernetes and OpenShift you are running. -Please pay close attention to the following table and notes: - -| EDB Postgres for Kubernetes Version | OpenShift Versions | Supported SCC | -| ----------------------------------- | ------------------ | ------------------------- | -| 1.25.x | 4.12-4.18 | restricted, restricted-v2 | -| 1.24.x | 4.12-4.17 | restricted, restricted-v2 | -| 1.23.x | 4.12-4.16 | restricted, restricted-v2 | -| 1.22.x | 4.12-4.16 | restricted, restricted-v2 | - -!!! Important - Since version 4.10 only provides `restricted`, EDB Postgres for Kubernetes - versions 1.18 and 1.19 support `restricted`. Future releases of EDB Postgres - for Kubernetes are not guaranteed to support `restricted`, since in Openshift - 4.11 `restricted` was replaced with `restricted-v2`. - -!!! Important "Security changes in OpenShift >=4.11" - With Kubernetes 1.21 the `PodSecurityPolicy` has been replaced by the Pod - Security Admission Controller to become the new default way to manage the - security inside Kubernetes. On Openshift 4.11, which is running Kubernetes - 1.21, there is also included a new set of SecurityContextConstraints (SCC) that - will be the default SCCs to manage workloads; these new SCC are - `restricted-v2`, `nonroot-v2` and `hostnetwork-v2`. For more information, - please read ["Important OpenShift changes to Pod Security Standards"](https://connect.redhat.com/en/blog/important-openshift-changes-pod-security-standards). +EDB Postgres for Kubernetes on OpenShift works with the `restricted-v2` SCC +(`SecurityContextConstraints`). 
Since the operator has been developed with a security focus from the beginning, in addition to always adhering to the Red Hat Certification process, EDB diff --git a/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx b/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx index a003957462a..482cbd92063 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx @@ -3,6 +3,8 @@ title: 'Operator capability levels' originalFilePath: 'src/operator_capability_levels.md' --- + + These capabilities were implemented by EDB Postgres for Kubernetes, classified using the [Operator SDK definition of Capability Levels](https://operatorframework.io/operator-capabilities/) diff --git a/product_docs/docs/postgres_for_kubernetes/1/operator_conf.mdx b/product_docs/docs/postgres_for_kubernetes/1/operator_conf.mdx index 49373e83b2b..6bf9c1e0806 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/operator_conf.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/operator_conf.mdx @@ -3,6 +3,8 @@ title: 'Operator configuration' originalFilePath: 'src/operator_conf.md' --- + + The operator for EDB Postgres for Kubernetes is installed from a standard deployment manifest and follows the convention over configuration paradigm. While this is fine in most cases, there are some scenarios where you want diff --git a/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1/v1.22.10.mdx b/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1/v1.22.10.mdx new file mode 100644 index 00000000000..c92dcb1f86a --- /dev/null +++ b/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1/v1.22.10.mdx @@ -0,0 +1,5905 @@ +--- +title: API Reference - v1.22.10 +navTitle: v1.22.10 +pdfExclude: 'true' + +--- + +

Package v1 contains API Schema definitions for the postgresql v1 API group

+ +## Resource Types + +- [Backup](#postgresql-k8s-enterprisedb-io-v1-Backup) +- [Cluster](#postgresql-k8s-enterprisedb-io-v1-Cluster) +- [ClusterImageCatalog](#postgresql-k8s-enterprisedb-io-v1-ClusterImageCatalog) +- [Database](#postgresql-k8s-enterprisedb-io-v1-Database) +- [ImageCatalog](#postgresql-k8s-enterprisedb-io-v1-ImageCatalog) +- [Pooler](#postgresql-k8s-enterprisedb-io-v1-Pooler) +- [Publication](#postgresql-k8s-enterprisedb-io-v1-Publication) +- [ScheduledBackup](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackup) +- [Subscription](#postgresql-k8s-enterprisedb-io-v1-Subscription) + +
+ +## Backup + +

A Backup resource is a request for a PostgreSQL backup by the user.

+ + + + + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.k8s.enterprisedb.io/v1
kind [Required]
string
Backup
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+BackupSpec +
+

Specification of the desired behavior of the backup. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
status
+BackupStatus +
+

Most recently observed status of the backup. This data may not be up to +date. Populated by the system. Read-only. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
+ +
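+As an illustration only, a minimal `Backup` request might look like the
+following sketch, which assumes an existing `Cluster` named
+`cluster-example` with a configured object store:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Backup
+metadata:
+  name: backup-example
+spec:
+  # barmanObjectStore is the default method; volumeSnapshot is an alternative
+  method: barmanObjectStore
+  cluster:
+    name: cluster-example   # assumed name of an existing Cluster
+```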
+ +## Cluster + +

Cluster is the Schema for the PostgreSQL API

+ + + + + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.k8s.enterprisedb.io/v1
kind [Required]
string
Cluster
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+ClusterSpec +
+

Specification of the desired behavior of the cluster. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
status
+ClusterStatus +
+

Most recently observed status of the cluster. This data may not be up +to date. Populated by the system. Read-only. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
+ +
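+For orientation, a `Cluster` can be as small as the following sketch; the
+name and the storage size are illustrative assumptions:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3   # one primary and two standby instances
+  storage:
+    size: 1Gi    # assumed size, choose one that fits your workload
+```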
+ +## ClusterImageCatalog + +

ClusterImageCatalog is the Schema for the clusterimagecatalogs API

+ + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.k8s.enterprisedb.io/v1
kind [Required]
string
ClusterImageCatalog
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+ImageCatalogSpec +
+

Specification of the desired behavior of the ClusterImageCatalog. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
+ +
+ +## Database + +

Database is the Schema for the databases API

+ + + + + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.k8s.enterprisedb.io/v1
kind [Required]
string
Database
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+DatabaseSpec +
+

Specification of the desired Database. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
status
+DatabaseStatus +
+

Most recently observed status of the Database. This data may not be up to +date. Populated by the system. Read-only. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
+ +
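+As a sketch, a declarative `Database` targeting an assumed `cluster-example`
+might look like this (the database and owner names are illustrative):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Database
+metadata:
+  name: db-one
+spec:
+  name: one        # name of the database inside PostgreSQL
+  owner: app       # assumed pre-existing role in the cluster
+  cluster:
+    name: cluster-example
+```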
+ +## ImageCatalog + +

ImageCatalog is the Schema for the imagecatalogs API

+ + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.k8s.enterprisedb.io/v1
kind [Required]
string
ImageCatalog
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+ImageCatalogSpec +
+

Specification of the desired behavior of the ImageCatalog. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
+ +
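+A hypothetical `ImageCatalog` mapping each PostgreSQL major version to a
+container image could be sketched as follows; the image references below
+are placeholders, not real coordinates:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: ImageCatalog
+metadata:
+  name: postgresql
+spec:
+  images:
+    - major: 15
+      image: registry.example.com/postgresql:15   # placeholder
+    - major: 16
+      image: registry.example.com/postgresql:16   # placeholder
+```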
+ +## Pooler + +

Pooler is the Schema for the poolers API

+ + + + + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.k8s.enterprisedb.io/v1
kind [Required]
string
Pooler
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+PoolerSpec +
+

Specification of the desired behavior of the Pooler. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
status
+PoolerStatus +
+

Most recently observed status of the Pooler. This data may not be up to +date. Populated by the system. Read-only. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
+ +
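+For illustration, a `Pooler` exposing a PgBouncer front end for the
+read/write traffic of an assumed `cluster-example` might be sketched as:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Pooler
+metadata:
+  name: pooler-example-rw
+spec:
+  cluster:
+    name: cluster-example
+  instances: 3
+  type: rw              # route connections to the current primary
+  pgbouncer:
+    poolMode: session
+```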
+ +## Publication + +

Publication is the Schema for the publications API

+ + + + + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.k8s.enterprisedb.io/v1
kind [Required]
string
Publication
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+PublicationSpec +
+ No description provided.
status [Required]
+PublicationStatus +
+ No description provided.
+ +
+ +## ScheduledBackup + +

ScheduledBackup is the Schema for the scheduledbackups API

+ + + + + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.k8s.enterprisedb.io/v1
kind [Required]
string
ScheduledBackup
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+ScheduledBackupSpec +
+

Specification of the desired behavior of the ScheduledBackup. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
status
+ScheduledBackupStatus +
+

Most recently observed status of the ScheduledBackup. This data may not be up +to date. Populated by the system. Read-only. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
+ +
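+As a sketch, a daily `ScheduledBackup` for an assumed `cluster-example`
+could look like the following; note the six-field, seconds-first cron
+format used by the operator:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: ScheduledBackup
+metadata:
+  name: backup-example-daily
+spec:
+  schedule: "0 0 0 * * *"    # every day at midnight (seconds come first)
+  backupOwnerReference: self
+  cluster:
+    name: cluster-example
+```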
+ +## Subscription + +

Subscription is the Schema for the subscriptions API

+ + + + + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.k8s.enterprisedb.io/v1
kind [Required]
string
Subscription
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+SubscriptionSpec +
+ No description provided.
status [Required]
+SubscriptionStatus +
+ No description provided.
+ +
+ +## AffinityConfiguration + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

AffinityConfiguration contains the info we need to create the +affinity rules for Pods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
enablePodAntiAffinity
+bool +
+

Activates anti-affinity for the pods. The operator will define pods +anti-affinity unless this field is explicitly set to false

+
topologyKey
+string +
+

TopologyKey to use for anti-affinity configuration. See k8s documentation +for more info on that

+
nodeSelector
+map[string]string +
+

NodeSelector is map of key-value pairs used to define the nodes on which +the pods can run. +More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/

+
nodeAffinity
+core/v1.NodeAffinity +
+

NodeAffinity describes node affinity scheduling rules for the pod. +More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity

+
tolerations
+[]core/v1.Toleration +
+

Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run +on tainted nodes. +More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/

+
podAntiAffinityType
+string +
+

PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+"required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are
+added if all the existing nodes don't match the required pod anti-affinity rule.
+More info:
+https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity

+
additionalPodAntiAffinity
+core/v1.PodAntiAffinity +
+

AdditionalPodAntiAffinity allows specifying pod anti-affinity terms to be added to the ones generated
+by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false.

+
additionalPodAffinity
+core/v1.PodAffinity +
+

AdditionalPodAffinity allows specifying pod affinity terms to be passed to all the cluster's pods.

+
+ +
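+To make the shape of this stanza concrete, here is a sketch of a cluster
+using preferred pod anti-affinity across nodes (all values are illustrative):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  affinity:
+    enablePodAntiAffinity: true
+    topologyKey: kubernetes.io/hostname
+    podAntiAffinityType: preferred   # "required" enforces strict spreading
+  storage:
+    size: 1Gi
+```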
+ +## AvailableArchitecture + +**Appears in:** + +- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus) + +

AvailableArchitecture represents the state of a cluster's architecture

+ + + + + + + + + + + +
FieldDescription
goArch [Required]
+string +
+

GoArch is the name of the executable architecture

+
hash [Required]
+string +
+

Hash is the hash of the executable

+
+ +
+ +## BackupConfiguration + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

BackupConfiguration defines how backups of the cluster are taken.
+The supported backup methods are BarmanObjectStore and VolumeSnapshot.
+For details and examples refer to the Backup and Recovery section of the
+documentation

+ + + + + + + + + + + + + + + + + +
FieldDescription
volumeSnapshot
+VolumeSnapshotConfiguration +
+

VolumeSnapshot provides the configuration for the execution of volume snapshot backups.

+
barmanObjectStore
+github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration +
+

The configuration for the barman-cloud tool suite

+
retentionPolicy
+string +
+

RetentionPolicy is the retention policy to be used for backups +and WALs (i.e. '60d'). The retention policy is expressed in the form +of XXu where XX is a positive integer and u is in [dwm] - +days, weeks, months. +It's currently only applicable when using the BarmanObjectStore method.

+
target
+BackupTarget +
+

The policy to decide which instance should perform backups. Available +options are empty string, which will default to prefer-standby policy, +primary to have backups run always on primary instances, prefer-standby +to have backups run preferably on the most updated standby, if available.

+
+ +
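+As an illustrative sketch only, a `backup` stanza combining an object store
+with a 30-day retention policy might look like this; the destination path
+and the credential secret are placeholders:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+  backup:
+    retentionPolicy: "30d"
+    barmanObjectStore:
+      destinationPath: "s3://BUCKET_NAME/path"   # placeholder
+      s3Credentials:
+        accessKeyId:
+          name: aws-creds                        # placeholder secret
+          key: ACCESS_KEY_ID
+        secretAccessKey:
+          name: aws-creds
+          key: ACCESS_SECRET_KEY
+```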
+ +## BackupMethod + +(Alias of `string`) + +**Appears in:** + +- [BackupSpec](#postgresql-k8s-enterprisedb-io-v1-BackupSpec) + +- [BackupStatus](#postgresql-k8s-enterprisedb-io-v1-BackupStatus) + +- [ScheduledBackupSpec](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec) + +

BackupMethod defines the way of executing the physical base backups of +the selected PostgreSQL instance

+ +
+ +## BackupPhase + +(Alias of `string`) + +**Appears in:** + +- [BackupStatus](#postgresql-k8s-enterprisedb-io-v1-BackupStatus) + +

BackupPhase is the phase of the backup

+ +
+ +## BackupPluginConfiguration + +**Appears in:** + +- [BackupSpec](#postgresql-k8s-enterprisedb-io-v1-BackupSpec) + +- [ScheduledBackupSpec](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec) + +

BackupPluginConfiguration contains the backup configuration used by +the backup plugin

+ + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

Name is the name of the plugin managing this backup

+
parameters
+map[string]string +
+

Parameters are the configuration parameters passed to the backup +plugin for this backup

+
+ +
+ +## BackupSnapshotElementStatus + +**Appears in:** + +- [BackupSnapshotStatus](#postgresql-k8s-enterprisedb-io-v1-BackupSnapshotStatus) + +

BackupSnapshotElementStatus is a volume snapshot that is part of a volume snapshot method backup

+ + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

Name is the snapshot resource name

+
type [Required]
+string +
+

Type is the role of the snapshot in the cluster, such as PG_DATA, PG_WAL and PG_TABLESPACE

+
tablespaceName
+string +
+

TablespaceName is the name of the snapshotted tablespace. Only set +when type is PG_TABLESPACE

+
+ +
+ +## BackupSnapshotStatus + +**Appears in:** + +- [BackupStatus](#postgresql-k8s-enterprisedb-io-v1-BackupStatus) + +

BackupSnapshotStatus contains the fields exclusive to the volumeSnapshot method backup

+ + + + + + + + +
FieldDescription
elements
+[]BackupSnapshotElementStatus +
+

The elements list, populated with the gathered volume snapshots

+
+ +
+ +## BackupSource + +**Appears in:** + +- [BootstrapRecovery](#postgresql-k8s-enterprisedb-io-v1-BootstrapRecovery) + +

BackupSource contains the backup we need to restore from, plus some +information that could be needed to correctly restore it.

+ + + + + + + + + + + +
FieldDescription
LocalObjectReference
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +
(Members of LocalObjectReference are embedded into this type.) + No description provided.
endpointCA
+github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector +
+

EndpointCA stores the CA bundle of the barman endpoint.
+Useful when using self-signed certificates to avoid
+errors with certificate issuer and barman-cloud-wal-archive.

+
+ +
+ +## BackupSpec + +**Appears in:** + +- [Backup](#postgresql-k8s-enterprisedb-io-v1-Backup) + +

BackupSpec defines the desired state of Backup

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
cluster [Required]
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +
+

The cluster to backup

+
target
+BackupTarget +
+

The policy to decide which instance should perform this backup. If empty, +it defaults to cluster.spec.backup.target. +Available options are empty string, primary and prefer-standby. +primary to have backups run always on primary instances, +prefer-standby to have backups run preferably on the most updated +standby, if available.

+
method
+BackupMethod +
+

The backup method to be used, possible options are barmanObjectStore, +volumeSnapshot or plugin. Defaults to: barmanObjectStore.

+
pluginConfiguration
+BackupPluginConfiguration +
+

Configuration parameters passed to the plugin managing this backup +Only available in 1.25 or later

+
online
+bool +
+

Whether the default type of backup with volume snapshots is +online/hot (true, default) or offline/cold (false) +Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'

+
onlineConfiguration
+OnlineConfiguration +
+

Configuration parameters to control the online/hot backup with volume snapshots +Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza

+
+ +
+ +## BackupStatus + +**Appears in:** + +- [Backup](#postgresql-k8s-enterprisedb-io-v1-Backup) + +

BackupStatus defines the observed state of Backup

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
BarmanCredentials
+github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanCredentials +
(Members of BarmanCredentials are embedded into this type.) +

The potential credentials for each cloud provider

+
endpointCA
+github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector +
+

EndpointCA stores the CA bundle of the barman endpoint.
+Useful when using self-signed certificates to avoid
+errors with certificate issuer and barman-cloud-wal-archive.

+
endpointURL
+string +
+

Endpoint to be used to upload data to the cloud, +overriding the automatic endpoint discovery

+
destinationPath
+string +
+

The path where to store the backup (i.e. s3://bucket/path/to/folder);
+this path, with different destination folders, will be used for WALs
+and for data. This may not be populated in case of errors.

+
serverName
+string +
+

The server name on S3, the cluster name is used if this +parameter is omitted

+
encryption
+string +
+

Encryption method required by the S3 API

+
backupId
+string +
+

The ID of the Barman backup

+
backupName
+string +
+

The Name of the Barman backup

+
phase
+BackupPhase +
+

The last backup status

+
startedAt
+meta/v1.Time +
+

When the backup was started

+
stoppedAt
+meta/v1.Time +
+

When the backup was terminated

+
beginWal
+string +
+

The starting WAL

+
endWal
+string +
+

The ending WAL

+
beginLSN
+string +
+

The starting xlog

+
endLSN
+string +
+

The ending xlog

+
error
+string +
+

The detected error

+
commandOutput
+string +
+

Unused. Retained for compatibility with old versions.

+
commandError
+string +
+

The backup command output in case of error

+
backupLabelFile
+[]byte +
+

Backup label file content as returned by Postgres in case of online (hot) backups

+
tablespaceMapFile
+[]byte +
+

Tablespace map file content as returned by Postgres in case of online (hot) backups

+
instanceID
+InstanceID +
+

Information to identify the instance where the backup has been taken from

+
snapshotBackupStatus
+BackupSnapshotStatus +
+

Status of the volumeSnapshot backup

+
method
+BackupMethod +
+

The backup method being used

+
online
+bool +
+

Whether the backup was online/hot (true) or offline/cold (false)

+
pluginMetadata
+map[string]string +
+

A map containing the plugin metadata

+
+ +
+ +## BackupTarget + +(Alias of `string`) + +**Appears in:** + +- [BackupConfiguration](#postgresql-k8s-enterprisedb-io-v1-BackupConfiguration) + +- [BackupSpec](#postgresql-k8s-enterprisedb-io-v1-BackupSpec) + +- [ScheduledBackupSpec](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec) + +

BackupTarget describes the preferred targets for a backup

+ +
+ +## BootstrapConfiguration + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

BootstrapConfiguration contains information about how to create the PostgreSQL +cluster. Only a single bootstrap method can be defined among the supported +ones. initdb will be used as the bootstrap method if left +unspecified. Refer to the Bootstrap page of the documentation for more +information.

+ + + + + + + + + + + + + + +
FieldDescription
initdb
+BootstrapInitDB +
+

Bootstrap the cluster via initdb

+
recovery
+BootstrapRecovery +
+

Bootstrap the cluster from a backup

+
pg_basebackup
+BootstrapPgBaseBackup +
+

Bootstrap the cluster taking a physical backup of another compatible +PostgreSQL instance

+
+ +
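+For orientation, selecting the default `initdb` bootstrap method could be
+sketched as follows (`app` is the documented default for both the database
+and its owner):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+  bootstrap:
+    initdb:
+      database: app
+      owner: app
+```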
+ +## BootstrapInitDB + +**Appears in:** + +- [BootstrapConfiguration](#postgresql-k8s-enterprisedb-io-v1-BootstrapConfiguration) + +

BootstrapInitDB is the configuration of the bootstrap process when +initdb is used +Refer to the Bootstrap page of the documentation for more information.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
database
+string +
+

Name of the database used by the application. Default: app.

+
owner
+string +
+

Name of the owner of the database in the instance to be used +by applications. Defaults to the value of the database key.

+
secret
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +
+

Name of the secret containing the initial credentials for the +owner of the user database. If empty a new secret will be +created from scratch

+
redwood
+bool +
+

Whether to enable or disable Redwood compatibility. Requires
+EPAS; for EPAS it defaults to true

+
options
+[]string +
+

The list of options that must be passed to initdb when creating the cluster. +Deprecated: This could lead to inconsistent configurations, +please use the explicit provided parameters instead. +If defined, explicit values will be ignored.

+
dataChecksums
+bool +
+

Whether the -k option should be passed to initdb, +enabling checksums on data pages (default: false)

+
encoding
+string +
+

The value to be passed as option --encoding for initdb (default: UTF8)

+
localeCollate
+string +
+

The value to be passed as option --lc-collate for initdb (default: C)

+
localeCType
+string +
+

The value to be passed as option --lc-ctype for initdb (default: C)

+
locale
+string +
+

Sets the default collation order and character classification in the new database. +Only available in 1.25 or later

+
localeProvider
+string +
+

This option sets the locale provider for databases created in the new cluster. +Available from PostgreSQL 16. +Only available in 1.25 or later

+
icuLocale
+string +
+

Specifies the ICU locale when the ICU provider is used. +This option requires localeProvider to be set to icu. +Available from PostgreSQL 15. +Only available in 1.25 or later

+
icuRules
+string +
+

Specifies additional collation rules to customize the behavior of the default collation. +This option requires localeProvider to be set to icu. +Available from PostgreSQL 16. +Only available in 1.25 or later

+
builtinLocale
+string +
+

Specifies the locale name when the builtin provider is used. +This option requires localeProvider to be set to builtin. +Available from PostgreSQL 17. +Only available in 1.25 or later

+
walSegmentSize
+int +
+

The value in megabytes (1 to 1024) to be passed to the --wal-segsize +option for initdb (default: empty, resulting in PostgreSQL default: 16MB)

+
postInitSQL
+[]string +
+

List of SQL queries to be executed as a superuser in the postgres +database right after the cluster has been created - to be used with extreme care +(by default empty)

+
postInitApplicationSQL
+[]string +
+

List of SQL queries to be executed as a superuser in the application +database right after the cluster has been created - to be used with extreme care +(by default empty)

+
postInitTemplateSQL
+[]string +
+

List of SQL queries to be executed as a superuser in the template1 +database right after the cluster has been created - to be used with extreme care +(by default empty)

+
import
+Import +
+

Bootstraps the new cluster by importing data from an existing PostgreSQL +instance using logical backup (pg_dump and pg_restore)

+
postInitApplicationSQLRefs
+SQLRefs +
+

List of references to ConfigMaps or Secrets containing SQL files +to be executed as a superuser in the application database right after +the cluster has been created. The references are processed in a specific order: +first, all Secrets are processed, followed by all ConfigMaps. +Within each group, the processing order follows the sequence specified +in their respective arrays. +(by default empty)

+
postInitTemplateSQLRefs
+SQLRefs +
+

List of references to ConfigMaps or Secrets containing SQL files +to be executed as a superuser in the template1 database right after +the cluster has been created. The references are processed in a specific order: +first, all Secrets are processed, followed by all ConfigMaps. +Within each group, the processing order follows the sequence specified +in their respective arrays. +(by default empty) +Only available in 1.25 or later

+
postInitSQLRefs
+SQLRefs +
+

List of references to ConfigMaps or Secrets containing SQL files +to be executed as a superuser in the postgres database right after +the cluster has been created. The references are processed in a specific order: +first, all Secrets are processed, followed by all ConfigMaps. +Within each group, the processing order follows the sequence specified +in their respective arrays. +(by default empty) +Only available in 1.25 or later

+
+ +
+ +## BootstrapPgBaseBackup + +**Appears in:** + +- [BootstrapConfiguration](#postgresql-k8s-enterprisedb-io-v1-BootstrapConfiguration) + +

BootstrapPgBaseBackup contains the configuration required to take +a physical backup of an existing PostgreSQL cluster

+ + + + + + + + + + + + + + + + + +
FieldDescription
source [Required]
+string +
+

The name of the server of which we need to take a physical backup

+
database
+string +
+

Name of the database used by the application. Default: app.

+
owner
+string +
+

Name of the owner of the database in the instance to be used +by applications. Defaults to the value of the database key.

+
secret
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +
+

Name of the secret containing the initial credentials for the +owner of the user database. If empty a new secret will be +created from scratch

+
+ +
+ +## BootstrapRecovery + +**Appears in:** + +- [BootstrapConfiguration](#postgresql-k8s-enterprisedb-io-v1-BootstrapConfiguration) + +

BootstrapRecovery contains the configuration required to restore
+from an existing cluster using 3 methodologies: external cluster,
+volume snapshots or backup objects. Full recovery and Point-In-Time
+Recovery are supported.
+The method can also be used to create clusters in continuous recovery
+(replica clusters), also supporting cascading replication when instances > 1.
+Once the cluster exits recovery, the password for the superuser
+will be changed through the provided secret.
+Refer to the Bootstrap page of the documentation for more information.
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
backup
+BackupSource +
+

The backup object containing the physical base backup from which to +initiate the recovery procedure. +Mutually exclusive with source and volumeSnapshots.

+
source
+string +
+

The external cluster whose backup we will restore. This is also +used as the name of the folder under which the backup is stored, +so it must be set to the name of the source cluster +Mutually exclusive with backup.

+
volumeSnapshots
+DataSource +
+

The static PVC data source(s) from which to initiate the
+recovery procedure. Currently supporting VolumeSnapshot
+and PersistentVolumeClaim resources that map an existing
+PVC group, compatible with EDB Postgres for Kubernetes, and taken with
+a cold backup copy on a fenced Postgres instance (a limitation
+that will be removed in the future when online backup
+will be implemented).
+Mutually exclusive with backup.

+
recoveryTarget
+RecoveryTarget +
+

By default, the recovery process applies all the available +WAL files in the archive (full recovery). However, you can also +end the recovery as soon as a consistent state is reached or +recover to a point-in-time (PITR) by specifying a RecoveryTarget object, +as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). +More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET

+
database
+string +
+

Name of the database used by the application. Default: app.

+
owner
+string +
+

Name of the owner of the database in the instance to be used +by applications. Defaults to the value of the database key.

+
secret
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +
+

Name of the secret containing the initial credentials for the +owner of the user database. If empty a new secret will be +created from scratch

+
+ +
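+As a sketch, bootstrapping a new cluster from an existing `Backup` object
+(assumed here to be named `backup-example`) might look like:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-restore
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+  bootstrap:
+    recovery:
+      backup:
+        name: backup-example   # assumed existing Backup resource
+```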
+ +## CatalogImage + +**Appears in:** + +- [ImageCatalogSpec](#postgresql-k8s-enterprisedb-io-v1-ImageCatalogSpec) + +

CatalogImage defines the image and major version

+ + + + + + + + + + + +
FieldDescription
image [Required]
+string +
+

The image reference

+
major [Required]
+int +
+

The PostgreSQL major version of the image. Must be unique within the catalog.

+
+ +
+ +## CertificatesConfiguration + +**Appears in:** + +- [CertificatesStatus](#postgresql-k8s-enterprisedb-io-v1-CertificatesStatus) + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

CertificatesConfiguration contains the needed configurations to handle server certificates.

+ + + + + + + + + + + + + + + + + + + + +
FieldDescription
serverCASecret
+string +
+

The secret containing the Server CA certificate. If not defined, a new secret will be created
+with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+Contains:
+
+- `ca.crt`: CA that should be used to validate the server certificate,
+  used as `sslrootcert` in client connection strings.
+- `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+  this can be omitted.
serverTLSSecret
+string +
+

The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as +ssl_cert_file and ssl_key_file so that clients can connect to postgres securely. +If not defined, ServerCASecret must provide also ca.key and a new secret will be +created using the provided CA.

+
replicationTLSSecret
+string +
+

The secret of type kubernetes.io/tls containing the client certificate to authenticate as +the streaming_replica user. +If not defined, ClientCASecret must provide also ca.key, and a new secret will be +created using the provided CA.

+
clientCASecret
+string +
+

The secret containing the Client CA certificate. If not defined, a new secret will be created
+with a self-signed CA and will be used to generate all the client certificates.
+
+Contains:
+
+- `ca.crt`: CA that should be used to validate the client certificates,
+  used as `ssl_ca_file` of all the instances.
+- `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+  this can be omitted.
+
serverAltDNSNames
+[]string +
+

The list of the server alternative DNS names to be added to the generated server TLS certificates, when required.

+
+ +
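+To illustrate, a `certificates` stanza supplying user-provided server
+certificates might be sketched as below; both secret names are placeholders
+that must exist before the cluster is created:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+  certificates:
+    serverCASecret: my-server-ca     # placeholder, contains ca.crt
+    serverTLSSecret: my-server-tls   # placeholder kubernetes.io/tls secret
+```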
+ +## CertificatesStatus + +**Appears in:** + +- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus) + +

CertificatesStatus contains configuration certificates and related expiration dates.

+ + + + + + + + + + + +
FieldDescription
CertificatesConfiguration
+CertificatesConfiguration +
(Members of CertificatesConfiguration are embedded into this type.) +

Needed configurations to handle server certificates, initialized with default values, if needed.

+
expirations
+map[string]string +
+

Expiration dates for all certificates.

+
+ +
+ +## ClusterMonitoringTLSConfiguration + +**Appears in:** + +- [MonitoringConfiguration](#postgresql-k8s-enterprisedb-io-v1-MonitoringConfiguration) + +

ClusterMonitoringTLSConfiguration is the type containing the TLS configuration +for the cluster's monitoring

+ + + + + + + + +
FieldDescription
enabled
+bool +
+

Enable TLS for the monitoring endpoint. +Changing this option will force a rollout of all instances.

+
+ +
+ +## ClusterSpec + +**Appears in:** + +- [Cluster](#postgresql-k8s-enterprisedb-io-v1-Cluster) + +

ClusterSpec defines the desired state of Cluster

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
description
+string +
+

Description of this PostgreSQL cluster

+
inheritedMetadata
+EmbeddedObjectMetadata +
+

Metadata that will be inherited by all objects related to the Cluster

+
imageName
+string +
+

Name of the container image, supporting both tags (<image>:<tag>) +and digests for deterministic and repeatable deployments +(<image>:<tag>@sha256:<digestValue>)

+
imageCatalogRef
+ImageCatalogRef +
+

Defines the major PostgreSQL version we want to use within an ImageCatalog +Only available in 1.25 or later

+
imagePullPolicy
+core/v1.PullPolicy +
+

Image pull policy. +One of Always, Never or IfNotPresent. +If not defined, it defaults to IfNotPresent. +Cannot be updated. +More info: https://kubernetes.io/docs/concepts/containers/images#updating-images

+
schedulerName
+string +
+

If specified, the pod will be dispatched by specified Kubernetes +scheduler. If not specified, the pod will be dispatched by the default +scheduler. More info: +https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/

+
postgresUID
+int64 +
+

The UID of the postgres user inside the image, defaults to 26

+
postgresGID
+int64 +
+

The GID of the postgres user inside the image, defaults to 26

+
instances [Required]
+int +
+

Number of instances required in the cluster

+
minSyncReplicas
+int +
+

Minimum number of instances required in synchronous replication with the +primary. Undefined or 0 allow writes to complete when no standby is +available.

+
maxSyncReplicas
+int +
+

The target value for the synchronous replication quorum, that can be +decreased if the number of ready standbys is lower than this. +Undefined or 0 disable synchronous replication.

+
postgresql
+PostgresConfiguration +
+

Configuration of the PostgreSQL server

+
replicationSlots
+ReplicationSlotsConfiguration +
+

Replication slots management configuration

+
bootstrap
+BootstrapConfiguration +
+

Instructions to bootstrap this cluster

+
replica
+ReplicaClusterConfiguration +
+

Replica cluster configuration

+
superuserSecret
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +
+

The secret containing the superuser password. If not defined a new +secret will be created with a randomly generated password

+
enableSuperuserAccess
+bool +
+

When this option is enabled, the operator will use the SuperuserSecret +to update the postgres user password (if the secret is +not present, the operator will automatically create one). When this +option is disabled, the operator will ignore the SuperuserSecret content, delete +it when automatically created, and then blank the password of the postgres +user by setting it to NULL. Disabled by default.

+
certificates
+CertificatesConfiguration +
+

The configuration for the CA and related certificates

+
imagePullSecrets
+[]github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +
+

The list of pull secrets to be used to pull the images. If the license key +contains a pull secret that secret will be automatically included.

+
storage
+StorageConfiguration +
+

Configuration of the storage of the instances

+
serviceAccountTemplate
+ServiceAccountTemplate +
+

Configure the generation of the service account

+
walStorage
+StorageConfiguration +
+

Configuration of the storage for PostgreSQL WAL (Write-Ahead Log)

+
ephemeralVolumeSource
+core/v1.EphemeralVolumeSource +
+

EphemeralVolumeSource allows the user to configure the source of ephemeral volumes.

+
startDelay
+int32 +
+

The time in seconds that is allowed for a PostgreSQL instance to +successfully start up (default 3600). +The startup probe failure threshold is derived from this value using the formula: +ceiling(startDelay / 10).

+
stopDelay
+int32 +
+

The time in seconds that is allowed for a PostgreSQL instance to +gracefully shutdown (default 1800)

+
smartStopDelay
+int32 +
+

Deprecated: please use SmartShutdownTimeout instead

+
smartShutdownTimeout
+int32 +
+

The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. +Make sure you reserve enough time for the operator to request a fast shutdown of Postgres +(that is: stopDelay - smartShutdownTimeout).

+
switchoverDelay
+int32 +
+

The time in seconds that is allowed for a primary PostgreSQL instance +to gracefully shutdown during a switchover. +Default value is 3600 seconds (1 hour).

+
failoverDelay
+int32 +
+

The amount of time (in seconds) to wait before triggering a failover +after the primary PostgreSQL instance in the cluster was detected +to be unhealthy

+
livenessProbeTimeout
+int32 +
+

LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+to successfully respond to the liveness probe (default 30).
+The liveness probe failure threshold is derived from this value using the formula:
+ceiling(livenessProbeTimeout / 10).

+
affinity
+AffinityConfiguration +
+

Affinity/Anti-affinity rules for Pods

+
topologySpreadConstraints
+[]core/v1.TopologySpreadConstraint +
+

TopologySpreadConstraints specifies how to spread matching pods among the given topology. +More info: +https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/

+
resources
+core/v1.ResourceRequirements +
+

Resources requirements of every generated Pod. Please refer to +https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +for more information.

+
ephemeralVolumesSizeLimit
+EphemeralVolumesSizeLimitConfiguration +
+

EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral +volumes

+
priorityClassName
+string +
+

Name of the priority class which will be used in every generated Pod, if the PriorityClass +specified does not exist, the pod will not be able to schedule. Please refer to +https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass +for more information

+
primaryUpdateStrategy
+PrimaryUpdateStrategy +
+

Deployment strategy to follow to upgrade the primary server during a rolling +update procedure, after all replicas have been successfully updated: +it can be automated (unsupervised - default) or manual (supervised)

+
primaryUpdateMethod
+PrimaryUpdateMethod +
+

Method to follow to upgrade the primary server during a rolling +update procedure, after all replicas have been successfully updated: +it can be with a switchover (switchover) or in-place (restart - default)

+
backup
+BackupConfiguration +
+

The configuration to be used for backups

+
nodeMaintenanceWindow
+NodeMaintenanceWindow +
+

Define a maintenance window for the Kubernetes nodes

+
licenseKey
+string +
+

The license key of the cluster. When empty, the cluster operates in +trial mode and after the expiry date (default 30 days) the operator +will cease any reconciliation attempt. For details, please refer to +the license agreement that comes with the operator.

+
licenseKeySecret
+core/v1.SecretKeySelector +
+

The reference to the license key. When this is set, it takes precedence over LicenseKey.

+
monitoring
+MonitoringConfiguration +
+

The configuration of the monitoring infrastructure of this cluster

+
externalClusters
+ExternalClusterList +
+

The list of external clusters which are used in the configuration

+
logLevel
+string +
+

The instances' log level, one of the following values: error, warning, info (default), debug, trace

+
projectedVolumeTemplate
+core/v1.ProjectedVolumeSource +
+

Template to be used to define projected volumes, projected volumes will be mounted +under /projected base folder

+
env
+[]core/v1.EnvVar +
+

Env follows the Env format to pass environment variables +to the pods created in the cluster

+
envFrom
+[]core/v1.EnvFromSource +
+

EnvFrom follows the EnvFrom format to pass environment variables +sources to the pods to be used by Env

+
managed
+ManagedConfiguration +
+

The configuration that is used by the portions of PostgreSQL that are managed by the instance manager

+
seccompProfile
+core/v1.SeccompProfile +
+

The SeccompProfile applied to every Pod and Container. +Defaults to: RuntimeDefault

+
tablespaces
+[]TablespaceConfiguration +
+

The tablespaces configuration

+
enablePDB
+bool +
+

Manage the PodDisruptionBudget resources within the cluster. When +configured as true (default setting), the pod disruption budgets +will safeguard the primary node from being terminated. Conversely, +setting it to false will result in the absence of any +PodDisruptionBudget resource, permitting the shutdown of all nodes +hosting the PostgreSQL cluster. This latter configuration is +advisable for any PostgreSQL cluster employed for +development/staging purposes.

+
plugins
+PluginConfigurationList +
+

The plugins configuration, containing +any plugin to be loaded with the corresponding configuration +Only available in 1.25 or later

+
probes
+ProbesConfiguration +
+

The configuration of the probes to be injected +in the PostgreSQL Pods.

+
+ +
+ +## ClusterStatus + +**Appears in:** + +- [Cluster](#postgresql-k8s-enterprisedb-io-v1-Cluster) + +

ClusterStatus defines the observed state of Cluster

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
instances
+int +
+

The total number of PVC Groups detected in the cluster. It may differ from the number of existing instance pods.

+
readyInstances
+int +
+

The total number of ready instances in the cluster. It is equal to the number of ready instance pods.

+
instancesStatus
+map[PodStatus][]string +
+

InstancesStatus indicates in which status the instances are

+
instancesReportedState
+map[PodName]InstanceReportedState +
+

The reported state of the instances during the last reconciliation loop

+
managedRolesStatus
+ManagedRoles +
+

ManagedRolesStatus reports the state of the managed roles in the cluster

+
tablespacesStatus
+[]TablespaceState +
+

TablespacesStatus reports the state of the declarative tablespaces in the cluster

+
timelineID
+int +
+

The timeline of the Postgres cluster

+
topology
+Topology +
+

Instances topology.

+
latestGeneratedNode
+int +
+

ID of the latest generated node (used to avoid node name clashing)

+
currentPrimary
+string +
+

Current primary instance

+
targetPrimary
+string +
+

Target primary instance, this is different from the previous one +during a switchover or a failover

+
lastPromotionToken
+string +
+

LastPromotionToken is the last verified promotion token that +was used to promote a replica cluster

+
pvcCount
+int32 +
+

How many PVCs have been created by this cluster

+
jobCount
+int32 +
+

How many Jobs have been created by this cluster

+
danglingPVC
+[]string +
+

List of all the PVCs created by this cluster and still available +which are not attached to a Pod

+
resizingPVC
+[]string +
+

List of all the PVCs that have ResizingPVC condition.

+
initializingPVC
+[]string +
+

List of all the PVCs that are being initialized by this cluster

+
healthyPVC
+[]string +
+

List of all the PVCs not dangling nor initializing

+
unusablePVC
+[]string +
+

List of all the PVCs that are unusable because another PVC is missing

+
licenseStatus
+github.com/EnterpriseDB/cloud-native-postgres/pkg/licensekey.Status +
+

Status of the license

+
writeService
+string +
+

Current write pod

+
readService
+string +
+

Current list of read pods

+
phase
+string +
+

Current phase of the cluster

+
phaseReason
+string +
+

Reason for the current phase

+
secretsResourceVersion
+SecretsResourceVersion +
+

The list of resource versions of the secrets +managed by the operator. Every change here is done in the +interest of the instance manager, which will refresh the +secret data

+
configMapResourceVersion
+ConfigMapResourceVersion +
+

The list of resource versions of the configmaps, +managed by the operator. Every change here is done in the +interest of the instance manager, which will refresh the +configmap data

+
certificates
+CertificatesStatus +
+

The configuration for the CA and related certificates, initialized with defaults.

+
firstRecoverabilityPoint
+string +
+

The first recoverability point, stored as a date in RFC3339 format. +This field is calculated from the content of FirstRecoverabilityPointByMethod

+
firstRecoverabilityPointByMethod
+map[BackupMethod]meta/v1.Time +
+

The first recoverability point, stored as a date in RFC3339 format, per backup method type

+
lastSuccessfulBackup
+string +
+

Last successful backup, stored as a date in RFC3339 format +This field is calculated from the content of LastSuccessfulBackupByMethod

+
lastSuccessfulBackupByMethod
+map[BackupMethod]meta/v1.Time +
+

Last successful backup, stored as a date in RFC3339 format, per backup method type

+
lastFailedBackup
+string +
+

Stored as a date in RFC3339 format

+
cloudNativePostgresqlCommitHash
+string +
+

The commit hash of the operator build that is currently running

+
currentPrimaryTimestamp
+string +
+

The timestamp when the last actual promotion to primary has occurred

+
currentPrimaryFailingSinceTimestamp
+string +
+

The timestamp when the primary was detected to be unhealthy +This field is reported when .spec.failoverDelay is populated or during online upgrades

+
targetPrimaryTimestamp
+string +
+

The timestamp when the last request for a new primary has occurred

+
poolerIntegrations
+PoolerIntegrations +
+

The integration needed by poolers referencing the cluster

+
cloudNativePostgresqlOperatorHash
+string +
+

The hash of the binary of the operator

+
availableArchitectures
+[]AvailableArchitecture +
+

AvailableArchitectures reports the available architectures of a cluster

+
conditions
+[]meta/v1.Condition +
+

Conditions for cluster object

+
instanceNames
+[]string +
+

List of instance names in the cluster

+
onlineUpdateEnabled
+bool +
+

OnlineUpdateEnabled shows if the online upgrade is enabled inside the cluster

+
azurePVCUpdateEnabled
+bool +
+

AzurePVCUpdateEnabled shows if the PVC online upgrade is enabled for this cluster

+
image
+string +
+

Image contains the image name used by the pods

+
pluginStatus
+[]PluginStatus +
+

PluginStatus is the status of the loaded plugins

+
switchReplicaClusterStatus
+SwitchReplicaClusterStatus +
+

SwitchReplicaClusterStatus is the status of the switch to replica cluster

+
demotionToken
+string +
+

DemotionToken is a JSON token containing the information +from pg_controldata such as Database system identifier, Latest checkpoint's +TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO +WAL file, and Time of latest checkpoint

+
+ +
+ +## ConfigMapResourceVersion + +**Appears in:** + +- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus) + +

ConfigMapResourceVersion holds the resource versions of the config maps
+managed by the operator

+ + + + + + + + +
FieldDescription
metrics
+map[string]string +
+

A map with the versions of all the config maps used to pass metrics. +Map keys are the config map names, map values are the versions

+
+ +
+ +## DataDurabilityLevel + +(Alias of `string`) + +**Appears in:** + +- [SynchronousReplicaConfiguration](#postgresql-k8s-enterprisedb-io-v1-SynchronousReplicaConfiguration) + +

DataDurabilityLevel specifies how strictly to enforce synchronous replication +when cluster instances are unavailable. Options are required or preferred.

+ +
+ +## DataSource + +**Appears in:** + +- [BootstrapRecovery](#postgresql-k8s-enterprisedb-io-v1-BootstrapRecovery) + +

DataSource contains the configuration required to bootstrap a +PostgreSQL cluster from an existing storage

+ + + + + + + + + + + + + + +
FieldDescription
storage [Required]
+core/v1.TypedLocalObjectReference +
+

Configuration of the storage of the instances

+
walStorage
+core/v1.TypedLocalObjectReference +
+

Configuration of the storage for PostgreSQL WAL (Write-Ahead Log)

+
tablespaceStorage
+map[string]core/v1.TypedLocalObjectReference +
+

Configuration of the storage for PostgreSQL tablespaces

+
+ +
+ +## DatabaseReclaimPolicy + +(Alias of `string`) + +**Appears in:** + +- [DatabaseSpec](#postgresql-k8s-enterprisedb-io-v1-DatabaseSpec) + +

DatabaseReclaimPolicy describes a policy for end-of-life maintenance of databases.

+ +
+ +## DatabaseRoleRef + +**Appears in:** + +- [TablespaceConfiguration](#postgresql-k8s-enterprisedb-io-v1-TablespaceConfiguration) + +

DatabaseRoleRef is a reference to a role available inside PostgreSQL

+ + + + + + + + +
FieldDescription
name
+string +
+ No description provided.
+ +
+ +## DatabaseSpec + +**Appears in:** + +- [Database](#postgresql-k8s-enterprisedb-io-v1-Database) + +

DatabaseSpec is the specification of a PostgreSQL Database, built around the
+CREATE DATABASE, ALTER DATABASE, and DROP DATABASE SQL commands of
+PostgreSQL.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
cluster [Required]
+core/v1.LocalObjectReference +
+

The name of the PostgreSQL cluster hosting the database.

+
ensure
+EnsureOption +
+

Ensure the PostgreSQL database is present or absent - defaults to "present".

+
name [Required]
+string +
+

The name of the database to create inside PostgreSQL. This setting cannot be changed.

+
owner [Required]
+string +
+

Maps to the OWNER parameter of CREATE DATABASE. +Maps to the OWNER TO command of ALTER DATABASE. +The role name of the user who owns the database inside PostgreSQL.

+
template
+string +
+

Maps to the TEMPLATE parameter of CREATE DATABASE. This setting +cannot be changed. The name of the template from which to create +this database.

+
encoding
+string +
+

Maps to the ENCODING parameter of CREATE DATABASE. This setting +cannot be changed. Character set encoding to use in the database.

+
locale
+string +
+

Maps to the LOCALE parameter of CREATE DATABASE. This setting +cannot be changed. Sets the default collation order and character +classification in the new database.

+
localeProvider
+string +
+

Maps to the LOCALE_PROVIDER parameter of CREATE DATABASE. This +setting cannot be changed. This option sets the locale provider for +databases created in the new cluster. Available from PostgreSQL 16.

+
localeCollate
+string +
+

Maps to the LC_COLLATE parameter of CREATE DATABASE. This +setting cannot be changed.

+
localeCType
+string +
+

Maps to the LC_CTYPE parameter of CREATE DATABASE. This setting +cannot be changed.

+
icuLocale
+string +
+

Maps to the ICU_LOCALE parameter of CREATE DATABASE. This +setting cannot be changed. Specifies the ICU locale when the ICU +provider is used. This option requires localeProvider to be set to +icu. Available from PostgreSQL 15.

+
icuRules
+string +
+

Maps to the ICU_RULES parameter of CREATE DATABASE. This setting +cannot be changed. Specifies additional collation rules to customize +the behavior of the default collation. This option requires +localeProvider to be set to icu. Available from PostgreSQL 16.

+
builtinLocale
+string +
+

Maps to the BUILTIN_LOCALE parameter of CREATE DATABASE. This +setting cannot be changed. Specifies the locale name when the +builtin provider is used. This option requires localeProvider to +be set to builtin. Available from PostgreSQL 17.

+
collationVersion
+string +
+

Maps to the COLLATION_VERSION parameter of CREATE DATABASE. This +setting cannot be changed.

+
isTemplate
+bool +
+

Maps to the IS_TEMPLATE parameter of CREATE DATABASE and ALTER DATABASE. If true, this database is considered a template and can +be cloned by any user with CREATEDB privileges.

+
allowConnections
+bool +
+

Maps to the ALLOW_CONNECTIONS parameter of CREATE DATABASE and +ALTER DATABASE. If false then no one can connect to this database.

+
connectionLimit
+int +
+

Maps to the CONNECTION LIMIT clause of CREATE DATABASE and +ALTER DATABASE. How many concurrent connections can be made to +this database. -1 (the default) means no limit.

+
tablespace
+string +
+

Maps to the TABLESPACE parameter of CREATE DATABASE. +Maps to the SET TABLESPACE command of ALTER DATABASE. +The name of the tablespace (in PostgreSQL) that will be associated +with the new database. This tablespace will be the default +tablespace used for objects created in this database.

+
databaseReclaimPolicy
+DatabaseReclaimPolicy +
+

The policy for end-of-life maintenance of this database.

+
+ +
+ +## DatabaseStatus + +**Appears in:** + +- [Database](#postgresql-k8s-enterprisedb-io-v1-Database) + +

DatabaseStatus defines the observed state of Database

+ + + + + + + + + + + + + + +
FieldDescription
observedGeneration
+int64 +
+

A sequence number representing the latest +desired state that was synchronized

+
applied
+bool +
+

Applied is true if the database was reconciled correctly

+
message
+string +
+

Message is the reconciliation output message

+
+ +
+ +## EPASConfiguration + +**Appears in:** + +- [PostgresConfiguration](#postgresql-k8s-enterprisedb-io-v1-PostgresConfiguration) + +

EPASConfiguration contains EDB Postgres Advanced Server specific configurations

+ + + + + + + + + + + +
FieldDescription
audit
+bool +
+

If true enables edb_audit logging

+
tde
+TDEConfiguration +
+

TDE configuration

+
+ +
+ +## EmbeddedObjectMetadata + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

EmbeddedObjectMetadata contains metadata to be inherited by all resources related to a Cluster

+ + + + + + + + + + + +
FieldDescription
labels
+map[string]string +
+ No description provided.
annotations
+map[string]string +
+ No description provided.
+ +
+ +## EnsureOption + +(Alias of `string`) + +**Appears in:** + +- [DatabaseSpec](#postgresql-k8s-enterprisedb-io-v1-DatabaseSpec) + +- [RoleConfiguration](#postgresql-k8s-enterprisedb-io-v1-RoleConfiguration) + +

EnsureOption represents whether we should enforce the presence or absence of +a Role in a PostgreSQL instance

+ +
+ +## EphemeralVolumesSizeLimitConfiguration + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

EphemeralVolumesSizeLimitConfiguration contains the configuration of the ephemeral +storage

+ + + + + + + + + + + +
FieldDescription
shm
+k8s.io/apimachinery/pkg/api/resource.Quantity +
+

Shm is the size limit of the shared memory volume

+
temporaryData
+k8s.io/apimachinery/pkg/api/resource.Quantity +
+

TemporaryData is the size limit of the temporary data volume

+
+ +
+ +## ExternalClusterList + +(Alias of `[]github.com/EnterpriseDB/cloud-native-postgres/api/v1.ExternalCluster`) + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

ExternalClusterList is a list of external clusters

+ +
+ +## ImageCatalogRef + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

ImageCatalogRef defines the reference to a major version in an ImageCatalog

+ + + + + + + + + + + +
FieldDescription
TypedLocalObjectReference
+core/v1.TypedLocalObjectReference +
(Members of TypedLocalObjectReference are embedded into this type.) + No description provided.
major [Required]
+int +
+

The major version of PostgreSQL we want to use from the ImageCatalog

+
+ +
+ +## ImageCatalogSpec + +**Appears in:** + +- [ClusterImageCatalog](#postgresql-k8s-enterprisedb-io-v1-ClusterImageCatalog) + +- [ImageCatalog](#postgresql-k8s-enterprisedb-io-v1-ImageCatalog) + +

ImageCatalogSpec defines the desired ImageCatalog

+ + + + + + + + +
FieldDescription
images [Required]
+[]CatalogImage +
+

List of CatalogImages available in the catalog

+
+ +
+ +## Import + +**Appears in:** + +- [BootstrapInitDB](#postgresql-k8s-enterprisedb-io-v1-BootstrapInitDB) + +

Import contains the configuration to initialize a database from a logical snapshot of an externalCluster

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
source [Required]
+ImportSource +
+

The source of the import

+
type [Required]
+SnapshotType +
+

The import type. Can be microservice or monolith.

+
databases [Required]
+[]string +
+

The databases to import

+
roles
+[]string +
+

The roles to import

+
postImportApplicationSQL
+[]string +
+

List of SQL queries to be executed as a superuser in the application +database right after it is imported - to be used with extreme care +(by default empty). Only available in microservice type.

+
schemaOnly
+bool +
+

When set to true, only the pre-data and post-data sections of +pg_restore are invoked, avoiding data import. Default: false.

+
pgDumpExtraOptions
+[]string +
+

List of custom options to pass to the pg_dump command. IMPORTANT: +Use these options with caution and at your own risk, as the operator +does not validate their content. Be aware that certain options may +conflict with the operator's intended functionality or design. +Only available in 1.25 or later

+
pgRestoreExtraOptions
+[]string +
+

List of custom options to pass to the pg_restore command. IMPORTANT: +Use these options with caution and at your own risk, as the operator +does not validate their content. Be aware that certain options may +conflict with the operator's intended functionality or design. +Only available in 1.25 or later

+
+ +
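As a sketch of how `Import` and `ImportSource` fit into a cluster bootstrap, consider the following `microservice` import. The host, credentials, and secret names are hypothetical placeholders:

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Cluster
metadata:
  name: cluster-imported
spec:
  instances: 3
  storage:
    size: 1Gi
  bootstrap:
    initdb:
      import:
        type: microservice          # import a single database
        databases:
          - app
        source:
          externalCluster: cluster-source
  externalClusters:
    - name: cluster-source          # hypothetical source definition
      connectionParameters:
        host: source-db.example.com # placeholder host
        user: postgres
        dbname: postgres
      password:
        name: source-superuser      # hypothetical secret
        key: password
```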
+ +## ImportSource + +**Appears in:** + +- [Import](#postgresql-k8s-enterprisedb-io-v1-Import) + +

ImportSource describes the source for the logical snapshot

+ + + + + + + + +
FieldDescription
externalCluster [Required]
+string +
+

The name of the externalCluster used for import

+
+ +
+ +## InstanceID + +**Appears in:** + +- [BackupStatus](#postgresql-k8s-enterprisedb-io-v1-BackupStatus) + +

InstanceID contains the information to identify an instance

+ + + + + + + + + + + +
FieldDescription
podName
+string +
+

The pod name

+
ContainerID
+string +
+

The container ID

+
+ +
+ +## InstanceReportedState + +**Appears in:** + +- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus) + +

InstanceReportedState describes the last reported state of an instance during a reconciliation loop

+ + + + + + + + + + + +
FieldDescription
isPrimary [Required]
+bool +
+

indicates if an instance is the primary one

+
timeLineID
+int +
+

indicates the TimelineId the instance is on

+
+ +
+ +## LDAPBindAsAuth + +**Appears in:** + +- [LDAPConfig](#postgresql-k8s-enterprisedb-io-v1-LDAPConfig) + +

LDAPBindAsAuth provides the required fields to use the +bind authentication for LDAP

+ + + + + + + + + + + +
FieldDescription
prefix
+string +
+

Prefix for the bind authentication option

+
suffix
+string +
+

Suffix for the bind authentication option

+
+ +
+ +## LDAPBindSearchAuth + +**Appears in:** + +- [LDAPConfig](#postgresql-k8s-enterprisedb-io-v1-LDAPConfig) + +

LDAPBindSearchAuth provides the required fields to use +the bind+search LDAP authentication process

+ + + + + + + + + + + + + + + + + + + + +
FieldDescription
baseDN
+string +
+

Root DN to begin the user search

+
bindDN
+string +
+

DN of the user to bind to the directory

+
bindPassword
+core/v1.SecretKeySelector +
+

Secret with the password for the user to bind to the directory

+
searchAttribute
+string +
+

Attribute to match against the username

+
searchFilter
+string +
+

Search filter to use when doing the search+bind authentication

+
+ +
+ +## LDAPConfig + +**Appears in:** + +- [PostgresConfiguration](#postgresql-k8s-enterprisedb-io-v1-PostgresConfiguration) + +

LDAPConfig contains the parameters needed for LDAP authentication

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
server
+string +
+

LDAP hostname or IP address

+
port
+int +
+

LDAP server port

+
scheme
+LDAPScheme +
+

LDAP scheme to be used, possible options are ldap and ldaps

+
bindAsAuth
+LDAPBindAsAuth +
+

Bind as authentication configuration

+
bindSearchAuth
+LDAPBindSearchAuth +
+

Bind+Search authentication configuration

+
tls
+bool +
+

Set to 'true' to enable LDAP over TLS. 'false' is default

+
+ +
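A hedged sketch of the bind+search flavor, combining `LDAPConfig` with `LDAPBindSearchAuth`. The server address, DNs, and secret name are placeholders:

```yaml
spec:
  postgresql:
    ldap:
      server: ldap.example.org          # placeholder host
      port: 389
      scheme: ldap
      bindSearchAuth:
        baseDN: dc=example,dc=org       # placeholder root DN
        bindDN: cn=admin,dc=example,dc=org
        bindPassword:
          name: ldap-secret             # placeholder secret
          key: ldapBindPassword
        searchAttribute: uid
```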
+ +## LDAPScheme + +(Alias of `string`) + +**Appears in:** + +- [LDAPConfig](#postgresql-k8s-enterprisedb-io-v1-LDAPConfig) + +

LDAPScheme defines the possible schemes for LDAP

+ +
+ +## ManagedConfiguration + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

ManagedConfiguration represents the portions of PostgreSQL that are managed +by the instance manager

+ + + + + + + + + + + +
FieldDescription
roles
+[]RoleConfiguration +
+

Database roles managed by the Cluster

+
services
+ManagedServices +
+

Services managed by the Cluster. +Only available in 1.25 or later

+
+ +
+ +## ManagedRoles + +**Appears in:** + +- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus) + +

ManagedRoles tracks the status of a cluster's managed roles

+ + + + + + + + + + + + + + +
FieldDescription
byStatus
+map[RoleStatus][]string +
+

ByStatus gives the list of roles in each state

+
cannotReconcile
+map[string][]string +
+

CannotReconcile lists roles that cannot be reconciled in PostgreSQL, +with an explanation of the cause

+
passwordStatus
+map[string]PasswordState +
+

PasswordStatus gives the last transaction id and password secret version for each managed role

+
+ +
+ +## ManagedService + +**Appears in:** + +- [ManagedServices](#postgresql-k8s-enterprisedb-io-v1-ManagedServices) + +

ManagedService represents a specific service managed by the cluster. +It includes the type of service and its associated template specification.

+ + + + + + + + + + + + + + +
FieldDescription
selectorType [Required]
+ServiceSelectorType +
+

SelectorType specifies the type of selectors that the service will have. +Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.

+
updateStrategy
+ServiceUpdateStrategy +
+

UpdateStrategy describes how the service differences should be reconciled

+
serviceTemplate [Required]
+ServiceTemplateSpec +
+

ServiceTemplate is the template specification for the service.

+
+ +
+ +## ManagedServices + +**Appears in:** + +- [ManagedConfiguration](#postgresql-k8s-enterprisedb-io-v1-ManagedConfiguration) + +

ManagedServices represents the services managed by the cluster.

+ + + + + + + + + + + +
FieldDescription
disabledDefaultServices
+[]ServiceSelectorType +
+

DisabledDefaultServices is a list of service types that are disabled by default. +Valid values are "r" and "ro", representing read and read-only services.

+
additional
+[]ManagedService +
+

Additional is a list of additional managed services specified by the user.

+
+ +
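As an illustration of `ManagedServices` together with `ManagedService`, the sketch below disables the default read services and adds a LoadBalancer in front of the read-write endpoint. The `patch` update strategy value and the service name are assumptions for the example:

```yaml
spec:
  managed:
    services:
      disabledDefaultServices: ["ro", "r"]
      additional:
        - selectorType: rw
          updateStrategy: patch            # assumed ServiceUpdateStrategy value
          serviceTemplate:
            metadata:
              name: cluster-example-rw-lb  # illustrative name
            spec:
              type: LoadBalancer
```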
+ +## Metadata + +**Appears in:** + +- [PodTemplateSpec](#postgresql-k8s-enterprisedb-io-v1-PodTemplateSpec) + +- [ServiceAccountTemplate](#postgresql-k8s-enterprisedb-io-v1-ServiceAccountTemplate) + +- [ServiceTemplateSpec](#postgresql-k8s-enterprisedb-io-v1-ServiceTemplateSpec) + +

Metadata is a structure similar to the metav1.ObjectMeta, but still +parseable by controller-gen to create a suitable CRD for the user. +The comment of PodTemplateSpec has an explanation of why we are +not using the core data types.

+ + + + + + + + + + + + + + +
FieldDescription
name
+string +
+

The name of the resource. Only supported for certain types +Only available in 1.25 or later

+
labels
+map[string]string +
+

Map of string keys and values that can be used to organize and categorize +(scope and select) objects. May match selectors of replication controllers +and services. +More info: http://kubernetes.io/docs/user-guide/labels

+
annotations
+map[string]string +
+

Annotations is an unstructured key value map stored with a resource that may be +set by external tools to store and retrieve arbitrary metadata. They are not +queryable and should be preserved when modifying objects. +More info: http://kubernetes.io/docs/user-guide/annotations

+
+ +
+ +## MonitoringConfiguration + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

MonitoringConfiguration is the type containing all the monitoring +configuration for a certain cluster

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
disableDefaultQueries
+bool +
+

Whether the default queries should be injected. +Set it to true if you don't want to inject default queries into the cluster. +Default: false.

+
customQueriesConfigMap
+[]github.com/cloudnative-pg/machinery/pkg/api.ConfigMapKeySelector +
+

The list of config maps containing the custom queries

+
customQueriesSecret
+[]github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector +
+

The list of secrets containing the custom queries

+
enablePodMonitor
+bool +
+

Enable or disable the PodMonitor

+
tls
+ClusterMonitoringTLSConfiguration +
+

Configure TLS communication for the metrics endpoint. +Changing tls.enabled option will force a rollout of all instances. +Only available in 1.25 or later

+
podMonitorMetricRelabelings
+[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig +
+

The list of metric relabelings for the PodMonitor. Applied to samples before ingestion.

+
podMonitorRelabelings
+[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig +
+

The list of relabelings for the PodMonitor. Applied to samples before scraping.

+
+ +
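For instance, a monitoring stanza that keeps the default queries, enables the PodMonitor, and adds one custom-queries ConfigMap could look like this sketch; `example-monitoring` and `custom-queries` are hypothetical names:

```yaml
spec:
  monitoring:
    enablePodMonitor: true
    disableDefaultQueries: false
    customQueriesConfigMap:
      - name: example-monitoring   # hypothetical ConfigMap name
        key: custom-queries        # hypothetical key inside it
```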
+ +## NodeMaintenanceWindow + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

NodeMaintenanceWindow contains information that the operator +will use while upgrading the underlying node.

+

This option is only useful when the chosen storage prevents the Pods +from being freely moved across nodes.

+ + + + + + + + + + + +
FieldDescription
reusePVC
+bool +
+

Reuse the existing PVC (wait for the node to come +up again) or not (recreate it elsewhere - when instances >1)

+
inProgress
+bool +
+

Is there a node maintenance activity in progress?

+
+ +
+ +## OnlineConfiguration + +**Appears in:** + +- [BackupSpec](#postgresql-k8s-enterprisedb-io-v1-BackupSpec) + +- [ScheduledBackupSpec](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec) + +- [VolumeSnapshotConfiguration](#postgresql-k8s-enterprisedb-io-v1-VolumeSnapshotConfiguration) + +

OnlineConfiguration contains the configuration parameters for the online volume snapshot

+ + + + + + + + + + + +
FieldDescription
waitForArchive
+bool +
+

If false, the function will return immediately after the backup is completed, +without waiting for WAL to be archived. +This behavior is only useful with backup software that independently monitors WAL archiving. +Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. +By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is +enabled. +On a standby, this means that it will wait only when archive_mode = always. +If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger +an immediate segment switch.

+
immediateCheckpoint
+bool +
+

Control whether the I/O workload for the backup initial checkpoint will +be limited, according to the checkpoint_completion_target setting on +the PostgreSQL server. If set to true, an immediate checkpoint will be +used, meaning PostgreSQL will complete the checkpoint as soon as +possible. false by default.

+
+ +
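In a cluster definition, `OnlineConfiguration` typically appears under the volume snapshot backup stanza. A minimal sketch, with a placeholder snapshot class name:

```yaml
spec:
  backup:
    volumeSnapshot:
      className: csi-snapclass      # placeholder VolumeSnapshotClass
      online: true
      onlineConfiguration:
        immediateCheckpoint: true   # trade I/O for a faster backup start
        waitForArchive: true        # the default; wait for WAL archiving
```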
+ +## PasswordState + +**Appears in:** + +- [ManagedRoles](#postgresql-k8s-enterprisedb-io-v1-ManagedRoles) + +

PasswordState represents the state of the password of a managed RoleConfiguration

+ + + + + + + + + + + +
FieldDescription
transactionID
+int64 +
+

the last transaction ID to affect the role definition in PostgreSQL

+
resourceVersion
+string +
+

the resource version of the password secret

+
+ +
+ +## PgBouncerIntegrationStatus + +**Appears in:** + +- [PoolerIntegrations](#postgresql-k8s-enterprisedb-io-v1-PoolerIntegrations) + +

PgBouncerIntegrationStatus encapsulates the needed integration for the pgbouncer poolers referencing the cluster

+ + + + + + + + +
FieldDescription
secrets
+[]string +
+ No description provided.
+ +
+ +## PgBouncerPoolMode + +(Alias of `string`) + +**Appears in:** + +- [PgBouncerSpec](#postgresql-k8s-enterprisedb-io-v1-PgBouncerSpec) + +

PgBouncerPoolMode is the mode of PgBouncer

+ +
+ +## PgBouncerSecrets + +**Appears in:** + +- [PoolerSecrets](#postgresql-k8s-enterprisedb-io-v1-PoolerSecrets) + +

PgBouncerSecrets contains the versions of the secrets used +by pgbouncer

+ + + + + + + + +
FieldDescription
authQuery
+SecretVersion +
+

The auth query secret version

+
+ +
+ +## PgBouncerSpec + +**Appears in:** + +- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec) + +

PgBouncerSpec defines how to configure PgBouncer

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
poolMode
+PgBouncerPoolMode +
+

The pool mode. Default: session.

+
authQuerySecret
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +
+

The credentials of the user that need to be used for the authentication +query. In case it is specified, also an AuthQuery +(e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") +has to be specified and no automatic CNP Cluster integration will be triggered.

+
authQuery
+string +
+

The query that will be used to download the hash of the password +of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". +In case it is specified, also an AuthQuerySecret has to be specified and +no automatic CNP Cluster integration will be triggered.

+
parameters
+map[string]string +
+

Additional parameters to be passed to PgBouncer - please check +the CNP documentation for a list of options you can configure

+
pg_hba
+[]string +
+

PostgreSQL Host Based Authentication rules (lines to be appended +to the pg_hba.conf file)

+
paused
+bool +
+

When set to true, PgBouncer will disconnect from the PostgreSQL +server, first waiting for all queries to complete, and pause all new +client connections until this value is set to false (default). Internally, +the operator calls PgBouncer's PAUSE and RESUME commands.

+
+ +
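Putting `PgBouncerSpec` in context, here is a minimal `Pooler` sketch. Names and parameter values are illustrative; note that PgBouncer parameters are passed through as strings:

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Pooler
metadata:
  name: pooler-example-rw
spec:
  cluster:
    name: cluster-example   # must not match the Pooler name
  instances: 3
  type: rw
  pgbouncer:
    poolMode: session       # the default pool mode
    parameters:
      max_client_conn: "1000"
      default_pool_size: "10"
```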
+ +## PluginConfiguration + +**Appears in:** + +

PluginConfiguration specifies a plugin that needs to be loaded for this +cluster to be reconciled

+ + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

Name is the plugin name

+
enabled
+bool +
+

Enabled is true if this plugin will be used

+
parameters
+map[string]string +
+

Parameters is the configuration of the plugin

+
+ +
+ +## PluginConfigurationList + +(Alias of `[]github.com/EnterpriseDB/cloud-native-postgres/api/v1.PluginConfiguration`) + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

PluginConfigurationList represents a set of plugins with their +configuration parameters

+ +
+ +## PluginStatus + +**Appears in:** + +- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus) + +

PluginStatus is the status of a loaded plugin

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

Name is the name of the plugin

+
version [Required]
+string +
+

Version is the version of the plugin loaded by the +latest reconciliation loop

+
capabilities
+[]string +
+

Capabilities are the list of capabilities of the +plugin

+
operatorCapabilities
+[]string +
+

OperatorCapabilities are the list of capabilities of the +plugin regarding the reconciler

+
walCapabilities
+[]string +
+

WALCapabilities are the list of capabilities of the +plugin regarding the WAL management

+
backupCapabilities
+[]string +
+

BackupCapabilities are the list of capabilities of the +plugin regarding the Backup management

+
restoreJobHookCapabilities
+[]string +
+

RestoreJobHookCapabilities are the list of capabilities of the +plugin regarding the RestoreJobHook management

+
status
+string +
+

Status contains the status reported by the plugin through the SetStatusInCluster interface

+
+ +
+ +## PodTemplateSpec + +**Appears in:** + +- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec) + +

PodTemplateSpec is a structure allowing the user to set +a template for Pod generation.

+

Unfortunately we can't use the corev1.PodTemplateSpec +type because the generated CRD won't have the field for the +metadata section.

+

References: +https://github.com/kubernetes-sigs/controller-tools/issues/385 +https://github.com/kubernetes-sigs/controller-tools/issues/448 +https://github.com/prometheus-operator/prometheus-operator/issues/3041

+ + + + + + + + + + + +
FieldDescription
metadata
+Metadata +
+

Standard object's metadata. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata

+
spec
+core/v1.PodSpec +
+

Specification of the desired behavior of the pod. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
+ +
+ +## PodTopologyLabels + +(Alias of `map[string]string`) + +**Appears in:** + +- [Topology](#postgresql-k8s-enterprisedb-io-v1-Topology) + +

PodTopologyLabels represent the topology of a Pod. map[labelName]labelValue

+ +
+ +## PoolerIntegrations + +**Appears in:** + +- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus) + +

PoolerIntegrations encapsulates the needed integration for the poolers referencing the cluster

+ + + + + + + + +
FieldDescription
pgBouncerIntegration
+PgBouncerIntegrationStatus +
+ No description provided.
+ +
+ +## PoolerMonitoringConfiguration + +**Appears in:** + +- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec) + +

PoolerMonitoringConfiguration is the type containing all the monitoring +configuration for a certain Pooler.

+

Mirrors the Cluster's MonitoringConfiguration but without the custom queries +part for now.

+ + + + + + + + + + + + + + +
FieldDescription
enablePodMonitor
+bool +
+

Enable or disable the PodMonitor

+
podMonitorMetricRelabelings
+[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig +
+

The list of metric relabelings for the PodMonitor. Applied to samples before ingestion.

+
podMonitorRelabelings
+[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig +
+

The list of relabelings for the PodMonitor. Applied to samples before scraping.

+
+ +
+ +## PoolerSecrets + +**Appears in:** + +- [PoolerStatus](#postgresql-k8s-enterprisedb-io-v1-PoolerStatus) + +

PoolerSecrets contains the versions of all the secrets used

+ + + + + + + + + + + + + + + + + +
FieldDescription
serverTLS
+SecretVersion +
+

The server TLS secret version

+
serverCA
+SecretVersion +
+

The server CA secret version

+
clientCA
+SecretVersion +
+

The client CA secret version

+
pgBouncerSecrets
+PgBouncerSecrets +
+

The version of the secrets used by PgBouncer

+
+ +
+ +## PoolerSpec + +**Appears in:** + +- [Pooler](#postgresql-k8s-enterprisedb-io-v1-Pooler) + +

PoolerSpec defines the desired state of Pooler

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
cluster [Required]
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +
+

This is the cluster reference on which the Pooler will work. +The Pooler name should never match any cluster name within the same namespace.

+
type
+PoolerType +
+

Type of service to forward traffic to. Default: rw.

+
instances
+int32 +
+

The number of replicas we want. Default: 1.

+
template
+PodTemplateSpec +
+

The template of the Pod to be created

+
pgbouncer [Required]
+PgBouncerSpec +
+

The PgBouncer configuration

+
deploymentStrategy
+apps/v1.DeploymentStrategy +
+

The deployment strategy to use for pgbouncer to replace existing pods with new ones

+
monitoring
+PoolerMonitoringConfiguration +
+

The configuration of the monitoring infrastructure of this pooler.

+
serviceTemplate
+ServiceTemplateSpec +
+

Template for the Service to be created +Only available in 1.25 or later

+
+ +
+ +## PoolerStatus + +**Appears in:** + +- [Pooler](#postgresql-k8s-enterprisedb-io-v1-Pooler) + +

PoolerStatus defines the observed state of Pooler

+ + + + + + + + + + + +
FieldDescription
secrets
+PoolerSecrets +
+

The resource version of the config object

+
instances
+int32 +
+

The number of pods trying to be scheduled

+
+ +
+ +## PoolerType + +(Alias of `string`) + +**Appears in:** + +- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec) + +

PoolerType is the type of the connection pool, meaning the service +we are targeting. Allowed values are rw and ro.

+ +
+ +## PostgresConfiguration + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

PostgresConfiguration defines the PostgreSQL configuration

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
parameters
+map[string]string +
+

PostgreSQL configuration options (postgresql.conf)

+
synchronous
+SynchronousReplicaConfiguration +
+

Configuration of the PostgreSQL synchronous replication feature +Only available in 1.25 or later

+
pg_hba
+[]string +
+

PostgreSQL Host Based Authentication rules (lines to be appended +to the pg_hba.conf file)

+
pg_ident
+[]string +
+

PostgreSQL User Name Maps rules (lines to be appended +to the pg_ident.conf file)

+
epas
+EPASConfiguration +
+

EDB Postgres Advanced Server specific configurations

+
syncReplicaElectionConstraint
+SyncReplicaElectionConstraints +
+

Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be +set up.

+
shared_preload_libraries
+[]string +
+

Lists of shared preload libraries to add to the default ones

+
ldap
+LDAPConfig +
+

Options to specify LDAP configuration

+
promotionTimeout
+int32 +
+

Specifies the maximum number of seconds to wait when promoting an instance to primary. +Default value is 40000000, greater than one year in seconds, +big enough to simulate an infinite timeout

+
enableAlterSystem
+bool +
+

If this parameter is true, the user will be able to invoke ALTER SYSTEM +on this EDB Postgres for Kubernetes Cluster. +This should only be used for debugging and troubleshooting. +Defaults to false.

+
+ +
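A short sketch of how these fields surface in a cluster manifest; the parameter values and the pg_hba rule are arbitrary examples:

```yaml
spec:
  postgresql:
    parameters:
      max_connections: "200"
      shared_buffers: "256MB"
    pg_hba:
      - host all all 10.0.0.0/8 scram-sha-256
    shared_preload_libraries:
      - pg_stat_statements
```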
+ +## PrimaryUpdateMethod + +(Alias of `string`) + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

PrimaryUpdateMethod contains the method to use when upgrading +the primary server of the cluster as part of rolling updates

+ +
+ +## PrimaryUpdateStrategy + +(Alias of `string`) + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

PrimaryUpdateStrategy contains the strategy to follow when upgrading +the primary server of the cluster as part of rolling updates

+ +
+ +## Probe + +**Appears in:** + +- [ProbesConfiguration](#postgresql-k8s-enterprisedb-io-v1-ProbesConfiguration) + +

Probe describes a health check to be performed against a container to determine whether it is +alive or ready to receive traffic.

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
initialDelaySeconds
+int32 +
+

Number of seconds after the container has started before liveness probes are initiated. +More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes

+
timeoutSeconds
+int32 +
+

Number of seconds after which the probe times out. +Defaults to 1 second. Minimum value is 1. +More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes

+
periodSeconds
+int32 +
+

How often (in seconds) to perform the probe. +Default to 10 seconds. Minimum value is 1.

+
successThreshold
+int32 +
+

Minimum consecutive successes for the probe to be considered successful after having failed. +Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.

+
failureThreshold
+int32 +
+

Minimum consecutive failures for the probe to be considered failed after having succeeded. +Defaults to 3. Minimum value is 1.

+
terminationGracePeriodSeconds
+int64 +
+

Optional duration in seconds the pod needs to terminate gracefully upon probe failure. +The grace period is the duration in seconds after the processes running in the pod are sent +a termination signal and the time when the processes are forcibly halted with a kill signal. +Set this value longer than the expected cleanup time for your process. +If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this +value overrides the value provided by the pod spec. +Value must be non-negative integer. The value zero indicates stop immediately via +the kill signal (no opportunity to shut down). +This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. +Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.

+
+ +
+ +## ProbesConfiguration + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

ProbesConfiguration represent the configuration for the probes +to be injected in the PostgreSQL Pods

+ + + + + + + + + + + + + + +
FieldDescription
startup [Required]
+Probe +
+

The startup probe configuration

+
liveness [Required]
+Probe +
+

The liveness probe configuration

+
readiness [Required]
+Probe +
+

The readiness probe configuration

+
+ +
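Assuming `ProbesConfiguration` is exposed as a `probes` stanza of the cluster spec, a sketch overriding the readiness and liveness timings might read as follows; the values are illustrative, not recommendations:

```yaml
spec:
  probes:                  # assumed ClusterSpec field name
    readiness:
      periodSeconds: 10
      timeoutSeconds: 5
      failureThreshold: 3
    liveness:
      periodSeconds: 10
      timeoutSeconds: 5
      failureThreshold: 3
```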
+ +## PublicationReclaimPolicy + +(Alias of `string`) + +**Appears in:** + +- [PublicationSpec](#postgresql-k8s-enterprisedb-io-v1-PublicationSpec) + +

PublicationReclaimPolicy defines a policy for end-of-life maintenance of Publications.

+ +
+ +## PublicationSpec + +**Appears in:** + +- [Publication](#postgresql-k8s-enterprisedb-io-v1-Publication) + +

PublicationSpec defines the desired state of Publication

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
cluster [Required]
+core/v1.LocalObjectReference +
+

The name of the PostgreSQL cluster that identifies the "publisher"

+
name [Required]
+string +
+

The name of the publication inside PostgreSQL

+
dbname [Required]
+string +
+

The name of the database where the publication will be installed in +the "publisher" cluster

+
parameters
+map[string]string +
+

Publication parameters part of the WITH clause as expected by +PostgreSQL CREATE PUBLICATION command

+
target [Required]
+PublicationTarget +
+

Target of the publication as expected by PostgreSQL CREATE PUBLICATION command

+
publicationReclaimPolicy
+PublicationReclaimPolicy +
+

The policy for end-of-life maintenance of this publication

+
+ +
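For example, a `Publication` that publishes every table of the `app` database on the publisher cluster; all names are placeholders:

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Publication
metadata:
  name: publication-example
spec:
  cluster:
    name: cluster-publisher   # placeholder publisher cluster
  name: pub
  dbname: app
  target:
    allTables: true           # FOR ALL TABLES
```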
+ +## PublicationStatus + +**Appears in:** + +- [Publication](#postgresql-k8s-enterprisedb-io-v1-Publication) + +

PublicationStatus defines the observed state of Publication

+ + + + + + + + + + + + + + +
FieldDescription
observedGeneration
+int64 +
+

A sequence number representing the latest +desired state that was synchronized

+
applied
+bool +
+

Applied is true if the publication was reconciled correctly

+
message
+string +
+

Message is the reconciliation output message

+
+ +
+ +## PublicationTarget + +**Appears in:** + +- [PublicationSpec](#postgresql-k8s-enterprisedb-io-v1-PublicationSpec) + +

PublicationTarget is what this publication should publish

+ + + + + + + + + + + +
FieldDescription
allTables
+bool +
+

Marks the publication as one that replicates changes for all tables +in the database, including tables created in the future. +Corresponding to FOR ALL TABLES in PostgreSQL.

+
objects
+[]PublicationTargetObject +
+

Just the following schema objects

+
+ +
+ +## PublicationTargetObject + +**Appears in:** + +- [PublicationTarget](#postgresql-k8s-enterprisedb-io-v1-PublicationTarget) + +

PublicationTargetObject is an object to publish

+ + + + + + + + + + + +
FieldDescription
tablesInSchema
+string +
+

Marks the publication as one that replicates changes for all tables +in the specified list of schemas, including tables created in the +future. Corresponding to FOR TABLES IN SCHEMA in PostgreSQL.

+
table
+PublicationTargetTable +
+

Specifies a list of tables to add to the publication. Corresponding +to FOR TABLE in PostgreSQL.

+
+ +
+ +## PublicationTargetTable + +**Appears in:** + +- [PublicationTargetObject](#postgresql-k8s-enterprisedb-io-v1-PublicationTargetObject) + +

PublicationTargetTable is a table to publish

+ + + + + + + + + + + + + + + + + +
FieldDescription
only
+bool +
+

Whether to limit to the table only or include all its descendants

+
name [Required]
+string +
+

The table name

+
schema
+string +
+

The schema name

+
columns
+[]string +
+

The columns to publish

+
+ +
+ +## RecoveryTarget + +**Appears in:** + +- [BootstrapRecovery](#postgresql-k8s-enterprisedb-io-v1-BootstrapRecovery) + +

RecoveryTarget allows you to configure the point at which the recovery process +will stop. All the target options except TargetTLI are mutually exclusive.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
backupID
+string +
+

The ID of the backup from which to start the recovery process. +If empty (default) the operator will automatically detect the backup +based on targetTime or targetLSN if specified. Otherwise, it uses the +latest available backup in chronological order.

+
targetTLI
+string +
+

The target timeline ("latest" or a positive integer)

+
targetXID
+string +
+

The target transaction ID

+
targetName
+string +
+

The target name (to be previously created +with pg_create_restore_point)

+
targetLSN
+string +
+

The target LSN (Log Sequence Number)

+
targetTime
+string +
+

The target time as a timestamp in the RFC3339 standard

+
targetImmediate
+bool +
+

End recovery as soon as a consistent state is reached

+
exclusive
+bool +
+

Set the target to be exclusive. If omitted, defaults to false, so that +in Postgres, recovery_target_inclusive will be true

+
+ +
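As a sketch, a point-in-time recovery target inside a `bootstrap.recovery` stanza. The source name and timestamp are placeholders, and remember that the targets other than `targetTLI` are mutually exclusive:

```yaml
spec:
  bootstrap:
    recovery:
      source: cluster-backup-source         # placeholder recovery source
      recoveryTarget:
        targetTime: "2025-05-23T10:00:00Z"  # RFC3339 timestamp
        targetTLI: latest
```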
+ +## ReplicaClusterConfiguration + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

ReplicaClusterConfiguration encapsulates the configuration of a replica +cluster

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
self
+string +
+

Self defines the name of this cluster. It is used to determine if this is a primary +or a replica cluster, comparing it with primary +Only available in 1.25 or later

+
primary
+string +
+

Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the +topology specified in externalClusters +Only available in 1.25 or later

+
source [Required]
+string +
+

The name of the external cluster which is the replication origin

+
enabled
+bool +
+

If replica mode is enabled, this cluster will be a replica of an +existing cluster. Replica cluster can be created from a recovery +object store or via streaming through pg_basebackup. +Refer to the Replica clusters page of the documentation for more information.

+
promotionToken
+string +
+

A demotion token generated by an external cluster used to +check if the promotion requirements are met. +Only available in 1.25 or later

+
minApplyDelay
+meta/v1.Duration +
+

When replica mode is enabled, this parameter allows you to replay +transactions only when the system time is at least the configured +time past the commit time. This provides an opportunity to correct +data loss errors. Note that when this parameter is set, a promotion +token cannot be used. +Only available in 1.25 or later

+
+ +
+ +## ReplicationSlotsConfiguration + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

ReplicationSlotsConfiguration encapsulates the configuration +of replication slots

+ + + + + + + + + + + + + + +
FieldDescription
highAvailability
+ReplicationSlotsHAConfiguration +
+

Replication slots for high availability configuration

+
updateInterval
+int +
+

Standby will update the status of the local replication slots +every updateInterval seconds (default 30).

+
synchronizeReplicas
+SynchronizeReplicasConfiguration +
+

Configures the synchronization of the user defined physical replication slots +Only available in 1.25 or later

+
+ +
+ +## ReplicationSlotsHAConfiguration + +**Appears in:** + +- [ReplicationSlotsConfiguration](#postgresql-k8s-enterprisedb-io-v1-ReplicationSlotsConfiguration) + +

ReplicationSlotsHAConfiguration encapsulates the configuration +of the replication slots that are automatically managed by +the operator to control the streaming replication connections +with the standby instances for high availability (HA) purposes. +Replication slots are a PostgreSQL feature that makes sure +that PostgreSQL automatically keeps WAL files in the primary +when a streaming client (in this specific case a replica that +is part of the HA cluster) gets disconnected.

+ + + + + + + + + + + +
FieldDescription
enabled
+bool +
+

If enabled (default), the operator will automatically manage replication slots +on the primary instance and use them in streaming replication +connections with all the standby instances that are part of the HA +cluster. If disabled, the operator will not take advantage +of replication slots in streaming connections with the replicas. +This feature also controls replication slots in replica clusters, +from the designated primary to its cascading replicas.

+
slotPrefix
+string +
+

Prefix for replication slots managed by the operator for HA. +It may only contain lower case letters, numbers, and the underscore character. +This can only be set at creation time. By default set to _cnp_.

+
+ +
+ +## RoleConfiguration + +**Appears in:** + +- [ManagedConfiguration](#postgresql-k8s-enterprisedb-io-v1-ManagedConfiguration) + +

RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role +with the additional field Ensure specifying whether to ensure the presence or +absence of the role in the database

+

The defaults of the CREATE ROLE command are applied +Reference: https://www.postgresql.org/docs/current/sql-createrole.html

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

Name of the role

+
comment
+string +
+

Description of the role

+
ensure
+EnsureOption +
+

Ensure the role is present or absent - defaults to "present"

+
passwordSecret
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +
+

Secret containing the password of the role (if present) +If null, the password will be ignored unless DisablePassword is set

+
connectionLimit
+int64 +
+

If the role can log in, this specifies how many concurrent +connections the role can make. -1 (the default) means no limit.

+
validUntil
+meta/v1.Time +
+

Date and time after which the role's password is no longer valid. +When omitted, the password will never expire (default).

+
inRoles
+[]string +
+

List of one or more existing roles to which this role will be +immediately added as a new member. Default empty.

+
inherit
+bool +
+

Whether a role "inherits" the privileges of roles it is a member of. +Default is true.

+
disablePassword
+bool +
+

DisablePassword indicates that a role's password should be set to NULL in Postgres

+
superuser
+bool +
+

Whether the role is a superuser who can override all access +restrictions within the database - superuser status is dangerous and +should be used only when really needed. You must be a +superuser yourself to create a new superuser. Default is false.

+
createdb
+bool +
+

When set to true, the role being defined will be allowed to create +new databases. Specifying false (default) will deny a role the +ability to create databases.

+
createrole
+bool +
+

Whether the role will be permitted to create, alter, drop, comment +on, change the security label for, and grant or revoke membership in +other roles. Default is false.

+
login
+bool +
+

Whether the role is allowed to log in. A role having the login +attribute can be thought of as a user. Roles without this attribute +are useful for managing database privileges, but are not users in +the usual sense of the word. Default is false.

+
replication
+bool +
+

Whether a role is a replication role. A role must have this +attribute (or be a superuser) in order to be able to connect to the +server in replication mode (physical or logical replication) and in +order to be able to create or drop replication slots. A role having +the replication attribute is a very highly privileged role, and +should only be used on roles actually used for replication. Default +is false.

+
bypassrls
+bool +
+

Whether a role bypasses every row-level security (RLS) policy. +Default is false.

+
+ +
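A hedged example of declarative role management using the fields above; the role, comment, and secret names are illustrative:

```yaml
spec:
  managed:
    roles:
      - name: dante                    # illustrative role name
        ensure: present
        comment: Dante Alighieri
        login: true
        inRoles:
          - pg_monitor
        connectionLimit: 4
        passwordSecret:
          name: cluster-example-dante  # hypothetical secret
```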
+ +## SQLRefs + +**Appears in:** + +- [BootstrapInitDB](#postgresql-k8s-enterprisedb-io-v1-BootstrapInitDB) + +

SQLRefs holds references to ConfigMaps or Secrets +containing SQL files. The references are processed in a specific order: +first, all Secrets are processed, followed by all ConfigMaps. +Within each group, the processing order follows the sequence specified +in their respective arrays.

+ + + + + + + + + + + +
FieldDescription
secretRefs
+[]github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector +
+

SecretRefs holds a list of references to Secrets

+
configMapRefs
+[]github.com/cloudnative-pg/machinery/pkg/api.ConfigMapKeySelector +
+

ConfigMapRefs holds a list of references to ConfigMaps

+
+ +
+ +## ScheduledBackupSpec + +**Appears in:** + +- [ScheduledBackup](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackup) + +

ScheduledBackupSpec defines the desired state of ScheduledBackup

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
suspend
+bool +
+

If this backup is suspended or not

+
immediate
+bool +
+

Whether the first backup has to start immediately after creation or not

+
schedule [Required]
+string +
+

The schedule does not follow the same format used in Kubernetes CronJobs +as it includes an additional seconds specifier, +see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format

+
cluster [Required]
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +
+

The cluster to backup

+
backupOwnerReference
+string +
+

Indicates which ownerReference should be put inside the created backup resources.

+
- none: no owner reference for created backup objects (same behavior as before the field was introduced)
- self: sets the Scheduled backup object as owner of the backup
- cluster: set the cluster as owner of the backup
+
target
+BackupTarget +
+

The policy to decide which instance should perform this backup. If empty, +it defaults to cluster.spec.backup.target. +Available options are empty string, primary and prefer-standby. +primary to have backups run always on primary instances, +prefer-standby to have backups run preferably on the most updated +standby, if available.

+
method
+BackupMethod +
+

The backup method to be used, possible options are barmanObjectStore, +volumeSnapshot or plugin. Defaults to: barmanObjectStore.

+
pluginConfiguration
+BackupPluginConfiguration +
+

Configuration parameters passed to the plugin managing this backup +Only available in 1.25 or later

+
online
+bool +
+

Whether the default type of backup with volume snapshots is +online/hot (true, default) or offline/cold (false) +Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'

+
onlineConfiguration
+OnlineConfiguration +
+

Configuration parameters to control the online/hot backup with volume snapshots +Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza

+
+ +
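Note the six-field cron format with the leading seconds specifier. A minimal sketch, with placeholder names:

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: ScheduledBackup
metadata:
  name: backup-example
spec:
  schedule: "0 0 0 * * *"     # six fields, seconds first: daily at midnight
  backupOwnerReference: self
  cluster:
    name: cluster-example     # placeholder cluster
  method: barmanObjectStore   # the default method
  immediate: true
```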
+ +## ScheduledBackupStatus + +**Appears in:** + +- [ScheduledBackup](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackup) + +

ScheduledBackupStatus defines the observed state of ScheduledBackup

+ + + + + + + + + + + + + + +
FieldDescription
lastCheckTime
+meta/v1.Time +
+

The latest time the schedule was checked

+
lastScheduleTime
+meta/v1.Time +
+

Information when was the last time that backup was successfully scheduled.

+
nextScheduleTime
+meta/v1.Time +
+

Next time we will run a backup

+
+ +
+ +## SecretVersion + +**Appears in:** + +- [PgBouncerSecrets](#postgresql-k8s-enterprisedb-io-v1-PgBouncerSecrets) + +- [PoolerSecrets](#postgresql-k8s-enterprisedb-io-v1-PoolerSecrets) + +

SecretVersion contains a secret name and its ResourceVersion

+ + + + + + + + + + + +
FieldDescription
name
+string +
+

The name of the secret

+
version
+string +
+

The ResourceVersion of the secret

+
+ +
+ +## SecretsResourceVersion + +**Appears in:** + +- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus) + +

SecretsResourceVersion is the resource versions of the secrets +managed by the operator

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
superuserSecretVersion
+string +
+

The resource version of the "postgres" user secret

+
replicationSecretVersion
+string +
+

The resource version of the "streaming_replica" user secret

+
applicationSecretVersion
+string +
+

The resource version of the "app" user secret

+
managedRoleSecretVersion
+map[string]string +
+

The resource versions of the managed roles secrets

+
caSecretVersion
+string +
+

Unused. Retained for compatibility with old versions.

+
clientCaSecretVersion
+string +
+

The resource version of the PostgreSQL client-side CA secret version

+
serverCaSecretVersion
+string +
+

The resource version of the PostgreSQL server-side CA secret version

+
serverSecretVersion
+string +
+

The resource version of the PostgreSQL server-side secret version

+
barmanEndpointCA
+string +
+

The resource version of the Barman Endpoint CA if provided

+
externalClusterSecretVersion
+map[string]string +
+

The resource versions of the external cluster secrets

+
metrics
+map[string]string +
+

A map with the versions of all the secrets used to pass metrics. +Map keys are the secret names, map values are the versions

+
+ +
+ +## ServiceAccountTemplate + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

ServiceAccountTemplate contains the template needed to generate the service accounts

+ + + + + + + + +
FieldDescription
metadata [Required]
+Metadata +
+

Metadata are the metadata to be used for the generated +service account

+
+ +
+ +## ServiceSelectorType + +(Alias of `string`) + +**Appears in:** + +- [ManagedService](#postgresql-k8s-enterprisedb-io-v1-ManagedService) + +- [ManagedServices](#postgresql-k8s-enterprisedb-io-v1-ManagedServices) + +

ServiceSelectorType describes a valid value for generating the service selectors. +It indicates which type of service the selector applies to, such as read-write, read, or read-only

+ +
+ +## ServiceTemplateSpec + +**Appears in:** + +- [ManagedService](#postgresql-k8s-enterprisedb-io-v1-ManagedService) + +- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec) + +

ServiceTemplateSpec is a structure allowing the user to set +a template for Service generation.

+ + + + + + + + + + + +
FieldDescription
metadata
+Metadata +
+

Standard object's metadata. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata

+
spec
+core/v1.ServiceSpec +
+

Specification of the desired behavior of the service. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
+ +
+ +## ServiceUpdateStrategy + +(Alias of `string`) + +**Appears in:** + +- [ManagedService](#postgresql-k8s-enterprisedb-io-v1-ManagedService) + +

ServiceUpdateStrategy describes how the changes to the managed service should be handled

+ +
+ +## SnapshotOwnerReference + +(Alias of `string`) + +**Appears in:** + +- [VolumeSnapshotConfiguration](#postgresql-k8s-enterprisedb-io-v1-VolumeSnapshotConfiguration) + +

SnapshotOwnerReference defines the reference type for the owner of the snapshot. +This specifies which owner the processed resources should relate to.

+ +
+ +## SnapshotType + +(Alias of `string`) + +**Appears in:** + +- [Import](#postgresql-k8s-enterprisedb-io-v1-Import) + +

SnapshotType is a type of allowed import

+ +
+ +## StorageConfiguration + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +- [TablespaceConfiguration](#postgresql-k8s-enterprisedb-io-v1-TablespaceConfiguration) + +

StorageConfiguration is the configuration used to create and reconcile PVCs, +usable for WAL volumes, PGDATA volumes, or tablespaces

+ + + + + + + + + + + + + + + + + +
FieldDescription
storageClass
+string +
+

StorageClass to use for PVCs. Applied after +evaluating the PVC template, if available. +If not specified, the generated PVCs will use the +default storage class

+
size
+string +
+

Size of the storage. Required if not already specified in the PVC template. +Changes to this field are automatically reapplied to the created PVCs. +Size cannot be decreased.

+
resizeInUseVolumes
+bool +
+

Resize existing PVCs, defaults to true

+
pvcTemplate
+core/v1.PersistentVolumeClaimSpec +
+

Template to be used to generate the Persistent Volume Claim

+
+ +
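Since the same structure serves WAL, PGDATA, and tablespace volumes, the sketch below uses it twice, assuming the cluster spec exposes a separate `walStorage` stanza; the class and sizes are illustrative:

```yaml
spec:
  storage:                  # PGDATA volume
    storageClass: standard  # illustrative class
    size: 10Gi
    resizeInUseVolumes: true
  walStorage:               # assumed separate WAL volume stanza
    storageClass: standard
    size: 5Gi
```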
+ +## SubscriptionReclaimPolicy + +(Alias of `string`) + +**Appears in:** + +- [SubscriptionSpec](#postgresql-k8s-enterprisedb-io-v1-SubscriptionSpec) + +

SubscriptionReclaimPolicy describes a policy for end-of-life maintenance of Subscriptions.

+ +
+ +## SubscriptionSpec + +**Appears in:** + +- [Subscription](#postgresql-k8s-enterprisedb-io-v1-Subscription) + +

SubscriptionSpec defines the desired state of Subscription

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
cluster [Required]
+core/v1.LocalObjectReference +
+

The name of the PostgreSQL cluster that identifies the "subscriber"

+
name [Required]
+string +
+

The name of the subscription inside PostgreSQL

+
dbname [Required]
+string +
+

The name of the database where the publication will be installed in +the "subscriber" cluster

+
parameters
+map[string]string +
+

Subscription parameters part of the WITH clause as expected by +PostgreSQL CREATE SUBSCRIPTION command

+
publicationName [Required]
+string +
+

The name of the publication inside the PostgreSQL database in the +"publisher"

+
publicationDBName
+string +
+

The name of the database containing the publication on the external +cluster. Defaults to the one in the external cluster definition.

+
externalClusterName [Required]
+string +
+

The name of the external cluster with the publication ("publisher")

+
subscriptionReclaimPolicy
+SubscriptionReclaimPolicy +
+

The policy for end-of-life maintenance of this subscription

+
+ +
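A `Subscription` sketch that pairs with the earlier publication example; all names are placeholders:

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Subscription
metadata:
  name: subscription-example
spec:
  cluster:
    name: cluster-subscriber              # placeholder subscriber cluster
  name: sub
  dbname: app
  publicationName: pub
  externalClusterName: cluster-publisher  # placeholder external cluster
```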
+ +## SubscriptionStatus + +**Appears in:** + +- [Subscription](#postgresql-k8s-enterprisedb-io-v1-Subscription) + +

SubscriptionStatus defines the observed state of Subscription

+ + + + + + + + + + + + + + +
FieldDescription
observedGeneration
+int64 +
+

A sequence number representing the latest +desired state that was synchronized

+
applied
+bool +
+

Applied is true if the subscription was reconciled correctly

+
message
+string +
+

Message is the reconciliation output message

+
+ +
+ +## SwitchReplicaClusterStatus + +**Appears in:** + +- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus) + +

SwitchReplicaClusterStatus contains all the statuses regarding the switch of a cluster to a replica cluster

+ + + + + + + + +
FieldDescription
inProgress
+bool +
+

InProgress indicates if there is an ongoing procedure of switching a cluster to a replica cluster.

+
+ +
+ +## SyncReplicaElectionConstraints + +**Appears in:** + +- [PostgresConfiguration](#postgresql-k8s-enterprisedb-io-v1-PostgresConfiguration) + +

SyncReplicaElectionConstraints contains the constraints for sync replicas election.

+

For anti-affinity parameters two instances are considered in the same location +if all the labels values match.

+

In the future, synchronous replica election restriction by name will be supported.

+ + + + + + + + + + + +
FieldDescription
nodeLabelsAntiAffinity
+[]string +
+

A list of node label values to extract and compare to evaluate if the pods reside in the same topology or not

+
enabled [Required]
+bool +
+

This flag enables the constraints for sync replicas

+
+ +
+ +## SynchronizeReplicasConfiguration + +**Appears in:** + +- [ReplicationSlotsConfiguration](#postgresql-k8s-enterprisedb-io-v1-ReplicationSlotsConfiguration) + +

SynchronizeReplicasConfiguration contains the configuration for the synchronization of user defined +physical replication slots

+ + + + + + + + + + + +
FieldDescription
enabled [Required]
+bool +
+

When set to true, every replication slot that is on the primary is synchronized on each standby

+
excludePatterns
+[]string +
+

List of regular expression patterns to match the names of replication slots to be excluded (by default empty)

+
+ +
+ +## SynchronousReplicaConfiguration + +**Appears in:** + +- [PostgresConfiguration](#postgresql-k8s-enterprisedb-io-v1-PostgresConfiguration) + +

SynchronousReplicaConfiguration contains the configuration of the +PostgreSQL synchronous replication feature. +Important: at this moment, also .spec.minSyncReplicas and .spec.maxSyncReplicas +need to be considered.

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
method [Required]
+SynchronousReplicaConfigurationMethod +
+

Method to select synchronous replication standbys from the listed +servers, accepting 'any' (quorum-based synchronous replication) or +'first' (priority-based synchronous replication) as values.

+
number [Required]
+int +
+

Specifies the number of synchronous standby servers that +transactions must wait for responses from.

+
maxStandbyNamesFromCluster
+int +
+

Specifies the maximum number of local cluster pods that can be +automatically included in the synchronous_standby_names option in +PostgreSQL.

+
standbyNamesPre
+[]string +
+

A user-defined list of application names to be added to +synchronous_standby_names before local cluster pods (the order is +only useful for priority-based synchronous replication).

+
standbyNamesPost
+[]string +
+

A user-defined list of application names to be added to +synchronous_standby_names after local cluster pods (the order is +only useful for priority-based synchronous replication).

+
dataDurability
+DataDurabilityLevel +
+

If set to "required", data durability is strictly enforced. Write operations +with synchronous commit settings (on, remote_write, or remote_apply) will +block if there are insufficient healthy replicas, ensuring data persistence. +If set to "preferred", data durability is maintained when healthy replicas +are available, but the required number of instances will adjust dynamically +if replicas become unavailable. This setting relaxes strict durability enforcement +to allow for operational continuity. This setting is only applicable if both +standbyNamesPre and standbyNamesPost are unset (empty).

+
+ +
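A quorum-based sketch of the stanza; the values are illustrative, and as noted above, `.spec.minSyncReplicas` and `.spec.maxSyncReplicas` still need to be considered:

```yaml
spec:
  postgresql:
    synchronous:
      method: any               # quorum-based synchronous replication
      number: 1                 # transactions wait for one standby
      dataDurability: required  # strict durability enforcement
```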
+ +## SynchronousReplicaConfigurationMethod + +(Alias of `string`) + +**Appears in:** + +- [SynchronousReplicaConfiguration](#postgresql-k8s-enterprisedb-io-v1-SynchronousReplicaConfiguration) + +

SynchronousReplicaConfigurationMethod configures whether to use +quorum based replication or a priority list

+ +
+ +## TDEConfiguration + +**Appears in:** + +- [EPASConfiguration](#postgresql-k8s-enterprisedb-io-v1-EPASConfiguration) + +

TDEConfiguration contains the Transparent Data Encryption configuration

+ + + + + + + + + + + + + + + + + + + + +
FieldDescription
enabled
+bool +
+

True if we want to have TDE enabled

+
secretKeyRef
+core/v1.SecretKeySelector +
+

Reference to the secret that contains the encryption key

+
wrapCommand
+core/v1.SecretKeySelector +
+

WrapCommand is the encrypt command provided by the user

+
unwrapCommand
+core/v1.SecretKeySelector +
+

UnwrapCommand is the decryption command provided by the user

+
passphraseCommand
+core/v1.SecretKeySelector +
+

PassphraseCommand is the command executed to get the passphrase that will be +passed to the OpenSSL command to encrypt and decrypt

+
+ +
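A minimal sketch enabling TDE with a pre-created key secret; the secret name and key are placeholders:

```yaml
spec:
  postgresql:
    epas:
      tde:
        enabled: true
        secretKeyRef:
          name: tde-key-secret   # placeholder secret
          key: key               # placeholder key
```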
+ +## TablespaceConfiguration + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

TablespaceConfiguration is the configuration of a tablespace, and includes +the storage specification for the tablespace

+ + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

The name of the tablespace

+
storage [Required]
+StorageConfiguration +
+

The storage configuration for the tablespace

+
owner
+DatabaseRoleRef +
+

Owner is the PostgreSQL user owning the tablespace

+
temporary
+bool +
+

When set to true, the tablespace will be added as a temp_tablespaces +entry in PostgreSQL, and will be available to automatically house temp +database objects, or other temporary files. Please refer to PostgreSQL +documentation for more information on the temp_tablespaces GUC.

+
+ +
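Assuming tablespaces are declared under `.spec.tablespaces`, here is a sketch with one regular and one temporary tablespace; names and sizes are illustrative:

```yaml
spec:
  tablespaces:               # assumed ClusterSpec field name
    - name: atablespace
      storage:
        size: 1Gi
      owner:
        name: app            # assumed DatabaseRoleRef shape
    - name: tempspace
      temporary: true        # added to temp_tablespaces
      storage:
        size: 2Gi
```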
+ +## TablespaceState + +**Appears in:** + +- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus) + +

TablespaceState represents the state of a tablespace in a cluster

+ + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

Name is the name of the tablespace

+
owner
+string +
+

Owner is the PostgreSQL user owning the tablespace

+
state [Required]
+TablespaceStatus +
+

State is the latest reconciliation state

+
error
+string +
+

Error is the reconciliation error, if any

+
+ +
+ +## TablespaceStatus + +(Alias of `string`) + +**Appears in:** + +- [TablespaceState](#postgresql-k8s-enterprisedb-io-v1-TablespaceState) + +

TablespaceStatus represents the status of a tablespace in the cluster

+ +
+ +## Topology + +**Appears in:** + +- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus) + +

Topology contains the cluster topology

+ + + + + + + + + + + + + + +
FieldDescription
instances
+map[PodName]PodTopologyLabels +
+

Instances contains the pod topology of the instances

+
nodesUsed
+int32 +
+

NodesUsed represents the count of distinct nodes accommodating the instances. +A value of '1' suggests that all instances are hosted on a single node, +implying the absence of High Availability (HA). Ideally, this value should +be the same as the number of instances in the Postgres HA cluster, implying +a shared-nothing architecture on the compute side.

+
successfullyExtracted
+bool +
+

SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors +in synchronous replica election in case of failures

+
+ +
+ +## VolumeSnapshotConfiguration + +**Appears in:** + +- [BackupConfiguration](#postgresql-k8s-enterprisedb-io-v1-BackupConfiguration) + +

VolumeSnapshotConfiguration represents the configuration for the execution of snapshot backups. +A configuration sketch follows the field table below.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
labels
+map[string]string +
+

Labels are key-value pairs that will be added to the .metadata.labels of the snapshot resources.

+
annotations
+map[string]string +
+

Annotations are key-value pairs that will be added to the .metadata.annotations of the snapshot resources.

+
className
+string +
+

ClassName specifies the Snapshot Class to be used for the PG_DATA PersistentVolumeClaim. +It is the default class for the other types if no specific class is present.

+
walClassName
+string +
+

WalClassName specifies the Snapshot Class to be used for the PG_WAL PersistentVolumeClaim.

+
tablespaceClassName
+map[string]string +
+

TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. +It defaults to the PGDATA Snapshot Class, if set.

+
snapshotOwnerReference
+SnapshotOwnerReference +
+

SnapshotOwnerReference indicates the type of owner reference the snapshot should have

+
online
+bool +
+

Whether the default type of backup with volume snapshots is +online/hot (true, default) or offline/cold (false)

+
onlineConfiguration
+OnlineConfiguration +
+

Configuration parameters to control the online/hot backup with volume snapshots

+
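+To put the fields above in context, here is a minimal sketch of a `Cluster`
+backup stanza using volume snapshots. The snapshot class name is an
+illustrative assumption and must match a `VolumeSnapshotClass` available in
+your Kubernetes cluster.
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: snapshot-backup-example  # illustrative name
+spec:
+  instances: 3
+  storage:
+    size: 10Gi
+  backup:
+    volumeSnapshot:
+      # Default snapshot class, used for PGDATA and, unless overridden,
+      # for the other volume types as well (assumed class name)
+      className: csi-snapclass
+      # Online/hot backup is the default; tune it via onlineConfiguration
+      online: true
+      onlineConfiguration:
+        immediateCheckpoint: true
+        waitForArchive: true
+```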
diff --git a/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx b/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx index db307c2dd53..a22200c5352 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx @@ -3,6 +3,8 @@ title: 'PostgreSQL Configuration' originalFilePath: 'src/postgresql_conf.md' --- + + Users that are familiar with PostgreSQL are aware of the existence of the following three files to configure an instance: @@ -71,7 +73,7 @@ operator by applying the following sections in this order: The **global default parameters** are: ```text -archive_mode = 'on' +archive_timeout = '5min' dynamic_shared_memory_type = 'posix' full_page_writes = 'on' logging_collector = 'on' @@ -84,9 +86,11 @@ log_truncate_on_rotation = 'false' max_parallel_workers = '32' max_replication_slots = '32' max_worker_processes = '32' -shared_memory_type = 'mmap' # for PostgreSQL >= 12 only -wal_keep_size = '512MB' # for PostgreSQL >= 13 only -wal_keep_segments = '32' # for PostgreSQL <= 12 only +shared_memory_type = 'mmap' +shared_preload_libraries = '' +ssl_max_protocol_version = 'TLSv1.3' +ssl_min_protocol_version = 'TLSv1.3' +wal_keep_size = '512MB' wal_level = 'logical' wal_log_hints = 'on' wal_sender_timeout = '5s' diff --git a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx index 52c77281de1..f51d106b0c1 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx @@ -3,6 +3,8 @@ title: 'Quickstart' originalFilePath: 'src/quickstart.md' --- + + This section guides you through testing a PostgreSQL cluster on your local machine by deploying EDB Postgres for Kubernetes on a local Kubernetes cluster using either [Kind](https://kind.sigs.k8s.io/) or @@ -220,7 +222,7 @@ In this section we show how to deploy Prometheus and Grafana for observability, and how to create a Grafana Dashboard to monitor EDB Postgres for Kubernetes clusters, and a set of Prometheus Rules defining alert conditions. -We leverage the [Kube-Prometheus stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), +We leverage the [Kube-Prometheus stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) Helm chart, which is maintained by the [Prometheus Community](https://github.com/prometheus-community). Please refer to the project website for additional documentation and background. @@ -238,7 +240,8 @@ If you don't have [Helm](https://helm.sh) installed yet, please follow the system. We need to add the `prometheus-community` helm chart repository, and then -install the *Kube Prometheus stack* using the sample configuration we provide: +install the *Kube Prometheus stack* with our sample configuration +[`kube-stack-config.yaml`](.././samples/monitoring/kube-stack-config.yaml). 
We can accomplish this with the following commands: @@ -252,16 +255,17 @@ helm upgrade --install \ prometheus-community/kube-prometheus-stack ``` -After completion, you will have Prometheus, Grafana and Alert Manager installed with values from the -`kube-stack-config.yaml` file: +After completion, you will have Prometheus, Grafana, and Alert Manager, +configured with the `kube-stack-config.yaml` file: -- From the Prometheus installation, you will have the Prometheus Operator watching for **any** `PodMonitor` - (see [*monitoring*](monitoring.md)). -- The Grafana installation will be watching for a Grafana dashboard `ConfigMap`. +- From the Prometheus installation, you will have the Prometheus Operator + watching for **any** `PodMonitor` (see [*monitoring*](monitoring.md)). +- Alert Manager and Grafana are both enabled. !!! Seealso - For further information about the above command, refer to the [helm install](https://helm.sh/docs/helm/helm_install/) - documentation. + For further information about the above helm commands, refer to the [helm + install](https://helm.sh/docs/helm/helm_install/) + documentation. You can see several Custom Resources have been created: @@ -290,7 +294,7 @@ prometheus-community-kube-prometheus ClusterIP 9090/TCP ### Viewing with Prometheus -At this point, a EDB Postgres for Kubernetes cluster deployed with Monitoring activated +At this point, an EDB Postgres for Kubernetes cluster deployed with monitoring activated would be observable via Prometheus. For example, you could deploy a simple cluster with `PodMonitor` enabled: @@ -321,13 +325,15 @@ kubectl port-forward svc/prometheus-community-kube-prometheus 9090 Then access the Prometheus console locally at: [`http://localhost:9090/`](http://localhost:9090/) -Assuming that the monitoring stack was successfully deployed, and you have a Cluster with `enablePodMonitor: true`, -you should find a series of metrics relating to EDB Postgres for Kubernetes clusters. Again, please -refer to the [*monitoring section*](monitoring.md) for more information. +You should find a series of metrics relating to EDB Postgres for Kubernetes clusters. +Please refer to the [monitoring section](monitoring.md) for more information. ![local prometheus](images/prometheus-local.png) -You can now define some alerts by creating a `prometheusRule`: +You can also monitor the EDB Postgres for Kubernetes operator by creating a PodMonitor to +target it. See the relevant section in the [monitoring page](monitoring.md#monitoring-the-operator-with-prometheus). + +You can define some alerts by creating a `prometheusRule`: ```sh kubectl apply -f \ @@ -347,28 +353,32 @@ we just installed. ### Grafana Dashboard -In our "plain" installation, Grafana is deployed with no predefined dashboards. +In our installation so far, Grafana is deployed with no predefined dashboards. -You can port-forward: +To open Grafana, you can port-forward the grafana service: ```sh kubectl port-forward svc/prometheus-community-grafana 3000:80 ``` -And access Grafana locally at [`http://localhost:3000/`](http://localhost:3000/) -providing the credentials `admin` as username, `prom-operator` as password (defined in `kube-stack-config.yaml`). +and access Grafana locally at [`http://localhost:3000/`](http://localhost:3000/) +providing the credentials `admin` as username, `prom-operator` as password +(defined in `kube-stack-config.yaml`). -EDB Postgres for Kubernetes provides a default dashboard for Grafana as part of the official -[Helm chart](https://github.com/cloudnative-pg/charts).
-You can also download the +EDB Postgres for Kubernetes provides a default dashboard for Grafana in the dedicated +[`grafana-dashboards` repository](https://github.com/cloudnative-pg/grafana-dashboards). +You can download the file [grafana-dashboard.json](https://github.com/cloudnative-pg/grafana-dashboards/blob/main/charts/cluster/grafana-dashboard.json) -file and manually importing it via the GUI. +and manually import it via the GUI (menu: Dashboards > New > Import). +You can now click on the `EDB Postgres for Kubernetes` dashboard just created: + +![local grafana](images/grafana-local.png) !!! Warning     Some graphs in the previous dashboard make use of metrics that are in alpha stage     by the time this was created, like `kubelet_volume_stats_available_bytes` and     `kubelet_volume_stats_capacity_bytes` producing some graphs to show `No data`. -![local grafana](images/grafana-local.png) - -Note that in our local setup, Prometheus and Grafana are configured to automatically discover -and monitor any EDB Postgres for Kubernetes clusters deployed with the Monitoring feature enabled. +Note that in our local setup, Prometheus and Grafana are configured to +automatically discover and monitor any EDB Postgres for Kubernetes clusters deployed with the +Monitoring feature enabled. diff --git a/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx b/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx index 090f8f1dfaf..9acff98cb77 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx @@ -3,6 +3,8 @@ title: 'Recovery' originalFilePath: 'src/recovery.md' --- + + In PostgreSQL terminology, recovery is the process of starting a PostgreSQL instance using an existing backup. The PostgreSQL recovery mechanism is very solid and rich. It also supports point-in-time recovery (PITR), which allows diff --git a/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx b/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx index 51dd6a5e3b7..e7a31c602a5 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx @@ -3,9 +3,14 @@ title: 'Replica clusters' originalFilePath: 'src/replica_cluster.md' --- -A replica cluster is a EDB Postgres for Kubernetes `Cluster` resource designed to -replicate data from another PostgreSQL instance, ideally also managed by -EDB Postgres for Kubernetes. + + +A replica cluster is an independent EDB Postgres for Kubernetes `Cluster` resource whose +defining characteristic is that it replicates from another Postgres instance, +ideally one also managed by EDB Postgres for Kubernetes. Replica clusters can be cascading, +and they can rely solely on object stores to replicate data from +the source, as described further down. Typically, a replica cluster is deployed in a different Kubernetes cluster in another region.
These clusters can be configured to perform cascading diff --git a/product_docs/docs/postgres_for_kubernetes/1/replication.mdx b/product_docs/docs/postgres_for_kubernetes/1/replication.mdx index 4618aa50d7c..2478eacfdbe 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/replication.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/replication.mdx @@ -3,6 +3,8 @@ title: 'Replication' originalFilePath: 'src/replication.md' --- + + Physical replication is one of the strengths of PostgreSQL and one of the reasons why some of the largest organizations in the world have chosen it for the management of their data in business continuity contexts. Primarily used to diff --git a/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx b/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx index a516d7459ae..e91e1808e93 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx @@ -3,6 +3,8 @@ title: 'Resource management' originalFilePath: 'src/resource_management.md' --- + + In a typical Kubernetes cluster, pods run with unlimited resources. By default, they might be allowed to use as much CPU and RAM as needed. diff --git a/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx b/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx index 7268cc5eb0f..6c9c41ca1c9 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx @@ -3,6 +3,8 @@ title: 'Rolling Updates' originalFilePath: 'src/rolling_update.md' --- + + The operator allows changing the PostgreSQL version used in a cluster while applications are running against it. diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples.mdx b/product_docs/docs/postgres_for_kubernetes/1/samples.mdx index 6015c1764ca..2a03ee73bef 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/samples.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/samples.mdx @@ -3,6 +3,8 @@ title: 'Examples' originalFilePath: 'src/samples.md' --- + + The examples show configuration files for setting up your PostgreSQL cluster. 
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-full.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-full.yaml index 8d064fdeaad..0bac0237f89 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-full.yaml +++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-full.yaml @@ -35,7 +35,7 @@ metadata: name: cluster-example-full spec: description: "Example of cluster" - imageName: quay.io/enterprisedb/postgresql:17.4 + imageName: quay.io/enterprisedb/postgresql:17.5 # imagePullSecret is only required if the images are located in a private registry # imagePullSecrets: # - name: private_registry_access diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/k9s/plugins.yml b/product_docs/docs/postgres_for_kubernetes/1/samples/k9s/plugins.yml index 76974204e1b..2ab7984926a 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/samples/k9s/plugins.yml +++ b/product_docs/docs/postgres_for_kubernetes/1/samples/k9s/plugins.yml @@ -26,7 +26,7 @@ plugins: background: false args: - -c - - "kubectl cnp backup $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnp backup $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" postgresql-operator-hibernate-status: shortCut: h description: Hibernate status @@ -36,7 +36,7 @@ plugins: background: false args: - -c - - "kubectl cnp hibernate status $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnp hibernate status $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" postgresql-operator-hibernate: shortCut: Shift-H description: Hibernate @@ -47,7 +47,7 @@ plugins: background: false args: - -c - - "kubectl cnp hibernate on $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnp hibernate on $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" postgresql-operator-hibernate-off: shortCut: Shift-H description: Wake up hibernated cluster in this namespace @@ -58,7 +58,7 @@ plugins: background: false args: - -c - - "kubectl cnp hibernate off $NAME -n $NAME --context \"$CONTEXT\" |& less -R" + - "kubectl cnp hibernate off $NAME -n $NAME --context \"$CONTEXT\" 2>&1 | less -R" postgresql-operator-logs: shortCut: l description: Logs @@ -89,7 +89,7 @@ plugins: background: false args: - -c - - "kubectl cnp reload $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnp reload $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" postgresql-operator-restart: shortCut: Shift-R description: Restart @@ -100,7 +100,7 @@ plugins: background: false args: - -c - - "kubectl cnp restart $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnp restart $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" postgresql-operator-status: shortCut: s description: Status @@ -110,7 +110,7 @@ plugins: background: false args: - -c - - "kubectl cnp status $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnp status $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" postgresql-operator-status-verbose: shortCut: Shift-S description: Status (verbose) @@ -120,4 +120,4 @@ plugins: background: false args: - -c - - "kubectl cnp status $NAME -n $NAMESPACE --context \"$CONTEXT\" --verbose |& less -R" + - "kubectl cnp status $NAME -n $NAMESPACE --context \"$CONTEXT\" --verbose 2>&1 | less -R" diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/kube-stack-config.yaml 
b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/kube-stack-config.yaml index 68c0885fbb3..58df3e420ae 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/kube-stack-config.yaml +++ b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/kube-stack-config.yaml @@ -1,9 +1,6 @@ -# Default values for cnp-sandbox. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. # -# -# Copyright The CloudNativePG Contributors +# Copyright © contributors to EDB Postgres for Kubernetes, established as +# EDB Postgres for Kubernetes a Series of LF Projects, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# SPDX-License-Identifier: Apache-2.0 +# # -- here you can pass the whole values directly to the kube-prometheus-stack chart enabled: true diff --git a/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx b/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx index c64bc7cc0af..bf8dbb0e63f 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx @@ -3,6 +3,8 @@ title: 'Scheduling' originalFilePath: 'src/scheduling.md' --- + + Scheduling, in Kubernetes, is the process responsible for placing a new pod on the best node possible, based on several criteria. @@ -43,7 +45,7 @@ metadata: name: cluster-example spec: instances: 3 - imageName: quay.io/enterprisedb/postgresql:17.4 + imageName: quay.io/enterprisedb/postgresql:17.5 affinity: enablePodAntiAffinity: true # Default value diff --git a/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx b/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx index e0b3e05ce74..a1503825f14 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx @@ -3,6 +3,8 @@ title: 'Client TLS/SSL connections' originalFilePath: 'src/ssl_connections.md' --- + + !!! Seealso "Certificates" See [Certificates](certificates.md) for more details on how EDB Postgres for Kubernetes supports TLS certificates. @@ -176,7 +178,7 @@ Output: version -------------------------------------------------------------------------------------- ------------------ -PostgreSQL 17.4 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat +PostgreSQL 17.5 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5), 64-bit (1 row) ``` diff --git a/product_docs/docs/postgres_for_kubernetes/1/storage.mdx b/product_docs/docs/postgres_for_kubernetes/1/storage.mdx index f526be3575d..0cb2288fa01 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/storage.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/storage.mdx @@ -3,6 +3,8 @@ title: 'Storage' originalFilePath: 'src/storage.md' --- + + Storage is the most critical component in a database workload. Storage must always be available, scale, perform well, and guarantee consistency and durability. 
The same expectations and diff --git a/product_docs/docs/postgres_for_kubernetes/1/tablespaces.mdx b/product_docs/docs/postgres_for_kubernetes/1/tablespaces.mdx index e54ce48a2b9..0a7da9f7c47 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/tablespaces.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/tablespaces.mdx @@ -3,6 +3,8 @@ title: 'Tablespaces' originalFilePath: 'src/tablespaces.md' --- + + A tablespace is a robust and widely embraced feature in database management systems. It offers a powerful means to enhance the vertical scalability of a database by decoupling the physical and logical modeling of @@ -149,11 +151,13 @@ spec: size: 10Gi tablespaces: - name: current - size: 100Gi - storageClass: fastest + storage: + size: 100Gi + storageClass: fastest - name: this_year - size: 500Gi - storageClass: balanced + storage: + size: 500Gi + storageClass: balanced ``` The `yardbirds` cluster example requests 4 persistent volume claims using diff --git a/product_docs/docs/postgres_for_kubernetes/1/tde.mdx b/product_docs/docs/postgres_for_kubernetes/1/tde.mdx index 7794075710b..32cf6dbf65a 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/tde.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/tde.mdx @@ -183,7 +183,7 @@ Then, create a secret containing the custom wrap/unwrap commands. The wrap and unwrap commands will 'wrap' a binary that is in the EPAS image. The binary will interact with the vault API to encrypt/decrypt the EPAS encryption. -The binary needs 4 flags: `--host`, `--secret`, `--key` and `--vault-endpoint`. The +The binary needs 5 flags: `--file`, `--host`, `--secret`, `--key` and `--vault-endpoint`. The `--host` flag is in the format of `http://vault-host:vault-port` and needs to be provided to reach the Vault. The server`--secret` flag is the name of the Kubernetes secret that contains the vault token and the `--key` flag is the key in that secret @@ -227,3 +227,69 @@ spec: name: vault-token key: unwrap ``` + +### Enable TLS + +The binary needs 6 flags: `--file`, `--host`, `--secret`, `--key`, `--vault-endpoint` and `--enable-tls`. +The `--host` flag is in the format of `http://vault-host:vault-port` and needs to be +provided to reach the Vault. The `--secret` flag is the name of the Kubernetes +secret that contains the vault token, and the `--key` flag is the key in that secret +pointing to the vault token. The `--vault-endpoint` flag is the name of the key that +was created inside vault; in the example above it is `pg-tde`. +To enable TLS, `--enable-tls` must be set to `true`. + +```shell +kubectl create secret generic -o yaml vault-token \ + --from-literal=wrap="/bin/vault wrap --file %p --host http://vault:8200 --secret vault-token --key token --vault-endpoint pg-tde --enable-tls true" \ + --from-literal=unwrap="/bin/vault unwrap --file %p --host http://vault:8200 --secret vault-token --key token --vault-endpoint pg-tde --enable-tls true" \ + --from-literal=token="hvs.whatever" +``` + +Then, specify the environment variables in a secret or configMap. + +```shell +kubectl create secret generic -o yaml env-var-secret \ +--from-literal=VAULT_CACERT="/projected/certificate/vault-ca.pem" +``` + +Reference the secret in the Cluster spec `envFrom` section, +so that the binary can use the environment variables, as they will be injected into the pods. +If one or more environment variables refer to files, rely on the `projectedVolumeTemplate` +to mount custom files, and on secrets or configmaps for their contents.
+Following the example, the value of the `tls-vault-secret` key `ca` +is mounted as a file at the path `/projected/certificate/vault-ca.pem`. + +```shell +kubectl create secret generic -o yaml tls-vault-secret --from-file=ca=vault-ca.crt +``` + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: hashicorp-vault-tde +spec: + envFrom: + - secretRef: + name: env-var-secret + projectedVolumeTemplate: + sources: + - secret: + name: tls-vault-secret + items: + - key: ca + path: certificate/vault-ca.pem + instances: 3 + storage: + size: 1Gi + postgresql: + epas: + tde: + enabled: true + wrapCommand: + name: vault-token + key: wrap + unwrapCommand: + name: vault-token + key: unwrap +``` diff --git a/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx b/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx index a98d2ccf8d3..55021ecfeb1 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx @@ -3,6 +3,8 @@ title: 'Troubleshooting' originalFilePath: 'src/troubleshooting.md' --- + + In this page, you can find some basic information on how to troubleshoot EDB Postgres for Kubernetes in your Kubernetes cluster deployment. @@ -223,7 +225,7 @@ Cluster in healthy state Name: cluster-example Namespace: default System ID: 7044925089871458324 -PostgreSQL Image: quay.io/enterprisedb/postgresql:17.4-3 +PostgreSQL Image: quay.io/enterprisedb/postgresql:17.5-3 Primary instance: cluster-example-1 Instances: 3 Ready instances: 3 @@ -291,7 +293,7 @@ kubectl describe cluster -n | grep "Image Name" Output: ```shell - Image Name: quay.io/enterprisedb/postgresql:17.4-3 + Image Name: quay.io/enterprisedb/postgresql:17.5-3 ``` !!! Note diff --git a/product_docs/docs/postgres_for_kubernetes/1/use_cases.mdx b/product_docs/docs/postgres_for_kubernetes/1/use_cases.mdx index 813300a5cec..1a854aa30b1 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/use_cases.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/use_cases.mdx @@ -3,6 +3,8 @@ title: 'Use cases' originalFilePath: 'src/use_cases.md' --- + + EDB Postgres for Kubernetes has been designed to work with applications that reside in the same Kubernetes cluster, for a full cloud native experience. diff --git a/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx b/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx index 7de3864c39d..4179b909d3e 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx @@ -3,6 +3,8 @@ title: 'WAL archiving' originalFilePath: 'src/wal_archiving.md' --- + + WAL archiving is the process that feeds a [WAL archive](backup.md#wal-archive) in EDB Postgres for Kubernetes.