diff --git a/Dockerfile b/Dockerfile index b256ae48..3c9e99a5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -27,7 +27,7 @@ RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} GO111MODULE=on go FROM registry.access.redhat.com/ubi9/ubi-minimal:latest # Version of Operator (build arg) -ARG VERSION="4.0.2" +ARG VERSION="4.1.0-preview" # User to run container as ARG USER="root" diff --git a/Jenkinsfile b/Jenkinsfile index 284be3f5..978fc23a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -240,13 +240,13 @@ boolean isNightly() { } String getVersion() { - def prefix = "4.0.2" + def prefix = "4.1.0-preview" def candidateName = "" if(isNightly()) { def timestamp = new Date().format("yyyy-MM-dd") candidateName = "nightly-${timestamp}" } else { - candidateName = "candidate-${env.BRANCH_NAME}" + candidateName = "${env.BRANCH_NAME}" } def candidateNameMax = 30 - prefix.length() diff --git a/Makefile b/Makefile index b50e8aea..eb338048 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ OPENSHIFT_VERSION="v4.10" # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) # TODO: Version must be pulled from git tags -VERSION ?= 4.0.2 +VERSION ?= 4.1.0-preview # Platforms supported PLATFORMS ?= linux/amd64,linux/arm64 @@ -313,7 +313,7 @@ submodules: ## Pull and update git submodules recursively # Generate bundle manifests and metadata, then validate generated files. # For OpenShift bundles run -# CHANNELS=stable DEFAULT_CHANNEL=stable OPENSHIFT_VERSION=v4.10 IMG=docker.io/aerospike/aerospike-kubernetes-operator-nightly:4.0.2 make bundle +# CHANNELS=stable DEFAULT_CHANNEL=stable OPENSHIFT_VERSION=v4.10 IMG=docker.io/aerospike/aerospike-kubernetes-operator-nightly:4.1.0-preview make bundle .PHONY: bundle bundle: manifests kustomize operator-sdk rm -rf $(ROOT_DIR)/bundle.Dockerfile $(BUNDLE_DIR) diff --git a/README.md b/README.md index a1ebf4cf..c6f7e64a 100644 --- a/README.md +++ b/README.md @@ -44,7 +44,7 @@ Run the following command with the appropriate name and version for the operator ```sh IMAGE_TAG_BASE=aerospike/aerospike-kubernetes-operator-nightly -VERSION=4.0.2 +VERSION=4.1.0-preview make docker-buildx IMG=${IMAGE_TAG_BASE}:${VERSION} PLATFORMS=linux/amd64 ``` **Note**: Change `PLATFORMS` var as per host machine or remove it to build multi-arch image @@ -96,7 +96,7 @@ Set up the environment with image names. ```shell export ACCOUNT=aerospike export IMAGE_TAG_BASE=${ACCOUNT}/aerospike-kubernetes-operator -export VERSION=4.0.2 +export VERSION=4.1.0-preview export IMG=docker.io/${IMAGE_TAG_BASE}-nightly:${VERSION} export BUNDLE_IMG=docker.io/${IMAGE_TAG_BASE}-bundle-nightly:${VERSION} export CATALOG_IMG=docker.io/${IMAGE_TAG_BASE}-catalog-nightly:${VERSION} diff --git a/api/v1/aerospikecluster_types.go b/api/v1/aerospikecluster_types.go index 6d01b1af..5bb362dc 100644 --- a/api/v1/aerospikecluster_types.go +++ b/api/v1/aerospikecluster_types.go @@ -739,8 +739,8 @@ type PersistentVolumeSpec struct { //nolint:govet // for readability // Size of volume. Size resource.Quantity `json:"size"` - // Name for creating PVC for this volume, Name or path should be given - // Name string `json:"name"` + // AccessModes contains the desired access modes the volume should have. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes // +optional AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` //nolint:lll // for readability @@ -812,6 +812,11 @@ type AerospikeStorageSpec struct { //nolint:govet // for readability // +optional LocalStorageClasses []string `json:"localStorageClasses,omitempty"` + // DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + // by AKO. It only considers local storage classes given in the localStorageClasses field. + // +optional + DeleteLocalStorageOnRestart *bool `json:"deleteLocalStorageOnRestart,omitempty"` + // Volumes list to attach to created pods. // +patchMergeKey=name // +patchStrategy=merge @@ -1206,7 +1211,7 @@ type AerospikePodStatus struct { //nolint:govet // for readability // AerospikeCluster is the schema for the AerospikeCluster API // +operator-sdk:csv:customresourcedefinitions:displayName="Aerospike Cluster",resources={{Service, v1},{Pod,v1},{StatefulSet,v1}} -// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.0.2" +// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.1.0-preview" // //nolint:lll // for readability type AerospikeCluster struct { //nolint:govet // for readability diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 1d768905..84aa46dd 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -734,6 +734,11 @@ func (in *AerospikeStorageSpec) DeepCopyInto(out *AerospikeStorageSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.DeleteLocalStorageOnRestart != nil { + in, out := &in.DeleteLocalStorageOnRestart, &out.DeleteLocalStorageOnRestart + *out = new(bool) + **out = **in + } if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes *out = make([]VolumeSpec, len(*in)) diff --git a/api/v1beta1/aerospikebackup_types.go b/api/v1beta1/aerospikebackup_types.go index d3d3af6f..109f8843 100644 --- a/api/v1beta1/aerospikebackup_types.go +++ b/api/v1beta1/aerospikebackup_types.go @@ -86,7 +86,7 @@ type AerospikeBackupStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.0.2" +// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.1.0-preview" // +kubebuilder:printcolumn:name="Backup Service Name",type=string,JSONPath=`.spec.backupService.name` // +kubebuilder:printcolumn:name="Backup Service Namespace",type=string,JSONPath=`.spec.backupService.namespace` // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" diff --git a/api/v1beta1/aerospikebackupservice_types.go b/api/v1beta1/aerospikebackupservice_types.go index 2086313f..1dc8ffd9 100644 --- a/api/v1beta1/aerospikebackupservice_types.go +++ b/api/v1beta1/aerospikebackupservice_types.go @@ -161,7 +161,7 @@ type ServiceContainerSpec struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.0.2" +// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.1.0-preview" // +kubebuilder:printcolumn:name="Image",type=string,JSONPath=`.spec.image` // +kubebuilder:printcolumn:name="Service Type",type=string,JSONPath=`.spec.service.type` // 
+kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` diff --git a/api/v1beta1/aerospikerestore_types.go b/api/v1beta1/aerospikerestore_types.go index 850bd983..1b688471 100644 --- a/api/v1beta1/aerospikerestore_types.go +++ b/api/v1beta1/aerospikerestore_types.go @@ -89,7 +89,7 @@ type AerospikeRestoreStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.0.2" +// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.1.0-preview" // +kubebuilder:printcolumn:name="Backup Service Name",type=string,JSONPath=`.spec.backupService.name` // +kubebuilder:printcolumn:name="Backup Service Namespace",type=string,JSONPath=`.spec.backupService.namespace` // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` diff --git a/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml b/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml index 782448ce..a489bc03 100644 --- a/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml +++ b/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - aerospike-kubernetes-operator/version: 4.0.2 + aerospike-kubernetes-operator/version: 4.1.0-preview controller-gen.kubebuilder.io/version: v0.16.1 name: aerospikebackups.asdb.aerospike.com spec: diff --git a/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml b/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml index 9cebec8c..e1ff12ca 100644 --- a/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml +++ b/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - aerospike-kubernetes-operator/version: 4.0.2 + aerospike-kubernetes-operator/version: 4.1.0-preview controller-gen.kubebuilder.io/version: v0.16.1 name: aerospikebackupservices.asdb.aerospike.com spec: diff --git a/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml b/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml index 98ea6c8b..a36d5819 100644 --- a/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml +++ b/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - aerospike-kubernetes-operator/version: 4.0.2 + aerospike-kubernetes-operator/version: 4.1.0-preview controller-gen.kubebuilder.io/version: v0.16.1 name: aerospikeclusters.asdb.aerospike.com spec: @@ -6221,6 +6221,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -6571,8 +6576,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -7822,6 +7827,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -8172,8 +8182,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -8497,6 +8507,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -8844,8 +8859,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -15362,6 +15377,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -15712,8 +15732,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -16963,6 +16983,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. 
@@ -17313,8 +17338,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -17704,6 +17729,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -18051,8 +18081,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array diff --git a/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml b/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml index 2104c85c..26b12189 100644 --- a/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml +++ b/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - aerospike-kubernetes-operator/version: 4.0.2 + aerospike-kubernetes-operator/version: 4.1.0-preview controller-gen.kubebuilder.io/version: v0.16.1 name: aerospikerestores.asdb.aerospike.com spec: diff --git a/go.mod b/go.mod index e7adb1f8..3c0db80e 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( github.com/go-logr/logr v1.4.2 github.com/onsi/ginkgo/v2 v2.22.2 github.com/onsi/gomega v1.36.2 + github.com/prometheus/client_golang v1.21.1 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.10.0 golang.org/x/crypto v0.38.0 @@ -73,7 +74,6 @@ require ( github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.21.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.63.0 // indirect github.com/prometheus/procfs v0.16.0 // indirect diff --git a/helm-charts/aerospike-backup-service/Chart.yaml b/helm-charts/aerospike-backup-service/Chart.yaml index 5e58dcb2..77e59dfe 100644 --- a/helm-charts/aerospike-backup-service/Chart.yaml +++ b/helm-charts/aerospike-backup-service/Chart.yaml @@ -3,9 +3,9 @@ type: application name: aerospike-backup-service # version tracks chart changes -version: 4.0.2 +version: 4.1.0-preview # appVersion tracks operator version -appVersion: 4.0.2 +appVersion: 4.1.0-preview description: A Helm chart for Aerospike Backup Service Custom Resource icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4 diff --git a/helm-charts/aerospike-backup/Chart.yaml b/helm-charts/aerospike-backup/Chart.yaml index e08e3cc6..f141a688 100644 --- a/helm-charts/aerospike-backup/Chart.yaml +++ b/helm-charts/aerospike-backup/Chart.yaml @@ -3,9 +3,9 @@ type: application name: aerospike-backup # version tracks chart 
changes -version: 4.0.2 +version: 4.1.0-preview # appVersion tracks operator version -appVersion: 4.0.2 +appVersion: 4.1.0-preview description: A Helm chart for Aerospike Backup Custom Resource icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4 diff --git a/helm-charts/aerospike-cluster/Chart.yaml b/helm-charts/aerospike-cluster/Chart.yaml index b2dd79e5..8bc6b2b0 100644 --- a/helm-charts/aerospike-cluster/Chart.yaml +++ b/helm-charts/aerospike-cluster/Chart.yaml @@ -3,9 +3,9 @@ type: application name: aerospike-cluster # version tracks chart changes -version: 4.0.2 +version: 4.1.0-preview # appVersion tracks operator version -appVersion: 4.0.2 +appVersion: 4.1.0-preview description: A Helm chart for Aerospike Cluster Custom Resource icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4 diff --git a/helm-charts/aerospike-kubernetes-operator/Chart.yaml b/helm-charts/aerospike-kubernetes-operator/Chart.yaml index 9acc974c..377ea5e8 100644 --- a/helm-charts/aerospike-kubernetes-operator/Chart.yaml +++ b/helm-charts/aerospike-kubernetes-operator/Chart.yaml @@ -3,9 +3,9 @@ type: application name: aerospike-kubernetes-operator # version tracks chart changes -version: 4.0.2 +version: 4.1.0-preview # appVersion tracks operator version -appVersion: 4.0.2 +appVersion: 4.1.0-preview description: A Helm chart for Aerospike Kubernetes Operator icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4 diff --git a/helm-charts/aerospike-kubernetes-operator/README.md b/helm-charts/aerospike-kubernetes-operator/README.md index efdcd663..ddf4eaae 100644 --- a/helm-charts/aerospike-kubernetes-operator/README.md +++ b/helm-charts/aerospike-kubernetes-operator/README.md @@ -37,7 +37,7 @@ helm install aerospike-kubernetes-operator ./aerospike-kubernetes-operator --set |-------------------------------------|-------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------| | `replicas` | Number of operator replicas | `2` | | `operatorImage.repository` | Operator image repository | `aerospike/aerospike-kubernetes-operator` | -| `operatorImage.tag` | Operator image tag | `4.0.2` | +| `operatorImage.tag` | Operator image tag | `4.1.0-preview` | | `operatorImage.pullPolicy` | Image pull policy | `IfNotPresent` | | `imagePullSecrets` | Secrets containing credentials to pull Operator image from a private registry | `{}` (nil) | | `rbac.create` | Set this to `true` to let helm chart automatically create RBAC resources necessary for operator | `true` | diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml index 782448ce..a489bc03 100644 --- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml +++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - aerospike-kubernetes-operator/version: 4.0.2 + aerospike-kubernetes-operator/version: 4.1.0-preview controller-gen.kubebuilder.io/version: v0.16.1 name: aerospikebackups.asdb.aerospike.com spec: diff --git 
a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml index 9cebec8c..e1ff12ca 100644 --- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml +++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - aerospike-kubernetes-operator/version: 4.0.2 + aerospike-kubernetes-operator/version: 4.1.0-preview controller-gen.kubebuilder.io/version: v0.16.1 name: aerospikebackupservices.asdb.aerospike.com spec: diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml index 98ea6c8b..a36d5819 100644 --- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml +++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - aerospike-kubernetes-operator/version: 4.0.2 + aerospike-kubernetes-operator/version: 4.1.0-preview controller-gen.kubebuilder.io/version: v0.16.1 name: aerospikeclusters.asdb.aerospike.com spec: @@ -6221,6 +6221,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -6571,8 +6576,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -7822,6 +7827,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -8172,8 +8182,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -8497,6 +8507,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -8844,8 +8859,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -15362,6 +15377,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -15712,8 +15732,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -16963,6 +16983,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -17313,8 +17338,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -17704,6 +17729,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. 
@@ -18051,8 +18081,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml index 2104c85c..26b12189 100644 --- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml +++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - aerospike-kubernetes-operator/version: 4.0.2 + aerospike-kubernetes-operator/version: 4.1.0-preview controller-gen.kubebuilder.io/version: v0.16.1 name: aerospikerestores.asdb.aerospike.com spec: diff --git a/helm-charts/aerospike-kubernetes-operator/values.yaml b/helm-charts/aerospike-kubernetes-operator/values.yaml index 08d1eb8b..c9c8f8a2 100644 --- a/helm-charts/aerospike-kubernetes-operator/values.yaml +++ b/helm-charts/aerospike-kubernetes-operator/values.yaml @@ -4,7 +4,7 @@ replicas: 2 ## Operator image operatorImage: repository: aerospike/aerospike-kubernetes-operator - tag: 4.0.2 + tag: 4.1.0-preview pullPolicy: IfNotPresent ## In case the above image is pulled from a registry that requires diff --git a/helm-charts/aerospike-restore/Chart.yaml b/helm-charts/aerospike-restore/Chart.yaml index 36adf7ba..5ee42e50 100644 --- a/helm-charts/aerospike-restore/Chart.yaml +++ b/helm-charts/aerospike-restore/Chart.yaml @@ -3,9 +3,9 @@ type: application name: aerospike-restore # version tracks chart changes -version: 4.0.2 +version: 4.1.0-preview # appVersion tracks operator version -appVersion: 4.0.2 +appVersion: 4.1.0-preview description: A Helm chart for Aerospike Restore Custom Resource icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4 diff --git a/internal/controller/cluster/pod.go b/internal/controller/cluster/pod.go index 586d72b1..1c98a238 100644 --- a/internal/controller/cluster/pod.go +++ b/internal/controller/cluster/pod.go @@ -268,9 +268,40 @@ func (r *SingleClusterReconciler) rollingRestartPods( return res } + clientPolicy := r.getClientPolicy() + setMigrateFillDelay := r.shouldSetMigrateFillDelay(rackState, podsToRestart, restartTypeMap) + + r.Log.Info( + fmt.Sprintf("Adjust migrate-fill-delay prior to pod restart: %t", setMigrateFillDelay), + ) + + // Revert migrate-fill-delay to the original value before restarting active pods. + // This will be a no-op in the first reconcile. + if setMigrateFillDelay { + // TODO: Add an optimisation check to only set MFD in case of a cold restart. + if res := r.setMigrateFillDelay(clientPolicy, &rackState.Rack.AerospikeConfig, false, + ignorablePodNames, + ); !res.IsSuccess { + r.Log.Error(res.Err, + "Failed to set migrate-fill-delay to original value before restarting the running pods") + return res + } + } + if res := r.restartPods(rackState, activePods, restartTypeMap); !res.IsSuccess { return res } + + // Set migrate-fill-delay to 0 to immediately start the migration. It will be reverted to the original value + // in the next reconcile.
+ if setMigrateFillDelay { + if res := r.setMigrateFillDelay(clientPolicy, &rackState.Rack.AerospikeConfig, true, + ignorablePodNames, + ); !res.IsSuccess { + r.Log.Error(res.Err, "Failed to set migrate-fill-delay to `0` after restarting the running pods") + return res + } + } } return common.ReconcileSuccess() @@ -382,7 +413,6 @@ func (r *SingleClusterReconciler) restartPods( restartedPods := make([]*corev1.Pod, 0, len(podsToRestart)) restartedPodNames := make([]string, 0, len(podsToRestart)) restartedASDPodNames := make([]string, 0, len(podsToRestart)) - blockedK8sNodes := sets.NewString(r.aeroCluster.Spec.K8sNodeBlockList...) for idx := range podsToRestart { pod := podsToRestart[idx] @@ -398,10 +428,7 @@ restartedASDPodNames = append(restartedASDPodNames, pod.Name) } else if restartType == podRestart { - if blockedK8sNodes.Has(pod.Spec.NodeName) { - r.Log.Info("Pod found in blocked nodes list, deleting corresponding local PVCs if any", - "podName", pod.Name) - + if r.isLocalPVCDeletionRequired(rackState, pod) { if err := r.deleteLocalPVCs(rackState, pod); err != nil { return common.ReconcileError(err) } @@ -563,9 +590,38 @@ func (r *SingleClusterReconciler) safelyDeletePodsAndEnsureImageUpdated( return res } + clientPolicy := r.getClientPolicy() + setMigrateFillDelay := r.shouldSetMigrateFillDelay(rackState, podsToUpdate, nil) + + r.Log.Info( + fmt.Sprintf("Adjust migrate-fill-delay prior to pod restart: %t", setMigrateFillDelay)) + + // Revert migrate-fill-delay to the original value before restarting active pods. + // This will be a no-op in the first reconcile. + if setMigrateFillDelay { + if res := r.setMigrateFillDelay(clientPolicy, &rackState.Rack.AerospikeConfig, false, + ignorablePodNames, + ); !res.IsSuccess { + r.Log.Error(res.Err, + "Failed to set migrate-fill-delay to original value before upgrading the running pods") + return res + } + } + if res := r.deletePodAndEnsureImageUpdated(rackState, activePods); !res.IsSuccess { return res } + + // Set migrate-fill-delay to 0 to immediately start the migration. It will be reverted to the original value + // in the next reconcile. + if setMigrateFillDelay { + if res := r.setMigrateFillDelay(clientPolicy, &rackState.Rack.AerospikeConfig, true, + ignorablePodNames, + ); !res.IsSuccess { + r.Log.Error(res.Err, "Failed to set migrate-fill-delay to `0` after upgrading the running pods") + return res + } + } } return common.ReconcileSuccess() @@ -580,14 +636,9 @@ func (r *SingleClusterReconciler) deletePodAndEnsureImageUpdated( return common.ReconcileError(err) } - blockedK8sNodes := sets.NewString(r.aeroCluster.Spec.K8sNodeBlockList...)
- // Delete pods for _, pod := range podsToUpdate { - if blockedK8sNodes.Has(pod.Spec.NodeName) { - r.Log.Info("Pod found in blocked nodes list, deleting corresponding local PVCs if any", - "podName", pod.Name) - + if r.isLocalPVCDeletionRequired(rackState, pod) { if err := r.deleteLocalPVCs(rackState, pod); err != nil { return common.ReconcileError(err) } @@ -607,6 +658,22 @@ func (r *SingleClusterReconciler) deletePodAndEnsureImageUpdated( return r.ensurePodsImageUpdated(podsToUpdate) } +func (r *SingleClusterReconciler) isLocalPVCDeletionRequired(rackState *RackState, pod *corev1.Pod) bool { + if utils.ContainsString(r.aeroCluster.Spec.K8sNodeBlockList, pod.Spec.NodeName) { + r.Log.Info("Pod found in blocked nodes list, deleting corresponding local PVCs if any", + "podName", pod.Name) + return true + } + + if asdbv1.GetBool(rackState.Rack.Storage.DeleteLocalStorageOnRestart) { + r.Log.Info("deleteLocalStorageOnRestart flag is enabled, deleting corresponding local PVCs if any", + "podName", pod.Name) + return true + } + + return false +} + func (r *SingleClusterReconciler) ensurePodsImageUpdated(podsToCheck []*corev1.Pod) common.ReconcileResult { podNames := getPodNames(podsToCheck) updatedPods := sets.Set[string]{} @@ -1651,3 +1718,48 @@ func (r *SingleClusterReconciler) podsToRestart() (quickRestarts, podRestarts se return quickRestarts, podRestarts } + +// shouldSetMigrateFillDelay determines if migrate-fill-delay should be set. +// It only returns true if the following conditions are met: +// 1. DeleteLocalStorageOnRestart is set to true. +// 2. At least one pod needs to be restarted. +// 3. At least one persistent volume is using a local storage class. +func (r *SingleClusterReconciler) shouldSetMigrateFillDelay(rackState *RackState, + podsToRestart []*corev1.Pod, restartTypeMap map[string]RestartType) bool { + if !asdbv1.GetBool(rackState.Rack.Storage.DeleteLocalStorageOnRestart) { + return false + } + + var podRestartNeeded bool + + // If restartTypeMap is nil, we assume that a pod restart is needed. + if restartTypeMap == nil { + podRestartNeeded = true + } else { + for idx := range podsToRestart { + pod := podsToRestart[idx] + restartType := restartTypeMap[pod.Name] + + if restartType == podRestart { + podRestartNeeded = true + break + } + } + } + + if !podRestartNeeded { + return false + } + + localStorageClassSet := sets.NewString(rackState.Rack.Storage.LocalStorageClasses...) + + for idx := range rackState.Rack.Storage.Volumes { + volume := &rackState.Rack.Storage.Volumes[idx] + if volume.Source.PersistentVolume != nil && + localStorageClassSet.Has(volume.Source.PersistentVolume.StorageClass) { + return true + } + } + + return false +} diff --git a/internal/controller/cluster/rack.go b/internal/controller/cluster/rack.go index 20f6ae1f..33b54dc3 100644 --- a/internal/controller/cluster/rack.go +++ b/internal/controller/cluster/rack.go @@ -614,10 +614,10 @@ func (r *SingleClusterReconciler) reconcileRack( } if failedPods == nil { - // Revert migrate-fill-delay to original value if it was set to 0 during scale down. + // Revert migrate-fill-delay to the original value if it was set to 0 during scale down. // Reset will be done if there is scale-down or Rack redistribution. - // This check won't cover a scenario where scale-down operation was done and then reverted to previous value - // before the scale down could complete. 
+ // This check won't cover a scenario where a scale-down operation was done and then reverted to the previous + // value before the scale down could complete. if (r.aeroCluster.Status.Size > r.aeroCluster.Spec.Size) || (!r.IsStatusEmpty() && len(r.aeroCluster.Status.RackConfig.Racks) != len(r.aeroCluster.Spec.RackConfig.Racks)) { if res = r.setMigrateFillDelay( diff --git a/internal/controller/cluster/reconciler.go b/internal/controller/cluster/reconciler.go index b16f327f..70b3d35c 100644 --- a/internal/controller/cluster/reconciler.go +++ b/internal/controller/cluster/reconciler.go @@ -221,9 +221,9 @@ func (r *SingleClusterReconciler) Reconcile() (result ctrl.Result, recErr error) // Use policy from spec after setting up access control policy := r.getClientPolicy() - // Revert migrate-fill-delay to original value if it was set to 0 during scale down. + // Revert migrate-fill-delay to the original value if it was set to a different value while processing racks. // Passing the first rack from the list as all the racks will have the same migrate-fill-delay - // Redundant safe check to revert migrate-fill-delay if previous revert operation missed/skipped somehow + // Redundant safe check to revert migrate-fill-delay if the previous revert operation was missed/skipped somehow if res := r.setMigrateFillDelay( policy, &r.aeroCluster.Spec.RackConfig.Racks[0].AerospikeConfig, false, ignorablePodNames, diff --git a/internal/webhook/v1/storage.go b/internal/webhook/v1/storage.go index ec513162..04b03be1 100644 --- a/internal/webhook/v1/storage.go +++ b/internal/webhook/v1/storage.go @@ -217,6 +217,12 @@ func getAerospikeStorageList(storage *asdbv1.AerospikeStorageSpec, onlyPV bool) func validateStorage( storage *asdbv1.AerospikeStorageSpec, podSpec *asdbv1.AerospikePodSpec, ) error { + if asdbv1.GetBool(storage.DeleteLocalStorageOnRestart) && len(storage.LocalStorageClasses) == 0 { + return fmt.Errorf( + "localStorageClasses cannot be empty if deleteLocalStorageOnRestart is set", + ) + } + reservedPaths := map[string]int{ // Reserved mount paths for the operator.
"/etc/aerospike": 1, diff --git a/test/cluster/batch_restart_pods_test.go b/test/cluster/batch_restart_pods_test.go index 4a12419c..dad59e46 100644 --- a/test/cluster/batch_restart_pods_test.go +++ b/test/cluster/batch_restart_pods_test.go @@ -15,7 +15,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/v4/api/v1" - "github.com/aerospike/aerospike-kubernetes-operator/v4/pkg/utils" "github.com/aerospike/aerospike-kubernetes-operator/v4/test" ) @@ -479,20 +478,14 @@ func BatchUpgrade(ctx goctx.Context, clusterNamespacedName types.NamespacedName) }) } -func isBatchRestart(aeroCluster *asdbv1.AerospikeCluster) bool { - // Wait for starting the pod restart process - for { - readyPods := getReadyPods(aeroCluster) - - unreadyPods := int(aeroCluster.Spec.Size) - len(readyPods) - if unreadyPods > 0 { - break - } - } +func isBatchRestart(ctx goctx.Context, aeroCluster *asdbv1.AerospikeCluster) bool { + err := waitForOperatorToStartPodRestart(ctx, k8sClient, aeroCluster) + Expect(err).ToNot(HaveOccurred()) - // Operator should restart batch of pods which will make multiple pods unready + // Operator should restart a batch of pods which will make multiple pods unready for i := 0; i < 100; i++ { - readyPods := getReadyPods(aeroCluster) + readyPods, err := getReadyPods(aeroCluster, k8sClient) + Expect(err).ToNot(HaveOccurred()) unreadyPods := int(aeroCluster.Spec.Size) - len(readyPods) if unreadyPods > 1 { @@ -503,21 +496,6 @@ func isBatchRestart(aeroCluster *asdbv1.AerospikeCluster) bool { return false } -func getReadyPods(aeroCluster *asdbv1.AerospikeCluster) []string { - podList, err := getPodList(aeroCluster, k8sClient) - Expect(err).ToNot(HaveOccurred()) - - var readyPods []string - - for podIndex := range podList.Items { - if utils.IsPodRunningAndReady(&podList.Items[podIndex]) { - readyPods = append(readyPods, podList.Items[podIndex].Name) - } - } - - return readyPods -} - func updateClusterForBatchRestart( k8sClient client.Client, ctx goctx.Context, aeroCluster *asdbv1.AerospikeCluster, @@ -526,7 +504,7 @@ func updateClusterForBatchRestart( return err } - if !isBatchRestart(aeroCluster) { + if !isBatchRestart(ctx, aeroCluster) { return fmt.Errorf("looks like pods are not restarting in batch") } diff --git a/test/cluster/cluster_helper.go b/test/cluster/cluster_helper.go index 4a342647..61402751 100644 --- a/test/cluster/cluster_helper.go +++ b/test/cluster/cluster_helper.go @@ -66,6 +66,7 @@ var aerospikeVolumeInitMethodDeleteFiles = asdbv1.AerospikeVolumeMethodDeleteFil var ( retryInterval = time.Second * 30 + shortRetryInterval = time.Second * 1 cascadeDeleteFalse = false cascadeDeleteTrue = true logger = logr.Discard() @@ -547,7 +548,7 @@ func validateAerospikeConfigServiceClusterUpdate( func validateMigrateFillDelay( ctx goctx.Context, k8sClient client.Client, log logr.Logger, - clusterNamespacedName types.NamespacedName, expectedMigFillDelay int64, + clusterNamespacedName types.NamespacedName, expectedMigFillDelay int64, retryInt *time.Duration, ) error { aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) if err != nil { @@ -567,9 +568,16 @@ func validateMigrateFillDelay( return err } + interval := retryInterval + + if retryInt != nil { + interval = *retryInt + } + asinfo := info.NewAsInfo(log, host, getClientPolicy(aeroCluster, k8sClient)) + err = wait.PollUntilContextTimeout(ctx, - retryInterval, getTimeout(1), true, func(goctx.Context) (done bool, err error) { + interval, getTimeout(1), true, 
func(goctx.Context) (done bool, err error) { confs, err := getAsConfig(asinfo, "service") if err != nil { return false, err @@ -1696,3 +1704,40 @@ func CheckDataInCluster( return data, nil } + +func getReadyPods(aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client) ([]string, error) { + podList, err := getPodList(aeroCluster, k8sClient) + if err != nil { + return nil, err + } + + var readyPods []string + + for podIndex := range podList.Items { + if utils.IsPodRunningAndReady(&podList.Items[podIndex]) { + readyPods = append(readyPods, podList.Items[podIndex].Name) + } + } + + return readyPods, nil +} + +func waitForOperatorToStartPodRestart(ctx goctx.Context, k8sClient client.Client, + aeroCluster *asdbv1.AerospikeCluster) error { + // Wait for starting the pod restart process + return wait.PollUntilContextTimeout(ctx, + 1*time.Second, getTimeout(aeroCluster.Spec.Size), true, func(goctx.Context) (done bool, err error) { + readyPods, err := getReadyPods(aeroCluster, k8sClient) + if err != nil { + return false, err + } + + unreadyPods := int(aeroCluster.Spec.Size) - len(readyPods) + if unreadyPods > 0 { + return true, nil + } + + return false, nil + }, + ) +} diff --git a/test/cluster/cluster_test.go b/test/cluster/cluster_test.go index f0193d66..2ca79baa 100644 --- a/test/cluster/cluster_test.go +++ b/test/cluster/cluster_test.go @@ -331,7 +331,7 @@ func ScaleDownWithMigrateFillDelay(ctx goctx.Context) { Expect(err).ToNot(HaveOccurred()) // verify that migrate-fill-delay is set to 0 while scaling down - err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, 0) + err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, 0, nil) Expect(err).ToNot(HaveOccurred()) err = waitForAerospikeCluster( @@ -341,7 +341,7 @@ func ScaleDownWithMigrateFillDelay(ctx goctx.Context) { Expect(err).ToNot(HaveOccurred()) // verify that migrate-fill-delay is reverted to original value after scaling down - err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, migrateFillDelay) + err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, migrateFillDelay, nil) Expect(err).ToNot(HaveOccurred()) }, ) diff --git a/test/cluster/k8snode_block_list_test.go b/test/cluster/k8snode_block_list_test.go index f5f596c9..4aea385b 100644 --- a/test/cluster/k8snode_block_list_test.go +++ b/test/cluster/k8snode_block_list_test.go @@ -105,7 +105,7 @@ var _ = Describe( Expect(err).ToNot(HaveOccurred()) By("Verifying if the pod is migrated to other nodes and pod pvcs are not deleted") - validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, true) + validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, false) }, ) @@ -123,7 +123,7 @@ var _ = Describe( Expect(err).ToNot(HaveOccurred()) By("Verifying if the pod is migrated to other nodes and pod pvcs are not deleted") - validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, true) + validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, false) }, ) @@ -139,7 +139,7 @@ var _ = Describe( Expect(err).ToNot(HaveOccurred()) By("Verifying if the pod is migrated to other nodes and pod pvcs are not deleted") - validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, true) + validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, false) }, ) @@ -155,7 +155,7 @@ var _ = Describe( Expect(err).ToNot(HaveOccurred()) By("Verifying if the pod is migrated to other nodes and pod local pvcs are deleted") - validatePodAndPVCMigration(ctx, podName, oldK8sNode, 
oldPvcInfo, false) + validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, true) }, ) @@ -181,7 +181,7 @@ var _ = Describe( Expect(err).ToNot(HaveOccurred()) By("Verifying if the failed pod is migrated to other nodes and pod pvcs are not deleted") - validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, true) + validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, false) }, ) }, @@ -210,7 +210,7 @@ func extractPodPVC(pod *corev1.Pod) (map[string]types.UID, error) { return pvcUIDMap, nil } -func validatePVCDeletion(ctx context.Context, pvcUIDMap map[string]types.UID, shouldDelete bool) error { +func validatePVCDeletion(ctx context.Context, pvcUIDMap map[string]types.UID, shouldDeletePVC bool) error { pvc := &corev1.PersistentVolumeClaim{} for pvcName, pvcUID := range pvcUIDMap { @@ -222,12 +222,14 @@ func validatePVCDeletion(ctx context.Context, pvcUIDMap map[string]types.UID, sh return err } - if shouldDelete && pvc.UID != pvcUID { - return fmt.Errorf("PVC %s is unintentionally deleted", pvcName) - } - - if !shouldDelete && pvc.UID == pvcUID { - return fmt.Errorf("PVC %s is not deleted", pvcName) + if shouldDeletePVC { + if pvc.UID == pvcUID { + return fmt.Errorf("PVC %s is not deleted", pvcName) + } + } else { + if pvc.UID != pvcUID { + return fmt.Errorf("PVC %s is unintentionally deleted", pvcName) + } } } @@ -235,12 +237,12 @@ func validatePVCDeletion(ctx context.Context, pvcUIDMap map[string]types.UID, sh } func validatePodAndPVCMigration(ctx context.Context, podName, oldK8sNode string, - oldPvcInfo map[string]types.UID, shouldDelete bool) { + oldPvcInfo map[string]types.UID, shouldDeletePVC bool) { pod := &corev1.Pod{} err := k8sClient.Get(ctx, test.GetNamespacedName(podName, namespace), pod) Expect(err).ToNot(HaveOccurred()) Expect(pod.Spec.NodeName).ToNot(Equal(oldK8sNode)) - err = validatePVCDeletion(ctx, oldPvcInfo, shouldDelete) + err = validatePVCDeletion(ctx, oldPvcInfo, shouldDeletePVC) Expect(err).ToNot(HaveOccurred()) } diff --git a/test/cluster/local_pvc_delete_test.go b/test/cluster/local_pvc_delete_test.go new file mode 100644 index 00000000..9df8893d --- /dev/null +++ b/test/cluster/local_pvc_delete_test.go @@ -0,0 +1,249 @@ +package cluster + +import ( + goctx "context" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/v4/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/v4/pkg/utils" + "github.com/aerospike/aerospike-kubernetes-operator/v4/test" + lib "github.com/aerospike/aerospike-management-lib" +) + +var _ = Describe( + "LocalPVCDelete", func() { + ctx := goctx.TODO() + clusterName := fmt.Sprintf("local-pvc-%d", GinkgoParallelProcess()) + migrateFillDelay := int64(300) + clusterNamespacedName := test.GetNamespacedName(clusterName, namespace) + + Context("When doing valid operations", func() { + AfterEach( + func() { + aeroCluster := &asdbv1.AerospikeCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + } + Expect(DeleteCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + Expect(CleanupPVC(k8sClient, aeroCluster.Namespace, aeroCluster.Name)).ToNot(HaveOccurred()) + }, + ) + + Context("When doing rolling restart", func() { + It("Should delete the local PVCs when deleteLocalStorageOnRestart is set and set MFD dynamically", + func() { + aeroCluster := createDummyAerospikeCluster( + clusterNamespacedName, 2, + ) + aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.To(true) + aeroCluster.Spec.Storage.LocalStorageClasses = []string{storageClass} + Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + oldPvcInfoPerPod, err := extractClusterPVC(ctx, k8sClient, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Updating pod metadata to trigger rolling restart") + aeroCluster.Spec.PodSpec.AerospikeObjectMeta = asdbv1.AerospikeObjectMeta{ + Labels: map[string]string{ + "test-label": "test-value", + }, + } + + updateAndValidateIntermediateMFD(ctx, k8sClient, aeroCluster, migrateFillDelay) + + By("Validating PVCs deletion") + validateClusterPVCDeletion(ctx, oldPvcInfoPerPod) + }) + + It("Should delete the local PVCs of only 1 rack when deleteLocalStorageOnRestart is set "+ + "at the rack level and set MFD dynamically", func() { + aeroCluster := createDummyAerospikeCluster( + clusterNamespacedName, 2, + ) + rackConfig := asdbv1.RackConfig{ + Racks: getDummyRackConf(1, 2), + } + storage := lib.DeepCopy(&aeroCluster.Spec.Storage).(*asdbv1.AerospikeStorageSpec) + rackConfig.Racks[0].InputStorage = storage + rackConfig.Racks[0].InputStorage.DeleteLocalStorageOnRestart = ptr.To(true) + rackConfig.Racks[0].InputStorage.LocalStorageClasses = []string{storageClass} + aeroCluster.Spec.RackConfig = rackConfig + Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + oldPvcInfoPerPod, err := extractClusterPVC(ctx, k8sClient, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Updating pod metadata to trigger rolling restart") + aeroCluster.Spec.PodSpec.AerospikeObjectMeta = asdbv1.AerospikeObjectMeta{ + Labels: map[string]string{ + "test-label": "test-value", + }, + } + + updateAndValidateIntermediateMFD(ctx, k8sClient, aeroCluster, migrateFillDelay) + + By("Validating PVCs deletion") + validateClusterPVCDeletion(ctx, oldPvcInfoPerPod, clusterName+"-2-0") + }) + }) + + Context("When doing upgrade", func() { + It("Should delete the local PVCs when deleteLocalStorageOnRestart is set and set MFD dynamically", + func() { + aeroCluster := createDummyAerospikeCluster( + clusterNamespacedName, 2, + ) + Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + 
oldPvcInfoPerPod, err := extractClusterPVC(ctx, k8sClient, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + oldPodIDs, err := getPodIDs(ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Enable DeleteLocalStorageOnRestart and set localStorageClasses") + aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.To(true) + aeroCluster.Spec.Storage.LocalStorageClasses = []string{storageClass} + Expect(updateCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + operationTypeMap := map[string]asdbv1.OperationKind{ + aeroCluster.Name + "-0-0": noRestart, + aeroCluster.Name + "-0-1": noRestart, + } + + err = validateOperationTypes(ctx, aeroCluster, oldPodIDs, operationTypeMap) + Expect(err).ToNot(HaveOccurred()) + + By("Updating the image") + Expect(UpdateClusterImage(aeroCluster, nextImage)).ToNot(HaveOccurred()) + + updateAndValidateIntermediateMFD(ctx, k8sClient, aeroCluster, migrateFillDelay) + + By("Validating PVCs deletion") + validateClusterPVCDeletion(ctx, oldPvcInfoPerPod) + }) + + It("Should delete the local PVCs of only 1 rack when deleteLocalStorageOnRestart is set "+ + "at the rack level and set MFD dynamically", func() { + aeroCluster := createDummyAerospikeCluster( + clusterNamespacedName, 2, + ) + rackConfig := asdbv1.RackConfig{ + Racks: getDummyRackConf(1, 2), + } + storage := lib.DeepCopy(&aeroCluster.Spec.Storage).(*asdbv1.AerospikeStorageSpec) + rackConfig.Racks[0].InputStorage = storage + rackConfig.Racks[0].InputStorage.DeleteLocalStorageOnRestart = ptr.To(true) + rackConfig.Racks[0].InputStorage.LocalStorageClasses = []string{storageClass} + aeroCluster.Spec.RackConfig = rackConfig + Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + oldPvcInfoPerPod, err := extractClusterPVC(ctx, k8sClient, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Updating the image") + Expect(UpdateClusterImage(aeroCluster, nextImage)).ToNot(HaveOccurred()) + + updateAndValidateIntermediateMFD(ctx, k8sClient, aeroCluster, migrateFillDelay) + + By("Validating PVCs deletion") + validateClusterPVCDeletion(ctx, oldPvcInfoPerPod, clusterName+"-2-0") + }) + }) + }) + + Context("When doing invalid operations", func() { + It("Should fail when deleteLocalStorageOnRestart is set but localStorageClasses is not set", func() { + aeroCluster := createDummyAerospikeCluster( + clusterNamespacedName, 2, + ) + aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.To(true) + Expect(DeployCluster(k8sClient, ctx, aeroCluster)).To(HaveOccurred()) + }) + }) + }) + +func validateClusterPVCDeletion(ctx goctx.Context, oldPvcInfoPerPod map[string]map[string]types.UID, + podsToSkipFromPVCDeletion ...string) { + for podName := range oldPvcInfoPerPod { + shouldDeletePVC := true + + if utils.ContainsString(podsToSkipFromPVCDeletion, podName) { + shouldDeletePVC = false + } + + err := validatePVCDeletion(ctx, oldPvcInfoPerPod[podName], shouldDeletePVC) + Expect(err).ToNot(HaveOccurred()) + } +} + +func extractClusterPVC(ctx goctx.Context, k8sClient client.Client, aeroCluster *asdbv1.AerospikeCluster, +) (map[string]map[string]types.UID, error) { + podList, err := getClusterPodList(k8sClient, ctx, aeroCluster) + if err != nil { + return nil, err + } + + oldPvcInfoPerPod := make(map[string]map[string]types.UID) + + for idx := range podList.Items { + pvcMap, err := extractPodPVC(&podList.Items[idx]) + if err != nil { + return nil, err + } + + oldPvcInfoPerPod[podList.Items[idx].Name] = pvcMap + } + + return oldPvcInfoPerPod, nil +} + +func 
updateAndValidateIntermediateMFD(ctx goctx.Context, k8sClient client.Client, aeroCluster *asdbv1.AerospikeCluster, + expectedMigFillDelay int64) { + aeroCluster.Spec.AerospikeConfig.Value["service"].(map[string]interface{})["migrate-fill-delay"] = + expectedMigFillDelay + Expect(updateClusterWithNoWait(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + clusterNamespacedName := utils.GetNamespacedName(aeroCluster) + + err := waitForOperatorToStartPodRestart(ctx, k8sClient, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Validating the migrate-fill-delay is set to the given value before the restart") + + err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, expectedMigFillDelay, + &shortRetryInterval) + Expect(err).ToNot(HaveOccurred()) + + By("Validating the migrate-fill-delay is set to 0 after the restart (pod is running)") + + err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, 0, + &shortRetryInterval) + Expect(err).ToNot(HaveOccurred()) + + By("Validating the migrate-fill-delay is set to the given value before the restart of the next pod") + + err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, expectedMigFillDelay, + &shortRetryInterval) + Expect(err).ToNot(HaveOccurred()) + + err = waitForAerospikeCluster( + k8sClient, ctx, aeroCluster, int(aeroCluster.Spec.Size), retryInterval, + getTimeout(2), []asdbv1.AerospikeClusterPhase{asdbv1.AerospikeClusterCompleted}, + ) + Expect(err).ToNot(HaveOccurred()) + + By("Validating the migrate-fill-delay is set to the given value after the operation is completed") + + err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, expectedMigFillDelay, + &shortRetryInterval) + Expect(err).ToNot(HaveOccurred()) +} diff --git a/test/cluster/services_test.go b/test/cluster/services_test.go index ac2900c3..b89b380c 100644 --- a/test/cluster/services_test.go +++ b/test/cluster/services_test.go @@ -46,7 +46,7 @@ var _ = Describe( }, ) It( - "Validate create LB create", func() { + "Validate create LB", func() { By("DeployCluster with LB") clusterNamespacedName := test.GetNamespacedName( "load-balancer-create", namespace,
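For reference, the patch adds `deleteLocalStorageOnRestart` to `AerospikeStorageSpec` (cluster-wide, or per rack via `rackConfig.racks[].storage`, which is how the new `local_pvc_delete_test.go` sets it through `InputStorage`) plus a webhook rule that rejects the flag when `localStorageClasses` is empty. A minimal sketch of a CR exercising the new field, assuming a local storage class named `local-ssd`; the image tag and volume layout are placeholders, and other required fields such as `aerospikeConfig` are elided:

```yaml
apiVersion: asdb.aerospike.com/v1
kind: AerospikeCluster
metadata:
  name: aerocluster
  namespace: aerospike
spec:
  size: 2
  image: aerospike/aerospike-server-enterprise:8.0.0.2  # placeholder tag
  storage:
    # New in 4.1.0-preview: delete local PVCs when AKO restarts or reschedules a pod.
    deleteLocalStorageOnRestart: true
    # Must be non-empty when deleteLocalStorageOnRestart is set (webhook-enforced).
    localStorageClasses:
      - local-ssd  # assumed storage class name
    volumes:
      - name: workdir
        source:
          persistentVolume:
            storageClass: local-ssd  # matches an entry in localStorageClasses
            volumeMode: Filesystem
            size: 1Gi
        aerospike:
          path: /opt/aerospike
```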
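On the migrate-fill-delay handling in `pod.go`: when `shouldSetMigrateFillDelay` returns true (flag enabled, a pod restart pending, and at least one persistent volume on a local storage class), the reconciler restores the configured delay before a batch is restarted, sets it to 0 once the restarted pods are back so fill migrations into the cleaned pods start immediately, and restores it again in the next reconcile; this is exactly the sequence `updateAndValidateIntermediateMFD` asserts. The value being toggled is the ordinary service-context setting; a sketch, with 300 mirroring the `migrateFillDelay` used in the new test:

```yaml
spec:
  aerospikeConfig:
    service:
      # Delay (seconds) before fill migrations start. With deleteLocalStorageOnRestart
      # enabled, AKO alternates this between the configured value and 0 around each
      # restart batch, restoring it once the operation completes.
      migrate-fill-delay: 300
```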