From c4cc7a698f47f128a5cf5f6117c32f19a27e8930 Mon Sep 17 00:00:00 2001 From: Abhisek Dwivedi Date: Thu, 15 May 2025 16:20:21 +0530 Subject: [PATCH 1/8] feat: add support for local PVC cleanup during upgrade and rolling restart [KO-427] --- api/v1/aerospikecluster_types.go | 5 + api/v1/zz_generated.deepcopy.go | 5 + .../asdb.aerospike.com_aerospikeclusters.yaml | 30 +++ go.mod | 2 +- ..._aerospikeclusters.asdb.aerospike.com.yaml | 30 +++ internal/controller/cluster/pod.go | 29 ++- internal/webhook/v1/storage.go | 6 + test/cluster/k8snode_block_list_test.go | 30 +-- test/cluster/local_pvc_delete_test.go | 189 ++++++++++++++++++ test/cluster/services_test.go | 2 +- 10 files changed, 301 insertions(+), 27 deletions(-) create mode 100644 test/cluster/local_pvc_delete_test.go diff --git a/api/v1/aerospikecluster_types.go b/api/v1/aerospikecluster_types.go index 6d01b1af..db6a9ce5 100644 --- a/api/v1/aerospikecluster_types.go +++ b/api/v1/aerospikecluster_types.go @@ -812,6 +812,11 @@ type AerospikeStorageSpec struct { //nolint:govet // for readability // +optional LocalStorageClasses []string `json:"localStorageClasses,omitempty"` + // DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + // by AKO. It only considers local storage classes given in the localStorageClasses field. + // +optional + DeleteLocalStorageOnRestart *bool `json:"deleteLocalStorageOnRestart,omitempty"` + // Volumes list to attach to created pods. // +patchMergeKey=name // +patchStrategy=merge diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 1d768905..84aa46dd 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -734,6 +734,11 @@ func (in *AerospikeStorageSpec) DeepCopyInto(out *AerospikeStorageSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.DeleteLocalStorageOnRestart != nil { + in, out := &in.DeleteLocalStorageOnRestart, &out.DeleteLocalStorageOnRestart + *out = new(bool) + **out = **in + } if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes *out = make([]VolumeSpec, len(*in)) diff --git a/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml b/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml index 98ea6c8b..37bcfcb1 100644 --- a/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml +++ b/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml @@ -6221,6 +6221,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -7822,6 +7827,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. 
@@ -8497,6 +8507,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -15362,6 +15377,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -16963,6 +16983,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -17704,6 +17729,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. 
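A minimal usage sketch of the new storage option introduced above, showing how the flag is meant to be set in an AerospikeCluster spec. Only deleteLocalStorageOnRestart and localStorageClasses are defined by this change; the storage class name "local-ssd" and the example volume layout are illustrative assumptions. Note that the webhook change later in this patch rejects deleteLocalStorageOnRestart: true when localStorageClasses is empty.

apiVersion: asdb.aerospike.com/v1
kind: AerospikeCluster
spec:
  storage:
    deleteLocalStorageOnRestart: true      # new field: delete matching local PVCs when AKO restarts/reschedules a pod
    localStorageClasses:
      - local-ssd                          # assumed local storage class name
    volumes:
      - name: workdir
        aerospike:
          path: /opt/aerospike
        source:
          persistentVolume:
            storageClass: local-ssd        # PVCs bound to a listed class are deleted before the pod comes back
            volumeMode: Filesystem
            size: 1Gi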
diff --git a/go.mod b/go.mod index 85bcb97d..50f24f47 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/aerospike/aerospike-client-go/v8 v8.2.1 github.com/aerospike/aerospike-management-lib v1.7.1-0.20250519063642-57d55e3eddf8 github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d + github.com/aws/smithy-go v1.22.3 github.com/deckarep/golang-set/v2 v2.3.1 github.com/evanphx/json-patch v4.12.0+incompatible github.com/go-logr/logr v1.4.2 @@ -30,7 +31,6 @@ require ( require ( github.com/aerospike/backup-go v0.3.2-0.20250330113002-7fb1b5be7ffc // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect - github.com/aws/smithy-go v1.22.3 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml index 98ea6c8b..37bcfcb1 100644 --- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml +++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml @@ -6221,6 +6221,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -7822,6 +7827,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -8497,6 +8507,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -15362,6 +15377,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. 
@@ -16963,6 +16983,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. @@ -17704,6 +17729,11 @@ spec: description: CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container. type: integer + deleteLocalStorageOnRestart: + description: |- + DeleteLocalStorageOnRestart enables the deletion of local storage PVCs when a pod is restarted or rescheduled + by AKO. It only considers local storage classes given in the localStorageClasses field. + type: boolean filesystemVolumePolicy: description: FileSystemVolumePolicy contains default policies for filesystem volumes. diff --git a/internal/controller/cluster/pod.go b/internal/controller/cluster/pod.go index 1e9a0e66..c976f6ed 100644 --- a/internal/controller/cluster/pod.go +++ b/internal/controller/cluster/pod.go @@ -363,7 +363,6 @@ func (r *SingleClusterReconciler) restartPods( restartedPods := make([]*corev1.Pod, 0, len(podsToRestart)) restartedPodNames := make([]string, 0, len(podsToRestart)) restartedASDPodNames := make([]string, 0, len(podsToRestart)) - blockedK8sNodes := sets.NewString(r.aeroCluster.Spec.K8sNodeBlockList...) for idx := range podsToRestart { pod := podsToRestart[idx] @@ -379,10 +378,7 @@ func (r *SingleClusterReconciler) restartPods( restartedASDPodNames = append(restartedASDPodNames, pod.Name) } else if restartType == podRestart { - if blockedK8sNodes.Has(pod.Spec.NodeName) { - r.Log.Info("Pod found in blocked nodes list, deleting corresponding local PVCs if any", - "podName", pod.Name) - + if r.isLocalPVCDeletionRequired(rackState, pod) { if err := r.deleteLocalPVCs(rackState, pod); err != nil { return common.ReconcileError(err) } @@ -561,14 +557,9 @@ func (r *SingleClusterReconciler) deletePodAndEnsureImageUpdated( return common.ReconcileError(err) } - blockedK8sNodes := sets.NewString(r.aeroCluster.Spec.K8sNodeBlockList...) 
- // Delete pods for _, pod := range podsToUpdate { - if blockedK8sNodes.Has(pod.Spec.NodeName) { - r.Log.Info("Pod found in blocked nodes list, deleting corresponding local PVCs if any", - "podName", pod.Name) - + if r.isLocalPVCDeletionRequired(rackState, pod) { if err := r.deleteLocalPVCs(rackState, pod); err != nil { return common.ReconcileError(err) } @@ -588,6 +579,22 @@ func (r *SingleClusterReconciler) deletePodAndEnsureImageUpdated( return r.ensurePodsImageUpdated(podsToUpdate) } +func (r *SingleClusterReconciler) isLocalPVCDeletionRequired(rackState *RackState, pod *corev1.Pod) bool { + if utils.ContainsString(r.aeroCluster.Spec.K8sNodeBlockList, pod.Spec.NodeName) { + r.Log.Info("Pod found in blocked nodes list, deleting corresponding local PVCs if any", + "podName", pod.Name) + return true + } + + if asdbv1.GetBool(rackState.Rack.Storage.DeleteLocalStorageOnRestart) { + r.Log.Info("deleteLocalStorageOnRestart flag is enabled, deleting corresponding local PVCs if any", + "podName", pod.Name) + return true + } + + return false +} + func (r *SingleClusterReconciler) ensurePodsImageUpdated(podsToCheck []*corev1.Pod) common.ReconcileResult { podNames := getPodNames(podsToCheck) updatedPods := sets.Set[string]{} diff --git a/internal/webhook/v1/storage.go b/internal/webhook/v1/storage.go index ec513162..65bec817 100644 --- a/internal/webhook/v1/storage.go +++ b/internal/webhook/v1/storage.go @@ -217,6 +217,12 @@ func getAerospikeStorageList(storage *asdbv1.AerospikeStorageSpec, onlyPV bool) func validateStorage( storage *asdbv1.AerospikeStorageSpec, podSpec *asdbv1.AerospikePodSpec, ) error { + if asdbv1.GetBool(storage.DeleteLocalStorageOnRestart) && len(storage.LocalStorageClasses) == 0 { + return fmt.Errorf( + "deleteLocalStorageOnRestart is set to true, but no local storage classes are defined", + ) + } + reservedPaths := map[string]int{ // Reserved mount paths for the operator. 
"/etc/aerospike": 1, diff --git a/test/cluster/k8snode_block_list_test.go b/test/cluster/k8snode_block_list_test.go index f5f596c9..4aea385b 100644 --- a/test/cluster/k8snode_block_list_test.go +++ b/test/cluster/k8snode_block_list_test.go @@ -105,7 +105,7 @@ var _ = Describe( Expect(err).ToNot(HaveOccurred()) By("Verifying if the pod is migrated to other nodes and pod pvcs are not deleted") - validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, true) + validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, false) }, ) @@ -123,7 +123,7 @@ var _ = Describe( Expect(err).ToNot(HaveOccurred()) By("Verifying if the pod is migrated to other nodes and pod pvcs are not deleted") - validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, true) + validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, false) }, ) @@ -139,7 +139,7 @@ var _ = Describe( Expect(err).ToNot(HaveOccurred()) By("Verifying if the pod is migrated to other nodes and pod pvcs are not deleted") - validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, true) + validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, false) }, ) @@ -155,7 +155,7 @@ var _ = Describe( Expect(err).ToNot(HaveOccurred()) By("Verifying if the pod is migrated to other nodes and pod local pvcs are deleted") - validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, false) + validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, true) }, ) @@ -181,7 +181,7 @@ var _ = Describe( Expect(err).ToNot(HaveOccurred()) By("Verifying if the failed pod is migrated to other nodes and pod pvcs are not deleted") - validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, true) + validatePodAndPVCMigration(ctx, podName, oldK8sNode, oldPvcInfo, false) }, ) }, @@ -210,7 +210,7 @@ func extractPodPVC(pod *corev1.Pod) (map[string]types.UID, error) { return pvcUIDMap, nil } -func validatePVCDeletion(ctx context.Context, pvcUIDMap map[string]types.UID, shouldDelete bool) error { +func validatePVCDeletion(ctx context.Context, pvcUIDMap map[string]types.UID, shouldDeletePVC bool) error { pvc := &corev1.PersistentVolumeClaim{} for pvcName, pvcUID := range pvcUIDMap { @@ -222,12 +222,14 @@ func validatePVCDeletion(ctx context.Context, pvcUIDMap map[string]types.UID, sh return err } - if shouldDelete && pvc.UID != pvcUID { - return fmt.Errorf("PVC %s is unintentionally deleted", pvcName) - } - - if !shouldDelete && pvc.UID == pvcUID { - return fmt.Errorf("PVC %s is not deleted", pvcName) + if shouldDeletePVC { + if pvc.UID == pvcUID { + return fmt.Errorf("PVC %s is not deleted", pvcName) + } + } else { + if pvc.UID != pvcUID { + return fmt.Errorf("PVC %s is unintentionally deleted", pvcName) + } } } @@ -235,12 +237,12 @@ func validatePVCDeletion(ctx context.Context, pvcUIDMap map[string]types.UID, sh } func validatePodAndPVCMigration(ctx context.Context, podName, oldK8sNode string, - oldPvcInfo map[string]types.UID, shouldDelete bool) { + oldPvcInfo map[string]types.UID, shouldDeletePVC bool) { pod := &corev1.Pod{} err := k8sClient.Get(ctx, test.GetNamespacedName(podName, namespace), pod) Expect(err).ToNot(HaveOccurred()) Expect(pod.Spec.NodeName).ToNot(Equal(oldK8sNode)) - err = validatePVCDeletion(ctx, oldPvcInfo, shouldDelete) + err = validatePVCDeletion(ctx, oldPvcInfo, shouldDeletePVC) Expect(err).ToNot(HaveOccurred()) } diff --git a/test/cluster/local_pvc_delete_test.go b/test/cluster/local_pvc_delete_test.go new file mode 100644 index 00000000..860a9620 --- /dev/null +++ 
b/test/cluster/local_pvc_delete_test.go @@ -0,0 +1,189 @@ +package cluster + +import ( + goctx "context" + "fmt" + + "github.com/aws/smithy-go/ptr" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/v4/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/v4/pkg/utils" + "github.com/aerospike/aerospike-kubernetes-operator/v4/test" + lib "github.com/aerospike/aerospike-management-lib" +) + +var _ = Describe( + "LocalPVCDelete", func() { + ctx := goctx.TODO() + clusterName := fmt.Sprintf("local-pvc-%d", GinkgoParallelProcess()) + clusterNamespacedName := test.GetNamespacedName(clusterName, namespace) + + Context("When doing valid operations", func() { + AfterEach( + func() { + aeroCluster := &asdbv1.AerospikeCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + } + Expect(DeleteCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + Expect(CleanupPVC(k8sClient, aeroCluster.Namespace, aeroCluster.Name)).ToNot(HaveOccurred()) + }, + ) + + Context("When doing rolling restart", func() { + It("Should delete the local PVCs when deleteLocalStorageOnRestart is set", func() { + aeroCluster := createDummyAerospikeCluster( + clusterNamespacedName, 2, + ) + aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.Bool(true) + aeroCluster.Spec.Storage.LocalStorageClasses = []string{storageClass} + Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + oldPvcInfoPerPod, err := extractClusterPVC(ctx, k8sClient, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Updating pod metadata to trigger rolling restart") + aeroCluster.Spec.PodSpec.AerospikeObjectMeta = asdbv1.AerospikeObjectMeta{ + Labels: map[string]string{ + "test-label": "test-value", + }, + } + + Expect(updateCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + By("Validating PVCs deletion") + validateClusterPVCDeletion(ctx, oldPvcInfoPerPod) + }) + + It("Should delete the local PVCs of only 1 rack when deleteLocalStorageOnRestart is set "+ + "at the rack level", func() { + aeroCluster := createDummyAerospikeCluster( + clusterNamespacedName, 2, + ) + rackConfig := asdbv1.RackConfig{ + Racks: getDummyRackConf(1, 2), + } + storage := lib.DeepCopy(&aeroCluster.Spec.Storage).(*asdbv1.AerospikeStorageSpec) + rackConfig.Racks[0].InputStorage = storage + rackConfig.Racks[0].InputStorage.DeleteLocalStorageOnRestart = ptr.Bool(true) + rackConfig.Racks[0].InputStorage.LocalStorageClasses = []string{storageClass} + aeroCluster.Spec.RackConfig = rackConfig + Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + oldPvcInfoPerPod, err := extractClusterPVC(ctx, k8sClient, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Updating pod metadata to trigger rolling restart") + aeroCluster.Spec.PodSpec.AerospikeObjectMeta = asdbv1.AerospikeObjectMeta{ + Labels: map[string]string{ + "test-label": "test-value", + }, + } + + Expect(updateCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + By("Validating PVCs deletion") + validateClusterPVCDeletion(ctx, oldPvcInfoPerPod, clusterName+"-2-0") + }) + }) + + Context("When doing upgrade", func() { + It("Should delete the local PVCs when deleteLocalStorageOnRestart is set", func() { + aeroCluster := createDummyAerospikeCluster( + clusterNamespacedName, 2, + ) + 
aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.Bool(true) + aeroCluster.Spec.Storage.LocalStorageClasses = []string{storageClass} + Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + oldPvcInfoPerPod, err := extractClusterPVC(ctx, k8sClient, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Updating the image") + Expect(UpdateClusterImage(aeroCluster, nextImage)).ToNot(HaveOccurred()) + Expect(updateCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + By("Validating PVCs deletion") + validateClusterPVCDeletion(ctx, oldPvcInfoPerPod) + }) + + It("Should delete the local PVCs of only 1 rack when deleteLocalStorageOnRestart is set "+ + "at the rack level", func() { + aeroCluster := createDummyAerospikeCluster( + clusterNamespacedName, 2, + ) + rackConfig := asdbv1.RackConfig{ + Racks: getDummyRackConf(1, 2), + } + storage := lib.DeepCopy(&aeroCluster.Spec.Storage).(*asdbv1.AerospikeStorageSpec) + rackConfig.Racks[0].InputStorage = storage + rackConfig.Racks[0].InputStorage.DeleteLocalStorageOnRestart = ptr.Bool(true) + rackConfig.Racks[0].InputStorage.LocalStorageClasses = []string{storageClass} + aeroCluster.Spec.RackConfig = rackConfig + Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + oldPvcInfoPerPod, err := extractClusterPVC(ctx, k8sClient, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Updating the image") + Expect(UpdateClusterImage(aeroCluster, nextImage)).ToNot(HaveOccurred()) + Expect(updateCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + By("Validating PVCs deletion") + validateClusterPVCDeletion(ctx, oldPvcInfoPerPod, clusterName+"-2-0") + }) + }) + }) + + Context("When doing invalid operations", func() { + It("Should fail when deleteLocalStorageOnRestart is set but localStorageClasses is not set", func() { + aeroCluster := createDummyAerospikeCluster( + clusterNamespacedName, 2, + ) + aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.Bool(true) + Expect(DeployCluster(k8sClient, ctx, aeroCluster)).To(HaveOccurred()) + }) + }) + }) + +func validateClusterPVCDeletion(ctx goctx.Context, oldPvcInfoPerPod map[string]map[string]types.UID, + podsToSkipFromPVCDeletion ...string) { + for podName := range oldPvcInfoPerPod { + shouldDeletePVC := true + + if utils.ContainsString(podsToSkipFromPVCDeletion, podName) { + shouldDeletePVC = false + } + + err := validatePVCDeletion(ctx, oldPvcInfoPerPod[podName], shouldDeletePVC) + Expect(err).ToNot(HaveOccurred()) + } +} + +func extractClusterPVC(ctx goctx.Context, k8sClient client.Client, aeroCluster *asdbv1.AerospikeCluster, +) (map[string]map[string]types.UID, error) { + podList, err := getClusterPodList(k8sClient, ctx, aeroCluster) + if err != nil { + return nil, err + } + + oldPvcInfoPerPod := make(map[string]map[string]types.UID) + + for idx := range podList.Items { + pvcMap, err := extractPodPVC(&podList.Items[idx]) + if err != nil { + return nil, err + } + + oldPvcInfoPerPod[podList.Items[idx].Name] = pvcMap + } + + return oldPvcInfoPerPod, nil +} diff --git a/test/cluster/services_test.go b/test/cluster/services_test.go index ac2900c3..b89b380c 100644 --- a/test/cluster/services_test.go +++ b/test/cluster/services_test.go @@ -46,7 +46,7 @@ var _ = Describe( }, ) It( - "Validate create LB create", func() { + "Validate create LB", func() { By("DeployCluster with LB") clusterNamespacedName := test.GetNamespacedName( "load-balancer-create", namespace, From 0d87a9edbf6c6b9ce67b09765ace44c3211d0e2f Mon Sep 17 00:00:00 2001 From: 
Abhisek Dwivedi Date: Fri, 23 May 2025 16:39:13 +0530 Subject: [PATCH 2/8] Incorporated review comments --- test/cluster/local_pvc_delete_test.go | 27 +++++++++++++++++------ test/cluster/on_demand_operations_test.go | 6 +++-- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/test/cluster/local_pvc_delete_test.go b/test/cluster/local_pvc_delete_test.go index 860a9620..f59366e5 100644 --- a/test/cluster/local_pvc_delete_test.go +++ b/test/cluster/local_pvc_delete_test.go @@ -4,11 +4,11 @@ import ( goctx "context" "fmt" - "github.com/aws/smithy-go/ptr" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/v4/api/v1" @@ -42,7 +42,7 @@ var _ = Describe( aeroCluster := createDummyAerospikeCluster( clusterNamespacedName, 2, ) - aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.Bool(true) + aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.To(true) aeroCluster.Spec.Storage.LocalStorageClasses = []string{storageClass} Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) @@ -72,7 +72,7 @@ var _ = Describe( } storage := lib.DeepCopy(&aeroCluster.Spec.Storage).(*asdbv1.AerospikeStorageSpec) rackConfig.Racks[0].InputStorage = storage - rackConfig.Racks[0].InputStorage.DeleteLocalStorageOnRestart = ptr.Bool(true) + rackConfig.Racks[0].InputStorage.DeleteLocalStorageOnRestart = ptr.To(true) rackConfig.Racks[0].InputStorage.LocalStorageClasses = []string{storageClass} aeroCluster.Spec.RackConfig = rackConfig Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) @@ -99,12 +99,25 @@ var _ = Describe( aeroCluster := createDummyAerospikeCluster( clusterNamespacedName, 2, ) - aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.Bool(true) - aeroCluster.Spec.Storage.LocalStorageClasses = []string{storageClass} Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) oldPvcInfoPerPod, err := extractClusterPVC(ctx, k8sClient, aeroCluster) Expect(err).ToNot(HaveOccurred()) + oldPodIDs, err := getPodIDs(ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Enable DeleteLocalStorageOnRestart and set localStorageClasses") + aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.To(true) + aeroCluster.Spec.Storage.LocalStorageClasses = []string{storageClass} + Expect(updateCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + operationTypeMap := map[string]asdbv1.OperationKind{ + aeroCluster.Name + "-0-0": noRestart, + aeroCluster.Name + "-0-1": noRestart, + } + + err = validateOperationTypes(ctx, aeroCluster, oldPodIDs, operationTypeMap) + Expect(err).ToNot(HaveOccurred()) By("Updating the image") Expect(UpdateClusterImage(aeroCluster, nextImage)).ToNot(HaveOccurred()) @@ -124,7 +137,7 @@ var _ = Describe( } storage := lib.DeepCopy(&aeroCluster.Spec.Storage).(*asdbv1.AerospikeStorageSpec) rackConfig.Racks[0].InputStorage = storage - rackConfig.Racks[0].InputStorage.DeleteLocalStorageOnRestart = ptr.Bool(true) + rackConfig.Racks[0].InputStorage.DeleteLocalStorageOnRestart = ptr.To(true) rackConfig.Racks[0].InputStorage.LocalStorageClasses = []string{storageClass} aeroCluster.Spec.RackConfig = rackConfig Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) @@ -147,7 +160,7 @@ var _ = Describe( aeroCluster := createDummyAerospikeCluster( clusterNamespacedName, 2, ) - 
aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.Bool(true) + aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.To(true) Expect(DeployCluster(k8sClient, ctx, aeroCluster)).To(HaveOccurred()) }) }) diff --git a/test/cluster/on_demand_operations_test.go b/test/cluster/on_demand_operations_test.go index 90b0e191..224ae82b 100644 --- a/test/cluster/on_demand_operations_test.go +++ b/test/cluster/on_demand_operations_test.go @@ -14,6 +14,8 @@ import ( "github.com/aerospike/aerospike-kubernetes-operator/v4/test" ) +const noRestart = "noRestart" + var _ = Describe( "OnDemandOperations", func() { @@ -202,7 +204,7 @@ var _ = Describe( operationTypeMap := map[string]asdbv1.OperationKind{ aeroCluster.Name + "-1-0": asdbv1.OperationPodRestart, - aeroCluster.Name + "-1-1": "noRestart", + aeroCluster.Name + "-1-1": noRestart, } err = validateOperationTypes(ctx, aeroCluster, oldPodIDs, operationTypeMap) @@ -465,7 +467,7 @@ func validateOperationTypes(ctx goctx.Context, aeroCluster *asdbv1.AerospikeClus if newPodPidMap[podName].podUID == pid[podName].podUID { return fmt.Errorf("failed to restart pod %s", podName) } - case "noRestart": + case noRestart: if newPodPidMap[podName].podUID != pid[podName].podUID || newPodPidMap[podName].asdPID != pid[podName].asdPID { return fmt.Errorf("unexpected restart pod %s", podName) } From e911ab241976ed680ca012ca1909bf64d2c27e77 Mon Sep 17 00:00:00 2001 From: Abhisek Dwivedi Date: Wed, 11 Jun 2025 12:15:20 +0530 Subject: [PATCH 3/8] Merge branch 'master' into feature/KO-427-local-pvc-cleanup --- test/cluster/on_demand_operations_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/cluster/on_demand_operations_test.go b/test/cluster/on_demand_operations_test.go index 224ae82b..5777d3dc 100644 --- a/test/cluster/on_demand_operations_test.go +++ b/test/cluster/on_demand_operations_test.go @@ -14,8 +14,6 @@ import ( "github.com/aerospike/aerospike-kubernetes-operator/v4/test" ) -const noRestart = "noRestart" - var _ = Describe( "OnDemandOperations", func() { From 7ccca95201a5d6ad0377dda02429926a674c76f5 Mon Sep 17 00:00:00 2001 From: Abhisek Dwivedi Date: Tue, 17 Jun 2025 21:39:14 +0530 Subject: [PATCH 4/8] Incorporated review comments --- api/v1/aerospikecluster_types.go | 4 ++-- .../asdb.aerospike.com_aerospikeclusters.yaml | 24 +++++++++---------- ..._aerospikeclusters.asdb.aerospike.com.yaml | 24 +++++++++---------- internal/webhook/v1/storage.go | 2 +- 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/api/v1/aerospikecluster_types.go b/api/v1/aerospikecluster_types.go index db6a9ce5..b1eddf4d 100644 --- a/api/v1/aerospikecluster_types.go +++ b/api/v1/aerospikecluster_types.go @@ -739,8 +739,8 @@ type PersistentVolumeSpec struct { //nolint:govet // for readability // Size of volume. Size resource.Quantity `json:"size"` - // Name for creating PVC for this volume, Name or path should be given - // Name string `json:"name"` + // AccessModes contains the desired access modes the volume should have. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes // +optional AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` //nolint:lll // for readability diff --git a/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml b/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml index 37bcfcb1..4e2eb4ec 100644 --- a/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml +++ b/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml @@ -6576,8 +6576,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -8182,8 +8182,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -8859,8 +8859,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -15732,8 +15732,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -17338,8 +17338,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -18081,8 +18081,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml index 37bcfcb1..4e2eb4ec 100644 --- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml +++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml @@ -6576,8 +6576,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -8182,8 +8182,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -8859,8 +8859,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -15732,8 +15732,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -17338,8 +17338,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array @@ -18081,8 +18081,8 @@ spec: properties: accessModes: description: |- - Name for creating PVC for this volume, Name or path should be given - Name string `json:"name"` + AccessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes items: type: string type: array diff --git a/internal/webhook/v1/storage.go b/internal/webhook/v1/storage.go index 65bec817..04b03be1 100644 --- a/internal/webhook/v1/storage.go +++ b/internal/webhook/v1/storage.go @@ -219,7 +219,7 @@ func validateStorage( ) error { if asdbv1.GetBool(storage.DeleteLocalStorageOnRestart) && len(storage.LocalStorageClasses) == 0 { return fmt.Errorf( - "deleteLocalStorageOnRestart is set to true, but no local storage classes are defined", + "localStorageClasses cannot be empty if deleteLocalStorageOnRestart is set", ) } From 355d50a23e1f1ba46687846435a6f3738e0842b1 Mon Sep 17 00:00:00 2001 From: Abhisek Dwivedi Date: Fri, 20 Jun 2025 19:31:05 +0530 Subject: [PATCH 5/8] feat: Optimise upgrade and rolling restart with dynamic migrate-fill-delay [KO-427] (#383) * feat: optimise rolling-restart and upgrade flow with dynamic MFD [KO-427] --- internal/controller/cluster/pod.go | 105 +++++++++++++++ internal/controller/cluster/rack.go | 6 +- internal/controller/cluster/reconciler.go | 4 +- test/cluster/batch_restart_pods_test.go | 36 +----- test/cluster/cluster_helper.go | 49 ++++++- test/cluster/cluster_test.go | 4 +- test/cluster/local_pvc_delete_test.go | 151 ++++++++++++++-------- 7 files changed, 265 insertions(+), 90 deletions(-) diff --git a/internal/controller/cluster/pod.go b/internal/controller/cluster/pod.go index ec454386..1c98a238 100644 --- a/internal/controller/cluster/pod.go +++ b/internal/controller/cluster/pod.go @@ -268,9 +268,40 @@ func (r *SingleClusterReconciler) rollingRestartPods( return res } + clientPolicy := r.getClientPolicy() + setMigrateFillDelay := r.shouldSetMigrateFillDelay(rackState, podsToRestart, restartTypeMap) + + r.Log.Info( + fmt.Sprintf("Adjust migrate-fill-delay prior to pod restart: %t", setMigrateFillDelay), + ) + + // Revert migrate-fill-delay to the original value before restarting active pods. + // This will be a no-op in the first reconcile + if setMigrateFillDelay { + // Add an optimisation check to only run set MFD in case of cold restart. + if res := r.setMigrateFillDelay(clientPolicy, &rackState.Rack.AerospikeConfig, false, + ignorablePodNames, + ); !res.IsSuccess { + r.Log.Error(res.Err, + "Failed to set migrate-fill-delay to original value before restarting the running pods") + return res + } + } + if res := r.restartPods(rackState, activePods, restartTypeMap); !res.IsSuccess { return res } + + // Set migrate-fill-delay O to immediately start the migration. Will be reverted back to the original value + // in the next reconcile. + if setMigrateFillDelay { + if res := r.setMigrateFillDelay(clientPolicy, &rackState.Rack.AerospikeConfig, true, + ignorablePodNames, + ); !res.IsSuccess { + r.Log.Error(res.Err, "Failed to set migrate-fill-delay to `0` after restarting the running pods") + return res + } + } } return common.ReconcileSuccess() @@ -559,9 +590,38 @@ func (r *SingleClusterReconciler) safelyDeletePodsAndEnsureImageUpdated( return res } + clientPolicy := r.getClientPolicy() + setMigrateFillDelay := r.shouldSetMigrateFillDelay(rackState, podsToUpdate, nil) + + r.Log.Info( + fmt.Sprintf("Adjust migrate-fill-delay prior to pod restart: %t", setMigrateFillDelay)) + + // Revert migrate-fill-delay to the original value before restarting active pods. 
+ // This will be a no-op in the first reconcile + if setMigrateFillDelay { + if res := r.setMigrateFillDelay(clientPolicy, &rackState.Rack.AerospikeConfig, false, + ignorablePodNames, + ); !res.IsSuccess { + r.Log.Error(res.Err, + "Failed to set migrate-fill-delay to original value before upgrading the running pods") + return res + } + } + if res := r.deletePodAndEnsureImageUpdated(rackState, activePods); !res.IsSuccess { return res } + + // Set migrate-fill-delay O to immediately start the migration. Will be reverted back to the original value + // in the next reconcile. + if setMigrateFillDelay { + if res := r.setMigrateFillDelay(clientPolicy, &rackState.Rack.AerospikeConfig, true, + ignorablePodNames, + ); !res.IsSuccess { + r.Log.Error(res.Err, "Failed to set migrate-fill-delay to `0` after upgrading the running pods") + return res + } + } } return common.ReconcileSuccess() @@ -1658,3 +1718,48 @@ func (r *SingleClusterReconciler) podsToRestart() (quickRestarts, podRestarts se return quickRestarts, podRestarts } + +// shouldSetMigrateFillDelay determines if migrate-fill-delay should be set. +// It only returns true if the following conditions are met: +// 1. DeleteLocalStorageOnRestart is set to true. +// 2. At least one pod needs to be restarted. +// 3. At least one persistent volume is using a local storage class. +func (r *SingleClusterReconciler) shouldSetMigrateFillDelay(rackState *RackState, + podsToRestart []*corev1.Pod, restartTypeMap map[string]RestartType) bool { + if !asdbv1.GetBool(rackState.Rack.Storage.DeleteLocalStorageOnRestart) { + return false + } + + var podRestartNeeded bool + + // If restartTypeMap is nil, we assume that a pod restart is needed. + if restartTypeMap == nil { + podRestartNeeded = true + } else { + for idx := range podsToRestart { + pod := podsToRestart[idx] + restartType := restartTypeMap[pod.Name] + + if restartType == podRestart { + podRestartNeeded = true + break + } + } + } + + if !podRestartNeeded { + return false + } + + localStorageClassSet := sets.NewString(rackState.Rack.Storage.LocalStorageClasses...) + + for idx := range rackState.Rack.Storage.Volumes { + volume := &rackState.Rack.Storage.Volumes[idx] + if volume.Source.PersistentVolume != nil && + localStorageClassSet.Has(volume.Source.PersistentVolume.StorageClass) { + return true + } + } + + return false +} diff --git a/internal/controller/cluster/rack.go b/internal/controller/cluster/rack.go index 20f6ae1f..33b54dc3 100644 --- a/internal/controller/cluster/rack.go +++ b/internal/controller/cluster/rack.go @@ -614,10 +614,10 @@ func (r *SingleClusterReconciler) reconcileRack( } if failedPods == nil { - // Revert migrate-fill-delay to original value if it was set to 0 during scale down. + // Revert migrate-fill-delay to the original value if it was set to 0 during scale down. // Reset will be done if there is scale-down or Rack redistribution. - // This check won't cover a scenario where scale-down operation was done and then reverted to previous value - // before the scale down could complete. + // This check won't cover a scenario where a scale-down operation was done and then reverted to the previous + // value before the scale down could complete. 
if (r.aeroCluster.Status.Size > r.aeroCluster.Spec.Size) || (!r.IsStatusEmpty() && len(r.aeroCluster.Status.RackConfig.Racks) != len(r.aeroCluster.Spec.RackConfig.Racks)) { if res = r.setMigrateFillDelay( diff --git a/internal/controller/cluster/reconciler.go b/internal/controller/cluster/reconciler.go index b16f327f..70b3d35c 100644 --- a/internal/controller/cluster/reconciler.go +++ b/internal/controller/cluster/reconciler.go @@ -221,9 +221,9 @@ func (r *SingleClusterReconciler) Reconcile() (result ctrl.Result, recErr error) // Use policy from spec after setting up access control policy := r.getClientPolicy() - // Revert migrate-fill-delay to original value if it was set to 0 during scale down. + // Revert migrate-fill-delay to the original value if it was set to a different value while processing racks. // Passing the first rack from the list as all the racks will have the same migrate-fill-delay - // Redundant safe check to revert migrate-fill-delay if previous revert operation missed/skipped somehow + // Redundant safe check to revert migrate-fill-delay if the previous revert operation missed/skipped somehow if res := r.setMigrateFillDelay( policy, &r.aeroCluster.Spec.RackConfig.Racks[0].AerospikeConfig, false, ignorablePodNames, diff --git a/test/cluster/batch_restart_pods_test.go b/test/cluster/batch_restart_pods_test.go index 4a12419c..dad59e46 100644 --- a/test/cluster/batch_restart_pods_test.go +++ b/test/cluster/batch_restart_pods_test.go @@ -15,7 +15,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/v4/api/v1" - "github.com/aerospike/aerospike-kubernetes-operator/v4/pkg/utils" "github.com/aerospike/aerospike-kubernetes-operator/v4/test" ) @@ -479,20 +478,14 @@ func BatchUpgrade(ctx goctx.Context, clusterNamespacedName types.NamespacedName) }) } -func isBatchRestart(aeroCluster *asdbv1.AerospikeCluster) bool { - // Wait for starting the pod restart process - for { - readyPods := getReadyPods(aeroCluster) - - unreadyPods := int(aeroCluster.Spec.Size) - len(readyPods) - if unreadyPods > 0 { - break - } - } +func isBatchRestart(ctx goctx.Context, aeroCluster *asdbv1.AerospikeCluster) bool { + err := waitForOperatorToStartPodRestart(ctx, k8sClient, aeroCluster) + Expect(err).ToNot(HaveOccurred()) - // Operator should restart batch of pods which will make multiple pods unready + // Operator should restart a batch of pods which will make multiple pods unready for i := 0; i < 100; i++ { - readyPods := getReadyPods(aeroCluster) + readyPods, err := getReadyPods(aeroCluster, k8sClient) + Expect(err).ToNot(HaveOccurred()) unreadyPods := int(aeroCluster.Spec.Size) - len(readyPods) if unreadyPods > 1 { @@ -503,21 +496,6 @@ func isBatchRestart(aeroCluster *asdbv1.AerospikeCluster) bool { return false } -func getReadyPods(aeroCluster *asdbv1.AerospikeCluster) []string { - podList, err := getPodList(aeroCluster, k8sClient) - Expect(err).ToNot(HaveOccurred()) - - var readyPods []string - - for podIndex := range podList.Items { - if utils.IsPodRunningAndReady(&podList.Items[podIndex]) { - readyPods = append(readyPods, podList.Items[podIndex].Name) - } - } - - return readyPods -} - func updateClusterForBatchRestart( k8sClient client.Client, ctx goctx.Context, aeroCluster *asdbv1.AerospikeCluster, @@ -526,7 +504,7 @@ func updateClusterForBatchRestart( return err } - if !isBatchRestart(aeroCluster) { + if !isBatchRestart(ctx, aeroCluster) { return fmt.Errorf("looks like pods are not restarting in batch") } diff --git 
a/test/cluster/cluster_helper.go b/test/cluster/cluster_helper.go index 4a342647..61402751 100644 --- a/test/cluster/cluster_helper.go +++ b/test/cluster/cluster_helper.go @@ -66,6 +66,7 @@ var aerospikeVolumeInitMethodDeleteFiles = asdbv1.AerospikeVolumeMethodDeleteFil var ( retryInterval = time.Second * 30 + shortRetryInterval = time.Second * 1 cascadeDeleteFalse = false cascadeDeleteTrue = true logger = logr.Discard() @@ -547,7 +548,7 @@ func validateAerospikeConfigServiceClusterUpdate( func validateMigrateFillDelay( ctx goctx.Context, k8sClient client.Client, log logr.Logger, - clusterNamespacedName types.NamespacedName, expectedMigFillDelay int64, + clusterNamespacedName types.NamespacedName, expectedMigFillDelay int64, retryInt *time.Duration, ) error { aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) if err != nil { @@ -567,9 +568,16 @@ func validateMigrateFillDelay( return err } + interval := retryInterval + + if retryInt != nil { + interval = *retryInt + } + asinfo := info.NewAsInfo(log, host, getClientPolicy(aeroCluster, k8sClient)) + err = wait.PollUntilContextTimeout(ctx, - retryInterval, getTimeout(1), true, func(goctx.Context) (done bool, err error) { + interval, getTimeout(1), true, func(goctx.Context) (done bool, err error) { confs, err := getAsConfig(asinfo, "service") if err != nil { return false, err @@ -1696,3 +1704,40 @@ func CheckDataInCluster( return data, nil } + +func getReadyPods(aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client) ([]string, error) { + podList, err := getPodList(aeroCluster, k8sClient) + if err != nil { + return nil, err + } + + var readyPods []string + + for podIndex := range podList.Items { + if utils.IsPodRunningAndReady(&podList.Items[podIndex]) { + readyPods = append(readyPods, podList.Items[podIndex].Name) + } + } + + return readyPods, nil +} + +func waitForOperatorToStartPodRestart(ctx goctx.Context, k8sClient client.Client, + aeroCluster *asdbv1.AerospikeCluster) error { + // Wait for starting the pod restart process + return wait.PollUntilContextTimeout(ctx, + 1*time.Second, getTimeout(aeroCluster.Spec.Size), true, func(goctx.Context) (done bool, err error) { + readyPods, err := getReadyPods(aeroCluster, k8sClient) + if err != nil { + return false, err + } + + unreadyPods := int(aeroCluster.Spec.Size) - len(readyPods) + if unreadyPods > 0 { + return true, nil + } + + return false, nil + }, + ) +} diff --git a/test/cluster/cluster_test.go b/test/cluster/cluster_test.go index f0193d66..2ca79baa 100644 --- a/test/cluster/cluster_test.go +++ b/test/cluster/cluster_test.go @@ -331,7 +331,7 @@ func ScaleDownWithMigrateFillDelay(ctx goctx.Context) { Expect(err).ToNot(HaveOccurred()) // verify that migrate-fill-delay is set to 0 while scaling down - err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, 0) + err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, 0, nil) Expect(err).ToNot(HaveOccurred()) err = waitForAerospikeCluster( @@ -341,7 +341,7 @@ func ScaleDownWithMigrateFillDelay(ctx goctx.Context) { Expect(err).ToNot(HaveOccurred()) // verify that migrate-fill-delay is reverted to original value after scaling down - err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, migrateFillDelay) + err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, migrateFillDelay, nil) Expect(err).ToNot(HaveOccurred()) }, ) diff --git a/test/cluster/local_pvc_delete_test.go b/test/cluster/local_pvc_delete_test.go index 
f59366e5..9df8893d 100644 --- a/test/cluster/local_pvc_delete_test.go +++ b/test/cluster/local_pvc_delete_test.go @@ -21,6 +21,7 @@ var _ = Describe( "LocalPVCDelete", func() { ctx := goctx.TODO() clusterName := fmt.Sprintf("local-pvc-%d", GinkgoParallelProcess()) + migrateFillDelay := int64(300) clusterNamespacedName := test.GetNamespacedName(clusterName, namespace) Context("When doing valid operations", func() { @@ -38,32 +39,33 @@ var _ = Describe( ) Context("When doing rolling restart", func() { - It("Should delete the local PVCs when deleteLocalStorageOnRestart is set", func() { - aeroCluster := createDummyAerospikeCluster( - clusterNamespacedName, 2, - ) - aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.To(true) - aeroCluster.Spec.Storage.LocalStorageClasses = []string{storageClass} - Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) - - oldPvcInfoPerPod, err := extractClusterPVC(ctx, k8sClient, aeroCluster) - Expect(err).ToNot(HaveOccurred()) - - By("Updating pod metadata to trigger rolling restart") - aeroCluster.Spec.PodSpec.AerospikeObjectMeta = asdbv1.AerospikeObjectMeta{ - Labels: map[string]string{ - "test-label": "test-value", - }, - } - - Expect(updateCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) - - By("Validating PVCs deletion") - validateClusterPVCDeletion(ctx, oldPvcInfoPerPod) - }) + It("Should delete the local PVCs when deleteLocalStorageOnRestart is set and set MFD dynamically", + func() { + aeroCluster := createDummyAerospikeCluster( + clusterNamespacedName, 2, + ) + aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.To(true) + aeroCluster.Spec.Storage.LocalStorageClasses = []string{storageClass} + Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + oldPvcInfoPerPod, err := extractClusterPVC(ctx, k8sClient, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Updating pod metadata to trigger rolling restart") + aeroCluster.Spec.PodSpec.AerospikeObjectMeta = asdbv1.AerospikeObjectMeta{ + Labels: map[string]string{ + "test-label": "test-value", + }, + } + + updateAndValidateIntermediateMFD(ctx, k8sClient, aeroCluster, migrateFillDelay) + + By("Validating PVCs deletion") + validateClusterPVCDeletion(ctx, oldPvcInfoPerPod) + }) It("Should delete the local PVCs of only 1 rack when deleteLocalStorageOnRestart is set "+ - "at the rack level", func() { + "at the rack level and set MFD dynamically", func() { aeroCluster := createDummyAerospikeCluster( clusterNamespacedName, 2, ) @@ -87,7 +89,7 @@ var _ = Describe( }, } - Expect(updateCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + updateAndValidateIntermediateMFD(ctx, k8sClient, aeroCluster, migrateFillDelay) By("Validating PVCs deletion") validateClusterPVCDeletion(ctx, oldPvcInfoPerPod, clusterName+"-2-0") @@ -95,40 +97,42 @@ var _ = Describe( }) Context("When doing upgrade", func() { - It("Should delete the local PVCs when deleteLocalStorageOnRestart is set", func() { - aeroCluster := createDummyAerospikeCluster( - clusterNamespacedName, 2, - ) - Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + It("Should delete the local PVCs when deleteLocalStorageOnRestart is set and set MFD dynamically", + func() { + aeroCluster := createDummyAerospikeCluster( + clusterNamespacedName, 2, + ) + Expect(DeployCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) - oldPvcInfoPerPod, err := extractClusterPVC(ctx, k8sClient, aeroCluster) - Expect(err).ToNot(HaveOccurred()) - oldPodIDs, err := getPodIDs(ctx, 
aeroCluster) - Expect(err).ToNot(HaveOccurred()) + oldPvcInfoPerPod, err := extractClusterPVC(ctx, k8sClient, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + oldPodIDs, err := getPodIDs(ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) - By("Enable DeleteLocalStorageOnRestart and set localStorageClasses") - aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.To(true) - aeroCluster.Spec.Storage.LocalStorageClasses = []string{storageClass} - Expect(updateCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + By("Enable DeleteLocalStorageOnRestart and set localStorageClasses") + aeroCluster.Spec.Storage.DeleteLocalStorageOnRestart = ptr.To(true) + aeroCluster.Spec.Storage.LocalStorageClasses = []string{storageClass} + Expect(updateCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) - operationTypeMap := map[string]asdbv1.OperationKind{ - aeroCluster.Name + "-0-0": noRestart, - aeroCluster.Name + "-0-1": noRestart, - } + operationTypeMap := map[string]asdbv1.OperationKind{ + aeroCluster.Name + "-0-0": noRestart, + aeroCluster.Name + "-0-1": noRestart, + } - err = validateOperationTypes(ctx, aeroCluster, oldPodIDs, operationTypeMap) - Expect(err).ToNot(HaveOccurred()) + err = validateOperationTypes(ctx, aeroCluster, oldPodIDs, operationTypeMap) + Expect(err).ToNot(HaveOccurred()) - By("Updating the image") - Expect(UpdateClusterImage(aeroCluster, nextImage)).ToNot(HaveOccurred()) - Expect(updateCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + By("Updating the image") + Expect(UpdateClusterImage(aeroCluster, nextImage)).ToNot(HaveOccurred()) - By("Validating PVCs deletion") - validateClusterPVCDeletion(ctx, oldPvcInfoPerPod) - }) + updateAndValidateIntermediateMFD(ctx, k8sClient, aeroCluster, migrateFillDelay) + + By("Validating PVCs deletion") + validateClusterPVCDeletion(ctx, oldPvcInfoPerPod) + }) It("Should delete the local PVCs of only 1 rack when deleteLocalStorageOnRestart is set "+ - "at the rack level", func() { + "at the rack level and set MFD dynamically", func() { aeroCluster := createDummyAerospikeCluster( clusterNamespacedName, 2, ) @@ -147,7 +151,8 @@ var _ = Describe( By("Updating the image") Expect(UpdateClusterImage(aeroCluster, nextImage)).ToNot(HaveOccurred()) - Expect(updateCluster(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + updateAndValidateIntermediateMFD(ctx, k8sClient, aeroCluster, migrateFillDelay) By("Validating PVCs deletion") validateClusterPVCDeletion(ctx, oldPvcInfoPerPod, clusterName+"-2-0") @@ -200,3 +205,45 @@ func extractClusterPVC(ctx goctx.Context, k8sClient client.Client, aeroCluster * return oldPvcInfoPerPod, nil } + +func updateAndValidateIntermediateMFD(ctx goctx.Context, k8sClient client.Client, aeroCluster *asdbv1.AerospikeCluster, + expectedMigFillDelay int64) { + aeroCluster.Spec.AerospikeConfig.Value["service"].(map[string]interface{})["migrate-fill-delay"] = + expectedMigFillDelay + Expect(updateClusterWithNoWait(k8sClient, ctx, aeroCluster)).ToNot(HaveOccurred()) + + clusterNamespacedName := utils.GetNamespacedName(aeroCluster) + + err := waitForOperatorToStartPodRestart(ctx, k8sClient, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Validating the migrate-fill-delay is set to given value before the restart") + + err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, expectedMigFillDelay, + &shortRetryInterval) + Expect(err).ToNot(HaveOccurred()) + + By("Validating the migrate-fill-delay is set to 0 after the restart (pod is running)") + + err = 
validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, 0, + &shortRetryInterval) + Expect(err).ToNot(HaveOccurred()) + + By("Validating the migrate-fill-delay is set to given value before the restart of next pod") + + err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, expectedMigFillDelay, + &shortRetryInterval) + Expect(err).ToNot(HaveOccurred()) + + err = waitForAerospikeCluster( + k8sClient, ctx, aeroCluster, int(aeroCluster.Spec.Size), retryInterval, + getTimeout(2), []asdbv1.AerospikeClusterPhase{asdbv1.AerospikeClusterCompleted}, + ) + Expect(err).ToNot(HaveOccurred()) + + By("Validating the migrate-fill-delay is set to given value after the operation is completed") + + err = validateMigrateFillDelay(ctx, k8sClient, logger, clusterNamespacedName, expectedMigFillDelay, + &shortRetryInterval) + Expect(err).ToNot(HaveOccurred()) +} From 47096592bb7b0b5e71427268cfaf788ab06c752f Mon Sep 17 00:00:00 2001 From: Abhisek Dwivedi Date: Fri, 20 Jun 2025 19:44:08 +0530 Subject: [PATCH 6/8] Update release tag to 4.1.0-preview --- Dockerfile | 2 +- Jenkinsfile | 2 +- Makefile | 4 ++-- README.md | 4 ++-- api/v1/aerospikecluster_types.go | 2 +- api/v1beta1/aerospikebackup_types.go | 2 +- api/v1beta1/aerospikebackupservice_types.go | 2 +- api/v1beta1/aerospikerestore_types.go | 2 +- config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml | 2 +- .../crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml | 2 +- config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml | 2 +- config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml | 2 +- helm-charts/aerospike-backup-service/Chart.yaml | 4 ++-- helm-charts/aerospike-backup/Chart.yaml | 4 ++-- helm-charts/aerospike-cluster/Chart.yaml | 4 ++-- helm-charts/aerospike-kubernetes-operator/Chart.yaml | 4 ++-- helm-charts/aerospike-kubernetes-operator/README.md | 2 +- ...esourcedefinition_aerospikebackups.asdb.aerospike.com.yaml | 2 +- ...definition_aerospikebackupservices.asdb.aerospike.com.yaml | 2 +- ...sourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml | 2 +- ...sourcedefinition_aerospikerestores.asdb.aerospike.com.yaml | 2 +- helm-charts/aerospike-kubernetes-operator/values.yaml | 2 +- helm-charts/aerospike-restore/Chart.yaml | 4 ++-- 23 files changed, 30 insertions(+), 30 deletions(-) diff --git a/Dockerfile b/Dockerfile index b256ae48..3c9e99a5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -27,7 +27,7 @@ RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} GO111MODULE=on go FROM registry.access.redhat.com/ubi9/ubi-minimal:latest # Version of Operator (build arg) -ARG VERSION="4.0.2" +ARG VERSION="4.1.0-preview" # User to run container as ARG USER="root" diff --git a/Jenkinsfile b/Jenkinsfile index 284be3f5..8d87c7fc 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -240,7 +240,7 @@ boolean isNightly() { } String getVersion() { - def prefix = "4.0.2" + def prefix = "4.1.0-preview" def candidateName = "" if(isNightly()) { def timestamp = new Date().format("yyyy-MM-dd") diff --git a/Makefile b/Makefile index b50e8aea..eb338048 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ OPENSHIFT_VERSION="v4.10" # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) # TODO: Version must be pulled from git tags -VERSION ?= 4.0.2 +VERSION ?= 4.1.0-preview # Platforms supported PLATFORMS ?= linux/amd64,linux/arm64 @@ -313,7 +313,7 @@ submodules: ## Pull and update git submodules recursively # 
 # For OpenShift bundles run
-# CHANNELS=stable DEFAULT_CHANNEL=stable OPENSHIFT_VERSION=v4.10 IMG=docker.io/aerospike/aerospike-kubernetes-operator-nightly:4.0.2 make bundle
+# CHANNELS=stable DEFAULT_CHANNEL=stable OPENSHIFT_VERSION=v4.10 IMG=docker.io/aerospike/aerospike-kubernetes-operator-nightly:4.1.0-preview make bundle
 .PHONY: bundle
 bundle: manifests kustomize operator-sdk
 	rm -rf $(ROOT_DIR)/bundle.Dockerfile $(BUNDLE_DIR)
diff --git a/README.md b/README.md
index a1ebf4cf..c6f7e64a 100644
--- a/README.md
+++ b/README.md
@@ -44,7 +44,7 @@ Run the following command with the appropriate name and version for the operator

 ```sh
 IMAGE_TAG_BASE=aerospike/aerospike-kubernetes-operator-nightly
-VERSION=4.0.2
+VERSION=4.1.0-preview
 make docker-buildx IMG=${IMAGE_TAG_BASE}:${VERSION} PLATFORMS=linux/amd64
 ```
 **Note**: Change `PLATFORMS` var as per host machine or remove it to build multi-arch image
@@ -96,7 +96,7 @@ Set up the environment with image names.
 ```shell
 export ACCOUNT=aerospike
 export IMAGE_TAG_BASE=${ACCOUNT}/aerospike-kubernetes-operator
-export VERSION=4.0.2
+export VERSION=4.1.0-preview
 export IMG=docker.io/${IMAGE_TAG_BASE}-nightly:${VERSION}
 export BUNDLE_IMG=docker.io/${IMAGE_TAG_BASE}-bundle-nightly:${VERSION}
 export CATALOG_IMG=docker.io/${IMAGE_TAG_BASE}-catalog-nightly:${VERSION}
diff --git a/api/v1/aerospikecluster_types.go b/api/v1/aerospikecluster_types.go
index b1eddf4d..5bb362dc 100644
--- a/api/v1/aerospikecluster_types.go
+++ b/api/v1/aerospikecluster_types.go
@@ -1211,7 +1211,7 @@ type AerospikePodStatus struct { //nolint:govet // for readability

 // AerospikeCluster is the schema for the AerospikeCluster API
 // +operator-sdk:csv:customresourcedefinitions:displayName="Aerospike Cluster",resources={{Service, v1},{Pod,v1},{StatefulSet,v1}}
-// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.0.2"
+// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.1.0-preview"
 //
 //nolint:lll // for readability
 type AerospikeCluster struct { //nolint:govet // for readability
diff --git a/api/v1beta1/aerospikebackup_types.go b/api/v1beta1/aerospikebackup_types.go
index d3d3af6f..109f8843 100644
--- a/api/v1beta1/aerospikebackup_types.go
+++ b/api/v1beta1/aerospikebackup_types.go
@@ -86,7 +86,7 @@ type AerospikeBackupStatus struct {

 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
-// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.0.2"
+// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.1.0-preview"
 // +kubebuilder:printcolumn:name="Backup Service Name",type=string,JSONPath=`.spec.backupService.name`
 // +kubebuilder:printcolumn:name="Backup Service Namespace",type=string,JSONPath=`.spec.backupService.namespace`
 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
diff --git a/api/v1beta1/aerospikebackupservice_types.go b/api/v1beta1/aerospikebackupservice_types.go
index 2086313f..1dc8ffd9 100644
--- a/api/v1beta1/aerospikebackupservice_types.go
+++ b/api/v1beta1/aerospikebackupservice_types.go
@@ -161,7 +161,7 @@ type ServiceContainerSpec struct {

 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
-// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.0.2"
+// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.1.0-preview"
 // +kubebuilder:printcolumn:name="Image",type=string,JSONPath=`.spec.image`
 // +kubebuilder:printcolumn:name="Service Type",type=string,JSONPath=`.spec.service.type`
 // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
diff --git a/api/v1beta1/aerospikerestore_types.go b/api/v1beta1/aerospikerestore_types.go
index 850bd983..1b688471 100644
--- a/api/v1beta1/aerospikerestore_types.go
+++ b/api/v1beta1/aerospikerestore_types.go
@@ -89,7 +89,7 @@ type AerospikeRestoreStatus struct {

 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
-// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.0.2"
+// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=4.1.0-preview"
 // +kubebuilder:printcolumn:name="Backup Service Name",type=string,JSONPath=`.spec.backupService.name`
 // +kubebuilder:printcolumn:name="Backup Service Namespace",type=string,JSONPath=`.spec.backupService.namespace`
 // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
diff --git a/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml b/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml
index 782448ce..a489bc03 100644
--- a/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml
+++ b/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 4.0.2
+    aerospike-kubernetes-operator/version: 4.1.0-preview
     controller-gen.kubebuilder.io/version: v0.16.1
   name: aerospikebackups.asdb.aerospike.com
 spec:
diff --git a/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml b/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml
index 9cebec8c..e1ff12ca 100644
--- a/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml
+++ b/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 4.0.2
+    aerospike-kubernetes-operator/version: 4.1.0-preview
     controller-gen.kubebuilder.io/version: v0.16.1
   name: aerospikebackupservices.asdb.aerospike.com
 spec:
diff --git a/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml b/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml
index 4e2eb4ec..a36d5819 100644
--- a/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml
+++ b/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 4.0.2
+    aerospike-kubernetes-operator/version: 4.1.0-preview
     controller-gen.kubebuilder.io/version: v0.16.1
   name: aerospikeclusters.asdb.aerospike.com
 spec:
diff --git a/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml b/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml
index 2104c85c..26b12189 100644
--- a/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml
+++ b/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 4.0.2
+    aerospike-kubernetes-operator/version: 4.1.0-preview
     controller-gen.kubebuilder.io/version: v0.16.1
   name: aerospikerestores.asdb.aerospike.com
 spec:
diff --git a/helm-charts/aerospike-backup-service/Chart.yaml b/helm-charts/aerospike-backup-service/Chart.yaml
index 5e58dcb2..77e59dfe 100644
--- a/helm-charts/aerospike-backup-service/Chart.yaml
+++ b/helm-charts/aerospike-backup-service/Chart.yaml
@@ -3,9 +3,9 @@ type: application
 name: aerospike-backup-service

 # version tracks chart changes
-version: 4.0.2
+version: 4.1.0-preview

 # appVersion tracks operator version
-appVersion: 4.0.2
+appVersion: 4.1.0-preview
 description: A Helm chart for Aerospike Backup Service Custom Resource
 icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4
diff --git a/helm-charts/aerospike-backup/Chart.yaml b/helm-charts/aerospike-backup/Chart.yaml
index e08e3cc6..f141a688 100644
--- a/helm-charts/aerospike-backup/Chart.yaml
+++ b/helm-charts/aerospike-backup/Chart.yaml
@@ -3,9 +3,9 @@ type: application
 name: aerospike-backup

 # version tracks chart changes
-version: 4.0.2
+version: 4.1.0-preview

 # appVersion tracks operator version
-appVersion: 4.0.2
+appVersion: 4.1.0-preview
 description: A Helm chart for Aerospike Backup Custom Resource
 icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4
diff --git a/helm-charts/aerospike-cluster/Chart.yaml b/helm-charts/aerospike-cluster/Chart.yaml
index b2dd79e5..8bc6b2b0 100644
--- a/helm-charts/aerospike-cluster/Chart.yaml
+++ b/helm-charts/aerospike-cluster/Chart.yaml
@@ -3,9 +3,9 @@ type: application
 name: aerospike-cluster

 # version tracks chart changes
-version: 4.0.2
+version: 4.1.0-preview

 # appVersion tracks operator version
-appVersion: 4.0.2
+appVersion: 4.1.0-preview
 description: A Helm chart for Aerospike Cluster Custom Resource
 icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4
diff --git a/helm-charts/aerospike-kubernetes-operator/Chart.yaml b/helm-charts/aerospike-kubernetes-operator/Chart.yaml
index 9acc974c..377ea5e8 100644
--- a/helm-charts/aerospike-kubernetes-operator/Chart.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/Chart.yaml
@@ -3,9 +3,9 @@ type: application
 name: aerospike-kubernetes-operator

 # version tracks chart changes
-version: 4.0.2
+version: 4.1.0-preview

 # appVersion tracks operator version
-appVersion: 4.0.2
+appVersion: 4.1.0-preview
 description: A Helm chart for Aerospike Kubernetes Operator
 icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4
diff --git a/helm-charts/aerospike-kubernetes-operator/README.md b/helm-charts/aerospike-kubernetes-operator/README.md
index efdcd663..ddf4eaae 100644
--- a/helm-charts/aerospike-kubernetes-operator/README.md
+++ b/helm-charts/aerospike-kubernetes-operator/README.md
@@ -37,7 +37,7 @@ helm install aerospike-kubernetes-operator ./aerospike-kubernetes-operator --set
 |-------------------------------------|-------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
 | `replicas` | Number of operator replicas | `2` |
 | `operatorImage.repository` | Operator image repository | `aerospike/aerospike-kubernetes-operator` |
-| `operatorImage.tag` | Operator image tag | `4.0.2` |
+| `operatorImage.tag` | Operator image tag | `4.1.0-preview` |
 | `operatorImage.pullPolicy` | Image pull policy | `IfNotPresent` |
 | `imagePullSecrets` | Secrets containing credentials to pull Operator image from a private registry | `{}` (nil) |
 | `rbac.create` | Set this to `true` to let helm chart automatically create RBAC resources necessary for operator | `true` |
diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml
index 782448ce..a489bc03 100644
--- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 4.0.2
+    aerospike-kubernetes-operator/version: 4.1.0-preview
     controller-gen.kubebuilder.io/version: v0.16.1
   name: aerospikebackups.asdb.aerospike.com
 spec:
diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml
index 9cebec8c..e1ff12ca 100644
--- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 4.0.2
+    aerospike-kubernetes-operator/version: 4.1.0-preview
     controller-gen.kubebuilder.io/version: v0.16.1
   name: aerospikebackupservices.asdb.aerospike.com
 spec:
diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml
index 4e2eb4ec..a36d5819 100644
--- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 4.0.2
+    aerospike-kubernetes-operator/version: 4.1.0-preview
     controller-gen.kubebuilder.io/version: v0.16.1
   name: aerospikeclusters.asdb.aerospike.com
 spec:
diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml
index 2104c85c..26b12189 100644
--- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 4.0.2
+    aerospike-kubernetes-operator/version: 4.1.0-preview
     controller-gen.kubebuilder.io/version: v0.16.1
   name: aerospikerestores.asdb.aerospike.com
 spec:
diff --git a/helm-charts/aerospike-kubernetes-operator/values.yaml b/helm-charts/aerospike-kubernetes-operator/values.yaml
index 08d1eb8b..c9c8f8a2 100644
--- a/helm-charts/aerospike-kubernetes-operator/values.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/values.yaml
@@ -4,7 +4,7 @@ replicas: 2
 ## Operator image
 operatorImage:
   repository: aerospike/aerospike-kubernetes-operator
-  tag: 4.0.2
+  tag: 4.1.0-preview
   pullPolicy: IfNotPresent

 ## In case the above image is pulled from a registry that requires
diff --git a/helm-charts/aerospike-restore/Chart.yaml b/helm-charts/aerospike-restore/Chart.yaml
index 36adf7ba..5ee42e50 100644
--- a/helm-charts/aerospike-restore/Chart.yaml
+++ b/helm-charts/aerospike-restore/Chart.yaml
@@ -3,9 +3,9 @@ type: application
 name: aerospike-restore

 # version tracks chart changes
-version: 4.0.2
+version: 4.1.0-preview

 # appVersion tracks operator version
-appVersion: 4.0.2
+appVersion: 4.1.0-preview
 description: A Helm chart for Aerospike Restore Custom Resource
 icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4

From 87cc10b23566a40ebfb70dc2e7337331b8fdc4c4 Mon Sep 17 00:00:00 2001
From: Abhisek Dwivedi
Date: Fri, 20 Jun 2025 20:02:32 +0530
Subject: [PATCH 7/8] Update tag pattern to trigger on preview builds

---
 .github/workflows/docker-image-release.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/docker-image-release.yaml b/.github/workflows/docker-image-release.yaml
index 37781434..0db4a064 100644
--- a/.github/workflows/docker-image-release.yaml
+++ b/.github/workflows/docker-image-release.yaml
@@ -2,7 +2,7 @@ name: Release Container Image
 on:
   push:
     tags:
-      - 'v[0-9]+.[0-9]+.[0-9]+'
+      - 'v*.*.*'

 jobs:
   build-and-push:

From 3849630e53f15fa4d51f59e00f5e5ee70fc0a17c Mon Sep 17 00:00:00 2001
From: Abhisek Dwivedi
Date: Tue, 24 Jun 2025 11:15:17 +0530
Subject: [PATCH 8/8] Update tag generation func in Jenkins file

---
 .github/workflows/docker-image-release.yaml | 2 +-
 Jenkinsfile                                 | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/docker-image-release.yaml b/.github/workflows/docker-image-release.yaml
index 0db4a064..37781434 100644
--- a/.github/workflows/docker-image-release.yaml
+++ b/.github/workflows/docker-image-release.yaml
@@ -2,7 +2,7 @@ name: Release Container Image
 on:
   push:
     tags:
-      - 'v*.*.*'
+      - 'v[0-9]+.[0-9]+.[0-9]+'

 jobs:
   build-and-push:
diff --git a/Jenkinsfile b/Jenkinsfile
index 8d87c7fc..978fc23a 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -246,7 +246,7 @@ String getVersion() {
         def timestamp = new Date().format("yyyy-MM-dd")
         candidateName = "nightly-${timestamp}"
     } else {
-        candidateName = "candidate-${env.BRANCH_NAME}"
+        candidateName = "${env.BRANCH_NAME}"
     }

     def candidateNameMax = 30 - prefix.length()