From 93ba308c112cd6a025ec02feb98abe60f4efed07 Mon Sep 17 00:00:00 2001
From: Danil-Grigorev
Date: Thu, 17 Jul 2025 10:02:28 +0200
Subject: [PATCH] Add waitForPreDrainHookStartTime and
 waitForPreTerminateHookStartTime and skip draining/detachment once waiting
 for pre-terminate hooks has started

Signed-off-by: Danil-Grigorev
---
 api/core/v1beta1/machine_types.go              |  12 +
 api/core/v1beta1/zz_generated.conversion.go    |   4 +
 api/core/v1beta1/zz_generated.deepcopy.go      |   8 +
 api/core/v1beta1/zz_generated.openapi.go       |  12 +
 api/core/v1beta2/machine_types.go              |  12 +
 api/core/v1beta2/zz_generated.deepcopy.go      |   8 +
 api/core/v1beta2/zz_generated.openapi.go       |  12 +
 .../crd/bases/cluster.x-k8s.io_machines.yaml   |  28 +++
 .../controllers/machine/machine_controller.go  |  39 +++-
 .../machine/machine_controller_test.go         | 209 ++++++++++++++++--
 10 files changed, 321 insertions(+), 23 deletions(-)

diff --git a/api/core/v1beta1/machine_types.go b/api/core/v1beta1/machine_types.go
index d36848090d29..5e7f2f8b4b99 100644
--- a/api/core/v1beta1/machine_types.go
+++ b/api/core/v1beta1/machine_types.go
@@ -622,6 +622,18 @@ type MachineDeletionStatus struct {
 	// Only present when the Machine has a deletionTimestamp and waiting for volume detachments had been started.
 	// +optional
 	WaitForNodeVolumeDetachStartTime *metav1.Time `json:"waitForNodeVolumeDetachStartTime,omitempty"`
+
+	// waitForPreDrainHookStartTime is the time when waiting for pre-drain hooks started
+	// and is used to determine if the pre-drain hooks are taking too long.
+	// Only present when the Machine has a deletionTimestamp and waiting for pre-drain hooks had been started.
+	// +optional
+	WaitForPreDrainHookStartTime *metav1.Time `json:"waitForPreDrainHookStartTime,omitempty"`
+
+	// waitForPreTerminateHookStartTime is the time when waiting for pre-terminate hooks started
+	// and is used to determine if the pre-terminate hooks are taking too long.
+	// Only present when the Machine has a deletionTimestamp and waiting for pre-terminate hooks had been started.
+	// +optional
+	WaitForPreTerminateHookStartTime *metav1.Time `json:"waitForPreTerminateHookStartTime,omitempty"`
 }
 
 // SetTypedPhase sets the Phase field to the string representation of MachinePhase.
diff --git a/api/core/v1beta1/zz_generated.conversion.go b/api/core/v1beta1/zz_generated.conversion.go
index b275acf137e5..d02432069267 100644
--- a/api/core/v1beta1/zz_generated.conversion.go
+++ b/api/core/v1beta1/zz_generated.conversion.go
@@ -2314,6 +2314,8 @@ func Convert_v1beta2_MachineAddress_To_v1beta1_MachineAddress(in *v1beta2.Machin
 func autoConvert_v1beta1_MachineDeletionStatus_To_v1beta2_MachineDeletionStatus(in *MachineDeletionStatus, out *v1beta2.MachineDeletionStatus, s conversion.Scope) error {
 	out.NodeDrainStartTime = (*v1.Time)(unsafe.Pointer(in.NodeDrainStartTime))
 	out.WaitForNodeVolumeDetachStartTime = (*v1.Time)(unsafe.Pointer(in.WaitForNodeVolumeDetachStartTime))
+	out.WaitForPreDrainHookStartTime = (*v1.Time)(unsafe.Pointer(in.WaitForPreDrainHookStartTime))
+	out.WaitForPreTerminateHookStartTime = (*v1.Time)(unsafe.Pointer(in.WaitForPreTerminateHookStartTime))
 	return nil
 }
 
@@ -2325,6 +2327,8 @@ func Convert_v1beta1_MachineDeletionStatus_To_v1beta2_MachineDeletionStatus(in *
 func autoConvert_v1beta2_MachineDeletionStatus_To_v1beta1_MachineDeletionStatus(in *v1beta2.MachineDeletionStatus, out *MachineDeletionStatus, s conversion.Scope) error {
 	out.NodeDrainStartTime = (*v1.Time)(unsafe.Pointer(in.NodeDrainStartTime))
 	out.WaitForNodeVolumeDetachStartTime = (*v1.Time)(unsafe.Pointer(in.WaitForNodeVolumeDetachStartTime))
+	out.WaitForPreDrainHookStartTime = (*v1.Time)(unsafe.Pointer(in.WaitForPreDrainHookStartTime))
+	out.WaitForPreTerminateHookStartTime = (*v1.Time)(unsafe.Pointer(in.WaitForPreTerminateHookStartTime))
 	return nil
 }
 
diff --git a/api/core/v1beta1/zz_generated.deepcopy.go b/api/core/v1beta1/zz_generated.deepcopy.go
index a1b090669cb4..0fbb181c8277 100644
--- a/api/core/v1beta1/zz_generated.deepcopy.go
+++ b/api/core/v1beta1/zz_generated.deepcopy.go
@@ -1174,6 +1174,14 @@ func (in *MachineDeletionStatus) DeepCopyInto(out *MachineDeletionStatus) {
 		in, out := &in.WaitForNodeVolumeDetachStartTime, &out.WaitForNodeVolumeDetachStartTime
 		*out = (*in).DeepCopy()
 	}
+	if in.WaitForPreDrainHookStartTime != nil {
+		in, out := &in.WaitForPreDrainHookStartTime, &out.WaitForPreDrainHookStartTime
+		*out = (*in).DeepCopy()
+	}
+	if in.WaitForPreTerminateHookStartTime != nil {
+		in, out := &in.WaitForPreTerminateHookStartTime, &out.WaitForPreTerminateHookStartTime
+		*out = (*in).DeepCopy()
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeletionStatus.
diff --git a/api/core/v1beta1/zz_generated.openapi.go b/api/core/v1beta1/zz_generated.openapi.go
index 13a78b23719c..e7f4ae32561e 100644
--- a/api/core/v1beta1/zz_generated.openapi.go
+++ b/api/core/v1beta1/zz_generated.openapi.go
@@ -2048,6 +2048,18 @@ func schema_cluster_api_api_core_v1beta1_MachineDeletionStatus(ref common.Refere
 						Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
 					},
 				},
+				"waitForPreDrainHookStartTime": {
+					SchemaProps: spec.SchemaProps{
+						Description: "waitForPreDrainHookStartTime is the time when waiting for pre-drain hooks started and is used to determine if the pre-drain hooks are taking too long. Only present when the Machine has a deletionTimestamp and waiting for pre-drain hooks had been started.",
+						Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
+					},
+				},
+				"waitForPreTerminateHookStartTime": {
+					SchemaProps: spec.SchemaProps{
+						Description: "waitForPreTerminateHookStartTime is the time when waiting for pre-terminate hooks started and is used to determine if the pre-terminate hooks are taking too long. Only present when the Machine has a deletionTimestamp and waiting for pre-terminate hooks had been started.",
+						Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
+					},
+				},
 			},
 		},
 	},
diff --git a/api/core/v1beta2/machine_types.go b/api/core/v1beta2/machine_types.go
index 8358d8a6a291..1a05a2413a95 100644
--- a/api/core/v1beta2/machine_types.go
+++ b/api/core/v1beta2/machine_types.go
@@ -671,6 +671,18 @@ type MachineDeletionStatus struct {
 	// Only present when the Machine has a deletionTimestamp and waiting for volume detachments had been started.
 	// +optional
 	WaitForNodeVolumeDetachStartTime *metav1.Time `json:"waitForNodeVolumeDetachStartTime,omitempty"`
+
+	// waitForPreDrainHookStartTime is the time when waiting for pre-drain hooks started
+	// and is used to determine if the pre-drain hooks are taking too long.
+	// Only present when the Machine has a deletionTimestamp and waiting for pre-drain hooks had been started.
+	// +optional
+	WaitForPreDrainHookStartTime *metav1.Time `json:"waitForPreDrainHookStartTime,omitempty"`
+
+	// waitForPreTerminateHookStartTime is the time when waiting for pre-terminate hooks started
+	// and is used to determine if the pre-terminate hooks are taking too long.
+	// Only present when the Machine has a deletionTimestamp and waiting for pre-terminate hooks had been started.
+	// +optional
+	WaitForPreTerminateHookStartTime *metav1.Time `json:"waitForPreTerminateHookStartTime,omitempty"`
 }
 
 // SetTypedPhase sets the Phase field to the string representation of MachinePhase.
diff --git a/api/core/v1beta2/zz_generated.deepcopy.go b/api/core/v1beta2/zz_generated.deepcopy.go
index f89f39e07b79..7b94b3f5d3a7 100644
--- a/api/core/v1beta2/zz_generated.deepcopy.go
+++ b/api/core/v1beta2/zz_generated.deepcopy.go
@@ -1255,6 +1255,14 @@ func (in *MachineDeletionStatus) DeepCopyInto(out *MachineDeletionStatus) {
 		in, out := &in.WaitForNodeVolumeDetachStartTime, &out.WaitForNodeVolumeDetachStartTime
 		*out = (*in).DeepCopy()
 	}
+	if in.WaitForPreDrainHookStartTime != nil {
+		in, out := &in.WaitForPreDrainHookStartTime, &out.WaitForPreDrainHookStartTime
+		*out = (*in).DeepCopy()
+	}
+	if in.WaitForPreTerminateHookStartTime != nil {
+		in, out := &in.WaitForPreTerminateHookStartTime, &out.WaitForPreTerminateHookStartTime
+		*out = (*in).DeepCopy()
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeletionStatus.
diff --git a/api/core/v1beta2/zz_generated.openapi.go b/api/core/v1beta2/zz_generated.openapi.go
index bfaace019b24..a70c5e23dc64 100644
--- a/api/core/v1beta2/zz_generated.openapi.go
+++ b/api/core/v1beta2/zz_generated.openapi.go
@@ -2310,6 +2310,18 @@ func schema_cluster_api_api_core_v1beta2_MachineDeletionStatus(ref common.Refere
 						Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
 					},
 				},
+				"waitForPreDrainHookStartTime": {
+					SchemaProps: spec.SchemaProps{
+						Description: "waitForPreDrainHookStartTime is the time when waiting for pre-drain hooks started and is used to determine if the pre-drain hooks are taking too long. Only present when the Machine has a deletionTimestamp and waiting for pre-drain hooks had been started.",
+						Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
+					},
+				},
+				"waitForPreTerminateHookStartTime": {
+					SchemaProps: spec.SchemaProps{
+						Description: "waitForPreTerminateHookStartTime is the time when waiting for pre-terminate hooks started and is used to determine if the pre-terminate hooks are taking too long. Only present when the Machine has a deletionTimestamp and waiting for pre-terminate hooks had been started.",
+						Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
+					},
+				},
 			},
 		},
 	},
diff --git a/config/crd/bases/cluster.x-k8s.io_machines.yaml b/config/crd/bases/cluster.x-k8s.io_machines.yaml
index 5274c3f64253..a67ed891d183 100644
--- a/config/crd/bases/cluster.x-k8s.io_machines.yaml
+++ b/config/crd/bases/cluster.x-k8s.io_machines.yaml
@@ -1215,6 +1215,20 @@ spec:
                     Only present when the Machine has a deletionTimestamp and waiting for volume detachments had been started.
                   format: date-time
                   type: string
+                waitForPreDrainHookStartTime:
+                  description: |-
+                    waitForPreDrainHookStartTime is the time when waiting for pre-drain hooks started
+                    and is used to determine if the pre-drain hooks are taking too long.
+                    Only present when the Machine has a deletionTimestamp and waiting for pre-drain hooks had been started.
+                  format: date-time
+                  type: string
+                waitForPreTerminateHookStartTime:
+                  description: |-
+                    waitForPreTerminateHookStartTime is the time when waiting for pre-terminate hooks started
+                    and is used to determine if the pre-terminate hooks are taking too long.
+                    Only present when the Machine has a deletionTimestamp and waiting for pre-terminate hooks had been started.
+                  format: date-time
+                  type: string
               type: object
             failureMessage:
               description: |-
@@ -1855,6 +1869,20 @@ spec:
                     Only present when the Machine has a deletionTimestamp and waiting for volume detachments had been started.
                   format: date-time
                   type: string
+                waitForPreDrainHookStartTime:
+                  description: |-
+                    waitForPreDrainHookStartTime is the time when waiting for pre-drain hooks started
+                    and is used to determine if the pre-drain hooks are taking too long.
+                    Only present when the Machine has a deletionTimestamp and waiting for pre-drain hooks had been started.
+                  format: date-time
+                  type: string
+                waitForPreTerminateHookStartTime:
+                  description: |-
+                    waitForPreTerminateHookStartTime is the time when waiting for pre-terminate hooks started
+                    and is used to determine if the pre-terminate hooks are taking too long.
+                    Only present when the Machine has a deletionTimestamp and waiting for pre-terminate hooks had been started.
+                  format: date-time
+                  type: string
               type: object
             deprecated:
               description: deprecated groups all the status fields that are deprecated
diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go
index 0577a96455bc..8131b55bcda4 100644
--- a/internal/controllers/machine/machine_controller.go
+++ b/internal/controllers/machine/machine_controller.go
@@ -466,6 +466,12 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result
 		}
 		slices.Sort(hooks)
 		log.Info("Waiting for pre-drain hooks to succeed", "hooks", strings.Join(hooks, ","))
+		if m.Status.Deletion == nil {
+			m.Status.Deletion = &clusterv1.MachineDeletionStatus{}
+		}
+		if m.Status.Deletion.WaitForPreDrainHookStartTime == nil {
+			m.Status.Deletion.WaitForPreDrainHookStartTime = ptr.To(metav1.Now())
+		}
 		v1beta1conditions.MarkFalse(m, clusterv1.PreDrainDeleteHookSucceededV1Beta1Condition, clusterv1.WaitingExternalHookV1Beta1Reason, clusterv1.ConditionSeverityInfo, "")
 		s.deletingReason = clusterv1.MachineDeletingWaitingForPreDrainHookReason
 		s.deletingMessage = fmt.Sprintf("Waiting for pre-drain hooks to succeed (hooks: %s)", strings.Join(hooks, ","))
@@ -474,7 +480,8 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result
 	v1beta1conditions.MarkTrue(m, clusterv1.PreDrainDeleteHookSucceededV1Beta1Condition)
 
 	// Drain node before deletion and issue a patch in order to make this operation visible to the users.
-	if r.isNodeDrainAllowed(m) {
+	// In case waiting for pre-terminate hooks has started or the infra machine is not found, node draining is skipped.
+	if r.isNodeDrainAllowed(m, s.infraMachine) {
 		patchHelper, err := patch.NewHelper(m, r.Client)
 		if err != nil {
 			s.deletingReason = clusterv1.MachineDeletingInternalErrorReason
@@ -521,8 +528,8 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result
 
 	// After node draining is completed, and if isNodeVolumeDetachingAllowed returns True, make sure all
 	// volumes are detached before proceeding to delete the Node.
-	// In case the node is unreachable, the detachment is skipped.
-	if r.isNodeVolumeDetachingAllowed(m) {
+	// In case the node is unreachable, waiting for pre-terminate hooks has started, or the infra machine is not found, the detachment is skipped.
+	if r.isNodeVolumeDetachingAllowed(m, s.infraMachine) {
 		if m.Status.Deletion == nil {
 			m.Status.Deletion = &clusterv1.MachineDeletionStatus{}
 		}
@@ -563,6 +570,12 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result
 		}
 		slices.Sort(hooks)
 		log.Info("Waiting for pre-terminate hooks to succeed", "hooks", strings.Join(hooks, ","))
+		if m.Status.Deletion == nil {
+			m.Status.Deletion = &clusterv1.MachineDeletionStatus{}
+		}
+		if m.Status.Deletion.WaitForPreTerminateHookStartTime == nil {
+			m.Status.Deletion.WaitForPreTerminateHookStartTime = ptr.To(metav1.Now())
+		}
 		v1beta1conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededV1Beta1Condition, clusterv1.WaitingExternalHookV1Beta1Reason, clusterv1.ConditionSeverityInfo, "")
 		s.deletingReason = clusterv1.MachineDeletingWaitingForPreTerminateHookReason
 		s.deletingMessage = fmt.Sprintf("Waiting for pre-terminate hooks to succeed (hooks: %s)", strings.Join(hooks, ","))
@@ -637,7 +650,7 @@ const (
 	KubeadmControlPlanePreTerminateHookCleanupAnnotation = clusterv1.PreTerminateDeleteHookAnnotationPrefix + "/kcp-cleanup"
 )
 
-func (r *Reconciler) isNodeDrainAllowed(m *clusterv1.Machine) bool {
+func (r *Reconciler) isNodeDrainAllowed(m *clusterv1.Machine, infraMachine *unstructured.Unstructured) bool {
 	if util.IsControlPlaneMachine(m) && util.HasOwner(m.GetOwnerReferences(), clusterv1.GroupVersionControlPlane.String(), []string{"KubeadmControlPlane"}) {
 		if _, exists := m.Annotations[KubeadmControlPlanePreTerminateHookCleanupAnnotation]; !exists {
 			return false
@@ -648,6 +661,14 @@ func (r *Reconciler) isNodeDrainAllowed(m *clusterv1.Machine) bool {
 		return false
 	}
 
+	if m.Status.Deletion != nil && m.Status.Deletion.WaitForPreTerminateHookStartTime != nil {
+		return false
+	}
+
+	if infraMachine == nil || !infraMachine.GetDeletionTimestamp().IsZero() {
+		return false
+	}
+
 	if r.nodeDrainTimeoutExceeded(m) {
 		return false
 	}
@@ -657,7 +678,7 @@ func (r *Reconciler) isNodeDrainAllowed(m *clusterv1.Machine) bool {
 
 // isNodeVolumeDetachingAllowed returns False if either ExcludeWaitForNodeVolumeDetachAnnotation annotation is set OR
 // nodeVolumeDetachTimeoutExceeded timeout is exceeded, otherwise returns True.
-func (r *Reconciler) isNodeVolumeDetachingAllowed(m *clusterv1.Machine) bool {
+func (r *Reconciler) isNodeVolumeDetachingAllowed(m *clusterv1.Machine, infraMachine *unstructured.Unstructured) bool {
 	if util.IsControlPlaneMachine(m) && util.HasOwner(m.GetOwnerReferences(), clusterv1.GroupVersionControlPlane.String(), []string{"KubeadmControlPlane"}) {
 		if _, exists := m.Annotations[KubeadmControlPlanePreTerminateHookCleanupAnnotation]; !exists {
 			return false
@@ -668,6 +689,14 @@ func (r *Reconciler) isNodeVolumeDetachingAllowed(m *clusterv1.Machine) bool {
 		return false
 	}
 
+	if m.Status.Deletion != nil && m.Status.Deletion.WaitForPreTerminateHookStartTime != nil {
+		return false
+	}
+
+	if infraMachine == nil || !infraMachine.GetDeletionTimestamp().IsZero() {
+		return false
+	}
+
 	if r.nodeVolumeDetachTimeoutExceeded(m) {
 		return false
 	}
diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go
index c0b3d5df99b9..81f1ee8a15d3 100644
--- a/internal/controllers/machine/machine_controller_test.go
+++ b/internal/controllers/machine/machine_controller_test.go
@@ -1412,9 +1412,10 @@ func TestIsNodeDrainedAllowed(t *testing.T) {
 	}
 
 	tests := []struct {
-		name     string
-		machine  *clusterv1.Machine
-		expected bool
+		name         string
+		machine      *clusterv1.Machine
+		infraMachine *unstructured.Unstructured
+		expected     bool
 	}{
 		{
 			name: "Exclude node draining annotation exists",
@@ -1432,8 +1433,84 @@ func TestIsNodeDrainedAllowed(t *testing.T) {
 				},
 				Status: clusterv1.MachineStatus{},
 			},
-			expected: false,
+			infraMachine: &unstructured.Unstructured{},
+			expected:     false,
 		},
+		{
+			name: "Machine without exclude draining annotation should drain",
+			machine: &clusterv1.Machine{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test-machine",
+					Namespace:  metav1.NamespaceDefault,
+					Finalizers: []string{clusterv1.MachineFinalizer},
+				},
+				Spec: clusterv1.MachineSpec{
+					ClusterName:       "test-cluster",
+					InfrastructureRef: clusterv1.ContractVersionedObjectReference{},
+					Bootstrap:         clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
+				},
+				Status: clusterv1.MachineStatus{},
+			},
+			infraMachine: &unstructured.Unstructured{},
+			expected:     true,
+		},
+		{
+			name: "Machine without infra machine should not drain",
+			machine: &clusterv1.Machine{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test-machine",
+					Namespace:  metav1.NamespaceDefault,
+					Finalizers: []string{clusterv1.MachineFinalizer},
+				},
+				Spec: clusterv1.MachineSpec{
+					ClusterName:       "test-cluster",
+					InfrastructureRef: clusterv1.ContractVersionedObjectReference{},
+					Bootstrap:         clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
+				},
+				Status: clusterv1.MachineStatus{},
+			},
+			expected: false,
+		},
+		{
+			name: "Machine with infra machine in deletion should not drain",
+			machine: &clusterv1.Machine{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test-machine",
+					Namespace:  metav1.NamespaceDefault,
+					Finalizers: []string{clusterv1.MachineFinalizer},
+				},
+				Spec: clusterv1.MachineSpec{
+					ClusterName:       "test-cluster",
+					InfrastructureRef: clusterv1.ContractVersionedObjectReference{},
+					Bootstrap:         clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
+				},
+				Status: clusterv1.MachineStatus{},
+			},
+			infraMachine: toUnstructured(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{Time: time.Now()}}}),
+			expected:     false,
+		},
+		{
+			name: "Machine in the pre-terminate hook phase should not drain",
+			machine: &clusterv1.Machine{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test-machine",
+					Namespace:  metav1.NamespaceDefault,
+					Finalizers: []string{clusterv1.MachineFinalizer},
+				},
+				Spec: clusterv1.MachineSpec{
+					ClusterName:       "test-cluster",
+					InfrastructureRef: clusterv1.ContractVersionedObjectReference{},
+					Bootstrap:         clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
+				},
+				Status: clusterv1.MachineStatus{
+					Deletion: &clusterv1.MachineDeletionStatus{
+						WaitForPreTerminateHookStartTime: &metav1.Time{Time: time.Now()},
+					},
+				},
+			},
+			infraMachine: &unstructured.Unstructured{},
+			expected:     false,
+		},
 		{
 			name: "KCP machine with the pre terminate hook should drain",
 			machine: &clusterv1.Machine{
@@ -1457,7 +1534,8 @@ func TestIsNodeDrainedAllowed(t *testing.T) {
 				},
 				Status: clusterv1.MachineStatus{},
 			},
-			expected: true,
+			infraMachine: &unstructured.Unstructured{},
+			expected:     true,
 		},
 		{
 			name: "KCP machine without the pre terminate hook should stop draining",
@@ -1481,7 +1559,8 @@ func TestIsNodeDrainedAllowed(t *testing.T) {
 				},
 				Status: clusterv1.MachineStatus{},
 			},
-			expected: false,
+			infraMachine: &unstructured.Unstructured{},
+			expected:     false,
 		},
 		{
 			name: "Node draining timeout is over",
@@ -1504,7 +1583,8 @@ func TestIsNodeDrainedAllowed(t *testing.T) {
 					},
 				},
 			},
-			expected: false,
+			infraMachine: &unstructured.Unstructured{},
+			expected:     false,
 		},
 		{
 			name: "Node draining timeout is not yet over",
@@ -1526,7 +1606,8 @@ func TestIsNodeDrainedAllowed(t *testing.T) {
 					},
 				},
 			},
-			expected: true,
+			infraMachine: &unstructured.Unstructured{},
+			expected:     true,
 		},
 		{
 			name: "NodeDrainTimeoutSeconds option is set to its default value 0",
@@ -1547,7 +1628,8 @@ func TestIsNodeDrainedAllowed(t *testing.T) {
 					},
 				},
 			},
-			expected: true,
+			infraMachine: &unstructured.Unstructured{},
+			expected:     true,
 		},
 	}
 	for _, tt := range tests {
@@ -1562,7 +1644,7 @@ func TestIsNodeDrainedAllowed(t *testing.T) {
 				Client: c,
 			}
 
-			got := r.isNodeDrainAllowed(tt.machine)
+			got := r.isNodeDrainAllowed(tt.machine, tt.infraMachine)
 			g.Expect(got).To(Equal(tt.expected))
 		})
 	}
@@ -1973,9 +2055,10 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) {
 	}
 
 	tests := []struct {
-		name     string
-		machine  *clusterv1.Machine
-		expected bool
+		name         string
+		machine      *clusterv1.Machine
+		infraMachine *unstructured.Unstructured
+		expected     bool
 	}{
 		{
 			name: "Exclude wait node volume detaching annotation exists",
@@ -1993,8 +2076,84 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) {
 				},
 				Status: clusterv1.MachineStatus{},
 			},
-			expected: false,
+			infraMachine: &unstructured.Unstructured{},
+			expected:     false,
 		},
+		{
+			name: "Machine without volume detaching annotation should detach",
+			machine: &clusterv1.Machine{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test-machine",
+					Namespace:  metav1.NamespaceDefault,
+					Finalizers: []string{clusterv1.MachineFinalizer},
+				},
+				Spec: clusterv1.MachineSpec{
+					ClusterName:       "test-cluster",
+					InfrastructureRef: clusterv1.ContractVersionedObjectReference{},
+					Bootstrap:         clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
+				},
+				Status: clusterv1.MachineStatus{},
+			},
+			infraMachine: &unstructured.Unstructured{},
+			expected:     true,
+		},
+		{
+			name: "Machine in the pre-terminate hook phase should not detach",
+			machine: &clusterv1.Machine{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test-machine",
+					Namespace:  metav1.NamespaceDefault,
+					Finalizers: []string{clusterv1.MachineFinalizer},
+				},
+				Spec: clusterv1.MachineSpec{
+					ClusterName:       "test-cluster",
+					InfrastructureRef: clusterv1.ContractVersionedObjectReference{},
+					Bootstrap:         clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
+				},
+				Status: clusterv1.MachineStatus{
+					Deletion: &clusterv1.MachineDeletionStatus{
+						WaitForPreTerminateHookStartTime: &metav1.Time{Time: time.Now()},
+					},
+				},
+			},
+			infraMachine: &unstructured.Unstructured{},
+			expected:     false,
+		},
+		{
+			name: "Machine without infra machine should not detach",
+			machine: &clusterv1.Machine{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test-machine",
+					Namespace:  metav1.NamespaceDefault,
+					Finalizers: []string{clusterv1.MachineFinalizer},
+				},
+				Spec: clusterv1.MachineSpec{
+					ClusterName:       "test-cluster",
+					InfrastructureRef: clusterv1.ContractVersionedObjectReference{},
+					Bootstrap:         clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
+				},
+				Status: clusterv1.MachineStatus{},
+			},
+			expected: false,
+		},
+		{
+			name: "Machine with infra machine in deletion should not detach",
+			machine: &clusterv1.Machine{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test-machine",
+					Namespace:  metav1.NamespaceDefault,
+					Finalizers: []string{clusterv1.MachineFinalizer},
+				},
+				Spec: clusterv1.MachineSpec{
+					ClusterName:       "test-cluster",
+					InfrastructureRef: clusterv1.ContractVersionedObjectReference{},
+					Bootstrap:         clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
+				},
+				Status: clusterv1.MachineStatus{},
+			},
+			infraMachine: toUnstructured(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{Time: time.Now()}}}),
+			expected:     false,
+		},
 		{
 			name: "KCP machine with the pre terminate hook should wait",
 			machine: &clusterv1.Machine{
@@ -2018,7 +2177,8 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) {
 				},
 				Status: clusterv1.MachineStatus{},
 			},
-			expected: true,
+			infraMachine: &unstructured.Unstructured{},
+			expected:     true,
 		},
 		{
 			name: "KCP machine without the pre terminate hook should stop waiting",
@@ -2042,7 +2202,8 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) {
 				},
 				Status: clusterv1.MachineStatus{},
 			},
-			expected: false,
+			infraMachine: &unstructured.Unstructured{},
+			expected:     false,
 		},
 		{
 			name: "Volume detach timeout is over",
@@ -2065,7 +2226,8 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) {
 					},
 				},
 			},
-			expected: false,
+			infraMachine: &unstructured.Unstructured{},
+			expected:     false,
 		},
 		{
 			name: "Volume detach timeout is not yet over",
@@ -2087,7 +2249,8 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) {
 					},
 				},
 			},
-			expected: true,
+			infraMachine: &unstructured.Unstructured{},
+			expected:     true,
 		},
 		{
 			name: "Volume detach timeout option is set to its default value 0",
@@ -2108,7 +2271,8 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) {
 					},
 				},
 			},
-			expected: true,
+			infraMachine: &unstructured.Unstructured{},
+			expected:     true,
 		},
 	}
 	for _, tt := range tests {
@@ -2123,7 +2287,7 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) {
 				Client: c,
 			}
 
-			got := r.isNodeVolumeDetachingAllowed(tt.machine)
+			got := r.isNodeVolumeDetachingAllowed(tt.machine, tt.infraMachine)
 			g.Expect(got).To(Equal(tt.expected))
 		})
 	}
@@ -3698,3 +3862,12 @@ func podByNodeName(o client.Object) []string {
 
 	return []string{pod.Spec.NodeName}
 }
+
+func toUnstructured(obj client.Object) *unstructured.Unstructured {
+	unstructuredObj := &unstructured.Unstructured{}
+	unstructuredObj.SetGroupVersionKind(obj.GetObjectKind().GroupVersionKind())
+	unstructuredObj.SetName(obj.GetName())
+	unstructuredObj.SetNamespace(obj.GetNamespace())
+	unstructuredObj.SetDeletionTimestamp(obj.GetDeletionTimestamp())
+	return unstructuredObj
+}
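
Note for reviewers: the patch records the two start times but does not yet consume them. Below is a minimal sketch of how a follow-up could use the new status field to flag pre-drain hooks that are taking too long. The helper name and the threshold parameter are hypothetical and not part of this change; only the clusterv1 types added by this patch are assumed.

package machine

import (
	"time"

	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

// preDrainHooksStuck reports whether a deleting Machine has been waiting on
// pre-drain hooks for longer than threshold. Hypothetical helper, shown for
// illustration only.
func preDrainHooksStuck(m *clusterv1.Machine, threshold time.Duration) bool {
	deletion := m.Status.Deletion
	if deletion == nil || deletion.WaitForPreDrainHookStartTime == nil {
		// Not deleting, or waiting for pre-drain hooks has not started.
		return false
	}
	return time.Since(deletion.WaitForPreDrainHookStartTime.Time) > threshold
}

The same pattern would apply to WaitForPreTerminateHookStartTime for pre-terminate hooks.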