Commit 1f629e1

🌱 Cleanup v1beta1 updateStatus functions (#12190)
* Cleanup v1beta1 updateStatus functions

* Address comments

  # Conflicts:
  #	internal/controllers/machineset/machineset_controller.go

* More feedback

* Fix CI error
1 parent 7c5f467 commit 1f629e1

15 files changed: +540 -351 lines changed

controlplane/kubeadm/internal/controllers/controller.go

Lines changed: 11 additions & 6 deletions
@@ -214,26 +214,31 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
 
 	defer func() {
 		// Always attempt to update status.
-		if err := r.updateV1Beta1Status(ctx, controlPlane); err != nil {
+		if err := r.updateStatus(ctx, controlPlane); err != nil {
 			var connFailure *internal.RemoteClusterConnectionError
 			if errors.As(err, &connFailure) {
 				log.Error(err, "Could not connect to workload cluster to fetch status")
 			} else {
-				log.Error(err, "Failed to update KubeadmControlPlane status")
-				reterr = kerrors.NewAggregate([]error{reterr, err})
+				reterr = kerrors.NewAggregate([]error{reterr, errors.Wrap(err, "failed to update KubeadmControlPlane status")})
 			}
 		}
 
-		r.updateStatus(ctx, controlPlane)
+		if err := r.updateV1Beta1Status(ctx, controlPlane); err != nil {
+			var connFailure *internal.RemoteClusterConnectionError
+			if errors.As(err, &connFailure) {
+				log.Error(err, "Could not connect to workload cluster to fetch deprecated v1beta1 status")
+			} else {
+				reterr = kerrors.NewAggregate([]error{reterr, errors.Wrap(err, "failed to update KubeadmControlPlane deprecated v1beta1 status")})
+			}
+		}
 
 		// Always attempt to Patch the KubeadmControlPlane object and status after each reconciliation.
 		patchOpts := []patch.Option{}
 		if reterr == nil {
 			patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
 		}
 		if err := patchKubeadmControlPlane(ctx, patchHelper, kcp, patchOpts...); err != nil {
-			log.Error(err, "Failed to patch KubeadmControlPlane")
-			reterr = kerrors.NewAggregate([]error{reterr, err})
+			reterr = kerrors.NewAggregate([]error{reterr, errors.Wrap(err, "failed to patch KubeadmControlPlane")})
 		}
 
 		// Only requeue if there is no error, Requeue or RequeueAfter and the object does not have a deletion timestamp.
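For reference, a minimal, self-contained sketch of the error-handling pattern the new defer block uses: the deferred update's error is wrapped with context via errors.Wrap and folded into the named return value with kerrors.NewAggregate. The reconcile and updateThing names below are hypothetical stand-ins, not the actual KCP code.

package main

import (
	"fmt"

	"github.com/pkg/errors"
	kerrors "k8s.io/apimachinery/pkg/util/errors"
)

// updateThing is a hypothetical stand-in for a status-update call that may fail.
func updateThing() error {
	return errors.New("boom")
}

// reconcile folds the deferred update error into the named return value.
// kerrors.NewAggregate drops nil entries, so a nil reterr does not pollute
// the aggregate and any earlier reconcile error is preserved alongside the
// wrapped update error.
func reconcile() (reterr error) {
	defer func() {
		if err := updateThing(); err != nil {
			reterr = kerrors.NewAggregate([]error{reterr, errors.Wrap(err, "failed to update status")})
		}
	}()
	return nil
}

func main() {
	fmt.Println(reconcile()) // failed to update status: boom
}

Compared to the previous log-and-aggregate approach, wrapping keeps the context in the returned error itself, so it surfaces wherever the aggregated reterr is logged or reported.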

controlplane/kubeadm/internal/controllers/status.go

Lines changed: 79 additions & 47 deletions
@@ -25,6 +25,7 @@ import (
 
 	"github.com/pkg/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/utils/ptr"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -42,11 +43,6 @@ import (
 // updateV1Beta1Status is called after every reconciliation loop in a defer statement to always make sure we have the
 // KubeadmControlPlane status up-to-date.
 func (r *KubeadmControlPlaneReconciler) updateV1Beta1Status(ctx context.Context, controlPlane *internal.ControlPlane) error {
-	selector := collections.ControlPlaneSelectorForCluster(controlPlane.Cluster.Name)
-	// Copy label selector to its status counterpart in string format.
-	// This is necessary for CRDs including scale subresources.
-	controlPlane.KCP.Status.Selector = selector.String()
-
 	upToDateMachines := controlPlane.UpToDateMachines()
 	if controlPlane.KCP.Status.Deprecated == nil {
 		controlPlane.KCP.Status.Deprecated = &controlplanev1.KubeadmControlPlaneDeprecatedStatus{}
@@ -60,7 +56,6 @@ func (r *KubeadmControlPlaneReconciler) updateV1Beta1Status(ctx context.Context,
 	desiredReplicas := *controlPlane.KCP.Spec.Replicas
 
 	// set basic data that does not require interacting with the workload cluster
-	controlPlane.KCP.Status.Replicas = replicas
 	controlPlane.KCP.Status.Deprecated.V1Beta1.ReadyReplicas = 0
 	controlPlane.KCP.Status.Deprecated.V1Beta1.UnavailableReplicas = replicas
 
@@ -70,11 +65,6 @@ func (r *KubeadmControlPlaneReconciler) updateV1Beta1Status(ctx context.Context,
 		return nil
 	}
 
-	lowestVersion := controlPlane.Machines.LowestVersion()
-	if lowestVersion != nil {
-		controlPlane.KCP.Status.Version = lowestVersion
-	}
-
 	switch {
 	// We are scaling up
 	case replicas < desiredReplicas:
@@ -109,51 +99,17 @@ func (r *KubeadmControlPlaneReconciler) updateV1Beta1Status(ctx context.Context,
 	controlPlane.KCP.Status.Deprecated.V1Beta1.ReadyReplicas = status.ReadyNodes
 	controlPlane.KCP.Status.Deprecated.V1Beta1.UnavailableReplicas = replicas - status.ReadyNodes
 
-	// This only gets initialized once and does not change if the kubeadm config map goes away.
 	if status.HasKubeadmConfig {
-		if controlPlane.KCP.Status.Initialization == nil {
-			controlPlane.KCP.Status.Initialization = &controlplanev1.KubeadmControlPlaneInitializationStatus{}
-		}
-		controlPlane.KCP.Status.Initialization.ControlPlaneInitialized = true
 		v1beta1conditions.MarkTrue(controlPlane.KCP, controlplanev1.AvailableV1Beta1Condition)
 	}
-
-	// Surface lastRemediation data in status.
-	// LastRemediation is the remediation currently in progress, in any, or the
-	// most recent of the remediation we are keeping track on machines.
-	var lastRemediation *RemediationData
-
-	if v, ok := controlPlane.KCP.Annotations[controlplanev1.RemediationInProgressAnnotation]; ok {
-		remediationData, err := RemediationDataFromAnnotation(v)
-		if err != nil {
-			return err
-		}
-		lastRemediation = remediationData
-	} else {
-		for _, m := range controlPlane.Machines.UnsortedList() {
-			if v, ok := m.Annotations[controlplanev1.RemediationForAnnotation]; ok {
-				remediationData, err := RemediationDataFromAnnotation(v)
-				if err != nil {
-					return err
-				}
-				if lastRemediation == nil || lastRemediation.Timestamp.Time.Before(remediationData.Timestamp.Time) {
-					lastRemediation = remediationData
-				}
-			}
-		}
-	}
-
-	if lastRemediation != nil {
-		controlPlane.KCP.Status.LastRemediation = lastRemediation.ToStatus()
-	}
 	return nil
 }
 
 // updateStatus reconciles KubeadmControlPlane's status during the entire lifecycle of the object.
-func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, controlPlane *internal.ControlPlane) {
+func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, controlPlane *internal.ControlPlane) error {
 	// If the code failed initializing the control plane, do not update the status.
 	if controlPlane == nil {
-		return
+		return nil
 	}
 
 	// Note: some of the status is set on reconcileControlPlaneAndMachinesConditions (EtcdClusterHealthy, ControlPlaneComponentsHealthy conditions),
@@ -163,6 +119,21 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, contro
 	// Note: KCP also sets status on machines in reconcileUnhealthyMachines and reconcileControlPlaneAndMachinesConditions; if for
 	// any reason those functions are not called before, e.g. an error, this func relies on existing Machine's condition.
 
+	// Copy label selector to its status counterpart in string format.
+	// This is necessary for CRDs including scale subresources.
+	selector := collections.ControlPlaneSelectorForCluster(controlPlane.Cluster.Name)
+	controlPlane.KCP.Status.Selector = selector.String()
+
+	// Set status.version with the lowest K8s version from CP machines.
+	lowestVersion := controlPlane.Machines.LowestVersion()
+	if lowestVersion != nil {
+		controlPlane.KCP.Status.Version = lowestVersion
+	}
+
+	allErrors := []error{}
+	if err := setControlPlaneInitialized(ctx, controlPlane); err != nil {
+		allErrors = append(allErrors, err)
+	}
 	setReplicas(ctx, controlPlane.KCP, controlPlane.Machines)
 	setInitializedCondition(ctx, controlPlane.KCP)
 	setRollingOutCondition(ctx, controlPlane.KCP, controlPlane.Machines)
@@ -173,6 +144,34 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, contro
 	setRemediatingCondition(ctx, controlPlane.KCP, controlPlane.MachinesToBeRemediatedByKCP(), controlPlane.UnhealthyMachines())
 	setDeletingCondition(ctx, controlPlane.KCP, controlPlane.DeletingReason, controlPlane.DeletingMessage)
 	setAvailableCondition(ctx, controlPlane.KCP, controlPlane.IsEtcdManaged(), controlPlane.EtcdMembers, controlPlane.EtcdMembersAndMachinesAreMatching, controlPlane.Machines)
+	if err := setLastRemediation(ctx, controlPlane); err != nil {
+		allErrors = append(allErrors, err)
+	}
+	return kerrors.NewAggregate(allErrors)
+}
+
+// setControlPlaneInitialized surface control plane initialized when it is possible to check that the Kubeadm config exists in the workload cluster;
+// this is considered a proxy information about the API Server being up and running and kubeadm init successfully completed.
+// Note: This only gets initialized once and does not change if the kubeadm config map goes away.
+func setControlPlaneInitialized(ctx context.Context, controlPlane *internal.ControlPlane) error {
+	if controlPlane.KCP.Status.Initialization == nil || !controlPlane.KCP.Status.Initialization.ControlPlaneInitialized {
+		workloadCluster, err := controlPlane.GetWorkloadCluster(ctx)
+		if err != nil {
+			return errors.Wrap(err, "failed to create remote cluster client")
+		}
+		status, err := workloadCluster.ClusterStatus(ctx)
+		if err != nil {
+			return err
+		}
+
+		if status.HasKubeadmConfig {
+			if controlPlane.KCP.Status.Initialization == nil {
+				controlPlane.KCP.Status.Initialization = &controlplanev1.KubeadmControlPlaneInitializationStatus{}
+			}
+			controlPlane.KCP.Status.Initialization.ControlPlaneInitialized = true
+		}
+	}
+	return nil
 }
 
 func setReplicas(_ context.Context, kcp *controlplanev1.KubeadmControlPlane, machines collections.Machines) {
@@ -189,6 +188,7 @@ func setReplicas(_ context.Context, kcp *controlplanev1.KubeadmControlPlane, mac
 		}
 	}
 
+	kcp.Status.Replicas = int32(len(machines))
 	kcp.Status.ReadyReplicas = ptr.To(readyReplicas)
 	kcp.Status.AvailableReplicas = ptr.To(availableReplicas)
 	kcp.Status.UpToDateReplicas = ptr.To(upToDateReplicas)
@@ -768,6 +768,38 @@ func setAvailableCondition(_ context.Context, kcp *controlplanev1.KubeadmControl
 	})
 }
 
+// setLastRemediation surface lastRemediation data in status.
+// LastRemediation is the remediation currently in progress, if any, or the
+// most recent of the remediation we are keeping track on machines.
+func setLastRemediation(_ context.Context, controlPlane *internal.ControlPlane) error {
+	var lastRemediation *RemediationData
+
+	if v, ok := controlPlane.KCP.Annotations[controlplanev1.RemediationInProgressAnnotation]; ok {
+		remediationData, err := RemediationDataFromAnnotation(v)
+		if err != nil {
+			return err
+		}
+		lastRemediation = remediationData
+	} else {
+		for _, m := range controlPlane.Machines.UnsortedList() {
+			if v, ok := m.Annotations[controlplanev1.RemediationForAnnotation]; ok {
+				remediationData, err := RemediationDataFromAnnotation(v)
+				if err != nil {
+					return err
+				}
+				if lastRemediation == nil || lastRemediation.Timestamp.Time.Before(remediationData.Timestamp.Time) {
+					lastRemediation = remediationData
+				}
+			}
+		}
+	}
+
+	if lastRemediation != nil {
+		controlPlane.KCP.Status.LastRemediation = lastRemediation.ToStatus()
+	}
+	return nil
+}
+
 // shouldSurfaceWhenAvailableTrue defines when a control plane components/etcd issue should surface when
 // Available condition is true.
 // The main goal of this check is to avoid to surface false negatives/flakes, and thus it requires that
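The reworked updateStatus runs each setter, collects any failures, and returns them as a single aggregate. Below is a minimal sketch of that shape, with hypothetical setters standing in for the real KCP helpers; the useful property is that kerrors.NewAggregate returns nil for an empty (or all-nil) slice, so a fully successful pass still reports success.

package main

import (
	"fmt"

	kerrors "k8s.io/apimachinery/pkg/util/errors"
)

// collect mirrors the shape of the new updateStatus: run each setter, collect
// failures, and return one aggregated error. The setters are stand-ins, not
// the actual KCP status helpers.
func collect(setters ...func() error) error {
	allErrors := []error{}
	for _, set := range setters {
		if err := set(); err != nil {
			allErrors = append(allErrors, err)
		}
	}
	// NewAggregate returns nil when the slice is empty, so no extra
	// bookkeeping is needed for the success path.
	return kerrors.NewAggregate(allErrors)
}

func main() {
	ok := func() error { return nil }
	fail := func() error { return fmt.Errorf("setter failed") }

	fmt.Println(collect(ok, ok))   // <nil>
	fmt.Println(collect(ok, fail)) // setter failed
}

This is also why the caller in Reconcile can simply aggregate the returned error into reterr: a nil result from updateStatus leaves reterr untouched.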
