Commit 11f2a15

Address comments

1 parent aaff195 commit 11f2a15

5 files changed: +62 −64 lines changed

controlplane/kubeadm/internal/controllers/inplace_trigger.go

Lines changed: 2 additions & 0 deletions

@@ -21,6 +21,7 @@ import (
 	"time"

 	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -136,6 +137,7 @@ func (r *KubeadmControlPlaneReconciler) triggerInPlaceUpdate(ctx context.Context
 	}

 	log.Info("Completed triggering in-place update", "Machine", klog.KObj(machine))
+	r.recorder.Event(machine, corev1.EventTypeNormal, "SuccessfulStartInPlaceUpdate", "Machine starting in-place update")

 	// Wait until the cache observed the Machine with PendingHooksAnnotation to ensure subsequent reconciles
 	// will observe it as well and won't repeatedly call triggerInPlaceUpdate.
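
For context: the new r.recorder.Event call assumes the reconciler already carries a record.EventRecorder. A minimal sketch of the usual controller-runtime wiring, with hypothetical names (exampleReconciler, setupWithManager, recordUpdateStarted); GetEventRecorderFor and Event are the only real APIs relied on here:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// exampleReconciler mirrors how KubeadmControlPlaneReconciler carries its
// recorder field; the identifiers in this sketch are illustrative.
type exampleReconciler struct {
	Client   client.Client
	recorder record.EventRecorder
}

// setupWithManager shows the usual wiring: the manager hands out a named
// recorder whose events are published through the API server.
func (r *exampleReconciler) setupWithManager(mgr ctrl.Manager) {
	r.recorder = mgr.GetEventRecorderFor("kubeadmcontrolplane-controller")
}

// recordUpdateStarted attaches the event to the Machine object itself
// (not its owner), matching the call added in this commit.
func (r *exampleReconciler) recordUpdateStarted(machine client.Object) {
	r.recorder.Event(machine, corev1.EventTypeNormal, "SuccessfulStartInPlaceUpdate", "Machine starting in-place update")
}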

controlplane/kubeadm/internal/controllers/inplace_trigger_test.go

Lines changed: 3 additions & 1 deletion

@@ -24,6 +24,7 @@ import (
 	. "github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/client-go/tools/record"
 	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"

@@ -278,7 +279,8 @@ func Test_triggerInPlaceUpdate(t *testing.T) {
 	}

 	r := KubeadmControlPlaneReconciler{
-		Client: env.Client,
+		Client:   env.Client,
+		recorder: record.NewFakeRecorder(32),
 	}

 	err := r.triggerInPlaceUpdate(ctx, currentMachineForPatch, upToDateResult)
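
record.NewFakeRecorder(32) satisfies the new recorder field without an API server: the fake buffers each emitted event as a formatted string on its Events channel. A minimal sketch of how a test could also assert the event fired; this assertion is not part of the commit, and the test name and Pod object are illustrative:

package controllers

import (
	"testing"

	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"
)

func Test_fakeRecorderCapturesEvents(t *testing.T) {
	g := NewWithT(t)

	// The fake recorder buffers up to 32 events on its Events channel
	// instead of publishing them via the API server.
	recorder := record.NewFakeRecorder(32)

	obj := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine-1"}}
	recorder.Event(obj, corev1.EventTypeNormal, "SuccessfulStartInPlaceUpdate", "Machine starting in-place update")

	// Each event arrives as a single "<type> <reason> <message>" string.
	g.Expect(<-recorder.Events).To(ContainSubstring("SuccessfulStartInPlaceUpdate"))
}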

internal/controllers/machineset/machineset_controller.go

Lines changed: 10 additions & 17 deletions

@@ -369,13 +369,6 @@ func (r *Reconciler) triggerInPlaceUpdate(ctx context.Context, s *scope) (ctrl.R
 			continue
 		}

-		// Complete the move operation started by the source MachineSet by updating machine, infraMachine and bootstrapConfig
-		// to align to the desiredState for the current MachineSet.
-		if err := r.completeMoveMachine(ctx, s, machine); err != nil {
-			errs = append(errs, err)
-			continue
-		}
-
 		// If the existing machine is pending acknowledge from the MD controller after a move operation,
 		// wait until it is possible to drop the PendingAcknowledgeMove annotation.
 		orig := machine.DeepCopy()
@@ -403,21 +396,27 @@ func (r *Reconciler) triggerInPlaceUpdate(ctx context.Context, s *scope) (ctrl.R
 			}
 		}

+		// Complete the move operation started by the source MachineSet by updating machine, infraMachine and bootstrapConfig
+		// to align to the desiredState for the current MachineSet.
+		if err := r.completeMoveMachine(ctx, s, machine); err != nil {
+			errs = append(errs, err)
+			continue
+		}
+
 		// Note: Once we write PendingHooksAnnotation the Machine controller will start with the in-place update.
 		hooks.MarkObjectAsPending(machine, runtimehooksv1.UpdateMachine)

 		// Note: Intentionally using client.Patch instead of SSA. Otherwise we would
 		// have to ensure we preserve PendingHooksAnnotation on existing Machines in MachineSet and that would lead to race
 		// conditions when the Machine controller tries to remove the annotation and MachineSet adds it back.
 		if err := r.Client.Patch(ctx, machine, client.MergeFrom(orig)); err != nil {
-			r.recorder.Eventf(s.machineSet, corev1.EventTypeWarning, "FailedStartInPlaceUpdate", "Failed to start in-place update for Machine %q: %v", machine.Name, err)
 			errs = append(errs, errors.Wrapf(err, "failed to start in-place update for Machine %s", klog.KObj(machine)))
 			continue
 		}

 		machinesTriggeredInPlace = append(machinesTriggeredInPlace, machine)
 		log.Info("Completed triggering in-place update", "Machine", klog.KObj(machine))
-		r.recorder.Eventf(s.machineSet, corev1.EventTypeNormal, "SuccessfulStartInPlaceUpdate", "Machine %q started in place update", machine.Name)
+		r.recorder.Event(machine, corev1.EventTypeNormal, "SuccessfulStartInPlaceUpdate", "Machine starting in-place update")
 	}

 	// Wait until the cache observed the Machine with PendingHooksAnnotation to ensure subsequent reconciles
@@ -911,7 +910,6 @@ func (r *Reconciler) createMachines(ctx context.Context, s *scope, machinesToAdd
 		log := log
 		machine, computeMachineErr := r.computeDesiredMachine(ms, nil)
 		if computeMachineErr != nil {
-			r.recorder.Eventf(ms, corev1.EventTypeWarning, "FailedCreate", "Failed to create Machine: %v", err)
 			v1beta1conditions.MarkFalse(ms, clusterv1.MachinesCreatedV1Beta1Condition, clusterv1.MachineCreationFailedV1Beta1Reason,
 				clusterv1.ConditionSeverityError, "%s", computeMachineErr.Error())
 			return ctrl.Result{}, errors.Wrap(computeMachineErr, "failed to create Machine: failed to compute desired Machine")
@@ -926,7 +924,6 @@ func (r *Reconciler) createMachines(ctx context.Context, s *scope, machinesToAdd
 		if ms.Spec.Template.Spec.Bootstrap.ConfigRef.IsDefined() {
 			bootstrapConfig, bootstrapRef, err = r.createBootstrapConfig(ctx, ms, machine)
 			if err != nil {
-				r.recorder.Eventf(ms, corev1.EventTypeWarning, "FailedCreate", "Failed to create Machine: %v", err)
 				v1beta1conditions.MarkFalse(ms, clusterv1.MachinesCreatedV1Beta1Condition, clusterv1.BootstrapTemplateCloningFailedV1Beta1Reason, clusterv1.ConditionSeverityError, "%s", err.Error())
 				return ctrl.Result{}, errors.Wrapf(err, "failed to clone bootstrap configuration from %s %s while creating a Machine",
 					ms.Spec.Template.Spec.Bootstrap.ConfigRef.Kind,
@@ -947,7 +944,6 @@ func (r *Reconciler) createMachines(ctx context.Context, s *scope, machinesToAdd
 			}
 		}

-		r.recorder.Eventf(ms, corev1.EventTypeWarning, "FailedCreate", "Failed to create Machine: %v", err)
 		v1beta1conditions.MarkFalse(ms, clusterv1.MachinesCreatedV1Beta1Condition, clusterv1.InfrastructureTemplateCloningFailedV1Beta1Reason, clusterv1.ConditionSeverityError, "%s", err.Error())
 		return ctrl.Result{}, kerrors.NewAggregate([]error{errors.Wrapf(err, "failed to clone infrastructure machine from %s %s while creating a Machine",
 			ms.Spec.Template.Spec.InfrastructureRef.Kind,
@@ -969,7 +965,6 @@ func (r *Reconciler) createMachines(ctx context.Context, s *scope, machinesToAdd
 			}
 		}

-		r.recorder.Eventf(ms, corev1.EventTypeWarning, "FailedCreate", "Failed to create Machine: %v", err)
 		v1beta1conditions.MarkFalse(ms, clusterv1.MachinesCreatedV1Beta1Condition, clusterv1.MachineCreationFailedV1Beta1Reason,
 			clusterv1.ConditionSeverityError, "%s", err.Error())
 		return ctrl.Result{}, kerrors.NewAggregate(errs)
@@ -1016,15 +1011,13 @@ func (r *Reconciler) deleteMachines(ctx context.Context, s *scope, machinesToDel
 		log := log.WithValues("Machine", klog.KObj(machine))
 		if machine.GetDeletionTimestamp().IsZero() {
 			if err := r.Client.Delete(ctx, machine); err != nil {
-				log.Error(err, "Unable to delete Machine")
-				r.recorder.Eventf(ms, corev1.EventTypeWarning, "FailedDelete", "Failed to delete machine %q: %v", machine.Name, err)
 				errs = append(errs, err)
 				continue
 			}

 			machinesDeleted = append(machinesDeleted, machine)
 			log.Info(fmt.Sprintf("Deleting Machine (scale down, deleting %d of %d)", i+1, machinesToDelete))
-			r.recorder.Eventf(ms, corev1.EventTypeNormal, "SuccessfulDelete", "Deleted machine %q", machine.Name)
+			r.recorder.Eventf(ms, corev1.EventTypeNormal, "SuccessfulDelete", "Deleted Machine %q", machine.Name)
 		} else {
 			log.Info(fmt.Sprintf("Waiting for Machine to be deleted (scale down, deleting %d of %d)", i+1, machinesToDelete))
 		}
@@ -1121,7 +1114,7 @@ func (r *Reconciler) startMoveMachines(ctx context.Context, s *scope, targetMSNa
 		// this label between "manager" and "capi-machineset", but this is not an issue because the MS controller will never unset this label.
 		targetUniqueLabel, ok := targetMS.Labels[clusterv1.MachineDeploymentUniqueLabel]
 		if !ok {
-			return ctrl.Result{}, errors.Errorf("MachineSet %s does not have a unique label", targetMS.Name)
+			return ctrl.Result{}, errors.Errorf("MachineSet %s does not have the %s label", targetMS.Name, clusterv1.MachineDeploymentUniqueLabel)
 		}
 		machine.Labels[clusterv1.MachineDeploymentUniqueLabel] = targetUniqueLabel
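
The "intentionally using client.Patch instead of SSA" note is the central design choice in this hunk: client.MergeFrom produces a merge patch containing only the delta against a snapshot, so the MachineSet controller can add PendingHooksAnnotation without racing the Machine controller over fields it does not own. A minimal sketch of the pattern, assuming a hypothetical addAnnotation helper and a plain Pod rather than the repository's types:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// addAnnotation illustrates the client.MergeFrom pattern: snapshot the
// object, mutate it, then send a merge patch that carries only the delta
// (here, a single annotation).
func addAnnotation(ctx context.Context, c client.Client, obj *corev1.Pod, key, value string) error {
	// Snapshot before mutating; MergeFrom diffs against this copy.
	orig := obj.DeepCopy()

	if obj.Annotations == nil {
		obj.Annotations = map[string]string{}
	}
	obj.Annotations[key] = value

	// Unlike server-side apply, the merge patch touches only the changed
	// keys, so annotations managed by other controllers are left alone.
	return c.Patch(ctx, obj, client.MergeFrom(orig))
}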

internal/controllers/machineset/machineset_controller_status_test.go

Lines changed: 3 additions & 2 deletions

@@ -1204,14 +1204,15 @@ func withCondition(c metav1.Condition) fakeMachinesOption {
 	}
 }

-func withHealthyStatus() fakeMachinesOption {
+func withHealthyNode() fakeMachinesOption {
+	// Note: This is what is required by delete priority functions to consider the machine healthy.
 	return func(m *clusterv1.Machine) {
 		m.Status = clusterv1.MachineStatus{
 			NodeRef: clusterv1.MachineNodeReference{Name: "some-node"},
 			Conditions: []metav1.Condition{
 				{
 					Type:   clusterv1.MachineNodeHealthyCondition,
-					Status: metav1.ConditionUnknown,
+					Status: metav1.ConditionTrue,
 				},
 			},
 		}
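
withHealthyNode is one of this file's functional options: each fakeMachinesOption is a closure that mutates the Machine being built, and a constructor applies the options in order. A generic sketch of the pattern with illustrative names (none of these identifiers come from the repository):

package example

// machine is a stand-in for the object under construction.
type machine struct {
	name    string
	healthy bool
}

// option mutates a machine; this mirrors the fakeMachinesOption signature.
type option func(*machine)

// withHealthy marks the machine healthy, analogous to withHealthyNode.
func withHealthy() option {
	return func(m *machine) { m.healthy = true }
}

// newMachine applies options in order, so later options can override
// earlier ones.
func newMachine(name string, opts ...option) *machine {
	m := &machine{name: name}
	for _, opt := range opts {
		opt(m)
	}
	return m
}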
