6 changes: 0 additions & 6 deletions controlplane/kubeadm/internal/controllers/controller.go
@@ -806,12 +806,6 @@ func (r *KubeadmControlPlaneReconciler) ClusterToKubeadmControlPlane(_ context.C
}

// syncMachines updates Machines, InfrastructureMachines and KubeadmConfigs to propagate in-place mutable fields from KCP.
// Note: It also cleans up managed fields of all Machines so that Machines that were
// created/patched before (< v1.4.0) the controller adopted Server-Side-Apply (SSA) can also work with SSA.
// Note: For InfrastructureMachines and KubeadmConfigs it also drops ownership of "metadata.labels" and
// "metadata.annotations" from "manager" so that "capi-kubeadmcontrolplane" can own these fields and can work with SSA.
// Otherwise, fields would be co-owned by our "old" "manager" and "capi-kubeadmcontrolplane" and then we would not be
// able to e.g. drop labels and annotations.
func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, controlPlane *internal.ControlPlane) error {
patchHelpers := map[string]*patch.Helper{}
for machineName := range controlPlane.Machines {
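The comment block removed above explains why syncMachines cleans up managed fields when adopting Server-Side-Apply (SSA). As a rough sketch of that idea (not the actual Cluster API ssa helper; the package and function names below are made up for illustration), dropping the legacy manager's Update entries from managedFields is what allows an SSA field owner such as "capi-kubeadmcontrolplane" to take sole ownership of labels and annotations:

```go
// Sketch only: not the actual Cluster API ssa package; names are illustrative.
package ssautil

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// dropLegacyManagerEntries removes managedFields entries recorded by the legacy
// "manager" via Update operations. If these entries stay around, fields such as
// metadata.labels are co-owned by the old manager and the SSA field owner
// (e.g. "capi-kubeadmcontrolplane"), and dropping a label or annotation via SSA
// would not take effect.
func dropLegacyManagerEntries(obj metav1.Object, legacyManager string) {
	kept := make([]metav1.ManagedFieldsEntry, 0, len(obj.GetManagedFields()))
	for _, entry := range obj.GetManagedFields() {
		if entry.Manager == legacyManager && entry.Operation == metav1.ManagedFieldsOperationUpdate {
			// Drop the legacy entry; the SSA applier re-owns these fields on the next apply.
			continue
		}
		kept = append(kept, entry)
	}
	obj.SetManagedFields(kept)
}
```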
4 changes: 2 additions & 2 deletions controlplane/kubeadm/internal/controllers/inplace_trigger.go
@@ -100,7 +100,7 @@ func (r *KubeadmControlPlaneReconciler) triggerInPlaceUpdate(ctx context.Context
// of an in-place update here, e.g. for the case where the InfraMachineTemplate was rotated.
clusterv1.TemplateClonedFromNameAnnotation: desiredInfraMachine.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation],
clusterv1.TemplateClonedFromGroupKindAnnotation: desiredInfraMachine.GetAnnotations()[clusterv1.TemplateClonedFromGroupKindAnnotation],
// Machine controller waits for this annotation to exist on machine and relate objects before starting in place.
// Machine controller waits for this annotation to exist on Machine and related objects before starting the in-place update.
clusterv1.UpdateInProgressAnnotation: "",
})
if err := ssa.Patch(ctx, r.Client, kcpManagerName, desiredInfraMachine); err != nil {
@@ -110,7 +110,7 @@ func (r *KubeadmControlPlaneReconciler) triggerInPlaceUpdate(ctx context.Context
// Write KubeadmConfig without the labels & annotations that are written continuously by updateLabelsAndAnnotations.
desiredKubeadmConfig.Labels = nil
desiredKubeadmConfig.Annotations = map[string]string{
// Machine controller waits for this annotation to exist on machine and relate objects before starting in place.
// Machine controller waits for this annotation to exist on Machine and related objects before starting the in-place update.
clusterv1.UpdateInProgressAnnotation: "",
}
if err := ssa.Patch(ctx, r.Client, kcpManagerName, desiredKubeadmConfig); err != nil {
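The comments added in this file spell out the contract around clusterv1.UpdateInProgressAnnotation: the Machine controller only starts an in-place update once the annotation exists on the Machine and its related objects, and the annotation is removed again when the update completes. A minimal sketch of that contract (the helper names are hypothetical and the import path is an assumption that may differ across Cluster API versions):

```go
// Sketch only: hypothetical helpers; the clusterv1 import path is an assumption
// and may differ depending on the Cluster API version.
package inplace

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

// readyForInPlaceUpdate returns true only when every object involved in the update
// (Machine, InfraMachine, KubeadmConfig, ...) carries the UpdateInProgressAnnotation.
// The value is irrelevant; the annotation is written with an empty string.
func readyForInPlaceUpdate(objs ...metav1.Object) bool {
	for _, obj := range objs {
		if _, ok := obj.GetAnnotations()[clusterv1.UpdateInProgressAnnotation]; !ok {
			return false
		}
	}
	return true
}

// markInPlaceUpdateCompleted removes the annotation once the update is done; this is
// also what the test mutator below simulates for Machines.
func markInPlaceUpdateCompleted(obj metav1.Object) {
	annotations := obj.GetAnnotations()
	delete(annotations, clusterv1.UpdateInProgressAnnotation) // no-op if annotations is nil
	obj.SetAnnotations(annotations)
}
```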
@@ -504,7 +504,7 @@ func machineSetControllerMutator(log *fileLogger, ms *clusterv1.MachineSet, scop
// The prod code for the MachineSet controller performs in order triggerInPlaceUpdate and then syncReplicas and then updateStatus.
// This func mimics the same code structure, with the addition of the following operation that is implemented here for convenience.

// In order to simulate an in-place update being completed, remove the updatingInPlaceAnnotation from machines after
// In order to simulate an in-place update being completed, remove the UpdateInProgressAnnotation from machines after
// pendingAcknowledgeMove is gone in a previous reconcile.
// Note: ideally this should be implemented in the fake Machine controller, but current implementation is
// considered an acceptable trade-off because it provides a signal about in-place update completed, without
@@ -577,7 +577,7 @@ func machineSetControllerMutatorSyncReplicas(log *fileLogger, ms *clusterv1.Mach
diff := len(scope.machineSetMachines[ms.Name]) - int(ptr.Deref(ms.Spec.Replicas, 0))
switch {
case diff < 0:
// if too few machines, create missing machine unless machine creation is disabled.
// If there are not enough Machines, create missing Machines unless Machine creation is disabled.
machinesToAdd := -diff
if ms.Annotations != nil {
if value, ok := ms.Annotations[clusterv1.DisableMachineCreateAnnotation]; ok && value == "true" {
@@ -160,19 +160,19 @@ func (p *rolloutPlanner) reconcileReplicasPendingAcknowledgeMove(ctx context.Con
}
}

// reconcileNewMachineSet reconciles replica number for the new MS.
// Note: In case of scale down this function does not make consideration about possible impacts on availability.
// reconcileNewMachineSet reconciles the replica number for the new MS.
// Note: In case of scale down this function does not consider the possible impact on availability.
// This is considered acceptable because historically it never led to any problem, but we might revisit this in the future
// because some limitations of this approach are becoming more evident, e.g.
//
// when users scale down the MD, the operation might temporarily breach min availability
// when users scale down the MD, the operation might temporarily breach min availability (maxUnavailable)
//
// There are code paths specifically added to prevent this issue to become more relevant when doing in-place updates;
// e.g. the MS controller will give highest delete priority to machines still updating in-place,
// which are also unavailable machines.
// There are code paths specifically added to prevent this issue becoming more relevant when doing in-place updates;
// e.g. the MS controller will give highest delete priority to Machines still updating in-place,
// which are also unavailable Machines.
//
// Notably, there is also not agreement yet on a different way forward because e.g. limiting scale down of the
// new MS could lead e.g. to completing in place upgrade of machines that will be otherwise deleted.
// Notably, there is also no agreement yet on a different way forward because e.g. limiting scale down of the
// new MS could lead e.g. to completing in place update of Machines that will be otherwise deleted.
func (p *rolloutPlanner) reconcileNewMachineSet(ctx context.Context) error {
log := ctrl.LoggerFrom(ctx)
allMSs := append(p.oldMSs, p.newMS)
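The reworked comment describes the trade-off around scaling down the new MS: the operation can temporarily breach maxUnavailable, which is mitigated by the MachineSet controller giving highest delete priority to Machines still updating in-place, since those are already unavailable. A rough sketch of such a preference (hypothetical helper names and an arbitrary priority scale; the actual MachineSet delete-priority logic is more involved):

```go
// Sketch only: hypothetical helpers; not the actual MachineSet controller code.
package rollout

import (
	"sort"

	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" // import path is an assumption
)

// sortMachinesForScaleDown orders Machines so that the ones still updating in-place
// come first: they already count as unavailable, so deleting them does not reduce
// availability any further.
func sortMachinesForScaleDown(machines []*clusterv1.Machine) {
	sort.SliceStable(machines, func(i, j int) bool {
		return deletePriority(machines[i]) > deletePriority(machines[j])
	})
}

func deletePriority(m *clusterv1.Machine) int {
	if _, updatingInPlace := m.Annotations[clusterv1.UpdateInProgressAnnotation]; updatingInPlace {
		// Highest priority: still updating in-place, hence already unavailable.
		return 100
	}
	return 0
}
```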