diff --git a/internal/controllers/machinedeployment/machinedeployment_controller.go b/internal/controllers/machinedeployment/machinedeployment_controller.go index aeb80bfa4640..f0bcfa35d20d 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "strings" + "time" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -27,6 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "k8s.io/utils/ptr" @@ -303,6 +305,93 @@ func (r *Reconciler) reconcile(ctx context.Context, s *scope) error { return errors.Errorf("unexpected deployment strategy type: %s", md.Spec.Rollout.Strategy.Type) } +// createOrUpdateMachineSetsAndSyncMachineDeploymentRevision applies changes identified by the rolloutPlanner to both newMS and oldMSs. +// Note: Both newMS and oldMS include the full intent for the SSA apply call with mandatory labels, +// in place propagated fields, the annotations derived from the MachineDeployment, revision annotations +// and also annotations influencing how to perform scale up/down operations. +// scaleIntents instead are handled separately in the rolloutPlanner and should be applied to MachineSets +// before persisting changes. +// Note: When the newMS has been created by the rollout planner, also wait for the cache to be up to date. +func (r *Reconciler) createOrUpdateMachineSetsAndSyncMachineDeploymentRevision(ctx context.Context, p *rolloutPlanner) error { + log := ctrl.LoggerFrom(ctx) + allMSs := append(p.oldMSs, p.newMS) + + for _, ms := range allMSs { + log = log.WithValues("MachineSet", klog.KObj(ms)) + ctx = ctrl.LoggerInto(ctx, log) + + originalReplicas := ptr.Deref(ms.Spec.Replicas, 0) + if scaleIntent, ok := p.scaleIntents[ms.Name]; ok { + ms.Spec.Replicas = &scaleIntent + } + + if ms.GetUID() == "" { + // Create the MachineSet. + if err := ssa.Patch(ctx, r.Client, machineDeploymentManagerName, ms); err != nil { + r.recorder.Eventf(p.md, corev1.EventTypeWarning, "FailedCreate", "Failed to create MachineSet %s: %v", klog.KObj(ms), err) + return errors.Wrapf(err, "failed to create new MachineSet %s", klog.KObj(ms)) + } + log.Info(fmt.Sprintf("MachineSet created (%s)", p.createReason)) + r.recorder.Eventf(p.md, corev1.EventTypeNormal, "SuccessfulCreate", "Created MachineSet %s with %d replicas", klog.KObj(ms), ptr.Deref(ms.Spec.Replicas, 0)) + + // Keep trying to get the MachineSet. This will force the cache to update and prevent any future reconciliation of + // the MachineDeployment to reconcile with an outdated list of MachineSets which could lead to unwanted creation of + // a duplicate MachineSet. + var pollErrors []error + tmpMS := &clusterv1.MachineSet{} + if err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 10*time.Second, true, func(ctx context.Context) (bool, error) { + if err := r.Client.Get(ctx, client.ObjectKeyFromObject(ms), tmpMS); err != nil { + // Do not return error here. Continue to poll even if we hit an error + // so that we avoid existing because of transient errors like network flakes. + // Capture all the errors and return the aggregate error if the poll fails eventually. 
+ pollErrors = append(pollErrors, err) + return false, nil + } + return true, nil + }); err != nil { + return errors.Wrapf(kerrors.NewAggregate(pollErrors), "failed to get the MachineSet %s after creation", klog.KObj(ms)) + } + + // Report back creation timestamp, because legacy scale func uses it to sort machines. + // TODO(in-place): drop this as soon as handling of MD with paused rollouts is moved into rollout planner (see scale in machinedeployment_sync.go). + ms.CreationTimestamp = tmpMS.CreationTimestamp + continue + } + + // Update the MachineSet to propagate in-place mutable fields from the MachineDeployment and/or changes applied by the rollout planner. + originalMS, ok := p.originalMS[ms.Name] + if !ok { + return errors.Errorf("failed to update MachineSet %s, original MS is missing", klog.KObj(ms)) + } + + err := ssa.Patch(ctx, r.Client, machineDeploymentManagerName, ms, ssa.WithCachingProxy{Cache: r.ssaCache, Original: originalMS}) + if err != nil { + r.recorder.Eventf(p.md, corev1.EventTypeWarning, "FailedUpdate", "Failed to update MachineSet %s: %v", klog.KObj(ms), err) + return errors.Wrapf(err, "failed to update MachineSet %s", klog.KObj(ms)) + } + + newReplicas := ptr.Deref(ms.Spec.Replicas, 0) + if newReplicas < originalReplicas { + log.Info(fmt.Sprintf("Scaled down MachineSet %s to %d replicas (-%d)", ms.Name, newReplicas, originalReplicas-newReplicas)) + r.recorder.Eventf(p.md, corev1.EventTypeNormal, "SuccessfulScale", "Scaled down MachineSet %v: %d -> %d", ms.Name, originalReplicas, newReplicas) + } + if newReplicas > originalReplicas { + log.Info(fmt.Sprintf("Scaled up MachineSet %s to %d replicas (+%d)", ms.Name, newReplicas, newReplicas-originalReplicas)) + r.recorder.Eventf(p.md, corev1.EventTypeNormal, "SuccessfulScale", "Scaled up MachineSet %v: %d -> %d", ms.Name, originalReplicas, newReplicas) + } + } + + // Surface the revision annotation on the MD level + if p.md.Annotations == nil { + p.md.Annotations = make(map[string]string) + } + if p.md.Annotations[clusterv1.RevisionAnnotation] != p.revision { + p.md.Annotations[clusterv1.RevisionAnnotation] = p.revision + } + + return nil +} + func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) error { log := ctrl.LoggerFrom(ctx) if err := r.getAndAdoptMachineSetsForDeployment(ctx, s); err != nil { diff --git a/internal/controllers/machinedeployment/machinedeployment_controller_test.go b/internal/controllers/machinedeployment/machinedeployment_controller_test.go index 7631075c898a..62d892cb35ec 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller_test.go @@ -348,10 +348,9 @@ func TestMachineDeploymentReconciler(t *testing.T) { g.Expect(env.List(ctx, machineSets, msListOpts...)).To(Succeed()) // Verify we still only have 2 MachineSets. g.Expect(machineSets.Items).To(HaveLen(2)) - // Verify that the new MachineSet gets the updated labels. + // Verify that the new and old MachineSet gets the updated labels. g.Expect(machineSets.Items[0].Spec.Template.Labels).To(HaveKeyWithValue("updated", "true")) - // Verify that the old MachineSet does not get the updated labels. 
- g.Expect(machineSets.Items[1].Spec.Template.Labels).ShouldNot(HaveKeyWithValue("updated", "true")) + g.Expect(machineSets.Items[1].Spec.Template.Labels).To(HaveKeyWithValue("updated", "true")) }, timeout).Should(Succeed()) // Update the NodeDrainTimout, NodeDeletionTimeoutSeconds, NodeVolumeDetachTimeoutSeconds of the MachineDeployment, @@ -384,10 +383,19 @@ func TestMachineDeploymentReconciler(t *testing.T) { HaveValue(Equal(duration10s)), ), "NodeVolumeDetachTimeoutSeconds value does not match expected") - // Verify that the old machine set keeps the old values. - g.Expect(machineSets.Items[1].Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds).Should(BeNil()) - g.Expect(machineSets.Items[1].Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds).Should(BeNil()) - g.Expect(machineSets.Items[1].Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds).Should(BeNil()) + // Verify that the old machine set have the new values. + g.Expect(machineSets.Items[1].Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds).Should(And( + Not(BeNil()), + HaveValue(Equal(duration10s)), + ), "NodeDrainTimout value does not match expected") + g.Expect(machineSets.Items[1].Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds).Should(And( + Not(BeNil()), + HaveValue(Equal(duration10s)), + ), "NodeDeletionTimeoutSeconds value does not match expected") + g.Expect(machineSets.Items[1].Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds).Should(And( + Not(BeNil()), + HaveValue(Equal(duration10s)), + ), "NodeVolumeDetachTimeoutSeconds value does not match expected") }).Should(Succeed()) // Update the deletion.order of the MachineDeployment, @@ -404,8 +412,8 @@ func TestMachineDeploymentReconciler(t *testing.T) { // Verify the deletion.order value is updated g.Expect(machineSets.Items[0].Spec.Deletion.Order).Should(Equal(clusterv1.NewestMachineSetDeletionOrder)) - // Verify that the old machine set retains its delete policy - g.Expect(machineSets.Items[1].Spec.Deletion.Order).To(Equal(clusterv1.OldestMachineSetDeletionOrder)) + // Verify that the old machine set have the new values. + g.Expect(machineSets.Items[1].Spec.Deletion.Order).Should(Equal(clusterv1.NewestMachineSetDeletionOrder)) }).Should(Succeed()) // Verify that all the MachineSets have the expected OwnerRef. diff --git a/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go b/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go index 1b6b3a875075..b83a215b63a5 100644 --- a/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go +++ b/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go @@ -27,46 +27,26 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil" - "sigs.k8s.io/cluster-api/util/patch" ) // rolloutOnDelete reconcile machine sets controlled by a MachineDeployment that is using the OnDelete strategy. 
func (r *Reconciler) rolloutOnDelete(ctx context.Context, md *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet, templateExists bool) error { - // TODO(in-place): move create newMS into rolloutPlanner - newMS, oldMSs, err := r.getAllMachineSetsAndSyncRevision(ctx, md, msList, true, templateExists) - if err != nil { + planner := newRolloutPlanner() + if err := planner.init(ctx, md, msList, nil, true, templateExists); err != nil { return err } - planner := newRolloutPlanner() - planner.md = md - planner.newMS = newMS - planner.oldMSs = oldMSs - if err := planner.planOnDelete(ctx); err != nil { return err } - allMSs := append(oldMSs, newMS) - - // TODO(in-place): also apply/remove labels to MS should go into rolloutPlanner - if err := r.cleanupDisableMachineCreateAnnotation(ctx, newMS); err != nil { - return err - } - if err := r.addDisableMachineCreateAnnotation(ctx, oldMSs); err != nil { + if err := r.createOrUpdateMachineSetsAndSyncMachineDeploymentRevision(ctx, planner); err != nil { return err } - // TODO(in-place): this should be changed as soon as rolloutPlanner support MS creation and adding/removing labels from MS - for _, ms := range allMSs { - scaleIntent := ptr.Deref(ms.Spec.Replicas, 0) - if v, ok := planner.scaleIntents[ms.Name]; ok { - scaleIntent = v - } - if err := r.scaleMachineSet(ctx, ms, scaleIntent, md); err != nil { - return err - } - } + newMS := planner.newMS + oldMSs := planner.oldMSs + allMSs := append(oldMSs, newMS) if err := r.syncDeploymentStatus(allMSs, newMS, md); err != nil { return err @@ -164,26 +144,3 @@ func (p *rolloutPlanner) reconcileOldMachineSetsOnDelete(ctx context.Context) { } } } - -// addDisableMachineCreateAnnotation will add the disable machine create annotation to old MachineSets. -func (r *Reconciler) addDisableMachineCreateAnnotation(ctx context.Context, oldMSs []*clusterv1.MachineSet) error { - for _, oldMS := range oldMSs { - log := ctrl.LoggerFrom(ctx, "MachineSet", klog.KObj(oldMS)) - if _, ok := oldMS.Annotations[clusterv1.DisableMachineCreateAnnotation]; !ok { - log.V(4).Info("adding annotation on old MachineSet to disable machine creation") - patchHelper, err := patch.NewHelper(oldMS, r.Client) - if err != nil { - return err - } - if oldMS.Annotations == nil { - oldMS.Annotations = map[string]string{} - } - oldMS.Annotations[clusterv1.DisableMachineCreateAnnotation] = "true" - err = patchHelper.Patch(ctx, oldMS) - if err != nil { - return err - } - } - } - return nil -} diff --git a/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete_test.go b/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete_test.go index 561c0f5f7435..d21f917c6a17 100644 --- a/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete_test.go @@ -20,15 +20,15 @@ import ( "context" "fmt" "math/rand" + "os" "strings" "testing" "time" "github.com/google/go-cmp/cmp" - "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog/v2" + "k8s.io/klog/v2/textlogger" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" @@ -390,8 +390,7 @@ type onDeleteSequenceTestCase struct { func Test_OnDeleteSequences(t *testing.T) { ctx := context.Background() - ctx = ctrl.LoggerInto(ctx, klog.Background()) - klog.SetOutput(ginkgo.GinkgoWriter) + ctx = ctrl.LoggerInto(ctx, textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(5), textlogger.Output(os.Stdout)))) tests := []onDeleteSequenceTestCase{ { // delete 1 @@ -581,22 +580,25 @@ func runOnDeleteTestCase(ctx context.Context, t *testing.T, tt onDeleteSequenceT // Running a small subset of MD reconcile (the rollout logic and a bit of setReplicas) p := newRolloutPlanner() - p.md = current.machineDeployment - p.newMS = current.newMS() - p.oldMSs = current.oldMSs() + p.computeDesiredMS = func(_ context.Context, deployment *clusterv1.MachineDeployment, currentNewMS *clusterv1.MachineSet) (*clusterv1.MachineSet, error) { + desiredNewMS := currentNewMS + if currentNewMS == nil { + // uses a predictable MS name when creating newMS, also add the newMS to current.machineSets + totMS := len(current.machineSets) + desiredNewMS = createMS(fmt.Sprintf("ms%d", totMS+1), deployment.Spec.Template.Spec.FailureDomain, 0) + current.machineSets = append(current.machineSets, desiredNewMS) + } + return desiredNewMS, nil + } - err := p.planOnDelete(ctx) + // init the rollout planner and plan next step for a rollout. + err := p.init(ctx, current.machineDeployment, current.machineSets, current.machines(), true, true) g.Expect(err).ToNot(HaveOccurred()) - // Apply changes. - delete(p.newMS.Annotations, clusterv1.DisableMachineCreateAnnotation) - for _, oldMS := range current.oldMSs() { - if oldMS.Annotations == nil { - oldMS.Annotations = map[string]string{} - } - oldMS.Annotations[clusterv1.DisableMachineCreateAnnotation] = "true" - } + err = p.planOnDelete(ctx) + g.Expect(err).ToNot(HaveOccurred()) + // Apply changes. for _, ms := range current.machineSets { if scaleIntent, ok := p.scaleIntents[ms.Name]; ok { ms.Spec.Replicas = ptr.To(scaleIntent) diff --git a/internal/controllers/machinedeployment/machinedeployment_rollout_planner.go b/internal/controllers/machinedeployment/machinedeployment_rollout_planner.go new file mode 100644 index 000000000000..225ed27af7d1 --- /dev/null +++ b/internal/controllers/machinedeployment/machinedeployment_rollout_planner.go @@ -0,0 +1,308 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package machinedeployment + +import ( + "context" + "fmt" + "time" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil" + "sigs.k8s.io/cluster-api/internal/util/hash" + "sigs.k8s.io/cluster-api/util/annotations" +) + +type rolloutPlanner struct { + md *clusterv1.MachineDeployment + revision string + + originalMS map[string]*clusterv1.MachineSet + machines []*clusterv1.Machine + + newMS *clusterv1.MachineSet + createReason string + + oldMSs []*clusterv1.MachineSet + upToDateResults map[string]mdutil.UpToDateResult + + scaleIntents map[string]int32 + computeDesiredMS func(ctx context.Context, deployment *clusterv1.MachineDeployment, currentMS *clusterv1.MachineSet) (*clusterv1.MachineSet, error) +} + +func newRolloutPlanner() *rolloutPlanner { + return &rolloutPlanner{ + scaleIntents: make(map[string]int32), + computeDesiredMS: computeDesiredMS, + } +} + +// init rollout planner internal state by taking care of: +// - Identifying newMS and oldMSs +// - Create the newMS if it not exists +// - Compute the initial version of desired state for newMS and oldMSs with mandatory labels, in place propagated fields +// and the annotations derived from the MachineDeployment. +// +// Note: rollout planner might change desired state later on in the planning phase, e.g. add/remove annotations influencing +// how to perform scale up/down operations. +func (p *rolloutPlanner) init(ctx context.Context, md *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet, machines []*clusterv1.Machine, createNewMSIfNotExist bool, mdTemplateExists bool) error { + if md == nil { + return errors.New("machineDeployment is nil, this is unexpected") + } + + if md.Spec.Replicas == nil { + return errors.Errorf("spec.replicas for MachineDeployment %v is nil, this is unexpected", client.ObjectKeyFromObject(p.md)) + } + + for _, ms := range msList { + if ms.Spec.Replicas == nil { + return errors.Errorf("spec.replicas for MachineSet %v is nil, this is unexpected", client.ObjectKeyFromObject(ms)) + } + } + + // Store md and machines. + p.md = md + p.machines = machines + + // Store original MS, for usage later with SSA patches / SSA caching. + p.originalMS = make(map[string]*clusterv1.MachineSet) + for _, ms := range msList { + p.originalMS[ms.Name] = ms.DeepCopy() + } + + // Try to find a MachineSet which matches the MachineDeployments intent, the newMS; consider all the other MachineSets as oldMs. + // NOTE: Fields propagated in-place from the MD are not considered by the comparison, they are not relevant for the rollout decision. + // NOTE: Expiration of MD rolloutAfter is relevant for the rollout decision, and thus it is considered in FindNewAndOldMachineSets. + currentNewMS, currentOldMSs, upToDateResults, createReason := mdutil.FindNewAndOldMachineSets(md, msList, metav1.Now()) + p.upToDateResults = upToDateResults + + // Compute desired state for the old MS, with mandatory labels, fields in-place propagated from the MachineDeployment etc. 
+ for _, currentOldMS := range currentOldMSs { + desiredOldMS, err := p.computeDesiredOldMS(ctx, currentOldMS) + if err != nil { + return err + } + p.oldMSs = append(p.oldMSs, desiredOldMS) + } + + // If there is a current NewMS, compute the desired state for it with mandatory labels, fields in-place propagated from the MachineDeployment etc. + if currentNewMS != nil { + desiredNewMS, err := p.computeDesiredNewMS(ctx, currentNewMS) + if err != nil { + return err + } + p.newMS = desiredNewMS + return nil + } + + // If there is no current NewMS, create one if required and possible. + if !createNewMSIfNotExist { + return nil + } + + if !mdTemplateExists { + return errors.New("cannot create a new MachineSet when templates do not exist") + } + + // Compute a new MachineSet with mandatory labels, fields in-place propagated from the MachineDeployment etc. + desiredNewMS, err := p.computeDesiredNewMS(ctx, nil) + if err != nil { + return err + } + p.newMS = desiredNewMS + p.createReason = createReason + return nil +} + +// computeDesiredNewMS with mandatory labels, in place propagated fields and the annotations derived from the MachineDeployment. +// Additionally, this procedure ensure the annotations tracking revisions numbers on the newMS is upToDate. +// Note: because we are using Server-Side-Apply we always have to calculate the full object. +func (p *rolloutPlanner) computeDesiredNewMS(ctx context.Context, currentNewMS *clusterv1.MachineSet) (*clusterv1.MachineSet, error) { + desiredNewMS, err := p.computeDesiredMS(ctx, p.md, currentNewMS) + if err != nil { + return nil, err + } + + // For newMS, make sure the revision annotation has the highest revision number across all MS + update the revision history annotation accordingly. + revisionAnnotations, revision, err := mdutil.ComputeRevisionAnnotations(ctx, currentNewMS, p.oldMSs) + if err != nil { + return nil, err + } + annotations.AddAnnotations(desiredNewMS, revisionAnnotations) + p.revision = revision + + // Always allow creation of machines on newMS. + desiredNewMS.Annotations[clusterv1.DisableMachineCreateAnnotation] = "false" + return desiredNewMS, nil +} + +// computeDesiredOldMS with mandatory labels, in place propagated fields and the annotations derived from the MachineDeployment. +// Additionally, this procedure ensure the annotations tracking revisions numbers are carried over. +// Note: because we are using Server-Side-Apply we always have to calculate the full object. +func (p *rolloutPlanner) computeDesiredOldMS(ctx context.Context, currentOldMS *clusterv1.MachineSet) (*clusterv1.MachineSet, error) { + desiredOldMS, err := p.computeDesiredMS(ctx, p.md, currentOldMS) + if err != nil { + return nil, err + } + + // For oldMS, carry over the revision annotations (those annotations should not be updated for oldMSs). + revisionAnnotations := mdutil.GetRevisionAnnotations(ctx, currentOldMS) + annotations.AddAnnotations(desiredOldMS, revisionAnnotations) + + // Disable creation of machines on oldMS when rollout strategy is on delete. 
+ if desiredOldMS.Annotations == nil { + desiredOldMS.Annotations = map[string]string{} + } + if p.md.Spec.Rollout.Strategy.Type == clusterv1.OnDeleteMachineDeploymentStrategyType { + desiredOldMS.Annotations[clusterv1.DisableMachineCreateAnnotation] = "true" + } else { + desiredOldMS.Annotations[clusterv1.DisableMachineCreateAnnotation] = "false" + } + return desiredOldMS, nil +} + +// computeDesiredMS computes the desired MachineSet, which could be either a newly created newMS, or the new desired version of an existing newMS/OldMS. +// Note: because we are using Server-Side-Apply we always have to calculate the full object. +func computeDesiredMS(ctx context.Context, deployment *clusterv1.MachineDeployment, currentMS *clusterv1.MachineSet) (*clusterv1.MachineSet, error) { + var name string + var uid types.UID + var finalizers []string + var uniqueIdentifierLabelValue string + var machineTemplateSpec clusterv1.MachineSpec + var status clusterv1.MachineSetStatus + var replicas int32 + var creationTimestamp metav1.Time + + if currentMS == nil { + // For a new MachineSet: compute a new uniqueIdentifier, a new MachineSet name, finalizers, replicas and machine template spec (take the one from MachineDeployment) + // Note: Replicas count might be updated by the rollout planner later in the same reconcile or in following reconcile. + + // Note: In previous Cluster API versions (< v1.4.0), the label value was the hash of the full machine + // template. Since the introduction of in-place mutation we are ignoring all in-place mutable fields, + // and using it as a info to be used for building a unique label selector. Instead, the rollout decision + // is not using the hash anymore. + templateHash, err := hash.Compute(mdutil.MachineTemplateDeepCopyRolloutFields(&deployment.Spec.Template)) + if err != nil { + return nil, errors.Wrap(err, "failed to compute desired MachineSet: failed to compute machine template hash") + } + // Append a random string at the end of template hash. This is required to distinguish MachineSets that + // could be created with the same spec as a result of rolloutAfter. + var randomSuffix string + name, randomSuffix = computeNewMachineSetName(deployment.Name + "-") + uniqueIdentifierLabelValue = fmt.Sprintf("%d-%s", templateHash, randomSuffix) + replicas = 0 + machineTemplateSpec = *deployment.Spec.Template.Spec.DeepCopy() + creationTimestamp = metav1.NewTime(time.Now()) + } else { + // For updating an existing MachineSet use name, uid, finalizers, replicas, uniqueIdentifier and machine template spec from existingMS. + // Note: We use the uid, to ensure that the Server-Side-Apply only updates the existingMS. + // Note: Replicas count might be updated by the rollout planner later in the same reconcile or in following reconcile. + var uniqueIdentifierLabelExists bool + uniqueIdentifierLabelValue, uniqueIdentifierLabelExists = currentMS.Labels[clusterv1.MachineDeploymentUniqueLabel] + if !uniqueIdentifierLabelExists { + return nil, errors.Errorf("failed to compute desired MachineSet: failed to get unique identifier from %q annotation", + clusterv1.MachineDeploymentUniqueLabel) + } + + name = currentMS.Name + uid = currentMS.UID + // Preserve all existing finalizers (including foregroundDeletion finalizer). + finalizers = currentMS.Finalizers + replicas = *currentMS.Spec.Replicas + machineTemplateSpec = *currentMS.Spec.Template.Spec.DeepCopy() + status = currentMS.Status + creationTimestamp = currentMS.CreationTimestamp + } + + // Construct the basic MachineSet. 
+ desiredMS := &clusterv1.MachineSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "MachineSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: deployment.Namespace, + // NOTE: Carry over creationTimestamp from current MS, because it is required by the sorting functions + // used in the planning phase, e.g. MachineSetsByCreationTimestamp. + // NOTE: For newMS, this value is set to now, but it will be overridden when actual creation happens + // NOTE: CreationTimestamp will be dropped from the SSA intent by the SSA helper. + CreationTimestamp: creationTimestamp, + // Note: By setting the ownerRef on creation we signal to the MachineSet controller that this is not a stand-alone MachineSet. + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(deployment, machineDeploymentKind)}, + UID: uid, + Finalizers: finalizers, + }, + Spec: clusterv1.MachineSetSpec{ + Replicas: &replicas, + ClusterName: deployment.Spec.ClusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: machineTemplateSpec, + }, + }, + // NOTE: Carry over status from current MS, because it is required by mdutil functions + // used in the planning phase, e.g. GetAvailableReplicaCountForMachineSets. + // NOTE: Status will be dropped from the SSA intent by the SSA helper. + Status: status, + } + + // Set the in-place mutable fields. + // When we create a new MachineSet we will just create the MachineSet with those fields. + // When we update an existing MachineSet will we update the fields on the existing MachineSet (in-place mutate). + + // Set labels and .spec.template.labels. + desiredMS.Labels = mdutil.CloneAndAddLabel(deployment.Spec.Template.Labels, + clusterv1.MachineDeploymentUniqueLabel, uniqueIdentifierLabelValue) + + // Always set the MachineDeploymentNameLabel. + // Note: If a client tries to create a MachineDeployment without a selector, the MachineDeployment webhook + // will add this label automatically. But we want this label to always be present even if the MachineDeployment + // has a selector which doesn't include it. Therefore, we have to set it here explicitly. + desiredMS.Labels[clusterv1.MachineDeploymentNameLabel] = deployment.Name + desiredMS.Spec.Template.Labels = mdutil.CloneAndAddLabel(deployment.Spec.Template.Labels, + clusterv1.MachineDeploymentUniqueLabel, uniqueIdentifierLabelValue) + + // Set selector. + desiredMS.Spec.Selector = *mdutil.CloneSelectorAndAddLabel(&deployment.Spec.Selector, clusterv1.MachineDeploymentUniqueLabel, uniqueIdentifierLabelValue) + + // Set annotations and .spec.template.annotations. + // Note: Additional annotations might be added by the rollout planner later in the same reconcile. + // Note: Intentionally, we are not setting the following labels: + // - clusterv1.RevisionAnnotation + the deprecated revisionHistoryAnnotation + // - for newMS, we should always keep those annotations upToDate + // - for oldMS, we should carry over those annotations from previous reconcile + // - clusterv1.DisableMachineCreateAnnotation + // - it should be set to true only on oldMS and if strategy is on delete, otherwise set to false. + desiredMS.Annotations = mdutil.MachineSetAnnotationsFromMachineDeployment(ctx, deployment) + desiredMS.Spec.Template.Annotations = cloneStringMap(deployment.Spec.Template.Annotations) + + // Set all other in-place mutable fields. 
+ desiredMS.Spec.Deletion.Order = deployment.Spec.Deletion.Order + desiredMS.Spec.MachineNaming = deployment.Spec.MachineNaming + desiredMS.Spec.Template.Spec.MinReadySeconds = deployment.Spec.Template.Spec.MinReadySeconds + desiredMS.Spec.Template.Spec.ReadinessGates = deployment.Spec.Template.Spec.ReadinessGates + desiredMS.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds = deployment.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds + desiredMS.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds = deployment.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds + desiredMS.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = deployment.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds + + return desiredMS, nil +} diff --git a/internal/controllers/machinedeployment/machinedeployment_rollout_sequence_test.go b/internal/controllers/machinedeployment/machinedeployment_rollout_planner_test.go similarity index 51% rename from internal/controllers/machinedeployment/machinedeployment_rollout_sequence_test.go rename to internal/controllers/machinedeployment/machinedeployment_rollout_planner_test.go index b08ef97e4a81..fea0cf5a504e 100644 --- a/internal/controllers/machinedeployment/machinedeployment_rollout_sequence_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_rollout_planner_test.go @@ -17,6 +17,7 @@ limitations under the License. package machinedeployment import ( + "context" "fmt" "math/rand" "os" @@ -25,14 +26,455 @@ import ( "strings" "testing" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + apirand "k8s.io/apimachinery/pkg/util/rand" "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conversion" ) +func TestComputeDesiredNewMS(t *testing.T) { + t.Run("should compute Revision annotations for newMS, no oldMS", func(t *testing.T) { + g := NewWithT(t) + + deployment := &clusterv1.MachineDeployment{} + + p := rolloutPlanner{ + md: deployment, + // Add a dummy computeDesiredMS, that simply return an empty MS object. + // Note: there is dedicate test to validate the actual computeDesiredMS func, it is ok to simplify the unit test here. + computeDesiredMS: func(_ context.Context, _ *clusterv1.MachineDeployment, _ *clusterv1.MachineSet) (*clusterv1.MachineSet, error) { + return &clusterv1.MachineSet{}, nil + }, + } + actualNewMS, err := p.computeDesiredNewMS(ctx, nil) + g.Expect(err).ToNot(HaveOccurred()) + + // annotations that we are intentionally not setting in this func are not there + g.Expect(actualNewMS.Annotations).To(Equal(map[string]string{ + clusterv1.RevisionAnnotation: "1", + clusterv1.DisableMachineCreateAnnotation: "false", + })) + }) + t.Run("should update Revision annotations for newMS when required", func(t *testing.T) { + g := NewWithT(t) + + deployment := &clusterv1.MachineDeployment{} + currentMS := &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "1", + }, + }, + } + oldMS := &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "2", + }, + }, + } + + p := rolloutPlanner{ + md: deployment, + // Add a dummy computeDesiredMS, that simply pass through the currentNewMS. 
+ // Note: there is dedicate test to validate the actual computeDesiredMS func, it is ok to simplify the unit test here. + computeDesiredMS: func(_ context.Context, _ *clusterv1.MachineDeployment, currentNewMS *clusterv1.MachineSet) (*clusterv1.MachineSet, error) { + return currentNewMS, nil + }, + oldMSs: []*clusterv1.MachineSet{oldMS}, + } + actualNewMS, err := p.computeDesiredNewMS(ctx, currentMS) + g.Expect(err).ToNot(HaveOccurred()) + + // annotations that we are intentionally not setting in this func are not there + // Note: there is a dedicated test for ComputeRevisionAnnotations, so it is ok to have a minimal coverage here about revision management. + g.Expect(actualNewMS.Annotations).To(Equal(map[string]string{ + clusterv1.RevisionAnnotation: "3", + "machinedeployment.clusters.x-k8s.io/revision-history": "1", + clusterv1.DisableMachineCreateAnnotation: "false", + })) + }) + t.Run("should preserve Revision annotations for newMS when already up to date", func(t *testing.T) { + g := NewWithT(t) + + deployment := &clusterv1.MachineDeployment{} + currentMS := &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "3", + "machinedeployment.clusters.x-k8s.io/revision-history": "1", + }, + }, + } + oldMS := &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "2", + }, + }, + } + + p := rolloutPlanner{ + md: deployment, + // Add a dummy computeDesiredMS, that simply pass through the currentNewMS. + // Note: there is dedicate test to validate the actual computeDesiredMS func, it is ok to simplify the unit test here. + computeDesiredMS: func(_ context.Context, _ *clusterv1.MachineDeployment, currentNewMS *clusterv1.MachineSet) (*clusterv1.MachineSet, error) { + return currentNewMS, nil + }, + oldMSs: []*clusterv1.MachineSet{oldMS}, + } + actualNewMS, err := p.computeDesiredNewMS(ctx, currentMS) + g.Expect(err).ToNot(HaveOccurred()) + + // annotations that we are intentionally not setting in this func are not there + // Note: there is a dedicated test for ComputeRevisionAnnotations, so it is ok to have a minimal coverage here about revision management. + g.Expect(actualNewMS.Annotations).To(Equal(map[string]string{ + clusterv1.RevisionAnnotation: "3", + "machinedeployment.clusters.x-k8s.io/revision-history": "1", + clusterv1.DisableMachineCreateAnnotation: "false", + })) + }) +} + +func TestComputeDesiredOldMS(t *testing.T) { + t.Run("should carry over Revision annotations from oldMS", func(t *testing.T) { + g := NewWithT(t) + const revision = "4" + const revisionHistory = "1,3" + + deployment := &clusterv1.MachineDeployment{} + currentMS := &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: revision, + "machinedeployment.clusters.x-k8s.io/revision-history": revisionHistory, + }, + }, + } + + p := rolloutPlanner{ + md: deployment, + // Add a dummy computeDesiredMS, that simply pass through the currentNewMS. + // Note: there is dedicate test to validate the actual computeDesiredMS func, it is ok to simplify the unit test here. 
+ computeDesiredMS: func(_ context.Context, _ *clusterv1.MachineDeployment, currentOldMS *clusterv1.MachineSet) (*clusterv1.MachineSet, error) { + return currentOldMS, nil + }, + } + actualOldMS, err := p.computeDesiredOldMS(ctx, currentMS) + g.Expect(err).ToNot(HaveOccurred()) + + // annotations that we are intentionally not setting in this func are not there + g.Expect(actualOldMS.Annotations).To(Equal(map[string]string{ + clusterv1.RevisionAnnotation: revision, + "machinedeployment.clusters.x-k8s.io/revision-history": revisionHistory, + clusterv1.DisableMachineCreateAnnotation: "false", + })) + }) + t.Run("should disable creation of machines on oldMS when rollout strategy is OnDelete", func(t *testing.T) { + g := NewWithT(t) + const revision = "4" + const revisionHistory = "1,3" + + deployment := &clusterv1.MachineDeployment{ + Spec: clusterv1.MachineDeploymentSpec{ + Rollout: clusterv1.MachineDeploymentRolloutSpec{Strategy: clusterv1.MachineDeploymentRolloutStrategy{Type: clusterv1.OnDeleteMachineDeploymentStrategyType}}, + }, + } + currentMS := &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: revision, + "machinedeployment.clusters.x-k8s.io/revision-history": revisionHistory, + }, + }, + } + + p := rolloutPlanner{ + md: deployment, + // Add a dummy computeDesiredMS, that simply pass through the currentNewMS. + // Note: there is dedicate test to validate the actual computeDesiredMS func, it is ok to simplify the unit test here. + computeDesiredMS: func(_ context.Context, _ *clusterv1.MachineDeployment, currentOldMS *clusterv1.MachineSet) (*clusterv1.MachineSet, error) { + return currentOldMS, nil + }, + } + actualOldMS, err := p.computeDesiredOldMS(ctx, currentMS) + g.Expect(err).ToNot(HaveOccurred()) + + // annotations that we are intentionally not setting in this func are not there + g.Expect(actualOldMS.Annotations).To(Equal(map[string]string{ + clusterv1.RevisionAnnotation: revision, + "machinedeployment.clusters.x-k8s.io/revision-history": revisionHistory, + clusterv1.DisableMachineCreateAnnotation: "true", + })) + }) +} + +func TestComputeDesiredMS(t *testing.T) { + duration10s := ptr.To(int32(10)) + namingTemplateKey := "test" + + infraRef := clusterv1.ContractVersionedObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Name: "infra-template-1", + APIGroup: clusterv1.GroupVersionInfrastructure.Group, + } + bootstrapRef := clusterv1.ContractVersionedObjectReference{ + Kind: "GenericBootstrapConfigTemplate", + Name: "bootstrap-template-1", + APIGroup: clusterv1.GroupVersionBootstrap.Group, + } + + deployment := &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "md1", + Annotations: map[string]string{"top-level-annotation": "top-level-annotation-value"}, + }, + Spec: clusterv1.MachineDeploymentSpec{ + ClusterName: "test-cluster", + Replicas: ptr.To[int32](3), + Rollout: clusterv1.MachineDeploymentRolloutSpec{ + Strategy: clusterv1.MachineDeploymentRolloutStrategy{ + Type: clusterv1.RollingUpdateMachineDeploymentStrategyType, + RollingUpdate: clusterv1.MachineDeploymentRolloutStrategyRollingUpdate{ + MaxSurge: intOrStrPtr(1), + MaxUnavailable: intOrStrPtr(0), + }, + }, + }, + Deletion: clusterv1.MachineDeploymentDeletionSpec{ + Order: clusterv1.RandomMachineSetDeletionOrder, + }, + MachineNaming: clusterv1.MachineNamingSpec{ + Template: "{{ .machineSet.name }}" + namingTemplateKey + "-{{ .random }}", + }, + Selector: metav1.LabelSelector{ + 
MatchLabels: map[string]string{"k1": "v1"}, + }, + Template: clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: map[string]string{"machine-label1": "machine-value1"}, + Annotations: map[string]string{"machine-annotation1": "machine-value1"}, + }, + Spec: clusterv1.MachineSpec{ + Version: "v1.25.3", + InfrastructureRef: infraRef, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: bootstrapRef, + }, + MinReadySeconds: ptr.To[int32](3), + ReadinessGates: []clusterv1.MachineReadinessGate{{ConditionType: "foo"}}, + Deletion: clusterv1.MachineDeletionSpec{ + NodeDrainTimeoutSeconds: duration10s, + NodeVolumeDetachTimeoutSeconds: duration10s, + NodeDeletionTimeoutSeconds: duration10s, + }, + }, + }, + }, + } + + skeletonMSBasedOnMD := &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + CreationTimestamp: metav1.Now(), + Labels: map[string]string{ + // labels that must be propagated to MS. + "machine-label1": "machine-value1", + }, + Annotations: map[string]string{ + // annotations that must be propagated to MS. + "top-level-annotation": "top-level-annotation-value", + }, + }, + Spec: clusterv1.MachineSetSpec{ + // Info that we do expect to be copied from the MD. + ClusterName: deployment.Spec.ClusterName, + Deletion: clusterv1.MachineSetDeletionSpec{ + Order: deployment.Spec.Deletion.Order, + }, + Selector: deployment.Spec.Selector, + Template: *deployment.Spec.Template.DeepCopy(), + MachineNaming: deployment.Spec.MachineNaming, + }, + } + + t.Run("should compute a new MachineSet when current MS is nil", func(t *testing.T) { + expectedMS := skeletonMSBasedOnMD.DeepCopy() + // Replicas should always be set to zero on newMS. + expectedMS.Spec.Replicas = ptr.To[int32](0) + + g := NewWithT(t) + actualMS, err := computeDesiredMS(ctx, deployment, nil) + g.Expect(err).ToNot(HaveOccurred()) + assertDesiredMS(g, deployment, actualMS, expectedMS) + }) + + t.Run("should compute the updated MachineSet when current MS is not nil", func(t *testing.T) { + uid := apirand.String(5) + name := "foo" + finalizers := []string{"pre-existing-finalizer"} + replicas := ptr.To(int32(1)) + uniqueLabelValue := "123" + currentMS := &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + CreationTimestamp: metav1.Now(), + Labels: map[string]string{ + // labels that must be carried over + clusterv1.MachineDeploymentUniqueLabel: uniqueLabelValue, + // unknown labels from current MS should not be considered for desired state (known labels are inferred from MD). + "foo": "bar", + }, + Annotations: map[string]string{ + // unknown annotations from current MS should not be considered for desired state (known annotations are inferred from MD). + "foo": "bar", + }, + // value that must be preserved + UID: types.UID(uid), + Name: name, + Finalizers: finalizers, + }, + Spec: clusterv1.MachineSetSpec{ + // value that must be preserved + Replicas: replicas, + // Info that we do expect to be copied from the MD (set to another value to make sure it is overridden). 
+ Deletion: clusterv1.MachineSetDeletionSpec{ + Order: clusterv1.OldestMachineSetDeletionOrder, + }, + MachineNaming: clusterv1.MachineNamingSpec{ + Template: "foo", + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "v1"}, + }, + Template: clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: map[string]string{"foo": "machine-value1"}, + Annotations: map[string]string{"foo": "machine-value1"}, + }, + Spec: clusterv1.MachineSpec{ + Version: "foo", + MinReadySeconds: ptr.To[int32](5), + ReadinessGates: []clusterv1.MachineReadinessGate{{ConditionType: "bar"}}, + Deletion: clusterv1.MachineDeletionSpec{ + NodeDrainTimeoutSeconds: nil, + NodeVolumeDetachTimeoutSeconds: nil, + NodeDeletionTimeoutSeconds: nil, + }, + }, + }, + }, + } + + expectedMS := skeletonMSBasedOnMD.DeepCopy() + // Fields that are expected to be carried over from oldMS. + expectedMS.ObjectMeta.Name = name + expectedMS.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueLabelValue + expectedMS.ObjectMeta.UID = types.UID(uid) + expectedMS.ObjectMeta.Finalizers = finalizers + expectedMS.Spec.Replicas = replicas + expectedMS.Spec.Template = *currentMS.Spec.Template.DeepCopy() + // Fields that must be taken from the MD + expectedMS.Spec.Deletion.Order = deployment.Spec.Deletion.Order + expectedMS.Spec.MachineNaming = deployment.Spec.MachineNaming + expectedMS.Spec.Template.Labels = mdutil.CloneAndAddLabel(deployment.Spec.Template.Labels, clusterv1.MachineDeploymentUniqueLabel, uniqueLabelValue) + expectedMS.Spec.Template.Annotations = cloneStringMap(deployment.Spec.Template.Annotations) + expectedMS.Spec.Template.Spec.MinReadySeconds = deployment.Spec.Template.Spec.MinReadySeconds + expectedMS.Spec.Template.Spec.ReadinessGates = deployment.Spec.Template.Spec.ReadinessGates + expectedMS.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds = deployment.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds + expectedMS.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds = deployment.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds + expectedMS.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = deployment.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds + + g := NewWithT(t) + actualMS, err := computeDesiredMS(ctx, deployment, currentMS) + g.Expect(err).ToNot(HaveOccurred()) + assertDesiredMS(g, deployment, actualMS, expectedMS) + }) +} + +func assertDesiredMS(g *WithT, md *clusterv1.MachineDeployment, actualMS *clusterv1.MachineSet, expectedMS *clusterv1.MachineSet) { + // check UID + if expectedMS.UID != "" { + g.Expect(actualMS.UID).Should(Equal(expectedMS.UID)) + } + // Check Name + if expectedMS.Name != "" { + g.Expect(actualMS.Name).Should(Equal(expectedMS.Name)) + } + // Check Namespace + g.Expect(actualMS.Namespace).Should(Equal(expectedMS.Namespace)) + + // Check CreationTimestamp + g.Expect(actualMS.CreationTimestamp.IsZero()).Should(BeFalse()) + + // Check Ownership + g.Expect(util.IsControlledBy(actualMS, md, clusterv1.GroupVersion.WithKind("MachineDeployment").GroupKind())).Should(BeTrue()) + + // Check finalizers + g.Expect(actualMS.Finalizers).Should(Equal(expectedMS.Finalizers)) + + // Check Replicas + g.Expect(actualMS.Spec.Replicas).ShouldNot(BeNil()) + g.Expect(actualMS.Spec.Replicas).Should(HaveValue(Equal(*expectedMS.Spec.Replicas))) + + // Check ClusterName + g.Expect(actualMS.Spec.ClusterName).Should(Equal(expectedMS.Spec.ClusterName)) + + // Check Labels + for k, v := range expectedMS.Labels { + 
g.Expect(actualMS.Labels).Should(HaveKeyWithValue(k, v)) + } + for k, v := range expectedMS.Spec.Template.Labels { + g.Expect(actualMS.Spec.Template.Labels).Should(HaveKeyWithValue(k, v)) + } + // Verify that the labels also has the unique identifier key. + g.Expect(actualMS.Labels).Should(HaveKey(clusterv1.MachineDeploymentUniqueLabel)) + g.Expect(actualMS.Spec.Template.Labels).Should(HaveKey(clusterv1.MachineDeploymentUniqueLabel)) + + // Check Annotations + // Note: More nuanced validation of the Revision annotation calculations are done when testing `ComputeMachineSetAnnotations`. + for k, v := range expectedMS.Annotations { + g.Expect(actualMS.Annotations).Should(HaveKeyWithValue(k, v)) + } + // annotations that must not be propagated from MD have been removed. + g.Expect(actualMS.Annotations).ShouldNot(HaveKey(corev1.LastAppliedConfigAnnotation)) + g.Expect(actualMS.Annotations).ShouldNot(HaveKey(conversion.DataAnnotation)) + + // annotations that must be derived from MD have been set. + g.Expect(actualMS.Annotations).Should(HaveKeyWithValue(clusterv1.DesiredReplicasAnnotation, fmt.Sprintf("%d", *md.Spec.Replicas))) + g.Expect(actualMS.Annotations).Should(HaveKeyWithValue(clusterv1.MaxReplicasAnnotation, fmt.Sprintf("%d", *(md.Spec.Replicas)+mdutil.MaxSurge(*md)))) + + // annotations that we are intentionally not setting in this func are not there + g.Expect(actualMS.Annotations).ShouldNot(HaveKey(clusterv1.RevisionAnnotation)) + g.Expect(actualMS.Annotations).ShouldNot(HaveKey("machinedeployment.clusters.x-k8s.io/revision-history")) + g.Expect(actualMS.Annotations).ShouldNot(HaveKey(clusterv1.DisableMachineCreateAnnotation)) + + for k, v := range expectedMS.Spec.Template.Annotations { + g.Expect(actualMS.Spec.Template.Annotations).Should(HaveKeyWithValue(k, v)) + } + + // Check MinReadySeconds + g.Expect(actualMS.Spec.Template.Spec.MinReadySeconds).Should(Equal(expectedMS.Spec.Template.Spec.MinReadySeconds)) + + // Check Order + g.Expect(actualMS.Spec.Deletion.Order).Should(Equal(expectedMS.Spec.Deletion.Order)) + + // Check MachineTemplateSpec + g.Expect(actualMS.Spec.Template.Spec).Should(BeComparableTo(expectedMS.Spec.Template.Spec)) + + // Check MachineNamingSpec + g.Expect(actualMS.Spec.MachineNaming.Template).Should(BeComparableTo(expectedMS.Spec.MachineNaming.Template)) +} + // machineControllerMutator fakes a small part of the Machine controller, just what is required for the rollout to progress. func machineControllerMutator(log *fileLogger, m *clusterv1.Machine, scope *rolloutScope) { if m.DeletionTimestamp.IsZero() { @@ -64,7 +506,7 @@ func machineSetControllerMutator(log *fileLogger, ms *clusterv1.MachineSet, scop // if too few machines, create missing machine. // new machines are created with a predictable name, so it is easier to write test case and validate rollout sequences. // e.g. 
if the cluster is initialized with m1, m2, m3, new machines will be m4, m5, m6 - if _, ok := ms.Annotations[clusterv1.DisableMachineCreateAnnotation]; !ok { + if value, ok := ms.Annotations[clusterv1.DisableMachineCreateAnnotation]; !ok || value != "true" { machinesToAdd := ptr.Deref(ms.Spec.Replicas, 0) - ptr.Deref(ms.Status.Replicas, 0) if machinesToAdd > 0 { machinesAdded := []string{} @@ -153,10 +595,6 @@ func initCurrentRolloutScope(currentMachineNames []string, mdOptions ...machineD current.machineSetMachines[ms.Name] = currentMachines current.machineUID = totMachines - // TODO(in-place): this should be removed as soon as rolloutPlanner will take care of creating newMS - newMS := createMS("ms2", current.machineDeployment.Spec.Template.Spec.FailureDomain, 0) - current.machineSets = append(current.machineSets, newMS) - return current } @@ -270,25 +708,6 @@ func msLog(ms *clusterv1.MachineSet, machines []*clusterv1.Machine) string { return msLog } -func (r rolloutScope) newMS() *clusterv1.MachineSet { - for _, ms := range r.machineSets { - if upToDate, _ := mdutil.MachineTemplateUpToDate(&r.machineDeployment.Spec.Template, &ms.Spec.Template); upToDate { - return ms - } - } - return nil -} - -func (r rolloutScope) oldMSs() []*clusterv1.MachineSet { - var oldMSs []*clusterv1.MachineSet - for _, ms := range r.machineSets { - if upToDate, _ := mdutil.MachineTemplateUpToDate(&r.machineDeployment.Spec.Template, &ms.Spec.Template); !upToDate { - oldMSs = append(oldMSs, ms) - } - } - return oldMSs -} - func (r rolloutScope) machines() []*clusterv1.Machine { machines := []*clusterv1.Machine{} for _, ms := range r.machineSets { diff --git a/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate.go b/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate.go index cf2c5666aa37..88cd454b8f1d 100644 --- a/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate.go +++ b/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate.go @@ -21,66 +21,33 @@ import ( "fmt" "sort" - "github.com/pkg/errors" "k8s.io/klog/v2" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil" - "sigs.k8s.io/cluster-api/util/patch" ) // rolloutRollingUpdate reconcile machine sets controlled by a MachineDeployment that is using the RolloutUpdate strategy. func (r *Reconciler) rolloutRollingUpdate(ctx context.Context, md *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet, templateExists bool) error { - // TODO(in-place): move create newMS into rolloutPlanner - newMS, oldMSs, err := r.getAllMachineSetsAndSyncRevision(ctx, md, msList, true, templateExists) - if err != nil { + planner := newRolloutPlanner() + if err := planner.init(ctx, md, msList, nil, true, templateExists); err != nil { return err } - // newMS can be nil in case there is already a MachineSet associated with this deployment, - // but there are only either changes in annotations or MinReadySeconds. Or in other words, - // this can be nil if there are changes, but no replacement of existing machines is needed. 
- if newMS == nil { - return nil - } - - allMSs := append(oldMSs, newMS) - - // TODO(in-place): also apply/remove labels to MS should go into rolloutPlanner - // Note: looks like the current implementation is missing proper management of the DisableMachineCreateAnnotation on oldMS: - // - Currently the DisableMachineCreateAnnotation is not removed from oldMS, so the annotation might be there or not depending it the users - // transitioned back and forth from RolloutUpdate strategy and OnDelete strategy. - // - TBD is we want to implement a proper cleanup thus ensuring create machines on oldMS can always happen when using the - // RolloutUpdate strategy, or if we want to always prevent machine creation on oldMS also in this case. - // Note: When rollout is paused the code assumes it can always scale up oldMS (see scale in machinedeployment_sync.go). - // however current implementation is missing removal of the DisableMachineCreateAnnotation that might exist or not - // on oldMS (depending on strategy / proper cleanup etc.). - if err := r.cleanupDisableMachineCreateAnnotation(ctx, newMS); err != nil { + // TODO(in-place): TBD if we want to always prevent machine creation on oldMS. + if err := planner.planRollingUpdate(ctx); err != nil { return err } - planner := newRolloutPlanner() - planner.md = md - planner.newMS = newMS - planner.oldMSs = oldMSs - - if err := planner.planRollingUpdate(ctx); err != nil { + if err := r.createOrUpdateMachineSetsAndSyncMachineDeploymentRevision(ctx, planner); err != nil { return err } - // TODO(in-place): this should be changed as soon as rolloutPlanner support MS creation and adding/removing labels from MS - for _, ms := range allMSs { - scaleIntent := ptr.Deref(ms.Spec.Replicas, 0) - if v, ok := planner.scaleIntents[ms.Name]; ok { - scaleIntent = v - } - if err := r.scaleMachineSet(ctx, ms, scaleIntent, md); err != nil { - return err - } - } + newMS := planner.newMS + oldMSs := planner.oldMSs + allMSs := append(oldMSs, newMS) if err := r.syncDeploymentStatus(allMSs, newMS, md); err != nil { return err @@ -95,35 +62,8 @@ func (r *Reconciler) rolloutRollingUpdate(ctx context.Context, md *clusterv1.Mac return nil } -type rolloutPlanner struct { - md *clusterv1.MachineDeployment - newMS *clusterv1.MachineSet - oldMSs []*clusterv1.MachineSet - scaleIntents map[string]int32 -} - -func newRolloutPlanner() *rolloutPlanner { - return &rolloutPlanner{ - scaleIntents: make(map[string]int32), - } -} - // planRollingUpdate determine how to proceed with the rollout when using the RollingUpdate strategy if the system is not yet at the desired state. func (p *rolloutPlanner) planRollingUpdate(ctx context.Context) error { - if p.md.Spec.Replicas == nil { - return errors.Errorf("spec.replicas for MachineDeployment %v is nil, this is unexpected", client.ObjectKeyFromObject(p.md)) - } - - if p.newMS.Spec.Replicas == nil { - return errors.Errorf("spec.replicas for MachineSet %v is nil, this is unexpected", client.ObjectKeyFromObject(p.newMS)) - } - - for _, oldMS := range p.oldMSs { - if oldMS.Spec.Replicas == nil { - return errors.Errorf("spec.replicas for MachineSet %v is nil, this is unexpected", client.ObjectKeyFromObject(oldMS)) - } - } - // Scale up, if we can. 
if err := p.reconcileNewMachineSet(ctx); err != nil { return err @@ -407,25 +347,3 @@ func (p *rolloutPlanner) reconcileDeadlockBreaker(ctx context.Context) { return } } - -// cleanupDisableMachineCreateAnnotation will remove the disable machine create annotation from new MachineSets that were created during reconcileOldMachineSetsOnDelete. -func (r *Reconciler) cleanupDisableMachineCreateAnnotation(ctx context.Context, newMS *clusterv1.MachineSet) error { - log := ctrl.LoggerFrom(ctx, "MachineSet", klog.KObj(newMS)) - - if newMS.Annotations != nil { - if _, ok := newMS.Annotations[clusterv1.DisableMachineCreateAnnotation]; ok { - log.V(4).Info("removing annotation on latest MachineSet to enable machine creation") - patchHelper, err := patch.NewHelper(newMS, r.Client) - if err != nil { - return err - } - delete(newMS.Annotations, clusterv1.DisableMachineCreateAnnotation) - err = patchHelper.Patch(ctx, newMS) - if err != nil { - return err - } - } - } - - return nil -} diff --git a/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate_test.go b/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate_test.go index 51350ad658fd..af55facdad63 100644 --- a/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate_test.go @@ -20,15 +20,15 @@ import ( "context" "fmt" "math/rand" + "os" "strings" "testing" "time" "github.com/google/go-cmp/cmp" - "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog/v2" + "k8s.io/klog/v2/textlogger" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" @@ -788,8 +788,7 @@ type rollingUpdateSequenceTestCase struct { func Test_RollingUpdateSequences(t *testing.T) { ctx := context.Background() - ctx = ctrl.LoggerInto(ctx, klog.Background()) - klog.SetOutput(ginkgo.GinkgoWriter) + ctx = ctrl.LoggerInto(ctx, textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(5), textlogger.Output(os.Stdout)))) tests := []rollingUpdateSequenceTestCase{ // Regular rollout (no in-place) @@ -1011,11 +1010,22 @@ func runRollingUpdateTestCase(ctx context.Context, t *testing.T, tt rollingUpdat // Running a small subset of MD reconcile (the rollout logic and a bit of setReplicas) p := newRolloutPlanner() - p.md = current.machineDeployment - p.newMS = current.newMS() - p.oldMSs = current.oldMSs() + p.computeDesiredMS = func(_ context.Context, deployment *clusterv1.MachineDeployment, currentNewMS *clusterv1.MachineSet) (*clusterv1.MachineSet, error) { + desiredNewMS := currentNewMS + if currentNewMS == nil { + // uses a predictable MS name when creating newMS, also add the newMS to current.machineSets + totMS := len(current.machineSets) + desiredNewMS = createMS(fmt.Sprintf("ms%d", totMS+1), deployment.Spec.Template.Spec.FailureDomain, 0) + current.machineSets = append(current.machineSets, desiredNewMS) + } + return desiredNewMS, nil + } + + // init the rollout planner and plan next step for a rollout. + err := p.init(ctx, current.machineDeployment, current.machineSets, current.machines(), true, true) + g.Expect(err).ToNot(HaveOccurred()) - err := p.planRollingUpdate(ctx) + err = p.planRollingUpdate(ctx) g.Expect(err).ToNot(HaveOccurred()) // Apply changes. 
diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go index 3add73cabccd..93660adf5330 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync.go @@ -20,16 +20,11 @@ import ( "context" "fmt" "sort" - "time" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - kerrors "k8s.io/apimachinery/pkg/util/errors" apirand "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" @@ -37,8 +32,6 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil" - "sigs.k8s.io/cluster-api/internal/util/hash" - "sigs.k8s.io/cluster-api/internal/util/ssa" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" ) @@ -46,274 +39,44 @@ import ( // sync is responsible for reconciling deployments on scaling events or when they // are paused. func (r *Reconciler) sync(ctx context.Context, md *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet, templateExists bool) error { - newMS, oldMSs, err := r.getAllMachineSetsAndSyncRevision(ctx, md, msList, false, templateExists) - if err != nil { + // Use the rollout planner to benefit from the common logic for: + // - identifying newMS and oldMSs when necessary + // - computing the desired state for newMS and oldMSs, including managing rollout-related annotations and + // in-place propagation of labels, annotations and other fields. + planner := newRolloutPlanner() + if err := planner.init(ctx, md, msList, nil, false, templateExists); err != nil { return err } - if err := r.scale(ctx, md, newMS, oldMSs); err != nil { - // If we get an error while trying to scale, the deployment will be requeued - // so we can abort this resync + // Apply the above changes to the MachineSets, so it is possible to use the legacy code for scale. + if err := r.createOrUpdateMachineSetsAndSyncMachineDeploymentRevision(ctx, planner); err != nil { return err } - // - // // TODO: Clean up the deployment when it's paused and no rollback is in flight. - // + // Call the legacy scale logic. + // Note: the legacy scale logic does not rely on the rollout planner yet, and it still leads to many + // patch calls instead of grouping all the MachineSet changes in a single SSA call based on a carefully crafted desired state. + // Note: using the legacy scale logic on newMS and oldMSs computed by the rollout planner is not an issue because + // the legacy scale logic relies on info that is part of the desired state computed by the rollout planner (or on + // info carried over from the original MS).
More specifically: + // - ms.metadata.CreationTimestamp, carried over + // - ms.metadata.Annotations, computed (only DesiredReplicasAnnotation, MaxReplicasAnnotation are relevant) + // - ms.spec.Replicas, computed + // - ms.status.Replicas, carried over + // - ms.status.AvailableReplicas, carried over + newMS := planner.newMS + oldMSs := planner.oldMSs allMSs := append(oldMSs, newMS) - return r.syncDeploymentStatus(allMSs, newMS, md) -} - -// getAllMachineSetsAndSyncRevision returns all the machine sets for the provided deployment (new and all old), with new MS's and deployment's revision updated. -// -// msList should come from getMachineSetsForDeployment(d). -// machineMap should come from getMachineMapForDeployment(d, msList). -// -// 1. Get all old MSes this deployment targets, and calculate the max revision number among them (maxOldV). -// 2. Get new MS this deployment targets (whose machine template matches deployment's), and update new MS's revision number to (maxOldV + 1), -// only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop. -// 3. Copy new MS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop. -// -// Note that currently the deployment controller is using caches to avoid querying the server for reads. -// This may lead to stale reads of machine sets, thus incorrect deployment status. -func (r *Reconciler) getAllMachineSetsAndSyncRevision(ctx context.Context, md *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet, createIfNotExisted, templateExists bool) (*clusterv1.MachineSet, []*clusterv1.MachineSet, error) { - reconciliationTime := metav1.Now() - newMS, oldMSs, _, createReason := mdutil.FindNewAndOldMachineSets(md, msList, &reconciliationTime) - - // Get new machine set with the updated revision number - newMS, err := r.getNewMachineSet(ctx, md, newMS, oldMSs, createIfNotExisted, templateExists, createReason) - if err != nil { - return nil, nil, err - } - return newMS, oldMSs, nil -} - -// Returns a MachineSet that matches the intent of the given MachineDeployment. -// If there does not exist such a MachineSet and createIfNotExisted is true, create a new MachineSet. -// If there is already such a MachineSet, update it to propagate in-place mutable fields from the MachineDeployment. -func (r *Reconciler) getNewMachineSet(ctx context.Context, md *clusterv1.MachineDeployment, newMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet, createIfNotExists, templateExists bool, createReason string) (*clusterv1.MachineSet, error) { - // If there is a MachineSet that matches the intent of the MachineDeployment, update the MachineSet - // to propagate all in-place mutable fields from MachineDeployment to the MachineSet. - if newMS != nil { - updatedMS, err := r.updateMachineSet(ctx, md, newMS, oldMSs) - if err != nil { - return nil, err - } - - // Ensure MachineDeployment has the latest MachineSet revision in its revision annotation. - mdutil.SetDeploymentRevision(md, updatedMS.Annotations[clusterv1.RevisionAnnotation]) - return updatedMS, nil - } - - if !createIfNotExists { - return nil, nil - } - - if !templateExists { - return nil, errors.New("cannot create a new MachineSet when templates do not exist") - } - - // Create a new MachineSet and wait until the new MachineSet exists in the cache. 
- newMS, err := r.createMachineSetAndWait(ctx, md, oldMSs, createReason) - if err != nil { - return nil, err - } - - mdutil.SetDeploymentRevision(md, newMS.Annotations[clusterv1.RevisionAnnotation]) - - return newMS, nil -} - -// updateMachineSet updates an existing MachineSet to propagate in-place mutable fields from the MachineDeployment. -func (r *Reconciler) updateMachineSet(ctx context.Context, deployment *clusterv1.MachineDeployment, ms *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet) (*clusterv1.MachineSet, error) { - log := ctrl.LoggerFrom(ctx) - - // Compute the desired MachineSet. - updatedMS, err := r.computeDesiredMachineSet(ctx, deployment, ms, oldMSs) - if err != nil { - return nil, errors.Wrapf(err, "failed to update MachineSet %q", klog.KObj(ms)) - } - - // Update the MachineSet to propagate in-place mutable fields from the MachineDeployment. - err = ssa.Patch(ctx, r.Client, machineDeploymentManagerName, updatedMS, ssa.WithCachingProxy{Cache: r.ssaCache, Original: ms}) - if err != nil { - r.recorder.Eventf(deployment, corev1.EventTypeWarning, "FailedUpdate", "Failed to update MachineSet %s: %v", klog.KObj(updatedMS), err) - return nil, errors.Wrapf(err, "failed to update MachineSet %s", klog.KObj(updatedMS)) - } - - log.V(4).Info("Updated MachineSet", "MachineSet", klog.KObj(updatedMS)) - return updatedMS, nil -} - -// createMachineSetAndWait creates a new MachineSet with the desired intent of the MachineDeployment. -// It waits for the cache to be updated with the newly created MachineSet. -func (r *Reconciler) createMachineSetAndWait(ctx context.Context, deployment *clusterv1.MachineDeployment, oldMSs []*clusterv1.MachineSet, createReason string) (*clusterv1.MachineSet, error) { - log := ctrl.LoggerFrom(ctx) - - // Compute the desired MachineSet. - newMS, err := r.computeDesiredMachineSet(ctx, deployment, nil, oldMSs) - if err != nil { - return nil, errors.Wrap(err, "failed to create new MachineSet") - } - - log = log.WithValues("MachineSet", klog.KObj(newMS)) - ctx = ctrl.LoggerInto(ctx, log) - - // Create the MachineSet. - if err := ssa.Patch(ctx, r.Client, machineDeploymentManagerName, newMS); err != nil { - r.recorder.Eventf(deployment, corev1.EventTypeWarning, "FailedCreate", "Failed to create MachineSet %s: %v", klog.KObj(newMS), err) - return nil, errors.Wrapf(err, "failed to create new MachineSet %s", klog.KObj(newMS)) - } - log.Info(fmt.Sprintf("MachineSet created (%s)", createReason)) - r.recorder.Eventf(deployment, corev1.EventTypeNormal, "SuccessfulCreate", "Created MachineSet %s", klog.KObj(newMS)) - - // Keep trying to get the MachineSet. This will force the cache to update and prevent any future reconciliation of - // the MachineDeployment to reconcile with an outdated list of MachineSets which could lead to unwanted creation of - // a duplicate MachineSet. - var pollErrors []error - if err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 10*time.Second, true, func(ctx context.Context) (bool, error) { - ms := &clusterv1.MachineSet{} - if err := r.Client.Get(ctx, client.ObjectKeyFromObject(newMS), ms); err != nil { - // Do not return error here. Continue to poll even if we hit an error - // so that we avoid existing because of transient errors like network flakes. - // Capture all the errors and return the aggregate error if the poll fails eventually. 
- pollErrors = append(pollErrors, err) - return false, nil - } - return true, nil - }); err != nil { - return nil, errors.Wrapf(kerrors.NewAggregate(pollErrors), "failed to get the MachineSet %s after creation", klog.KObj(newMS)) - } - return newMS, nil -} - -// computeDesiredMachineSet computes the desired MachineSet. -// This MachineSet will be used during reconciliation to: -// * create a MachineSet -// * update an existing MachineSet -// Because we are using Server-Side-Apply we always have to calculate the full object. -// There are small differences in how we calculate the MachineSet depending on if it -// is a create or update. Example: for a new MachineSet we have to calculate a new name, -// while for an existing MachineSet we have to use the name of the existing MachineSet. -func (r *Reconciler) computeDesiredMachineSet(ctx context.Context, deployment *clusterv1.MachineDeployment, existingMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet) (*clusterv1.MachineSet, error) { - var name string - var uid types.UID - var finalizers []string - var uniqueIdentifierLabelValue string - var machineTemplateSpec clusterv1.MachineSpec - var replicas int32 - var err error - - // For a new MachineSet: - // * compute a new uniqueIdentifier, a new MachineSet name, finalizers, replicas and - // machine template spec (take the one from MachineDeployment) - if existingMS == nil { - // Note: In previous Cluster API versions (< v1.4.0), the label value was the hash of the full machine - // template. With the introduction of in-place mutation the machine template of the MachineSet can change. - // Because of that it is impossible that the label's value to always be the hash of the full machine template. - // (Because the hash changes when the machine template changes). - // As a result, we use the hash of the machine template while ignoring all in-place mutable fields, i.e. the - // machine template with only fields that could trigger a rollout for the machine-template-hash, making it - // independent of the changes to any in-place mutable fields. - templateHash, err := hash.Compute(mdutil.MachineTemplateDeepCopyRolloutFields(&deployment.Spec.Template)) - if err != nil { - return nil, errors.Wrap(err, "failed to compute desired MachineSet: failed to compute machine template hash") - } - // Append a random string at the end of template hash. This is required to distinguish MachineSets that - // could be created with the same spec as a result of rolloutAfter. If not, computeDesiredMachineSet - // will end up updating the existing MachineSet instead of creating a new one. - var randomSuffix string - name, randomSuffix = computeNewMachineSetName(deployment.Name + "-") - uniqueIdentifierLabelValue = fmt.Sprintf("%d-%s", templateHash, randomSuffix) - - replicas, err = mdutil.NewMSNewReplicas(deployment, oldMSs, 0) - if err != nil { - return nil, errors.Wrap(err, "failed to compute desired MachineSet") - } - - machineTemplateSpec = *deployment.Spec.Template.Spec.DeepCopy() - } else { - // For updating an existing MachineSet: - // * get the uniqueIdentifier from labels of the existingMS - // * use name, uid, finalizers, replicas and machine template spec from existingMS. - // Note: We use the uid, to ensure that the Server-Side-Apply only updates existingMS. - // Note: We carry over those fields because we don't want to mutate them for an existingMS. 
- var uniqueIdentifierLabelExists bool - uniqueIdentifierLabelValue, uniqueIdentifierLabelExists = existingMS.Labels[clusterv1.MachineDeploymentUniqueLabel] - if !uniqueIdentifierLabelExists { - return nil, errors.Errorf("failed to compute desired MachineSet: failed to get unique identifier from %q annotation", - clusterv1.MachineDeploymentUniqueLabel) - } - - name = existingMS.Name - uid = existingMS.UID - - // Preserve all existing finalizers (including foregroundDeletion finalizer). - finalizers = existingMS.Finalizers - - replicas = *existingMS.Spec.Replicas - - machineTemplateSpec = *existingMS.Spec.Template.Spec.DeepCopy() - } - - // Construct the basic MachineSet. - desiredMS := &clusterv1.MachineSet{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "MachineSet", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: deployment.Namespace, - // Note: By setting the ownerRef on creation we signal to the MachineSet controller that this is not a stand-alone MachineSet. - OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(deployment, machineDeploymentKind)}, - UID: uid, - Finalizers: finalizers, - }, - Spec: clusterv1.MachineSetSpec{ - Replicas: &replicas, - ClusterName: deployment.Spec.ClusterName, - Template: clusterv1.MachineTemplateSpec{ - Spec: machineTemplateSpec, - }, - }, + // TODO(in-place): consider if to move the scale logic to the rollout planner as well, so we can improve test coverage + // like we did for RolloutUpdate and OnDelete strategy. + if err := r.scale(ctx, md, newMS, oldMSs); err != nil { + // If we get an error while trying to scale, the deployment will be requeued + // so we can abort this resync + return err } - // Set the in-place mutable fields. - // When we create a new MachineSet we will just create the MachineSet with those fields. - // When we update an existing MachineSet will we update the fields on the existing MachineSet (in-place mutate). - - // Set labels and .spec.template.labels. - desiredMS.Labels = mdutil.CloneAndAddLabel(deployment.Spec.Template.Labels, - clusterv1.MachineDeploymentUniqueLabel, uniqueIdentifierLabelValue) - // Always set the MachineDeploymentNameLabel. - // Note: If a client tries to create a MachineDeployment without a selector, the MachineDeployment webhook - // will add this label automatically. But we want this label to always be present even if the MachineDeployment - // has a selector which doesn't include it. Therefore, we have to set it here explicitly. - desiredMS.Labels[clusterv1.MachineDeploymentNameLabel] = deployment.Name - desiredMS.Spec.Template.Labels = mdutil.CloneAndAddLabel(deployment.Spec.Template.Labels, - clusterv1.MachineDeploymentUniqueLabel, uniqueIdentifierLabelValue) - - // Set selector. - desiredMS.Spec.Selector = *mdutil.CloneSelectorAndAddLabel(&deployment.Spec.Selector, clusterv1.MachineDeploymentUniqueLabel, uniqueIdentifierLabelValue) - - // Set annotations and .spec.template.annotations. - if desiredMS.Annotations, err = mdutil.ComputeMachineSetAnnotations(ctx, deployment, oldMSs, existingMS); err != nil { - return nil, errors.Wrap(err, "failed to compute desired MachineSet: failed to compute annotations") - } - desiredMS.Spec.Template.Annotations = cloneStringMap(deployment.Spec.Template.Annotations) - - // Set all other in-place mutable fields. 
- desiredMS.Spec.Template.Spec.MinReadySeconds = deployment.Spec.Template.Spec.MinReadySeconds - desiredMS.Spec.Deletion.Order = deployment.Spec.Deletion.Order - desiredMS.Spec.Template.Spec.ReadinessGates = deployment.Spec.Template.Spec.ReadinessGates - desiredMS.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds = deployment.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds - desiredMS.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds = deployment.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds - desiredMS.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = deployment.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds - desiredMS.Spec.MachineNaming = deployment.Spec.MachineNaming - - return desiredMS, nil + return r.syncDeploymentStatus(allMSs, newMS, md) } // cloneStringMap clones a string map. @@ -523,14 +286,8 @@ func (r *Reconciler) scaleMachineSet(ctx context.Context, ms *clusterv1.MachineS return errors.Errorf("spec.replicas for MachineDeployment %v is nil, this is unexpected", client.ObjectKeyFromObject(deployment)) } - annotationsNeedUpdate := mdutil.ReplicasAnnotationsNeedUpdate( - ms, - *(deployment.Spec.Replicas), - *(deployment.Spec.Replicas)+mdutil.MaxSurge(*deployment), - ) - - // No need to scale nor setting annotations, return. - if *(ms.Spec.Replicas) == newScale && !annotationsNeedUpdate { + // No need to scale, return. + if *(ms.Spec.Replicas) == newScale { return nil } @@ -543,9 +300,8 @@ func (r *Reconciler) scaleMachineSet(ctx context.Context, ms *clusterv1.MachineS // Save original replicas to log in event. originalReplicas := *(ms.Spec.Replicas) - // Mutate replicas and the related annotation. + // Mutate replicas. ms.Spec.Replicas = &newScale - mdutil.SetReplicasAnnotations(ms, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+mdutil.MaxSurge(*deployment)) if err := patchHelper.Patch(ctx, ms); err != nil { r.recorder.Eventf(deployment, corev1.EventTypeWarning, "FailedScale", "Failed to scale MachineSet %v: %v", diff --git a/internal/controllers/machinedeployment/machinedeployment_sync_test.go b/internal/controllers/machinedeployment/machinedeployment_sync_test.go index 221eb5790e12..6ed907dcb37e 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync_test.go @@ -26,15 +26,12 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - apirand "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" - "sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) @@ -377,12 +374,6 @@ func TestScaleMachineSet(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) g.Expect(*freshMachineSet.Spec.Replicas).To(BeEquivalentTo(tc.newScale)) - - expectedMachineSetAnnotations := map[string]string{ - clusterv1.DesiredReplicasAnnotation: fmt.Sprintf("%d", *tc.machineDeployment.Spec.Replicas), - clusterv1.MaxReplicasAnnotation: fmt.Sprintf("%d", (*tc.machineDeployment.Spec.Replicas)+mdutil.MaxSurge(*tc.machineDeployment)), - } - g.Expect(freshMachineSet.GetAnnotations()).To(BeEquivalentTo(expectedMachineSetAnnotations)) }) } } @@ -525,305 +516,6 @@ func TestSyncDeploymentStatus(t *testing.T) { 
} } -func TestComputeDesiredMachineSet(t *testing.T) { - duration5s := ptr.To(int32(5)) - duration10s := ptr.To(int32(10)) - namingTemplateKey := "test" - - infraRef := clusterv1.ContractVersionedObjectReference{ - Kind: "GenericInfrastructureMachineTemplate", - Name: "infra-template-1", - APIGroup: clusterv1.GroupVersionInfrastructure.Group, - } - bootstrapRef := clusterv1.ContractVersionedObjectReference{ - Kind: "GenericBootstrapConfigTemplate", - Name: "bootstrap-template-1", - APIGroup: clusterv1.GroupVersionBootstrap.Group, - } - - deployment := &clusterv1.MachineDeployment{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "md1", - Annotations: map[string]string{"top-level-annotation": "top-level-annotation-value"}, - }, - Spec: clusterv1.MachineDeploymentSpec{ - ClusterName: "test-cluster", - Replicas: ptr.To[int32](3), - Rollout: clusterv1.MachineDeploymentRolloutSpec{ - Strategy: clusterv1.MachineDeploymentRolloutStrategy{ - Type: clusterv1.RollingUpdateMachineDeploymentStrategyType, - RollingUpdate: clusterv1.MachineDeploymentRolloutStrategyRollingUpdate{ - MaxSurge: intOrStrPtr(1), - MaxUnavailable: intOrStrPtr(0), - }, - }, - }, - Deletion: clusterv1.MachineDeploymentDeletionSpec{ - Order: clusterv1.RandomMachineSetDeletionOrder, - }, - MachineNaming: clusterv1.MachineNamingSpec{ - Template: "{{ .machineSet.name }}" + namingTemplateKey + "-{{ .random }}", - }, - Selector: metav1.LabelSelector{ - MatchLabels: map[string]string{"k1": "v1"}, - }, - Template: clusterv1.MachineTemplateSpec{ - ObjectMeta: clusterv1.ObjectMeta{ - Labels: map[string]string{"machine-label1": "machine-value1"}, - Annotations: map[string]string{"machine-annotation1": "machine-value1"}, - }, - Spec: clusterv1.MachineSpec{ - Version: "v1.25.3", - InfrastructureRef: infraRef, - Bootstrap: clusterv1.Bootstrap{ - ConfigRef: bootstrapRef, - }, - MinReadySeconds: ptr.To[int32](3), - ReadinessGates: []clusterv1.MachineReadinessGate{{ConditionType: "foo"}}, - Deletion: clusterv1.MachineDeletionSpec{ - NodeDrainTimeoutSeconds: duration10s, - NodeVolumeDetachTimeoutSeconds: duration10s, - NodeDeletionTimeoutSeconds: duration10s, - }, - }, - }, - }, - } - - skeletonMSBasedOnMD := &clusterv1.MachineSet{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Labels: map[string]string{"machine-label1": "machine-value1"}, - Annotations: map[string]string{"top-level-annotation": "top-level-annotation-value"}, - }, - Spec: clusterv1.MachineSetSpec{ - ClusterName: "test-cluster", - Replicas: ptr.To[int32](3), - Deletion: clusterv1.MachineSetDeletionSpec{ - Order: clusterv1.RandomMachineSetDeletionOrder, - }, - Selector: metav1.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}}, - Template: *deployment.Spec.Template.DeepCopy(), - MachineNaming: clusterv1.MachineNamingSpec{ - Template: "{{ .machineSet.name }}" + namingTemplateKey + "-{{ .random }}", - }, - }, - } - - t.Run("should compute a new MachineSet when no old MachineSets exist", func(t *testing.T) { - expectedMS := skeletonMSBasedOnMD.DeepCopy() - - g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, nil, nil) - g.Expect(err).ToNot(HaveOccurred()) - assertMachineSet(g, actualMS, expectedMS) - }) - - t.Run("should compute a new MachineSet when old MachineSets exist", func(t *testing.T) { - oldMS := skeletonMSBasedOnMD.DeepCopy() - oldMS.Spec.Replicas = ptr.To[int32](2) - - expectedMS := skeletonMSBasedOnMD.DeepCopy() - expectedMS.Spec.Replicas = ptr.To[int32](2) // 4 (maxsurge+replicas) - 2 (replicas 
of old ms) = 2 - - g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, nil, []*clusterv1.MachineSet{oldMS}) - g.Expect(err).ToNot(HaveOccurred()) - assertMachineSet(g, actualMS, expectedMS) - }) - - t.Run("should compute the updated MachineSet when no old MachineSets exists", func(t *testing.T) { - uniqueID := apirand.String(5) - existingMS := skeletonMSBasedOnMD.DeepCopy() - // computeDesiredMachineSet should retain the UID, name and the "machine-template-hash" label value - // of the existing machine. - // Other fields like labels, annotations, node timeout, etc are expected to change. - existingMSUID := types.UID("abc-123-uid") - existingMS.UID = existingMSUID - existingMS.Name = deployment.Name + "-" + uniqueID - existingMS.Labels = map[string]string{ - clusterv1.MachineDeploymentUniqueLabel: uniqueID, - "ms-label-1": "ms-value-1", - } - existingMS.Annotations = nil - // Pre-existing finalizer should be preserved. - existingMS.Finalizers = []string{"pre-existing-finalizer"} - existingMS.Spec.Template.Labels = map[string]string{ - clusterv1.MachineDeploymentUniqueLabel: uniqueID, - "ms-label-2": "ms-value-2", - } - existingMS.Spec.Template.Annotations = nil - existingMS.Spec.Template.Spec.ReadinessGates = []clusterv1.MachineReadinessGate{{ConditionType: "bar"}} - existingMS.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds = duration5s - existingMS.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds = duration5s - existingMS.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = duration5s - existingMS.Spec.Deletion.Order = clusterv1.NewestMachineSetDeletionOrder - existingMS.Spec.Template.Spec.MinReadySeconds = ptr.To[int32](0) - - expectedMS := skeletonMSBasedOnMD.DeepCopy() - expectedMS.UID = existingMSUID - expectedMS.Name = deployment.Name + "-" + uniqueID - expectedMS.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID - // Pre-existing finalizer should be preserved. - expectedMS.Finalizers = []string{"pre-existing-finalizer"} - - expectedMS.Spec.Template.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID - - g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, nil) - g.Expect(err).ToNot(HaveOccurred()) - assertMachineSet(g, actualMS, expectedMS) - }) - - t.Run("should compute the updated MachineSet when old MachineSets exist", func(t *testing.T) { - uniqueID := apirand.String(5) - existingMS := skeletonMSBasedOnMD.DeepCopy() - existingMSUID := types.UID("abc-123-uid") - existingMS.UID = existingMSUID - existingMS.Name = deployment.Name + "-" + uniqueID - existingMS.Labels = map[string]string{ - clusterv1.MachineDeploymentUniqueLabel: uniqueID, - "ms-label-1": "ms-value-1", - } - existingMS.Annotations = nil - // Pre-existing finalizer should be preserved. 
- existingMS.Finalizers = []string{"pre-existing-finalizer"} - existingMS.Spec.Template.Labels = map[string]string{ - clusterv1.MachineDeploymentUniqueLabel: uniqueID, - "ms-label-2": "ms-value-2", - } - existingMS.Spec.Template.Annotations = nil - existingMS.Spec.Template.Spec.ReadinessGates = []clusterv1.MachineReadinessGate{{ConditionType: "bar"}} - existingMS.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds = duration5s - existingMS.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds = duration5s - existingMS.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = duration5s - existingMS.Spec.Deletion.Order = clusterv1.NewestMachineSetDeletionOrder - existingMS.Spec.Template.Spec.MinReadySeconds = ptr.To[int32](0) - - oldMS := skeletonMSBasedOnMD.DeepCopy() - oldMS.Spec.Replicas = ptr.To[int32](2) - - // Note: computeDesiredMachineSet does not modify the replicas on the updated MachineSet. - // Therefore, even though we have the old machineset with replicas 2 the updatedMS does not - // get modified replicas (2 = 4(maxsuge+spec.replica) - 2(oldMS replicas)). - // Nb. The final replicas of the MachineSet are calculated elsewhere. - expectedMS := skeletonMSBasedOnMD.DeepCopy() - expectedMS.UID = existingMSUID - expectedMS.Name = deployment.Name + "-" + uniqueID - expectedMS.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID - // Pre-existing finalizer should be preserved. - expectedMS.Finalizers = []string{"pre-existing-finalizer"} - expectedMS.Spec.Template.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID - - g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, []*clusterv1.MachineSet{oldMS}) - g.Expect(err).ToNot(HaveOccurred()) - assertMachineSet(g, actualMS, expectedMS) - }) - - t.Run("should compute the updated MachineSet when no old MachineSets exists (", func(t *testing.T) { - // Set rollout strategy to "OnDelete". - deployment := deployment.DeepCopy() - deployment.Spec.Rollout.Strategy = clusterv1.MachineDeploymentRolloutStrategy{ - Type: clusterv1.OnDeleteMachineDeploymentStrategyType, - RollingUpdate: clusterv1.MachineDeploymentRolloutStrategyRollingUpdate{}, - } - - uniqueID := apirand.String(5) - existingMS := skeletonMSBasedOnMD.DeepCopy() - // computeDesiredMachineSet should retain the UID, name and the "machine-template-hash" label value - // of the existing machine. - // Other fields like labels, annotations, node timeout, etc are expected to change. 
- existingMSUID := types.UID("abc-123-uid") - existingMS.UID = existingMSUID - existingMS.Name = deployment.Name + "-" + uniqueID - existingMS.Labels = map[string]string{ - clusterv1.MachineDeploymentUniqueLabel: uniqueID, - "ms-label-1": "ms-value-1", - } - existingMS.Annotations = nil - existingMS.Spec.Template.Labels = map[string]string{ - clusterv1.MachineDeploymentUniqueLabel: uniqueID, - "ms-label-2": "ms-value-2", - } - existingMS.Spec.Template.Annotations = nil - existingMS.Spec.Template.Spec.ReadinessGates = []clusterv1.MachineReadinessGate{{ConditionType: "bar"}} - existingMS.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds = duration5s - existingMS.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds = duration5s - existingMS.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = duration5s - existingMS.Spec.Deletion.Order = clusterv1.NewestMachineSetDeletionOrder - existingMS.Spec.Template.Spec.MinReadySeconds = ptr.To[int32](0) - - expectedMS := skeletonMSBasedOnMD.DeepCopy() - expectedMS.UID = existingMSUID - expectedMS.Name = deployment.Name + "-" + uniqueID - expectedMS.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID - expectedMS.Spec.Template.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID - expectedMS.Spec.Deletion.Order = deployment.Spec.Deletion.Order - - g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, nil) - g.Expect(err).ToNot(HaveOccurred()) - assertMachineSet(g, actualMS, expectedMS) - }) -} - -func assertMachineSet(g *WithT, actualMS *clusterv1.MachineSet, expectedMS *clusterv1.MachineSet) { - // check UID - if expectedMS.UID != "" { - g.Expect(actualMS.UID).Should(Equal(expectedMS.UID)) - } - // Check Name - if expectedMS.Name != "" { - g.Expect(actualMS.Name).Should(Equal(expectedMS.Name)) - } - // Check Namespace - g.Expect(actualMS.Namespace).Should(Equal(expectedMS.Namespace)) - - // Check finalizers - g.Expect(actualMS.Finalizers).Should(Equal(expectedMS.Finalizers)) - - // Check Replicas - g.Expect(actualMS.Spec.Replicas).ShouldNot(BeNil()) - g.Expect(actualMS.Spec.Replicas).Should(HaveValue(Equal(*expectedMS.Spec.Replicas))) - - // Check ClusterName - g.Expect(actualMS.Spec.ClusterName).Should(Equal(expectedMS.Spec.ClusterName)) - - // Check Labels - for k, v := range expectedMS.Labels { - g.Expect(actualMS.Labels).Should(HaveKeyWithValue(k, v)) - } - for k, v := range expectedMS.Spec.Template.Labels { - g.Expect(actualMS.Spec.Template.Labels).Should(HaveKeyWithValue(k, v)) - } - // Verify that the labels also has the unique identifier key. - g.Expect(actualMS.Labels).Should(HaveKey(clusterv1.MachineDeploymentUniqueLabel)) - g.Expect(actualMS.Spec.Template.Labels).Should(HaveKey(clusterv1.MachineDeploymentUniqueLabel)) - - // Check Annotations - // Note: More nuanced validation of the Revision annotation calculations are done when testing `ComputeMachineSetAnnotations`. 
- for k, v := range expectedMS.Annotations { - g.Expect(actualMS.Annotations).Should(HaveKeyWithValue(k, v)) - } - for k, v := range expectedMS.Spec.Template.Annotations { - g.Expect(actualMS.Spec.Template.Annotations).Should(HaveKeyWithValue(k, v)) - } - - // Check MinReadySeconds - g.Expect(actualMS.Spec.Template.Spec.MinReadySeconds).Should(Equal(expectedMS.Spec.Template.Spec.MinReadySeconds)) - - // Check Order - g.Expect(actualMS.Spec.Deletion.Order).Should(Equal(expectedMS.Spec.Deletion.Order)) - - // Check MachineTemplateSpec - g.Expect(actualMS.Spec.Template.Spec).Should(BeComparableTo(expectedMS.Spec.Template.Spec)) - - // Check MachineNamingSpec - g.Expect(actualMS.Spec.MachineNaming.Template).Should(BeComparableTo(expectedMS.Spec.MachineNaming.Template)) -} - // asserts the conditions set on the Getter object. // TODO: replace this with util.condition.MatchConditions (or a new matcher in controller runtime komega). func assertConditions(t *testing.T, from v1beta1conditions.Getter, conditions ...*clusterv1.Condition) { diff --git a/internal/controllers/machinedeployment/mdutil/util.go b/internal/controllers/machinedeployment/mdutil/util.go index 8c74b5f71e54..f5839b2313fb 100644 --- a/internal/controllers/machinedeployment/mdutil/util.go +++ b/internal/controllers/machinedeployment/mdutil/util.go @@ -102,21 +102,6 @@ func (o MachineSetsBySizeNewer) Less(i, j int) bool { return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas) } -// SetDeploymentRevision updates the revision for a deployment. -func SetDeploymentRevision(deployment *clusterv1.MachineDeployment, revision string) bool { - updated := false - - if deployment.Annotations == nil { - deployment.Annotations = make(map[string]string) - } - if deployment.Annotations[clusterv1.RevisionAnnotation] != revision { - deployment.Annotations[clusterv1.RevisionAnnotation] = revision - updated = true - } - - return updated -} - // MaxRevision finds the highest revision in the machine sets. func MaxRevision(ctx context.Context, allMSs []*clusterv1.MachineSet) int64 { log := ctrl.LoggerFrom(ctx) @@ -193,9 +178,9 @@ func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger // Deprecated: This annotation is deprecated and is going to be removed in the next release. const revisionHistoryAnnotation = "machinedeployment.clusters.x-k8s.io/revision-history" -// ComputeMachineSetAnnotations computes the annotations that should be set on the MachineSet. -// Note: The passed in newMS is nil if the new MachineSet doesn't exist in the apiserver yet. -func ComputeMachineSetAnnotations(ctx context.Context, deployment *clusterv1.MachineDeployment, oldMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet) (map[string]string, error) { +// MachineSetAnnotationsFromMachineDeployment returns the annotations that should be set on all the MachineSets and +// that are derived from the controlling MachineDeployment. +func MachineSetAnnotationsFromMachineDeployment(_ context.Context, deployment *clusterv1.MachineDeployment) map[string]string { // Copy annotations from Deployment annotations while filtering out some annotations // that we don't want to propagate.
annotations := map[string]string{} @@ -206,6 +191,17 @@ func ComputeMachineSetAnnotations(ctx context.Context, deployment *clusterv1.Mac annotations[k] = v } + annotations[clusterv1.DesiredReplicasAnnotation] = fmt.Sprintf("%d", *deployment.Spec.Replicas) + annotations[clusterv1.MaxReplicasAnnotation] = fmt.Sprintf("%d", *(deployment.Spec.Replicas)+MaxSurge(*deployment)) + return annotations +} + +// ComputeRevisionAnnotations returns revision annotations to be set on a newMS. +func ComputeRevisionAnnotations(ctx context.Context, newMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet) (map[string]string, string, error) { + // Compute the revision annotations for the newMS, based on the revisions + // currently tracked on the newMS and the oldMSs. + annotations := map[string]string{} + // The newMS's revision should be the greatest among all MSes. Usually, its revision number is newRevision (the max revision number // of all old MSes + 1). However, it's possible that some old MSes are deleted after the newMS revision being updated, and // newRevision becomes smaller than newMS's revision. We will never decrease a revision of a MachineSet. @@ -217,7 +213,7 @@ func ComputeMachineSetAnnotations(ctx context.Context, deployment *clusterv1.Mac if currentRevisionExists { currentRevisionInt, err := strconv.ParseInt(currentRevision, 10, 64) if err != nil { - return nil, errors.Wrapf(err, "failed to parse current revision on MachineSet %s", klog.KObj(newMS)) + return nil, newRevision, errors.Wrapf(err, "failed to parse current revision on MachineSet %s", klog.KObj(newMS)) } if newRevisionInt < currentRevisionInt { newRevision = currentRevision @@ -243,9 +239,19 @@ func ComputeMachineSetAnnotations(ctx context.Context, deployment *clusterv1.Mac } annotations[clusterv1.RevisionAnnotation] = newRevision - annotations[clusterv1.DesiredReplicasAnnotation] = fmt.Sprintf("%d", *deployment.Spec.Replicas) - annotations[clusterv1.MaxReplicasAnnotation] = fmt.Sprintf("%d", *(deployment.Spec.Replicas)+MaxSurge(*deployment)) - return annotations + return annotations, newRevision, nil +} + +// GetRevisionAnnotations returns revision annotations to be preserved on oldMSs. +func GetRevisionAnnotations(_ context.Context, oldMS *clusterv1.MachineSet) map[string]string { + annotations := map[string]string{} + if v, ok := oldMS.Annotations[clusterv1.RevisionAnnotation]; ok { + annotations[clusterv1.RevisionAnnotation] = v + } + if v, ok := oldMS.Annotations[revisionHistoryAnnotation]; ok { + annotations[revisionHistoryAnnotation] = v + } + return annotations } // FindOneActiveOrLatest returns the only active or the latest machine set in case there is at most one active @@ -273,39 +279,6 @@ func FindOneActiveOrLatest(newMS *clusterv1.MachineSet, oldMSs []*clusterv1.Mach } } -// SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations.
-func SetReplicasAnnotations(ms *clusterv1.MachineSet, desiredReplicas, maxReplicas int32) bool { - updated := false - if ms.Annotations == nil { - ms.Annotations = make(map[string]string) - } - desiredString := fmt.Sprintf("%d", desiredReplicas) - if hasString := ms.Annotations[clusterv1.DesiredReplicasAnnotation]; hasString != desiredString { - ms.Annotations[clusterv1.DesiredReplicasAnnotation] = desiredString - updated = true - } - if hasString := ms.Annotations[clusterv1.MaxReplicasAnnotation]; hasString != fmt.Sprintf("%d", maxReplicas) { - ms.Annotations[clusterv1.MaxReplicasAnnotation] = fmt.Sprintf("%d", maxReplicas) - updated = true - } - return updated -} - -// ReplicasAnnotationsNeedUpdate return true if the replicas annotation needs to be updated. -func ReplicasAnnotationsNeedUpdate(ms *clusterv1.MachineSet, desiredReplicas, maxReplicas int32) bool { - if ms.Annotations == nil { - return true - } - desiredString := fmt.Sprintf("%d", desiredReplicas) - if hasString := ms.Annotations[clusterv1.DesiredReplicasAnnotation]; hasString != desiredString { - return true - } - if hasString := ms.Annotations[clusterv1.MaxReplicasAnnotation]; hasString != fmt.Sprintf("%d", maxReplicas) { - return true - } - return false -} - // MaxUnavailable returns the maximum unavailable machines a rolling deployment can take. func MaxUnavailable(deployment clusterv1.MachineDeployment) int32 { if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 { @@ -376,17 +349,17 @@ func getMachineSetFraction(ms clusterv1.MachineSet, md clusterv1.MachineDeployme return integer.RoundToInt32(newMSsize) - *(ms.Spec.Replicas) } -// NotUpToDateResult is the result of calling the MachineTemplateUpToDate func for a MachineTemplateSpec. -type NotUpToDateResult struct { - LogMessages []string // consider if to make this private. +// UpToDateResult is the result of calling the MachineTemplateUpToDate func for a MachineTemplateSpec. +type UpToDateResult struct { + LogMessages []string ConditionMessages []string EligibleForInPlaceUpdate bool } // MachineTemplateUpToDate returns true if the current MachineTemplateSpec is up-to-date with a corresponding desired MachineTemplateSpec. // Note: The comparison does not consider any in-place propagated fields, as well as the version from external references. -func MachineTemplateUpToDate(current, desired *clusterv1.MachineTemplateSpec) (bool, *NotUpToDateResult) { - res := &NotUpToDateResult{ +func MachineTemplateUpToDate(current, desired *clusterv1.MachineTemplateSpec) (bool, UpToDateResult) { + res := UpToDateResult{ EligibleForInPlaceUpdate: true, } @@ -431,7 +404,9 @@ func MachineTemplateUpToDate(current, desired *clusterv1.MachineTemplateSpec) (b return false, res } - return true, nil + // Machine is up to date, no need for in-place update. + res.EligibleForInPlaceUpdate = false + return true, res } // MachineTemplateDeepCopyRolloutFields copies a MachineTemplateSpec @@ -464,7 +439,7 @@ func MachineTemplateDeepCopyRolloutFields(template *clusterv1.MachineTemplateSpe // NOTE: If we find a matching MachineSet which only differs in in-place mutable fields we can use it to // fulfill the intent of the MachineDeployment by just updating the MachineSet to propagate in-place mutable fields. // Thus we don't have to create a new MachineSet and we can avoid an unnecessary rollout. 
-func FindNewAndOldMachineSets(deployment *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet, reconciliationTime *metav1.Time) (newMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet, oldMSNotUpToDateResults map[string]NotUpToDateResult, createReason string) { +func FindNewAndOldMachineSets(deployment *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet, reconciliationTime metav1.Time) (newMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet, upToDateResults map[string]UpToDateResult, createReason string) { if len(msList) == 0 { return nil, nil, nil, "no MachineSets exist for the MachineDeployment" } @@ -479,51 +454,52 @@ func FindNewAndOldMachineSets(deployment *clusterv1.MachineDeployment, msList [] sort.Sort(MachineSetsByDecreasingReplicas(msList)) oldMSs = make([]*clusterv1.MachineSet, 0) - oldMSNotUpToDateResults = make(map[string]NotUpToDateResult) + upToDateResults = make(map[string]UpToDateResult) var diffs []string for _, ms := range msList { - upToDate, notUpToDateResult := MachineTemplateUpToDate(&ms.Spec.Template, &deployment.Spec.Template) + upToDate, upToDateResult := MachineTemplateUpToDate(&ms.Spec.Template, &deployment.Spec.Template) + upToDateResults[ms.Name] = upToDateResult if upToDate { newMSCandidates = append(newMSCandidates, ms) } else { oldMSs = append(oldMSs, ms) // Override the EligibleForInPlaceUpdate decision if rollout after is expired. - if !deployment.Spec.Rollout.After.IsZero() && deployment.Spec.Rollout.After.Before(reconciliationTime) && !ms.CreationTimestamp.After(deployment.Spec.Rollout.After.Time) { - notUpToDateResult.EligibleForInPlaceUpdate = false - notUpToDateResult.LogMessages = append(notUpToDateResult.LogMessages, "MachineDeployment spec.rolloutAfter expired") + if !deployment.Spec.Rollout.After.IsZero() && deployment.Spec.Rollout.After.Before(&reconciliationTime) && !ms.CreationTimestamp.After(deployment.Spec.Rollout.After.Time) { + upToDateResult.EligibleForInPlaceUpdate = false + upToDateResult.LogMessages = append(upToDateResult.LogMessages, "MachineDeployment spec.rolloutAfter expired") // No need to set an additional condition message, it is not used anywhere. + upToDateResults[ms.Name] = upToDateResult } - oldMSNotUpToDateResults[ms.Name] = *notUpToDateResult - diffs = append(diffs, fmt.Sprintf("MachineSet %s: diff: %s", ms.Name, strings.Join(notUpToDateResult.LogMessages, ", "))) + diffs = append(diffs, fmt.Sprintf("MachineSet %s: diff: %s", ms.Name, strings.Join(upToDateResult.LogMessages, ", "))) } } if len(newMSCandidates) == 0 { - return nil, oldMSs, oldMSNotUpToDateResults, fmt.Sprintf("couldn't find MachineSet matching MachineDeployment spec template: %s", strings.Join(diffs, "; ")) + return nil, oldMSs, upToDateResults, fmt.Sprintf("couldn't find MachineSet matching MachineDeployment spec template: %s", strings.Join(diffs, "; ")) } // If RolloutAfter is not set, pick the first matching MachineSet. if deployment.Spec.Rollout.After.IsZero() { for _, ms := range newMSCandidates[1:] { oldMSs = append(oldMSs, ms) - oldMSNotUpToDateResults[ms.Name] = NotUpToDateResult{ + upToDateResults[ms.Name] = UpToDateResult{ // No need to set log or condition message for discarded candidates, it is not used anywhere. EligibleForInPlaceUpdate: false, } } - return newMSCandidates[0], oldMSs, oldMSNotUpToDateResults, "" + return newMSCandidates[0], oldMSs, upToDateResults, "" } // If reconciliation time is before RolloutAfter, pick the first matching MachineSet. 
if reconciliationTime.Before(&deployment.Spec.Rollout.After) { for _, ms := range newMSCandidates[1:] { oldMSs = append(oldMSs, ms) - oldMSNotUpToDateResults[ms.Name] = NotUpToDateResult{ + upToDateResults[ms.Name] = UpToDateResult{ // No need to set log or condition for discarded candidates, it is not used anywhere. EligibleForInPlaceUpdate: false, } } - return newMSCandidates[0], oldMSs, oldMSNotUpToDateResults, "" + return newMSCandidates[0], oldMSs, upToDateResults, "" } // Pick the first matching MachineSet that has been created at RolloutAfter or later. @@ -534,7 +510,7 @@ func FindNewAndOldMachineSets(deployment *clusterv1.MachineDeployment, msList [] } oldMSs = append(oldMSs, ms) - oldMSNotUpToDateResults[ms.Name] = NotUpToDateResult{ + upToDateResults[ms.Name] = UpToDateResult{ // No need to set log or condition for discarded candidates, it is not used anywhere. EligibleForInPlaceUpdate: false, } @@ -542,9 +518,9 @@ func FindNewAndOldMachineSets(deployment *clusterv1.MachineDeployment, msList [] // If no matching MachineSet was created after RolloutAfter, trigger creation of a new MachineSet. if newMS == nil { - return nil, oldMSs, oldMSNotUpToDateResults, fmt.Sprintf("spec.rollout.after on MachineDeployment set to %s, no MachineSet has been created afterwards", deployment.Spec.Rollout.After.Format(time.RFC3339)) + return nil, oldMSs, upToDateResults, fmt.Sprintf("spec.rollout.after on MachineDeployment set to %s, no MachineSet has been created afterwards", deployment.Spec.Rollout.After.Format(time.RFC3339)) } - return newMS, oldMSs, oldMSNotUpToDateResults, "" + return newMS, oldMSs, upToDateResults, "" } // GetReplicaCountForMachineSets returns the sum of Replicas of the given machine sets. @@ -834,15 +810,3 @@ func CloneSelectorAndAddLabel(selector *metav1.LabelSelector, labelKey, labelVal return newSelector } - -// GetDeletingMachineCount gets the number of machines that are in the process of being deleted -// in a machineList. 
-func GetDeletingMachineCount(machineList *clusterv1.MachineList) int32 { - var deletingMachineCount int32 - for _, machine := range machineList.Items { - if !machine.GetDeletionTimestamp().IsZero() { - deletingMachineCount++ - } - } - return deletingMachineCount -} diff --git a/internal/controllers/machinedeployment/mdutil/util_test.go b/internal/controllers/machinedeployment/mdutil/util_test.go index 0b26c0ae2bde..b8c9f9443868 100644 --- a/internal/controllers/machinedeployment/mdutil/util_test.go +++ b/internal/controllers/machinedeployment/mdutil/util_test.go @@ -34,6 +34,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/conversion" ) var ( @@ -361,14 +362,18 @@ func TestMachineTemplateUpToDate(t *testing.T) { runTest := func(t1, t2 *clusterv1.MachineTemplateSpec, expectedLogMessages, expectedConditionMessages []string) { // Run - upToDate, notUpToDateResult := MachineTemplateUpToDate(t1, t2) + upToDate, upToDateResult := MachineTemplateUpToDate(t1, t2) g.Expect(upToDate).To(Equal(test.expectedUpToDate)) if upToDate { - g.Expect(notUpToDateResult).To(BeNil()) + g.Expect(upToDateResult).ToNot(BeNil()) + g.Expect(upToDateResult.EligibleForInPlaceUpdate).To(BeFalse()) + g.Expect(upToDateResult.LogMessages).To(BeEmpty()) + g.Expect(upToDateResult.ConditionMessages).To(BeEmpty()) } else { - g.Expect(notUpToDateResult).ToNot(BeNil()) - g.Expect(notUpToDateResult.LogMessages).To(Equal(expectedLogMessages)) - g.Expect(notUpToDateResult.ConditionMessages).To(Equal(expectedConditionMessages)) + g.Expect(upToDateResult).ToNot(BeNil()) + g.Expect(upToDateResult.EligibleForInPlaceUpdate).To(BeTrue()) + g.Expect(upToDateResult.LogMessages).To(Equal(expectedLogMessages)) + g.Expect(upToDateResult.ConditionMessages).To(Equal(expectedConditionMessages)) } g.Expect(t1.Labels).NotTo(BeNil()) g.Expect(t2.Labels).NotTo(BeNil()) @@ -423,23 +428,23 @@ func TestFindNewAndOldMachineSets(t *testing.T) { msCreatedExactlyInRolloutAfter.CreationTimestamp = rolloutAfter tests := []struct { - Name string - deployment clusterv1.MachineDeployment - msList []*clusterv1.MachineSet - reconciliationTime *metav1.Time - expectedNewMS *clusterv1.MachineSet - expectedOldMSs []*clusterv1.MachineSet - expectedOldMSNotUpToDateResults map[string]NotUpToDateResult - expectedCreateReason string + Name string + deployment clusterv1.MachineDeployment + msList []*clusterv1.MachineSet + reconciliationTime metav1.Time + expectedNewMS *clusterv1.MachineSet + expectedOldMSs []*clusterv1.MachineSet + expectedUpToDateResults map[string]UpToDateResult + expectedCreateReason string }{ { - Name: "Get nil if no MachineSets exist", - deployment: deployment, - msList: []*clusterv1.MachineSet{}, - expectedNewMS: nil, - expectedOldMSs: nil, - expectedOldMSNotUpToDateResults: nil, - expectedCreateReason: "no MachineSets exist for the MachineDeployment", + Name: "Get nil if no MachineSets exist", + deployment: deployment, + msList: []*clusterv1.MachineSet{}, + expectedNewMS: nil, + expectedOldMSs: nil, + expectedUpToDateResults: nil, + expectedCreateReason: "no MachineSets exist for the MachineDeployment", }, { Name: "Get nil if there are no MachineTemplate that matches the intent of the MachineDeployment", @@ -447,7 +452,7 @@ func TestFindNewAndOldMachineSets(t *testing.T) { msList: []*clusterv1.MachineSet{&oldMS}, expectedNewMS: nil, expectedOldMSs: []*clusterv1.MachineSet{&oldMS}, - expectedOldMSNotUpToDateResults: map[string]NotUpToDateResult{ + 
expectedUpToDateResults: map[string]UpToDateResult{ oldMS.Name: { LogMessages: []string{"spec.infrastructureRef InfrastructureMachineTemplate old-infra-ref, InfrastructureMachineTemplate new-infra-ref required"}, ConditionMessages: []string{"InfrastructureMachine is not up-to-date"}, @@ -462,21 +467,28 @@ func TestFindNewAndOldMachineSets(t *testing.T) { msList: []*clusterv1.MachineSet{&oldMS, &matchingMS}, expectedNewMS: &matchingMS, expectedOldMSs: []*clusterv1.MachineSet{&oldMS}, - expectedOldMSNotUpToDateResults: map[string]NotUpToDateResult{ + expectedUpToDateResults: map[string]UpToDateResult{ oldMS.Name: { LogMessages: []string{"spec.infrastructureRef InfrastructureMachineTemplate old-infra-ref, InfrastructureMachineTemplate new-infra-ref required"}, ConditionMessages: []string{"InfrastructureMachine is not up-to-date"}, EligibleForInPlaceUpdate: true, }, + matchingMS.Name: { + EligibleForInPlaceUpdate: false, + }, }, }, { - Name: "Get empty old MachineSets", - deployment: deployment, - msList: []*clusterv1.MachineSet{&matchingMS}, - expectedNewMS: &matchingMS, - expectedOldMSs: []*clusterv1.MachineSet{}, - expectedOldMSNotUpToDateResults: map[string]NotUpToDateResult{}, + Name: "Get empty old MachineSets", + deployment: deployment, + msList: []*clusterv1.MachineSet{&matchingMS}, + expectedNewMS: &matchingMS, + expectedOldMSs: []*clusterv1.MachineSet{}, + expectedUpToDateResults: map[string]UpToDateResult{ + matchingMS.Name: { + EligibleForInPlaceUpdate: false, + }, + }, }, { Name: "Get the MachineSet with the higher replicas if multiple MachineSets match the desired intent on the MachineDeployment", @@ -484,7 +496,7 @@ func TestFindNewAndOldMachineSets(t *testing.T) { msList: []*clusterv1.MachineSet{&oldMS, &matchingMS, &matchingMSHigherReplicas}, expectedNewMS: &matchingMSHigherReplicas, expectedOldMSs: []*clusterv1.MachineSet{&oldMS, &matchingMS}, - expectedOldMSNotUpToDateResults: map[string]NotUpToDateResult{ + expectedUpToDateResults: map[string]UpToDateResult{ oldMS.Name: { LogMessages: []string{"spec.infrastructureRef InfrastructureMachineTemplate old-infra-ref, InfrastructureMachineTemplate new-infra-ref required"}, ConditionMessages: []string{"InfrastructureMachine is not up-to-date"}, @@ -493,6 +505,9 @@ func TestFindNewAndOldMachineSets(t *testing.T) { matchingMS.Name: { EligibleForInPlaceUpdate: false, }, + matchingMSHigherReplicas.Name: { + EligibleForInPlaceUpdate: false, + }, }, }, { @@ -501,12 +516,15 @@ func TestFindNewAndOldMachineSets(t *testing.T) { msList: []*clusterv1.MachineSet{&oldMS, &matchingMSDiffersInPlaceMutableFields}, expectedNewMS: &matchingMSDiffersInPlaceMutableFields, expectedOldMSs: []*clusterv1.MachineSet{&oldMS}, - expectedOldMSNotUpToDateResults: map[string]NotUpToDateResult{ + expectedUpToDateResults: map[string]UpToDateResult{ oldMS.Name: { LogMessages: []string{"spec.infrastructureRef InfrastructureMachineTemplate old-infra-ref, InfrastructureMachineTemplate new-infra-ref required"}, ConditionMessages: []string{"InfrastructureMachine is not up-to-date"}, EligibleForInPlaceUpdate: true, }, + matchingMSDiffersInPlaceMutableFields.Name: { + EligibleForInPlaceUpdate: false, + }, }, }, { @@ -515,7 +533,7 @@ func TestFindNewAndOldMachineSets(t *testing.T) { msList: []*clusterv1.MachineSet{&oldMS}, expectedNewMS: nil, expectedOldMSs: []*clusterv1.MachineSet{&oldMS}, - expectedOldMSNotUpToDateResults: map[string]NotUpToDateResult{ + expectedUpToDateResults: map[string]UpToDateResult{ oldMS.Name: { LogMessages: []string{"spec.infrastructureRef 
InfrastructureMachineTemplate old-infra-ref, InfrastructureMachineTemplate new-infra-ref required"}, ConditionMessages: []string{"InfrastructureMachine is not up-to-date"}, @@ -528,10 +546,10 @@ func TestFindNewAndOldMachineSets(t *testing.T) { Name: "Get nil if no MachineSet matches the desired intent of the MachineDeployment, reconciliationTime is > rolloutAfter", deployment: *deploymentWithRolloutAfter, msList: []*clusterv1.MachineSet{&oldMSCreatedThreeBeforeRolloutAfter}, - reconciliationTime: &oneAfterRolloutAfter, + reconciliationTime: oneAfterRolloutAfter, expectedNewMS: nil, expectedOldMSs: []*clusterv1.MachineSet{&oldMSCreatedThreeBeforeRolloutAfter}, - expectedOldMSNotUpToDateResults: map[string]NotUpToDateResult{ + expectedUpToDateResults: map[string]UpToDateResult{ oldMS.Name: { ConditionMessages: []string{"InfrastructureMachine is not up-to-date"}, LogMessages: []string{ @@ -549,23 +567,26 @@ func TestFindNewAndOldMachineSets(t *testing.T) { Name: "Get the MachineSet if reconciliationTime < rolloutAfter", deployment: *deploymentWithRolloutAfter, msList: []*clusterv1.MachineSet{&msCreatedTwoBeforeRolloutAfter, &msCreatedThreeBeforeRolloutAfter}, - reconciliationTime: &oneBeforeRolloutAfter, + reconciliationTime: oneBeforeRolloutAfter, expectedNewMS: &msCreatedThreeBeforeRolloutAfter, expectedOldMSs: []*clusterv1.MachineSet{&msCreatedTwoBeforeRolloutAfter}, - expectedOldMSNotUpToDateResults: map[string]NotUpToDateResult{ + expectedUpToDateResults: map[string]UpToDateResult{ msCreatedTwoBeforeRolloutAfter.Name: { EligibleForInPlaceUpdate: false, }, + msCreatedThreeBeforeRolloutAfter.Name: { + EligibleForInPlaceUpdate: false, + }, }, }, { Name: "Get nil if reconciliationTime is > rolloutAfter and no MachineSet is created after rolloutAfter", deployment: *deploymentWithRolloutAfter, msList: []*clusterv1.MachineSet{&msCreatedTwoBeforeRolloutAfter, &msCreatedThreeBeforeRolloutAfter, &oldMSCreatedThreeBeforeRolloutAfter}, - reconciliationTime: &oneAfterRolloutAfter, + reconciliationTime: oneAfterRolloutAfter, expectedNewMS: nil, expectedOldMSs: []*clusterv1.MachineSet{&oldMSCreatedThreeBeforeRolloutAfter, &msCreatedThreeBeforeRolloutAfter, &msCreatedTwoBeforeRolloutAfter}, - expectedOldMSNotUpToDateResults: map[string]NotUpToDateResult{ + expectedUpToDateResults: map[string]UpToDateResult{ msCreatedTwoBeforeRolloutAfter.Name: { EligibleForInPlaceUpdate: false, }, @@ -589,13 +610,16 @@ func TestFindNewAndOldMachineSets(t *testing.T) { Name: "Get MachineSet created after RolloutAfter if reconciliationTime is > rolloutAfter", deployment: *deploymentWithRolloutAfter, msList: []*clusterv1.MachineSet{&msCreatedAfterRolloutAfter, &msCreatedTwoBeforeRolloutAfter}, - reconciliationTime: &twoAfterRolloutAfter, + reconciliationTime: twoAfterRolloutAfter, expectedNewMS: &msCreatedAfterRolloutAfter, expectedOldMSs: []*clusterv1.MachineSet{&msCreatedTwoBeforeRolloutAfter}, - expectedOldMSNotUpToDateResults: map[string]NotUpToDateResult{ + expectedUpToDateResults: map[string]UpToDateResult{ msCreatedTwoBeforeRolloutAfter.Name: { EligibleForInPlaceUpdate: false, }, + msCreatedAfterRolloutAfter.Name: { + EligibleForInPlaceUpdate: false, + }, }, }, { @@ -603,26 +627,32 @@ func TestFindNewAndOldMachineSets(t *testing.T) { Name: "Get MachineSet created exactly in RolloutAfter if reconciliationTime > rolloutAfter", deployment: *deploymentWithRolloutAfter, msList: []*clusterv1.MachineSet{&msCreatedExactlyInRolloutAfter, &msCreatedTwoBeforeRolloutAfter}, - reconciliationTime: &oneAfterRolloutAfter, + 
reconciliationTime:      oneAfterRolloutAfter,
 			expectedNewMS:           &msCreatedExactlyInRolloutAfter,
 			expectedOldMSs:          []*clusterv1.MachineSet{&msCreatedTwoBeforeRolloutAfter},
-			expectedOldMSNotUpToDateResults: map[string]NotUpToDateResult{
+			expectedUpToDateResults: map[string]UpToDateResult{
 				msCreatedTwoBeforeRolloutAfter.Name: {
 					EligibleForInPlaceUpdate: false,
 				},
+				msCreatedExactlyInRolloutAfter.Name: {
+					EligibleForInPlaceUpdate: false,
+				},
 			},
 		},
 		{
 			Name:       "Get MachineSet created after RolloutAfter if reconciliationTime is > rolloutAfter (inverse order in ms list)",
 			deployment: *deploymentWithRolloutAfter,
 			msList:     []*clusterv1.MachineSet{&msCreatedTwoBeforeRolloutAfter, &msCreatedAfterRolloutAfter},
-			reconciliationTime: &twoAfterRolloutAfter,
+			reconciliationTime: twoAfterRolloutAfter,
 			expectedNewMS:      &msCreatedAfterRolloutAfter,
 			expectedOldMSs:     []*clusterv1.MachineSet{&msCreatedTwoBeforeRolloutAfter},
-			expectedOldMSNotUpToDateResults: map[string]NotUpToDateResult{
+			expectedUpToDateResults: map[string]UpToDateResult{
 				msCreatedTwoBeforeRolloutAfter.Name: {
 					EligibleForInPlaceUpdate: false,
 				},
+				msCreatedAfterRolloutAfter.Name: {
+					EligibleForInPlaceUpdate: false,
+				},
 			},
 		},
 	}
@@ -631,10 +661,10 @@ func TestFindNewAndOldMachineSets(t *testing.T) {
 		t.Run(test.Name, func(t *testing.T) {
 			g := NewWithT(t)
 
-			newMS, oldMSs, oldMSNotUpToDateResults, createReason := FindNewAndOldMachineSets(&test.deployment, test.msList, test.reconciliationTime)
+			newMS, oldMSs, upToDateResults, createReason := FindNewAndOldMachineSets(&test.deployment, test.msList, test.reconciliationTime)
 			g.Expect(newMS).To(BeComparableTo(test.expectedNewMS))
 			g.Expect(oldMSs).To(BeComparableTo(test.expectedOldMSs))
-			g.Expect(oldMSNotUpToDateResults).To(BeComparableTo(test.expectedOldMSNotUpToDateResults))
+			g.Expect(upToDateResults).To(BeComparableTo(test.expectedUpToDateResults))
 			g.Expect(createReason).To(BeComparableTo(test.expectedCreateReason))
 		})
 	}
@@ -942,165 +972,191 @@ func TestMaxUnavailable(t *testing.T) {
 	}
 }
 
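Since the expectations above now contain one UpToDateResult per MachineSet (including the matching one), a consumer no longer needs to special-case missing map keys. The following is a rough sketch of how these results might be consumed; the helper is hypothetical, the loop body is an assumption rather than the actual planner code, and reconciliationTime is assumed to be a plain metav1.Time value after this change:

// Illustrative only: consuming the renamed per-MachineSet results.
func planFromUpToDateResultsSketch(md *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet, now metav1.Time) {
	newMS, oldMSs, upToDateResults, createReason := FindNewAndOldMachineSets(md, msList, now)
	for _, oldMS := range oldMSs {
		// Every MachineSet now has an entry, not only the old ones; the
		// matching (new) MachineSet simply reports EligibleForInPlaceUpdate: false.
		if res, ok := upToDateResults[oldMS.Name]; ok && res.EligibleForInPlaceUpdate {
			// Candidate for an in-place update instead of delete/recreate;
			// res.LogMessages and res.ConditionMessages explain what differs.
		}
	}
	if newMS == nil {
		// No MachineSet matches the desired state: the planner will create
		// one, and createReason records why (e.g. rolloutAfter expired).
		_ = createReason
	}
}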
-// TestAnnotationUtils is a set of simple tests for annotation related util functions.
-func TestAnnotationUtils(t *testing.T) {
-	// Setup
+func TestMachineSetAnnotationsFromMachineDeployment(t *testing.T) {
 	tDeployment := generateDeployment("nginx")
-	tDeployment.Spec.Replicas = ptr.To[int32](1)
-	tMS := generateMS(tDeployment)
+	tDeployment.Annotations = map[string]string{
+		// annotations to skip
+		corev1.LastAppliedConfigAnnotation:  "foo",
+		clusterv1.RevisionAnnotation:        "foo",
+		revisionHistoryAnnotation:           "foo",
+		clusterv1.DesiredReplicasAnnotation: "foo",
+		clusterv1.MaxReplicasAnnotation:     "foo",
+		conversion.DataAnnotation:           "foo",
+
+		// annotations to preserve
+		"bar": "bar",
+	}
+	tDeployment.Spec.Replicas = ptr.To[int32](3)
+	tDeployment.Spec.Rollout.Strategy = clusterv1.MachineDeploymentRolloutStrategy{
+		Type: clusterv1.RollingUpdateMachineDeploymentStrategyType,
+		RollingUpdate: clusterv1.MachineDeploymentRolloutStrategyRollingUpdate{
+			MaxSurge:       ptr.To(intstr.FromInt32(1)),
+			MaxUnavailable: ptr.To(intstr.FromInt32(0)),
+		},
+	}
 
-	// Test Case 1: Check if annotations are set properly
-	t.Run("SetReplicasAnnotations", func(t *testing.T) {
+	t.Run("Drops well-known annotations, keeps others, adds replica annotations", func(t *testing.T) {
 		g := NewWithT(t)
 
-		g.Expect(SetReplicasAnnotations(&tMS, 10, 11)).To(BeTrue())
-		g.Expect(tMS.Annotations).To(HaveKeyWithValue(clusterv1.DesiredReplicasAnnotation, "10"))
-		g.Expect(tMS.Annotations).To(HaveKeyWithValue(clusterv1.MaxReplicasAnnotation, "11"))
-	})
+		annotations := MachineSetAnnotationsFromMachineDeployment(ctx, &tDeployment)
 
-	// Test Case 2: Check if annotations reflect deployments state
-	tMS.Annotations[clusterv1.DesiredReplicasAnnotation] = "1"
-	tMS.Status.AvailableReplicas = ptr.To[int32](1)
-	tMS.Spec.Replicas = new(int32)
-	*tMS.Spec.Replicas = 1
+		g.Expect(annotations).To(Equal(map[string]string{
+			// Drops well-known annotations
 
-	t.Run("IsSaturated", func(t *testing.T) {
-		g := NewWithT(t)
+			// Keeps others
+			"bar": "bar",
 
-		g.Expect(IsSaturated(&tDeployment, &tMS)).To(BeTrue())
+			// Adds replica annotations
+			clusterv1.DesiredReplicasAnnotation: "3",
+			clusterv1.MaxReplicasAnnotation:     "4",
+		}))
 	})
 }
 
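The new test pins down how annotations flow from a MachineDeployment to its MachineSets: the controller's machinery annotations are dropped, user annotations pass through untouched, and the replica annotations are recomputed (desired 3 plus maxSurge 1 gives max 4 above). A minimal sketch of that contract, assuming the test file's existing imports; the helper below is illustrative, not the package's actual function:

// Sketch of the contract the test above asserts.
func machineSetAnnotationsSketch(md *clusterv1.MachineDeployment, maxSurge int32) map[string]string {
	skip := map[string]bool{
		corev1.LastAppliedConfigAnnotation:  true, // machinery annotations owned elsewhere
		clusterv1.RevisionAnnotation:        true,
		revisionHistoryAnnotation:           true,
		clusterv1.DesiredReplicasAnnotation: true,
		clusterv1.MaxReplicasAnnotation:     true,
		conversion.DataAnnotation:           true,
	}
	out := map[string]string{}
	for k, v := range md.Annotations {
		if !skip[k] { // user annotations pass through untouched
			out[k] = v
		}
	}
	// Replica annotations are recomputed from the MachineDeployment spec,
	// never copied: desired = spec.replicas, max = desired + maxSurge.
	desired := ptr.Deref(md.Spec.Replicas, 0)
	out[clusterv1.DesiredReplicasAnnotation] = fmt.Sprintf("%d", desired)
	out[clusterv1.MaxReplicasAnnotation] = fmt.Sprintf("%d", desired+maxSurge)
	return out
}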
-func TestComputeMachineSetAnnotations(t *testing.T) {
-	deployment := generateDeployment("nginx")
-	deployment.Spec.Replicas = ptr.To[int32](3)
-	maxSurge := intstr.FromInt32(1)
-	maxUnavailable := intstr.FromInt32(0)
-	deployment.Spec.Rollout.Strategy = clusterv1.MachineDeploymentRolloutStrategy{
-		Type: clusterv1.RollingUpdateMachineDeploymentStrategyType,
-		RollingUpdate: clusterv1.MachineDeploymentRolloutStrategyRollingUpdate{
-			MaxSurge:       &maxSurge,
-			MaxUnavailable: &maxUnavailable,
+func TestIsSaturated(t *testing.T) {
+	tDeployment := generateDeployment("nginx")
+	tDeployment.Spec.Replicas = ptr.To[int32](3)
+
+	tMS := &clusterv1.MachineSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Annotations: map[string]string{
+				clusterv1.DesiredReplicasAnnotation: "3",
+			},
 		},
 	}
-	deployment.Annotations = map[string]string{
-		corev1.LastAppliedConfigAnnotation: "last-applied-configuration",
-		"key1": "value1",
-	}
+	t.Run("deployment not yet saturated, ms doesn't have all the desired replicas", func(t *testing.T) {
+		g := NewWithT(t)
+		tMS := tMS.DeepCopy()
+		tMS.Spec.Replicas = ptr.To[int32](1)
+		g.Expect(IsSaturated(&tDeployment, tMS)).To(BeFalse())
+	})
+	t.Run("deployment not yet saturated, ms has all replicas but some are not available yet", func(t *testing.T) {
+		g := NewWithT(t)
+		tMS := tMS.DeepCopy()
+		tMS.Spec.Replicas = ptr.To[int32](3)
+		tMS.Status.AvailableReplicas = ptr.To[int32](1)
+		g.Expect(IsSaturated(&tDeployment, tMS)).To(BeFalse())
+	})
+	t.Run("deployment saturated, ms has all replicas and all are available", func(t *testing.T) {
+		g := NewWithT(t)
+		tMS := tMS.DeepCopy()
+		tMS.Spec.Replicas = ptr.To[int32](3)
+		tMS.Status.AvailableReplicas = ptr.To[int32](3)
+		g.Expect(IsSaturated(&tDeployment, tMS)).To(BeTrue())
+	})
+}
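Read together, the three subtests define saturation as three conditions holding at once. Roughly, as a sketch inferred from the assertions rather than the real function body:

// isSaturatedSketch mirrors what the subtests above assert: a MachineSet is
// saturated only when it carries the deployment's desired replica count in
// its desired-replicas annotation, its spec asks for exactly that many
// replicas, and all of them are available.
func isSaturatedSketch(md *clusterv1.MachineDeployment, ms *clusterv1.MachineSet) bool {
	desired := ptr.Deref(md.Spec.Replicas, 0)
	return ms.Annotations[clusterv1.DesiredReplicasAnnotation] == fmt.Sprintf("%d", desired) &&
		ptr.Deref(ms.Spec.Replicas, 0) == desired &&
		ptr.Deref(ms.Status.AvailableReplicas, 0) == desired
}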
+
+func TestComputeRevisionAnnotations(t *testing.T) {
 	tests := []struct {
-		name       string
-		deployment *clusterv1.MachineDeployment
-		oldMSs     []*clusterv1.MachineSet
-		ms         *clusterv1.MachineSet
-		want       map[string]string
-		wantErr    bool
+		name         string
+		oldMSs       []*clusterv1.MachineSet
+		ms           *clusterv1.MachineSet
+		want         map[string]string
+		wantRevision string
+		wantErr      bool
 	}{
 		{
-			name:       "Calculating annotations for a new MachineSet",
-			deployment: &deployment,
-			oldMSs:     nil,
-			ms:         nil,
+			name:   "Calculating annotations for a new newMS - oldMSs do not exist",
+			oldMSs: nil,
+			ms:     nil,
 			want: map[string]string{
-				"key1":                              "value1",
-				clusterv1.RevisionAnnotation:        "1",
-				clusterv1.DesiredReplicasAnnotation: "3",
-				clusterv1.MaxReplicasAnnotation:     "4",
+				clusterv1.RevisionAnnotation: "1",
 			},
-			wantErr: false,
+			wantRevision: "1",
+			wantErr:      false,
 		},
 		{
-			name:       "Calculating annotations for a new MachineSet - old MSs exist",
-			deployment: &deployment,
-			oldMSs:     []*clusterv1.MachineSet{machineSetWithRevisionAndHistory("1", "")},
-			ms:         nil,
+			name:   "Calculating annotations for a new newMS - old MSs exist",
+			oldMSs: []*clusterv1.MachineSet{machineSetWithRevisionAndHistory("1", "")},
+			ms:     nil,
 			want: map[string]string{
-				"key1":                              "value1",
-				clusterv1.RevisionAnnotation:        "2",
-				clusterv1.DesiredReplicasAnnotation: "3",
-				clusterv1.MaxReplicasAnnotation:     "4",
+				clusterv1.RevisionAnnotation: "2",
 			},
-			wantErr: false,
+			wantRevision: "2",
+			wantErr:      false,
 		},
 		{
-			name:       "Calculating annotations for a existing MachineSet",
-			deployment: &deployment,
-			oldMSs:     nil,
-			ms:         machineSetWithRevisionAndHistory("1", ""),
+			name:   "Calculating annotations for an existing newMS - oldMSs do not exist",
+			oldMSs: nil,
+			ms:     machineSetWithRevisionAndHistory("1", ""),
 			want: map[string]string{
-				"key1":                              "value1",
-				clusterv1.RevisionAnnotation:        "1",
-				clusterv1.DesiredReplicasAnnotation: "3",
-				clusterv1.MaxReplicasAnnotation:     "4",
+				clusterv1.RevisionAnnotation: "1",
 			},
-			wantErr: false,
+			wantRevision: "1",
+			wantErr:      false,
 		},
 		{
-			name:       "Calculating annotations for a existing MachineSet - old MSs exist",
-			deployment: &deployment,
+			name: "Calculating annotations for an existing newMS - old MSs exist - update required",
 			oldMSs: []*clusterv1.MachineSet{
 				machineSetWithRevisionAndHistory("1", ""),
 				machineSetWithRevisionAndHistory("2", ""),
 			},
 			ms: machineSetWithRevisionAndHistory("1", ""),
 			want: map[string]string{
-				"key1":                              "value1",
-				clusterv1.RevisionAnnotation:        "3",
-				revisionHistoryAnnotation:           "1",
-				clusterv1.DesiredReplicasAnnotation: "3",
-				clusterv1.MaxReplicasAnnotation:     "4",
+				clusterv1.RevisionAnnotation: "3",
+				revisionHistoryAnnotation:    "1",
 			},
-			wantErr: false,
+			wantRevision: "3",
+			wantErr:      false,
 		},
 		{
-			name:       "Calculating annotations for a existing MachineSet - old MSs exist - existing revision is greater",
-			deployment: &deployment,
+			name: "Calculating annotations for an existing newMS - old MSs exist - no update required",
 			oldMSs: []*clusterv1.MachineSet{
 				machineSetWithRevisionAndHistory("1", ""),
 				machineSetWithRevisionAndHistory("2", ""),
 			},
 			ms: machineSetWithRevisionAndHistory("4", ""),
 			want: map[string]string{
-				"key1":                              "value1",
-				clusterv1.RevisionAnnotation:        "4",
-				clusterv1.DesiredReplicasAnnotation: "3",
-				clusterv1.MaxReplicasAnnotation:     "4",
+				clusterv1.RevisionAnnotation: "4",
 			},
-			wantErr: false,
+			wantRevision: "4",
+			wantErr:      false,
 		},
 		{
-			name:       "Calculating annotations for a existing MachineSet - old MSs exist - ms already has revision history",
-			deployment: &deployment,
+			name: "Calculating annotations for an existing newMS with revision history - old MSs exist - update required",
 			oldMSs: []*clusterv1.MachineSet{
 				machineSetWithRevisionAndHistory("3", ""),
 				machineSetWithRevisionAndHistory("4", ""),
 			},
 			ms: machineSetWithRevisionAndHistory("2", "1"),
 			want: map[string]string{
-				"key1":                              "value1",
-				clusterv1.RevisionAnnotation:        "5",
-				revisionHistoryAnnotation:           "1,2",
-				clusterv1.DesiredReplicasAnnotation: "3",
-				clusterv1.MaxReplicasAnnotation:     "4",
+				clusterv1.RevisionAnnotation: "5",
+				revisionHistoryAnnotation:    "1,2",
 			},
-			wantErr: false,
+			wantRevision: "5",
+			wantErr:      false,
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			g := NewWithT(t)
-			got, err := ComputeMachineSetAnnotations(ctx, tt.deployment, tt.oldMSs, tt.ms)
+			got, gotRevision, err := ComputeRevisionAnnotations(ctx, tt.ms, tt.oldMSs)
 			if tt.wantErr {
-				g.Expect(err).ShouldNot(HaveOccurred())
+				g.Expect(err).Should(HaveOccurred())
 			} else {
 				g.Expect(err).ToNot(HaveOccurred())
 				g.Expect(got).Should(Equal(tt.want))
+				g.Expect(gotRevision).Should(Equal(tt.wantRevision))
 			}
 		})
 	}
 }
 
+func TestGetRevisionAnnotations(t *testing.T) {
+	t.Run("gets revision annotations", func(t *testing.T) {
+		g := NewWithT(t)
+		ms := machineSetWithRevisionAndHistory("2", "1")
+
+		annotations := GetRevisionAnnotations(ctx, ms)
+
+		g.Expect(annotations).To(HaveLen(2))
+		g.Expect(annotations).To(HaveKeyWithValue(clusterv1.RevisionAnnotation, "2"))
+		g.Expect(annotations).To(HaveKeyWithValue(revisionHistoryAnnotation, "1"))
+	})
+}
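The table above encodes the revision bookkeeping: a brand-new MachineSet gets max(old revisions) + 1, a MachineSet already at the highest revision keeps its annotations unchanged, and a MachineSet that becomes current again is bumped to the next revision with its previous revision appended to the history annotation (history "1" plus revision "2" yields "1,2"). A sketch of that rule under the same assumptions as the tests (strconv assumed imported, ctx as in the tests; names are illustrative, and the real logic lives in ComputeRevisionAnnotations):

func computeRevisionSketch(ms *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet) (map[string]string, string) {
	// The next revision is one greater than the highest revision carried
	// by any old MachineSet.
	maxOld := int64(0)
	for _, old := range oldMSs {
		if r, err := strconv.ParseInt(old.Annotations[clusterv1.RevisionAnnotation], 10, 64); err == nil && r > maxOld {
			maxOld = r
		}
	}
	next := strconv.FormatInt(maxOld+1, 10)

	if ms == nil {
		// Brand-new MachineSet: it simply gets the next revision.
		return map[string]string{clusterv1.RevisionAnnotation: next}, next
	}
	cur, _ := strconv.ParseInt(ms.Annotations[clusterv1.RevisionAnnotation], 10, 64)
	if cur > maxOld {
		// Already the newest revision: keep the existing annotations.
		return GetRevisionAnnotations(ctx, ms), ms.Annotations[clusterv1.RevisionAnnotation]
	}
	// The MachineSet becomes current again: bump its revision and append
	// the previous one to the history annotation.
	annotations := map[string]string{clusterv1.RevisionAnnotation: next}
	if history := ms.Annotations[revisionHistoryAnnotation]; history != "" {
		annotations[revisionHistoryAnnotation] = history + "," + ms.Annotations[clusterv1.RevisionAnnotation]
	} else {
		annotations[revisionHistoryAnnotation] = ms.Annotations[clusterv1.RevisionAnnotation]
	}
	return annotations, next
}

Walking the six rows of the table through this sketch reproduces each want map and wantRevision.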
"test needn't update", - machineSet: &clusterv1.MachineSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "hello", - Namespace: metav1.NamespaceDefault, - Annotations: map[string]string{clusterv1.DesiredReplicasAnnotation: desiredReplicas, clusterv1.MaxReplicasAnnotation: maxReplicas}, - }, - Spec: clusterv1.MachineSetSpec{ - Selector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, - }, - }, - expected: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - g := NewWithT(t) - - g.Expect(ReplicasAnnotationsNeedUpdate(test.machineSet, 10, 20)).To(Equal(test.expected)) - }) - } -} diff --git a/internal/controllers/machinedeployment/testdata/ondelete/3 replicas, maxuserunavailable 1, random(0).test.log.golden b/internal/controllers/machinedeployment/testdata/ondelete/3 replicas, maxuserunavailable 1, random(0).test.log.golden index f702a5ee06c6..1c19b2c6f87d 100644 --- a/internal/controllers/machinedeployment/testdata/ondelete/3 replicas, maxuserunavailable 1, random(0).test.log.golden +++ b/internal/controllers/machinedeployment/testdata/ondelete/3 replicas, maxuserunavailable 1, random(0).test.log.golden @@ -3,38 +3,24 @@ [Test] Initial state md, 3/3 replicas - ms1, 3/3 replicas (m1,m2,m3) - - ms2, 0/0 replicas () [Test] Rollout 3 replicas, onDeleteStrategy, random(0) -[MS controller] Iteration 1, Reconcile ms2, 0/0 replicas () +[M controller] Iteration 1, Reconcile m2 [M controller] Iteration 1, Reconcile m1 [MD controller] Iteration 1, Reconcile md [MD controller] - Input to rollout planner md, 3/3 replicas - ms1, 3/3 replicas (m1,m2,m3) - - ms2, 0/0 replicas () [MD controller] - Result of rollout planner md, 3/3 replicas - ms1, 3/3 replicas (m1,m2,m3) - ms2, 0/0 replicas () [MS controller] Iteration 1, Reconcile ms1, 3/3 replicas (m1,m2,m3) -[MS controller] Iteration 1, Reconcile ms1, 3/3 replicas (m1,m2,m3) -[M controller] Iteration 2, Reconcile m2 -[M controller] Iteration 2, Reconcile m3 -[MS controller] Iteration 2, Reconcile ms2, 0/0 replicas () -[MD controller] Iteration 2, Reconcile md -[MD controller] - Input to rollout planner - md, 3/3 replicas - - ms1, 3/3 replicas (m1,m2,m3) - - ms2, 0/0 replicas () -[MD controller] - Result of rollout planner - md, 3/3 replicas - - ms1, 3/3 replicas (m1,m2,m3) - - ms2, 0/0 replicas () +[M controller] Iteration 1, Reconcile m3 +[User] Iteration 1, Deleting machine m2 +[MS controller] Iteration 1, Reconcile ms2, 0/0 replicas () [M controller] Iteration 2, Reconcile m1 -[M controller] Iteration 3, Reconcile m3 -[MS controller] Iteration 3, Reconcile ms2, 0/0 replicas () -[User] Iteration 3, Deleting machine m2 -[M controller] Iteration 3, Reconcile m2 +[M controller] Iteration 2, Reconcile m3 +[M controller] Iteration 2, Reconcile m2 [M controller] - m2 finalizer removed [MD controller] Iteration 3, Reconcile md [MD controller] - Input to rollout planner @@ -45,19 +31,15 @@ md, 3/3 replicas - ms1, 3/3 replicas (m1,m3) - ms2, 0/0 replicas () +[M controller] Iteration 3, Reconcile m1 +[MS controller] Iteration 3, Reconcile ms2, 0/0 replicas () +[MS controller] Iteration 3, Reconcile ms1, 3/3 replicas (m1,m3) +[M controller] Iteration 3, Reconcile m3 +[MS controller] Iteration 4, Reconcile ms1, 2/3 replicas (m1,m3) +[MS controller] Iteration 4, Reconcile ms2, 0/0 replicas () [M controller] Iteration 4, Reconcile m3 [MS controller] Iteration 4, Reconcile ms2, 0/0 replicas () [MD controller] Iteration 4, Reconcile md -[MD controller] - Input to rollout planner - md, 3/3 replicas - - ms1, 3/3 
replicas (m1,m3) - - ms2, 0/0 replicas () -[MD controller] - Result of rollout planner - md, 3/3 replicas - - ms1, 3/3 replicas (m1,m3) - - ms2, 0/0 replicas () -[MS controller] Iteration 4, Reconcile ms1, 3/3 replicas (m1,m3) -[MD controller] Iteration 5, Reconcile md [MD controller] - Input to rollout planner md, 3/3 replicas - ms1, 2/3 replicas (m1,m3) @@ -66,8 +48,6 @@ md, 2/3 replicas - ms1, 2/2 replicas (m1,m3) - ms2, 0/0 replicas () -[M controller] Iteration 5, Reconcile m3 -[MS controller] Iteration 5, Reconcile ms2, 0/0 replicas () [MS controller] Iteration 5, Reconcile ms1, 2/2 replicas (m1,m3) [MD controller] Iteration 5, Reconcile md [MD controller] - Input to rollout planner @@ -78,30 +58,62 @@ md, 2/3 replicas - ms1, 2/2 replicas (m1,m3) - ms2, 0/1 replicas () -[MS controller] Iteration 5, Reconcile ms1, 2/2 replicas (m1,m3) -[M controller] Iteration 6, Reconcile m3 -[M controller] Iteration 6, Reconcile m3 -[MS controller] Iteration 6, Reconcile ms2, 0/1 replicas () +[M controller] Iteration 5, Reconcile m3 +[MS controller] Iteration 5, Reconcile ms2, 0/1 replicas () [MS controller] - ms2 scale up to 1/1 replicas (m4 created) -[User] Iteration 6, Deleting machine m3 -[MS controller] Iteration 6, Reconcile ms1, 2/2 replicas (m1,m3) +[MS controller] Iteration 5, Reconcile ms2, 1/1 replicas (m4) [MS controller] Iteration 6, Reconcile ms2, 1/1 replicas (m4) +[M controller] Iteration 6, Reconcile m3 +[M controller] Iteration 6, Reconcile m1 +[MD controller] Iteration 6, Reconcile md +[MD controller] - Input to rollout planner + md, 2/3 replicas + - ms1, 2/2 replicas (m1,m3) + - ms2, 1/1 replicas (m4) +[MD controller] - Result of rollout planner + md, 3/3 replicas + - ms1, 2/2 replicas (m1,m3) + - ms2, 1/1 replicas (m4) [MS controller] Iteration 7, Reconcile ms2, 1/1 replicas (m4) +[M controller] Iteration 7, Reconcile m3 +[MD controller] Iteration 7, Reconcile md +[MD controller] - Input to rollout planner + md, 3/3 replicas + - ms1, 2/2 replicas (m1,m3) + - ms2, 1/1 replicas (m4) +[MD controller] - Result of rollout planner + md, 3/3 replicas + - ms1, 2/2 replicas (m1,m3) + - ms2, 1/1 replicas (m4) +[M controller] Iteration 7, Reconcile m4 [M controller] Iteration 7, Reconcile m4 [MS controller] Iteration 7, Reconcile ms1, 2/2 replicas (m1,m3) -[M controller] Iteration 7, Reconcile m1 -[M controller] Iteration 7, Reconcile m3 -[M controller] - m3 finalizer removed +[M controller] Iteration 8, Reconcile m4 +[User] Iteration 8, Deleting machine m3 [MD controller] Iteration 8, Reconcile md [MD controller] - Input to rollout planner - md, 2/3 replicas + md, 3/3 replicas + - ms1, 2/2 replicas (m1,m3) + - ms2, 1/1 replicas (m4) +[MD controller] - Result of rollout planner + md, 3/3 replicas + - ms1, 2/2 replicas (m1,m3) + - ms2, 1/1 replicas (m4) +[MS controller] Iteration 8, Reconcile ms2, 1/1 replicas (m4) +[MS controller] Iteration 8, Reconcile ms1, 2/2 replicas (m1,m3) +[M controller] Iteration 8, Reconcile m3 +[M controller] - m3 finalizer removed +[M controller] Iteration 8, Reconcile m1 +[MD controller] Iteration 9, Reconcile md +[MD controller] - Input to rollout planner + md, 3/3 replicas - ms1, 2/2 replicas (m1) - ms2, 1/1 replicas (m4) [MD controller] - Result of rollout planner md, 3/3 replicas - ms1, 2/2 replicas (m1) - ms2, 1/1 replicas (m4) -[MD controller] Iteration 8, Reconcile md +[MD controller] Iteration 9, Reconcile md [MD controller] - Input to rollout planner md, 3/3 replicas - ms1, 2/2 replicas (m1) @@ -110,11 +122,15 @@ md, 3/3 replicas - ms1, 2/2 replicas 
(m1) - ms2, 1/1 replicas (m4) -[MS controller] Iteration 8, Reconcile ms2, 1/1 replicas (m4) -[MS controller] Iteration 8, Reconcile ms1, 2/2 replicas (m1) -[MS controller] Iteration 8, Reconcile ms1, 1/2 replicas (m1) -[MS controller] Iteration 8, Reconcile ms1, 1/2 replicas (m1) -[MD controller] Iteration 9, Reconcile md +[M controller] Iteration 9, Reconcile m4 +[MS controller] Iteration 9, Reconcile ms1, 2/2 replicas (m1) +[MS controller] Iteration 9, Reconcile ms1, 1/2 replicas (m1) +[M controller] Iteration 9, Reconcile m1 +[MS controller] Iteration 9, Reconcile ms2, 1/1 replicas (m4) +[MS controller] Iteration 9, Reconcile ms2, 1/1 replicas (m4) +[MS controller] Iteration 10, Reconcile ms1, 1/2 replicas (m1) +[M controller] Iteration 10, Reconcile m1 +[MD controller] Iteration 10, Reconcile md [MD controller] - Input to rollout planner md, 3/3 replicas - ms1, 1/2 replicas (m1) @@ -123,8 +139,7 @@ md, 2/3 replicas - ms1, 1/1 replicas (m1) - ms2, 1/1 replicas (m4) -[MS controller] Iteration 9, Reconcile ms1, 1/1 replicas (m1) -[MD controller] Iteration 9, Reconcile md +[MD controller] Iteration 10, Reconcile md [MD controller] - Input to rollout planner md, 2/3 replicas - ms1, 1/1 replicas (m1) @@ -133,10 +148,7 @@ md, 2/3 replicas - ms1, 1/1 replicas (m1) - ms2, 1/2 replicas (m4) -[MS controller] Iteration 9, Reconcile ms1, 1/1 replicas (m1) -[MS controller] Iteration 9, Reconcile ms1, 1/1 replicas (m1) -[M controller] Iteration 9, Reconcile m4 -[MD controller] Iteration 9, Reconcile md +[MD controller] Iteration 11, Reconcile md [MD controller] - Input to rollout planner md, 2/3 replicas - ms1, 1/1 replicas (m1) @@ -145,55 +157,65 @@ md, 2/3 replicas - ms1, 1/1 replicas (m1) - ms2, 1/2 replicas (m4) -[MD controller] Iteration 9, Reconcile md +[MS controller] Iteration 11, Reconcile ms1, 1/1 replicas (m1) +[M controller] Iteration 11, Reconcile m4 +[MS controller] Iteration 12, Reconcile ms1, 1/1 replicas (m1) +[MS controller] Iteration 12, Reconcile ms2, 1/2 replicas (m4) +[MS controller] - ms2 scale up to 2/2 replicas (m5 created) +[User] Iteration 12, Deleting machine m1 +[M controller] Iteration 12, Reconcile m4 +[M controller] Iteration 12, Reconcile m4 +[MD controller] Iteration 12, Reconcile md [MD controller] - Input to rollout planner md, 2/3 replicas - ms1, 1/1 replicas (m1) - - ms2, 1/2 replicas (m4) + - ms2, 2/2 replicas (m4,m5) [MD controller] - Result of rollout planner - md, 2/3 replicas + md, 3/3 replicas - ms1, 1/1 replicas (m1) - - ms2, 1/2 replicas (m4) -[M controller] Iteration 10, Reconcile m4 -[MS controller] Iteration 10, Reconcile ms2, 1/2 replicas (m4) -[MS controller] - ms2 scale up to 2/2 replicas (m5 created) -[User] Iteration 10, Deleting machine m1 -[M controller] Iteration 10, Reconcile m1 -[M controller] - m1 finalizer removed -[MS controller] Iteration 10, Reconcile ms1, 1/1 replicas () -[MD controller] Iteration 10, Reconcile md + - ms2, 2/2 replicas (m4,m5) +[M controller] Iteration 13, Reconcile m4 +[MD controller] Iteration 13, Reconcile md [MD controller] - Input to rollout planner - md, 2/3 replicas - - ms1, 0/1 replicas () + md, 3/3 replicas + - ms1, 1/1 replicas (m1) - ms2, 2/2 replicas (m4,m5) [MD controller] - Result of rollout planner - md, 2/3 replicas - - ms1, 0/0 replicas () + md, 3/3 replicas + - ms1, 1/1 replicas (m1) - ms2, 2/2 replicas (m4,m5) -[MD controller] Iteration 10, Reconcile md +[M controller] Iteration 13, Reconcile m1 +[M controller] - m1 finalizer removed +[M controller] Iteration 13, Reconcile m5 +[MS controller] 
Iteration 14, Reconcile ms1, 1/1 replicas () +[M controller] Iteration 14, Reconcile m4 +[MD controller] Iteration 14, Reconcile md [MD controller] - Input to rollout planner - md, 2/3 replicas - - ms1, 0/0 replicas () + md, 3/3 replicas + - ms1, 0/1 replicas () - ms2, 2/2 replicas (m4,m5) [MD controller] - Result of rollout planner md, 2/3 replicas - ms1, 0/0 replicas () - - ms2, 2/3 replicas (m4,m5) -[MD controller] Iteration 10, Reconcile md + - ms2, 2/2 replicas (m4,m5) +[MS controller] Iteration 14, Reconcile ms2, 2/2 replicas (m4,m5) +[M controller] Iteration 15, Reconcile m5 +[MD controller] Iteration 15, Reconcile md [MD controller] - Input to rollout planner md, 2/3 replicas - ms1, 0/0 replicas () - - ms2, 2/3 replicas (m4,m5) + - ms2, 2/2 replicas (m4,m5) [MD controller] - Result of rollout planner md, 2/3 replicas - ms1, 0/0 replicas () - ms2, 2/3 replicas (m4,m5) -[M controller] Iteration 11, Reconcile m5 -[M controller] Iteration 11, Reconcile m4 -[MS controller] Iteration 11, Reconcile ms2, 2/3 replicas (m4,m5) +[M controller] Iteration 15, Reconcile m4 +[MS controller] Iteration 15, Reconcile ms2, 2/3 replicas (m4,m5) [MS controller] - ms2 scale up to 3/3 replicas (m6 created) -[MS controller] Iteration 11, Reconcile ms1, 0/0 replicas () -[MD controller] Iteration 11, Reconcile md +[M controller] Iteration 16, Reconcile m5 +[M controller] Iteration 16, Reconcile m6 +[M controller] Iteration 16, Reconcile m4 +[MD controller] Iteration 16, Reconcile md [MD controller] - Input to rollout planner md, 2/3 replicas - ms1, 0/0 replicas () @@ -202,15 +224,8 @@ md, 3/3 replicas - ms1, 0/0 replicas () - ms2, 3/3 replicas (m4,m5,m6) -[MD controller] Iteration 11, Reconcile md -[MD controller] - Input to rollout planner - md, 3/3 replicas - - ms1, 0/0 replicas () - - ms2, 3/3 replicas (m4,m5,m6) -[MD controller] - Result of rollout planner - md, 3/3 replicas - - ms1, 0/0 replicas () - - ms2, 3/3 replicas (m4,m5,m6) +[MS controller] Iteration 16, Reconcile ms1, 0/0 replicas () +[MS controller] Iteration 16, Reconcile ms1, 0/0 replicas () [Test] Final state md, 3/3 replicas - ms1, 0/0 replicas () diff --git a/internal/controllers/machinedeployment/testdata/ondelete/3 replicas, maxuserunavailable 1.test.log.golden b/internal/controllers/machinedeployment/testdata/ondelete/3 replicas, maxuserunavailable 1.test.log.golden index 079461a6318b..4adcb9de820f 100644 --- a/internal/controllers/machinedeployment/testdata/ondelete/3 replicas, maxuserunavailable 1.test.log.golden +++ b/internal/controllers/machinedeployment/testdata/ondelete/3 replicas, maxuserunavailable 1.test.log.golden @@ -3,13 +3,11 @@ [Test] Initial state md, 3/3 replicas - ms1, 3/3 replicas (m1,m2,m3) - - ms2, 0/0 replicas () [Test] Rollout 3 replicas, onDeleteStrategy [MD controller] Iteration 1, Reconcile md [MD controller] - Input to rollout planner md, 3/3 replicas - ms1, 3/3 replicas (m1,m2,m3) - - ms2, 0/0 replicas () [MD controller] - Result of rollout planner md, 3/3 replicas - ms1, 3/3 replicas (m1,m2,m3) diff --git a/internal/controllers/machinedeployment/testdata/ondelete/6 replicas, maxuserunavailable 2, random(0).test.log.golden b/internal/controllers/machinedeployment/testdata/ondelete/6 replicas, maxuserunavailable 2, random(0).test.log.golden index 6b72ae6d3ccf..5a6b8d642576 100644 --- a/internal/controllers/machinedeployment/testdata/ondelete/6 replicas, maxuserunavailable 2, random(0).test.log.golden +++ b/internal/controllers/machinedeployment/testdata/ondelete/6 replicas, maxuserunavailable 2, 
random(0).test.log.golden @@ -3,314 +3,295 @@ [Test] Initial state md, 6/6 replicas - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) - - ms2, 0/0 replicas () [Test] Rollout 6 replicas, onDeleteStrategy, random(0) +[M controller] Iteration 1, Reconcile m1 +[M controller] Iteration 1, Reconcile m3 [M controller] Iteration 1, Reconcile m4 [M controller] Iteration 1, Reconcile m5 +[M controller] Iteration 1, Reconcile m6 +[MD controller] Iteration 1, Reconcile md +[MD controller] - Input to rollout planner + md, 6/6 replicas + - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) +[MD controller] - Result of rollout planner + md, 6/6 replicas + - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) + - ms2, 0/0 replicas () [M controller] Iteration 1, Reconcile m2 [M controller] Iteration 1, Reconcile m6 -[M controller] Iteration 1, Reconcile m3 -[M controller] Iteration 2, Reconcile m4 [M controller] Iteration 2, Reconcile m6 -[M controller] Iteration 2, Reconcile m2 -[MS controller] Iteration 2, Reconcile ms2, 0/0 replicas () +[MD controller] Iteration 2, Reconcile md +[MD controller] - Input to rollout planner + md, 6/6 replicas + - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) + - ms2, 0/0 replicas () +[MD controller] - Result of rollout planner + md, 6/6 replicas + - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) + - ms2, 0/0 replicas () +[User] Iteration 2, Deleting machine m3 +[MS controller] Iteration 2, Reconcile ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) [M controller] Iteration 2, Reconcile m3 -[M controller] Iteration 3, Reconcile m4 +[M controller] - m3 finalizer removed [M controller] Iteration 3, Reconcile m5 +[MS controller] Iteration 3, Reconcile ms1, 6/6 replicas (m1,m2,m4,m5,m6) [M controller] Iteration 3, Reconcile m2 -[M controller] Iteration 3, Reconcile m1 -[MS controller] Iteration 3, Reconcile ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) -[M controller] Iteration 3, Reconcile m3 -[MS controller] Iteration 3, Reconcile ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) [MS controller] Iteration 3, Reconcile ms2, 0/0 replicas () -[M controller] Iteration 4, Reconcile m5 -[MS controller] Iteration 4, Reconcile ms2, 0/0 replicas () -[M controller] Iteration 4, Reconcile m4 -[M controller] Iteration 4, Reconcile m2 -[M controller] Iteration 4, Reconcile m1 -[M controller] Iteration 4, Reconcile m3 -[MS controller] Iteration 4, Reconcile ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) -[M controller] Iteration 4, Reconcile m6 +[M controller] Iteration 3, Reconcile m4 +[M controller] Iteration 3, Reconcile m1 +[M controller] Iteration 3, Reconcile m2 +[M controller] Iteration 3, Reconcile m1 [MD controller] Iteration 4, Reconcile md [MD controller] - Input to rollout planner md, 6/6 replicas - - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) + - ms1, 5/6 replicas (m1,m2,m4,m5,m6) - ms2, 0/0 replicas () [MD controller] - Result of rollout planner - md, 6/6 replicas - - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) + md, 5/6 replicas + - ms1, 5/5 replicas (m1,m2,m4,m5,m6) - ms2, 0/0 replicas () +[M controller] Iteration 4, Reconcile m4 +[MS controller] Iteration 4, Reconcile ms1, 5/5 replicas (m1,m2,m4,m5,m6) +[MS controller] Iteration 4, Reconcile ms2, 0/0 replicas () +[User] Iteration 4, Deleting machine m5 +[M controller] Iteration 4, Reconcile m2 +[MS controller] Iteration 4, Reconcile ms2, 0/0 replicas () +[M controller] Iteration 4, Reconcile m6 +[M controller] Iteration 4, Reconcile m6 [MD controller] Iteration 5, Reconcile md [MD controller] - Input to rollout planner - md, 6/6 replicas - - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) + md, 5/6 replicas + - ms1, 5/5 replicas 
(m1,m2,m4,m5,m6) - ms2, 0/0 replicas () [MD controller] - Result of rollout planner - md, 6/6 replicas - - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) - - ms2, 0/0 replicas () -[M controller] Iteration 5, Reconcile m6 + md, 5/6 replicas + - ms1, 5/5 replicas (m1,m2,m4,m5,m6) + - ms2, 0/1 replicas () [M controller] Iteration 5, Reconcile m2 [M controller] Iteration 5, Reconcile m1 [M controller] Iteration 5, Reconcile m6 -[User] Iteration 5, Deleting machine m3 -[M controller] Iteration 5, Reconcile m4 -[MS controller] Iteration 5, Reconcile ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) -[User] Iteration 5, Deleting machine m2 [M controller] Iteration 5, Reconcile m2 -[M controller] - m2 finalizer removed +[M controller] Iteration 5, Reconcile m6 +[M controller] Iteration 5, Reconcile m6 +[MS controller] Iteration 5, Reconcile ms1, 5/5 replicas (m1,m2,m4,m5,m6) +[M controller] Iteration 6, Reconcile m5 +[M controller] - m5 finalizer removed +[MS controller] Iteration 6, Reconcile ms1, 5/5 replicas (m1,m2,m4,m6) +[MD controller] Iteration 6, Reconcile md +[MD controller] - Input to rollout planner + md, 5/6 replicas + - ms1, 4/5 replicas (m1,m2,m4,m6) + - ms2, 0/1 replicas () +[MD controller] - Result of rollout planner + md, 4/6 replicas + - ms1, 4/4 replicas (m1,m2,m4,m6) + - ms2, 0/1 replicas () +[M controller] Iteration 6, Reconcile m1 +[M controller] Iteration 6, Reconcile m1 +[M controller] Iteration 6, Reconcile m6 [M controller] Iteration 6, Reconcile m6 -[M controller] Iteration 6, Reconcile m3 -[M controller] - m3 finalizer removed -[MS controller] Iteration 6, Reconcile ms1, 6/6 replicas (m1,m4,m5,m6) -[M controller] Iteration 6, Reconcile m4 -[M controller] Iteration 6, Reconcile m4 -[MS controller] Iteration 6, Reconcile ms2, 0/0 replicas () [M controller] Iteration 6, Reconcile m4 -[M controller] Iteration 6, Reconcile m1 -[M controller] Iteration 6, Reconcile m5 -[MS controller] Iteration 7, Reconcile ms2, 0/0 replicas () -[M controller] Iteration 7, Reconcile m4 -[M controller] Iteration 7, Reconcile m6 +[M controller] Iteration 7, Reconcile m2 +[MS controller] Iteration 7, Reconcile ms1, 4/4 replicas (m1,m2,m4,m6) +[MS controller] Iteration 7, Reconcile ms2, 0/1 replicas () +[MS controller] - ms2 scale up to 1/1 replicas (m7 created) [M controller] Iteration 7, Reconcile m1 -[MD controller] Iteration 8, Reconcile md +[M controller] Iteration 7, Reconcile m6 +[M controller] Iteration 7, Reconcile m6 +[MD controller] Iteration 7, Reconcile md [MD controller] - Input to rollout planner - md, 6/6 replicas - - ms1, 4/6 replicas (m1,m4,m5,m6) - - ms2, 0/0 replicas () -[MD controller] - Result of rollout planner md, 4/6 replicas - - ms1, 4/4 replicas (m1,m4,m5,m6) - - ms2, 0/0 replicas () -[M controller] Iteration 8, Reconcile m1 -[MS controller] Iteration 8, Reconcile ms2, 0/0 replicas () -[M controller] Iteration 8, Reconcile m5 + - ms1, 4/4 replicas (m1,m2,m4,m6) + - ms2, 1/1 replicas (m7) +[MD controller] - Result of rollout planner + md, 5/6 replicas + - ms1, 4/4 replicas (m1,m2,m4,m6) + - ms2, 1/2 replicas (m7) +[M controller] Iteration 8, Reconcile m2 [M controller] Iteration 8, Reconcile m6 -[MS controller] Iteration 8, Reconcile ms1, 4/4 replicas (m1,m4,m5,m6) +[MS controller] Iteration 8, Reconcile ms1, 4/4 replicas (m1,m2,m4,m6) +[M controller] Iteration 8, Reconcile m4 +[M controller] Iteration 8, Reconcile m4 +[M controller] Iteration 8, Reconcile m7 +[M controller] Iteration 8, Reconcile m7 [M controller] Iteration 8, Reconcile m4 -[M controller] Iteration 9, Reconcile m6 -[MS 
controller] Iteration 9, Reconcile ms1, 4/4 replicas (m1,m4,m5,m6) -[M controller] Iteration 9, Reconcile m5 +[User] Iteration 8, Deleting machine m1 +[M controller] Iteration 8, Reconcile m7 +[MD controller] Iteration 8, Reconcile md +[MD controller] - Input to rollout planner + md, 5/6 replicas + - ms1, 4/4 replicas (m1,m2,m4,m6) + - ms2, 1/2 replicas (m7) +[MD controller] - Result of rollout planner + md, 5/6 replicas + - ms1, 4/4 replicas (m1,m2,m4,m6) + - ms2, 1/2 replicas (m7) [M controller] Iteration 9, Reconcile m1 +[M controller] - m1 finalizer removed [M controller] Iteration 9, Reconcile m4 -[M controller] Iteration 9, Reconcile m4 -[MS controller] Iteration 10, Reconcile ms1, 4/4 replicas (m1,m4,m5,m6) -[M controller] Iteration 10, Reconcile m5 -[M controller] Iteration 10, Reconcile m1 -[M controller] Iteration 10, Reconcile m4 -[M controller] Iteration 10, Reconcile m6 -[M controller] Iteration 11, Reconcile m1 -[MD controller] Iteration 11, Reconcile md +[M controller] Iteration 9, Reconcile m2 +[MD controller] Iteration 9, Reconcile md [MD controller] - Input to rollout planner - md, 4/6 replicas - - ms1, 4/4 replicas (m1,m4,m5,m6) - - ms2, 0/0 replicas () + md, 5/6 replicas + - ms1, 4/4 replicas (m2,m4,m6) + - ms2, 1/2 replicas (m7) [MD controller] - Result of rollout planner - md, 4/6 replicas - - ms1, 4/4 replicas (m1,m4,m5,m6) - - ms2, 0/2 replicas () -[M controller] Iteration 11, Reconcile m6 -[M controller] Iteration 11, Reconcile m5 -[MS controller] Iteration 11, Reconcile ms2, 0/2 replicas () -[MS controller] - ms2 scale up to 2/2 replicas (m7,m8 created) -[M controller] Iteration 11, Reconcile m6 -[User] Iteration 11, Deleting machine m6 -[M controller] Iteration 11, Reconcile m5 -[M controller] Iteration 12, Reconcile m7 -[M controller] Iteration 12, Reconcile m4 -[M controller] Iteration 12, Reconcile m8 -[M controller] Iteration 12, Reconcile m6 + md, 5/6 replicas + - ms1, 4/4 replicas (m2,m4,m6) + - ms2, 1/2 replicas (m7) +[MS controller] Iteration 9, Reconcile ms2, 1/2 replicas (m7) +[MS controller] - ms2 scale up to 2/2 replicas (m8 created) +[MS controller] Iteration 9, Reconcile ms1, 4/4 replicas (m2,m4,m6) +[M controller] Iteration 9, Reconcile m7 +[M controller] Iteration 10, Reconcile m7 +[User] Iteration 10, Deleting machine m6 +[M controller] Iteration 10, Reconcile m4 +[MS controller] Iteration 10, Reconcile ms1, 3/4 replicas (m2,m4,m6) +[MS controller] Iteration 10, Reconcile ms2, 2/2 replicas (m7,m8) +[MS controller] Iteration 10, Reconcile ms2, 2/2 replicas (m7,m8) +[M controller] Iteration 10, Reconcile m6 [M controller] - m6 finalizer removed -[MS controller] Iteration 12, Reconcile ms2, 2/2 replicas (m7,m8) -[MD controller] Iteration 12, Reconcile md +[MD controller] Iteration 10, Reconcile md [MD controller] - Input to rollout planner - md, 4/6 replicas - - ms1, 4/4 replicas (m1,m4,m5) + md, 5/6 replicas + - ms1, 3/4 replicas (m2,m4) - ms2, 2/2 replicas (m7,m8) [MD controller] - Result of rollout planner - md, 6/6 replicas - - ms1, 4/4 replicas (m1,m4,m5) + md, 5/6 replicas + - ms1, 3/3 replicas (m2,m4) - ms2, 2/2 replicas (m7,m8) -[M controller] Iteration 12, Reconcile m1 +[MS controller] Iteration 11, Reconcile ms2, 2/2 replicas (m7,m8) +[M controller] Iteration 11, Reconcile m8 +[MS controller] Iteration 11, Reconcile ms2, 2/2 replicas (m7,m8) +[M controller] Iteration 11, Reconcile m4 +[M controller] Iteration 11, Reconcile m2 +[M controller] Iteration 11, Reconcile m7 +[M controller] Iteration 11, Reconcile m7 +[M controller] Iteration 12, 
Reconcile m2 +[MS controller] Iteration 12, Reconcile ms2, 2/2 replicas (m7,m8) [M controller] Iteration 12, Reconcile m4 -[MS controller] Iteration 12, Reconcile ms1, 4/4 replicas (m1,m4,m5) -[M controller] Iteration 13, Reconcile m7 +[M controller] Iteration 12, Reconcile m7 +[MS controller] Iteration 12, Reconcile ms1, 3/3 replicas (m2,m4) +[M controller] Iteration 12, Reconcile m8 +[M controller] Iteration 12, Reconcile m7 +[M controller] Iteration 12, Reconcile m7 +[M controller] Iteration 13, Reconcile m8 +[MS controller] Iteration 13, Reconcile ms1, 2/3 replicas (m2,m4) +[M controller] Iteration 13, Reconcile m8 [M controller] Iteration 13, Reconcile m4 [MS controller] Iteration 13, Reconcile ms2, 2/2 replicas (m7,m8) -[M controller] Iteration 13, Reconcile m8 -[M controller] Iteration 13, Reconcile m7 -[User] Iteration 13, Deleting machine m4 -[M controller] Iteration 13, Reconcile m5 -[MD controller] Iteration 14, Reconcile md -[MD controller] - Input to rollout planner - md, 6/6 replicas - - ms1, 3/4 replicas (m1,m4,m5) - - ms2, 2/2 replicas (m7,m8) -[MD controller] - Result of rollout planner - md, 5/6 replicas - - ms1, 3/3 replicas (m1,m4,m5) - - ms2, 2/2 replicas (m7,m8) -[M controller] Iteration 14, Reconcile m1 -[M controller] Iteration 14, Reconcile m8 -[MS controller] Iteration 14, Reconcile ms1, 3/3 replicas (m1,m4,m5) -[MS controller] Iteration 14, Reconcile ms2, 2/2 replicas (m7,m8) -[M controller] Iteration 14, Reconcile m4 -[M controller] - m4 finalizer removed -[M controller] Iteration 14, Reconcile m7 -[M controller] Iteration 15, Reconcile m8 -[MS controller] Iteration 15, Reconcile ms2, 2/2 replicas (m7,m8) -[MD controller] Iteration 15, Reconcile md +[MS controller] Iteration 13, Reconcile ms2, 2/2 replicas (m7,m8) +[M controller] Iteration 13, Reconcile m2 +[MD controller] Iteration 13, Reconcile md [MD controller] - Input to rollout planner md, 5/6 replicas - - ms1, 3/3 replicas (m1,m5) + - ms1, 2/3 replicas (m2,m4) - ms2, 2/2 replicas (m7,m8) [MD controller] - Result of rollout planner - md, 5/6 replicas - - ms1, 3/3 replicas (m1,m5) + md, 4/6 replicas + - ms1, 2/2 replicas (m2,m4) - ms2, 2/3 replicas (m7,m8) -[M controller] Iteration 16, Reconcile m5 -[MS controller] Iteration 16, Reconcile ms1, 3/3 replicas (m1,m5) -[MS controller] Iteration 16, Reconcile ms2, 2/3 replicas (m7,m8) -[MS controller] - ms2 scale up to 3/3 replicas (m9 created) -[M controller] Iteration 16, Reconcile m7 -[MD controller] Iteration 16, Reconcile md -[MD controller] - Input to rollout planner - md, 5/6 replicas - - ms1, 2/3 replicas (m1,m5) - - ms2, 3/3 replicas (m7,m8,m9) -[MD controller] - Result of rollout planner - md, 5/6 replicas - - ms1, 2/2 replicas (m1,m5) - - ms2, 3/3 replicas (m7,m8,m9) -[M controller] Iteration 16, Reconcile m1 -[MD controller] Iteration 16, Reconcile md -[MD controller] - Input to rollout planner - md, 5/6 replicas - - ms1, 2/2 replicas (m1,m5) - - ms2, 3/3 replicas (m7,m8,m9) -[MD controller] - Result of rollout planner - md, 5/6 replicas - - ms1, 2/2 replicas (m1,m5) - - ms2, 3/4 replicas (m7,m8,m9) -[M controller] Iteration 17, Reconcile m9 -[M controller] Iteration 17, Reconcile m1 -[M controller] Iteration 17, Reconcile m8 -[M controller] Iteration 17, Reconcile m7 -[User] Iteration 17, Deleting machine m1 -[M controller] Iteration 17, Reconcile m8 -[M controller] Iteration 17, Reconcile m5 -[MS controller] Iteration 17, Reconcile ms2, 3/4 replicas (m7,m8,m9) -[MS controller] - ms2 scale up to 4/4 replicas (m10 created) -[MD controller] Iteration 
17, Reconcile md +[M controller] Iteration 14, Reconcile m2 +[MD controller] Iteration 14, Reconcile md [MD controller] - Input to rollout planner - md, 5/6 replicas - - ms1, 2/2 replicas (m1,m5) - - ms2, 4/4 replicas (m7,m8,m9,m10) + md, 4/6 replicas + - ms1, 2/2 replicas (m2,m4) + - ms2, 2/3 replicas (m7,m8) [MD controller] - Result of rollout planner - md, 6/6 replicas - - ms1, 2/2 replicas (m1,m5) - - ms2, 4/4 replicas (m7,m8,m9,m10) -[MS controller] Iteration 18, Reconcile ms2, 4/4 replicas (m7,m8,m9,m10) -[M controller] Iteration 18, Reconcile m8 -[M controller] Iteration 18, Reconcile m9 -[MD controller] Iteration 18, Reconcile md + md, 4/6 replicas + - ms1, 2/2 replicas (m2,m4) + - ms2, 2/4 replicas (m7,m8) +[M controller] Iteration 14, Reconcile m8 +[M controller] Iteration 14, Reconcile m7 +[MS controller] Iteration 14, Reconcile ms2, 2/4 replicas (m7,m8) +[MS controller] - ms2 scale up to 4/4 replicas (m9,m10 created) +[M controller] Iteration 14, Reconcile m8 +[User] Iteration 14, Deleting machine m4 +[M controller] Iteration 14, Reconcile m7 +[M controller] Iteration 15, Reconcile m9 +[M controller] Iteration 15, Reconcile m4 +[M controller] - m4 finalizer removed +[M controller] Iteration 15, Reconcile m10 +[M controller] Iteration 15, Reconcile m8 +[MS controller] Iteration 15, Reconcile ms2, 4/4 replicas (m7,m8,m9,m10) +[MD controller] Iteration 15, Reconcile md [MD controller] - Input to rollout planner - md, 6/6 replicas - - ms1, 2/2 replicas (m1,m5) + md, 4/6 replicas + - ms1, 2/2 replicas (m2) - ms2, 4/4 replicas (m7,m8,m9,m10) [MD controller] - Result of rollout planner md, 6/6 replicas - - ms1, 2/2 replicas (m1,m5) + - ms1, 2/2 replicas (m2) - ms2, 4/4 replicas (m7,m8,m9,m10) -[M controller] Iteration 18, Reconcile m5 -[M controller] Iteration 18, Reconcile m7 -[M controller] Iteration 19, Reconcile m10 -[M controller] Iteration 19, Reconcile m8 -[M controller] Iteration 19, Reconcile m1 -[M controller] - m1 finalizer removed -[M controller] Iteration 19, Reconcile m5 -[M controller] Iteration 19, Reconcile m5 -[MS controller] Iteration 19, Reconcile ms2, 4/4 replicas (m7,m8,m9,m10) -[MS controller] Iteration 19, Reconcile ms1, 2/2 replicas (m5) -[MS controller] Iteration 19, Reconcile ms1, 1/2 replicas (m5) -[M controller] Iteration 19, Reconcile m5 -[M controller] Iteration 19, Reconcile m10 -[MD controller] Iteration 19, Reconcile md +[M controller] Iteration 15, Reconcile m2 +[MS controller] Iteration 15, Reconcile ms1, 2/2 replicas (m2) +[M controller] Iteration 16, Reconcile m9 +[M controller] Iteration 16, Reconcile m7 +[MS controller] Iteration 16, Reconcile ms2, 4/4 replicas (m7,m8,m9,m10) +[M controller] Iteration 16, Reconcile m10 +[M controller] Iteration 16, Reconcile m9 +[User] Iteration 16, Deleting machine m2 +[M controller] Iteration 16, Reconcile m8 +[MD controller] Iteration 17, Reconcile md [MD controller] - Input to rollout planner md, 6/6 replicas - - ms1, 1/2 replicas (m5) + - ms1, 1/2 replicas (m2) - ms2, 4/4 replicas (m7,m8,m9,m10) [MD controller] - Result of rollout planner md, 5/6 replicas - - ms1, 1/1 replicas (m5) + - ms1, 1/1 replicas (m2) - ms2, 4/4 replicas (m7,m8,m9,m10) -[M controller] Iteration 19, Reconcile m9 -[M controller] Iteration 20, Reconcile m7 -[M controller] Iteration 20, Reconcile m5 -[User] Iteration 20, Deleting machine m5 -[MS controller] Iteration 20, Reconcile ms2, 4/4 replicas (m7,m8,m9,m10) -[MS controller] Iteration 20, Reconcile ms2, 4/4 replicas (m7,m8,m9,m10) -[M controller] Iteration 20, Reconcile m5 -[M 
controller] - m5 finalizer removed -[MS controller] Iteration 20, Reconcile ms2, 4/4 replicas (m7,m8,m9,m10) -[M controller] Iteration 20, Reconcile m8 -[M controller] Iteration 21, Reconcile m8 -[M controller] Iteration 21, Reconcile m7 -[M controller] Iteration 21, Reconcile m7 -[MD controller] Iteration 21, Reconcile md +[M controller] Iteration 17, Reconcile m2 +[M controller] - m2 finalizer removed +[M controller] Iteration 17, Reconcile m10 +[MS controller] Iteration 17, Reconcile ms1, 1/1 replicas () +[MS controller] Iteration 17, Reconcile ms2, 4/4 replicas (m7,m8,m9,m10) +[M controller] Iteration 17, Reconcile m7 +[M controller] Iteration 17, Reconcile m9 +[M controller] Iteration 18, Reconcile m10 +[MS controller] Iteration 18, Reconcile ms2, 4/4 replicas (m7,m8,m9,m10) +[MD controller] Iteration 18, Reconcile md [MD controller] - Input to rollout planner md, 5/6 replicas - - ms1, 1/1 replicas () + - ms1, 0/1 replicas () - ms2, 4/4 replicas (m7,m8,m9,m10) [MD controller] - Result of rollout planner - md, 5/6 replicas - - ms1, 1/1 replicas () + md, 4/6 replicas + - ms1, 0/0 replicas () - ms2, 4/5 replicas (m7,m8,m9,m10) -[M controller] Iteration 21, Reconcile m7 -[MS controller] Iteration 21, Reconcile ms1, 1/1 replicas () -[M controller] Iteration 21, Reconcile m10 -[MS controller] Iteration 21, Reconcile ms2, 4/5 replicas (m7,m8,m9,m10) +[M controller] Iteration 19, Reconcile m8 +[MS controller] Iteration 19, Reconcile ms1, 0/0 replicas () +[MS controller] Iteration 19, Reconcile ms2, 4/5 replicas (m7,m8,m9,m10) [MS controller] - ms2 scale up to 5/5 replicas (m11 created) -[M controller] Iteration 21, Reconcile m10 -[MS controller] Iteration 22, Reconcile ms2, 5/5 replicas (m7,m8,m9,m10,m11) -[M controller] Iteration 22, Reconcile m10 -[M controller] Iteration 22, Reconcile m11 -[MD controller] Iteration 22, Reconcile md +[M controller] Iteration 19, Reconcile m9 +[MD controller] Iteration 19, Reconcile md [MD controller] - Input to rollout planner - md, 5/6 replicas - - ms1, 0/1 replicas () + md, 4/6 replicas + - ms1, 0/0 replicas () - ms2, 5/5 replicas (m7,m8,m9,m10,m11) [MD controller] - Result of rollout planner md, 5/6 replicas - ms1, 0/0 replicas () - - ms2, 5/5 replicas (m7,m8,m9,m10,m11) -[M controller] Iteration 22, Reconcile m11 -[M controller] Iteration 22, Reconcile m8 -[M controller] Iteration 22, Reconcile m7 -[M controller] Iteration 22, Reconcile m7 -[M controller] Iteration 23, Reconcile m10 -[M controller] Iteration 23, Reconcile m9 -[MD controller] Iteration 23, Reconcile md + - ms2, 5/6 replicas (m7,m8,m9,m10,m11) +[M controller] Iteration 19, Reconcile m7 +[MD controller] Iteration 19, Reconcile md [MD controller] - Input to rollout planner md, 5/6 replicas - ms1, 0/0 replicas () - - ms2, 5/5 replicas (m7,m8,m9,m10,m11) + - ms2, 5/6 replicas (m7,m8,m9,m10,m11) [MD controller] - Result of rollout planner md, 5/6 replicas - ms1, 0/0 replicas () - ms2, 5/6 replicas (m7,m8,m9,m10,m11) -[MS controller] Iteration 23, Reconcile ms2, 5/6 replicas (m7,m8,m9,m10,m11) +[M controller] Iteration 20, Reconcile m11 +[M controller] Iteration 20, Reconcile m7 +[M controller] Iteration 20, Reconcile m10 +[M controller] Iteration 20, Reconcile m9 +[M controller] Iteration 20, Reconcile m10 +[M controller] Iteration 20, Reconcile m8 +[MS controller] Iteration 20, Reconcile ms2, 5/6 replicas (m7,m8,m9,m10,m11) [MS controller] - ms2 scale up to 6/6 replicas (m12 created) -[MS controller] Iteration 23, Reconcile ms1, 0/0 replicas () -[MS controller] Iteration 23, Reconcile ms2, 
6/6 replicas (m7,m8,m9,m10,m11,m12) -[MS controller] Iteration 23, Reconcile ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12) -[M controller] Iteration 23, Reconcile m10 -[M controller] Iteration 23, Reconcile m7 -[MS controller] Iteration 24, Reconcile ms1, 0/0 replicas () -[M controller] Iteration 24, Reconcile m10 -[M controller] Iteration 24, Reconcile m7 -[M controller] Iteration 24, Reconcile m8 -[MD controller] Iteration 24, Reconcile md +[MD controller] Iteration 20, Reconcile md [MD controller] - Input to rollout planner md, 5/6 replicas - ms1, 0/0 replicas () @@ -319,8 +300,6 @@ md, 6/6 replicas - ms1, 0/0 replicas () - ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12) -[M controller] Iteration 24, Reconcile m9 -[M controller] Iteration 24, Reconcile m12 [Test] Final state md, 6/6 replicas - ms1, 0/0 replicas () diff --git a/internal/controllers/machinedeployment/testdata/ondelete/6 replicas, maxuserunavailable 2.test.log.golden b/internal/controllers/machinedeployment/testdata/ondelete/6 replicas, maxuserunavailable 2.test.log.golden index 3da216fc3550..f3a61b8fb2f0 100644 --- a/internal/controllers/machinedeployment/testdata/ondelete/6 replicas, maxuserunavailable 2.test.log.golden +++ b/internal/controllers/machinedeployment/testdata/ondelete/6 replicas, maxuserunavailable 2.test.log.golden @@ -3,13 +3,11 @@ [Test] Initial state md, 6/6 replicas - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) - - ms2, 0/0 replicas () [Test] Rollout 6 replicas, onDeleteStrategy [MD controller] Iteration 1, Reconcile md [MD controller] - Input to rollout planner md, 6/6 replicas - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) - - ms2, 0/0 replicas () [MD controller] - Result of rollout planner md, 6/6 replicas - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6) diff --git a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 0, maxunavailable 1, random(0).test.log.golden b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 0, maxunavailable 1, random(0).test.log.golden index 5a971c842f9d..abef26c32957 100644 --- a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 0, maxunavailable 1, random(0).test.log.golden +++ b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 0, maxunavailable 1, random(0).test.log.golden @@ -3,23 +3,19 @@ [Test] Initial state md, 3/3 replicas - ms1, 3/3 replicas (m1,m2,m3) - - ms2, 0/0 replicas () [Test] Rollout 3 replicas, MaxSurge=0, MaxUnavailable=1, random(0) -[MS controller] Iteration 1, Reconcile ms2, 0/0 replicas () -[MS controller] Iteration 1, Reconcile ms1, 3/3 replicas (m1,m2,m3) [MD controller] Iteration 1, Reconcile md [MD controller] - Input to rollout planner md, 3/3 replicas - ms1, 3/3 replicas (m1,m2,m3) - - ms2, 0/0 replicas () [MD controller] - Result of rollout planner md, 3/3 replicas - ms1, 3/2 replicas (m1,m2,m3) - ms2, 0/0 replicas () -[MS controller] Iteration 2, Reconcile ms1, 3/2 replicas (m1,m2,m3) +[MS controller] Iteration 1, Reconcile ms1, 3/2 replicas (m1,m2,m3) [MS controller] - ms1 scale down to 2/2 replicas (m1 deleted) -[MS controller] Iteration 2, Reconcile ms1, 2/2 replicas (m2,m3) -[MD controller] Iteration 3, Reconcile md +[MS controller] Iteration 1, Reconcile ms2, 0/0 replicas () +[MD controller] Iteration 2, Reconcile md [MD controller] - Input to rollout planner md, 3/3 replicas - ms1, 2/2 replicas (m2,m3) @@ -28,6 +24,17 @@ md, 2/3 replicas - ms1, 2/2 replicas (m2,m3) - ms2, 0/1 
replicas () +[MS controller] Iteration 2, Reconcile ms1, 2/2 replicas (m2,m3) +[MS controller] Iteration 2, Reconcile ms1, 2/2 replicas (m2,m3) +[MD controller] Iteration 3, Reconcile md +[MD controller] - Input to rollout planner + md, 2/3 replicas + - ms1, 2/2 replicas (m2,m3) + - ms2, 0/1 replicas () +[MD controller] - Result of rollout planner + md, 2/3 replicas + - ms1, 2/2 replicas (m2,m3) + - ms2, 0/1 replicas () [MS controller] Iteration 3, Reconcile ms2, 0/1 replicas () [MS controller] - ms2 scale up to 1/1 replicas (m4 created) [MD controller] Iteration 4, Reconcile md diff --git a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 0, maxunavailable 1.test.log.golden b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 0, maxunavailable 1.test.log.golden index dc291b7404d9..cc83eb496412 100644 --- a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 0, maxunavailable 1.test.log.golden +++ b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 0, maxunavailable 1.test.log.golden @@ -3,13 +3,11 @@ [Test] Initial state md, 3/3 replicas - ms1, 3/3 replicas (m1,m2,m3) - - ms2, 0/0 replicas () [Test] Rollout 3 replicas, MaxSurge=0, MaxUnavailable=1 [MD controller] Iteration 1, Reconcile md [MD controller] - Input to rollout planner md, 3/3 replicas - ms1, 3/3 replicas (m1,m2,m3) - - ms2, 0/0 replicas () [MD controller] - Result of rollout planner md, 3/3 replicas - ms1, 3/2 replicas (m1,m2,m3) diff --git a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 1, maxunavailable 0, random(0).test.log.golden b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 1, maxunavailable 0, random(0).test.log.golden index 5045410bc79a..3a78d5351d59 100644 --- a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 1, maxunavailable 0, random(0).test.log.golden +++ b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 1, maxunavailable 0, random(0).test.log.golden @@ -3,33 +3,19 @@ [Test] Initial state md, 3/3 replicas - ms1, 3/3 replicas (m1,m2,m3) - - ms2, 0/0 replicas () [Test] Rollout 3 replicas, MaxSurge=1, MaxUnavailable=0, random(0) -[MS controller] Iteration 1, Reconcile ms2, 0/0 replicas () -[MS controller] Iteration 1, Reconcile ms1, 3/3 replicas (m1,m2,m3) [MD controller] Iteration 1, Reconcile md [MD controller] - Input to rollout planner md, 3/3 replicas - ms1, 3/3 replicas (m1,m2,m3) - - ms2, 0/0 replicas () -[MD controller] - Result of rollout planner - md, 3/3 replicas - - ms1, 3/3 replicas (m1,m2,m3) - - ms2, 0/1 replicas () -[MS controller] Iteration 2, Reconcile ms1, 3/3 replicas (m1,m2,m3) -[MS controller] Iteration 2, Reconcile ms1, 3/3 replicas (m1,m2,m3) -[MD controller] Iteration 3, Reconcile md -[MD controller] - Input to rollout planner - md, 3/3 replicas - - ms1, 3/3 replicas (m1,m2,m3) - - ms2, 0/1 replicas () [MD controller] - Result of rollout planner md, 3/3 replicas - ms1, 3/3 replicas (m1,m2,m3) - ms2, 0/1 replicas () -[MS controller] Iteration 3, Reconcile ms2, 0/1 replicas () +[MS controller] Iteration 1, Reconcile ms1, 3/3 replicas (m1,m2,m3) +[MS controller] Iteration 1, Reconcile ms2, 0/1 replicas () [MS controller] - ms2 scale up to 1/1 replicas (m4 created) -[MD controller] Iteration 
4, Reconcile md
+[MD controller] Iteration 2, Reconcile md
 [MD controller] - Input to rollout planner
   md, 3/3 replicas
   - ms1, 3/3 replicas (m1,m2,m3)
@@ -38,11 +24,10 @@
   md, 4/3 replicas
   - ms1, 3/2 replicas (m1,m2,m3)
   - ms2, 1/1 replicas (m4)
-[MS controller] Iteration 4, Reconcile ms1, 3/2 replicas (m1,m2,m3)
+[MS controller] Iteration 2, Reconcile ms1, 3/2 replicas (m1,m2,m3)
 [MS controller] - ms1 scale down to 2/2 replicas (m1 deleted)
-[MS controller] Iteration 4, Reconcile ms2, 1/1 replicas (m4)
-[MS controller] Iteration 5, Reconcile ms2, 1/1 replicas (m4)
-[MD controller] Iteration 5, Reconcile md
+[MS controller] Iteration 2, Reconcile ms1, 2/2 replicas (m2,m3)
+[MD controller] Iteration 3, Reconcile md
 [MD controller] - Input to rollout planner
   md, 4/3 replicas
   - ms1, 2/2 replicas (m2,m3)
@@ -51,9 +36,9 @@
   md, 3/3 replicas
   - ms1, 2/2 replicas (m2,m3)
   - ms2, 1/2 replicas (m4)
-[MS controller] Iteration 6, Reconcile ms2, 1/2 replicas (m4)
+[MS controller] Iteration 3, Reconcile ms2, 1/2 replicas (m4)
 [MS controller] - ms2 scale up to 2/2 replicas (m5 created)
-[MD controller] Iteration 7, Reconcile md
+[MD controller] Iteration 4, Reconcile md
 [MD controller] - Input to rollout planner
   md, 3/3 replicas
   - ms1, 2/2 replicas (m2,m3)
@@ -62,21 +47,11 @@
   md, 4/3 replicas
   - ms1, 2/1 replicas (m2,m3)
   - ms2, 2/2 replicas (m4,m5)
-[MS controller] Iteration 7, Reconcile ms2, 2/2 replicas (m4,m5)
-[MD controller] Iteration 8, Reconcile md
-[MD controller] - Input to rollout planner
-  md, 4/3 replicas
-  - ms1, 2/1 replicas (m2,m3)
-  - ms2, 2/2 replicas (m4,m5)
-[MD controller] - Result of rollout planner
-  md, 4/3 replicas
-  - ms1, 2/1 replicas (m2,m3)
-  - ms2, 2/2 replicas (m4,m5)
-[MS controller] Iteration 8, Reconcile ms1, 2/1 replicas (m2,m3)
+[MS controller] Iteration 4, Reconcile ms1, 2/1 replicas (m2,m3)
 [MS controller] - ms1 scale down to 1/1 replicas (m2 deleted)
-[MS controller] Iteration 9, Reconcile ms2, 2/2 replicas (m4,m5)
-[MS controller] Iteration 9, Reconcile ms1, 1/1 replicas (m3)
-[MD controller] Iteration 9, Reconcile md
+[MS controller] Iteration 4, Reconcile ms2, 2/2 replicas (m4,m5)
+[MS controller] Iteration 5, Reconcile ms2, 2/2 replicas (m4,m5)
+[MD controller] Iteration 5, Reconcile md
 [MD controller] - Input to rollout planner
   md, 4/3 replicas
   - ms1, 1/1 replicas (m3)
@@ -85,11 +60,9 @@
   md, 3/3 replicas
   - ms1, 1/1 replicas (m3)
   - ms2, 2/3 replicas (m4,m5)
-[MS controller] Iteration 10, Reconcile ms2, 2/3 replicas (m4,m5)
+[MS controller] Iteration 6, Reconcile ms2, 2/3 replicas (m4,m5)
 [MS controller] - ms2 scale up to 3/3 replicas (m6 created)
-[MS controller] Iteration 10, Reconcile ms1, 1/1 replicas (m3)
-[MS controller] Iteration 11, Reconcile ms2, 3/3 replicas (m4,m5,m6)
-[MD controller] Iteration 11, Reconcile md
+[MD controller] Iteration 7, Reconcile md
 [MD controller] - Input to rollout planner
   md, 3/3 replicas
   - ms1, 1/1 replicas (m3)
@@ -98,16 +71,8 @@
   md, 4/3 replicas
   - ms1, 1/0 replicas (m3)
   - ms2, 3/3 replicas (m4,m5,m6)
-[MD controller] Iteration 12, Reconcile md
-[MD controller] - Input to rollout planner
-  md, 4/3 replicas
-  - ms1, 1/0 replicas (m3)
-  - ms2, 3/3 replicas (m4,m5,m6)
-[MD controller] - Result of rollout planner
-  md, 4/3 replicas
-  - ms1, 1/0 replicas (m3)
-  - ms2, 3/3 replicas (m4,m5,m6)
-[MD controller] Iteration 14, Reconcile md
+[MS controller] Iteration 7, Reconcile ms2, 3/3 replicas (m4,m5,m6)
+[MD controller] Iteration 8, Reconcile md
 [MD controller] - Input to rollout planner
   md, 4/3 replicas
   - ms1, 1/0 replicas (m3)
@@ -116,12 +81,11 @@
   md, 4/3 replicas
   - ms1, 1/0 replicas (m3)
   - ms2, 3/3 replicas (m4,m5,m6)
-[MS controller] Iteration 15, Reconcile ms2, 3/3 replicas (m4,m5,m6)
-[MS controller] Iteration 16, Reconcile ms2, 3/3 replicas (m4,m5,m6)
-[MS controller] Iteration 16, Reconcile ms2, 3/3 replicas (m4,m5,m6)
-[MS controller] Iteration 16, Reconcile ms1, 1/0 replicas (m3)
+[MS controller] Iteration 8, Reconcile ms1, 1/0 replicas (m3)
 [MS controller] - ms1 scale down to 0/0 replicas (m3 deleted)
-[MD controller] Iteration 16, Reconcile md
+[MS controller] Iteration 9, Reconcile ms2, 3/3 replicas (m4,m5,m6)
+[MS controller] Iteration 9, Reconcile ms1, 0/0 replicas ()
+[MD controller] Iteration 9, Reconcile md
 [MD controller] - Input to rollout planner
   md, 4/3 replicas
   - ms1, 0/0 replicas ()
diff --git a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 1, maxunavailable 0.test.log.golden b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 1, maxunavailable 0.test.log.golden
index 4d22fbea8db5..61fff85cb47f 100644
--- a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 1, maxunavailable 0.test.log.golden
+++ b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 3 replicas, maxsurge 1, maxunavailable 0.test.log.golden
@@ -3,13 +3,11 @@
 [Test] Initial state
   md, 3/3 replicas
   - ms1, 3/3 replicas (m1,m2,m3)
-  - ms2, 0/0 replicas ()
 [Test] Rollout 3 replicas, MaxSurge=1, MaxUnavailable=0
 [MD controller] Iteration 1, Reconcile md
 [MD controller] - Input to rollout planner
   md, 3/3 replicas
   - ms1, 3/3 replicas (m1,m2,m3)
-  - ms2, 0/0 replicas ()
 [MD controller] - Result of rollout planner
   md, 3/3 replicas
   - ms1, 3/3 replicas (m1,m2,m3)
diff --git a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 0, maxunavailable 10, random(0).test.log.golden b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 0, maxunavailable 10, random(0).test.log.golden
index 87e5e2c9323a..c84284cebd2c 100644
--- a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 0, maxunavailable 10, random(0).test.log.golden
+++ b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 0, maxunavailable 10, random(0).test.log.golden
@@ -3,23 +3,19 @@
 [Test] Initial state
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [Test] Rollout 6 replicas, MaxSurge=0, MaxUnavailable=10, random(0)
-[MS controller] Iteration 1, Reconcile ms2, 0/0 replicas ()
-[MS controller] Iteration 1, Reconcile ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
 [MD controller] Iteration 1, Reconcile md
 [MD controller] - Input to rollout planner
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [MD controller] - Result of rollout planner
   md, 6/6 replicas
   - ms1, 6/0 replicas (m1,m2,m3,m4,m5,m6)
   - ms2, 0/0 replicas ()
-[MS controller] Iteration 2, Reconcile ms1, 6/0 replicas (m1,m2,m3,m4,m5,m6)
+[MS controller] Iteration 1, Reconcile ms1, 6/0 replicas (m1,m2,m3,m4,m5,m6)
 [MS controller] - ms1 scale down to 0/0 replicas (m1,m2,m3,m4,m5,m6 deleted)
-[MS controller] Iteration 2, Reconcile ms1, 0/0 replicas ()
-[MD controller] Iteration 3, Reconcile md
+[MS controller] Iteration 1, Reconcile ms2, 0/0 replicas ()
+[MD controller] Iteration 2, Reconcile md
 [MD controller] - Input to rollout planner
   md, 6/6 replicas
   - ms1, 0/0 replicas ()
@@ -28,6 +24,17 @@
   md, 0/6 replicas
   - ms1, 0/0 replicas ()
   - ms2, 0/6 replicas ()
+[MS controller] Iteration 2, Reconcile ms1, 0/0 replicas ()
+[MS controller] Iteration 2, Reconcile ms1, 0/0 replicas ()
+[MD controller] Iteration 3, Reconcile md
+[MD controller] - Input to rollout planner
+  md, 0/6 replicas
+  - ms1, 0/0 replicas ()
+  - ms2, 0/6 replicas ()
+[MD controller] - Result of rollout planner
+  md, 0/6 replicas
+  - ms1, 0/0 replicas ()
+  - ms2, 0/6 replicas ()
 [MS controller] Iteration 3, Reconcile ms2, 0/6 replicas ()
 [MS controller] - ms2 scale up to 6/6 replicas (m7,m8,m9,m10,m11,m12 created)
 [MD controller] Iteration 4, Reconcile md
diff --git a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 0, maxunavailable 10.test.log.golden b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 0, maxunavailable 10.test.log.golden
index 9f557ca4cb64..a61c15cf58b8 100644
--- a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 0, maxunavailable 10.test.log.golden
+++ b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 0, maxunavailable 10.test.log.golden
@@ -3,13 +3,11 @@
 [Test] Initial state
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [Test] Rollout 6 replicas, MaxSurge=0, MaxUnavailable=10
 [MD controller] Iteration 1, Reconcile md
 [MD controller] - Input to rollout planner
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [MD controller] - Result of rollout planner
   md, 6/6 replicas
   - ms1, 6/0 replicas (m1,m2,m3,m4,m5,m6)
diff --git a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 1, maxunavailable 3, random(0).test.log.golden b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 1, maxunavailable 3, random(0).test.log.golden
index e0ed895585e0..f029f40374c9 100644
--- a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 1, maxunavailable 3, random(0).test.log.golden
+++ b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 1, maxunavailable 3, random(0).test.log.golden
@@ -3,60 +3,69 @@
 [Test] Initial state
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [Test] Rollout 6 replicas, MaxSurge=1, MaxUnavailable=3, random(0)
-[MS controller] Iteration 1, Reconcile ms2, 0/0 replicas ()
-[MS controller] Iteration 1, Reconcile ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
 [MD controller] Iteration 1, Reconcile md
 [MD controller] - Input to rollout planner
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [MD controller] - Result of rollout planner
   md, 6/6 replicas
   - ms1, 6/3 replicas (m1,m2,m3,m4,m5,m6)
   - ms2, 0/1 replicas ()
-[MS controller] Iteration 2, Reconcile ms1, 6/3 replicas (m1,m2,m3,m4,m5,m6)
+[MS controller] Iteration 1, Reconcile ms1, 6/3 replicas (m1,m2,m3,m4,m5,m6)
 [MS controller] - ms1 scale down to 3/3 replicas (m1,m2,m3 deleted)
-[MS controller] Iteration 2, Reconcile ms1, 3/3 replicas (m4,m5,m6)
-[MD controller] Iteration 3, Reconcile md
+[MS controller] Iteration 1, Reconcile ms2, 0/1 replicas ()
+[MS controller] - ms2 scale up to 1/1 replicas (m7 created)
+[MD controller] Iteration 2, Reconcile md
 [MD controller] - Input to rollout planner
   md, 6/6 replicas
   - ms1, 3/3 replicas (m4,m5,m6)
-  - ms2, 0/1 replicas ()
+  - ms2, 1/1 replicas (m7)
+[MD controller] - Result of rollout planner
+  md, 4/6 replicas
+  - ms1, 3/2 replicas (m4,m5,m6)
+  - ms2, 1/4 replicas (m7)
+[MS controller] Iteration 2, Reconcile ms1, 3/2 replicas (m4,m5,m6)
+[MS controller] - ms1 scale down to 2/2 replicas (m4 deleted)
+[MS controller] Iteration 2, Reconcile ms1, 2/2 replicas (m5,m6)
+[MD controller] Iteration 3, Reconcile md
+[MD controller] - Input to rollout planner
+  md, 4/6 replicas
+  - ms1, 2/2 replicas (m5,m6)
+  - ms2, 1/4 replicas (m7)
 [MD controller] - Result of rollout planner
   md, 3/6 replicas
-  - ms1, 3/3 replicas (m4,m5,m6)
-  - ms2, 0/4 replicas ()
-[MS controller] Iteration 3, Reconcile ms2, 0/4 replicas ()
-[MS controller] - ms2 scale up to 4/4 replicas (m7,m8,m9,m10 created)
+  - ms1, 2/2 replicas (m5,m6)
+  - ms2, 1/5 replicas (m7)
+[MS controller] Iteration 3, Reconcile ms2, 1/5 replicas (m7)
+[MS controller] - ms2 scale up to 5/5 replicas (m8,m9,m10,m11 created)
 [MD controller] Iteration 4, Reconcile md
 [MD controller] - Input to rollout planner
   md, 3/6 replicas
-  - ms1, 3/3 replicas (m4,m5,m6)
-  - ms2, 4/4 replicas (m7,m8,m9,m10)
+  - ms1, 2/2 replicas (m5,m6)
+  - ms2, 5/5 replicas (m7,m8,m9,m10,m11)
 [MD controller] - Result of rollout planner
   md, 7/6 replicas
-  - ms1, 3/0 replicas (m4,m5,m6)
-  - ms2, 4/4 replicas (m7,m8,m9,m10)
-[MS controller] Iteration 4, Reconcile ms1, 3/0 replicas (m4,m5,m6)
-[MS controller] - ms1 scale down to 0/0 replicas (m4,m5,m6 deleted)
-[MS controller] Iteration 4, Reconcile ms2, 4/4 replicas (m7,m8,m9,m10)
-[MS controller] Iteration 5, Reconcile ms2, 4/4 replicas (m7,m8,m9,m10)
+  - ms1, 2/0 replicas (m5,m6)
+  - ms2, 5/5 replicas (m7,m8,m9,m10,m11)
+[MS controller] Iteration 4, Reconcile ms1, 2/0 replicas (m5,m6)
+[MS controller] - ms1 scale down to 0/0 replicas (m5,m6 deleted)
+[MS controller] Iteration 4, Reconcile ms2, 5/5 replicas (m7,m8,m9,m10,m11)
+[MS controller] Iteration 5, Reconcile ms2, 5/5 replicas (m7,m8,m9,m10,m11)
 [MD controller] Iteration 5, Reconcile md
 [MD controller] - Input to rollout planner
   md, 7/6 replicas
   - ms1, 0/0 replicas ()
-  - ms2, 4/4 replicas (m7,m8,m9,m10)
+  - ms2, 5/5 replicas (m7,m8,m9,m10,m11)
 [MD controller] - Result of rollout planner
-  md, 4/6 replicas
+  md, 5/6 replicas
   - ms1, 0/0 replicas ()
-  - ms2, 4/6 replicas (m7,m8,m9,m10)
-[MS controller] Iteration 6, Reconcile ms2, 4/6 replicas (m7,m8,m9,m10)
-[MS controller] - ms2 scale up to 6/6 replicas (m11,m12 created)
+  - ms2, 5/6 replicas (m7,m8,m9,m10,m11)
+[MS controller] Iteration 6, Reconcile ms2, 5/6 replicas (m7,m8,m9,m10,m11)
+[MS controller] - ms2 scale up to 6/6 replicas (m12 created)
 [MD controller] Iteration 7, Reconcile md
 [MD controller] - Input to rollout planner
-  md, 4/6 replicas
+  md, 5/6 replicas
   - ms1, 0/0 replicas ()
   - ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
 [MD controller] - Result of rollout planner
diff --git a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 1, maxunavailable 3.test.log.golden b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 1, maxunavailable 3.test.log.golden
index 41f177ffa99c..d91c38e18bf5 100644
--- a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 1, maxunavailable 3.test.log.golden
+++ b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 1, maxunavailable 3.test.log.golden
@@ -3,13 +3,11 @@
 [Test] Initial state
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [Test] Rollout 6 replicas, MaxSurge=1, MaxUnavailable=3
 [MD controller] Iteration 1, Reconcile md
 [MD controller] - Input to rollout planner
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [MD controller] - Result of rollout planner
   md, 6/6 replicas
   - ms1, 6/3 replicas (m1,m2,m3,m4,m5,m6)
diff --git a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 10, maxunavailable 0, random(0).test.log.golden b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 10, maxunavailable 0, random(0).test.log.golden
index 067f8cb229e8..670b8ea588be 100644
--- a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 10, maxunavailable 0, random(0).test.log.golden
+++ b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 10, maxunavailable 0, random(0).test.log.golden
@@ -3,33 +3,19 @@
 [Test] Initial state
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [Test] Rollout 6 replicas, MaxSurge=10, MaxUnavailable=0, random(0)
-[MS controller] Iteration 1, Reconcile ms2, 0/0 replicas ()
-[MS controller] Iteration 1, Reconcile ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
 [MD controller] Iteration 1, Reconcile md
 [MD controller] - Input to rollout planner
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
-[MD controller] - Result of rollout planner
-  md, 6/6 replicas
-  - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/6 replicas ()
-[MS controller] Iteration 2, Reconcile ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-[MS controller] Iteration 2, Reconcile ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-[MD controller] Iteration 3, Reconcile md
-[MD controller] - Input to rollout planner
-  md, 6/6 replicas
-  - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/6 replicas ()
 [MD controller] - Result of rollout planner
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
   - ms2, 0/6 replicas ()
-[MS controller] Iteration 3, Reconcile ms2, 0/6 replicas ()
+[MS controller] Iteration 1, Reconcile ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
+[MS controller] Iteration 1, Reconcile ms2, 0/6 replicas ()
 [MS controller] - ms2 scale up to 6/6 replicas (m7,m8,m9,m10,m11,m12 created)
-[MD controller] Iteration 4, Reconcile md
+[MD controller] Iteration 2, Reconcile md
 [MD controller] - Input to rollout planner
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
@@ -38,11 +24,10 @@
   md, 12/6 replicas
   - ms1, 6/0 replicas (m1,m2,m3,m4,m5,m6)
   - ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
-[MS controller] Iteration 4, Reconcile ms1, 6/0 replicas (m1,m2,m3,m4,m5,m6)
+[MS controller] Iteration 2, Reconcile ms1, 6/0 replicas (m1,m2,m3,m4,m5,m6)
 [MS controller] - ms1 scale down to 0/0 replicas (m1,m2,m3,m4,m5,m6 deleted)
-[MS controller] Iteration 4, Reconcile ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
-[MS controller] Iteration 5, Reconcile ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
-[MD controller] Iteration 5, Reconcile md
+[MS controller] Iteration 2, Reconcile ms1, 0/0 replicas ()
+[MD controller] Iteration 3, Reconcile md
 [MD controller] - Input to rollout planner
   md, 12/6 replicas
   - ms1, 0/0 replicas ()
@@ -51,6 +36,7 @@
   md, 6/6 replicas
   - ms1, 0/0 replicas ()
   - ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
+[MS controller] Iteration 3, Reconcile ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
 [Test] Final state
   md, 6/6 replicas
   - ms1, 0/0 replicas ()
diff --git a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 10, maxunavailable 0.test.log.golden b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 10, maxunavailable 0.test.log.golden
index 655776816f16..0354b9ce6b02 100644
--- a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 10, maxunavailable 0.test.log.golden
+++ b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 10, maxunavailable 0.test.log.golden
@@ -3,13 +3,11 @@
 [Test] Initial state
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [Test] Rollout 6 replicas, MaxSurge=10, MaxUnavailable=0
 [MD controller] Iteration 1, Reconcile md
 [MD controller] - Input to rollout planner
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [MD controller] - Result of rollout planner
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
diff --git a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 3, maxunavailable 1, random(0).test.log.golden b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 3, maxunavailable 1, random(0).test.log.golden
index d2560cd9f190..fd8a3153fdbc 100644
--- a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 3, maxunavailable 1, random(0).test.log.golden
+++ b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 3, maxunavailable 1, random(0).test.log.golden
@@ -3,83 +3,58 @@
 [Test] Initial state
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [Test] Rollout 6 replicas, MaxSurge=3, MaxUnavailable=1, random(0)
-[MS controller] Iteration 1, Reconcile ms2, 0/0 replicas ()
-[MS controller] Iteration 1, Reconcile ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
 [MD controller] Iteration 1, Reconcile md
 [MD controller] - Input to rollout planner
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [MD controller] - Result of rollout planner
   md, 6/6 replicas
   - ms1, 6/5 replicas (m1,m2,m3,m4,m5,m6)
   - ms2, 0/3 replicas ()
-[MS controller] Iteration 2, Reconcile ms1, 6/5 replicas (m1,m2,m3,m4,m5,m6)
+[MS controller] Iteration 1, Reconcile ms1, 6/5 replicas (m1,m2,m3,m4,m5,m6)
 [MS controller] - ms1 scale down to 5/5 replicas (m1 deleted)
-[MS controller] Iteration 2, Reconcile ms1, 5/5 replicas (m2,m3,m4,m5,m6)
-[MD controller] Iteration 3, Reconcile md
+[MS controller] Iteration 1, Reconcile ms2, 0/3 replicas ()
+[MS controller] - ms2 scale up to 3/3 replicas (m7,m8,m9 created)
+[MD controller] Iteration 2, Reconcile md
 [MD controller] - Input to rollout planner
   md, 6/6 replicas
   - ms1, 5/5 replicas (m2,m3,m4,m5,m6)
-  - ms2, 0/3 replicas ()
-[MD controller] - Result of rollout planner
-  md, 5/6 replicas
-  - ms1, 5/5 replicas (m2,m3,m4,m5,m6)
-  - ms2, 0/4 replicas ()
-[MS controller] Iteration 3, Reconcile ms2, 0/4 replicas ()
-[MS controller] - ms2 scale up to 4/4 replicas (m7,m8,m9,m10 created)
-[MD controller] Iteration 4, Reconcile md
-[MD controller] - Input to rollout planner
-  md, 5/6 replicas
-  - ms1, 5/5 replicas (m2,m3,m4,m5,m6)
-  - ms2, 4/4 replicas (m7,m8,m9,m10)
+  - ms2, 3/3 replicas (m7,m8,m9)
 [MD controller] - Result of rollout planner
-  md, 9/6 replicas
-  - ms1, 5/1 replicas (m2,m3,m4,m5,m6)
-  - ms2, 4/4 replicas (m7,m8,m9,m10)
-[MS controller] Iteration 4, Reconcile ms1, 5/1 replicas (m2,m3,m4,m5,m6)
-[MS controller] - ms1 scale down to 1/1 replicas (m2,m3,m4,m5 deleted)
-[MS controller] Iteration 4, Reconcile ms2, 4/4 replicas (m7,m8,m9,m10)
-[MS controller] Iteration 5, Reconcile ms2, 4/4 replicas (m7,m8,m9,m10)
-[MD controller] Iteration 5, Reconcile md
+  md, 8/6 replicas
+  - ms1, 5/2 replicas (m2,m3,m4,m5,m6)
+  - ms2, 3/4 replicas (m7,m8,m9)
+[MS controller] Iteration 2, Reconcile ms1, 5/2 replicas (m2,m3,m4,m5,m6)
+[MS controller] - ms1 scale down to 2/2 replicas (m2,m3,m4 deleted)
+[MS controller] Iteration 2, Reconcile ms1, 2/2 replicas (m5,m6)
+[MD controller] Iteration 3, Reconcile md
 [MD controller] - Input to rollout planner
-  md, 9/6 replicas
-  - ms1, 1/1 replicas (m6)
-  - ms2, 4/4 replicas (m7,m8,m9,m10)
+  md, 8/6 replicas
+  - ms1, 2/2 replicas (m5,m6)
+  - ms2, 3/4 replicas (m7,m8,m9)
 [MD controller] - Result of rollout planner
   md, 5/6 replicas
-  - ms1, 1/1 replicas (m6)
-  - ms2, 4/6 replicas (m7,m8,m9,m10)
-[MS controller] Iteration 6, Reconcile ms2, 4/6 replicas (m7,m8,m9,m10)
-[MS controller] - ms2 scale up to 6/6 replicas (m11,m12 created)
-[MD controller] Iteration 7, Reconcile md
+  - ms1, 2/2 replicas (m5,m6)
+  - ms2, 3/6 replicas (m7,m8,m9)
+[MS controller] Iteration 3, Reconcile ms2, 3/6 replicas (m7,m8,m9)
+[MS controller] - ms2 scale up to 6/6 replicas (m10,m11,m12 created)
+[MD controller] Iteration 4, Reconcile md
 [MD controller] - Input to rollout planner
   md, 5/6 replicas
-  - ms1, 1/1 replicas (m6)
-  - ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
-[MD controller] - Result of rollout planner
-  md, 7/6 replicas
-  - ms1, 1/0 replicas (m6)
-  - ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
-[MS controller] Iteration 7, Reconcile ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
-[MD controller] Iteration 8, Reconcile md
-[MD controller] - Input to rollout planner
-  md, 7/6 replicas
-  - ms1, 1/0 replicas (m6)
+  - ms1, 2/2 replicas (m5,m6)
   - ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
 [MD controller] - Result of rollout planner
-  md, 7/6 replicas
-  - ms1, 1/0 replicas (m6)
+  md, 8/6 replicas
+  - ms1, 2/0 replicas (m5,m6)
   - ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
-[MS controller] Iteration 8, Reconcile ms1, 1/0 replicas (m6)
-[MS controller] - ms1 scale down to 0/0 replicas (m6 deleted)
-[MS controller] Iteration 9, Reconcile ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
-[MS controller] Iteration 9, Reconcile ms1, 0/0 replicas ()
-[MD controller] Iteration 9, Reconcile md
+[MS controller] Iteration 4, Reconcile ms1, 2/0 replicas (m5,m6)
+[MS controller] - ms1 scale down to 0/0 replicas (m5,m6 deleted)
+[MS controller] Iteration 4, Reconcile ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
+[MS controller] Iteration 5, Reconcile ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
+[MD controller] Iteration 5, Reconcile md
 [MD controller] - Input to rollout planner
-  md, 7/6 replicas
+  md, 8/6 replicas
   - ms1, 0/0 replicas ()
   - ms2, 6/6 replicas (m7,m8,m9,m10,m11,m12)
 [MD controller] - Result of rollout planner
diff --git a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 3, maxunavailable 1.test.log.golden b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 3, maxunavailable 1.test.log.golden
index 612d601a0737..fc741a5916ca 100644
--- a/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 3, maxunavailable 1.test.log.golden
+++ b/internal/controllers/machinedeployment/testdata/rollingupdate/regular rollout, 6 replicas, maxsurge 3, maxunavailable 1.test.log.golden
@@ -3,13 +3,11 @@
 [Test] Initial state
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [Test] Rollout 6 replicas, MaxSurge=3, MaxUnavailable=1
 [MD controller] Iteration 1, Reconcile md
 [MD controller] - Input to rollout planner
   md, 6/6 replicas
   - ms1, 6/6 replicas (m1,m2,m3,m4,m5,m6)
-  - ms2, 0/0 replicas ()
 [MD controller] - Result of rollout planner
   md, 6/6 replicas
   - ms1, 6/5 replicas (m1,m2,m3,m4,m5,m6)
diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go
index 45a6462f5087..7352142a81b6 100644
--- a/internal/controllers/machineset/machineset_controller.go
+++ b/internal/controllers/machineset/machineset_controller.go
@@ -630,9 +630,6 @@ func newMachineUpToDateCondition(s *scope) *metav1.Condition {
 	if !s.owningMachineDeployment.Spec.Rollout.After.IsZero() {
 		if s.owningMachineDeployment.Spec.Rollout.After.Time.Before(s.reconciliationTime) && !s.machineSet.CreationTimestamp.After(s.owningMachineDeployment.Spec.Rollout.After.Time) {
 			upToDate = false
-			if notUpToDateResult == nil {
-				notUpToDateResult = &mdutil.NotUpToDateResult{}
-			}
 			notUpToDateResult.ConditionMessages = append(notUpToDateResult.ConditionMessages, "MachineDeployment spec.rolloutAfter expired")
 		}
 	}
@@ -676,7 +673,7 @@ func (r *Reconciler) syncReplicas(ctx context.Context, s *scope) (ctrl.Result, e
 	diff *= -1
 	log.Info(fmt.Sprintf("MachineSet is scaling up to %d replicas by creating %d machines", *(ms.Spec.Replicas), diff), "replicas", *(ms.Spec.Replicas), "machineCount", len(machines))
 	if ms.Annotations != nil {
-		if _, ok := ms.Annotations[clusterv1.DisableMachineCreateAnnotation]; ok {
+		if value, ok := ms.Annotations[clusterv1.DisableMachineCreateAnnotation]; ok && value == "true" {
 			log.Info("Automatic creation of new machines disabled for machine set")
 			return ctrl.Result{}, nil
 		}
diff --git a/test/e2e/clusterclass_rollout.go b/test/e2e/clusterclass_rollout.go
index a02c4caf9e5e..252fb03d3c78 100644
--- a/test/e2e/clusterclass_rollout.go
+++ b/test/e2e/clusterclass_rollout.go
@@ -335,20 +335,27 @@ func assertClusterObjects(ctx context.Context, clusterProxy framework.ClusterPro
 	clusterClassObjects := getClusterClassObjects(ctx, g, clusterProxy, clusterClass)
 
 	// InfrastructureCluster
+	By("Checking the InfrastructureCluster object has the right labels, annotations and selectors")
 	assertInfrastructureCluster(g, clusterClassObjects, clusterObjects, cluster, clusterClass)
 
 	// ControlPlane
 	controlPlaneContractVersion, err := contract.GetContractVersionForVersion(ctx, clusterProxy.GetClient(), clusterObjects.ControlPlane.GroupVersionKind().GroupKind(), clusterObjects.ControlPlane.GroupVersionKind().Version)
 	g.Expect(err).ToNot(HaveOccurred())
+	By("Checking the ControlPlane object has the right labels, annotations and selectors")
 	assertControlPlane(g, clusterClassObjects, clusterObjects, cluster, clusterClass)
+	By("Checking ControlPlane Machine objects have the right labels, annotations and selectors")
 	assertControlPlaneMachines(g, clusterObjects, cluster, controlPlaneContractVersion, filterMetadataBeforeValidation)
 
 	// MachineDeployments
+	By("Checking MachineDeployment objects have the right labels, annotations and selectors")
 	assertMachineDeployments(g, clusterClassObjects, clusterObjects, cluster, clusterClass)
+	By("Checking MachineSet objects have the right labels, annotations and selectors")
 	assertMachineSets(g, clusterObjects, cluster)
+	By("Checking MachineSet Machine objects have the right labels, annotations and selectors")
 	assertMachineSetsMachines(g, clusterObjects, cluster, filterMetadataBeforeValidation)
 
 	// MachinePools
+	By("Checking MachinePool objects have the right labels, annotations and selectors")
 	assertMachinePools(g, clusterClassObjects, clusterObjects, cluster, clusterClass)
 
 	By("All cluster objects have the right labels, annotations and selectors")
@@ -809,7 +816,11 @@ func assertMachineSets(g Gomega, clusterObjects clusterObjects, cluster *cluster
 				machineSet.Annotations,
 			).without(g, clusterv1.DesiredReplicasAnnotation, clusterv1.MaxReplicasAnnotation, clusterv1.RevisionAnnotation),
 			union(
-				machineDeployment.Annotations,
+				union(machineDeployment.Annotations,
+					map[string]string{
+						clusterv1.DisableMachineCreateAnnotation: "false",
+					},
+				),
 			).without(g, clusterv1.RevisionAnnotation),
 		)
 		// MachineDeployment MachineSet.spec.selector
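
Note on the syncReplicas change in machineset_controller.go above: scale up is now skipped only when clusterv1.DisableMachineCreateAnnotation carries the explicit value "true"; a bare annotation, or the "false" value the e2e assertion above now expects on MachineSets, no longer disables machine creation. Below is a minimal, self-contained sketch of the new check, separate from the controller code; the helper name and the literal annotation key used here are illustrative assumptions, not the project's API.

package main

import "fmt"

// disableMachineCreateAnnotation stands in for clusterv1.DisableMachineCreateAnnotation;
// the literal value is assumed for illustration only.
const disableMachineCreateAnnotation = "machineset.cluster.x-k8s.io/disable-machine-create"

// machineCreationDisabled is a hypothetical helper mirroring the semantics after the
// change: only an explicit "true" disables machine creation.
func machineCreationDisabled(annotations map[string]string) bool {
	value, ok := annotations[disableMachineCreateAnnotation]
	return ok && value == "true"
}

func main() {
	fmt.Println(machineCreationDisabled(map[string]string{disableMachineCreateAnnotation: "true"}))  // true
	fmt.Println(machineCreationDisabled(map[string]string{disableMachineCreateAnnotation: "false"})) // false; the presence-only check would have returned true
	fmt.Println(machineCreationDisabled(nil))                                                        // false
}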
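For readers tracing the rollingupdate golden logs above: every "Result of rollout planner" stays inside two bounds derived from the rollout strategy. Across old and new MachineSets, at most desiredReplicas+maxSurge machines may exist at once, and at least desiredReplicas-maxUnavailable must be kept. The following is a minimal sketch of that arithmetic, assuming maxSurge and maxUnavailable are already resolved to absolute replica counts; rolloutBounds is a hypothetical helper, not the planner's actual API.

package main

import "fmt"

// rolloutBounds returns the machine-count window a rolling update must stay within:
// maxTotal caps how far the new MachineSet may surge, minAvailable floors how far
// old MachineSets may be scaled down.
func rolloutBounds(desired, maxSurge, maxUnavailable int32) (maxTotal, minAvailable int32) {
	return desired + maxSurge, max(desired-maxUnavailable, 0)
}

func main() {
	// Matches the "6 replicas, maxsurge 1, maxunavailable 3" golden log: the planner
	// may run up to 7 machines and must keep at least 3, which is why ms1 is first
	// scaled 6->3 while ms2 only gets 1 surge replica.
	maxTotal, minAvailable := rolloutBounds(6, 1, 3)
	fmt.Println(maxTotal, minAvailable) // 7 3
}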