Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions api/core/v1beta1/conversion.go
Original file line number Diff line number Diff line change
Expand Up @@ -408,6 +408,10 @@ func (src *Machine) ConvertTo(dstRaw conversion.Hub) error {
// Recover other values.
if ok {
dst.Spec.MinReadySeconds = restored.Spec.MinReadySeconds
// Restore the phase, this also means that any client using v1beta1 during a round-trip
// won't be able to write the Phase field. But that's okay as the only client writing the Phase
// field should be the Machine controller.
dst.Status.Phase = restored.Status.Phase
}

return nil
Expand Down Expand Up @@ -1653,6 +1657,13 @@ func Convert_v1beta2_MachineStatus_To_v1beta1_MachineStatus(in *clusterv1.Machin
if err := autoConvert_v1beta2_MachineStatus_To_v1beta1_MachineStatus(in, out, s); err != nil {
return err
}

// Convert v1beta2 Updating phase to v1beta1 Running as Updating did not exist in v1beta1.
// We don't have to support a round-trip as only the core CAPI controller should write the Phase field.
if out.Phase == "Updating" {
out.Phase = "Running"
}

if !reflect.DeepEqual(in.LastUpdated, metav1.Time{}) {
out.LastUpdated = ptr.To(in.LastUpdated)
}
Expand Down
3 changes: 3 additions & 0 deletions api/core/v1beta1/conversion_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -514,6 +514,9 @@ func hubMachineSpec(in *clusterv1.MachineSpec, c randfill.Continue) {

func hubMachineStatus(in *clusterv1.MachineStatus, c randfill.Continue) {
c.FillNoCustom(in)

in.Phase = []string{"Updating", "Running"}[c.Intn(2)]

// Drop empty structs with only omit empty fields.
if in.Deprecated != nil {
if in.Deprecated.V1Beta1 == nil || reflect.DeepEqual(in.Deprecated.V1Beta1, &clusterv1.MachineV1Beta1DeprecatedStatus{}) {
Expand Down
4 changes: 4 additions & 0 deletions api/core/v1beta2/machine_phase_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,10 @@ const (
// become a Kubernetes Node in a Ready state.
MachinePhaseRunning = MachinePhase("Running")

// MachinePhaseUpdating is the Machine state when the Machine
// is updating.
MachinePhaseUpdating = MachinePhase("Updating")

// MachinePhaseDeleting is the Machine state when a delete
// request has been sent to the API Server,
// but its infrastructure has not yet been fully deleted.
Expand Down
3 changes: 2 additions & 1 deletion api/core/v1beta2/machine_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -570,7 +570,7 @@ type MachineStatus struct {

// phase represents the current phase of machine actuation.
// +optional
// +kubebuilder:validation:Enum=Pending;Provisioning;Provisioned;Running;Deleting;Deleted;Failed;Unknown
// +kubebuilder:validation:Enum=Pending;Provisioning;Provisioned;Running;Updating;Deleting;Deleted;Failed;Unknown
Phase string `json:"phase,omitempty"`

// certificatesExpiryDate is the expiry date of the machine certificates.
Expand Down Expand Up @@ -728,6 +728,7 @@ func (m *MachineStatus) GetTypedPhase() MachinePhase {
MachinePhaseProvisioning,
MachinePhaseProvisioned,
MachinePhaseRunning,
MachinePhaseUpdating,
MachinePhaseDeleting,
MachinePhaseDeleted,
MachinePhaseFailed:
Expand Down
1 change: 1 addition & 0 deletions config/crd/bases/cluster.x-k8s.io_machines.yaml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions internal/controllers/machine/machine_controller_status.go
Original file line number Diff line number Diff line change
Expand Up @@ -837,6 +837,10 @@ func setMachinePhaseAndLastUpdated(_ context.Context, m *clusterv1.Machine) {
m.Status.SetTypedPhase(clusterv1.MachinePhaseRunning)
}

if conditions.IsTrue(m, clusterv1.MachineUpdatingCondition) {
m.Status.SetTypedPhase(clusterv1.MachinePhaseUpdating)
}

// Set the phase to "deleting" if the deletion timestamp is set.
if !m.DeletionTimestamp.IsZero() {
m.Status.SetTypedPhase(clusterv1.MachinePhaseDeleting)
Expand Down
102 changes: 102 additions & 0 deletions internal/controllers/machine/machine_controller_status_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,12 +26,16 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
utilfeature "k8s.io/component-base/featuregate/testing"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"

clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
runtimev1 "sigs.k8s.io/cluster-api/api/runtime/v1beta2"
"sigs.k8s.io/cluster-api/controllers/clustercache"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
"sigs.k8s.io/cluster-api/util/kubeconfig"
Expand Down Expand Up @@ -2444,6 +2448,104 @@ func TestReconcileMachinePhases(t *testing.T) {
}, 10*time.Second).Should(BeTrue())
})

t.Run("Should set `Updating` when Machine is in-place updating", func(t *testing.T) {
	utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.InPlaceUpdates, true)

	g := NewWithT(t)

	ns, err := env.CreateNamespace(ctx, "test-reconcile-machine-phases")
	g.Expect(err).ToNot(HaveOccurred())
	defer func() {
		g.Expect(env.Cleanup(ctx, ns)).To(Succeed())
	}()

	nodeProviderID := fmt.Sprintf("test://%s", util.RandomString(6))

	cluster := defaultCluster.DeepCopy()
	cluster.Namespace = ns.Name

	bootstrapConfig := defaultBootstrap.DeepCopy()
	bootstrapConfig.SetNamespace(ns.Name)
	infraMachine := defaultInfra.DeepCopy()
	infraMachine.SetNamespace(ns.Name)
	g.Expect(unstructured.SetNestedField(infraMachine.Object, nodeProviderID, "spec", "providerID")).To(Succeed())
	machine := defaultMachine.DeepCopy()
	machine.Namespace = ns.Name

	// Create Node.
	node := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "machine-test-node-",
		},
		Spec: corev1.NodeSpec{ProviderID: nodeProviderID},
	}
	g.Expect(env.Create(ctx, node)).To(Succeed())
	defer func() {
		g.Expect(env.Cleanup(ctx, node)).To(Succeed())
	}()

	g.Expect(env.Create(ctx, cluster)).To(Succeed())
	defaultKubeconfigSecret = kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(env.Config, cluster))
	g.Expect(env.Create(ctx, defaultKubeconfigSecret)).To(Succeed())
	// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
	patch := client.MergeFrom(cluster.DeepCopy())
	cluster.Status.Initialization.InfrastructureProvisioned = ptr.To(true)
	g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())

	g.Expect(env.Create(ctx, bootstrapConfig)).To(Succeed())
	g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
	// We have to subtract 2 seconds, because .status.lastUpdated does not contain milliseconds.
	preUpdate := time.Now().Add(-2 * time.Second)
	// Create and wait on machine to make sure caches sync and reconciliation triggers.
	g.Expect(env.CreateAndWait(ctx, machine)).To(Succeed())

	// Set NodeRef.
	// Note: mutate the copy and diff against the untouched base, otherwise the
	// merge patch is empty and the NodeRef is never persisted.
	modifiedMachine := machine.DeepCopy()
	modifiedMachine.Status.NodeRef = clusterv1.MachineNodeReference{Name: node.Name}
	g.Expect(env.Status().Patch(ctx, modifiedMachine, client.MergeFrom(machine))).To(Succeed())

	// Set bootstrap ready.
	modifiedBootstrapConfig := bootstrapConfig.DeepCopy()
	g.Expect(unstructured.SetNestedField(modifiedBootstrapConfig.Object, true, "status", "initialization", "dataSecretCreated")).To(Succeed())
	g.Expect(unstructured.SetNestedField(modifiedBootstrapConfig.Object, "secret-data", "status", "dataSecretName")).To(Succeed())
	g.Expect(env.Status().Patch(ctx, modifiedBootstrapConfig, client.MergeFrom(bootstrapConfig))).To(Succeed())

	// Set infra ready.
	modifiedInfraMachine := infraMachine.DeepCopy()
	g.Expect(unstructured.SetNestedField(modifiedInfraMachine.Object, true, "status", "initialization", "provisioned")).To(Succeed())
	g.Expect(env.Status().Patch(ctx, modifiedInfraMachine, client.MergeFrom(infraMachine))).To(Succeed())

	// Set annotations on Machine, BootstrapConfig and InfraMachine to trigger an in-place update.
	orig := modifiedBootstrapConfig.DeepCopy()
	annotations.AddAnnotations(modifiedBootstrapConfig, map[string]string{
		clusterv1.UpdateInProgressAnnotation: "",
	})
	g.Expect(env.Patch(ctx, modifiedBootstrapConfig, client.MergeFrom(orig))).To(Succeed())
	orig = modifiedInfraMachine.DeepCopy()
	annotations.AddAnnotations(modifiedInfraMachine, map[string]string{
		clusterv1.UpdateInProgressAnnotation: "",
	})
	g.Expect(env.Patch(ctx, modifiedInfraMachine, client.MergeFrom(orig))).To(Succeed())
	origMachine := modifiedMachine.DeepCopy()
	annotations.AddAnnotations(modifiedMachine, map[string]string{
		runtimev1.PendingHooksAnnotation:     "UpdateMachine",
		clusterv1.UpdateInProgressAnnotation: "",
	})
	g.Expect(env.Patch(ctx, modifiedMachine, client.MergeFrom(origMachine))).To(Succeed())

	// Wait until Machine was reconciled.
	g.Eventually(func(g Gomega) bool {
		if err := env.DirectAPIServerGet(ctx, client.ObjectKeyFromObject(machine), machine); err != nil {
			return false
		}
		g.Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseUpdating))
		// Verify that the LastUpdated timestamp was updated
		g.Expect(machine.Status.LastUpdated.IsZero()).To(BeFalse())
		g.Expect(machine.Status.LastUpdated.After(preUpdate)).To(BeTrue())
		return true
	}, 10*time.Second).Should(BeTrue())
})

t.Run("Should set `Provisioned` when there is a ProviderID and there is no Node", func(t *testing.T) {
g := NewWithT(t)

Expand Down
8 changes: 6 additions & 2 deletions internal/controllers/machine/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,8 @@ import (
"sigs.k8s.io/cluster-api/api/core/v1beta2/index"
"sigs.k8s.io/cluster-api/controllers/clustercache"
"sigs.k8s.io/cluster-api/controllers/remote"
runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
"sigs.k8s.io/cluster-api/internal/test/envtest"
)

Expand Down Expand Up @@ -99,8 +101,10 @@ func TestMain(m *testing.M) {
clusterCache.(interface{ DisablePrivateKeyGeneration() }).DisablePrivateKeyGeneration()

if err := (&Reconciler{
Client: mgr.GetClient(),
APIReader: mgr.GetAPIReader(),
Client: mgr.GetClient(),
APIReader: mgr.GetAPIReader(),
// Just adding a minimal RuntimeClient to avoid panics in tests.
RuntimeClient: fakeruntimeclient.NewRuntimeClientBuilder().WithCatalog(runtimecatalog.New()).Build(),
ClusterCache: clusterCache,
RemoteConditionsGracePeriod: 5 * time.Minute,
AdditionalSyncMachineLabels: nil,
Expand Down