diff --git a/internal/controllers/clusterclass/clusterclass_controller.go b/internal/controllers/clusterclass/clusterclass_controller.go
index b9798e4e7592..f02f9315a3bc 100644
--- a/internal/controllers/clusterclass/clusterclass_controller.go
+++ b/internal/controllers/clusterclass/clusterclass_controller.go
@@ -34,7 +34,6 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/external"
 	tlog "sigs.k8s.io/cluster-api/internal/log"
 	"sigs.k8s.io/cluster-api/util/annotations"
-	utilconversion "sigs.k8s.io/cluster-api/util/conversion"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -91,21 +90,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 		return ctrl.Result{}, nil
 	}
 
-	// We use the patchHelper to patch potential changes to the ObjectReferences in ClusterClass.
-	patchHelper, err := patch.NewHelper(clusterClass, r.Client)
-	if err != nil {
-		return ctrl.Result{}, err
-	}
-
-	defer func() {
-		if err := patchHelper.Patch(ctx, clusterClass); err != nil {
-			reterr = kerrors.NewAggregate([]error{
-				reterr,
-				errors.Wrapf(err, "failed to patch %s", tlog.KObj{Obj: clusterClass})},
-			)
-		}
-	}()
-
 	return r.reconcile(ctx, clusterClass)
 }
 
@@ -133,38 +117,35 @@ func (r *Reconciler) reconcile(ctx context.Context, clusterClass *clusterv1.Clus
 		}
 	}
 
-	// Ensure all the referenced objects are owned by the ClusterClass and that references are
-	// upgraded to the latest contract.
-	// Nb. Some external objects can be referenced multiple times in the ClusterClass. We
-	// update the API contracts of all the references but we set the owner reference on the unique
-	// external object only once.
+	// Ensure all referenced objects are owned by the ClusterClass.
+	// Nb. Some external objects can be referenced multiple times in the ClusterClass,
+	// but we only want to set the owner reference once per unique external object.
+	// For example, the same KubeadmConfigTemplate could be referenced in multiple MachineDeployment
+	// classes.
 	errs := []error{}
-	patchedRefs := sets.NewString()
+	reconciledRefs := sets.NewString()
 	for i := range refs {
 		ref := refs[i]
 		uniqueKey := uniqueObjectRefKey(ref)
-		if err := r.reconcileExternal(ctx, clusterClass, ref, !patchedRefs.Has(uniqueKey)); err != nil {
+
+		// Continue as we only have to reconcile every referenced object once.
+		if reconciledRefs.Has(uniqueKey) {
+			continue
+		}
+
+		if err := r.reconcileExternal(ctx, clusterClass, ref); err != nil {
 			errs = append(errs, err)
 			continue
 		}
-		patchedRefs.Insert(uniqueKey)
+		reconciledRefs.Insert(uniqueKey)
 	}
 	return ctrl.Result{}, kerrors.NewAggregate(errs)
 }
 
-func (r *Reconciler) reconcileExternal(ctx context.Context, clusterClass *clusterv1.ClusterClass, ref *corev1.ObjectReference, setOwnerRef bool) error {
+func (r *Reconciler) reconcileExternal(ctx context.Context, clusterClass *clusterv1.ClusterClass, ref *corev1.ObjectReference) error {
 	log := ctrl.LoggerFrom(ctx)
 
-	if err := utilconversion.UpdateReferenceAPIContract(ctx, r.Client, r.APIReader, ref); err != nil {
-		return errors.Wrapf(err, "failed to update reference API contract of %s", tlog.KRef{Ref: ref})
-	}
-
-	// If we dont need to set the ownerReference then return early.
-	if !setOwnerRef {
-		return nil
-	}
-
 	obj, err := external.Get(ctx, r.UnstructuredCachingClient, ref, clusterClass.Namespace)
 	if err != nil {
 		if apierrors.IsNotFound(errors.Cause(err)) {
@@ -173,7 +154,7 @@ func (r *Reconciler) reconcileExternal(ctx context.Context, clusterClass *cluste
 		return errors.Wrapf(err, "failed to get the external object for the cluster class. refGroupVersionKind: %s, refName: %s", ref.GroupVersionKind(), ref.Name)
 	}
 
-	// If external ref is paused, return early.
+	// If the referenced object is paused, return early.
 	if annotations.HasPaused(obj) {
 		log.V(3).Info("External object referenced is paused", "refGroupVersionKind", ref.GroupVersionKind(), "refName", ref.Name)
 		return nil
diff --git a/internal/controllers/topology/cluster/current_state.go b/internal/controllers/topology/cluster/current_state.go
index de8af7c4ee91..701b5a0bf3ff 100644
--- a/internal/controllers/topology/cluster/current_state.go
+++ b/internal/controllers/topology/cluster/current_state.go
@@ -21,8 +21,10 @@ import (
 	"fmt"
 
 	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -41,7 +43,7 @@ func (r *Reconciler) getCurrentState(ctx context.Context, s *scope.Scope) (*scop
 	// Reference to the InfrastructureCluster can be nil and is expected to be on the first reconcile.
 	// In this case the method should still be allowed to continue.
 	if currentState.Cluster.Spec.InfrastructureRef != nil {
-		infra, err := r.getCurrentInfrastructureClusterState(ctx, currentState.Cluster)
+		infra, err := r.getCurrentInfrastructureClusterState(ctx, s.Blueprint.InfrastructureClusterTemplate, currentState.Cluster)
 		if err != nil {
 			return nil, err
 		}
@@ -52,7 +54,7 @@ func (r *Reconciler) getCurrentState(ctx context.Context, s *scope.Scop
 	// should still be allowed to continue.
 	currentState.ControlPlane = &scope.ControlPlaneState{}
 	if currentState.Cluster.Spec.ControlPlaneRef != nil {
-		cp, err := r.getCurrentControlPlaneState(ctx, currentState.Cluster, s.Blueprint)
+		cp, err := r.getCurrentControlPlaneState(ctx, s.Blueprint.ControlPlane, s.Blueprint.HasControlPlaneInfrastructureMachine(), currentState.Cluster)
 		if err != nil {
 			return nil, err
 		}
@@ -61,7 +63,7 @@
 	// A Cluster may have zero or more MachineDeployments and a Cluster is expected to have zero MachineDeployments on
 	// first reconcile.
-	m, err := r.getCurrentMachineDeploymentState(ctx, currentState.Cluster)
+	m, err := r.getCurrentMachineDeploymentState(ctx, s.Blueprint.MachineDeployments, currentState.Cluster)
 	if err != nil {
 		return nil, err
 	}
@@ -72,8 +74,12 @@ func (r *Reconciler) getCurrentState(ctx context.Context, s *scope.Scop
 
 // getCurrentInfrastructureClusterState looks for the state of the InfrastructureCluster. If a reference is set but not
 // found, either from an error or the object not being found, an error is thrown.
-func (r *Reconciler) getCurrentInfrastructureClusterState(ctx context.Context, cluster *clusterv1.Cluster) (*unstructured.Unstructured, error) {
-	infra, err := r.getReference(ctx, cluster.Spec.InfrastructureRef)
+func (r *Reconciler) getCurrentInfrastructureClusterState(ctx context.Context, blueprintInfrastructureClusterTemplate *unstructured.Unstructured, cluster *clusterv1.Cluster) (*unstructured.Unstructured, error) {
+	ref, err := alignRefAPIVersion(blueprintInfrastructureClusterTemplate, cluster.Spec.InfrastructureRef)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to read %s", tlog.KRef{Ref: cluster.Spec.InfrastructureRef})
+	}
+	infra, err := r.getReference(ctx, ref)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to read %s", tlog.KRef{Ref: cluster.Spec.InfrastructureRef})
 	}
@@ -89,12 +95,16 @@ func (r *Reconciler) getCurrentInfrastructureClusterState(ctx context.Context, c
 // getCurrentControlPlaneState returns information on the ControlPlane being used by the Cluster. If a reference is not found,
 // an error is thrown. If the ControlPlane requires MachineInfrastructure according to its ClusterClass an error will be
 // thrown if the ControlPlane has no MachineTemplates.
-func (r *Reconciler) getCurrentControlPlaneState(ctx context.Context, cluster *clusterv1.Cluster, blueprint *scope.ClusterBlueprint) (*scope.ControlPlaneState, error) {
+func (r *Reconciler) getCurrentControlPlaneState(ctx context.Context, blueprintControlPlane *scope.ControlPlaneBlueprint, blueprintHasControlPlaneInfrastructureMachine bool, cluster *clusterv1.Cluster) (*scope.ControlPlaneState, error) {
 	var err error
 	res := &scope.ControlPlaneState{}
 
 	// Get the control plane object.
-	res.Object, err = r.getReference(ctx, cluster.Spec.ControlPlaneRef)
+	ref, err := alignRefAPIVersion(blueprintControlPlane.Template, cluster.Spec.ControlPlaneRef)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to read %s", tlog.KRef{Ref: cluster.Spec.ControlPlaneRef})
+	}
+	res.Object, err = r.getReference(ctx, ref)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to read %s", tlog.KRef{Ref: cluster.Spec.ControlPlaneRef})
 	}
@@ -106,7 +116,7 @@ func (r *Reconciler) getCurrentControlPlaneState(ctx context.Context, cluster *c
 	}
 
 	// If the clusterClass does not mandate the controlPlane has infrastructureMachines, return.
-	if !blueprint.HasControlPlaneInfrastructureMachine() {
+	if !blueprintHasControlPlaneInfrastructureMachine {
 		return res, nil
 	}
 
@@ -115,7 +125,11 @@
 	if err != nil {
 		return res, errors.Wrapf(err, "failed to get InfrastructureMachineTemplate reference for %s", tlog.KObj{Obj: res.Object})
 	}
-	res.InfrastructureMachineTemplate, err = r.getReference(ctx, machineInfrastructureRef)
+	ref, err = alignRefAPIVersion(blueprintControlPlane.InfrastructureMachineTemplate, machineInfrastructureRef)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to get InfrastructureMachineTemplate for %s", tlog.KObj{Obj: res.Object})
+	}
+	res.InfrastructureMachineTemplate, err = r.getReference(ctx, ref)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to get InfrastructureMachineTemplate for %s", tlog.KObj{Obj: res.Object})
 	}
@@ -137,7 +151,7 @@
 // whether they are managed by a ClusterClass using labels. A Cluster may have zero or more MachineDeployments. Zero is
 // expected on first reconcile. If MachineDeployments are found for the Cluster their Infrastructure and Bootstrap references
 // are inspected. Where these are not found the function will throw an error.
-func (r *Reconciler) getCurrentMachineDeploymentState(ctx context.Context, cluster *clusterv1.Cluster) (map[string]*scope.MachineDeploymentState, error) {
+func (r *Reconciler) getCurrentMachineDeploymentState(ctx context.Context, blueprintMachineDeployments map[string]*scope.MachineDeploymentBlueprint, cluster *clusterv1.Cluster) (map[string]*scope.MachineDeploymentState, error) {
 	state := make(scope.MachineDeploymentsStateMap)
 
 	// List all the machine deployments in the current cluster and in a managed topology.
@@ -172,12 +186,26 @@ func (r *Reconciler) getCurrentMachineDeploymentState(ctx context.Context, clust
 			return nil, fmt.Errorf("duplicate %s found for label %s: %s", tlog.KObj{Obj: m}, clusterv1.ClusterTopologyMachineDeploymentLabelName, mdTopologyName)
 		}
 
+		mdClassName := getMDClassName(cluster, mdTopologyName)
+		if mdClassName == "" {
+			return nil, fmt.Errorf("failed to find MachineDeployment topology %s in %s", mdTopologyName, tlog.KObj{Obj: cluster})
+		}
+
+		mdBluePrint, ok := blueprintMachineDeployments[mdClassName]
+		if !ok {
+			return nil, fmt.Errorf("failed to find MachineDeployment class %s in ClusterClass", mdClassName)
+		}
+
 		// Gets the BootstrapTemplate
 		bootstrapRef := m.Spec.Template.Spec.Bootstrap.ConfigRef
 		if bootstrapRef == nil {
 			return nil, fmt.Errorf("%s does not have a reference to a Bootstrap Config", tlog.KObj{Obj: m})
 		}
-		b, err := r.getReference(ctx, bootstrapRef)
+		ref, err := alignRefAPIVersion(mdBluePrint.BootstrapTemplate, bootstrapRef)
+		if err != nil {
+			return nil, errors.Wrap(err, fmt.Sprintf("%s Bootstrap reference could not be retrieved", tlog.KObj{Obj: m}))
+		}
+		b, err := r.getReference(ctx, ref)
 		if err != nil {
 			return nil, errors.Wrap(err, fmt.Sprintf("%s Bootstrap reference could not be retrieved", tlog.KObj{Obj: m}))
 		}
@@ -187,7 +215,11 @@ func (r *Reconciler) getCurrentMachineDeploymentState(ctx context.Context, clust
 		if infraRef.Name == "" {
 			return nil, fmt.Errorf("%s does not have a reference to a InfrastructureMachineTemplate", tlog.KObj{Obj: m})
 		}
-		infra, err := r.getReference(ctx, &infraRef)
+		ref, err = alignRefAPIVersion(mdBluePrint.InfrastructureMachineTemplate, &infraRef)
+		if err != nil {
+			return nil, errors.Wrap(err, fmt.Sprintf("%s Infrastructure reference could not be retrieved", tlog.KObj{Obj: m}))
+		}
+		infra, err := r.getReference(ctx, ref)
 		if err != nil {
 			return nil, errors.Wrap(err, fmt.Sprintf("%s Infrastructure reference could not be retrieved", tlog.KObj{Obj: m}))
 		}
@@ -214,3 +246,44 @@ func (r *Reconciler) getCurrentMachineDeploymentState(ctx context.Context, clust
 	}
 	return state, nil
 }
+
+// alignRefAPIVersion returns an aligned copy of the currentRef so it matches the apiVersion in ClusterClass.
+// This is required so the topology controller can diff current and desired state objects of the same
+// version during reconcile.
+// If group or kind was changed in the ClusterClass, an exact copy of the currentRef is returned because
+// it will end up in a diff and a rollout anyway.
+// Only bootstrap template refs in a ClusterClass can change their group and kind.
+func alignRefAPIVersion(templateFromClusterClass *unstructured.Unstructured, currentRef *corev1.ObjectReference) (*corev1.ObjectReference, error) {
+	currentGV, err := schema.ParseGroupVersion(currentRef.APIVersion)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to parse apiVersion: %q", currentRef.APIVersion)
+	}
+
+	apiVersion := currentRef.APIVersion
+	// Use apiVersion from ClusterClass if group and kind are the same.
+	if templateFromClusterClass.GroupVersionKind().Group == currentGV.Group &&
+		templateFromClusterClass.GetKind() == currentRef.Kind {
+		apiVersion = templateFromClusterClass.GetAPIVersion()
+	}
+
+	return &corev1.ObjectReference{
+		APIVersion: apiVersion,
+		Kind:       currentRef.Kind,
+		Namespace:  currentRef.Namespace,
+		Name:       currentRef.Name,
+	}, nil
+}
+
+// getMDClassName retrieves the MDClass name by looking up the MDTopology in the Cluster.
+func getMDClassName(cluster *clusterv1.Cluster, mdTopologyName string) string {
+	if cluster.Spec.Topology.Workers == nil {
+		return ""
+	}
+
+	for _, mdTopology := range cluster.Spec.Topology.Workers.MachineDeployments {
+		if mdTopology.Name == mdTopologyName {
+			return mdTopology.Class
+		}
+	}
+	return ""
+}
diff --git a/internal/controllers/topology/cluster/current_state_test.go b/internal/controllers/topology/cluster/current_state_test.go
index 3a102446feee..8174a4312e76 100644
--- a/internal/controllers/topology/cluster/current_state_test.go
+++ b/internal/controllers/topology/cluster/current_state_test.go
@@ -24,6 +24,7 @@ import (
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
@@ -50,9 +51,10 @@ func TestGetCurrentState(t *testing.T) {
 	infraCluster := builder.InfrastructureCluster(metav1.NamespaceDefault, "infraOne").
 		Build()
 	infraCluster.SetLabels(map[string]string{clusterv1.ClusterTopologyOwnedLabel: ""})
-
 	infraClusterNotTopologyOwned := builder.InfrastructureCluster(metav1.NamespaceDefault, "infraOne").
 		Build()
+	infraClusterTemplate := builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infraTemplateOne").
+		Build()
 
 	// ControlPlane and ControlPlaneInfrastructureMachineTemplate objects.
 	controlPlaneInfrastructureMachineTemplate := builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cpInfraTemplate").
@@ -133,16 +135,17 @@ func TestGetCurrentState(t *testing.T) {
 		Build()
 
 	tests := []struct {
-		name    string
-		cluster *clusterv1.Cluster
-		class   *clusterv1.ClusterClass
-		objects []client.Object
-		want    *scope.ClusterState
-		wantErr bool
+		name      string
+		cluster   *clusterv1.Cluster
+		blueprint *scope.ClusterBlueprint
+		objects   []client.Object
+		want      *scope.ClusterState
+		wantErr   bool
 	}{
 		{
-			name:    "Should read a Cluster when being processed by the topology controller for the first time (without references)",
-			cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").Build(),
+			name:      "Should read a Cluster when being processed by the topology controller for the first time (without references)",
+			cluster:   builder.Cluster(metav1.NamespaceDefault, "cluster1").Build(),
+			blueprint: &scope.ClusterBlueprint{},
 			// Expecting valid return with no ControlPlane or Infrastructure state defined and empty MachineDeployment state list
 			want: &scope.ClusterState{
 				Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
@@ -158,6 +161,9 @@ func TestGetCurrentState(t *testing.T) {
 			cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
 				WithInfrastructureCluster(infraCluster).
 				Build(),
+			blueprint: &scope.ClusterBlueprint{
+				InfrastructureClusterTemplate: infraClusterTemplate,
+			},
 			objects: []client.Object{
 				// InfrastructureCluster is missing!
 			},
@@ -168,6 +174,9 @@
 			cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
 				WithInfrastructureCluster(infraClusterNotTopologyOwned).
 				Build(),
+			blueprint: &scope.ClusterBlueprint{
+				InfrastructureClusterTemplate: infraClusterTemplate,
+			},
 			objects: []client.Object{
 				infraClusterNotTopologyOwned,
 			},
@@ -178,6 +187,12 @@
 			cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
 				WithControlPlane(controlPlaneNotTopologyOwned).
 				Build(),
+			blueprint: &scope.ClusterBlueprint{
+				ClusterClass: clusterClassWithControlPlaneInfra,
+				ControlPlane: &scope.ControlPlaneBlueprint{
+					Template: controlPlaneTemplateWithInfrastructureMachine,
+				},
+			},
 			objects: []client.Object{
 				controlPlaneNotTopologyOwned,
 			},
@@ -189,6 +204,9 @@
 				WithInfrastructureCluster(infraCluster).
 				// No ControlPlane reference!
 				Build(),
+			blueprint: &scope.ClusterBlueprint{
+				InfrastructureClusterTemplate: infraClusterTemplate,
+			},
 			objects: []client.Object{
 				infraCluster,
 			},
@@ -208,7 +226,13 @@
 				WithControlPlane(controlPlane).
 				WithInfrastructureCluster(infraCluster).
 				Build(),
-			class: clusterClassWithNoControlPlaneInfra,
+			blueprint: &scope.ClusterBlueprint{
+				ClusterClass:                  clusterClassWithNoControlPlaneInfra,
+				InfrastructureClusterTemplate: infraClusterTemplate,
+				ControlPlane: &scope.ControlPlaneBlueprint{
+					Template: controlPlaneTemplateWithInfrastructureMachine,
+				},
+			},
 			objects: []client.Object{
 				controlPlane,
 				infraCluster,
@@ -232,7 +256,13 @@
 				WithControlPlane(controlPlane).
 				WithInfrastructureCluster(infraCluster).
 				Build(),
-			class: clusterClassWithControlPlaneInfra,
+			blueprint: &scope.ClusterBlueprint{
+				ClusterClass:                  clusterClassWithControlPlaneInfra,
+				InfrastructureClusterTemplate: infraClusterTemplate,
+				ControlPlane: &scope.ControlPlaneBlueprint{
+					Template: controlPlaneTemplateWithInfrastructureMachine,
+				},
+			},
 			objects: []client.Object{
 				controlPlane,
 				infraCluster,
@@ -247,7 +277,13 @@
 				// No InfrastructureCluster!
 				WithControlPlane(controlPlaneWithInfra).
 				Build(),
-			class: clusterClassWithControlPlaneInfra,
+			blueprint: &scope.ClusterBlueprint{
+				ClusterClass: clusterClassWithControlPlaneInfra,
+				ControlPlane: &scope.ControlPlaneBlueprint{
+					Template:                      controlPlaneTemplateWithInfrastructureMachine,
+					InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate,
+				},
+			},
 			objects: []client.Object{
 				controlPlaneWithInfra,
 				controlPlaneInfrastructureMachineTemplate,
@@ -269,7 +305,14 @@
 				WithInfrastructureCluster(infraCluster).
 				WithControlPlane(controlPlaneWithInfra).
 				Build(),
-			class: clusterClassWithControlPlaneInfra,
+			blueprint: &scope.ClusterBlueprint{
+				ClusterClass:                  clusterClassWithControlPlaneInfra,
+				InfrastructureClusterTemplate: infraClusterTemplate,
+				ControlPlane: &scope.ControlPlaneBlueprint{
+					Template:                      controlPlaneTemplateWithInfrastructureMachine,
+					InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate,
+				},
+			},
 			objects: []client.Object{
 				infraCluster,
 				clusterClassWithControlPlaneInfra,
@@ -291,8 +334,27 @@
 		{
 			name: "Should read a Cluster (with InfrastructureCluster, ControlPlane and ControlPlane InfrastructureMachineTemplate, workers)",
 			cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
+				WithTopology(builder.ClusterTopology().
+					WithMachineDeployment(clusterv1.MachineDeploymentTopology{
+						Class: "mdClass",
+						Name:  "md1",
+					}).
+					Build()).
 				Build(),
-			class: clusterClassWithControlPlaneInfra,
+			blueprint: &scope.ClusterBlueprint{
+				ClusterClass:                  clusterClassWithControlPlaneInfra,
+				InfrastructureClusterTemplate: infraClusterTemplate,
+				ControlPlane: &scope.ControlPlaneBlueprint{
+					Template:                      controlPlaneTemplateWithInfrastructureMachine,
+					InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate,
+				},
+				MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{
+					"mdClass": {
+						BootstrapTemplate:             machineDeploymentBootstrap,
+						InfrastructureMachineTemplate: machineDeploymentInfrastructure,
+					},
+				},
+			},
 			objects: []client.Object{
 				infraCluster,
 				clusterClassWithControlPlaneInfra,
@@ -305,6 +367,12 @@
 			// Expecting valid return with valid ControlPlane, ControlPlane Infrastructure and InfrastructureCluster state, but no defined MachineDeployment state.
 			want: &scope.ClusterState{
 				Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
+					WithTopology(builder.ClusterTopology().
+						WithMachineDeployment(clusterv1.MachineDeploymentTopology{
+							Class: "mdClass",
+							Name:  "md1",
+						}).
+						Build()).
 					Build(),
 				ControlPlane:          &scope.ControlPlaneState{},
 				InfrastructureCluster: nil,
@@ -317,7 +385,13 @@
 			cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
 				WithControlPlane(controlPlane).
 				Build(),
-			class: clusterClassWithControlPlaneInfra,
+			blueprint: &scope.ClusterBlueprint{
+				ClusterClass: clusterClassWithControlPlaneInfra,
+				ControlPlane: &scope.ControlPlaneBlueprint{
+					Template:                      controlPlaneTemplateWithInfrastructureMachine,
+					InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate,
+				},
+			},
 			objects: []client.Object{
 				clusterClassWithControlPlaneInfra,
 				controlPlane,
@@ -330,7 +404,7 @@
 			name: "Should ignore unmanaged MachineDeployments and MachineDeployments belonging to other clusters",
 			cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
 				Build(),
-			class: clusterClassWithControlPlaneInfra,
+			blueprint: &scope.ClusterBlueprint{ClusterClass: clusterClassWithControlPlaneInfra},
 			objects: []client.Object{
 				clusterClassWithControlPlaneInfra,
 				builder.MachineDeployment(metav1.NamespaceDefault, "no-managed-label").
@@ -364,7 +438,7 @@
 			name: "Fails if there are MachineDeployments without the topology.cluster.x-k8s.io/deployment-name",
 			cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
 				Build(),
-			class: clusterClassWithControlPlaneInfra,
+			blueprint: &scope.ClusterBlueprint{ClusterClass: clusterClassWithControlPlaneInfra},
 			objects: []client.Object{
 				clusterClassWithControlPlaneInfra,
 				builder.MachineDeployment(metav1.NamespaceDefault, "missing-topology-md-labelName").
@@ -383,8 +457,20 @@
 		{
 			name: "Fails if there are MachineDeployments with the same topology.cluster.x-k8s.io/deployment-name",
 			cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
+				WithTopology(builder.ClusterTopology().
+					WithMachineDeployment(clusterv1.MachineDeploymentTopology{
+						Class: "mdClass",
+						Name:  "md1",
+					}).
+					Build()).
 				Build(),
-			class: clusterClassWithControlPlaneInfra,
+			blueprint: &scope.ClusterBlueprint{
+				ClusterClass: clusterClassWithControlPlaneInfra,
+				ControlPlane: &scope.ControlPlaneBlueprint{
+					Template:                      controlPlaneTemplateWithInfrastructureMachine,
+					InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate,
+				},
+			},
 			objects: []client.Object{
 				clusterClassWithControlPlaneInfra,
 				machineDeploymentInfrastructure,
@@ -404,8 +490,27 @@
 			cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
 				WithInfrastructureCluster(infraCluster).
 				WithControlPlane(controlPlaneWithInfra).
+				WithTopology(builder.ClusterTopology().
+					WithMachineDeployment(clusterv1.MachineDeploymentTopology{
+						Class: "mdClass",
+						Name:  "md1",
+					}).
+					Build()).
 				Build(),
-			class: clusterClassWithControlPlaneInfra,
+			blueprint: &scope.ClusterBlueprint{
+				ClusterClass:                  clusterClassWithControlPlaneInfra,
+				InfrastructureClusterTemplate: infraClusterTemplate,
+				ControlPlane: &scope.ControlPlaneBlueprint{
+					Template:                      controlPlaneTemplateWithInfrastructureMachine,
+					InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate,
+				},
+				MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{
+					"mdClass": {
+						BootstrapTemplate:             machineDeploymentBootstrap,
+						InfrastructureMachineTemplate: machineDeploymentInfrastructure,
+					},
+				},
+			},
 			objects: []client.Object{
 				infraCluster,
 				clusterClassWithControlPlaneInfra,
@@ -420,6 +525,12 @@
 				Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
 					WithInfrastructureCluster(infraCluster).
 					WithControlPlane(controlPlaneWithInfra).
+					WithTopology(builder.ClusterTopology().
+						WithMachineDeployment(clusterv1.MachineDeploymentTopology{
+							Class: "mdClass",
+							Name:  "md1",
+						}).
+						Build()).
 					Build(),
 				ControlPlane:          &scope.ControlPlaneState{Object: controlPlaneWithInfra, InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate},
 				InfrastructureCluster: infraCluster,
@@ -435,8 +546,27 @@
 		{
 			name: "Fails if a Cluster has a MachineDeployment without the Bootstrap Template ref",
 			cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
+				WithTopology(builder.ClusterTopology().
+					WithMachineDeployment(clusterv1.MachineDeploymentTopology{
+						Class: "mdClass",
+						Name:  "md1",
+					}).
+					Build()).
 				Build(),
-			class: clusterClassWithControlPlaneInfra,
+			blueprint: &scope.ClusterBlueprint{
+				ClusterClass:                  clusterClassWithControlPlaneInfra,
+				InfrastructureClusterTemplate: infraClusterTemplate,
+				ControlPlane: &scope.ControlPlaneBlueprint{
+					Template:                      controlPlaneTemplateWithInfrastructureMachine,
+					InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate,
+				},
+				MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{
+					"mdClass": {
+						BootstrapTemplate:             machineDeploymentBootstrap,
+						InfrastructureMachineTemplate: machineDeploymentInfrastructure,
+					},
+				},
+			},
 			objects: []client.Object{
 				infraCluster,
 				clusterClassWithControlPlaneInfra,
@@ -455,8 +585,27 @@
 		{
 			name: "Fails if a Cluster has a MachineDeployments without the InfrastructureMachineTemplate ref",
 			cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
+				WithTopology(builder.ClusterTopology().
+					WithMachineDeployment(clusterv1.MachineDeploymentTopology{
+						Class: "mdClass",
+						Name:  "md1",
+					}).
+					Build()).
 				Build(),
-			class: clusterClassWithControlPlaneInfra,
+			blueprint: &scope.ClusterBlueprint{
+				ClusterClass:                  clusterClassWithControlPlaneInfra,
+				InfrastructureClusterTemplate: infraClusterTemplate,
+				ControlPlane: &scope.ControlPlaneBlueprint{
+					Template:                      controlPlaneTemplateWithInfrastructureMachine,
+					InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate,
+				},
+				MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{
+					"mdClass": {
+						BootstrapTemplate:             machineDeploymentBootstrap,
+						InfrastructureMachineTemplate: machineDeploymentInfrastructure,
+					},
+				},
+			},
 			objects: []client.Object{
 				infraCluster,
 				clusterClassWithControlPlaneInfra,
@@ -477,8 +626,27 @@
 			cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
 				WithInfrastructureCluster(infraCluster).
 				WithControlPlane(controlPlaneWithInfra).
+				WithTopology(builder.ClusterTopology().
+					WithMachineDeployment(clusterv1.MachineDeploymentTopology{
+						Class: "mdClass",
+						Name:  "md1",
+					}).
+					Build()).
 				Build(),
-			class: clusterClassWithControlPlaneInfra,
+			blueprint: &scope.ClusterBlueprint{
+				ClusterClass:                  clusterClassWithControlPlaneInfra,
+				InfrastructureClusterTemplate: infraClusterTemplate,
+				ControlPlane: &scope.ControlPlaneBlueprint{
+					Template:                      controlPlaneTemplateWithInfrastructureMachine,
+					InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate,
+				},
+				MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{
+					"mdClass": {
+						BootstrapTemplate:             machineDeploymentBootstrap,
+						InfrastructureMachineTemplate: machineDeploymentInfrastructure,
+					},
+				},
+			},
 			objects: []client.Object{
 				infraCluster,
 				clusterClassWithControlPlaneInfra,
@@ -495,6 +663,12 @@
 				Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").
 					WithInfrastructureCluster(infraCluster).
 					WithControlPlane(controlPlaneWithInfra).
+					WithTopology(builder.ClusterTopology().
+						WithMachineDeployment(clusterv1.MachineDeploymentTopology{
+							Class: "mdClass",
+							Name:  "md1",
+						}).
+						Build()).
 					Build(),
 				ControlPlane: &scope.ControlPlaneState{
 					Object: controlPlaneWithInfra,
@@ -519,7 +693,7 @@
 			// Sets up a scope with a Blueprint.
 			s := scope.New(tt.cluster)
-			s.Blueprint = &scope.ClusterBlueprint{ClusterClass: tt.class}
+			s.Blueprint = tt.blueprint
 
 			// Sets up the fakeClient for the test case.
 			objs := []client.Object{}
@@ -562,3 +736,102 @@ func TestGetCurrentState(t *testing.T) {
 		})
 	}
 }
+
+func TestAlignRefAPIVersion(t *testing.T) {
+	tests := []struct {
+		name                     string
+		templateFromClusterClass *unstructured.Unstructured
+		currentRef               *corev1.ObjectReference
+		want                     *corev1.ObjectReference
+		wantErr                  bool
+	}{
+		{
+			name: "Error for invalid apiVersion",
+			templateFromClusterClass: &unstructured.Unstructured{Object: map[string]interface{}{
+				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
+				"kind":       "DockerCluster",
+			}},
+			currentRef: &corev1.ObjectReference{
+				APIVersion: "invalid/api/version",
+				Kind:       "DockerCluster",
+				Name:       "my-cluster-abc",
+				Namespace:  metav1.NamespaceDefault,
+			},
+			wantErr: true,
+		},
+		{
+			name: "Use apiVersion from ClusterClass: group and kind is the same",
+			templateFromClusterClass: &unstructured.Unstructured{Object: map[string]interface{}{
+				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
+				"kind":       "DockerCluster",
+			}},
+			currentRef: &corev1.ObjectReference{
+				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2",
+				Kind:       "DockerCluster",
+				Name:       "my-cluster-abc",
+				Namespace:  metav1.NamespaceDefault,
+			},
+			want: &corev1.ObjectReference{
+				// Group & kind is the same => apiVersion is taken from ClusterClass.
+				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
+				Kind:       "DockerCluster",
+				Name:       "my-cluster-abc",
+				Namespace:  metav1.NamespaceDefault,
+			},
+		},
+		{
+			name: "Use apiVersion from currentRef: kind is different",
+			templateFromClusterClass: &unstructured.Unstructured{Object: map[string]interface{}{
+				"apiVersion": "bootstrap.cluster.x-k8s.io/v1alpha4",
+				"kind":       "DifferentConfigTemplate",
+			}},
+			currentRef: &corev1.ObjectReference{
+				APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
+				Kind:       "KubeadmConfigTemplate",
+				Name:       "my-cluster-abc",
+				Namespace:  metav1.NamespaceDefault,
+			},
+			want: &corev1.ObjectReference{
+				// kind is different => apiVersion is taken from currentRef.
+				APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
+				Kind:       "KubeadmConfigTemplate",
+				Name:       "my-cluster-abc",
+				Namespace:  metav1.NamespaceDefault,
+			},
+		},
+		{
+			name: "Use apiVersion from currentRef: group is different",
+			templateFromClusterClass: &unstructured.Unstructured{Object: map[string]interface{}{
+				"apiVersion": "bootstrap2.cluster.x-k8s.io/v1beta1",
+				"kind":       "KubeadmConfigTemplate",
+			}},
+			currentRef: &corev1.ObjectReference{
+				APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
+				Kind:       "KubeadmConfigTemplate",
+				Name:       "my-cluster-abc",
+				Namespace:  metav1.NamespaceDefault,
+			},
+			want: &corev1.ObjectReference{
+				// group is different => apiVersion is taken from currentRef.
+ APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", + Kind: "KubeadmConfigTemplate", + Name: "my-cluster-abc", + Namespace: metav1.NamespaceDefault, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got, err := alignRefAPIVersion(tt.templateFromClusterClass, tt.currentRef) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(got).To(Equal(tt.want)) + }) + } +} diff --git a/internal/controllers/topology/cluster/desired_state.go b/internal/controllers/topology/cluster/desired_state.go index 113d6fe9836b..4bca66601ef7 100644 --- a/internal/controllers/topology/cluster/desired_state.go +++ b/internal/controllers/topology/cluster/desired_state.go @@ -24,6 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/storage/names" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" @@ -80,7 +81,10 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (* // Compute the desired state for the Cluster object adding a reference to the // InfrastructureCluster and the ControlPlane objects generated by the previous step. - desiredState.Cluster = computeCluster(ctx, s, desiredState.InfrastructureCluster, desiredState.ControlPlane.Object) + desiredState.Cluster, err = computeCluster(ctx, s, desiredState.InfrastructureCluster, desiredState.ControlPlane.Object) + if err != nil { + return nil, errors.Wrapf(err, "failed to compute Cluster") + } // If required, compute the desired state of the MachineDeployments from the list of MachineDeploymentTopologies // defined in the cluster. @@ -201,7 +205,23 @@ func (r *Reconciler) computeControlPlane(ctx context.Context, s *scope.Scope, in // If the ClusterClass mandates the controlPlane has infrastructureMachines, add a reference to InfrastructureMachine // template and metadata to be used for the control plane machines. if s.Blueprint.HasControlPlaneInfrastructureMachine() { - if err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Set(controlPlane, infrastructureMachineTemplate); err != nil { + // We have to copy the template to avoid modifying the one from desired state. + refCopy := infrastructureMachineTemplate.DeepCopy() + + // If the ControlPlane already exists, avoid downgrading the version if it was bumped + // by the control plane controller in the meantime. + if s.Current.ControlPlane.Object != nil { + currentRef, err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(s.Current.ControlPlane.Object) + if err != nil { + return nil, errors.Wrapf(err, "failed get spec.machineTemplate.infrastructureRef from the ControlPlane object") + } + desiredRef, err := calculateRefDesiredAPIVersion(currentRef, refCopy) + if err != nil { + return nil, errors.Wrap(err, "failed to calculate desired spec.machineTemplate.infrastructureRef") + } + refCopy.SetAPIVersion(desiredRef.APIVersion) + } + if err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Set(controlPlane, refCopy); err != nil { return nil, errors.Wrap(err, "failed to spec.machineTemplate.infrastructureRef in the ControlPlane object") } @@ -406,7 +426,7 @@ func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Sc // NOTE: Some fields of the Cluster’s fields contribute to defining the Cluster blueprint (e.g. 
Cluster.Spec.Topology), // while some other fields should be managed as part of the actual Cluster (e.g. Cluster.Spec.ControlPlaneRef); in this func // we are concerned only about the latest group of fields. -func computeCluster(_ context.Context, s *scope.Scope, infrastructureCluster, controlPlane *unstructured.Unstructured) *clusterv1.Cluster { +func computeCluster(_ context.Context, s *scope.Scope, infrastructureCluster, controlPlane *unstructured.Unstructured) (*clusterv1.Cluster, error) { cluster := s.Current.Cluster.DeepCopy() // Enforce the topology labels. @@ -420,10 +440,45 @@ func computeCluster(_ context.Context, s *scope.Scope, infrastructureCluster, co // Set the references to the infrastructureCluster and controlPlane objects. // NOTE: Once set for the first time, the references are not expected to change. - cluster.Spec.InfrastructureRef = contract.ObjToRef(infrastructureCluster) - cluster.Spec.ControlPlaneRef = contract.ObjToRef(controlPlane) + var err error + cluster.Spec.InfrastructureRef, err = calculateRefDesiredAPIVersion(cluster.Spec.InfrastructureRef, infrastructureCluster) + if err != nil { + return nil, errors.Wrapf(err, "failed to calculate infrastructureRef") + } + cluster.Spec.ControlPlaneRef, err = calculateRefDesiredAPIVersion(cluster.Spec.ControlPlaneRef, controlPlane) + if err != nil { + return nil, errors.Wrapf(err, "failed to calculate controlPlaneRef") + } + + return cluster, nil +} - return cluster +// calculateRefDesiredAPIVersion returns the desired ref calculated from desiredReferencedObject +// so it doesn't override the version in apiVersion stored in the currentRef, if any. +// This is required because the apiVersion in the desired ref is aligned to the apiVersion used +// in ClusterClass when reading the current state. If the currentRef is nil or group or kind +// doesn't match, no changes are applied to desired ref. +func calculateRefDesiredAPIVersion(currentRef *corev1.ObjectReference, desiredReferencedObject *unstructured.Unstructured) (*corev1.ObjectReference, error) { + desiredRef := contract.ObjToRef(desiredReferencedObject) + // If ref is not set yet, just set a ref to the desired referenced object. + if currentRef == nil { + return desiredRef, nil + } + + currentGV, err := schema.ParseGroupVersion(currentRef.APIVersion) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse apiVersion %q of current ref", currentRef.APIVersion) + } + desiredGK := desiredReferencedObject.GroupVersionKind().GroupKind() + + // Keep the apiVersion of the current ref if the group and kind is already correct. + // We only want to change the apiVersion to update the group, as it should be possible + // for other controllers to bump the version if necessary (i.e. if there is a newer + // version of the CRD compared to the one that the topology controller is working on). + if currentGV.Group == desiredGK.Group && currentRef.Kind == desiredGK.Kind { + desiredRef.APIVersion = currentRef.APIVersion + } + return desiredRef, nil } // computeMachineDeployments computes the desired state of the list of MachineDeployments. 
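As a reviewer aid, here is a minimal sketch (not part of the patch; the example function name and object names are hypothetical, and it assumes it lives in the same package as the helper) of the behaviour calculateRefDesiredAPIVersion is expected to have, mirroring the test cases added in desired_state_test.go:

// exampleCalculateRefDesiredAPIVersion is a hypothetical illustration only.
func exampleCalculateRefDesiredAPIVersion() (*corev1.ObjectReference, error) {
	// The ref as currently stored in the Cluster, with a version already bumped
	// (e.g. by another controller) to v1beta2.
	currentRef := &corev1.ObjectReference{
		APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2",
		Kind:       "DockerCluster",
		Name:       "my-cluster-abc",
		Namespace:  metav1.NamespaceDefault,
	}
	// The desired referenced object computed from the ClusterClass, still on v1beta1.
	desiredObj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
		"kind":       "DockerCluster",
		"metadata": map[string]interface{}{
			"name":      "my-cluster-abc",
			"namespace": metav1.NamespaceDefault,
		},
	}}
	// Group and kind match, so the returned ref keeps the current apiVersion
	// (v1beta2) instead of downgrading it to the ClusterClass version.
	return calculateRefDesiredAPIVersion(currentRef, desiredObj)
}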
@@ -437,7 +492,7 @@ func computeMachineDeployments(ctx context.Context, s *scope.Scope, desiredContr
 	for _, mdTopology := range s.Blueprint.Topology.Workers.MachineDeployments {
 		desiredMachineDeployment, err := computeMachineDeployment(ctx, s, desiredControlPlaneState, mdTopology)
 		if err != nil {
-			return nil, err
+			return nil, errors.Wrapf(err, "failed to compute MachineDeployment for topology %q", mdTopology.Name)
 		}
 		machineDeploymentsStateMap[mdTopology.Name] = desiredMachineDeployment
 	}
@@ -513,11 +568,19 @@ func computeMachineDeployment(_ context.Context, s *scope.Scope, desiredControlP
 	}
 
 	// Compute the MachineDeployment object.
-	gv := clusterv1.GroupVersion
+	desiredBootstrapTemplateRef, err := calculateRefDesiredAPIVersion(currentBootstrapTemplateRef, desiredMachineDeployment.BootstrapTemplate)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to calculate desired bootstrap template ref")
+	}
+	desiredInfraMachineTemplateRef, err := calculateRefDesiredAPIVersion(currentInfraMachineTemplateRef, desiredMachineDeployment.InfrastructureMachineTemplate)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to calculate desired infrastructure machine template ref")
+	}
+
 	desiredMachineDeploymentObj := &clusterv1.MachineDeployment{
 		TypeMeta: metav1.TypeMeta{
-			Kind:       gv.WithKind("MachineDeployment").Kind,
-			APIVersion: gv.String(),
+			Kind:       clusterv1.GroupVersion.WithKind("MachineDeployment").Kind,
+			APIVersion: clusterv1.GroupVersion.String(),
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name: names.SimpleNameGenerator.GenerateName(fmt.Sprintf("%s-%s-", s.Current.Cluster.Name, machineDeploymentTopology.Name)),
@@ -533,8 +596,8 @@ func computeMachineDeployment(_ context.Context, s *scope.Scope, desiredControlP
 		Spec: clusterv1.MachineSpec{
 			ClusterName: s.Current.Cluster.Name,
 			Version:     pointer.String(version),
-			Bootstrap:         clusterv1.Bootstrap{ConfigRef: contract.ObjToRef(desiredMachineDeployment.BootstrapTemplate)},
-			InfrastructureRef: *contract.ObjToRef(desiredMachineDeployment.InfrastructureMachineTemplate),
+			Bootstrap:         clusterv1.Bootstrap{ConfigRef: desiredBootstrapTemplateRef},
+			InfrastructureRef: *desiredInfraMachineTemplateRef,
 			FailureDomain:    machineDeploymentTopology.FailureDomain,
 			NodeDrainTimeout: machineDeploymentTopology.NodeDrainTimeout,
 		},
diff --git a/internal/controllers/topology/cluster/desired_state_test.go b/internal/controllers/topology/cluster/desired_state_test.go
index eed9411e2cda..e40cb767d97a 100644
--- a/internal/controllers/topology/cluster/desired_state_test.go
+++ b/internal/controllers/topology/cluster/desired_state_test.go
@@ -378,17 +378,18 @@ func TestComputeControlPlane(t *testing.T) {
 	}
 
 	// aggregating current cluster objects into ClusterState (simulating getCurrentState)
-	scope := scope.New(cluster)
-	scope.Blueprint = blueprint
+	s := scope.New(cluster)
+	s.Blueprint = blueprint
+	s.Current.ControlPlane = &scope.ControlPlaneState{}
 
 	r := &Reconciler{}
 
-	obj, err := r.computeControlPlane(ctx, scope, infrastructureMachineTemplate)
+	obj, err := r.computeControlPlane(ctx, s, infrastructureMachineTemplate)
 	g.Expect(err).ToNot(HaveOccurred())
 	g.Expect(obj).ToNot(BeNil())
 
 	assertTemplateToObject(g, assertTemplateInput{
-		cluster:     scope.Current.Cluster,
+		cluster:     s.Current.Cluster,
 		templateRef: blueprint.ClusterClass.Spec.ControlPlane.Ref,
 		template:    blueprint.ControlPlane.Template,
 		currentRef:  nil,
@@ -397,12 +398,12 @@
 
 	gotMetadata, err := contract.ControlPlane().MachineTemplate().Metadata().Get(obj)
 	g.Expect(err).ToNot(HaveOccurred())
-	expectedLabels := mergeMap(scope.Current.Cluster.Spec.Topology.ControlPlane.Metadata.Labels, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Labels)
+	expectedLabels := mergeMap(s.Current.Cluster.Spec.Topology.ControlPlane.Metadata.Labels, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Labels)
 	expectedLabels[clusterv1.ClusterLabelName] = cluster.Name
 	expectedLabels[clusterv1.ClusterTopologyOwnedLabel] = ""
 	g.Expect(gotMetadata).To(Equal(&clusterv1.ObjectMeta{
 		Labels:      expectedLabels,
-		Annotations: mergeMap(scope.Current.Cluster.Spec.Topology.ControlPlane.Metadata.Annotations, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Annotations),
+		Annotations: mergeMap(s.Current.Cluster.Spec.Topology.ControlPlane.Metadata.Annotations, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Annotations),
 	}))
 
 	assertNestedField(g, obj, version, contract.ControlPlane().Version().Path()...)
@@ -1227,7 +1228,8 @@ func TestComputeCluster(t *testing.T) {
 	// aggregating current cluster objects into ClusterState (simulating getCurrentState)
 	scope := scope.New(cluster)
 
-	obj := computeCluster(ctx, scope, infrastructureCluster, controlPlane)
+	obj, err := computeCluster(ctx, scope, infrastructureCluster, controlPlane)
+	g.Expect(err).ToNot(HaveOccurred())
 	g.Expect(obj).ToNot(BeNil())
 
 	// TypeMeta
@@ -2066,3 +2068,135 @@ func Test_computeMachineHealthCheck(t *testing.T) {
 		g.Expect(got).To(Equal(want), cmp.Diff(got, want))
 	})
 }
+
+func TestCalculateRefDesiredAPIVersion(t *testing.T) {
+	tests := []struct {
+		name                    string
+		currentRef              *corev1.ObjectReference
+		desiredReferencedObject *unstructured.Unstructured
+		want                    *corev1.ObjectReference
+		wantErr                 bool
+	}{
+		{
+			name: "Return desired ref if current ref is nil",
+			desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{
+				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
+				"kind":       "DockerCluster",
+				"metadata": map[string]interface{}{
+					"name":      "my-cluster-abc",
+					"namespace": metav1.NamespaceDefault,
+				},
+			}},
+			want: &corev1.ObjectReference{
+				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
+				Kind:       "DockerCluster",
+				Name:       "my-cluster-abc",
+				Namespace:  metav1.NamespaceDefault,
+			},
+		},
+		{
+			name: "Error for invalid apiVersion",
+			currentRef: &corev1.ObjectReference{
+				APIVersion: "invalid/api/version",
+				Kind:       "DockerCluster",
+				Name:       "my-cluster-abc",
+				Namespace:  metav1.NamespaceDefault,
+			},
+			desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{
+				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
+				"kind":       "DockerCluster",
+				"metadata": map[string]interface{}{
+					"name":      "my-cluster-abc",
+					"namespace": metav1.NamespaceDefault,
+				},
+			}},
+			wantErr: true,
+		},
+		{
+			name: "Return desired ref if group changed",
+			currentRef: &corev1.ObjectReference{
+				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
+				Kind:       "DockerCluster",
+				Name:       "my-cluster-abc",
+				Namespace:  metav1.NamespaceDefault,
+			},
+			desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{
+				"apiVersion": "infrastructure2.cluster.x-k8s.io/v1beta1",
+				"kind":       "DockerCluster",
+				"metadata": map[string]interface{}{
+					"name":      "my-cluster-abc",
+					"namespace": metav1.NamespaceDefault,
+				},
+			}},
+			want: &corev1.ObjectReference{
+				// Group changed => apiVersion is taken from desired.
+ APIVersion: "infrastructure2.cluster.x-k8s.io/v1beta1", + Kind: "DockerCluster", + Name: "my-cluster-abc", + Namespace: metav1.NamespaceDefault, + }, + }, + { + name: "Return desired ref if kind changed", + currentRef: &corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + Kind: "DockerCluster", + Name: "my-cluster-abc", + Namespace: metav1.NamespaceDefault, + }, + desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{ + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "kind": "DockerCluster2", + "metadata": map[string]interface{}{ + "name": "my-cluster-abc", + "namespace": metav1.NamespaceDefault, + }, + }}, + want: &corev1.ObjectReference{ + // Kind changed => apiVersion is taken from desired. + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + Kind: "DockerCluster2", + Name: "my-cluster-abc", + Namespace: metav1.NamespaceDefault, + }, + }, + { + name: "Return current apiVersion if group and kind are the same", + currentRef: &corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2", + Kind: "DockerCluster", + Name: "my-cluster-abc", + Namespace: metav1.NamespaceDefault, + }, + desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{ + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "kind": "DockerCluster", + "metadata": map[string]interface{}{ + "name": "my-cluster-abc", + "namespace": metav1.NamespaceDefault, + }, + }}, + want: &corev1.ObjectReference{ + // Group and kind are the same => apiVersion is taken from currentRef. + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2", + Kind: "DockerCluster", + Name: "my-cluster-abc", + Namespace: metav1.NamespaceDefault, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got, err := calculateRefDesiredAPIVersion(tt.currentRef, tt.desiredReferencedObject) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(got).To(Equal(tt.want)) + }) + } +} diff --git a/internal/controllers/topology/cluster/structuredmerge/dryrun.go b/internal/controllers/topology/cluster/structuredmerge/dryrun.go index bc4998532e96..79fd9adf312f 100644 --- a/internal/controllers/topology/cluster/structuredmerge/dryrun.go +++ b/internal/controllers/topology/cluster/structuredmerge/dryrun.go @@ -28,6 +28,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/internal/contract" + "sigs.k8s.io/cluster-api/util/conversion" ) type dryRunSSAPatchInput struct { @@ -128,6 +129,10 @@ func cleanupManagedFieldsAndAnnotation(obj *unstructured.Unstructured) error { value: obj.Object, shouldFilter: isIgnorePath([]contract.Path{ {"metadata", "annotations", clusterv1.TopologyDryRunAnnotation}, + // In case the ClusterClass we are reconciling is using not the latest apiVersion the conversion + // annotation might be added to objects. As we don't care about differences in conversion as we + // are working on the old apiVersion we want to ignore the annotation when diffing. 
+ {"metadata", "annotations", conversion.DataAnnotation}, }), }) @@ -161,6 +166,7 @@ func cleanupManagedFieldsAndAnnotation(obj *unstructured.Unstructured) error { value: fieldsV1, shouldFilter: isIgnorePath([]contract.Path{ {"f:metadata", "f:annotations", "f:" + clusterv1.TopologyDryRunAnnotation}, + {"f:metadata", "f:annotations", "f:" + conversion.DataAnnotation}, }), }) diff --git a/internal/controllers/topology/cluster/structuredmerge/dryrun_test.go b/internal/controllers/topology/cluster/structuredmerge/dryrun_test.go index ebe2eb39af54..80945cc186df 100644 --- a/internal/controllers/topology/cluster/structuredmerge/dryrun_test.go +++ b/internal/controllers/topology/cluster/structuredmerge/dryrun_test.go @@ -25,11 +25,12 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/conversion" ) func Test_cleanupManagedFieldsAndAnnotation(t *testing.T) { - rawManagedFieldWithAnnotation := `{"f:metadata":{"f:annotations":{"f:topology.cluster.x-k8s.io/dry-run":{}}}}` - rawManagedFieldWithAnnotationSpecLabels := `{"f:metadata":{"f:annotations":{"f:topology.cluster.x-k8s.io/dry-run":{}},"f:labels":{}},"f:spec":{"f:foo":{}}}` + rawManagedFieldWithAnnotation := `{"f:metadata":{"f:annotations":{"f:topology.cluster.x-k8s.io/dry-run":{},"f:cluster.x-k8s.io/conversion-data":{}}}}` + rawManagedFieldWithAnnotationSpecLabels := `{"f:metadata":{"f:annotations":{"f:topology.cluster.x-k8s.io/dry-run":{},"f:cluster.x-k8s.io/conversion-data":{}},"f:labels":{}},"f:spec":{"f:foo":{}}}` rawManagedFieldWithSpecLabels := `{"f:metadata":{"f:labels":{}},"f:spec":{"f:foo":{}}}` tests := []struct { @@ -44,7 +45,7 @@ func Test_cleanupManagedFieldsAndAnnotation(t *testing.T) { wantErr: false, }, { - name: "filter out annotation", + name: "filter out dry-run annotation", obj: newObjectBuilder(). WithAnnotation(clusterv1.TopologyDryRunAnnotation, ""). Build(), @@ -52,6 +53,15 @@ func Test_cleanupManagedFieldsAndAnnotation(t *testing.T) { want: newObjectBuilder(). Build(), }, + { + name: "filter out conversion annotation", + obj: newObjectBuilder(). + WithAnnotation(conversion.DataAnnotation, ""). + Build(), + wantErr: false, + want: newObjectBuilder(). + Build(), + }, { name: "managedFields: should drop managed fields of other manager", obj: newObjectBuilder(). diff --git a/internal/controllers/topology/cluster/util.go b/internal/controllers/topology/cluster/util.go index c1ef4421c39a..1a5f6ddd8fe5 100644 --- a/internal/controllers/topology/cluster/util.go +++ b/internal/controllers/topology/cluster/util.go @@ -25,7 +25,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/cluster-api/controllers/external" - utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) // bootstrapTemplateNamePrefix calculates the name prefix for a BootstrapTemplate. @@ -44,14 +43,10 @@ func controlPlaneInfrastructureMachineTemplateNamePrefix(clusterName string) str } // getReference gets the object referenced in ref. -// If necessary, it updates the ref to the latest apiVersion of the current contract. 
 func (r *Reconciler) getReference(ctx context.Context, ref *corev1.ObjectReference) (*unstructured.Unstructured, error) {
 	if ref == nil {
 		return nil, errors.New("reference is not set")
 	}
-	if err := utilconversion.UpdateReferenceAPIContract(ctx, r.Client, r.APIReader, ref); err != nil {
-		return nil, err
-	}
 
 	obj, err := external.Get(ctx, r.UnstructuredCachingClient, ref, ref.Namespace)
 	if err != nil {
diff --git a/internal/controllers/topology/cluster/util_test.go b/internal/controllers/topology/cluster/util_test.go
index accd83f15f89..ab98226136d1 100644
--- a/internal/controllers/topology/cluster/util_test.go
+++ b/internal/controllers/topology/cluster/util_test.go
@@ -76,13 +76,12 @@ func TestGetReference(t *testing.T) {
 			wantErr: true,
 		},
 		{
-			name: "Get object and update the ref",
+			name: "Get object fails: object does not exist with this apiVersion",
 			ref:  contract.ObjToRef(controlPlaneTemplate),
 			objects: []client.Object{
 				controlPlaneTemplatev99,
 			},
-			want:    controlPlaneTemplatev99,
-			wantRef: contract.ObjToRef(controlPlaneTemplatev99),
+			wantErr: true,
 		},
 	}
 	for _, tt := range tests {
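The counterpart on the current-state side is alignRefAPIVersion, which replaces the removed contract-based ref upgrading. A minimal sketch of its contract (again not part of the patch; the example function name is hypothetical, and it assumes the same package as current_state.go), mirroring TestAlignRefAPIVersion above:

// exampleAlignRefAPIVersion is a hypothetical illustration only.
func exampleAlignRefAPIVersion() (*corev1.ObjectReference, error) {
	// The template as defined in the ClusterClass, on v1beta1.
	templateFromClusterClass := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
		"kind":       "DockerCluster",
	}}
	// The ref currently stored in the Cluster, on v1beta2.
	currentRef := &corev1.ObjectReference{
		APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2",
		Kind:       "DockerCluster",
		Name:       "my-cluster-abc",
		Namespace:  metav1.NamespaceDefault,
	}
	// Group and kind match, so the returned copy is rewritten to the ClusterClass
	// apiVersion (v1beta1); current and desired state are then diffed on the same
	// version. If group or kind differed, the currentRef would be copied as-is.
	return alignRefAPIVersion(templateFromClusterClass, currentRef)
}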