diff --git a/.gitignore b/.gitignore index 7c17efc3df3f..30c29372526d 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,7 @@ hack/tools/bin # E2E test templates test/e2e/data/infrastructure-docker/v1alpha3/cluster-template*.yaml test/e2e/data/infrastructure-docker/v1alpha4/cluster-template*.yaml +test/e2e/data/infrastructure-docker/v1.2/cluster-template*.yaml test/e2e/data/infrastructure-docker/v1beta1/cluster-template*.yaml # E2e test extension deployment diff --git a/test/e2e/Makefile b/test/e2e/Makefile index 7cce002bc9ec..1137e049e950 100644 --- a/test/e2e/Makefile +++ b/test/e2e/Makefile @@ -66,7 +66,7 @@ $(KUSTOMIZE_BIN): $(KUSTOMIZE) ## Build a local copy of kustomize DOCKER_TEMPLATES := $(REPO_ROOT)/test/e2e/data/infrastructure-docker .PHONY: cluster-templates -cluster-templates: $(KUSTOMIZE) cluster-templates-v1alpha3 cluster-templates-v1alpha4 cluster-templates-v1beta1 ## Generate cluster templates for all versions +cluster-templates: $(KUSTOMIZE) cluster-templates-v1alpha3 cluster-templates-v1alpha4 cluster-templates-v1.2 cluster-templates-v1beta1 ## Generate cluster templates for all versions cluster-templates-v1alpha3: $(KUSTOMIZE) ## Generate cluster templates for v1alpha3 $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1alpha3/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1alpha3/cluster-template.yaml @@ -84,6 +84,10 @@ cluster-templates-v1alpha4: $(KUSTOMIZE) ## Generate cluster templates for v1alp $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-kcp-scale-in --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-kcp-scale-in.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-ipv6 --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-ipv6.yaml +cluster-templates-v1.2: $(KUSTOMIZE) ## Generate cluster templates for v1.2 + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.2/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.2/cluster-template.yaml + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.2/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.2/cluster-template-topology.yaml + cluster-templates-v1beta1: $(KUSTOMIZE) ## Generate cluster templates for v1beta1 $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-md-remediation --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-md-remediation.yaml diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go index 8eec62783fba..a655a68dc99b 100644 --- a/test/e2e/clusterctl_upgrade.go +++ b/test/e2e/clusterctl_upgrade.go @@ -68,13 +68,18 @@ type ClusterctlUpgradeSpecInput struct { // InitWithProvidersContract can be used to override the INIT_WITH_PROVIDERS_CONTRACT e2e config variable with a specific // provider contract to use to initialise the secondary management cluster, e.g. 
`v1alpha3` InitWithProvidersContract string - SkipCleanup bool - ControlPlaneWaiters clusterctl.ControlPlaneWaiters - PreInit func(managementClusterProxy framework.ClusterProxy) - PreUpgrade func(managementClusterProxy framework.ClusterProxy) - PostUpgrade func(managementClusterProxy framework.ClusterProxy) - MgmtFlavor string - WorkloadFlavor string + // InitWithKubernetesVersion can be used to override the INIT_WITH_KUBERNETES_VERSION e2e config variable with a specific + // Kubernetes version to use to create the secondary management cluster, e.g. `v1.25.0` + InitWithKubernetesVersion string + // UpgradeClusterctlVariables can be used to set additional variables for clusterctl upgrade. + UpgradeClusterctlVariables map[string]string + SkipCleanup bool + ControlPlaneWaiters clusterctl.ControlPlaneWaiters + PreInit func(managementClusterProxy framework.ClusterProxy) + PreUpgrade func(managementClusterProxy framework.ClusterProxy) + PostUpgrade func(managementClusterProxy framework.ClusterProxy) + MgmtFlavor string + WorkloadFlavor string } // ClusterctlUpgradeSpec implements a test that verifies clusterctl upgrade of a management cluster. @@ -115,12 +120,15 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg testCancelWatches context.CancelFunc managementClusterName string - clusterctlBinaryURL string managementClusterNamespace *corev1.Namespace managementClusterCancelWatches context.CancelFunc managementClusterResources *clusterctl.ApplyClusterTemplateAndWaitResult managementClusterProxy framework.ClusterProxy + initClusterctlBinaryURL string + initContract string + initKubernetesVersion string + workLoadClusterName string ) @@ -130,6 +138,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName) Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName) Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) + var clusterctlBinaryURLTemplate string if input.InitWithBinary == "" { Expect(input.E2EConfig.Variables).To(HaveKey(initWithBinaryVariableName), "Invalid argument. %s variable must be defined when calling %s spec", initWithBinaryVariableName, specName) @@ -139,8 +148,27 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg clusterctlBinaryURLTemplate = input.InitWithBinary } clusterctlBinaryURLReplacer := strings.NewReplacer("{OS}", runtime.GOOS, "{ARCH}", runtime.GOARCH) - clusterctlBinaryURL = clusterctlBinaryURLReplacer.Replace(clusterctlBinaryURLTemplate) - Expect(input.E2EConfig.Variables).To(HaveKey(initWithKubernetesVersion)) + initClusterctlBinaryURL = clusterctlBinaryURLReplacer.Replace(clusterctlBinaryURLTemplate) + + // NOTE: by default we are considering all the providers, no matter of the contract. + // However, given that we want to test both v1alpha3 --> v1beta1 and v1alpha4 --> v1beta1, the INIT_WITH_PROVIDERS_CONTRACT + // variable can be used to select versions with a specific contract. 
+ initContract = "*" + if input.E2EConfig.HasVariable(initWithProvidersContract) { + initContract = input.E2EConfig.GetVariable(initWithProvidersContract) + } + if input.InitWithProvidersContract != "" { + initContract = input.InitWithProvidersContract + } + + if input.InitWithKubernetesVersion == "" { + Expect(input.E2EConfig.Variables).To(HaveKey(initWithKubernetesVersion), "Invalid argument. %s variable must be defined when calling %s spec", initWithKubernetesVersion, specName) + Expect(input.E2EConfig.Variables[initWithKubernetesVersion]).ToNot(BeEmpty(), "Invalid argument. %s variable can't be empty when calling %s spec", initWithKubernetesVersion, specName) + initKubernetesVersion = input.E2EConfig.GetVariable(initWithKubernetesVersion) + } else { + initKubernetesVersion = input.InitWithKubernetesVersion + } + Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion)) Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName) @@ -164,7 +192,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg Flavor: input.MgmtFlavor, Namespace: managementClusterNamespace.Name, ClusterName: managementClusterName, - KubernetesVersion: input.E2EConfig.GetVariable(initWithKubernetesVersion), + KubernetesVersion: initKubernetesVersion, ControlPlaneMachineCount: pointer.Int64Ptr(1), WorkerMachineCount: pointer.Int64Ptr(1), }, @@ -193,9 +221,8 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg managementClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name) // Download the older clusterctl version to be used for setting up the management cluster to be upgraded - - log.Logf("Downloading clusterctl binary from %s", clusterctlBinaryURL) - clusterctlBinaryPath := downloadToTmpFile(ctx, clusterctlBinaryURL) + log.Logf("Downloading clusterctl binary from %s", initClusterctlBinaryURL) + clusterctlBinaryPath := downloadToTmpFile(ctx, initClusterctlBinaryURL) defer os.Remove(clusterctlBinaryPath) // clean up err := os.Chmod(clusterctlBinaryPath, 0744) //nolint:gosec @@ -203,17 +230,6 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg By("Initializing the workload cluster with older versions of providers") - // NOTE: by default we are considering all the providers, no matter of the contract. - // However, given that we want to test both v1alpha3 --> v1beta1 and v1alpha4 --> v1beta1, the INIT_WITH_PROVIDERS_CONTRACT - // variable can be used to select versions with a specific contract. 
- contract := "*" - if input.E2EConfig.HasVariable(initWithProvidersContract) { - contract = input.E2EConfig.GetVariable(initWithProvidersContract) - } - if input.InitWithProvidersContract != "" { - contract = input.InitWithProvidersContract - } - if input.PreInit != nil { By("Running Pre-init steps against the management cluster") input.PreInit(managementClusterProxy) @@ -223,10 +239,10 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg ClusterctlBinaryPath: clusterctlBinaryPath, // use older version of clusterctl to init the management cluster ClusterProxy: managementClusterProxy, ClusterctlConfigPath: input.ClusterctlConfigPath, - CoreProvider: input.E2EConfig.GetProviderLatestVersionsByContract(contract, config.ClusterAPIProviderName)[0], - BootstrapProviders: input.E2EConfig.GetProviderLatestVersionsByContract(contract, config.KubeadmBootstrapProviderName), - ControlPlaneProviders: input.E2EConfig.GetProviderLatestVersionsByContract(contract, config.KubeadmControlPlaneProviderName), - InfrastructureProviders: input.E2EConfig.GetProviderLatestVersionsByContract(contract, input.E2EConfig.InfrastructureProviders()...), + CoreProvider: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, config.ClusterAPIProviderName)[0], + BootstrapProviders: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, config.KubeadmBootstrapProviderName), + ControlPlaneProviders: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, config.KubeadmControlPlaneProviderName), + InfrastructureProviders: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, input.E2EConfig.InfrastructureProviders()...), LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name), }, input.E2EConfig.GetIntervals(specName, "wait-controllers")...) @@ -313,6 +329,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg By("Upgrading providers to the latest version available") clusterctl.UpgradeManagementClusterAndWait(ctx, clusterctl.UpgradeManagementClusterAndWaitInput{ ClusterctlConfigPath: input.ClusterctlConfigPath, + ClusterctlVariables: input.UpgradeClusterctlVariables, ClusterProxy: managementClusterProxy, Contract: clusterv1.GroupVersion.Version, LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name), @@ -342,19 +359,34 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg // After upgrading we are sure the version is the latest version of the API, // so it is possible to use the standard helpers - testMachineDeployments := framework.GetMachineDeploymentsByCluster(ctx, framework.GetMachineDeploymentsByClusterInput{ - Lister: managementClusterProxy.GetClient(), - ClusterName: workLoadClusterName, - Namespace: testNamespace.Name, - }) - - framework.ScaleAndWaitMachineDeployment(ctx, framework.ScaleAndWaitMachineDeploymentInput{ - ClusterProxy: managementClusterProxy, - Cluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace.Name}}, - MachineDeployment: testMachineDeployments[0], - Replicas: 2, - WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + workloadCluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{ + Getter: managementClusterProxy.GetClient(), + Namespace: testNamespace.Name, + Name: workLoadClusterName, }) + if workloadCluster.Spec.Topology != nil { + // Cluster is using ClusterClass, scale up via topology. 
+ framework.ScaleAndWaitMachineDeploymentTopology(ctx, framework.ScaleAndWaitMachineDeploymentTopologyInput{ + ClusterProxy: managementClusterProxy, + Cluster: workloadCluster, + Replicas: 2, + WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + }) + } else { + // Cluster is not using ClusterClass, scale up via MachineDeployment. + testMachineDeployments := framework.GetMachineDeploymentsByCluster(ctx, framework.GetMachineDeploymentsByClusterInput{ + Lister: managementClusterProxy.GetClient(), + ClusterName: workLoadClusterName, + Namespace: testNamespace.Name, + }) + framework.ScaleAndWaitMachineDeployment(ctx, framework.ScaleAndWaitMachineDeploymentInput{ + ClusterProxy: managementClusterProxy, + Cluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace.Name}}, + MachineDeployment: testMachineDeployments[0], + Replicas: 2, + WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + }) + } By("THE UPGRADED MANAGEMENT CLUSTER WORKS!") diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go index 77e163a439ba..85e1976ebbbe 100644 --- a/test/e2e/clusterctl_upgrade_test.go +++ b/test/e2e/clusterctl_upgrade_test.go @@ -23,14 +23,68 @@ import ( . "github.com/onsi/ginkgo/v2" ) -var _ = Describe("When testing clusterctl upgrades [clusterctl-Upgrade]", func() { +var _ = Describe("When testing clusterctl upgrades (v0.3=>current)", func() { ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput { return ClusterctlUpgradeSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.25/clusterctl-{OS}-{ARCH}", + InitWithProvidersContract: "v1alpha3", + // CAPI v0.3.x does not work on Kubernetes >= v1.22. + InitWithKubernetesVersion: "v1.21.12", + // CAPI does not work with Kubernetes < v1.22 if ClusterClass is enabled, so we have to disable it. 
+ UpgradeClusterctlVariables: map[string]string{ + "CLUSTER_TOPOLOGY": "false", + }, + } + }) +}) + +var _ = Describe("When testing clusterctl upgrades (v0.4=>current)", func() { + ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput { + return ClusterctlUpgradeSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.8/clusterctl-{OS}-{ARCH}", + InitWithProvidersContract: "v1alpha4", + InitWithKubernetesVersion: "v1.25.0", + } + }) +}) + +var _ = Describe("When testing clusterctl upgrades (v1.2=>current)", func() { + ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput { + return ClusterctlUpgradeSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/clusterctl-{OS}-{ARCH}", + InitWithProvidersContract: "v1beta1", + InitWithKubernetesVersion: "v1.25.0", + } + }) +}) + +var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.2=>current)", func() { + ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput { + return ClusterctlUpgradeSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/clusterctl-{OS}-{ARCH}", + InitWithProvidersContract: "v1beta1", + InitWithKubernetesVersion: "v1.25.0", + WorkloadFlavor: "topology", } }) }) diff --git a/test/e2e/config/docker.yaml b/test/e2e/config/docker.yaml index 1049a569d913..3280e07fa353 100644 --- a/test/e2e/config/docker.yaml +++ b/test/e2e/config/docker.yaml @@ -31,8 +31,8 @@ providers: - name: cluster-api type: CoreProvider versions: - - name: v0.3.23 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/core-components.yaml" + - name: v0.3.25 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.25/core-components.yaml" type: "url" contract: v1alpha3 replacements: @@ -40,8 +40,8 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1alpha3/metadata.yaml" - - name: v0.4.4 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.4/core-components.yaml" + - name: v0.4.8 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.8/core-components.yaml" type: "url" contract: v1alpha4 replacements: @@ -49,8 +49,8 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1alpha4/metadata.yaml" - - name: v1.2.0 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. 
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/core-components.yaml" + - name: v1.2.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/core-components.yaml" type: "url" contract: v1beta1 replacements: @@ -69,8 +69,8 @@ providers: - name: kubeadm type: BootstrapProvider versions: - - name: v0.3.23 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/bootstrap-components.yaml" + - name: v0.3.25 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.25/bootstrap-components.yaml" type: "url" contract: v1alpha3 replacements: @@ -78,8 +78,8 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1alpha3/metadata.yaml" - - name: v0.4.4 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.4/bootstrap-components.yaml" + - name: v0.4.8 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.8/bootstrap-components.yaml" type: "url" contract: v1alpha4 replacements: @@ -87,8 +87,8 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1alpha4/metadata.yaml" - - name: v1.2.0 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/bootstrap-components.yaml" + - name: v1.2.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/bootstrap-components.yaml" type: "url" contract: v1beta1 replacements: @@ -107,8 +107,8 @@ providers: - name: kubeadm type: ControlPlaneProvider versions: - - name: v0.3.23 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/control-plane-components.yaml" + - name: v0.3.25 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.25/control-plane-components.yaml" type: "url" contract: v1alpha3 replacements: @@ -116,8 +116,8 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1alpha3/metadata.yaml" - - name: v0.4.4 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.4/control-plane-components.yaml" + - name: v0.4.8 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. 
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.8/control-plane-components.yaml" type: "url" contract: v1alpha4 replacements: @@ -125,8 +125,8 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1alpha4/metadata.yaml" - - name: v1.2.0 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/control-plane-components.yaml" + - name: v1.2.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/control-plane-components.yaml" type: "url" contract: v1beta1 replacements: @@ -145,8 +145,8 @@ providers: - name: docker type: InfrastructureProvider versions: - - name: v0.3.23 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/infrastructure-components-development.yaml" + - name: v0.3.25 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.25/infrastructure-components-development.yaml" type: "url" contract: v1alpha3 replacements: @@ -155,8 +155,8 @@ providers: files: - sourcePath: "../data/shared/v1alpha3/metadata.yaml" - sourcePath: "../data/infrastructure-docker/v1alpha3/cluster-template.yaml" - - name: v0.4.4 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.4/infrastructure-components-development.yaml" + - name: v0.4.8 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.8/infrastructure-components-development.yaml" type: "url" contract: v1alpha4 replacements: @@ -165,8 +165,8 @@ providers: files: - sourcePath: "../data/shared/v1alpha4/metadata.yaml" - sourcePath: "../data/infrastructure-docker/v1alpha4/cluster-template.yaml" - - name: v1.2.0 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/infrastructure-components-development.yaml" + - name: v1.2.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. 
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/infrastructure-components-development.yaml" type: "url" contract: v1beta1 replacements: @@ -174,7 +174,9 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1beta1/metadata.yaml" - - sourcePath: "../data/infrastructure-docker/v1beta1/cluster-template.yaml" + - sourcePath: "../data/infrastructure-docker/v1.2/cluster-template.yaml" + - sourcePath: "../data/infrastructure-docker/v1.2/cluster-template-topology.yaml" + - sourcePath: "../data/infrastructure-docker/v1.2/clusterclass-quick-start.yaml" - name: v1.3.99 # next; use manifest from source files value: ../../../test/infrastructure/docker/config/default replacements: @@ -226,12 +228,6 @@ variables: EXP_MACHINE_POOL: "true" CLUSTER_TOPOLOGY: "true" EXP_RUNTIME_SDK: "true" - # NOTE: INIT_WITH_BINARY and INIT_WITH_KUBERNETES_VERSION are only used by the clusterctl upgrade test to initialize - # the management cluster to be upgraded. - # NOTE: We test the latest release with a previous contract. - INIT_WITH_BINARY: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.7/clusterctl-{OS}-{ARCH}" - INIT_WITH_PROVIDERS_CONTRACT: "v1alpha4" - INIT_WITH_KUBERNETES_VERSION: "v1.25.0" intervals: default/wait-controllers: ["3m", "10s"] diff --git a/test/e2e/data/infrastructure-docker/v1.2/bases/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-docker/v1.2/bases/cluster-with-kcp.yaml new file mode 100644 index 000000000000..fa8720f9833f --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1.2/bases/cluster-with-kcp.yaml @@ -0,0 +1,95 @@ +--- +# DockerCluster object referenced by the Cluster object +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerCluster +metadata: + name: '${CLUSTER_NAME}' +spec: + failureDomains: + fd1: + controlPlane: true + fd2: + controlPlane: true + fd3: + controlPlane: true + fd4: + controlPlane: false + fd5: + controlPlane: false + fd6: + controlPlane: false + fd7: + controlPlane: false + fd8: + controlPlane: false +--- +# Cluster object with +# - Reference to the KubeadmControlPlane object +# - the label cni=${CLUSTER_NAME}-crs-0, so the cluster can be selected by the ClusterResourceSet. +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: '${CLUSTER_NAME}' + labels: + cni: "${CLUSTER_NAME}-crs-0" +spec: + clusterNetwork: + services: + cidrBlocks: ['${DOCKER_SERVICE_CIDRS}'] + pods: + cidrBlocks: ['${DOCKER_POD_CIDRS}'] + serviceDomain: '${DOCKER_SERVICE_DOMAIN}' + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerCluster + name: '${CLUSTER_NAME}' + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" +--- +# DockerMachineTemplate object referenced by the KubeadmControlPlane object +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +# KubeadmControlPlane referenced by the Cluster object with +# - the label kcp-adoption.step2, because it should be created in the second step of the kcp-adoption test. 
+kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: "${CLUSTER_NAME}-control-plane" + labels: + kcp-adoption.step2: "" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + machineTemplate: + infrastructureRef: + kind: DockerMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + clusterConfiguration: + controllerManager: + extraArgs: {enable-hostpath-provisioner: 'true'} + apiServer: + # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. + certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] + initConfiguration: + nodeRegistration: + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + joinConfiguration: + nodeRegistration: + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + version: "${KUBERNETES_VERSION}" diff --git a/test/e2e/data/infrastructure-docker/v1.2/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-docker/v1.2/bases/cluster-with-topology.yaml new file mode 100644 index 000000000000..26fe13f880bf --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1.2/bases/cluster-with-topology.yaml @@ -0,0 +1,33 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: '${CLUSTER_NAME}' + namespace: default + labels: + cni: "${CLUSTER_NAME}-crs-0" +spec: + clusterNetwork: + services: + cidrBlocks: ['${DOCKER_SERVICE_CIDRS}'] + pods: + cidrBlocks: ['${DOCKER_POD_CIDRS}'] + serviceDomain: '${DOCKER_SERVICE_DOMAIN}' + topology: + class: "quick-start" + version: "${KUBERNETES_VERSION}" + controlPlane: + metadata: {} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + workers: + machineDeployments: + - class: "default-worker" + name: "md-0" + replicas: ${WORKER_MACHINE_COUNT} + failureDomain: fd4 + variables: + # We set an empty value to use the default tag kubeadm init is using. + - name: etcdImageTag + value: "" + # We set an empty value to use the default tag kubeadm init is using. 
+ - name: coreDNSImageTag + value: "" diff --git a/test/e2e/data/infrastructure-docker/v1.2/bases/crs.yaml b/test/e2e/data/infrastructure-docker/v1.2/bases/crs.yaml new file mode 100644 index 000000000000..b1b61237dc62 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1.2/bases/crs.yaml @@ -0,0 +1,24 @@ +--- +# ConfigMap object referenced by the ClusterResourceSet object and with +# the CNI resource defined in the test config file +apiVersion: v1 +kind: ConfigMap +metadata: + name: "cni-${CLUSTER_NAME}-crs-0" +data: ${CNI_RESOURCES} +binaryData: +--- +# ClusterResourceSet object with +# a selector that targets all the Cluster with label cni=${CLUSTER_NAME}-crs-0 +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: "${CLUSTER_NAME}-crs-0" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + cni: "${CLUSTER_NAME}-crs-0" + resources: + - name: "cni-${CLUSTER_NAME}-crs-0" + kind: ConfigMap diff --git a/test/e2e/data/infrastructure-docker/v1.2/bases/md.yaml b/test/e2e/data/infrastructure-docker/v1.2/bases/md.yaml new file mode 100644 index 000000000000..11abbc29248a --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1.2/bases/md.yaml @@ -0,0 +1,52 @@ +--- +# DockerMachineTemplate referenced by the MachineDeployment and with +# - extraMounts for the docker sock, thus allowing self-hosting test +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +# KubeadmConfigTemplate referenced by the MachineDeployment +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' +--- +# MachineDeployment object +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + template: + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-md-0" + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-md-0" + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + failureDomain: fd4 diff --git a/test/e2e/data/infrastructure-docker/v1.2/cluster-template-topology/kustomization.yaml b/test/e2e/data/infrastructure-docker/v1.2/cluster-template-topology/kustomization.yaml new file mode 100644 index 000000000000..44c449548234 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1.2/cluster-template-topology/kustomization.yaml @@ -0,0 +1,3 @@ +resources: + - ../bases/cluster-with-topology.yaml + - ../bases/crs.yaml diff --git a/test/e2e/data/infrastructure-docker/v1.2/cluster-template/kustomization.yaml b/test/e2e/data/infrastructure-docker/v1.2/cluster-template/kustomization.yaml new file mode 100644 index 000000000000..adb5919cec6f --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1.2/cluster-template/kustomization.yaml @@ -0,0 +1,5 @@ +bases: +- ../bases/cluster-with-kcp.yaml +- ../bases/md.yaml +- ../bases/crs.yaml + diff --git 
a/test/e2e/data/infrastructure-docker/v1.2/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/v1.2/clusterclass-quick-start.yaml new file mode 100644 index 000000000000..b5eac232b244 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1.2/clusterclass-quick-start.yaml @@ -0,0 +1,290 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: quick-start +spec: + controlPlane: + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + name: quick-start-control-plane + machineInfrastructure: + ref: + kind: DockerMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: quick-start-control-plane + machineHealthCheck: + maxUnhealthy: 100% + unhealthyConditions: + - type: e2e.remediation.condition + status: "False" + timeout: 20s + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerClusterTemplate + name: quick-start-cluster + workers: + machineDeployments: + - class: default-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: quick-start-default-worker-bootstraptemplate + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: quick-start-default-worker-machinetemplate + machineHealthCheck: + maxUnhealthy: 100% + unhealthyConditions: + - type: e2e.remediation.condition + status: "False" + timeout: 20s + variables: + - name: lbImageRepository + required: true + schema: + openAPIV3Schema: + type: string + default: kindest + - name: etcdImageTag + required: true + schema: + openAPIV3Schema: + type: string + default: "" + example: "3.5.3-0" + description: "etcdImageTag sets the tag for the etcd image." + - name: coreDNSImageTag + required: true + schema: + openAPIV3Schema: + type: string + default: "" + example: "v1.8.5" + description: "coreDNSImageTag sets the tag for the coreDNS image." + - name: kubeadmControlPlaneMaxSurge + required: false + schema: + openAPIV3Schema: + type: string + default: "" + example: "0" + description: "kubeadmControlPlaneMaxSurge is the maximum number of control planes that can be scheduled above or under the desired number of control plane machines." + patches: + - name: lbImageRepository + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerClusterTemplate + matchResources: + infrastructureCluster: true + jsonPatches: + - op: add + path: "/spec/template/spec/loadBalancer" + valueFrom: + template: | + imageRepository: {{ .lbImageRepository }} + # We have to pin the cgroupDriver to cgroupfs for Kubernetes < v1.24 because kind does not support systemd for those versions, but kubeadm >= 1.21 defaults to systemd. + - name: cgroupDriver-controlPlane + description: | + Sets the cgroupDriver to cgroupfs if a Kubernetes version < v1.24 is referenced. + This is required because kind and the node images do not support the default + systemd cgroupDriver for kubernetes < v1.24. 
+ enabledIf: '{{ semverCompare "<= v1.23" .builtin.controlPlane.version }}' + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/cgroup-driver" + value: cgroupfs + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cgroup-driver" + value: cgroupfs + - name: cgroupDriver-machineDeployment + description: | + Sets the cgroupDriver to cgroupfs if a Kubernetes version < v1.24 is referenced. + This is required because kind and the node images do not support the default + systemd cgroupDriver for kubernetes < v1.24. + enabledIf: '{{ semverCompare "<= v1.23" .builtin.machineDeployment.version }}' + definitions: + - selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - default-worker + jsonPatches: + - op: add + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cgroup-driver" + value: cgroupfs + - name: etcdImageTag + description: "Sets tag to use for the etcd image in the KubeadmControlPlane." + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/etcd" + valueFrom: + template: | + local: + imageTag: {{ .etcdImageTag }} + - name: coreDNSImageTag + description: "Sets tag to use for the coreDNS image in the KubeadmControlPlane." + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/dns" + valueFrom: + template: | + imageTag: {{ .coreDNSImageTag }} + - name: customImage + description: "Sets the container image that is used for running dockerMachines for the controlPlane and default-worker machineDeployments." + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + matchResources: + machineDeploymentClass: + names: + - default-worker + jsonPatches: + - op: add + path: "/spec/template/spec/customImage" + valueFrom: + template: | + kindest/node:{{ .builtin.machineDeployment.version | replace "+" "_" }} + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/customImage" + valueFrom: + template: | + kindest/node:{{ .builtin.controlPlane.version | replace "+" "_" }} + - name: kubeadmControlPlaneMaxSurge + description: "Sets the maxSurge value used for rolloutStrategy in the KubeadmControlPlane."
+ enabledIf: '{{ ne .kubeadmControlPlaneMaxSurge "" }}' + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/rolloutStrategy/rollingUpdate/maxSurge + valueFrom: + template: "{{ .kubeadmControlPlaneMaxSurge }}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerClusterTemplate +metadata: + name: quick-start-cluster +spec: + template: + spec: + failureDomains: + fd1: + controlPlane: true + fd2: + controlPlane: true + fd3: + controlPlane: true + fd4: + controlPlane: false + fd5: + controlPlane: false + fd6: + controlPlane: false + fd7: + controlPlane: false + fd8: + controlPlane: false +--- +kind: KubeadmControlPlaneTemplate +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: quick-start-control-plane +spec: + template: + spec: + machineTemplate: + nodeDrainTimeout: 1s + kubeadmConfigSpec: + clusterConfiguration: + controllerManager: + extraArgs: { enable-hostpath-provisioner: 'true' } + apiServer: + # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. + certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] + initConfiguration: + nodeRegistration: + # We have to set the criSocket to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + joinConfiguration: + nodeRegistration: + # We have to set the criSocket to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: quick-start-control-plane +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: quick-start-default-worker-machinetemplate +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: quick-start-default-worker-bootstraptemplate +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + # We have to set the criSocket to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + diff --git a/test/framework/clusterctl/client.go b/test/framework/clusterctl/client.go index 28fbd0a1fad4..68ff47b10232 100644 --- a/test/framework/clusterctl/client.go +++ b/test/framework/clusterctl/client.go @@ -122,6 +122,8 @@ func InitWithBinary(_ context.Context, binary string, input InitInput) { type UpgradeInput struct { LogFolder string ClusterctlConfigPath string + ClusterctlVariables map[string]string + ClusterName string KubeconfigPath string Contract string } @@ -143,6 +145,14 @@ func Upgrade(ctx context.Context, input UpgradeInput) { WaitProviders: true, } + if 
len(input.ClusterctlVariables) > 0 { + amendClusterctlConfig(ctx, amendClusterctlConfigInput{ + ClusterctlConfigPath: input.ClusterctlConfigPath, + OutputPath: filepath.Join(filepath.Dir(input.ClusterctlConfigPath), fmt.Sprintf("clusterctl-upgrade-config-%s.yaml", input.ClusterName)), + Variables: input.ClusterctlVariables, + }) + } + clusterctlClient, log := getClusterctlClientWithLogger(input.ClusterctlConfigPath, "clusterctl-upgrade.log", input.LogFolder) defer log.Close() diff --git a/test/framework/clusterctl/clusterctl_config.go b/test/framework/clusterctl/clusterctl_config.go index b4b058533ab4..96b6d23322d3 100644 --- a/test/framework/clusterctl/clusterctl_config.go +++ b/test/framework/clusterctl/clusterctl_config.go @@ -42,7 +42,16 @@ type providerConfig struct { // write writes a clusterctl config file to disk. func (c *clusterctlConfig) write() { data, err := yaml.Marshal(c.Values) - Expect(err).ToNot(HaveOccurred(), "Failed to convert to yaml the clusterctl config file") + Expect(err).ToNot(HaveOccurred(), "Failed to marshal the clusterctl config file") Expect(os.WriteFile(c.Path, data, 0600)).To(Succeed(), "Failed to write the clusterctl config file") } + +// read reads a clusterctl config file from disk. +func (c *clusterctlConfig) read() { + data, err := os.ReadFile(c.Path) + Expect(err).ToNot(HaveOccurred()) + + err = yaml.Unmarshal(data, &c.Values) + Expect(err).ToNot(HaveOccurred(), "Failed to unmarshal the clusterctl config file") +} diff --git a/test/framework/clusterctl/clusterctl_helpers.go b/test/framework/clusterctl/clusterctl_helpers.go index 5f18ceb6e3fc..6d39fa5ded94 100644 --- a/test/framework/clusterctl/clusterctl_helpers.go +++ b/test/framework/clusterctl/clusterctl_helpers.go @@ -125,6 +125,7 @@ func InitManagementClusterAndWatchControllerLogs(ctx context.Context, input Init type UpgradeManagementClusterAndWaitInput struct { ClusterProxy framework.ClusterProxy ClusterctlConfigPath string + ClusterctlVariables map[string]string Contract string LogFolder string } @@ -139,6 +140,8 @@ func UpgradeManagementClusterAndWait(ctx context.Context, input UpgradeManagemen Upgrade(ctx, UpgradeInput{ ClusterctlConfigPath: input.ClusterctlConfigPath, + ClusterctlVariables: input.ClusterctlVariables, + ClusterName: input.ClusterProxy.GetName(), KubeconfigPath: input.ClusterProxy.GetKubeconfigPath(), Contract: input.Contract, LogFolder: input.LogFolder, diff --git a/test/framework/clusterctl/repository.go b/test/framework/clusterctl/repository.go index 7b46d8ae192b..2dccc709401a 100644 --- a/test/framework/clusterctl/repository.go +++ b/test/framework/clusterctl/repository.go @@ -137,6 +137,35 @@ func CreateRepository(ctx context.Context, input CreateRepositoryInput) string { return clusterctlConfigFile.Path } +// amendClusterctlConfigInput is the input for amendClusterctlConfig. +type amendClusterctlConfigInput struct { + ClusterctlConfigPath string + OutputPath string + Variables map[string]string +} + +// amendClusterctlConfig adds the given Variables to the clusterctl config and writes it +// to OutputPath. +func amendClusterctlConfig(_ context.Context, input amendClusterctlConfigInput) { + // Read clusterctl config from ClusterctlConfigPath. + clusterctlConfigFile := &clusterctlConfig{ + Path: input.ClusterctlConfigPath, + } + clusterctlConfigFile.read() + + // Overwrite variables. 
+ if clusterctlConfigFile.Values == nil { + clusterctlConfigFile.Values = map[string]interface{}{} + } + for key, value := range input.Variables { + clusterctlConfigFile.Values[key] = value + } + + // Write clusterctl config to OutputPath. + clusterctlConfigFile.Path = input.OutputPath + clusterctlConfigFile.write() +} + // YAMLForComponentSource returns the YAML for the provided component source. func YAMLForComponentSource(ctx context.Context, source ProviderVersionSource) ([]byte, error) { var data []byte diff --git a/test/framework/machinedeployment_helpers.go b/test/framework/machinedeployment_helpers.go index 345f4bc2851d..ea803b37e15e 100644 --- a/test/framework/machinedeployment_helpers.go +++ b/test/framework/machinedeployment_helpers.go @@ -421,3 +421,78 @@ func ScaleAndWaitMachineDeployment(ctx context.Context, input ScaleAndWaitMachin return nodeRefCount, nil }, input.WaitForMachineDeployments...).Should(Equal(int(*input.MachineDeployment.Spec.Replicas)), "Timed out waiting for Machine Deployment %s to have %d replicas", klog.KObj(input.MachineDeployment), *input.MachineDeployment.Spec.Replicas) } + +// ScaleAndWaitMachineDeploymentTopologyInput is the input for ScaleAndWaitMachineDeploymentTopology. +type ScaleAndWaitMachineDeploymentTopologyInput struct { + ClusterProxy ClusterProxy + Cluster *clusterv1.Cluster + Replicas int32 + WaitForMachineDeployments []interface{} +} + +// ScaleAndWaitMachineDeploymentTopology scales a MachineDeployment topology and waits until all its machines have a node ref and the machine count equals Replicas. +func ScaleAndWaitMachineDeploymentTopology(ctx context.Context, input ScaleAndWaitMachineDeploymentTopologyInput) { + Expect(ctx).NotTo(BeNil(), "ctx is required for ScaleAndWaitMachineDeploymentTopology") + Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ScaleAndWaitMachineDeploymentTopology") + Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling ScaleAndWaitMachineDeploymentTopology") + Expect(input.Cluster.Spec.Topology.Workers).ToNot(BeNil(), "Invalid argument. input.Cluster must have MachineDeployment topologies") + Expect(len(input.Cluster.Spec.Topology.Workers.MachineDeployments) >= 1).To(BeTrue(), "Invalid argument.
input.Cluster must have at least one MachineDeployment topology") + + mdTopology := input.Cluster.Spec.Topology.Workers.MachineDeployments[0] + log.Logf("Scaling machine deployment topology %s from %d to %d replicas", mdTopology.Name, *mdTopology.Replicas, input.Replicas) + patchHelper, err := patch.NewHelper(input.Cluster, input.ClusterProxy.GetClient()) + Expect(err).ToNot(HaveOccurred()) + mdTopology.Replicas = pointer.Int32Ptr(input.Replicas) + input.Cluster.Spec.Topology.Workers.MachineDeployments[0] = mdTopology + Eventually(func() error { + return patchHelper.Patch(ctx, input.Cluster) + }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to scale machine deployment topology %s", mdTopology.Name) + + log.Logf("Waiting for correct number of replicas to exist") + deploymentList := &clusterv1.MachineDeploymentList{} + Eventually(func() error { + return input.ClusterProxy.GetClient().List(ctx, deploymentList, + client.InNamespace(input.Cluster.Namespace), + client.MatchingLabels{ + clusterv1.ClusterLabelName: input.Cluster.Name, + clusterv1.ClusterTopologyMachineDeploymentLabelName: mdTopology.Name, + }, + ) + }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list MachineDeployments object for Cluster %s", klog.KRef(input.Cluster.Namespace, input.Cluster.Name)) + + Expect(deploymentList.Items).To(HaveLen(1)) + md := deploymentList.Items[0] + + Eventually(func() (int, error) { + selectorMap, err := metav1.LabelSelectorAsMap(&md.Spec.Selector) + if err != nil { + return -1, err + } + ms := &clusterv1.MachineSetList{} + if err := input.ClusterProxy.GetClient().List(ctx, ms, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels(selectorMap)); err != nil { + return -1, err + } + if len(ms.Items) == 0 { + return -1, errors.New("no machinesets were found") + } + machineSet := ms.Items[0] + selectorMap, err = metav1.LabelSelectorAsMap(&machineSet.Spec.Selector) + if err != nil { + return -1, err + } + machines := &clusterv1.MachineList{} + if err := input.ClusterProxy.GetClient().List(ctx, machines, client.InNamespace(machineSet.Namespace), client.MatchingLabels(selectorMap)); err != nil { + return -1, err + } + nodeRefCount := 0 + for _, machine := range machines.Items { + if machine.Status.NodeRef != nil { + nodeRefCount++ + } + } + if len(machines.Items) != nodeRefCount { + return -1, errors.New("Machine count does not match existing nodes count") + } + return nodeRefCount, nil + }, input.WaitForMachineDeployments...).Should(Equal(int(*md.Spec.Replicas)), "Timed out waiting for Machine Deployment %s to have %d replicas", klog.KObj(&md), *md.Spec.Replicas) +}
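
Note: the ClusterctlUpgradeSpecInput fields added by this patch (InitWithBinary, InitWithProvidersContract, InitWithKubernetesVersion, UpgradeClusterctlVariables, WorkloadFlavor) can also be consumed by out-of-tree provider E2E suites that import sigs.k8s.io/cluster-api/test/e2e. The following is a minimal sketch of such usage, assuming the suite-level globals (ctx, e2eConfig, clusterctlConfigPath, bootstrapClusterProxy, artifactFolder, skipCleanup) are populated in the provider's own suite setup, as they are in the CAPI e2e package; the package name, binary URL, and variable values are illustrative, not prescribed by this patch.

package e2e

import (
	"context"

	. "github.com/onsi/ginkgo/v2"

	capie2e "sigs.k8s.io/cluster-api/test/e2e"
	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
)

// Suite-level globals; in a real provider repo these are initialized in e2e_suite_test.go.
var (
	ctx                   = context.Background()
	e2eConfig             *clusterctl.E2EConfig
	clusterctlConfigPath  string
	bootstrapClusterProxy framework.ClusterProxy
	artifactFolder        string
	skipCleanup           bool
)

var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.2=>current) [provider sketch]", func() {
	capie2e.ClusterctlUpgradeSpec(ctx, func() capie2e.ClusterctlUpgradeSpecInput {
		return capie2e.ClusterctlUpgradeSpecInput{
			E2EConfig:             e2eConfig,
			ClusterctlConfigPath:  clusterctlConfigPath,
			BootstrapClusterProxy: bootstrapClusterProxy,
			ArtifactFolder:        artifactFolder,
			SkipCleanup:           skipCleanup,
			// Initialize the secondary management cluster with an older clusterctl binary
			// and pin the Kubernetes version it runs, instead of relying on the
			// INIT_WITH_BINARY / INIT_WITH_KUBERNETES_VERSION e2e config variables.
			InitWithBinary:            "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/clusterctl-{OS}-{ARCH}",
			InitWithProvidersContract: "v1beta1",
			InitWithKubernetesVersion: "v1.25.0",
			// Extra clusterctl variables for the clusterctl upgrade, merged into a
			// per-cluster copy of the clusterctl config (see amendClusterctlConfig above).
			UpgradeClusterctlVariables: map[string]string{
				"CLUSTER_TOPOLOGY": "true",
			},
			// Use the ClusterClass-based workload cluster template ("topology" flavor),
			// which exercises the new ScaleAndWaitMachineDeploymentTopology helper.
			WorkloadFlavor: "topology",
		}
	})
})

The new v1.2 cluster templates referenced in docker.yaml can be regenerated from the kustomize sources with the added Makefile target, e.g. something like make -C test/e2e cluster-templates-v1.2 (assuming kustomize is available via the repo's hack tooling).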