From 2aa49b8f203535296427a82daaa68190581f50b1 Mon Sep 17 00:00:00 2001
From: Stefan Bueringer
Date: Mon, 19 Sep 2022 19:20:14 +0200
Subject: [PATCH] test/e2e: Add clusterctl upgrade with ClusterClass test
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stefan Büringer buringerst@vmware.com
---
 .gitignore                                    |   1 +
 test/e2e/Makefile                             |   6 +-
 test/e2e/clusterctl_upgrade.go                |  97 ++++--
 test/e2e/clusterctl_upgrade_test.go           |  16 +
 test/e2e/config/docker.yaml                   |  20 +-
 .../v1.2/bases/cluster-with-kcp.yaml          |  95 ++++++
 .../v1.2/bases/cluster-with-topology.yaml     |  33 ++
 .../infrastructure-docker/v1.2/bases/crs.yaml |  24 ++
 .../infrastructure-docker/v1.2/bases/md.yaml  |  52 ++++
 .../kustomization.yaml                        |   3 +
 .../v1.2/cluster-template/kustomization.yaml  |   5 +
 .../v1.2/clusterclass-quick-start.yaml        | 290 ++++++++++++++++++
 test/framework/machinedeployment_helpers.go   |  75 +++++
 test/go.mod                                   |   2 +-
 14 files changed, 674 insertions(+), 45 deletions(-)
 create mode 100644 test/e2e/data/infrastructure-docker/v1.2/bases/cluster-with-kcp.yaml
 create mode 100644 test/e2e/data/infrastructure-docker/v1.2/bases/cluster-with-topology.yaml
 create mode 100644 test/e2e/data/infrastructure-docker/v1.2/bases/crs.yaml
 create mode 100644 test/e2e/data/infrastructure-docker/v1.2/bases/md.yaml
 create mode 100644 test/e2e/data/infrastructure-docker/v1.2/cluster-template-topology/kustomization.yaml
 create mode 100644 test/e2e/data/infrastructure-docker/v1.2/cluster-template/kustomization.yaml
 create mode 100644 test/e2e/data/infrastructure-docker/v1.2/clusterclass-quick-start.yaml

diff --git a/.gitignore b/.gitignore
index 7c17efc3df3f..30c29372526d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,6 +13,7 @@ hack/tools/bin
 # E2E test templates
 test/e2e/data/infrastructure-docker/v1alpha3/cluster-template*.yaml
 test/e2e/data/infrastructure-docker/v1alpha4/cluster-template*.yaml
+test/e2e/data/infrastructure-docker/v1.2/cluster-template*.yaml
 test/e2e/data/infrastructure-docker/v1beta1/cluster-template*.yaml
 
 # E2e test extension deployment
diff --git a/test/e2e/Makefile b/test/e2e/Makefile
index 7cce002bc9ec..1137e049e950 100644
--- a/test/e2e/Makefile
+++ b/test/e2e/Makefile
@@ -66,7 +66,7 @@ $(KUSTOMIZE_BIN): $(KUSTOMIZE) ## Build a local copy of kustomize
 DOCKER_TEMPLATES := $(REPO_ROOT)/test/e2e/data/infrastructure-docker
 
 .PHONY: cluster-templates
-cluster-templates: $(KUSTOMIZE) cluster-templates-v1alpha3 cluster-templates-v1alpha4 cluster-templates-v1beta1 ## Generate cluster templates for all versions
+cluster-templates: $(KUSTOMIZE) cluster-templates-v1alpha3 cluster-templates-v1alpha4 cluster-templates-v1.2 cluster-templates-v1beta1 ## Generate cluster templates for all versions
 
 cluster-templates-v1alpha3: $(KUSTOMIZE) ## Generate cluster templates for v1alpha3
 	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1alpha3/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1alpha3/cluster-template.yaml
@@ -84,6 +84,10 @@ cluster-templates-v1alpha4: $(KUSTOMIZE) ## Generate cluster templates for v1alp
 	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-kcp-scale-in --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-kcp-scale-in.yaml
 	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-ipv6 --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-ipv6.yaml
 
+cluster-templates-v1.2: $(KUSTOMIZE) ## Generate cluster templates for v1.2
+	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.2/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.2/cluster-template.yaml
+	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.2/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.2/cluster-template-topology.yaml
+
 cluster-templates-v1beta1: $(KUSTOMIZE) ## Generate cluster templates for v1beta1
 	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template.yaml
 	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-md-remediation --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-md-remediation.yaml
diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go
index 8eec62783fba..c6585cffa08d 100644
--- a/test/e2e/clusterctl_upgrade.go
+++ b/test/e2e/clusterctl_upgrade.go
@@ -68,6 +68,9 @@ type ClusterctlUpgradeSpecInput struct {
 	// InitWithProvidersContract can be used to override the INIT_WITH_PROVIDERS_CONTRACT e2e config variable with a specific
 	// provider contract to use to initialise the secondary management cluster, e.g. `v1alpha3`
 	InitWithProvidersContract string
+	// InitWithKubernetesVersion can be used to override the INIT_WITH_KUBERNETES_VERSION e2e config variable with a specific
+	// Kubernetes version to use to create the secondary management cluster, e.g. `v1.25.0`
+	InitWithKubernetesVersion string
 	SkipCleanup               bool
 	ControlPlaneWaiters       clusterctl.ControlPlaneWaiters
 	PreInit                   func(managementClusterProxy framework.ClusterProxy)
@@ -115,12 +118,15 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 		testCancelWatches context.CancelFunc
 
 		managementClusterName          string
-		clusterctlBinaryURL            string
 		managementClusterNamespace     *corev1.Namespace
 		managementClusterCancelWatches context.CancelFunc
 		managementClusterResources     *clusterctl.ApplyClusterTemplateAndWaitResult
 		managementClusterProxy         framework.ClusterProxy
 
+		initClusterctlBinaryURL string
+		initContract            string
+		initKubernetesVersion   string
+
 		workLoadClusterName string
 	)
 
@@ -130,6 +136,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 		Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
 		Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName)
 		Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
+
 		var clusterctlBinaryURLTemplate string
 		if input.InitWithBinary == "" {
 			Expect(input.E2EConfig.Variables).To(HaveKey(initWithBinaryVariableName), "Invalid argument. %s variable must be defined when calling %s spec", initWithBinaryVariableName, specName)
@@ -139,8 +146,27 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 			clusterctlBinaryURLTemplate = input.InitWithBinary
 		}
 		clusterctlBinaryURLReplacer := strings.NewReplacer("{OS}", runtime.GOOS, "{ARCH}", runtime.GOARCH)
-		clusterctlBinaryURL = clusterctlBinaryURLReplacer.Replace(clusterctlBinaryURLTemplate)
-		Expect(input.E2EConfig.Variables).To(HaveKey(initWithKubernetesVersion))
+		initClusterctlBinaryURL = clusterctlBinaryURLReplacer.Replace(clusterctlBinaryURLTemplate)
+
+		// NOTE: by default we consider all the providers, regardless of the contract.
+		// However, given that we want to test both v1alpha3 --> v1beta1 and v1alpha4 --> v1beta1, the INIT_WITH_PROVIDERS_CONTRACT
+		// variable can be used to select versions with a specific contract.
+		initContract = "*"
+		if input.E2EConfig.HasVariable(initWithProvidersContract) {
+			initContract = input.E2EConfig.GetVariable(initWithProvidersContract)
+		}
+		if input.InitWithProvidersContract != "" {
+			initContract = input.InitWithProvidersContract
+		}
+
+		if input.InitWithKubernetesVersion == "" {
+			Expect(input.E2EConfig.Variables).To(HaveKey(initWithKubernetesVersion), "Invalid argument. %s variable must be defined when calling %s spec", initWithKubernetesVersion, specName)
+			Expect(input.E2EConfig.Variables[initWithKubernetesVersion]).ToNot(BeEmpty(), "Invalid argument. %s variable can't be empty when calling %s spec", initWithKubernetesVersion, specName)
+			initKubernetesVersion = input.E2EConfig.GetVariable(initWithKubernetesVersion)
+		} else {
+			initKubernetesVersion = input.InitWithKubernetesVersion
+		}
+
 		Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))
 
 		Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
@@ -164,7 +190,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 				Flavor:                   input.MgmtFlavor,
 				Namespace:                managementClusterNamespace.Name,
 				ClusterName:              managementClusterName,
-				KubernetesVersion:        input.E2EConfig.GetVariable(initWithKubernetesVersion),
+				KubernetesVersion:        initKubernetesVersion,
 				ControlPlaneMachineCount: pointer.Int64Ptr(1),
 				WorkerMachineCount:       pointer.Int64Ptr(1),
 			},
@@ -193,9 +219,8 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 		managementClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name)
 
 		// Download the older clusterctl version to be used for setting up the management cluster to be upgraded
-
-		log.Logf("Downloading clusterctl binary from %s", clusterctlBinaryURL)
-		clusterctlBinaryPath := downloadToTmpFile(ctx, clusterctlBinaryURL)
+		log.Logf("Downloading clusterctl binary from %s", initClusterctlBinaryURL)
+		clusterctlBinaryPath := downloadToTmpFile(ctx, initClusterctlBinaryURL)
 		defer os.Remove(clusterctlBinaryPath) // clean up
 
 		err := os.Chmod(clusterctlBinaryPath, 0744) //nolint:gosec
@@ -203,17 +228,6 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 
 		By("Initializing the workload cluster with older versions of providers")
 
-		// NOTE: by default we are considering all the providers, no matter of the contract.
-		// However, given that we want to test both v1alpha3 --> v1beta1 and v1alpha4 --> v1beta1, the INIT_WITH_PROVIDERS_CONTRACT
-		// variable can be used to select versions with a specific contract.
-		contract := "*"
-		if input.E2EConfig.HasVariable(initWithProvidersContract) {
-			contract = input.E2EConfig.GetVariable(initWithProvidersContract)
-		}
-		if input.InitWithProvidersContract != "" {
-			contract = input.InitWithProvidersContract
-		}
-
 		if input.PreInit != nil {
 			By("Running Pre-init steps against the management cluster")
 			input.PreInit(managementClusterProxy)
@@ -223,10 +237,10 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 			ClusterctlBinaryPath:    clusterctlBinaryPath, // use older version of clusterctl to init the management cluster
 			ClusterProxy:            managementClusterProxy,
 			ClusterctlConfigPath:    input.ClusterctlConfigPath,
-			CoreProvider:            input.E2EConfig.GetProviderLatestVersionsByContract(contract, config.ClusterAPIProviderName)[0],
-			BootstrapProviders:      input.E2EConfig.GetProviderLatestVersionsByContract(contract, config.KubeadmBootstrapProviderName),
-			ControlPlaneProviders:   input.E2EConfig.GetProviderLatestVersionsByContract(contract, config.KubeadmControlPlaneProviderName),
-			InfrastructureProviders: input.E2EConfig.GetProviderLatestVersionsByContract(contract, input.E2EConfig.InfrastructureProviders()...),
+			CoreProvider:            input.E2EConfig.GetProviderLatestVersionsByContract(initContract, config.ClusterAPIProviderName)[0],
+			BootstrapProviders:      input.E2EConfig.GetProviderLatestVersionsByContract(initContract, config.KubeadmBootstrapProviderName),
+			ControlPlaneProviders:   input.E2EConfig.GetProviderLatestVersionsByContract(initContract, config.KubeadmControlPlaneProviderName),
+			InfrastructureProviders: input.E2EConfig.GetProviderLatestVersionsByContract(initContract, input.E2EConfig.InfrastructureProviders()...),
 			LogFolder:               filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
 		}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)
@@ -342,19 +356,34 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 
 			// After upgrading we are sure the version is the latest version of the API,
 			// so it is possible to use the standard helpers
-			testMachineDeployments := framework.GetMachineDeploymentsByCluster(ctx, framework.GetMachineDeploymentsByClusterInput{
-				Lister:      managementClusterProxy.GetClient(),
-				ClusterName: workLoadClusterName,
-				Namespace:   testNamespace.Name,
-			})
-
-			framework.ScaleAndWaitMachineDeployment(ctx, framework.ScaleAndWaitMachineDeploymentInput{
-				ClusterProxy:              managementClusterProxy,
-				Cluster:                   &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace.Name}},
-				MachineDeployment:         testMachineDeployments[0],
-				Replicas:                  2,
-				WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
+			workloadCluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{
+				Getter:    managementClusterProxy.GetClient(),
+				Namespace: testNamespace.Name,
+				Name:      workLoadClusterName,
 			})
+			if workloadCluster.Spec.Topology != nil {
+				// Cluster is using ClusterClass, scale up via topology.
+				framework.ScaleAndWaitMachineDeploymentTopology(ctx, framework.ScaleAndWaitMachineDeploymentTopologyInput{
+					ClusterProxy:              managementClusterProxy,
+					Cluster:                   workloadCluster,
+					Replicas:                  2,
+					WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
+				})
+			} else {
+				// Cluster is not using ClusterClass, scale up via MachineDeployment.
+				testMachineDeployments := framework.GetMachineDeploymentsByCluster(ctx, framework.GetMachineDeploymentsByClusterInput{
+					Lister:      managementClusterProxy.GetClient(),
+					ClusterName: workLoadClusterName,
+					Namespace:   testNamespace.Name,
+				})
+				framework.ScaleAndWaitMachineDeployment(ctx, framework.ScaleAndWaitMachineDeploymentInput{
+					ClusterProxy:              managementClusterProxy,
+					Cluster:                   &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace.Name}},
+					MachineDeployment:         testMachineDeployments[0],
+					Replicas:                  2,
+					WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
+				})
+			}
 
 			By("THE UPGRADED MANAGEMENT CLUSTER WORKS!")
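The init settings introduced above all resolve with the same precedence: an explicit field on ClusterctlUpgradeSpecInput wins over the corresponding e2e config variable (INIT_WITH_PROVIDERS_CONTRACT, INIT_WITH_KUBERNETES_VERSION), which in turn wins over the built-in default. A minimal, dependency-free sketch of that rule (resolveInitSetting is an illustrative helper, not part of the framework):

package main

import "fmt"

// resolveInitSetting mirrors the precedence used in ClusterctlUpgradeSpec:
// explicit spec input > e2e config variable > built-in default.
// Illustrative only; the real logic is inlined in the spec above.
func resolveInitSetting(specInput, configVar, defaultValue string) string {
	if specInput != "" {
		return specInput
	}
	if configVar != "" {
		return configVar
	}
	return defaultValue
}

func main() {
	// No overrides: initContract falls back to "*" (all contracts).
	fmt.Println(resolveInitSetting("", "", "*"))
	// The spec input wins even when the e2e config variable is set.
	fmt.Println(resolveInitSetting("v1beta1", "v1alpha4", "*"))
}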
diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go
index 77e163a439ba..51e78eabaadf 100644
--- a/test/e2e/clusterctl_upgrade_test.go
+++ b/test/e2e/clusterctl_upgrade_test.go
@@ -34,3 +34,19 @@ var _ = Describe("When testing clusterctl upgrades [clusterctl-Upgrade]", func()
 		}
 	})
 })
+
+var _ = Describe("When testing clusterctl upgrades using ClusterClass", func() {
+	ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput {
+		return ClusterctlUpgradeSpecInput{
+			E2EConfig:                 e2eConfig,
+			ClusterctlConfigPath:      clusterctlConfigPath,
+			BootstrapClusterProxy:     bootstrapClusterProxy,
+			ArtifactFolder:            artifactFolder,
+			SkipCleanup:               skipCleanup,
+			InitWithBinary:            "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/clusterctl-{OS}-{ARCH}",
+			InitWithProvidersContract: "v1beta1",
+			InitWithKubernetesVersion: "v1.25.0",
+			WorkloadFlavor:            "topology",
+		}
+	})
+})
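The InitWithBinary URL above contains {OS} and {ARCH} placeholders; as the spec diff shows, ClusterctlUpgradeSpec resolves them with strings.NewReplacer and runtime.GOOS/runtime.GOARCH before downloading the binary. A runnable sketch of that substitution:

package main

import (
	"fmt"
	"runtime"
	"strings"
)

func main() {
	// Same replacement the spec applies to InitWithBinary / INIT_WITH_BINARY.
	url := "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/clusterctl-{OS}-{ARCH}"
	replacer := strings.NewReplacer("{OS}", runtime.GOOS, "{ARCH}", runtime.GOARCH)
	// On linux/amd64 this prints .../clusterctl-linux-amd64.
	fmt.Println(replacer.Replace(url))
}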
diff --git a/test/e2e/config/docker.yaml b/test/e2e/config/docker.yaml
index 1049a569d913..cea9a81d1210 100644
--- a/test/e2e/config/docker.yaml
+++ b/test/e2e/config/docker.yaml
@@ -49,8 +49,8 @@ providers:
         new: --metrics-addr=:8080
     files:
     - sourcePath: "../data/shared/v1alpha4/metadata.yaml"
-  - name: v1.2.0 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
-    value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/core-components.yaml"
+  - name: v1.2.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
+    value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/core-components.yaml"
     type: "url"
     contract: v1beta1
     replacements:
@@ -87,8 +87,8 @@ providers:
         new: --metrics-addr=:8080
     files:
     - sourcePath: "../data/shared/v1alpha4/metadata.yaml"
-  - name: v1.2.0 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
-    value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/bootstrap-components.yaml"
+  - name: v1.2.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
+    value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/bootstrap-components.yaml"
     type: "url"
     contract: v1beta1
     replacements:
@@ -125,8 +125,8 @@ providers:
         new: --metrics-addr=:8080
     files:
     - sourcePath: "../data/shared/v1alpha4/metadata.yaml"
-  - name: v1.2.0 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
-    value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/control-plane-components.yaml"
+  - name: v1.2.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
+    value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/control-plane-components.yaml"
     type: "url"
     contract: v1beta1
     replacements:
@@ -165,8 +165,8 @@ providers:
     files:
     - sourcePath: "../data/shared/v1alpha4/metadata.yaml"
     - sourcePath: "../data/infrastructure-docker/v1alpha4/cluster-template.yaml"
-  - name: v1.2.0 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
-    value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/infrastructure-components-development.yaml"
+  - name: v1.2.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
+    value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/infrastructure-components-development.yaml"
     type: "url"
     contract: v1beta1
     replacements:
@@ -174,7 +174,9 @@ providers:
       new: --metrics-addr=:8080
     files:
     - sourcePath: "../data/shared/v1beta1/metadata.yaml"
-    - sourcePath: "../data/infrastructure-docker/v1beta1/cluster-template.yaml"
+    - sourcePath: "../data/infrastructure-docker/v1.2/cluster-template.yaml"
+    - sourcePath: "../data/infrastructure-docker/v1.2/cluster-template-topology.yaml"
+    - sourcePath: "../data/infrastructure-docker/v1.2/clusterclass-quick-start.yaml"
   - name: v1.3.99 # next; use manifest from source files
     value: ../../../test/infrastructure/docker/config/default
     replacements:
diff --git a/test/e2e/data/infrastructure-docker/v1.2/bases/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-docker/v1.2/bases/cluster-with-kcp.yaml
new file mode 100644
index 000000000000..fa8720f9833f
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/v1.2/bases/cluster-with-kcp.yaml
@@ -0,0 +1,95 @@
+---
+# DockerCluster object referenced by the Cluster object
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerCluster
+metadata:
+  name: '${CLUSTER_NAME}'
+spec:
+  failureDomains:
+    fd1:
+      controlPlane: true
+    fd2:
+      controlPlane: true
+    fd3:
+      controlPlane: true
+    fd4:
+      controlPlane: false
+    fd5:
+      controlPlane: false
+    fd6:
+      controlPlane: false
+    fd7:
+      controlPlane: false
+    fd8:
+      controlPlane: false
+---
+# Cluster object with
+# - Reference to the KubeadmControlPlane object
+# - the label cni=${CLUSTER_NAME}-crs-0, so the cluster can be selected by the ClusterResourceSet.
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: '${CLUSTER_NAME}'
+  labels:
+    cni: "${CLUSTER_NAME}-crs-0"
+spec:
+  clusterNetwork:
+    services:
+      cidrBlocks: ['${DOCKER_SERVICE_CIDRS}']
+    pods:
+      cidrBlocks: ['${DOCKER_POD_CIDRS}']
+    serviceDomain: '${DOCKER_SERVICE_DOMAIN}'
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: DockerCluster
+    name: '${CLUSTER_NAME}'
+  controlPlaneRef:
+    kind: KubeadmControlPlane
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+    name: "${CLUSTER_NAME}-control-plane"
+---
+# DockerMachineTemplate object referenced by the KubeadmControlPlane object
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerMachineTemplate
+metadata:
+  name: "${CLUSTER_NAME}-control-plane"
+spec:
+  template:
+    spec:
+      extraMounts:
+        - containerPath: "/var/run/docker.sock"
+          hostPath: "/var/run/docker.sock"
+---
+# KubeadmControlPlane referenced by the Cluster object with
+# - the label kcp-adoption.step2, because it should be created in the second step of the kcp-adoption test.
+kind: KubeadmControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+  name: "${CLUSTER_NAME}-control-plane"
+  labels:
+    kcp-adoption.step2: ""
+spec:
+  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+  machineTemplate:
+    infrastructureRef:
+      kind: DockerMachineTemplate
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+      name: "${CLUSTER_NAME}-control-plane"
+  kubeadmConfigSpec:
+    clusterConfiguration:
+      controllerManager:
+        extraArgs: {enable-hostpath-provisioner: 'true'}
+      apiServer:
+        # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied.
+        certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal]
+    initConfiguration:
+      nodeRegistration:
+        criSocket: unix:///var/run/containerd/containerd.sock
+        kubeletExtraArgs:
+          eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'
+    joinConfiguration:
+      nodeRegistration:
+        criSocket: unix:///var/run/containerd/containerd.sock
+        kubeletExtraArgs:
+          eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'
+  version: "${KUBERNETES_VERSION}"
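The template files use envsubst-style ${VAR} placeholders (CLUSTER_NAME, KUBERNETES_VERSION, DOCKER_SERVICE_CIDRS, ...) that the e2e framework substitutes from the test configuration before applying the manifests. A minimal sketch of that kind of substitution using Go's os.Expand (the expand helper is illustrative; the real processing happens in the clusterctl template processor):

package main

import (
	"fmt"
	"os"
)

// expand fills ${VAR} placeholders from a map, similar in spirit to the
// variable substitution applied to these templates. Illustrative only.
func expand(template string, vars map[string]string) string {
	return os.Expand(template, func(key string) string {
		return vars[key]
	})
}

func main() {
	snippet := "metadata:\n  name: '${CLUSTER_NAME}'\n  labels:\n    cni: \"${CLUSTER_NAME}-crs-0\""
	fmt.Println(expand(snippet, map[string]string{"CLUSTER_NAME": "clusterctl-upgrade"}))
}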
diff --git a/test/e2e/data/infrastructure-docker/v1.2/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-docker/v1.2/bases/cluster-with-topology.yaml
new file mode 100644
index 000000000000..26fe13f880bf
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/v1.2/bases/cluster-with-topology.yaml
@@ -0,0 +1,33 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: '${CLUSTER_NAME}'
+  namespace: default
+  labels:
+    cni: "${CLUSTER_NAME}-crs-0"
+spec:
+  clusterNetwork:
+    services:
+      cidrBlocks: ['${DOCKER_SERVICE_CIDRS}']
+    pods:
+      cidrBlocks: ['${DOCKER_POD_CIDRS}']
+    serviceDomain: '${DOCKER_SERVICE_DOMAIN}'
+  topology:
+    class: "quick-start"
+    version: "${KUBERNETES_VERSION}"
+    controlPlane:
+      metadata: {}
+      replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+    workers:
+      machineDeployments:
+      - class: "default-worker"
+        name: "md-0"
+        replicas: ${WORKER_MACHINE_COUNT}
+        failureDomain: fd4
+    variables:
+    # We set an empty value to use the default tag kubeadm init is using.
+    - name: etcdImageTag
+      value: ""
+    # We set an empty value to use the default tag kubeadm init is using.
+    - name: coreDNSImageTag
+      value: ""
diff --git a/test/e2e/data/infrastructure-docker/v1.2/bases/crs.yaml b/test/e2e/data/infrastructure-docker/v1.2/bases/crs.yaml
new file mode 100644
index 000000000000..b1b61237dc62
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/v1.2/bases/crs.yaml
@@ -0,0 +1,24 @@
+---
+# ConfigMap object referenced by the ClusterResourceSet object and with
+# the CNI resource defined in the test config file
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: "cni-${CLUSTER_NAME}-crs-0"
+data: ${CNI_RESOURCES}
+binaryData:
+---
+# ClusterResourceSet object with
+# a selector that targets all the Cluster with label cni=${CLUSTER_NAME}-crs-0
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+  name: "${CLUSTER_NAME}-crs-0"
+spec:
+  strategy: ApplyOnce
+  clusterSelector:
+    matchLabels:
+      cni: "${CLUSTER_NAME}-crs-0"
+  resources:
+  - name: "cni-${CLUSTER_NAME}-crs-0"
+    kind: ConfigMap
diff --git a/test/e2e/data/infrastructure-docker/v1.2/bases/md.yaml b/test/e2e/data/infrastructure-docker/v1.2/bases/md.yaml
new file mode 100644
index 000000000000..11abbc29248a
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/v1.2/bases/md.yaml
@@ -0,0 +1,52 @@
+---
+# DockerMachineTemplate referenced by the MachineDeployment and with
+# - extraMounts for the docker sock, thus allowing self-hosting test
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerMachineTemplate
+metadata:
+  name: "${CLUSTER_NAME}-md-0"
+spec:
+  template:
+    spec:
+      extraMounts:
+        - containerPath: "/var/run/docker.sock"
+          hostPath: "/var/run/docker.sock"
+---
+# KubeadmConfigTemplate referenced by the MachineDeployment
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+  name: "${CLUSTER_NAME}-md-0"
+spec:
+  template:
+    spec:
+      joinConfiguration:
+        nodeRegistration:
+          criSocket: unix:///var/run/containerd/containerd.sock
+          kubeletExtraArgs:
+            eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'
+---
+# MachineDeployment object
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+  name: "${CLUSTER_NAME}-md-0"
+spec:
+  clusterName: "${CLUSTER_NAME}"
+  replicas: ${WORKER_MACHINE_COUNT}
+  selector:
+    matchLabels:
+  template:
+    spec:
+      clusterName: "${CLUSTER_NAME}"
+      version: "${KUBERNETES_VERSION}"
+      bootstrap:
+        configRef:
+          name: "${CLUSTER_NAME}-md-0"
+          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+          kind: KubeadmConfigTemplate
+      infrastructureRef:
+        name: "${CLUSTER_NAME}-md-0"
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        kind: DockerMachineTemplate
+      failureDomain: fd4
diff --git a/test/e2e/data/infrastructure-docker/v1.2/cluster-template-topology/kustomization.yaml b/test/e2e/data/infrastructure-docker/v1.2/cluster-template-topology/kustomization.yaml
new file mode 100644
index 000000000000..44c449548234
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/v1.2/cluster-template-topology/kustomization.yaml
@@ -0,0 +1,3 @@
+resources:
+  - ../bases/cluster-with-topology.yaml
+  - ../bases/crs.yaml
diff --git a/test/e2e/data/infrastructure-docker/v1.2/cluster-template/kustomization.yaml b/test/e2e/data/infrastructure-docker/v1.2/cluster-template/kustomization.yaml
new file mode 100644
index 000000000000..adb5919cec6f
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/v1.2/cluster-template/kustomization.yaml
@@ -0,0 +1,5 @@
+bases:
+- ../bases/cluster-with-kcp.yaml
+- ../bases/md.yaml
+- ../bases/crs.yaml
+
diff --git a/test/e2e/data/infrastructure-docker/v1.2/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/v1.2/clusterclass-quick-start.yaml
new file mode 100644
index 000000000000..b5eac232b244
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/v1.2/clusterclass-quick-start.yaml
@@ -0,0 +1,290 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: ClusterClass
+metadata:
+  name: quick-start
+spec:
+  controlPlane:
+    ref:
+      apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+      kind: KubeadmControlPlaneTemplate
+      name: quick-start-control-plane
+    machineInfrastructure:
+      ref:
+        kind: DockerMachineTemplate
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        name: quick-start-control-plane
+    machineHealthCheck:
+      maxUnhealthy: 100%
+      unhealthyConditions:
+        - type: e2e.remediation.condition
+          status: "False"
+          timeout: 20s
+  infrastructure:
+    ref:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+      kind: DockerClusterTemplate
+      name: quick-start-cluster
+  workers:
+    machineDeployments:
+      - class: default-worker
+        template:
+          bootstrap:
+            ref:
+              apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+              kind: KubeadmConfigTemplate
+              name: quick-start-default-worker-bootstraptemplate
+          infrastructure:
+            ref:
+              apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+              kind: DockerMachineTemplate
+              name: quick-start-default-worker-machinetemplate
+        machineHealthCheck:
+          maxUnhealthy: 100%
+          unhealthyConditions:
+            - type: e2e.remediation.condition
+              status: "False"
+              timeout: 20s
+  variables:
+    - name: lbImageRepository
+      required: true
+      schema:
+        openAPIV3Schema:
+          type: string
+          default: kindest
+    - name: etcdImageTag
+      required: true
+      schema:
+        openAPIV3Schema:
+          type: string
+          default: ""
+          example: "3.5.3-0"
+          description: "etcdImageTag sets the tag for the etcd image."
+    - name: coreDNSImageTag
+      required: true
+      schema:
+        openAPIV3Schema:
+          type: string
+          default: ""
+          example: "v1.8.5"
+          description: "coreDNSImageTag sets the tag for the coreDNS image."
+    - name: kubeadmControlPlaneMaxSurge
+      required: false
+      schema:
+        openAPIV3Schema:
+          type: string
+          default: ""
+          example: "0"
+          description: "kubeadmControlPlaneMaxSurge is the maximum number of control planes that can be scheduled above or under the desired number of control plane machines."
+  patches:
+    - name: lbImageRepository
+      definitions:
+        - selector:
+            apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+            kind: DockerClusterTemplate
+            matchResources:
+              infrastructureCluster: true
+          jsonPatches:
+            - op: add
+              path: "/spec/template/spec/loadBalancer"
+              valueFrom:
+                template: |
+                  imageRepository: {{ .lbImageRepository }}
+    # We have to pin the cgroupDriver to cgroupfs for Kubernetes < v1.24 because kind does not support systemd for those versions, but kubeadm >= 1.21 defaults to systemd.
+    - name: cgroupDriver-controlPlane
+      description: |
+        Sets the cgroupDriver to cgroupfs if a Kubernetes version < v1.24 is referenced.
+        This is required because kind and the node images do not support the default
+        systemd cgroupDriver for kubernetes < v1.24.
+      enabledIf: '{{ semverCompare "<= v1.23" .builtin.controlPlane.version }}'
+      definitions:
+        - selector:
+            apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+            kind: KubeadmControlPlaneTemplate
+            matchResources:
+              controlPlane: true
+          jsonPatches:
+            - op: add
+              path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/cgroup-driver"
+              value: cgroupfs
+            - op: add
+              path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cgroup-driver"
+              value: cgroupfs
+    - name: cgroupDriver-machineDeployment
+      description: |
+        Sets the cgroupDriver to cgroupfs if a Kubernetes version < v1.24 is referenced.
+        This is required because kind and the node images do not support the default
+        systemd cgroupDriver for kubernetes < v1.24.
+      enabledIf: '{{ semverCompare "<= v1.23" .builtin.machineDeployment.version }}'
+      definitions:
+        - selector:
+            apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+            kind: KubeadmConfigTemplate
+            matchResources:
+              machineDeploymentClass:
+                names:
+                  - default-worker
+          jsonPatches:
+            - op: add
+              path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cgroup-driver"
+              value: cgroupfs
+    - name: etcdImageTag
+      description: "Sets the tag to use for the etcd image in the KubeadmControlPlane."
+      definitions:
+        - selector:
+            apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+            kind: KubeadmControlPlaneTemplate
+            matchResources:
+              controlPlane: true
+          jsonPatches:
+            - op: add
+              path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/etcd"
+              valueFrom:
+                template: |
+                  local:
+                    imageTag: {{ .etcdImageTag }}
+    - name: coreDNSImageTag
+      description: "Sets the tag to use for the CoreDNS image in the KubeadmControlPlane."
+      definitions:
+        - selector:
+            apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+            kind: KubeadmControlPlaneTemplate
+            matchResources:
+              controlPlane: true
+          jsonPatches:
+            - op: add
+              path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/dns"
+              valueFrom:
+                template: |
+                  imageTag: {{ .coreDNSImageTag }}
+    - name: customImage
+      description: "Sets the container image that is used for running dockerMachines for the controlPlane and default-worker machineDeployments."
+      definitions:
+        - selector:
+            apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+            kind: DockerMachineTemplate
+            matchResources:
+              machineDeploymentClass:
+                names:
+                  - default-worker
+          jsonPatches:
+            - op: add
+              path: "/spec/template/spec/customImage"
+              valueFrom:
+                template: |
+                  kindest/node:{{ .builtin.machineDeployment.version | replace "+" "_" }}
+        - selector:
+            apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+            kind: DockerMachineTemplate
+            matchResources:
+              controlPlane: true
+          jsonPatches:
+            - op: add
+              path: "/spec/template/spec/customImage"
+              valueFrom:
+                template: |
+                  kindest/node:{{ .builtin.controlPlane.version | replace "+" "_" }}
+    - name: kubeadmControlPlaneMaxSurge
+      description: "Sets the maxSurge value used for rolloutStrategy in the KubeadmControlPlane."
+      enabledIf: '{{ ne .kubeadmControlPlaneMaxSurge "" }}'
+      definitions:
+        - selector:
+            apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+            kind: KubeadmControlPlaneTemplate
+            matchResources:
+              controlPlane: true
+          jsonPatches:
+            - op: add
+              path: /spec/template/spec/rolloutStrategy/rollingUpdate/maxSurge
+              valueFrom:
+                template: "{{ .kubeadmControlPlaneMaxSurge }}"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerClusterTemplate
+metadata:
+  name: quick-start-cluster
+spec:
+  template:
+    spec:
+      failureDomains:
+        fd1:
+          controlPlane: true
+        fd2:
+          controlPlane: true
+        fd3:
+          controlPlane: true
+        fd4:
+          controlPlane: false
+        fd5:
+          controlPlane: false
+        fd6:
+          controlPlane: false
+        fd7:
+          controlPlane: false
+        fd8:
+          controlPlane: false
+---
+kind: KubeadmControlPlaneTemplate
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+  name: quick-start-control-plane
+spec:
+  template:
+    spec:
+      machineTemplate:
+        nodeDrainTimeout: 1s
+      kubeadmConfigSpec:
+        clusterConfiguration:
+          controllerManager:
+            extraArgs: { enable-hostpath-provisioner: 'true' }
+          apiServer:
+            # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied.
+            certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal]
+        initConfiguration:
+          nodeRegistration:
+            # We have to set the criSocket to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found
+            criSocket: unix:///var/run/containerd/containerd.sock
+            kubeletExtraArgs:
+              eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'
+        joinConfiguration:
+          nodeRegistration:
+            # We have to set the criSocket to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found
+            criSocket: unix:///var/run/containerd/containerd.sock
+            kubeletExtraArgs:
+              eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerMachineTemplate
+metadata:
+  name: quick-start-control-plane
+spec:
+  template:
+    spec:
+      extraMounts:
+        - containerPath: "/var/run/docker.sock"
+          hostPath: "/var/run/docker.sock"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerMachineTemplate
+metadata:
+  name: quick-start-default-worker-machinetemplate
+spec:
+  template:
+    spec:
+      extraMounts:
+        - containerPath: "/var/run/docker.sock"
+          hostPath: "/var/run/docker.sock"
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+  name: quick-start-default-worker-bootstraptemplate
+spec:
+  template:
+    spec:
+      joinConfiguration:
+        nodeRegistration:
+          # We have to set the criSocket to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found
+          criSocket: unix:///var/run/containerd/containerd.sock
+          kubeletExtraArgs:
+            eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'
+
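The customImage patch above derives the kind node image from the built-in version variable, replacing "+" with "_" because "+" is not a legal character in a container image tag. A small runnable sketch of that mapping (kindImage is an illustrative helper, not framework code):

package main

import (
	"fmt"
	"strings"
)

// kindImage maps a Kubernetes version to a kind node image tag, mirroring
// the template `kindest/node:{{ ... | replace "+" "_" }}` in the patch.
func kindImage(version string) string {
	return "kindest/node:" + strings.ReplaceAll(version, "+", "_")
}

func main() {
	fmt.Println(kindImage("v1.25.0"))           // kindest/node:v1.25.0
	fmt.Println(kindImage("v1.23.13+vmware.1")) // kindest/node:v1.23.13_vmware.1
}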
diff --git a/test/framework/machinedeployment_helpers.go b/test/framework/machinedeployment_helpers.go
index 345f4bc2851d..ea803b37e15e 100644
--- a/test/framework/machinedeployment_helpers.go
+++ b/test/framework/machinedeployment_helpers.go
@@ -421,3 +421,78 @@ func ScaleAndWaitMachineDeployment(ctx context.Context, input ScaleAndWaitMachin
 		return nodeRefCount, nil
 	}, input.WaitForMachineDeployments...).Should(Equal(int(*input.MachineDeployment.Spec.Replicas)), "Timed out waiting for Machine Deployment %s to have %d replicas", klog.KObj(input.MachineDeployment), *input.MachineDeployment.Spec.Replicas)
 }
+
+// ScaleAndWaitMachineDeploymentTopologyInput is the input for ScaleAndWaitMachineDeploymentTopology.
+type ScaleAndWaitMachineDeploymentTopologyInput struct {
+	ClusterProxy              ClusterProxy
+	Cluster                   *clusterv1.Cluster
+	Replicas                  int32
+	WaitForMachineDeployments []interface{}
+}
+
+// ScaleAndWaitMachineDeploymentTopology scales a MachineDeployment topology and waits until all Machines have a node ref and the Machine count equals Replicas.
+func ScaleAndWaitMachineDeploymentTopology(ctx context.Context, input ScaleAndWaitMachineDeploymentTopologyInput) {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for ScaleAndWaitMachineDeploymentTopology")
+	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ScaleAndWaitMachineDeploymentTopology")
+	Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling ScaleAndWaitMachineDeploymentTopology")
+	Expect(input.Cluster.Spec.Topology.Workers).ToNot(BeNil(), "Invalid argument. input.Cluster must have MachineDeployment topologies")
+	Expect(len(input.Cluster.Spec.Topology.Workers.MachineDeployments) >= 1).To(BeTrue(), "Invalid argument. input.Cluster must have at least one MachineDeployment topology")
+
+	mdTopology := input.Cluster.Spec.Topology.Workers.MachineDeployments[0]
+	log.Logf("Scaling machine deployment topology %s from %d to %d replicas", mdTopology.Name, *mdTopology.Replicas, input.Replicas)
+	patchHelper, err := patch.NewHelper(input.Cluster, input.ClusterProxy.GetClient())
+	Expect(err).ToNot(HaveOccurred())
+	mdTopology.Replicas = pointer.Int32Ptr(input.Replicas)
+	input.Cluster.Spec.Topology.Workers.MachineDeployments[0] = mdTopology
+	Eventually(func() error {
+		return patchHelper.Patch(ctx, input.Cluster)
+	}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to scale machine deployment topology %s", mdTopology.Name)
+
+	log.Logf("Waiting for correct number of replicas to exist")
+	deploymentList := &clusterv1.MachineDeploymentList{}
+	Eventually(func() error {
+		return input.ClusterProxy.GetClient().List(ctx, deploymentList,
+			client.InNamespace(input.Cluster.Namespace),
+			client.MatchingLabels{
+				clusterv1.ClusterLabelName:                          input.Cluster.Name,
+				clusterv1.ClusterTopologyMachineDeploymentLabelName: mdTopology.Name,
+			},
+		)
+	}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list MachineDeployments object for Cluster %s", klog.KRef(input.Cluster.Namespace, input.Cluster.Name))
+
+	Expect(deploymentList.Items).To(HaveLen(1))
+	md := deploymentList.Items[0]
+
+	Eventually(func() (int, error) {
+		selectorMap, err := metav1.LabelSelectorAsMap(&md.Spec.Selector)
+		if err != nil {
+			return -1, err
+		}
+		ms := &clusterv1.MachineSetList{}
+		if err := input.ClusterProxy.GetClient().List(ctx, ms, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels(selectorMap)); err != nil {
+			return -1, err
+		}
+		if len(ms.Items) == 0 {
+			return -1, errors.New("no machinesets were found")
+		}
+		machineSet := ms.Items[0]
+		selectorMap, err = metav1.LabelSelectorAsMap(&machineSet.Spec.Selector)
+		if err != nil {
+			return -1, err
+		}
+		machines := &clusterv1.MachineList{}
+		if err := input.ClusterProxy.GetClient().List(ctx, machines, client.InNamespace(machineSet.Namespace), client.MatchingLabels(selectorMap)); err != nil {
+			return -1, err
+		}
+		nodeRefCount := 0
+		for _, machine := range machines.Items {
+			if machine.Status.NodeRef != nil {
+				nodeRefCount++
+			}
+		}
+		if len(machines.Items) != nodeRefCount {
+			return -1, errors.New("machine count does not match existing nodes count")
+		}
+		return nodeRefCount, nil
+	}, input.WaitForMachineDeployments...).Should(Equal(int(*md.Spec.Replicas)), "Timed out waiting for Machine Deployment %s to have %d replicas", klog.KObj(&md), *md.Spec.Replicas)
+}
diff --git a/test/go.mod b/test/go.mod
index fe4e13fc572c..65a65864b888 100644
--- a/test/go.mod
+++ b/test/go.mod
@@ -10,6 +10,7 @@ require (
 	github.com/docker/go-connections v0.4.0
 	github.com/flatcar/ignition v0.36.2
 	github.com/go-logr/logr v1.2.3
+	github.com/gobuffalo/flect v0.2.5
 	github.com/onsi/ginkgo/v2 v2.1.4
 	github.com/onsi/gomega v1.20.0
 	github.com/pkg/errors v0.9.1
@@ -59,7 +60,6 @@
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
 	github.com/go-openapi/jsonreference v0.20.0 // indirect
 	github.com/go-openapi/swag v0.22.3 // indirect
-	github.com/gobuffalo/flect v0.2.5 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
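ScaleAndWaitMachineDeploymentTopology above only succeeds once every Machine backing the scaled topology has a NodeRef and the ready count matches the desired replicas. A dependency-free sketch of that success condition (the machine type here is a stand-in for clusterv1.Machine):

package main

import (
	"errors"
	"fmt"
)

// machine is a minimal stand-in for clusterv1.Machine.
type machine struct {
	nodeRef *string // nil until the Machine is bound to a Node
}

// readyReplicas mirrors the Eventually body above: count Machines that have
// a node ref, and keep failing while any Machine is still missing one.
func readyReplicas(machines []machine) (int, error) {
	nodeRefCount := 0
	for _, m := range machines {
		if m.nodeRef != nil {
			nodeRefCount++
		}
	}
	if nodeRefCount != len(machines) {
		return -1, errors.New("machine count does not match existing nodes count")
	}
	return nodeRefCount, nil
}

func main() {
	node := "worker-node-0"
	fmt.Println(readyReplicas([]machine{{nodeRef: &node}, {}})) // still waiting: -1 plus an error
	fmt.Println(readyReplicas([]machine{{nodeRef: &node}}))     // done: 1 <nil>
}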