diff --git a/.github/workflows/pr-golangci-lint.yaml b/.github/workflows/pr-golangci-lint.yaml
index 97e5f2df73f0..c8feb9ea2dc9 100644
--- a/.github/workflows/pr-golangci-lint.yaml
+++ b/.github/workflows/pr-golangci-lint.yaml
@@ -30,6 +30,6 @@ jobs:
      - name: golangci-lint
        uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # tag=v6.0.1
        with:
-          version: v1.57.2
+          version: v1.59.0
          args: --out-format=colored-line-number
          working-directory: ${{matrix.working-directory}}
diff --git a/.golangci.yml b/.golangci.yml
index 85720606a6db..38242b5312cf 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -19,12 +19,12 @@ linters:
  - bidichk
  - bodyclose
  - containedctx
+ - copyloopvar
  - dogsled
  - dupword
  - durationcheck
  - errcheck
  - errchkjson
- - exportloopref
  - gci
  - ginkgolinter
  - goconst
@@ -38,6 +38,7 @@ linters:
  - govet
  - importas
  - ineffassign
+ - intrange
  - loggercheck
  - misspell
  - nakedret
diff --git a/bootstrap/kubeadm/internal/cloudinit/controlplane_test.go b/bootstrap/kubeadm/internal/cloudinit/controlplane_test.go
index 6706538c8b01..02f68d9e43ae 100644
--- a/bootstrap/kubeadm/internal/cloudinit/controlplane_test.go
+++ b/bootstrap/kubeadm/internal/cloudinit/controlplane_test.go
@@ -44,7 +44,6 @@ func TestTemplateYAMLIndent(t *testing.T) {
	}
	for _, tc := range testcases {
-		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			g := NewWithT(t)
diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go
index d3147305f4a7..a2f02c3cc1a0 100644
--- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go
+++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go
@@ -58,7 +58,7 @@ func TestKubeadmConfigReconciler_MachineToBootstrapMapFuncReturn(t *testing.T) {
	objs := []client.Object{cluster}
	machineObjs := []client.Object{}
	var expectedConfigName string
-	for i := 0; i < 3; i++ {
+	for i := range 3 {
		configName := fmt.Sprintf("my-config-%d", i)
		m := builder.Machine(metav1.NamespaceDefault, fmt.Sprintf("my-machine-%d", i)).
			WithVersion("v1.19.1").
@@ -80,7 +80,7 @@ func TestKubeadmConfigReconciler_MachineToBootstrapMapFuncReturn(t *testing.T) {
		Client:              fakeClient,
		SecretCachingClient: fakeClient,
	}
-	for i := 0; i < 3; i++ {
+	for i := range 3 {
		o := machineObjs[i]
		configs := reconciler.MachineToBootstrapMapFunc(ctx, o)
		if i == 1 {
@@ -675,7 +675,6 @@ func TestReconcileIfJoinCertificatesAvailableConditioninNodesAndControlPlaneIsRe
	}
	for _, rt := range useCases {
-		rt := rt // pin!
		t.Run(rt.name, func(t *testing.T) {
			g := NewWithT(t)
@@ -753,7 +752,6 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) {
	}
	for _, rt := range useCases {
-		rt := rt // pin!
		t.Run(rt.name, func(t *testing.T) {
			g := NewWithT(t)
@@ -1819,7 +1817,7 @@ func TestKubeadmConfigReconciler_ClusterToKubeadmConfigs(t *testing.T) {
	cluster := builder.Cluster(metav1.NamespaceDefault, "my-cluster").Build()
	objs := []client.Object{cluster}
	expectedNames := []string{}
-	for i := 0; i < 3; i++ {
+	for i := range 3 {
		configName := fmt.Sprintf("my-config-%d", i)
		m := builder.Machine(metav1.NamespaceDefault, fmt.Sprintf("my-machine-%d", i)).
			WithVersion("v1.19.1").
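Note on the linter swap above: `exportloopref` is dropped and `copyloopvar` enabled because Go 1.22 gives every loop iteration its own copy of the loop variable, so the `tc := tc` / `rt := rt // pin!` lines removed throughout this diff are now dead code. A minimal sketch of the pattern (test and field names here are illustrative, not taken from the repository):

```go
package example

import "testing"

func TestLoopVariableCapture(t *testing.T) {
	cases := []struct {
		name  string
		input int
	}{
		{name: "one", input: 1},
		{name: "two", input: 2},
	}
	for _, tc := range cases {
		// Before Go 1.22, a parallel subtest capturing tc needed a copy such as
		// `tc := tc` here; with per-iteration loop variables each iteration
		// already owns its tc, so copyloopvar flags the copy as redundant.
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			if tc.input <= 0 {
				t.Fatalf("expected positive input, got %d", tc.input)
			}
		})
	}
}
```

The same reasoning covers taking the address of the loop variable (for example `&crd`, `&c`, `&machine` later in this diff): each iteration's variable is distinct, so the address no longer aliases a shared variable.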
diff --git a/bootstrap/kubeadm/internal/ignition/clc/clc_test.go b/bootstrap/kubeadm/internal/ignition/clc/clc_test.go
index 23ae5c53d7f7..906df32a39af 100644
--- a/bootstrap/kubeadm/internal/ignition/clc/clc_test.go
+++ b/bootstrap/kubeadm/internal/ignition/clc/clc_test.go
@@ -561,8 +561,6 @@ func TestRender(t *testing.T) {
	}
	for _, tt := range tc {
-		tt := tt
-
		t.Run(tt.desc, func(t *testing.T) {
			t.Parallel()
diff --git a/bootstrap/kubeadm/internal/ignition/ignition_test.go b/bootstrap/kubeadm/internal/ignition/ignition_test.go
index 4db5b245a25d..f57c79da2477 100644
--- a/bootstrap/kubeadm/internal/ignition/ignition_test.go
+++ b/bootstrap/kubeadm/internal/ignition/ignition_test.go
@@ -42,8 +42,6 @@ func Test_NewNode(t *testing.T) {
	}
	for name, input := range cases {
-		input := input
-
		t.Run(name, func(t *testing.T) {
			t.Parallel()
@@ -160,8 +158,6 @@ func Test_NewJoinControlPlane(t *testing.T) {
	}
	for name, input := range cases {
-		input := input
-
		t.Run(name, func(t *testing.T) {
			t.Parallel()
@@ -278,8 +274,6 @@ func Test_NewInitControlPlane(t *testing.T) {
	}
	for name, input := range cases {
-		input := input
-
		t.Run(name, func(t *testing.T) {
			t.Parallel()
diff --git a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go
index 9d2feeff6e3a..515c05b5c461 100644
--- a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go
+++ b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go
@@ -99,7 +99,6 @@ func TestControlPlaneInitMutex_Lock(t *testing.T) {
	}
	for _, tc := range tests {
-		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			gs := NewWithT(t)
@@ -259,7 +258,6 @@ func TestControlPlaneInitMutex_UnLock(t *testing.T) {
	}
	for _, tc := range tests {
-		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			gs := NewWithT(t)
diff --git a/bootstrap/kubeadm/internal/webhooks/kubeadmconfigtemplate_test.go b/bootstrap/kubeadm/internal/webhooks/kubeadmconfigtemplate_test.go
index 215e6a6cde84..b4592c8e10fe 100644
--- a/bootstrap/kubeadm/internal/webhooks/kubeadmconfigtemplate_test.go
+++ b/bootstrap/kubeadm/internal/webhooks/kubeadmconfigtemplate_test.go
@@ -83,8 +83,6 @@ func TestKubeadmConfigTemplateValidation(t *testing.T) {
	}
	for name, tt := range cases {
-		tt := tt
-
		webhook := &KubeadmConfigTemplate{}
		t.Run(name, func(t *testing.T) {
diff --git a/cmd/clusterctl/client/cluster/components.go b/cmd/clusterctl/client/cluster/components.go
index bf5a7be3870b..a33cd8294bce 100644
--- a/cmd/clusterctl/client/cluster/components.go
+++ b/cmd/clusterctl/client/cluster/components.go
@@ -287,7 +287,6 @@ func (p *providerComponents) ValidateNoObjectsExist(ctx context.Context, provide
	// Filter the resources according to the delete options
	crsHavingObjects := []string{}
	for _, crd := range customResources.Items {
-		crd := crd
		storageVersion, err := storageVersionForCRD(&crd)
		if err != nil {
			return err
diff --git a/cmd/clusterctl/client/cluster/mover.go b/cmd/clusterctl/client/cluster/mover.go
index f0b6178d64f0..a6fe29017bfa 100644
--- a/cmd/clusterctl/client/cluster/mover.go
+++ b/cmd/clusterctl/client/cluster/mover.go
@@ -361,7 +361,7 @@ func (o *objectMover) move(ctx context.Context, graph *objectGraph, toProxy Prox
	// Create all objects group by group, ensuring all the ownerReferences are re-created.
log.Info("Creating objects in the target cluster") - for groupIndex := 0; groupIndex < len(moveSequence.groups); groupIndex++ { + for groupIndex := range len(moveSequence.groups) { if err := o.createGroup(ctx, moveSequence.getGroup(groupIndex), toProxy, mutators...); err != nil { return err } @@ -419,7 +419,7 @@ func (o *objectMover) toDirectory(ctx context.Context, graph *objectGraph, direc // Save all objects group by group log.Info(fmt.Sprintf("Saving files to %s", directory)) - for groupIndex := 0; groupIndex < len(moveSequence.groups); groupIndex++ { + for groupIndex := range len(moveSequence.groups) { if err := o.backupGroup(ctx, moveSequence.getGroup(groupIndex), directory); err != nil { return err } @@ -459,7 +459,7 @@ func (o *objectMover) fromDirectory(ctx context.Context, graph *objectGraph, toP // Create all objects group by group, ensuring all the ownerReferences are re-created. log.Info("Restoring objects into the target cluster") - for groupIndex := 0; groupIndex < len(moveSequence.groups); groupIndex++ { + for groupIndex := range len(moveSequence.groups) { if err := o.restoreGroup(ctx, moveSequence.getGroup(groupIndex), toProxy); err != nil { return err } diff --git a/cmd/clusterctl/client/cluster/upgrader.go b/cmd/clusterctl/client/cluster/upgrader.go index 7e188564524b..11ec8f8f1891 100644 --- a/cmd/clusterctl/client/cluster/upgrader.go +++ b/cmd/clusterctl/client/cluster/upgrader.go @@ -474,7 +474,6 @@ func (u *providerUpgrader) scaleDownProvider(ctx context.Context, provider clust // Scale down provider Deployments. for _, deployment := range deploymentList.Items { - deployment := deployment log.V(5).Info("Scaling down", "Deployment", klog.KObj(&deployment)) if err := scaleDownDeployment(ctx, cs, deployment); err != nil { return err diff --git a/cmd/clusterctl/client/repository/repository_github.go b/cmd/clusterctl/client/repository/repository_github.go index 786a06128549..e09043fdff40 100644 --- a/cmd/clusterctl/client/repository/repository_github.go +++ b/cmd/clusterctl/client/repository/repository_github.go @@ -349,7 +349,6 @@ func (g *gitHubRepository) getVersions(ctx context.Context) ([]string, error) { } versions := []string{} for _, r := range allReleases { - r := r // pin if r.TagName == nil { continue } diff --git a/cmd/clusterctl/client/tree/util.go b/cmd/clusterctl/client/tree/util.go index eba344044a9b..d2dec36ea170 100644 --- a/cmd/clusterctl/client/tree/util.go +++ b/cmd/clusterctl/client/tree/util.go @@ -45,7 +45,6 @@ func GetOtherConditions(obj client.Object) []*clusterv1.Condition { } var conditions []*clusterv1.Condition for _, c := range getter.GetConditions() { - c := c if c.Type != clusterv1.ReadyCondition { conditions = append(conditions, &c) } diff --git a/cmd/clusterctl/cmd/version_checker.go b/cmd/clusterctl/cmd/version_checker.go index 7876e4e33a6d..48faf3fa5aa5 100644 --- a/cmd/clusterctl/cmd/version_checker.go +++ b/cmd/clusterctl/cmd/version_checker.go @@ -236,7 +236,7 @@ func writeStateFile(path string, vs *VersionState) error { if err != nil { return err } - if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil { + if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil { return err } return os.WriteFile(path, vsb, 0600) diff --git a/controllers/remote/keyedmutex_test.go b/controllers/remote/keyedmutex_test.go index 031fe786fc48..8d23f464e3b7 100644 --- a/controllers/remote/keyedmutex_test.go +++ b/controllers/remote/keyedmutex_test.go @@ -59,7 +59,7 @@ func TestKeyedMutex(t *testing.T) { // Run this twice to ensure 
		// after they have been unlocked.
-		for i := 0; i < 2; i++ {
+		for range 2 {
			// Lock all Clusters (should work).
			for _, key := range clusters {
				g.Expect(km.TryLock(key)).To(BeTrue())
diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go
index 68cc9cbefccc..5a3c8971f632 100644
--- a/controlplane/kubeadm/internal/controllers/controller_test.go
+++ b/controlplane/kubeadm/internal/controllers/controller_test.go
@@ -496,7 +496,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) {
			Workload: fakeWorkloadCluster{},
		}
		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
-		for i := 0; i < 3; i++ {
+		for i := range 3 {
			name := fmt.Sprintf("test-%d", i)
			m := &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
@@ -564,7 +564,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) {
			Workload: fakeWorkloadCluster{},
		}
		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
-		for i := 0; i < 3; i++ {
+		for i := range 3 {
			name := fmt.Sprintf("test-%d", i)
			m := &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
@@ -679,7 +679,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) {
			Workload: fakeWorkloadCluster{},
		}
		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
-		for i := 0; i < 3; i++ {
+		for i := range 3 {
			name := fmt.Sprintf("test-%d", i)
			m := &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
@@ -2090,7 +2090,7 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) {
		initObjs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy()}
		machines := collections.New()
-		for i := 0; i < 3; i++ {
+		for i := range 3 {
			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
			initObjs = append(initObjs, m)
			machines.Insert(m)
@@ -2154,7 +2154,7 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) {
		initObjs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy(), workerMachine.DeepCopy()}
		machines := collections.New()
-		for i := 0; i < 3; i++ {
+		for i := range 3 {
			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
			initObjs = append(initObjs, m)
			machines.Insert(m)
@@ -2212,7 +2212,7 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) {
		initObjs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy(), workerMachinePool.DeepCopy()}
		machines := collections.New()
-		for i := 0; i < 3; i++ {
+		for i := range 3 {
			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
			initObjs = append(initObjs, m)
			machines.Insert(m)
diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go
index 48766c5d7a43..3e9ae0095a5c 100644
--- a/controlplane/kubeadm/internal/controllers/scale_test.go
+++ b/controlplane/kubeadm/internal/controllers/scale_test.go
@@ -137,7 +137,7 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) {
		Workload: fakeWorkloadCluster{},
	}
-	for i := 0; i < 2; i++ {
+	for i := range 2 {
		m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
		setMachineHealthy(m)
		fmc.Machines.Insert(m)
@@ -197,7 +197,7 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) {
	cluster.Status.InfrastructureReady = true
	beforeMachines := collections.New()
-	for i := 0; i < 2; i++ {
+	for i := range 2 {
		m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster.DeepCopy(), kcp.DeepCopy(), true)
		beforeMachines.Insert(m)
	}
diff --git a/controlplane/kubeadm/internal/controllers/status_test.go b/controlplane/kubeadm/internal/controllers/status_test.go
index 833d0f717e42..ab5d5a2e2a49 100644
--- a/controlplane/kubeadm/internal/controllers/status_test.go
+++ b/controlplane/kubeadm/internal/controllers/status_test.go
@@ -134,7 +134,7 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesNotReady(t *testin
	machines := map[string]*clusterv1.Machine{}
	objs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy()}
-	for i := 0; i < 3; i++ {
+	for i := range 3 {
		name := fmt.Sprintf("test-%d", i)
		m, n := createMachineNodePair(name, cluster, kcp, false)
		objs = append(objs, n, m)
@@ -207,7 +207,7 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T
	objs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy(), kubeadmConfigMap()}
	machines := map[string]*clusterv1.Machine{}
-	for i := 0; i < 3; i++ {
+	for i := range 3 {
		name := fmt.Sprintf("test-%d", i)
		m, n := createMachineNodePair(name, cluster, kcp, true)
		objs = append(objs, n, m)
@@ -287,7 +287,7 @@ func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing
	g.Expect(err).ToNot(HaveOccurred())
	machines := map[string]*clusterv1.Machine{}
	objs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy()}
-	for i := 0; i < 4; i++ {
+	for i := range 4 {
		name := fmt.Sprintf("test-%d", i)
		m, n := createMachineNodePair(name, cluster, kcp, false)
		machines[m.Name] = m
@@ -369,7 +369,7 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr
	machines := map[string]*clusterv1.Machine{}
	objs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy()}
	// Create the desired number of machines
-	for i := 0; i < 3; i++ {
+	for i := range 3 {
		name := fmt.Sprintf("test-%d", i)
		m, n := createMachineNodePair(name, cluster, kcp, false)
		machines[m.Name] = m
diff --git a/controlplane/kubeadm/internal/controllers/upgrade_test.go b/controlplane/kubeadm/internal/controllers/upgrade_test.go
index 998fb39ada25..b2f60094e844 100644
--- a/controlplane/kubeadm/internal/controllers/upgrade_test.go
+++ b/controlplane/kubeadm/internal/controllers/upgrade_test.go
@@ -197,7 +197,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleDown(t *testing.T) {
		},
	}
	objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
-	for i := 0; i < 3; i++ {
+	for i := range 3 {
		name := fmt.Sprintf("test-%d", i)
		m := &clusterv1.Machine{
			ObjectMeta: metav1.ObjectMeta{
diff --git a/exp/internal/controllers/machinepool_controller_phases.go b/exp/internal/controllers/machinepool_controller_phases.go
index c04a061ba297..956627036a09 100644
--- a/exp/internal/controllers/machinepool_controller_phases.go
+++ b/exp/internal/controllers/machinepool_controller_phases.go
@@ -516,7 +516,7 @@ func (r *MachinePoolReconciler) waitForMachineCreation(ctx context.Context, mach
	// The polling is against a local memory cache.
	const waitForCacheUpdateInterval = 100 * time.Millisecond
-	for i := 0; i < len(machineList); i++ {
+	for i := range len(machineList) {
		machine := machineList[i]
		pollErr := wait.PollUntilContextTimeout(ctx, waitForCacheUpdateInterval, waitForCacheUpdateTimeout, true, func(ctx context.Context) (bool, error) {
			key := client.ObjectKey{Namespace: machine.Namespace, Name: machine.Name}
diff --git a/exp/internal/controllers/machinepool_controller_phases_test.go b/exp/internal/controllers/machinepool_controller_phases_test.go
index 19b98c7f75b6..b416c818c874 100644
--- a/exp/internal/controllers/machinepool_controller_phases_test.go
+++ b/exp/internal/controllers/machinepool_controller_phases_test.go
@@ -1903,7 +1903,7 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) {
 func getInfraMachines(replicas int, mpName, clusterName, nsName string) []unstructured.Unstructured {
	infraMachines := make([]unstructured.Unstructured, replicas)
-	for i := 0; i < replicas; i++ {
+	for i := range replicas {
		infraMachines[i] = unstructured.Unstructured{
			Object: map[string]interface{}{
				"kind": builder.GenericInfrastructureMachineKind,
@@ -1924,7 +1924,7 @@ func getInfraMachines(replicas int, mpName, clusterName, nsName string) []unstru
 func getMachines(replicas int, mpName, clusterName, nsName string) []clusterv1.Machine {
	machines := make([]clusterv1.Machine, replicas)
-	for i := 0; i < replicas; i++ {
+	for i := range replicas {
		machines[i] = clusterv1.Machine{
			ObjectMeta: metav1.ObjectMeta{
				Name: fmt.Sprintf("%s-machine-%d", mpName, i),
diff --git a/exp/topology/desiredstate/desired_state.go b/exp/topology/desiredstate/desired_state.go
index 2da2cd26b435..d3925ef355b2 100644
--- a/exp/topology/desiredstate/desired_state.go
+++ b/exp/topology/desiredstate/desired_state.go
@@ -632,7 +632,6 @@ func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope
	var machineDeploymentClass *clusterv1.MachineDeploymentClass
	for _, mdClass := range s.Blueprint.ClusterClass.Spec.Workers.MachineDeployments {
-		mdClass := mdClass
		if mdClass.Class == className {
			machineDeploymentClass = &mdClass
			break
@@ -958,7 +957,6 @@ func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin
	var machinePoolClass *clusterv1.MachinePoolClass
	for _, mpClass := range s.Blueprint.ClusterClass.Spec.Workers.MachinePools {
-		mpClass := mpClass
		if mpClass.Class == className {
			machinePoolClass = &mpClass
			break
diff --git a/exp/util/util_test.go b/exp/util/util_test.go
index bde9a221c5d1..cf5c2f648ecb 100644
--- a/exp/util/util_test.go
+++ b/exp/util/util_test.go
@@ -130,7 +130,6 @@ func TestGetMachinePoolByLabels(t *testing.T) {
	}
	for _, tc := range testcases {
-		tc := tc
		t.Run(tc.name, func(*testing.T) {
			clientFake := fake.NewClientBuilder().
				WithScheme(fakeScheme).
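The `for i := 0; i < n; i++` → `for i := range n` rewrites come from the newly enabled `intrange` linter: since Go 1.22 a `range` over an integer iterates 0 through n-1, and the loop variable can be dropped entirely when it is unused. A small, self-contained sketch of the equivalences (not code from this PR):

```go
package main

import "fmt"

func main() {
	// Classic three-clause loop.
	for i := 0; i < 3; i++ {
		fmt.Println("classic", i)
	}

	// Go 1.22 integer range: same iteration space, 0 through n-1.
	for i := range 3 {
		fmt.Println("range", i)
	}

	// When the index is unused, drop it entirely (e.g. worker startup loops).
	for range 3 {
		fmt.Println("tick")
	}
}
```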
diff --git a/hack/tools/prowjob-gen/generator.go b/hack/tools/prowjob-gen/generator.go
index f923b903fede..f4a1a01b3bb9 100644
--- a/hack/tools/prowjob-gen/generator.go
+++ b/hack/tools/prowjob-gen/generator.go
@@ -223,7 +223,7 @@ func mustHas(needle interface{}, haystack interface{}) (bool, error) {
	l2 := reflect.ValueOf(haystack)
	var item interface{}
	l := l2.Len()
-	for i := 0; i < l; i++ {
+	for i := range l {
		item = l2.Index(i).Interface()
		if reflect.DeepEqual(needle, item) {
			return true, nil
diff --git a/internal/controllers/machinedeployment/machinedeployment_controller_test.go b/internal/controllers/machinedeployment/machinedeployment_controller_test.go
index b54f128cb41e..a82768f2abee 100644
--- a/internal/controllers/machinedeployment/machinedeployment_controller_test.go
+++ b/internal/controllers/machinedeployment/machinedeployment_controller_test.go
@@ -388,7 +388,7 @@ func TestMachineDeploymentReconciler(t *testing.T) {
			if err := env.List(ctx, machineSets, msListOpts...); err != nil {
				return false
			}
-			for i := 0; i < len(machineSets.Items); i++ {
+			for range len(machineSets.Items) {
				ms := machineSets.Items[0]
				if !metav1.IsControlledBy(&ms, deployment) || metav1.GetControllerOf(&ms).Kind != "MachineDeployment" {
					return false
@@ -414,7 +414,7 @@ func TestMachineDeploymentReconciler(t *testing.T) {
		// to properly set AvailableReplicas.
		foundMachines := &clusterv1.MachineList{}
		g.Expect(env.List(ctx, foundMachines, client.InNamespace(namespace.Name))).To(Succeed())
-		for i := 0; i < len(foundMachines.Items); i++ {
+		for i := range len(foundMachines.Items) {
			m := foundMachines.Items[i]
			// Skip over deleted Machines
			if !m.DeletionTimestamp.IsZero() {
@@ -442,7 +442,7 @@ func TestMachineDeploymentReconciler(t *testing.T) {
		// to properly set AvailableReplicas.
		foundMachines := &clusterv1.MachineList{}
		g.Expect(env.List(ctx, foundMachines, client.InNamespace(namespace.Name))).To(Succeed())
-		for i := 0; i < len(foundMachines.Items); i++ {
+		for i := range len(foundMachines.Items) {
			m := foundMachines.Items[i]
			if !m.DeletionTimestamp.IsZero() {
				continue
diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go
index a64fc1a51761..ba513922f2c6 100644
--- a/internal/controllers/machinedeployment/machinedeployment_sync.go
+++ b/internal/controllers/machinedeployment/machinedeployment_sync.go
@@ -630,7 +630,7 @@ func (r *Reconciler) cleanupDeployment(ctx context.Context, oldMSs []*clusterv1.
	sort.Sort(mdutil.MachineSetsByCreationTimestamp(cleanableMSes))
	log.V(4).Info("Looking to cleanup old machine sets for deployment")
-	for i := int32(0); i < diff; i++ {
+	for i := range diff {
		ms := cleanableMSes[i]
		if ms.Spec.Replicas == nil {
			return errors.Errorf("spec replicas for machine set %v is nil, this is unexpected", ms.Name)
diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go
index 55d13ab5f931..4645f43df91e 100644
--- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go
+++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go
@@ -2492,13 +2492,11 @@ func createMachinesWithNodes(
		op(o)
	}
-	var (
-		nodes         []*corev1.Node
-		machines      []*clusterv1.Machine
-		infraMachines []*unstructured.Unstructured
-	)
+	nodes := make([]*corev1.Node, 0, o.count)
+	machines := make([]*clusterv1.Machine, 0, o.count)
+	infraMachines := make([]*unstructured.Unstructured, 0, o.count)
-	for i := 0; i < o.count; i++ {
+	for i := range o.count {
		machine := newRunningMachine(c, o.labels)
		if i == 0 && o.firstMachineAsControlPlane {
			if machine.Labels == nil {
diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go
index 88fc35257ba8..dc4b1c1a1a26 100644
--- a/internal/controllers/machineset/machineset_controller.go
+++ b/internal/controllers/machineset/machineset_controller.go
@@ -461,7 +461,7 @@ func (r *Reconciler) syncReplicas(ctx context.Context, cluster *clusterv1.Cluste
		errs []error
	)
-	for i := 0; i < diff; i++ {
+	for i := range diff {
		// Create a new logger so the global logger is not modified.
		log := log
		machine := r.computeDesiredMachine(ms, nil)
@@ -729,7 +729,7 @@ func (r *Reconciler) adoptOrphan(ctx context.Context, machineSet *clusterv1.Mach
 func (r *Reconciler) waitForMachineCreation(ctx context.Context, machineList []*clusterv1.Machine) error {
	log := ctrl.LoggerFrom(ctx)
-	for i := 0; i < len(machineList); i++ {
+	for i := range len(machineList) {
		machine := machineList[i]
		pollErr := wait.PollUntilContextTimeout(ctx, stateConfirmationInterval, stateConfirmationTimeout, true, func(ctx context.Context) (bool, error) {
			key := client.ObjectKey{Namespace: machine.Namespace, Name: machine.Name}
@@ -755,7 +755,7 @@ func (r *Reconciler) waitForMachineCreation(ctx context.Context, machineList []*
 func (r *Reconciler) waitForMachineDeletion(ctx context.Context, machineList []*clusterv1.Machine) error {
	log := ctrl.LoggerFrom(ctx)
-	for i := 0; i < len(machineList); i++ {
+	for i := range len(machineList) {
		machine := machineList[i]
		pollErr := wait.PollUntilContextTimeout(ctx, stateConfirmationInterval, stateConfirmationTimeout, true, func(ctx context.Context) (bool, error) {
			m := &clusterv1.Machine{}
diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go
index edc8591d0e16..b490632e4c13 100644
--- a/internal/controllers/machineset/machineset_controller_test.go
+++ b/internal/controllers/machineset/machineset_controller_test.go
@@ -350,7 +350,7 @@ func TestMachineSetReconciler(t *testing.T) {
		// Verify that each machine has the desired kubelet version,
		// create a fake node in Ready state, update NodeRef, and wait for a reconciliation request.
- for i := 0; i < len(machines.Items); i++ { + for i := range len(machines.Items) { m := machines.Items[i] if !m.DeletionTimestamp.IsZero() { // Skip deleted Machines diff --git a/internal/controllers/machineset/machineset_delete_policy_test.go b/internal/controllers/machineset/machineset_delete_policy_test.go index 2dee46ac37e6..9b4d8693909a 100644 --- a/internal/controllers/machineset/machineset_delete_policy_test.go +++ b/internal/controllers/machineset/machineset_delete_policy_test.go @@ -557,7 +557,7 @@ func TestMachineOldestDelete(t *testing.T) { func TestMachineDeleteMultipleSamePriority(t *testing.T) { machines := make([]*clusterv1.Machine, 0, 10) // All of these machines will have the same delete priority because they all have the "must delete" annotation. - for i := 0; i < 10; i++ { + for i := range 10 { machines = append(machines, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("machine-%d", i), Annotations: map[string]string{clusterv1.DeleteMachineAnnotation: "true"}}, }) diff --git a/internal/util/compare/equal_test.go b/internal/util/compare/equal_test.go index 845b2a12b9fc..7d5e72b615f1 100644 --- a/internal/util/compare/equal_test.go +++ b/internal/util/compare/equal_test.go @@ -77,7 +77,6 @@ consider using a custom Comparer; if you control the implementation of type, you } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) diff --git a/internal/webhooks/clusterclass.go b/internal/webhooks/clusterclass.go index 050bc4071a39..34568da6eef3 100644 --- a/internal/webhooks/clusterclass.go +++ b/internal/webhooks/clusterclass.go @@ -515,7 +515,6 @@ func validateClusterClassMetadata(clusterClass *clusterv1.ClusterClass) field.Er func validateAutoscalerAnnotationsForClusterClass(clusters []clusterv1.Cluster, newClusterClass *clusterv1.ClusterClass) field.ErrorList { var allErrs field.ErrorList for _, c := range clusters { - c := c allErrs = append(allErrs, validateAutoscalerAnnotationsForCluster(&c, newClusterClass)...) } return allErrs diff --git a/test/e2e/clusterclass_rollout.go b/test/e2e/clusterclass_rollout.go index e9c1c3238429..d0bd47a2c773 100644 --- a/test/e2e/clusterclass_rollout.go +++ b/test/e2e/clusterclass_rollout.go @@ -1135,7 +1135,6 @@ func getClusterObjects(ctx context.Context, g Gomega, clusterProxy framework.Clu g.Expect(err).ToNot(HaveOccurred()) g.Expect(controlPlaneMachineList.Items).To(HaveLen(int(*replicas))) for _, machine := range controlPlaneMachineList.Items { - machine := machine res.ControlPlaneMachines = append(res.ControlPlaneMachines, &machine) addMachineObjects(ctx, mgmtClient, workloadClient, g, res, cluster, &machine) } @@ -1172,7 +1171,6 @@ func getClusterObjects(ctx context.Context, g Gomega, clusterProxy framework.Clu // Check all MachineDeployment machines already exist. 
	g.Expect(machines).To(HaveLen(int(*md.Spec.Replicas)))
	for _, machine := range machines {
-		machine := machine
		res.MachinesByMachineSet[machine.Labels[clusterv1.MachineSetNameLabel]] = append(
			res.MachinesByMachineSet[machine.Labels[clusterv1.MachineSetNameLabel]], &machine)
		addMachineObjects(ctx, mgmtClient, workloadClient, g, res, cluster, &machine)
diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go
index 637a9a916a21..e5e32ae7aa12 100644
--- a/test/e2e/clusterctl_upgrade.go
+++ b/test/e2e/clusterctl_upgrade.go
@@ -891,7 +891,6 @@ func calculateExpectedMachinePoolMachineCount(ctx context.Context, c client.Clie
		client.MatchingLabels{clusterv1.ClusterNameLabel: workloadClusterName},
	); err == nil {
		for _, mp := range machinePoolList.Items {
-			mp := mp
			infraMachinePool, err := external.Get(ctx, c, &mp.Spec.Template.Spec.InfrastructureRef, workloadClusterNamespace)
			if err != nil {
				return 0, err
@@ -1039,7 +1038,6 @@ func validateMachineRollout(preMachineList, postMachineList *unstructured.Unstru
	if len(newMachines) > 0 {
		log.Logf("Detected new Machines")
		for _, obj := range postMachineList.Items {
-			obj := obj
			if newMachines.Has(obj.GetName()) {
				resourceYAML, err := yaml.Marshal(obj)
				Expect(err).ToNot(HaveOccurred())
@@ -1051,7 +1049,6 @@ func validateMachineRollout(preMachineList, postMachineList *unstructured.Unstru
	if len(deletedMachines) > 0 {
		log.Logf("Detected deleted Machines")
		for _, obj := range preMachineList.Items {
-			obj := obj
			if deletedMachines.Has(obj.GetName()) {
				resourceYAML, err := yaml.Marshal(obj)
				Expect(err).ToNot(HaveOccurred())
diff --git a/test/e2e/kcp_adoption.go b/test/e2e/kcp_adoption.go
index 44b9fe1e2a07..c489fe06ebb5 100644
--- a/test/e2e/kcp_adoption.go
+++ b/test/e2e/kcp_adoption.go
@@ -194,7 +194,6 @@ func KCPAdoptionSpec(ctx context.Context, inputGetter func() KCPAdoptionSpecInpu
	)).To(Succeed())
	for _, m := range machines.Items {
-		m := m
		Expect(&m).To(HaveControllerRef(framework.ObjectToKind(controlPlane), controlPlane))
		// TODO there is a missing unit test here
		Expect(m.CreationTimestamp.Time).To(BeTemporally("<", controlPlane.CreationTimestamp.Time),
@@ -230,7 +229,6 @@ func KCPAdoptionSpec(ctx context.Context, inputGetter func() KCPAdoptionSpecInpu
	}
	for _, s := range secrets.Items {
-		s := s
		// We don't check the data, and removing it from the object makes assertions much easier to read
		s.Data = nil
diff --git a/test/e2e/scale.go b/test/e2e/scale.go
index 7e0d3b9d6983..8a2cdd0bb77f 100644
--- a/test/e2e/scale.go
+++ b/test/e2e/scale.go
@@ -481,7 +481,7 @@ func workConcurrentlyAndWait(ctx context.Context, input workConcurrentlyAndWaitI
	defer cancel()
	// Start the workers.
-	for i := int64(0); i < input.Concurrency; i++ {
+	for range input.Concurrency {
		wg.Add(1)
		go input.WorkerFunc(ctx, inputChan, resultChan, wg)
	}
diff --git a/test/framework/docker_logcollector.go b/test/framework/docker_logcollector.go
index e4e132bfb441..6655685cb2a2 100644
--- a/test/framework/docker_logcollector.go
+++ b/test/framework/docker_logcollector.go
@@ -141,7 +141,7 @@ func (k DockerLogCollector) collectLogsFromNode(ctx context.Context, outputPath
		return errors.Wrapf(err, execErr)
	}
-	err = os.MkdirAll(outputDir, os.ModePerm)
+	err = os.MkdirAll(outputDir, 0750)
	if err != nil {
		return err
	}
@@ -182,7 +182,7 @@ func (k DockerLogCollector) collectLogsFromNode(ctx context.Context, outputPath
 // even if the parent directory doesn't exist
 // in which case it will be created with ModePerm.
 func fileOnHost(path string) (*os.File, error) {
-	if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
+	if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil {
		return nil, err
	}
	return os.Create(path) //nolint:gosec // No security issue: path is safe.
diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go
index 6172599496c2..2afae7393738 100644
--- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go
+++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go
@@ -62,7 +62,7 @@ func (r *DockerMachinePoolReconciler) reconcileDockerContainers(ctx context.Cont
	matchingMachineCount := len(machinesMatchingInfrastructureSpec(ctx, machines, machinePool, dockerMachinePool))
	numToCreate := int(*machinePool.Spec.Replicas) - matchingMachineCount
-	for i := 0; i < numToCreate; i++ {
+	for range numToCreate {
		log.V(2).Info("Creating a new Docker container for machinePool", "MachinePool", klog.KObj(machinePool))
		name := fmt.Sprintf("worker-%s", util.RandomString(6))
		if err := createDockerContainer(ctx, name, cluster, machinePool, dockerMachinePool); err != nil {
diff --git a/test/infrastructure/inmemory/pkg/runtime/cache/cache_test.go b/test/infrastructure/inmemory/pkg/runtime/cache/cache_test.go
index 00570fc3c568..dd5f053e0499 100644
--- a/test/infrastructure/inmemory/pkg/runtime/cache/cache_test.go
+++ b/test/infrastructure/inmemory/pkg/runtime/cache/cache_test.go
@@ -75,7 +75,7 @@ func Test_cache_scale(t *testing.T) {
		return fmt.Sprintf("machine-%d", j)
	}
-	for i := 0; i < resourceGroups; i++ {
+	for i := range resourceGroups {
		resourceGroup := fmt.Sprintf("resourceGroup-%d", i)
		c.AddResourceGroup(resourceGroup)
diff --git a/test/infrastructure/inmemory/pkg/runtime/cache/gc.go b/test/infrastructure/inmemory/pkg/runtime/cache/gc.go
index 0a7c5b1c51de..adf186638a02 100644
--- a/test/infrastructure/inmemory/pkg/runtime/cache/gc.go
+++ b/test/infrastructure/inmemory/pkg/runtime/cache/gc.go
@@ -52,7 +52,7 @@ func (c *cache) startGarbageCollector(ctx context.Context) error {
	log.Info("Starting garbage collector workers", "count", c.garbageCollectorConcurrency)
	wg := &sync.WaitGroup{}
	wg.Add(c.garbageCollectorConcurrency)
-	for i := 0; i < c.garbageCollectorConcurrency; i++ {
+	for range c.garbageCollectorConcurrency {
		go func() {
			atomic.AddInt64(&workers, 1)
			defer wg.Done()
diff --git a/test/infrastructure/inmemory/pkg/runtime/cache/sync.go b/test/infrastructure/inmemory/pkg/runtime/cache/sync.go
index 5057e599d070..cd5b78ced2ad 100644
--- a/test/infrastructure/inmemory/pkg/runtime/cache/sync.go
+++ b/test/infrastructure/inmemory/pkg/runtime/cache/sync.go
@@ -69,7 +69,7 @@ func (c *cache) startSyncer(ctx context.Context) error {
	log.Info("Starting sync workers", "count", c.syncConcurrency)
	wg := &sync.WaitGroup{}
	wg.Add(c.syncConcurrency)
-	for i := 0; i < c.syncConcurrency; i++ {
+	for range c.syncConcurrency {
		go func() {
			atomic.AddInt64(&workers, 1)
			defer wg.Done()
diff --git a/util/annotations/helpers_test.go b/util/annotations/helpers_test.go
index b0991d6e192e..3886af8a6244 100644
--- a/util/annotations/helpers_test.go
+++ b/util/annotations/helpers_test.go
@@ -265,7 +265,6 @@ func TestHasTruthyAnnotationValue(t *testing.T) {
		},
	}
	for _, tt := range tests {
-		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)
			ret := hasTruthyAnnotationValue(tt.obj, tt.annotationKey)
diff --git a/util/conversion/conversion.go b/util/conversion/conversion.go
index af1dff3c4a34..561cf69d102b 100644
--- a/util/conversion/conversion.go
+++ b/util/conversion/conversion.go
@@ -190,7 +190,7 @@ func FuzzTestFunc(input FuzzTestFuncInput) func(*testing.T) {
		g := gomega.NewWithT(t)
		fuzzer := GetFuzzer(input.Scheme, input.FuzzerFuncs...)
-		for i := 0; i < 10000; i++ {
+		for range 10000 {
			// Create the spoke and fuzz it
			spokeBefore := input.Spoke.DeepCopyObject().(conversion.Convertible)
			fuzzer.Fuzz(spokeBefore)
@@ -221,7 +221,7 @@ func FuzzTestFunc(input FuzzTestFuncInput) func(*testing.T) {
		g := gomega.NewWithT(t)
		fuzzer := GetFuzzer(input.Scheme, input.FuzzerFuncs...)
-		for i := 0; i < 10000; i++ {
+		for range 10000 {
			// Create the hub and fuzz it
			hubBefore := input.Hub.DeepCopyObject().(conversion.Hub)
			fuzzer.Fuzz(hubBefore)
diff --git a/util/labels/helpers_test.go b/util/labels/helpers_test.go
index e2ff0bd0ebbc..6a5a48ea4edb 100644
--- a/util/labels/helpers_test.go
+++ b/util/labels/helpers_test.go
@@ -131,8 +131,7 @@ func TestIsMachinePoolOwned(t *testing.T) {
		},
	}
-	for i := range tests {
-		tt := tests[i]
+	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)
diff --git a/util/resource/resource_test.go b/util/resource/resource_test.go
index 3a891e0f33e4..909ef5aa9123 100644
--- a/util/resource/resource_test.go
+++ b/util/resource/resource_test.go
@@ -74,7 +74,7 @@ func TestSortForCreateAllShuffle(t *testing.T) {
		resource.SetKind(kind)
		resources = append(resources, resource)
	}
-	for j := 0; j < 100; j++ {
+	for j := range 100 {
		// determinically shuffle resources
		rnd := rand.New(rand.NewSource(int64(j))) //nolint:gosec
		rnd.Shuffle(len(resources), func(i, j int) {
diff --git a/util/util.go b/util/util.go
index 1648cb3d1256..3892d81cdd55 100644
--- a/util/util.go
+++ b/util/util.go
@@ -348,7 +348,6 @@ func indexOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerRefe
 // It matches the object based on the Group, Kind and Name.
 func IsOwnedByObject(obj metav1.Object, target client.Object) bool {
	for _, ref := range obj.GetOwnerReferences() {
-		ref := ref
		if refersTo(&ref, target) {
			return true
		}
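Apart from the loop changes, the diff also tightens directory creation in `version_checker.go` and `docker_logcollector.go` from `os.ModePerm` (0777) to `0750`, which is the level gosec-style permission checks typically accept for created directories (owner rwx, group rx, no world access); the effective mode is still subject to the process umask. A hedged sketch of the difference (paths here are placeholders, not the ones used by clusterctl):

```go
package main

import (
	"log"
	"os"
	"path/filepath"
)

func main() {
	base := filepath.Join(os.TempDir(), "perm-example")

	// os.ModePerm is 0o777: before the umask is applied, the directory would be world-writable.
	log.Printf("os.ModePerm = %#o", os.ModePerm)

	// 0750 keeps the directory readable only by its owner and group.
	if err := os.MkdirAll(filepath.Join(base, "state"), 0750); err != nil {
		log.Fatal(err)
	}

	// Files created underneath keep their own, stricter mode (0600 in the diff above).
	if err := os.WriteFile(filepath.Join(base, "state", "version.yaml"), []byte("{}\n"), 0600); err != nil {
		log.Fatal(err)
	}
}
```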