
Commit 043fa69

Update golangci-lint (1.44.0 -> 1.48.0) and Go version (1.17 -> 1.18) for lint workflow

Also fix new lint findings
oscr committed Aug 18, 2022
1 parent 50d525e commit 043fa69
Showing 35 changed files with 238 additions and 213 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/golangci-lint.yml
@@ -21,9 +21,9 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-go@v3
         with:
-          go-version: 1.17
+          go-version: 1.19
       - name: golangci-lint
         uses: golangci/golangci-lint-action@v3.2.0
         with:
-          version: v1.44.0
+          version: v1.48.0
           working-directory: ${{matrix.working-directory}}
1 change: 1 addition & 0 deletions cmd/clusterctl/api/v1alpha3/metadata_type_test.go
@@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */
+
 package v1alpha3

 import (
12 changes: 6 additions & 6 deletions cmd/clusterctl/client/cluster/proxy.go
@@ -206,12 +206,12 @@ func (k *proxy) CheckClusterAvailable() error {
 // This is done to avoid errors when listing resources of providers which have already been deleted/scaled down to 0 replicas/with
 // malfunctioning webhooks.
 // For example:
-// * The AWS provider has already been deleted, but there are still cluster-wide resources of AWSClusterControllerIdentity.
-// * The AWSClusterControllerIdentity resources are still stored in an older version (e.g. v1alpha4, when the preferred
-//   version is v1beta1)
-// * If we now want to delete e.g. the kubeadm bootstrap provider, we cannot list AWSClusterControllerIdentity resources
-//   as the conversion would fail, because the AWS controller hosting the conversion webhook has already been deleted.
-// * Thus we exclude resources of other providers if we detect that ListResources is called to list resources of a provider.
+//   - The AWS provider has already been deleted, but there are still cluster-wide resources of AWSClusterControllerIdentity.
+//   - The AWSClusterControllerIdentity resources are still stored in an older version (e.g. v1alpha4, when the preferred
+//     version is v1beta1)
+//   - If we now want to delete e.g. the kubeadm bootstrap provider, we cannot list AWSClusterControllerIdentity resources
+//     as the conversion would fail, because the AWS controller hosting the conversion webhook has already been deleted.
+//   - Thus we exclude resources of other providers if we detect that ListResources is called to list resources of a provider.
 func (k *proxy) ListResources(labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error) {
 	cs, err := k.newClientSet()
 	if err != nil {
30 changes: 16 additions & 14 deletions cmd/clusterctl/client/cluster/topology.go
@@ -257,9 +257,9 @@ func (t *topologyClient) validateInput(in *TopologyPlanInput) error {
 }

 // prepareInput does the following on the input objects:
-// - Set the target namespace on the objects if not set (this operation is generally done by kubectl)
-// - Prepare cluster objects so that the state of the cluster, if modified, correctly represents
-//   the expected changes.
+//   - Set the target namespace on the objects if not set (this operation is generally done by kubectl)
+//   - Prepare cluster objects so that the state of the cluster, if modified, correctly represents
+//     the expected changes.
 func (t *topologyClient) prepareInput(ctx context.Context, in *TopologyPlanInput, apiReader client.Reader) error {
 	if err := t.setMissingNamespaces(in.TargetNamespace, in.Objs); err != nil {
 		return errors.Wrap(err, "failed to set missing namespaces")
@@ -297,18 +297,20 @@ func (t *topologyClient) setMissingNamespaces(currentNamespace string, objs []*u
 }

 // prepareClusters does the following operations on each Cluster in the input.
-// - Check if the Cluster exists in the real apiserver.
-// - If the Cluster exists in the real apiserver we merge the object from the
-//   server with the object from the input. This final object correctly represents the
-//   modified cluster object.
-//   Note: We are using a simple 2-way merge to calculate the final object in this function
-//   to keep the function simple. In reality kubectl does a lot more. This function does not behave exactly
-//   the same way as kubectl does.
+//   - Check if the Cluster exists in the real apiserver.
+//   - If the Cluster exists in the real apiserver we merge the object from the
+//     server with the object from the input. This final object correctly represents the
+//     modified cluster object.
+//     Note: We are using a simple 2-way merge to calculate the final object in this function
+//     to keep the function simple. In reality kubectl does a lot more. This function does not behave exactly
+//     the same way as kubectl does.
+//
 // *Important note*: We do this above operation because the topology reconciler in a
-//   real run takes as input a cluster object from the apiserver that has merged spec of
-//   the changes in the input and the one stored in the server. For example: the cluster
-//   object in the input will not have cluster.spec.infrastructureRef and cluster.spec.controlPlaneRef
-//   but the merged object will have these fields set.
+//
+// real run takes as input a cluster object from the apiserver that has merged spec of
+// the changes in the input and the one stored in the server. For example: the cluster
+// object in the input will not have cluster.spec.infrastructureRef and cluster.spec.controlPlaneRef
+// but the merged object will have these fields set.
 func (t *topologyClient) prepareClusters(ctx context.Context, clusters []*unstructured.Unstructured, apiReader client.Reader) error {
 	if apiReader == nil {
 		// If there is no backing server there is nothing more to do here.
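As a rough illustration of the "simple 2-way merge" mentioned in the comment above, here is a self-contained sketch (not the clusterctl implementation; types and field values are simplified and invented) that overlays an input object onto the server copy while keeping server-only fields such as spec.infrastructureRef:

```go
package main

import "fmt"

// twoWayMerge is an illustrative sketch: values set in the input override the server copy,
// anything only present on the server is kept, and nested maps are merged recursively,
// similar in spirit to how unstructured object content is shaped.
func twoWayMerge(server, input map[string]interface{}) map[string]interface{} {
	out := map[string]interface{}{}
	for k, v := range server {
		out[k] = v
	}
	for k, v := range input {
		if inMap, ok := v.(map[string]interface{}); ok {
			if srvMap, ok := out[k].(map[string]interface{}); ok {
				out[k] = twoWayMerge(srvMap, inMap)
				continue
			}
		}
		out[k] = v
	}
	return out
}

func main() {
	server := map[string]interface{}{
		"spec": map[string]interface{}{
			"infrastructureRef": "DockerCluster/my-cluster", // only known to the server
			"controlPlaneRef":   "KubeadmControlPlane/my-cluster",
		},
	}
	input := map[string]interface{}{
		"spec": map[string]interface{}{
			"topology": map[string]interface{}{"version": "v1.24.0"}, // the change being planned
		},
	}
	// The merged object keeps the server-side refs and picks up the input's topology change.
	fmt.Println(twoWayMerge(server, input))
}
```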
12 changes: 6 additions & 6 deletions cmd/clusterctl/client/repository/template.go
@@ -144,12 +144,12 @@ func NewTemplate(input TemplateInput) (Template, error) {

 // MergeTemplates merges the provided Templates into one Template.
 // Notes on the merge operation:
-// - The merge operation returns an error if all the templates do not have the same TargetNamespace.
-// - The Variables of the resulting template is a union of all Variables in the templates.
-// - The default value is picked from the first template that defines it.
-//   The defaults of the same variable in the subsequent templates will be ignored.
-//   (e.g when merging a cluster template and its ClusterClass, the default value from the template takes precedence)
-// - The Objs of the final template will be a union of all the Objs in the templates.
+//   - The merge operation returns an error if all the templates do not have the same TargetNamespace.
+//   - The Variables of the resulting template is a union of all Variables in the templates.
+//   - The default value is picked from the first template that defines it.
+//     The defaults of the same variable in the subsequent templates will be ignored.
+//     (e.g when merging a cluster template and its ClusterClass, the default value from the template takes precedence)
+//   - The Objs of the final template will be a union of all the Objs in the templates.
 func MergeTemplates(templates ...Template) (Template, error) {
 	templates = filterNilTemplates(templates...)
 	if len(templates) == 0 {
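A minimal sketch of the variable-merge rule documented above, assuming a simplified name-to-default map rather than the real Variables type (the variable names are only examples):

```go
package main

import "fmt"

// templateVars is a hypothetical stand-in for a Template's variables: name -> default value.
type templateVars map[string]string

// mergeVariables follows the documented rule: the result is the union of all variables,
// and the default comes from the first template that defines the variable.
func mergeVariables(templates ...templateVars) templateVars {
	merged := templateVars{}
	for _, t := range templates {
		for name, def := range t {
			if _, ok := merged[name]; !ok {
				merged[name] = def
			}
		}
	}
	return merged
}

func main() {
	clusterTemplate := templateVars{"CONTROL_PLANE_MACHINE_COUNT": "3"}
	clusterClass := templateVars{"CONTROL_PLANE_MACHINE_COUNT": "1", "WORKER_MACHINE_COUNT": "0"}
	// The cluster template comes first, so its default for CONTROL_PLANE_MACHINE_COUNT wins.
	fmt.Println(mergeVariables(clusterTemplate, clusterClass))
}
```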
34 changes: 17 additions & 17 deletions cmd/clusterctl/client/tree/doc.go
@@ -21,29 +21,29 @@ understanding if there are problems and where.
 The "at glance" view is based on the idea that we should avoid to overload the user with information, but instead
 surface problems, if any; in practice:

-- The view assumes we are processing objects conforming with https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20200506-conditions.md.
-  As a consequence each object should have a Ready condition summarizing the object state.
+  - The view assumes we are processing objects conforming with https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20200506-conditions.md.
+    As a consequence each object should have a Ready condition summarizing the object state.

-- The view organizes objects in a hierarchical tree, however it is not required that the
-  tree reflects the ownerReference tree so it is possible to skip objects not relevant for triaging the cluster status
-  e.g. secrets or templates.
+  - The view organizes objects in a hierarchical tree, however it is not required that the
+    tree reflects the ownerReference tree so it is possible to skip objects not relevant for triaging the cluster status
+    e.g. secrets or templates.

-- It is possible to add "meta names" to object, thus making hierarchical tree more consistent for the users,
-  e.g. use MachineInfrastructure instead of using all the different infrastructure machine kinds (AWSMachine, VSphereMachine etc.).
+  - It is possible to add "meta names" to object, thus making hierarchical tree more consistent for the users,
+    e.g. use MachineInfrastructure instead of using all the different infrastructure machine kinds (AWSMachine, VSphereMachine etc.).

-- It is possible to add "virtual nodes", thus allowing to make the hierarchical tree more meaningful for the users,
-  e.g. adding a Workers object to group all the MachineDeployments.
+  - It is possible to add "virtual nodes", thus allowing to make the hierarchical tree more meaningful for the users,
+    e.g. adding a Workers object to group all the MachineDeployments.

-- It is possible to "group" siblings objects by ready condition e.g. group all the machines with Ready=true
-  in a single node instead of listing each one of them.
+  - It is possible to "group" siblings objects by ready condition e.g. group all the machines with Ready=true
+    in a single node instead of listing each one of them.

-- Given that the ready condition of the child object bubbles up to the parents, it is possible to avoid the "echo"
-  (reporting the same condition at the parent/child) e.g. if a machine's Ready condition is already
-  surface an error from the infrastructure machine, let's avoid to show the InfrastructureMachine
-  given that representing its state is redundant in this case.
+  - Given that the ready condition of the child object bubbles up to the parents, it is possible to avoid the "echo"
+    (reporting the same condition at the parent/child) e.g. if a machine's Ready condition is already
+    surface an error from the infrastructure machine, let's avoid to show the InfrastructureMachine
+    given that representing its state is redundant in this case.

-- In order to avoid long list of objects (think e.g. a cluster with 50 worker machines), sibling objects with the
-  same value for the ready condition can be grouped together into a virtual node, e.g. 10 Machines ready
+  - In order to avoid long list of objects (think e.g. a cluster with 50 worker machines), sibling objects with the
+    same value for the ready condition can be grouped together into a virtual node, e.g. 10 Machines ready

 The ObjectTree object defined implements all the above behaviors of the "at glance" visualization, by generating
 a tree of Kubernetes objects; each object gets a set of annotation, reflecting its own visualization specific attributes,
@@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */
+
 package yamlprocessor

 import (
10 changes: 5 additions & 5 deletions cmd/clusterctl/log/logger.go
@@ -150,11 +150,11 @@ func copySlice(in []interface{}) []interface{} {

 // flatten returns a human readable/machine parsable text representing the LogEntry.
 // Most notable difference with the klog implementation are:
-// - The message is printed at the beginning of the line, without the Msg= variable name e.g.
-//   "Msg"="This is a message" --> This is a message
-// - Variables name are not quoted, eg.
-//   This is a message "Var1"="value" --> This is a message Var1="value"
-// - Variables are not sorted, thus allowing full control to the developer on the output.
+//   - The message is printed at the beginning of the line, without the Msg= variable name e.g.
+//     "Msg"="This is a message" --> This is a message
+//   - Variables name are not quoted, eg.
+//     This is a message "Var1"="value" --> This is a message Var1="value"
+//   - Variables are not sorted, thus allowing full control to the developer on the output.
 func flatten(entry logEntry) (string, error) {
 	var msgValue string
 	var errorValue error
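For reference, a minimal sketch of the log line shape the comment above describes (not the actual clusterctl flatten implementation; the message and variable names are made up):

```go
package main

import (
	"fmt"
	"strings"
)

// flattenSketch illustrates the documented format: message first and unquoted,
// then key=value pairs with unquoted keys, in the order they were provided.
func flattenSketch(msg string, kvs ...interface{}) string {
	var b strings.Builder
	b.WriteString(msg)
	for i := 0; i+1 < len(kvs); i += 2 {
		fmt.Fprintf(&b, " %s=%q", kvs[i], kvs[i+1])
	}
	return b.String()
}

func main() {
	// Prints: This is a message Var1="value"
	fmt.Println(flattenSketch("This is a message", "Var1", "value"))
}
```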
8 changes: 4 additions & 4 deletions controllers/noderefutil/providerid.go
@@ -41,10 +41,10 @@ type ProviderID struct {
 }

 /*
-- must start with at least one non-colon
-- followed by ://
-- followed by any number of characters
-- must end with a non-slash
+  - must start with at least one non-colon
+  - followed by ://
+  - followed by any number of characters
+  - must end with a non-slash.
 */
 var providerIDRegex = regexp.MustCompile("^[^:]+://.*[^/]$")

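A standalone sketch exercising the same pattern, to illustrate the constraints listed in the comment (the sample provider IDs are made up):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the providerIDRegex documented above.
var providerIDRegex = regexp.MustCompile("^[^:]+://.*[^/]$")

func main() {
	for _, id := range []string{
		"aws:///us-east-1a/i-0123456789abcdef0", // matches: non-colon prefix, "://", ends with a non-slash
		"aws://",                                // no match: nothing after "://" ending in a non-slash
		"://missing-prefix",                     // no match: must start with at least one non-colon character
		"gce://project/zone/instance/",          // no match: trailing slash
	} {
		fmt.Printf("%-40s %v\n", id, providerIDRegex.MatchString(id))
	}
}
```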
6 changes: 3 additions & 3 deletions controllers/remote/cluster_cache_reconciler_test.go
@@ -45,7 +45,7 @@ func TestClusterCacheReconciler(t *testing.T) {

 	// createAndWatchCluster creates a new cluster and ensures the clusterCacheTracker has a clusterAccessor for it
 	createAndWatchCluster := func(clusterName string, testNamespace *corev1.Namespace, g *WithT) {
-		t.Log(fmt.Sprintf("Creating a cluster %q", clusterName))
+		t.Logf("Creating a cluster %q", clusterName)
 		testCluster := &clusterv1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: clusterName,
@@ -136,7 +136,7 @@ func TestClusterCacheReconciler(t *testing.T) {
 		defer teardown(t, g, testNamespace)

 		for _, clusterName := range []string{"cluster-1", "cluster-2", "cluster-3"} {
-			t.Log(fmt.Sprintf("Deleting cluster %q", clusterName))
+			t.Logf("Deleting cluster %q", clusterName)
 			obj := &clusterv1.Cluster{
 				ObjectMeta: metav1.ObjectMeta{
 					Namespace: testNamespace.Name,
@@ -145,7 +145,7 @@
 			}
 			g.Expect(k8sClient.Delete(ctx, obj)).To(Succeed())

-			t.Log(fmt.Sprintf("Checking cluster %q's clusterAccessor is removed", clusterName))
+			t.Logf("Checking cluster %q's clusterAccessor is removed", clusterName)
 			g.Eventually(func() bool { return cct.clusterAccessorExists(util.ObjectKey(obj)) }, timeout).Should(BeFalse())
 		}
 	})
14 changes: 7 additions & 7 deletions controlplane/kubeadm/internal/controllers/remediation.go
@@ -190,13 +190,13 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C
 //
 // The answer mostly depend on the existence of other failing members on top of the one being deleted, and according
 // to the etcd fault tolerance specification (see https://etcd.io/docs/v3.3/faq/#what-is-failure-tolerance):
-// - 3 CP cluster does not tolerate additional failing members on top of the one being deleted (the target
-//   cluster size after deletion is 2, fault tolerance 0)
-// - 5 CP cluster tolerates 1 additional failing members on top of the one being deleted (the target
-//   cluster size after deletion is 4, fault tolerance 1)
-// - 7 CP cluster tolerates 2 additional failing members on top of the one being deleted (the target
-//   cluster size after deletion is 6, fault tolerance 2)
-// - etc.
+//   - 3 CP cluster does not tolerate additional failing members on top of the one being deleted (the target
+//     cluster size after deletion is 2, fault tolerance 0)
+//   - 5 CP cluster tolerates 1 additional failing members on top of the one being deleted (the target
+//     cluster size after deletion is 4, fault tolerance 1)
+//   - 7 CP cluster tolerates 2 additional failing members on top of the one being deleted (the target
+//     cluster size after deletion is 6, fault tolerance 2)
+//   - etc.
 //
 // NOTE: this func assumes the list of members in sync with the list of machines/nodes, it is required to call reconcileEtcdMembers
 // ans well as reconcileControlPlaneConditions before this.
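The fault-tolerance arithmetic referenced above can be checked with a small standalone sketch (illustrative only; it assumes etcd quorum is size/2 + 1 as described in the linked FAQ):

```go
package main

import "fmt"

// faultTolerance returns how many members an etcd cluster of the given size
// can lose while keeping quorum: size - (size/2 + 1).
func faultTolerance(size int) int {
	return size - (size/2 + 1)
}

func main() {
	// Reproduce the cases listed in the comment: 3, 5, 7 control-plane machines,
	// with one member about to be removed by remediation.
	for _, cp := range []int{3, 5, 7} {
		after := cp - 1
		fmt.Printf("%d CP machines -> size after deletion %d, fault tolerance %d\n",
			cp, after, faultTolerance(after))
	}
}
```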
10 changes: 5 additions & 5 deletions hack/tools/conversion-verifier/doc.go
@@ -17,9 +17,9 @@ limitations under the License.
 // This command line application runs verification steps for conversion types.
 //
 // The following checks are performed:
-// - For each API Kind and Group, only one storage version must exist.
-// - Each storage version type and its List counterpart, if there are multiple API versions,
-//   the type MUST have a Hub() method.
-// - For each type with multiple versions, that has a Hub() and storage version,
-//   the type MUST have ConvertFrom() and ConvertTo() methods.
+//   - For each API Kind and Group, only one storage version must exist.
+//   - Each storage version type and its List counterpart, if there are multiple API versions,
+//     the type MUST have a Hub() method.
+//   - For each type with multiple versions, that has a Hub() and storage version,
+//     the type MUST have ConvertFrom() and ConvertTo() methods.
 package main
@@ -68,10 +68,10 @@ func (r *Reconciler) sync(ctx context.Context, d *clusterv1.MachineDeployment, m
 // msList should come from getMachineSetsForDeployment(d).
 // machineMap should come from getMachineMapForDeployment(d, msList).
 //
-// 1. Get all old MSes this deployment targets, and calculate the max revision number among them (maxOldV).
-// 2. Get new MS this deployment targets (whose machine template matches deployment's), and update new MS's revision number to (maxOldV + 1),
-//    only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop.
-// 3. Copy new MS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop.
+//  1. Get all old MSes this deployment targets, and calculate the max revision number among them (maxOldV).
+//  2. Get new MS this deployment targets (whose machine template matches deployment's), and update new MS's revision number to (maxOldV + 1),
+//     only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop.
+//  3. Copy new MS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop.
 //
 // Note that currently the deployment controller is using caches to avoid querying the server for reads.
 // This may lead to stale reads of machine sets, thus incorrect deployment status.
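A simplified sketch of steps 1 and 2 above, using a hypothetical machineSet stand-in instead of the real MachineSet type and its revision annotation:

```go
package main

import "fmt"

// machineSet is a hypothetical stand-in carrying a MachineSet's revision as an int.
type machineSet struct {
	name     string
	revision int
}

// nextRevision mirrors the documented steps: take the max revision among the
// old MachineSets (maxOldV) and bump the new MachineSet to maxOldV+1 if it is behind.
func nextRevision(oldMSs []machineSet, newMS machineSet) int {
	maxOldV := 0
	for _, ms := range oldMSs {
		if ms.revision > maxOldV {
			maxOldV = ms.revision
		}
	}
	if newMS.revision < maxOldV+1 {
		return maxOldV + 1
	}
	return newMS.revision
}

func main() {
	oldMSs := []machineSet{{"ms-a", 1}, {"ms-b", 3}}
	newMS := machineSet{"ms-new", 2}
	// Step 3 would then copy this value back onto the MachineDeployment's revision.
	fmt.Println("new MachineSet revision:", nextRevision(oldMSs, newMS)) // prints 4
}
```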
7 changes: 4 additions & 3 deletions internal/controllers/machinedeployment/mdutil/util.go
@@ -138,7 +138,8 @@ var annotationsToSkip = map[string]bool{

 // skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key
 // TODO(tbd): How to decide which annotations should / should not be copied?
-// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
+//
+// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
 func skipCopyAnnotation(key string) bool {
 	return annotationsToSkip[key]
 }
@@ -411,8 +412,8 @@ func FindNewMachineSet(deployment *clusterv1.MachineDeployment, msList []*cluste

 // FindOldMachineSets returns the old machine sets targeted by the given Deployment, with the given slice of MSes.
 // Returns two list of machine sets
-// - the first contains all old machine sets with all non-zero replicas
-// - the second contains all old machine sets
+//   - the first contains all old machine sets with all non-zero replicas
+//   - the second contains all old machine sets
 func FindOldMachineSets(deployment *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet) ([]*clusterv1.MachineSet, []*clusterv1.MachineSet) {
 	var requiredMSs []*clusterv1.MachineSet
 	allMSs := make([]*clusterv1.MachineSet, 0, len(msList))
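A simplified sketch of the documented two-list contract, using a hypothetical trimmed-down MachineSet stand-in:

```go
package main

import "fmt"

// ms is a hypothetical stand-in for a MachineSet: just a name and a replica count.
type ms struct {
	name     string
	replicas int32
}

// splitOldMachineSets mirrors the documented contract: the first slice holds only the
// old MachineSets that still have non-zero replicas, the second holds all old MachineSets.
func splitOldMachineSets(all []ms) (nonZero []ms, everything []ms) {
	for _, m := range all {
		if m.replicas > 0 {
			nonZero = append(nonZero, m)
		}
		everything = append(everything, m)
	}
	return nonZero, everything
}

func main() {
	old := []ms{{"ms-1", 0}, {"ms-2", 3}}
	active, all := splitOldMachineSets(old)
	fmt.Println(len(active), len(all)) // 1 2
}
```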
@@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */
+
 package machinehealthcheck

 import (
10 changes: 5 additions & 5 deletions internal/controllers/topology/cluster/cluster_controller_test.go
@@ -854,11 +854,11 @@ func assertInfrastructureClusterReconcile(cluster *clusterv1.Cluster) error {
 }

 // assertControlPlaneReconcile checks if the ControlPlane object:
-// 1) Is created.
-// 2) Has the correct labels and annotations.
-// 3) If it requires ControlPlane Infrastructure and if so:
-//    i) That the infrastructureMachineTemplate is created correctly.
-//    ii) That the infrastructureMachineTemplate has the correct labels and annotations
+//  1. Is created.
+//  2. Has the correct labels and annotations.
+//  3. If it requires ControlPlane Infrastructure and if so:
+//     i) That the infrastructureMachineTemplate is created correctly.
+//     ii) That the infrastructureMachineTemplate has the correct labels and annotations
 func assertControlPlaneReconcile(cluster *clusterv1.Cluster) error {
	cp, err := getAndAssertLabelsAndAnnotations(*cluster.Spec.ControlPlaneRef, cluster.Name)
	if err != nil {