diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 5f21bfad46a5..cf3995d5a7cb 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -21,9 +21,9 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: 1.17 + go-version: 1.19 - name: golangci-lint uses: golangci/golangci-lint-action@v3.2.0 with: - version: v1.44.0 + version: v1.48.0 working-directory: ${{matrix.working-directory}} diff --git a/cmd/clusterctl/api/v1alpha3/metadata_type_test.go b/cmd/clusterctl/api/v1alpha3/metadata_type_test.go index 9cc67e67c41d..ce71a64343a9 100644 --- a/cmd/clusterctl/api/v1alpha3/metadata_type_test.go +++ b/cmd/clusterctl/api/v1alpha3/metadata_type_test.go @@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package v1alpha3 import ( diff --git a/cmd/clusterctl/client/cluster/proxy.go b/cmd/clusterctl/client/cluster/proxy.go index 8ec4a9af43fc..d72c1aa9af0a 100644 --- a/cmd/clusterctl/client/cluster/proxy.go +++ b/cmd/clusterctl/client/cluster/proxy.go @@ -206,12 +206,12 @@ func (k *proxy) CheckClusterAvailable() error { // This is done to avoid errors when listing resources of providers which have already been deleted/scaled down to 0 replicas/with // malfunctioning webhooks. // For example: -// * The AWS provider has already been deleted, but there are still cluster-wide resources of AWSClusterControllerIdentity. -// * The AWSClusterControllerIdentity resources are still stored in an older version (e.g. v1alpha4, when the preferred -// version is v1beta1) -// * If we now want to delete e.g. the kubeadm bootstrap provider, we cannot list AWSClusterControllerIdentity resources -// as the conversion would fail, because the AWS controller hosting the conversion webhook has already been deleted. -// * Thus we exclude resources of other providers if we detect that ListResources is called to list resources of a provider. +// - The AWS provider has already been deleted, but there are still cluster-wide resources of AWSClusterControllerIdentity. +// - The AWSClusterControllerIdentity resources are still stored in an older version (e.g. v1alpha4, when the preferred +// version is v1beta1) +// - If we now want to delete e.g. the kubeadm bootstrap provider, we cannot list AWSClusterControllerIdentity resources +// as the conversion would fail, because the AWS controller hosting the conversion webhook has already been deleted. +// - Thus we exclude resources of other providers if we detect that ListResources is called to list resources of a provider. func (k *proxy) ListResources(labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error) { cs, err := k.newClientSet() if err != nil { diff --git a/cmd/clusterctl/client/cluster/topology.go b/cmd/clusterctl/client/cluster/topology.go index b8df88d722bd..0a520e0c67e7 100644 --- a/cmd/clusterctl/client/cluster/topology.go +++ b/cmd/clusterctl/client/cluster/topology.go @@ -257,9 +257,9 @@ func (t *topologyClient) validateInput(in *TopologyPlanInput) error { } // prepareInput does the following on the input objects: -// - Set the target namespace on the objects if not set (this operation is generally done by kubectl) -// - Prepare cluster objects so that the state of the cluster, if modified, correctly represents -// the expected changes. 
+// - Set the target namespace on the objects if not set (this operation is generally done by kubectl) +// - Prepare cluster objects so that the state of the cluster, if modified, correctly represents +// the expected changes. func (t *topologyClient) prepareInput(ctx context.Context, in *TopologyPlanInput, apiReader client.Reader) error { if err := t.setMissingNamespaces(in.TargetNamespace, in.Objs); err != nil { return errors.Wrap(err, "failed to set missing namespaces") @@ -297,18 +297,20 @@ func (t *topologyClient) setMissingNamespaces(currentNamespace string, objs []*u } // prepareClusters does the following operations on each Cluster in the input. -// - Check if the Cluster exists in the real apiserver. -// - If the Cluster exists in the real apiserver we merge the object from the -// server with the object from the input. This final object correctly represents the -// modified cluster object. -// Note: We are using a simple 2-way merge to calculate the final object in this function -// to keep the function simple. In reality kubectl does a lot more. This function does not behave exactly -// the same way as kubectl does. +// - Check if the Cluster exists in the real apiserver. +// - If the Cluster exists in the real apiserver we merge the object from the +// server with the object from the input. This final object correctly represents the +// modified cluster object. +// Note: We are using a simple 2-way merge to calculate the final object in this function +// to keep the function simple. In reality kubectl does a lot more. This function does not behave exactly +// the same way as kubectl does. +// // *Important note*: We do this above operation because the topology reconciler in a -// real run takes as input a cluster object from the apiserver that has merged spec of -// the changes in the input and the one stored in the server. For example: the cluster -// object in the input will not have cluster.spec.infrastructureRef and cluster.spec.controlPlaneRef -// but the merged object will have these fields set. +// +// real run takes as input a cluster object from the apiserver that has merged spec of +// the changes in the input and the one stored in the server. For example: the cluster +// object in the input will not have cluster.spec.infrastructureRef and cluster.spec.controlPlaneRef +// but the merged object will have these fields set. func (t *topologyClient) prepareClusters(ctx context.Context, clusters []*unstructured.Unstructured, apiReader client.Reader) error { if apiReader == nil { // If there is no backing server there is nothing more to do here. diff --git a/cmd/clusterctl/client/repository/template.go b/cmd/clusterctl/client/repository/template.go index ebb04591b05e..e9bf685e3384 100644 --- a/cmd/clusterctl/client/repository/template.go +++ b/cmd/clusterctl/client/repository/template.go @@ -144,12 +144,12 @@ func NewTemplate(input TemplateInput) (Template, error) { // MergeTemplates merges the provided Templates into one Template. // Notes on the merge operation: -// - The merge operation returns an error if all the templates do not have the same TargetNamespace. -// - The Variables of the resulting template is a union of all Variables in the templates. -// - The default value is picked from the first template that defines it. -// The defaults of the same variable in the subsequent templates will be ignored. 
-// (e.g when merging a cluster template and its ClusterClass, the default value from the template takes precedence) -// - The Objs of the final template will be a union of all the Objs in the templates. +// - The merge operation returns an error if all the templates do not have the same TargetNamespace. +// - The Variables of the resulting template is a union of all Variables in the templates. +// - The default value is picked from the first template that defines it. +// The defaults of the same variable in the subsequent templates will be ignored. +// (e.g when merging a cluster template and its ClusterClass, the default value from the template takes precedence) +// - The Objs of the final template will be a union of all the Objs in the templates. func MergeTemplates(templates ...Template) (Template, error) { templates = filterNilTemplates(templates...) if len(templates) == 0 { diff --git a/cmd/clusterctl/client/tree/doc.go b/cmd/clusterctl/client/tree/doc.go index 146ac163f8c1..4337ab51f087 100644 --- a/cmd/clusterctl/client/tree/doc.go +++ b/cmd/clusterctl/client/tree/doc.go @@ -21,29 +21,29 @@ understanding if there are problems and where. The "at glance" view is based on the idea that we should avoid to overload the user with information, but instead surface problems, if any; in practice: -- The view assumes we are processing objects conforming with https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20200506-conditions.md. - As a consequence each object should have a Ready condition summarizing the object state. + - The view assumes we are processing objects conforming with https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20200506-conditions.md. + As a consequence each object should have a Ready condition summarizing the object state. -- The view organizes objects in a hierarchical tree, however it is not required that the - tree reflects the ownerReference tree so it is possible to skip objects not relevant for triaging the cluster status - e.g. secrets or templates. + - The view organizes objects in a hierarchical tree, however it is not required that the + tree reflects the ownerReference tree so it is possible to skip objects not relevant for triaging the cluster status + e.g. secrets or templates. -- It is possible to add "meta names" to object, thus making hierarchical tree more consistent for the users, - e.g. use MachineInfrastructure instead of using all the different infrastructure machine kinds (AWSMachine, VSphereMachine etc.). + - It is possible to add "meta names" to object, thus making hierarchical tree more consistent for the users, + e.g. use MachineInfrastructure instead of using all the different infrastructure machine kinds (AWSMachine, VSphereMachine etc.). -- It is possible to add "virtual nodes", thus allowing to make the hierarchical tree more meaningful for the users, - e.g. adding a Workers object to group all the MachineDeployments. + - It is possible to add "virtual nodes", thus allowing to make the hierarchical tree more meaningful for the users, + e.g. adding a Workers object to group all the MachineDeployments. -- It is possible to "group" siblings objects by ready condition e.g. group all the machines with Ready=true - in a single node instead of listing each one of them. + - It is possible to "group" siblings objects by ready condition e.g. group all the machines with Ready=true + in a single node instead of listing each one of them. 
-- Given that the ready condition of the child object bubbles up to the parents, it is possible to avoid the "echo"
-  (reporting the same condition at the parent/child) e.g. if a machine's Ready condition is already
-  surface an error from the infrastructure machine, let's avoid to show the InfrastructureMachine
-  given that representing its state is redundant in this case.
+  - Given that the ready condition of the child object bubbles up to the parents, it is possible to avoid the "echo"
+    (reporting the same condition at the parent/child), e.g. if a machine's Ready condition is already
+    surfacing an error from the infrastructure machine, let's avoid showing the InfrastructureMachine
+    given that representing its state is redundant in this case.

-- In order to avoid long list of objects (think e.g. a cluster with 50 worker machines), sibling objects with the
-  same value for the ready condition can be grouped together into a virtual node, e.g. 10 Machines ready
+  - In order to avoid long lists of objects (think e.g. a cluster with 50 worker machines), sibling objects with the
+    same value for the ready condition can be grouped together into a virtual node, e.g. 10 Machines ready

 The ObjectTree object defined here implements all the above behaviors of the "at glance" visualization, by generating
 a tree of Kubernetes objects; each object gets a set of annotations, reflecting its own visualization specific attributes,
diff --git a/cmd/clusterctl/client/yamlprocessor/simple_processor_test.go b/cmd/clusterctl/client/yamlprocessor/simple_processor_test.go
index 2275a2c89191..5e7efafb96e5 100644
--- a/cmd/clusterctl/client/yamlprocessor/simple_processor_test.go
+++ b/cmd/clusterctl/client/yamlprocessor/simple_processor_test.go
@@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */
+
 package yamlprocessor

 import (
diff --git a/cmd/clusterctl/log/logger.go b/cmd/clusterctl/log/logger.go
index 9e70b110337a..71ade25e18d6 100644
--- a/cmd/clusterctl/log/logger.go
+++ b/cmd/clusterctl/log/logger.go
@@ -150,11 +150,11 @@ func copySlice(in []interface{}) []interface{} {

 // flatten returns a human readable/machine parsable text representing the LogEntry.
 // Most notable differences from the klog implementation are:
-// - The message is printed at the beginning of the line, without the Msg= variable name e.g.
-//   "Msg"="This is a message" --> This is a message
-// - Variables name are not quoted, eg.
-//   This is a message "Var1"="value" --> This is a message Var1="value"
-// - Variables are not sorted, thus allowing full control to the developer on the output.
+//   - The message is printed at the beginning of the line, without the Msg= variable name, e.g.
+//     "Msg"="This is a message" --> This is a message
+//   - Variable names are not quoted, e.g.
+//     This is a message "Var1"="value" --> This is a message Var1="value"
+//   - Variables are not sorted, thus allowing the developer full control over the output.
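//
// For illustration, a sketch of the flattening described above (the logEntry
// literal is an assumption for example purposes, not the exact field layout
// of the type):
//
//	entry := logEntry{Values: []interface{}{"Msg", "This is a message", "Var1", "value"}}
//	s, _ := flatten(entry) // s == `This is a message Var1="value"`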
func flatten(entry logEntry) (string, error) { var msgValue string var errorValue error diff --git a/controllers/noderefutil/providerid.go b/controllers/noderefutil/providerid.go index 4137d5de1de7..8056331eed07 100644 --- a/controllers/noderefutil/providerid.go +++ b/controllers/noderefutil/providerid.go @@ -41,10 +41,10 @@ type ProviderID struct { } /* - - must start with at least one non-colon - - followed by :// - - followed by any number of characters - - must end with a non-slash +- must start with at least one non-colon +- followed by :// +- followed by any number of characters +- must end with a non-slash. */ var providerIDRegex = regexp.MustCompile("^[^:]+://.*[^/]$") diff --git a/controllers/remote/cluster_cache_reconciler_test.go b/controllers/remote/cluster_cache_reconciler_test.go index b818bcfe765f..c9567cbf0cd7 100644 --- a/controllers/remote/cluster_cache_reconciler_test.go +++ b/controllers/remote/cluster_cache_reconciler_test.go @@ -45,7 +45,7 @@ func TestClusterCacheReconciler(t *testing.T) { // createAndWatchCluster creates a new cluster and ensures the clusterCacheTracker has a clusterAccessor for it createAndWatchCluster := func(clusterName string, testNamespace *corev1.Namespace, g *WithT) { - t.Log(fmt.Sprintf("Creating a cluster %q", clusterName)) + t.Logf("Creating a cluster %q", clusterName) testCluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: clusterName, @@ -136,7 +136,7 @@ func TestClusterCacheReconciler(t *testing.T) { defer teardown(t, g, testNamespace) for _, clusterName := range []string{"cluster-1", "cluster-2", "cluster-3"} { - t.Log(fmt.Sprintf("Deleting cluster %q", clusterName)) + t.Logf("Deleting cluster %q", clusterName) obj := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: testNamespace.Name, @@ -145,7 +145,7 @@ func TestClusterCacheReconciler(t *testing.T) { } g.Expect(k8sClient.Delete(ctx, obj)).To(Succeed()) - t.Log(fmt.Sprintf("Checking cluster %q's clusterAccessor is removed", clusterName)) + t.Logf("Checking cluster %q's clusterAccessor is removed", clusterName) g.Eventually(func() bool { return cct.clusterAccessorExists(util.ObjectKey(obj)) }, timeout).Should(BeFalse()) } }) diff --git a/controlplane/kubeadm/internal/controllers/remediation.go b/controlplane/kubeadm/internal/controllers/remediation.go index 38f1e23091c3..e0941e88cfa5 100644 --- a/controlplane/kubeadm/internal/controllers/remediation.go +++ b/controlplane/kubeadm/internal/controllers/remediation.go @@ -190,13 +190,13 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // // The answer mostly depend on the existence of other failing members on top of the one being deleted, and according // to the etcd fault tolerance specification (see https://etcd.io/docs/v3.3/faq/#what-is-failure-tolerance): -// - 3 CP cluster does not tolerate additional failing members on top of the one being deleted (the target -// cluster size after deletion is 2, fault tolerance 0) -// - 5 CP cluster tolerates 1 additional failing members on top of the one being deleted (the target -// cluster size after deletion is 4, fault tolerance 1) -// - 7 CP cluster tolerates 2 additional failing members on top of the one being deleted (the target -// cluster size after deletion is 6, fault tolerance 2) -// - etc. 
+//   - 3 CP cluster does not tolerate additional failing members on top of the one being deleted (the target
+//     cluster size after deletion is 2, fault tolerance 0)
+//   - 5 CP cluster tolerates 1 additional failing member on top of the one being deleted (the target
+//     cluster size after deletion is 4, fault tolerance 1)
+//   - 7 CP cluster tolerates 2 additional failing members on top of the one being deleted (the target
+//     cluster size after deletion is 6, fault tolerance 2)
+//   - etc.
 //
 // NOTE: this func assumes the list of members is in sync with the list of machines/nodes, it is required to call reconcileEtcdMembers
 // as well as reconcileControlPlaneConditions before this.
diff --git a/hack/tools/conversion-verifier/doc.go b/hack/tools/conversion-verifier/doc.go
index af040df7f7d0..92f0674e4db7 100644
--- a/hack/tools/conversion-verifier/doc.go
+++ b/hack/tools/conversion-verifier/doc.go
@@ -17,9 +17,9 @@ limitations under the License.
 // This command line application runs verification steps for conversion types.
 //
 // The following checks are performed:
-// - For each API Kind and Group, only one storage version must exist.
-// - Each storage version type and its List counterpart, if there are multiple API versions,
-// the type MUST have a Hub() method.
-// - For each type with multiple versions, that has a Hub() and storage version,
-// the type MUST have ConvertFrom() and ConvertTo() methods.
+//   - For each API Kind and Group, only one storage version must exist.
+//   - For each storage version type and its List counterpart, if there are multiple API versions,
+//     the type MUST have a Hub() method.
+//   - For each type with multiple versions, that has a Hub() and storage version,
+//     the type MUST have ConvertFrom() and ConvertTo() methods.
 package main
diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go
index 519a96ed17cb..0e245a74f91d 100644
--- a/internal/controllers/machinedeployment/machinedeployment_sync.go
+++ b/internal/controllers/machinedeployment/machinedeployment_sync.go
@@ -68,10 +68,10 @@ func (r *Reconciler) sync(ctx context.Context, d *clusterv1.MachineDeployment, m
 // msList should come from getMachineSetsForDeployment(d).
 // machineMap should come from getMachineMapForDeployment(d, msList).
 //
-// 1. Get all old MSes this deployment targets, and calculate the max revision number among them (maxOldV).
-// 2. Get new MS this deployment targets (whose machine template matches deployment's), and update new MS's revision number to (maxOldV + 1),
-// only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop.
-// 3. Copy new MS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop.
+//  1. Get all old MSes this deployment targets, and calculate the max revision number among them (maxOldV).
+//  2. Get the new MS this deployment targets (whose machine template matches deployment's), and update the new MS's revision number to (maxOldV + 1),
+//     only if its revision number is smaller than (maxOldV + 1). If this step fails, we'll update it in the next deployment sync loop.
+//  3. Copy the new MS's revision number to the deployment (update the deployment's revision). If this step fails, we'll update it in the next deployment sync loop.
 //
 // Note that currently the deployment controller is using caches to avoid querying the server for reads.
// This may lead to stale reads of machine sets, thus incorrect deployment status. diff --git a/internal/controllers/machinedeployment/mdutil/util.go b/internal/controllers/machinedeployment/mdutil/util.go index bfa88719febc..a8025ad5f5d2 100644 --- a/internal/controllers/machinedeployment/mdutil/util.go +++ b/internal/controllers/machinedeployment/mdutil/util.go @@ -138,7 +138,8 @@ var annotationsToSkip = map[string]bool{ // skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key // TODO(tbd): How to decide which annotations should / should not be copied? -// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615 +// +// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615 func skipCopyAnnotation(key string) bool { return annotationsToSkip[key] } @@ -411,8 +412,8 @@ func FindNewMachineSet(deployment *clusterv1.MachineDeployment, msList []*cluste // FindOldMachineSets returns the old machine sets targeted by the given Deployment, with the given slice of MSes. // Returns two list of machine sets -// - the first contains all old machine sets with all non-zero replicas -// - the second contains all old machine sets +// - the first contains all old machine sets with all non-zero replicas +// - the second contains all old machine sets func FindOldMachineSets(deployment *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet) ([]*clusterv1.MachineSet, []*clusterv1.MachineSet) { var requiredMSs []*clusterv1.MachineSet allMSs := make([]*clusterv1.MachineSet, 0, len(msList)) diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go index 05eea12ab281..21df470a31c5 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go @@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package machinehealthcheck import ( diff --git a/internal/controllers/topology/cluster/cluster_controller_test.go b/internal/controllers/topology/cluster/cluster_controller_test.go index 278af2d94ca3..84efee37e4ba 100644 --- a/internal/controllers/topology/cluster/cluster_controller_test.go +++ b/internal/controllers/topology/cluster/cluster_controller_test.go @@ -854,11 +854,11 @@ func assertInfrastructureClusterReconcile(cluster *clusterv1.Cluster) error { } // assertControlPlaneReconcile checks if the ControlPlane object: -// 1) Is created. -// 2) Has the correct labels and annotations. -// 3) If it requires ControlPlane Infrastructure and if so: -// i) That the infrastructureMachineTemplate is created correctly. -// ii) That the infrastructureMachineTemplate has the correct labels and annotations +// 1. Is created. +// 2. Has the correct labels and annotations. +// 3. If it requires ControlPlane Infrastructure and if so: +// i) That the infrastructureMachineTemplate is created correctly. 
+// ii) That the infrastructureMachineTemplate has the correct labels and annotations func assertControlPlaneReconcile(cluster *clusterv1.Cluster) error { cp, err := getAndAssertLabelsAndAnnotations(*cluster.Spec.ControlPlaneRef, cluster.Name) if err != nil { diff --git a/internal/controllers/topology/cluster/patches/engine.go b/internal/controllers/topology/cluster/patches/engine.go index 8c4f85e828a6..66685c09d0fa 100644 --- a/internal/controllers/topology/cluster/patches/engine.go +++ b/internal/controllers/topology/cluster/patches/engine.go @@ -56,10 +56,10 @@ type engine struct { // Apply applies patches to the desired state according to the patches from the ClusterClass, variables from the Cluster // and builtin variables. -// * A GeneratePatchesRequest with all templates and global and template-specific variables is created. -// * Then for all ClusterClassPatches of a ClusterClass, JSON or JSON merge patches are generated -// and successively applied to the templates in the GeneratePatchesRequest. -// * Eventually the patched templates are used to update the specs of the desired objects. +// - A GeneratePatchesRequest with all templates and global and template-specific variables is created. +// - Then for all ClusterClassPatches of a ClusterClass, JSON or JSON merge patches are generated +// and successively applied to the templates in the GeneratePatchesRequest. +// - Eventually the patched templates are used to update the specs of the desired objects. func (e *engine) Apply(ctx context.Context, blueprint *scope.ClusterBlueprint, desired *scope.ClusterState) error { // Return if there are no patches. if len(blueprint.ClusterClass.Spec.Patches) == 0 { diff --git a/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go b/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go index 71decbea10de..6dfcf2c44b7a 100644 --- a/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go +++ b/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go @@ -308,24 +308,24 @@ func renderValueTemplate(valueTemplate string, variables map[string]apiextension // calculateTemplateData calculates data for the template, by converting // the variables to their Go types. 
// Example: -// * Input: -// map[string]apiextensionsv1.JSON{ +// - Input: +// map[string]apiextensionsv1.JSON{ // "builtin": {Raw: []byte(`{"cluster":{"name":"cluster-name"}}`}, // "integerVariable": {Raw: []byte("4")}, // "numberVariable": {Raw: []byte("2.5")}, // "booleanVariable": {Raw: []byte("true")}, -// } -// * Output: -// map[string]interface{}{ +// } +// - Output: +// map[string]interface{}{ // "builtin": map[string]interface{}{ -// "cluster": map[string]interface{}{ -// "name": "cluster-name" -// } +// "cluster": map[string]interface{}{ +// "name": "cluster-name" +// } // }, // "integerVariable": 4, // "numberVariable": 2.5, // "booleanVariable": true, -// } +// } func calculateTemplateData(variables map[string]apiextensionsv1.JSON) (map[string]interface{}, error) { res := make(map[string]interface{}, len(variables)) diff --git a/internal/controllers/topology/cluster/structuredmerge/twowayspatchhelper.go b/internal/controllers/topology/cluster/structuredmerge/twowayspatchhelper.go index 8d7f6fdf9f30..abb93795220a 100644 --- a/internal/controllers/topology/cluster/structuredmerge/twowayspatchhelper.go +++ b/internal/controllers/topology/cluster/structuredmerge/twowayspatchhelper.go @@ -53,13 +53,14 @@ type TwoWaysPatchHelper struct { // by the topology controller are going to be preserved without changes. // NOTE: TwoWaysPatch is considered a minimal viable replacement for server side apply during topology dry run, with // the following limitations: -// - TwoWaysPatch doesn't consider OpenAPI schema extension like +ListMap this can lead to false positive when topology -// dry run is simulating a change to an existing slice -// (TwoWaysPatch always revert external changes, like server side apply when +ListMap=atomic). -// - TwoWaysPatch doesn't consider existing metadata.managedFields, and this can lead to false negative when topology dry run -// is simulating a change to an existing object where the topology controller is dropping an opinion for a field -// (TwoWaysPatch always preserve dropped fields, like server side apply when the field has more than one manager). -// - TwoWaysPatch doesn't generate metadata.managedFields as server side apply does. +// - TwoWaysPatch doesn't consider OpenAPI schema extension like +ListMap this can lead to false positive when topology +// dry run is simulating a change to an existing slice +// (TwoWaysPatch always revert external changes, like server side apply when +ListMap=atomic). +// - TwoWaysPatch doesn't consider existing metadata.managedFields, and this can lead to false negative when topology dry run +// is simulating a change to an existing object where the topology controller is dropping an opinion for a field +// (TwoWaysPatch always preserve dropped fields, like server side apply when the field has more than one manager). +// - TwoWaysPatch doesn't generate metadata.managedFields as server side apply does. +// // NOTE: NewTwoWaysPatchHelper consider changes only in metadata.labels, metadata.annotation and spec; it also respects // the ignorePath option (same as the server side apply helper). 
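//
// Illustrative usage (a sketch; the HasChanges and Patch methods shown here
// reflect the patch-helper pattern used in this package and are assumptions,
// not verified signatures):
//
//	helper, err := NewTwoWaysPatchHelper(original, modified, r.Client)
//	if err != nil {
//		return err
//	}
//	if helper.HasChanges() {
//		return helper.Patch(ctx)
//	}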
func NewTwoWaysPatchHelper(original, modified client.Object, c client.Client, opts ...HelperOption) (*TwoWaysPatchHelper, error) { diff --git a/internal/controllers/topology/machinedeployment/machinedeployment_controller.go b/internal/controllers/topology/machinedeployment/machinedeployment_controller.go index 5910f4ab1951..ed40a1d13b5f 100644 --- a/internal/controllers/topology/machinedeployment/machinedeployment_controller.go +++ b/internal/controllers/topology/machinedeployment/machinedeployment_controller.go @@ -74,10 +74,13 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt // i.e. the templates would otherwise be orphaned after the MachineDeployment deletion completes. // Additional context: // * MachineDeployment deletion: -// * MachineDeployments are deleted and garbage collected first (without waiting until all MachineSets are also deleted). -// * After that, deletion of MachineSets is automatically triggered by Kubernetes based on owner references. +// - MachineDeployments are deleted and garbage collected first (without waiting until all MachineSets are also deleted). +// - After that, deletion of MachineSets is automatically triggered by Kubernetes based on owner references. +// // Note: We assume templates are not reused by different MachineDeployments, which is only true for topology-owned -// MachineDeployments. +// +// MachineDeployments. +// // We don't have to set the finalizer, as it's already set during MachineDeployment creation // in the cluster topology controller. func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { diff --git a/internal/controllers/topology/machineset/machineset_controller.go b/internal/controllers/topology/machineset/machineset_controller.go index 09d129f2b113..036c78bf8c19 100644 --- a/internal/controllers/topology/machineset/machineset_controller.go +++ b/internal/controllers/topology/machineset/machineset_controller.go @@ -75,10 +75,13 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt // i.e. the templates would otherwise be orphaned after the MachineSet deletion completes. // Additional context: // * MachineSet deletion: -// * MachineSets are deleted and garbage collected first (without waiting until all Machines are also deleted) -// * After that, deletion of Machines is automatically triggered by Kubernetes based on owner references. +// - MachineSets are deleted and garbage collected first (without waiting until all Machines are also deleted) +// - After that, deletion of Machines is automatically triggered by Kubernetes based on owner references. +// // Note: We assume templates are not reused by different MachineDeployments, which is (only) true for topology-owned -// MachineDeployments. +// +// MachineDeployments. +// // We don't have to set the finalizer, as it's already set during MachineSet creation // in the MachineSet controller. func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { diff --git a/internal/test/builder/builders.go b/internal/test/builder/builders.go index e89d0c1085b6..1d1b793257e2 100644 --- a/internal/test/builder/builders.go +++ b/internal/test/builder/builders.go @@ -426,9 +426,9 @@ func InfrastructureMachineTemplate(namespace, name string) *InfrastructureMachin // // Note: all the paths should start with "spec." // -// Example map: map[string]interface{}{ -// "spec.version": "v1.2.3", -// }. +// Example map: map[string]interface{}{ +// "spec.version": "v1.2.3", +// }. 
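//
// Illustrative usage (a sketch; the chained Build call follows the builder
// pattern used throughout this file):
//
//	obj := InfrastructureMachineTemplate("ns1", "infra-machine-template").
//		WithSpecFields(map[string]interface{}{
//			"spec.template.spec.fakeSetting": true,
//		}).
//		Build()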
func (i *InfrastructureMachineTemplateBuilder) WithSpecFields(fields map[string]interface{}) *InfrastructureMachineTemplateBuilder { setSpecFields(i.obj, fields) return i @@ -465,9 +465,9 @@ func TestInfrastructureMachineTemplate(namespace, name string) *TestInfrastructu // // Note: all the paths should start with "spec."; the path should correspond to a field defined in the CRD. // -// Example map: map[string]interface{}{ -// "spec.version": "v1.2.3", -// }. +// Example map: map[string]interface{}{ +// "spec.version": "v1.2.3", +// }. func (i *TestInfrastructureMachineTemplateBuilder) WithSpecFields(fields map[string]interface{}) *TestInfrastructureMachineTemplateBuilder { setSpecFields(i.obj, fields) return i @@ -562,9 +562,9 @@ func InfrastructureClusterTemplate(namespace, name string) *InfrastructureCluste // // Note: all the paths should start with "spec." // -// Example map: map[string]interface{}{ -// "spec.version": "v1.2.3", -// }. +// Example map: map[string]interface{}{ +// "spec.version": "v1.2.3", +// }. func (i *InfrastructureClusterTemplateBuilder) WithSpecFields(fields map[string]interface{}) *InfrastructureClusterTemplateBuilder { setSpecFields(i.obj, fields) return i @@ -600,9 +600,9 @@ func TestInfrastructureClusterTemplate(namespace, name string) *TestInfrastructu // // Note: all the paths should start with "spec."; the path should correspond to a field defined in the CRD. // -// Example map: map[string]interface{}{ -// "spec.version": "v1.2.3", -// }. +// Example map: map[string]interface{}{ +// "spec.version": "v1.2.3", +// }. func (i *TestInfrastructureClusterTemplateBuilder) WithSpecFields(fields map[string]interface{}) *TestInfrastructureClusterTemplateBuilder { setSpecFields(i.obj, fields) return i @@ -636,9 +636,9 @@ func ControlPlaneTemplate(namespace, name string) *ControlPlaneTemplateBuilder { // // Note: all the paths should start with "spec." // -// Example map: map[string]interface{}{ -// "spec.version": "v1.2.3", -// }. +// Example map: map[string]interface{}{ +// "spec.version": "v1.2.3", +// }. func (c *ControlPlaneTemplateBuilder) WithSpecFields(fields map[string]interface{}) *ControlPlaneTemplateBuilder { setSpecFields(c.obj, fields) return c @@ -683,9 +683,9 @@ func TestControlPlaneTemplate(namespace, name string) *TestControlPlaneTemplateB // // Note: all the paths should start with "spec."; the path should correspond to a field defined in the CRD. // -// Example map: map[string]interface{}{ -// "spec.version": "v1.2.3", -// }. +// Example map: map[string]interface{}{ +// "spec.version": "v1.2.3", +// }. func (c *TestControlPlaneTemplateBuilder) WithSpecFields(fields map[string]interface{}) *TestControlPlaneTemplateBuilder { setSpecFields(c.obj, fields) return c @@ -714,9 +714,9 @@ type InfrastructureClusterBuilder struct { // // Note: all the paths should start with "spec." // -// Example map: map[string]interface{}{ -// "spec.version": "v1.2.3", -// }. +// Example map: map[string]interface{}{ +// "spec.version": "v1.2.3", +// }. func (i *InfrastructureClusterBuilder) WithSpecFields(fields map[string]interface{}) *InfrastructureClusterBuilder { setSpecFields(i.obj, fields) return i @@ -759,9 +759,9 @@ func TestInfrastructureCluster(namespace, name string) *TestInfrastructureCluste // // Note: all the paths should start with "spec."; the path should correspond to a field defined in the CRD. // -// Example map: map[string]interface{}{ -// "spec.version": "v1.2.3", -// }. +// Example map: map[string]interface{}{ +// "spec.version": "v1.2.3", +// }. 
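//
// For reference, applying a single dotted path to the underlying unstructured
// object roughly amounts to the following (a sketch, not the actual
// setSpecFields implementation):
//
//	parts := strings.Split("spec.version", ".")
//	_ = unstructured.SetNestedField(i.obj.Object, "v1.2.3", parts...)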
func (i *TestInfrastructureClusterBuilder) WithSpecFields(fields map[string]interface{}) *TestInfrastructureClusterBuilder { setSpecFields(i.obj, fields) return i @@ -819,9 +819,9 @@ func (c *ControlPlaneBuilder) WithVersion(version string) *ControlPlaneBuilder { // // Note: all the paths should start with "spec." // -// Example map: map[string]interface{}{ -// "spec.version": "v1.2.3", -// }. +// Example map: map[string]interface{}{ +// "spec.version": "v1.2.3", +// }. func (c *ControlPlaneBuilder) WithSpecFields(fields map[string]interface{}) *ControlPlaneBuilder { setSpecFields(c.obj, fields) return c @@ -832,9 +832,9 @@ func (c *ControlPlaneBuilder) WithSpecFields(fields map[string]interface{}) *Con // // Note: all the paths should start with "status." // -// Example map: map[string]interface{}{ -// "status.version": "v1.2.3", -// }. +// Example map: map[string]interface{}{ +// "status.version": "v1.2.3", +// }. func (c *ControlPlaneBuilder) WithStatusFields(fields map[string]interface{}) *ControlPlaneBuilder { setStatusFields(c.obj, fields) return c @@ -892,9 +892,9 @@ func (c *TestControlPlaneBuilder) WithVersion(version string) *TestControlPlaneB // // Note: all the paths should start with "spec." // -// Example map: map[string]interface{}{ -// "spec.version": "v1.2.3", -// }. +// Example map: map[string]interface{}{ +// "spec.version": "v1.2.3", +// }. func (c *TestControlPlaneBuilder) WithSpecFields(fields map[string]interface{}) *TestControlPlaneBuilder { setSpecFields(c.obj, fields) return c @@ -905,9 +905,9 @@ func (c *TestControlPlaneBuilder) WithSpecFields(fields map[string]interface{}) // // Note: all the paths should start with "status." // -// Example map: map[string]interface{}{ -// "status.version": "v1.2.3", -// }. +// Example map: map[string]interface{}{ +// "status.version": "v1.2.3", +// }. func (c *TestControlPlaneBuilder) WithStatusFields(fields map[string]interface{}) *TestControlPlaneBuilder { setStatusFields(c.obj, fields) return c diff --git a/internal/test/envtest/environment.go b/internal/test/envtest/environment.go index f0e4bf608410..065a191c957a 100644 --- a/internal/test/envtest/environment.go +++ b/internal/test/envtest/environment.go @@ -99,10 +99,13 @@ type RunInput struct { // Run executes the tests of the given testing.M in a test environment. // Note: The environment will be created in this func and should not be created before. This func takes a *Environment -// because our tests require access to the *Environment. We use this field to make the created Environment available -// to the consumer. +// +// because our tests require access to the *Environment. We use this field to make the created Environment available +// to the consumer. +// // Note: Test environment creation can be skipped by setting the environment variable `CAPI_DISABLE_TEST_ENV`. This only -// makes sense when executing tests which don't require the test environment, e.g. tests using only the fake client. +// +// makes sense when executing tests which don't require the test environment, e.g. tests using only the fake client. func Run(ctx context.Context, input RunInput) int { if os.Getenv("CAPI_DISABLE_TEST_ENV") != "" { return input.M.Run() diff --git a/main.go b/main.go index ba81125db441..2445aef8d164 100644 --- a/main.go +++ b/main.go @@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/
+
 package main

 import (
diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go
index 9c8747628624..4453c7d63bc3 100644
--- a/test/e2e/cluster_upgrade_runtimesdk.go
+++ b/test/e2e/cluster_upgrade_runtimesdk.go
@@ -437,10 +437,11 @@ func beforeClusterDeleteHandler(ctx context.Context, c client.Client, namespace,
 }

 // runtimeHookTestHandler runs a series of tests in sequence to check if the runtimeHook passed to it succeeds.
-// 1) Checks that the hook has been called at least once and, if withTopologyReconciledCondition is set, checks that the TopologyReconciled condition is a Failure.
-// 2) Check that the hook's blockingCondition is consistently true.
-// - At this point the function sets the hook's response to be non-blocking.
-// 3) Check that the hook's blocking condition becomes false.
+//  1. Checks that the hook has been called at least once and, if withTopologyReconciledCondition is set, checks that the TopologyReconciled condition is a Failure.
+//  2. Checks that the hook's blockingCondition is consistently true.
+//     - At this point the function sets the hook's response to be non-blocking.
+//  3. Checks that the hook's blocking condition becomes false.
+//
 // Note: runtimeHookTestHandler assumes that the hook passed to it is currently returning a blocking response.
 // Updating the response to be non-blocking happens inline in the function.
 func runtimeHookTestHandler(ctx context.Context, c client.Client, namespace, clusterName, hookName string, withTopologyReconciledCondition bool, blockingCondition func() bool, intervals []interface{}) {
diff --git a/test/e2e/clusterclass_changes.go b/test/e2e/clusterclass_changes.go
index fda9314ea1a3..003961c35810 100644
--- a/test/e2e/clusterclass_changes.go
+++ b/test/e2e/clusterclass_changes.go
@@ -75,15 +75,16 @@ type ClusterClassChangesSpecInput struct {

 // ClusterClassChangesSpec implements a test that verifies that ClusterClass changes are rolled out successfully.
 // Thus, the test consists of the following steps:
-// * Deploy Cluster using a ClusterClass and wait until it is fully provisioned.
-// * Modify the ControlPlaneTemplate of the ClusterClass by setting ModifyControlPlaneFields
-// and wait until the change has been rolled out to the ControlPlane of the Cluster.
-// * Modify the BootstrapTemplate of all MachineDeploymentClasses of the ClusterClass by setting
-// ModifyMachineDeploymentBootstrapConfigTemplateFields and wait until the change has been rolled out
-// to the MachineDeployments of the Cluster.
-// * Rebase the Cluster to a copy of the ClusterClass which has an additional worker label set. Then wait
-// until the change has been rolled out to the MachineDeployments of the Cluster and verify the ControlPlane
-// has not been changed.
+//   - Deploy a Cluster using a ClusterClass and wait until it is fully provisioned.
+//   - Modify the ControlPlaneTemplate of the ClusterClass by setting ModifyControlPlaneFields
+//     and wait until the change has been rolled out to the ControlPlane of the Cluster.
+//   - Modify the BootstrapTemplate of all MachineDeploymentClasses of the ClusterClass by setting
+//     ModifyMachineDeploymentBootstrapConfigTemplateFields and wait until the change has been rolled out
+//     to the MachineDeployments of the Cluster.
+//   - Rebase the Cluster to a copy of the ClusterClass which has an additional worker label set. Then wait
+//     until the change has been rolled out to the MachineDeployments of the Cluster and verify the ControlPlane
+//     has not been changed.
+// // NOTE: The ClusterClass can be changed in many ways (as documented in the ClusterClass Operations doc). // This test verifies a subset of the possible operations and aims to test the most complicated rollouts // (template changes, label propagation, rebase), everything else will be covered by unit or integration tests. diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go index 8869905c7ac2..c994f1a932b1 100644 --- a/test/e2e/clusterctl_upgrade.go +++ b/test/e2e/clusterctl_upgrade.go @@ -99,10 +99,10 @@ type ClusterctlUpgradeSpecInput struct { // // The following commands are then added to preKubeadmCommands: // -// preKubeadmCommands: -// - mkdir -p /opt/cluster-api -// - aws s3 cp "s3://${S3_BUCKET}/${E2E_IMAGE_SHA}" /opt/cluster-api/image.tar -// - ctr -n k8s.io images import /opt/cluster-api/image.tar # The image must be imported into the k8s.io namespace +// preKubeadmCommands: +// - mkdir -p /opt/cluster-api +// - aws s3 cp "s3://${S3_BUCKET}/${E2E_IMAGE_SHA}" /opt/cluster-api/image.tar +// - ctr -n k8s.io images import /opt/cluster-api/image.tar # The image must be imported into the k8s.io namespace func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpgradeSpecInput) { var ( specName = "clusterctl-upgrade" diff --git a/test/framework/convenience.go b/test/framework/convenience.go index 6a883e779b2f..3af93beac1cb 100644 --- a/test/framework/convenience.go +++ b/test/framework/convenience.go @@ -36,11 +36,12 @@ import ( ) // TryAddDefaultSchemes tries to add the following schemes: -// * Kubernetes corev1 -// * Kubernetes appsv1 -// * CAPI core -// * Kubeadm Bootstrapper -// * Kubeadm ControlPlane +// - Kubernetes corev1 +// - Kubernetes appsv1 +// - CAPI core +// - Kubeadm Bootstrapper +// - Kubeadm ControlPlane +// // Any error that occurs when trying to add the schemes is ignored. func TryAddDefaultSchemes(scheme *runtime.Scheme) { // Add the core schemes. diff --git a/test/framework/namespace_helpers.go b/test/framework/namespace_helpers.go index 4e88324fb8aa..0446cbb29e5d 100644 --- a/test/framework/namespace_helpers.go +++ b/test/framework/namespace_helpers.go @@ -115,16 +115,17 @@ type WatchNamespaceEventsInput struct { // WatchNamespaceEvents creates a watcher that streams namespace events into a file. // Example usage: -// ctx, cancelWatches := context.WithCancel(context.Background()) -// go func() { -// defer GinkgoRecover() -// framework.WatchNamespaceEvents(ctx, framework.WatchNamespaceEventsInput{ -// ClientSet: clientSet, -// Name: namespace.Name, -// LogFolder: logFolder, -// }) -// }() -// defer cancelWatches() +// +// ctx, cancelWatches := context.WithCancel(context.Background()) +// go func() { +// defer GinkgoRecover() +// framework.WatchNamespaceEvents(ctx, framework.WatchNamespaceEventsInput{ +// ClientSet: clientSet, +// Name: namespace.Name, +// LogFolder: logFolder, +// }) +// }() +// defer cancelWatches() func WatchNamespaceEvents(ctx context.Context, input WatchNamespaceEventsInput) { Expect(ctx).NotTo(BeNil(), "ctx is required for WatchNamespaceEvents") Expect(input.ClientSet).NotTo(BeNil(), "input.ClientSet is required for WatchNamespaceEvents") diff --git a/util/conditions/merge.go b/util/conditions/merge.go index 7566a093e5e0..a3aa304b6e53 100644 --- a/util/conditions/merge.go +++ b/util/conditions/merge.go @@ -44,6 +44,7 @@ type localizedCondition struct { // - P2 - Status=False, Severity=Info // - P3 - Status=True // - P4 - Status=Unknown +// // 3. 
The group with the highest priority is used to determine status, severity and other info of the target condition.
//
// Please note that the last operation also includes the task of computing the Reason and the Message for the target
diff --git a/util/conditions/unstructured.go b/util/conditions/unstructured.go
index cc6291c66aea..64e03a7e7381 100644
--- a/util/conditions/unstructured.go
+++ b/util/conditions/unstructured.go
@@ -45,11 +45,11 @@ type unstructuredWrapper struct {
 //
 // NOTE: Due to the constraints of JSON-unmarshal, this operation is to be considered best effort.
 // In more detail:
-// - Errors during JSON-unmarshal are ignored and a empty collection list is returned.
-// - It's not possible to detect if the object has an empty condition list or if it does not implement conditions;
-// in both cases the operation returns an empty slice is returned.
-// - If the object doesn't implement conditions on under status as defined in Cluster API,
-// JSON-unmarshal matches incoming object keys to the keys; this can lead to to conditions values partially set.
+//   - Errors during JSON-unmarshal are ignored and an empty condition list is returned.
+//   - It's not possible to detect if the object has an empty condition list or if it does not implement conditions;
+//     in both cases the operation returns an empty slice.
+//   - If the object doesn't implement conditions under status as defined in Cluster API,
+//     JSON-unmarshal matches incoming object keys to the keys; this can lead to condition values being partially set.
 func (c *unstructuredWrapper) GetConditions() clusterv1.Conditions {
 	conditions := clusterv1.Conditions{}
 	if err := util.UnstructuredUnmarshalField(c.Unstructured, &conditions, "status", "conditions"); err != nil {
@@ -62,9 +62,9 @@ func (c *unstructuredWrapper) GetConditions() clusterv1.Conditions {
 //
 // NOTE: Due to the constraints of JSON-unmarshal, this operation is to be considered best effort.
 // In more detail:
-// - Errors during JSON-unmarshal are ignored and a empty collection list is returned.
-// - It's not possible to detect if the object has an empty condition list or if it does not implement conditions;
-// in both cases the operation returns an empty slice is returned.
+//   - Errors during JSON-unmarshal are ignored and an empty condition list is returned.
+//   - It's not possible to detect if the object has an empty condition list or if it does not implement conditions;
+//     in both cases the operation returns an empty slice.
 func (c *unstructuredWrapper) SetConditions(conditions clusterv1.Conditions) {
 	v := make([]interface{}, 0, len(conditions))
 	for i := range conditions {
diff --git a/util/container/image.go b/util/container/image.go
index fa11667596f3..4da11c508d42 100644
--- a/util/container/image.go
+++ b/util/container/image.go
@@ -18,10 +18,8 @@ limitations under the License.
package container import ( - // Import the crypto sha256 algorithm for the docker image parser to work _ "crypto/sha256" - // Import the crypto/sha512 algorithm for the docker image parser to work with 384 and 512 sha hashes _ "crypto/sha512" "fmt" diff --git a/util/predicates/cluster_predicates.go b/util/predicates/cluster_predicates.go index e636a44d2994..ffe9905339e5 100644 --- a/util/predicates/cluster_predicates.go +++ b/util/predicates/cluster_predicates.go @@ -155,13 +155,14 @@ func ClusterUpdateUnpaused(logger logr.Logger) predicate.Funcs { // This implements a common requirement for many cluster-api and provider controllers (such as Cluster Infrastructure // controllers) to resume reconciliation when the Cluster is unpaused. // Example use: -// err := controller.Watch( -// &source.Kind{Type: &clusterv1.Cluster{}}, -// &handler.EnqueueRequestsFromMapFunc{ -// ToRequests: clusterToMachines, -// }, -// predicates.ClusterUnpaused(r.Log), -// ) +// +// err := controller.Watch( +// &source.Kind{Type: &clusterv1.Cluster{}}, +// &handler.EnqueueRequestsFromMapFunc{ +// ToRequests: clusterToMachines, +// }, +// predicates.ClusterUnpaused(r.Log), +// ) func ClusterUnpaused(logger logr.Logger) predicate.Funcs { log := logger.WithValues("predicate", "ClusterUnpaused") @@ -172,13 +173,14 @@ func ClusterUnpaused(logger logr.Logger) predicate.Funcs { // ClusterControlPlaneInitialized returns a Predicate that returns true on Update events // when ControlPlaneInitializedCondition on a Cluster changes to true. // Example use: -// err := controller.Watch( -// &source.Kind{Type: &clusterv1.Cluster{}}, -// &handler.EnqueueRequestsFromMapFunc{ -// ToRequests: clusterToMachines, -// }, -// predicates.ClusterControlPlaneInitialized(r.Log), -// ) +// +// err := controller.Watch( +// &source.Kind{Type: &clusterv1.Cluster{}}, +// &handler.EnqueueRequestsFromMapFunc{ +// ToRequests: clusterToMachines, +// }, +// predicates.ClusterControlPlaneInitialized(r.Log), +// ) func ClusterControlPlaneInitialized(logger logr.Logger) predicate.Funcs { return predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { @@ -214,13 +216,14 @@ func ClusterControlPlaneInitialized(logger logr.Logger) predicate.Funcs { // This implements a common requirement for some cluster-api and provider controllers (such as Machine Infrastructure // controllers) to resume reconciliation when the Cluster is unpaused and when the infrastructure becomes ready. 
 // Example use:
-// err := controller.Watch(
-//     &source.Kind{Type: &clusterv1.Cluster{}},
-//     &handler.EnqueueRequestsFromMapFunc{
-//         ToRequests: clusterToMachines,
-//     },
-//     predicates.ClusterUnpausedAndInfrastructureReady(r.Log),
-// )
+//
+//	err := controller.Watch(
+//		&source.Kind{Type: &clusterv1.Cluster{}},
+//		&handler.EnqueueRequestsFromMapFunc{
+//			ToRequests: clusterToMachines,
+//		},
+//		predicates.ClusterUnpausedAndInfrastructureReady(r.Log),
+//	)
 func ClusterUnpausedAndInfrastructureReady(logger logr.Logger) predicate.Funcs {
 	log := logger.WithValues("predicate", "ClusterUnpausedAndInfrastructureReady")
diff --git a/util/predicates/generic_predicates.go b/util/predicates/generic_predicates.go
index d1d09749be31..ccce5de6d1da 100644
--- a/util/predicates/generic_predicates.go
+++ b/util/predicates/generic_predicates.go
@@ -152,6 +152,7 @@ func ResourceHasFilterLabel(logger logr.Logger, labelValue string) predicate.Fun
 // This implements a common requirement for all cluster-api and provider controllers to skip reconciliation when the paused
 // annotation is present for a resource.
 // Example use:
+//
 //	func (r *MyReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
 //		controller, err := ctrl.NewControllerManagedBy(mgr).
 //			For(&v1.MyType{}).
diff --git a/util/version/version.go b/util/version/version.go
index 90466e4fca5b..dcf85fdadbbe 100644
--- a/util/version/version.go
+++ b/util/version/version.go
@@ -192,10 +192,11 @@ type CompareOption func(*comparer)
 //   - Identifiers with letters or hyphens are compared only for equality, otherwise, 2 is returned given
 //     that it is not possible to identify if lower or greater (non-numeric identifiers could be random build
 //     identifiers).
-// -1 == a is less than b.
-// 0 == a is equal to b.
-// 1 == a is greater than b.
-// 2 == v is different than o (it is not possible to identify if lower or greater).
+//
+//	-1 == a is less than b.
+//	0 == a is equal to b.
+//	1 == a is greater than b.
+//	2 == a is different from b (it is not possible to identify if lower or greater).
 func WithBuildTags() CompareOption {
 	return func(c *comparer) {
 		c.buildTags = true