From 5d15f0f796e371284b47a1831a5d06b73afb6bc4 Mon Sep 17 00:00:00 2001
From: Hector Sam
Date: Mon, 4 May 2020 21:49:54 +0200
Subject: [PATCH] Fixed golint checks and removed -min_confidence flag from verify-golint.sh

---
 clusterloader2/api/types.go                   | 16 ++---
 clusterloader2/cmd/clusterloader.go           | 16 ++---
 clusterloader2/pkg/config/cluster.go          |  8 +--
 .../pkg/framework/client/objects.go           | 34 +++++-----
 clusterloader2/pkg/framework/framework.go     | 10 +--
 .../measurement/common/bundle/test_metrics.go |  8 +--
 .../pkg/measurement/common/etcd_metrics.go    |  2 +-
 .../pkg/measurement/common/metrics_for_e2e.go |  2 +-
 .../pkg/measurement/common/probes/probes.go   |  8 +--
 .../pkg/measurement/common/profile.go         | 14 ++--
 .../pkg/measurement/common/resource_usage.go  |  8 +--
 .../measurement/common/scheduler_latency.go   |  4 +-
 .../common/scheduling_throughput.go           |  2 +-
 .../common/service_creation_latency.go        |  6 +-
 .../pkg/measurement/common/sleep.go           |  2 +-
 .../slos/api_responsiveness_prometheus.go     |  6 +-
 .../common/slos/network_programming.go        |  4 +-
 .../common/slos/pod_startup_latency.go        |  2 +-
 .../common/slos/prometheus_measurement.go     |  6 +-
 .../slos/windows_node_resource_usage.go       |  6 +-
 .../measurement/common/system_pod_metrics.go  |  8 +--
 .../common/system_pod_metrics_test.go         |  6 +-
 .../pkg/measurement/common/timer.go           |  2 +-
 .../common/wait_for_controlled_pods.go        |  6 +-
 .../pkg/measurement/common/wait_for_nodes.go  |  2 +-
 .../pkg/measurement/common/wait_for_pods.go   |  2 +-
 .../pkg/measurement/common/wait_for_pvcs.go   |  2 +-
 .../pkg/measurement/common/wait_for_pvs.go    |  2 +-
 clusterloader2/pkg/measurement/interface.go   |  6 +-
 clusterloader2/pkg/measurement/manager.go     | 10 +--
 .../pkg/measurement/measurement_executor.go   |  4 +-
 .../measurement/util/checker/checker_map.go   | 14 ++--
 .../gatherers/container_resource_gatherer.go  |  2 +-
 .../pkg/measurement/util/kubemark/kubemark.go | 12 ++--
 .../pkg/measurement/util/resource.go          | 14 ++--
 clusterloader2/pkg/measurement/util/ssh.go    |  4 +-
 clusterloader2/pkg/prometheus/experimental.go | 14 ++--
 clusterloader2/pkg/prometheus/prometheus.go   | 28 ++++----
 clusterloader2/pkg/state/namespaces_state.go  | 16 ++---
 clusterloader2/pkg/state/resource_state.go    | 14 ++--
 clusterloader2/pkg/state/state.go             |  8 +--
 clusterloader2/pkg/test/interface.go          | 20 +++---
 clusterloader2/pkg/test/simple_context.go     | 14 ++--
 .../pkg/test/simple_test_executor.go          | 44 ++++++------
 clusterloader2/pkg/test/test.go               |  4 +-
 clusterloader2/pkg/tuningset/interface.go     |  4 +-
 clusterloader2/pkg/tuningset/qps_load.go      |  6 +-
 .../pkg/tuningset/randomized_load.go          |  6 +-
 .../tuningset/simple_tuning_set_factory.go    | 16 ++---
 dns/jsonify/main.go                           | 32 ++++-----
 network/benchmarks/netperf/nptest/nptest.go   | 66 +++++++++----------
 perfdash/github-configs-fetcher.go            |  6 +-
 perfdash/google-gcs-downloader.go             |  8 +--
 perfdash/parser.go                            | 14 ++--
 perfdash/perfdash.go                          |  4 +-
 util-images/probes/cmd/main.go                |  2 +-
 util-images/probes/pkg/ping/server/server.go  | 12 ++--
 verify/verify-golint.sh                       |  2 +-
 58 files changed, 300 insertions(+), 300 deletions(-)

diff --git a/clusterloader2/api/types.go b/clusterloader2/api/types.go
index 4a157487a..e58de0910 100644
--- a/clusterloader2/api/types.go
+++ b/clusterloader2/api/types.go
@@ -144,8 +144,8 @@ type TuningSet struct {
 	Name string `json: name`
 	// InitialDelay specifies the waiting time before starting phase execution.
 	InitialDelay Duration `json: initialDelay`
-	// QpsLoad is a definition for QpsLoad tuning set.
-	QpsLoad *QpsLoad `json: qpsLoad`
+	// QPSLoad is a definition for QPSLoad tuning set.
+	QPSLoad *QPSLoad `json: qpsLoad`
 	// RandomizedLoad is a definition for RandomizedLoad tuning set.
 	RandomizedLoad *RandomizedLoad `json: randomizedLoad`
 	// SteppedLoad is a definition for SteppedLoad tuning set.
@@ -187,17 +187,17 @@ type Measurement struct {
 	Instances []MeasurementInstanceConfig
 }
 
-// QpsLoad defines a uniform load with a given QPS.
-type QpsLoad struct {
-	// Qps specifies requested qps.
-	Qps float64 `json: qps`
+// QPSLoad defines a uniform load with a given QPS.
+type QPSLoad struct {
+	// QPS specifies requested qps.
+	QPS float64 `json: qps`
 }
 
 // RandomizedLoad defines a load that is spread randomly
 // across a given total time.
 type RandomizedLoad struct {
-	// AverageQps specifies the expected average qps.
-	AverageQps float64 `json: averageQps`
+	// AverageQPS specifies the expected average qps.
+	AverageQPS float64 `json: averageQps`
 }
 
 // SteppedLoad defines a load that generates a burst of
diff --git a/clusterloader2/cmd/clusterloader.go b/clusterloader2/cmd/clusterloader.go
index 4b46dde1a..144eeef78 100644
--- a/clusterloader2/cmd/clusterloader.go
+++ b/clusterloader2/cmd/clusterloader.go
@@ -251,12 +251,12 @@ func main() {
 		klog.Exitf("Framework creation error: %v", err)
 	}
 
-	var prometheusController *prometheus.PrometheusController
+	var prometheusController *prometheus.Controller
 	var prometheusFramework *framework.Framework
 	if clusterLoaderConfig.PrometheusConfig.EnableServer {
 		// Pass overrides to prometheus controller
 		clusterLoaderConfig.TestScenario.OverridePaths = testOverridePaths
-		if prometheusController, err = prometheus.NewPrometheusController(&clusterLoaderConfig); err != nil {
+		if prometheusController, err = prometheus.NewController(&clusterLoaderConfig); err != nil {
 			klog.Exitf("Error while creating Prometheus Controller: %v", err)
 		}
 		prometheusFramework = prometheusController.GetFramework()
@@ -320,28 +320,28 @@ func runSingleTest(
 	junitReporter *ginkgoreporters.JUnitReporter,
 	suiteSummary *ginkgotypes.SuiteSummary,
 ) {
-	testId := getTestId(clusterLoaderConfig.TestScenario)
+	testID := getTestID(clusterLoaderConfig.TestScenario)
 	testStart := time.Now()
 	specSummary := &ginkgotypes.SpecSummary{
-		ComponentTexts: []string{suiteSummary.SuiteDescription, testId},
+		ComponentTexts: []string{suiteSummary.SuiteDescription, testID},
 	}
-	printTestStart(testId)
+	printTestStart(testID)
 	if errList := test.RunTest(f, prometheusFramework, &clusterLoaderConfig); !errList.IsEmpty() {
 		suiteSummary.NumberOfFailedSpecs++
 		specSummary.State = ginkgotypes.SpecStateFailed
 		specSummary.Failure = ginkgotypes.SpecFailure{
 			Message: errList.String(),
 		}
-		printTestResult(testId, "Fail", errList.String())
+		printTestResult(testID, "Fail", errList.String())
 	} else {
 		specSummary.State = ginkgotypes.SpecStatePassed
-		printTestResult(testId, "Success", "")
+		printTestResult(testID, "Success", "")
 	}
 	specSummary.RunTime = time.Since(testStart)
 	junitReporter.SpecDidComplete(specSummary)
 }
 
-func getTestId(ts api.TestScenario) string {
+func getTestID(ts api.TestScenario) string {
 	if ts.Identifier != "" {
 		return fmt.Sprintf("%s(%s)", ts.Identifier, ts.ConfigPath)
 	}
diff --git a/clusterloader2/pkg/config/cluster.go b/clusterloader2/pkg/config/cluster.go
index 597ada104..6e4870697 100644
--- a/clusterloader2/pkg/config/cluster.go
+++ b/clusterloader2/pkg/config/cluster.go
@@ -65,18 +65,18 @@ type PrometheusConfig struct {
 	SnapshotProject string
 }
 
-// GetMasterIp returns the first master ip, added for backward compatibility.
+// GetMasterIP returns the first master ip, added for backward compatibility.
 // TODO(mmatt): Remove this method once all the codebase is migrated to support multiple masters.
-func (c *ClusterConfig) GetMasterIp() string {
+func (c *ClusterConfig) GetMasterIP() string {
 	if len(c.MasterIPs) > 0 {
 		return c.MasterIPs[0]
 	}
 	return ""
 }
 
-// GetMasterInternalIp returns the first internal master ip, added for backward compatibility.
+// GetMasterInternalIP returns the first internal master ip, added for backward compatibility.
 // TODO(mmatt): Remove this method once all the codebase is migrated to support multiple masters.
-func (c *ClusterConfig) GetMasterInternalIp() string {
+func (c *ClusterConfig) GetMasterInternalIP() string {
 	if len(c.MasterInternalIPs) > 0 {
 		return c.MasterInternalIPs[0]
 	}
diff --git a/clusterloader2/pkg/framework/client/objects.go b/clusterloader2/pkg/framework/client/objects.go
index b530e23d0..ccfbd4b6b 100644
--- a/clusterloader2/pkg/framework/client/objects.go
+++ b/clusterloader2/pkg/framework/client/objects.go
@@ -96,25 +96,25 @@ func IsRetryableNetError(err error) bool {
 	return false
 }
 
-// ApiCallOptions describes how api call errors should be treated, i.e. which errors should be
+// APICallOptions describes how api call errors should be treated, i.e. which errors should be
 // allowed (ignored) and which should be retried.
-type ApiCallOptions struct {
+type APICallOptions struct {
 	shouldAllowError func(error) bool
 	shouldRetryError func(error) bool
 }
 
-// Allow creates an ApiCallOptions that allows (ignores) errors matching the given predicate.
-func Allow(allowErrorPredicate func(error) bool) *ApiCallOptions {
-	return &ApiCallOptions{shouldAllowError: allowErrorPredicate}
+// Allow creates an APICallOptions that allows (ignores) errors matching the given predicate.
+func Allow(allowErrorPredicate func(error) bool) *APICallOptions {
+	return &APICallOptions{shouldAllowError: allowErrorPredicate}
 }
 
-// Retry creates an ApiCallOptions that retries errors matching the given predicate.
-func Retry(retryErrorPredicate func(error) bool) *ApiCallOptions {
-	return &ApiCallOptions{shouldRetryError: retryErrorPredicate}
+// Retry creates an APICallOptions that retries errors matching the given predicate.
+func Retry(retryErrorPredicate func(error) bool) *APICallOptions {
+	return &APICallOptions{shouldRetryError: retryErrorPredicate}
 }
 
 // RetryFunction opaques given function into retryable function.
-func RetryFunction(f func() error, options ...*ApiCallOptions) wait.ConditionFunc {
+func RetryFunction(f func() error, options ...*APICallOptions) wait.ConditionFunc {
 	var shouldAllowErrorFuncs, shouldRetryErrorFuncs []func(error) bool
 	for _, option := range options {
 		if option.shouldAllowError != nil {
@@ -237,7 +237,7 @@ func WaitForDeleteNamespace(c clientset.Interface, namespace string) error {
 }
 
 // ListEvents retrieves events for the object with the given name.
-func ListEvents(c clientset.Interface, namespace string, name string, options ...*ApiCallOptions) (obj *apiv1.EventList, err error) {
+func ListEvents(c clientset.Interface, namespace string, name string, options ...*APICallOptions) (obj *apiv1.EventList, err error) {
 	getFunc := func() error {
 		obj, err = c.CoreV1().Events(namespace).List(metav1.ListOptions{
 			FieldSelector: "involvedObject.name=" + name,
@@ -259,7 +259,7 @@ func DeleteStorageClass(c clientset.Interface, name string) error {
 }
 
 // CreateObject creates object based on given object description.
-func CreateObject(dynamicClient dynamic.Interface, namespace string, name string, obj *unstructured.Unstructured, options ...*ApiCallOptions) error {
+func CreateObject(dynamicClient dynamic.Interface, namespace string, name string, obj *unstructured.Unstructured, options ...*APICallOptions) error {
 	gvk := obj.GroupVersionKind()
 	gvr, _ := meta.UnsafeGuessKindToResource(gvk)
 	obj.SetName(name)
@@ -272,7 +272,7 @@
 }
 
 // PatchObject updates (using patch) object with given name, group, version and kind based on given object description.
-func PatchObject(dynamicClient dynamic.Interface, namespace string, name string, obj *unstructured.Unstructured, options ...*ApiCallOptions) error {
+func PatchObject(dynamicClient dynamic.Interface, namespace string, name string, obj *unstructured.Unstructured, options ...*APICallOptions) error {
 	gvk := obj.GroupVersionKind()
 	gvr, _ := meta.UnsafeGuessKindToResource(gvk)
 	obj.SetName(name)
@@ -292,7 +292,7 @@ func PatchObject(dynamicClient dynamic.Interface, namespace string, name string,
 }
 
 // DeleteObject deletes object with given name, group, version and kind.
-func DeleteObject(dynamicClient dynamic.Interface, gvk schema.GroupVersionKind, namespace string, name string, options ...*ApiCallOptions) error {
+func DeleteObject(dynamicClient dynamic.Interface, gvk schema.GroupVersionKind, namespace string, name string, options ...*APICallOptions) error {
 	gvr, _ := meta.UnsafeGuessKindToResource(gvk)
 	deleteFunc := func() error {
 		// Delete operation removes object with all of the dependants.
@@ -305,7 +305,7 @@ func DeleteObject(dynamicClient dynamic.Interface, gvk schema.GroupVersionKind,
 }
 
 // GetObject retrieves object with given name, group, version and kind.
-func GetObject(dynamicClient dynamic.Interface, gvk schema.GroupVersionKind, namespace string, name string, options ...*ApiCallOptions) (*unstructured.Unstructured, error) {
+func GetObject(dynamicClient dynamic.Interface, gvk schema.GroupVersionKind, namespace string, name string, options ...*APICallOptions) (*unstructured.Unstructured, error) {
 	var obj *unstructured.Unstructured
 	gvr, _ := meta.UnsafeGuessKindToResource(gvk)
 	getFunc := func() error {
@@ -322,11 +322,11 @@ func GetObject(dynamicClient dynamic.Interface, gvk schema.GroupVersionKind, nam
 }
 
 func createPatch(current, modified *unstructured.Unstructured) ([]byte, error) {
-	currentJson, err := current.MarshalJSON()
+	currentJSON, err := current.MarshalJSON()
 	if err != nil {
 		return []byte{}, err
 	}
-	modifiedJson, err := modified.MarshalJSON()
+	modifiedJSON, err := modified.MarshalJSON()
 	if err != nil {
 		return []byte{}, err
 	}
@@ -336,5 +336,5 @@ func createPatch(current, modified *unstructured.Unstructured) ([]byte, error) {
 	// if some field has been deleted between `original` and `modified` object
 	// (e.g. by removing field in object's yaml), we will never remove that field from 'current'.
 	// TODO(mborsz): Pass here the original object.
-	return jsonmergepatch.CreateThreeWayJSONMergePatch(nil /* original */, modifiedJson, currentJson, preconditions...)
+	return jsonmergepatch.CreateThreeWayJSONMergePatch(nil /* original */, modifiedJSON, currentJSON, preconditions...)
 }
diff --git a/clusterloader2/pkg/framework/framework.go b/clusterloader2/pkg/framework/framework.go
index 6dd4563c7..46ce4c48b 100644
--- a/clusterloader2/pkg/framework/framework.go
+++ b/clusterloader2/pkg/framework/framework.go
@@ -203,28 +203,28 @@ func (f *Framework) DeleteNamespaces(namespaces []string) *errors.ErrorList {
 }
 
 // CreateObject creates object base on given object description.
-func (f *Framework) CreateObject(namespace string, name string, obj *unstructured.Unstructured, options ...*client.ApiCallOptions) error {
+func (f *Framework) CreateObject(namespace string, name string, obj *unstructured.Unstructured, options ...*client.APICallOptions) error {
 	return client.CreateObject(f.dynamicClients.GetClient(), namespace, name, obj, options...)
 }
 
 // PatchObject updates object (using patch) with given name using given object description.
-func (f *Framework) PatchObject(namespace string, name string, obj *unstructured.Unstructured, options ...*client.ApiCallOptions) error {
+func (f *Framework) PatchObject(namespace string, name string, obj *unstructured.Unstructured, options ...*client.APICallOptions) error {
 	return client.PatchObject(f.dynamicClients.GetClient(), namespace, name, obj)
 }
 
 // DeleteObject deletes object with given name and group-version-kind.
-func (f *Framework) DeleteObject(gvk schema.GroupVersionKind, namespace string, name string, options ...*client.ApiCallOptions) error {
+func (f *Framework) DeleteObject(gvk schema.GroupVersionKind, namespace string, name string, options ...*client.APICallOptions) error {
 	return client.DeleteObject(f.dynamicClients.GetClient(), gvk, namespace, name)
 }
 
 // GetObject retrieves object with given name and group-version-kind.
-func (f *Framework) GetObject(gvk schema.GroupVersionKind, namespace string, name string, options ...*client.ApiCallOptions) (*unstructured.Unstructured, error) {
+func (f *Framework) GetObject(gvk schema.GroupVersionKind, namespace string, name string, options ...*client.APICallOptions) (*unstructured.Unstructured, error) {
 	return client.GetObject(f.dynamicClients.GetClient(), gvk, namespace, name)
 }
 
 // ApplyTemplatedManifests finds and applies all manifest template files matching the provided
 // manifestGlob pattern. It substitutes the template placeholders using the templateMapping map.
-func (f *Framework) ApplyTemplatedManifests(manifestGlob string, templateMapping map[string]interface{}, options ...*client.ApiCallOptions) error {
+func (f *Framework) ApplyTemplatedManifests(manifestGlob string, templateMapping map[string]interface{}, options ...*client.APICallOptions) error {
 	// TODO(mm4tt): Consider using the out-of-the-box "kubectl create -f".
 	manifestGlob = os.ExpandEnv(manifestGlob)
 	templateProvider := config.NewTemplateProvider(filepath.Dir(manifestGlob))
diff --git a/clusterloader2/pkg/measurement/common/bundle/test_metrics.go b/clusterloader2/pkg/measurement/common/bundle/test_metrics.go
index bc53719ad..aebb32d84 100644
--- a/clusterloader2/pkg/measurement/common/bundle/test_metrics.go
+++ b/clusterloader2/pkg/measurement/common/bundle/test_metrics.go
@@ -103,7 +103,7 @@ type testMetrics struct {
 
 // Execute supports two actions. start - which sets up all metrics.
 // stop - which stops all metrics and collects all measurements.
-func (t *testMetrics) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (t *testMetrics) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	var summaries []measurement.Summary
 	errList := errors.NewErrorList()
 	action, err := util.GetString(config.Params, "action")
@@ -243,7 +243,7 @@ func (*testMetrics) String() string {
 	return testMetricsMeasurementName
 }
 
-func createConfig(config *measurement.MeasurementConfig, overrides map[string]interface{}) *measurement.MeasurementConfig {
+func createConfig(config *measurement.Config, overrides map[string]interface{}) *measurement.Config {
 	params := make(map[string]interface{})
 	for k, v := range config.Params {
 		params[k] = v
@@ -251,7 +251,7 @@ func createConfig(config *measurement.MeasurementConfig, overrides map[string]in
 	for k, v := range overrides {
 		params[k] = v
 	}
-	return &measurement.MeasurementConfig{
+	return &measurement.Config{
 		ClusterFramework:    config.ClusterFramework,
 		PrometheusFramework: config.PrometheusFramework,
 		Params:              params,
@@ -260,7 +260,7 @@ func createConfig(config *measurement.MeasurementConfig, overrides map[string]in
 	}
 }
 
-func execute(m measurement.Measurement, config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func execute(m measurement.Measurement, config *measurement.Config) ([]measurement.Summary, error) {
 	if m == nil {
 		return nil, fmt.Errorf("uninitialized metric")
 	}
diff --git a/clusterloader2/pkg/measurement/common/etcd_metrics.go b/clusterloader2/pkg/measurement/common/etcd_metrics.go
index 97169a37f..b561fd2e7 100644
--- a/clusterloader2/pkg/measurement/common/etcd_metrics.go
+++ b/clusterloader2/pkg/measurement/common/etcd_metrics.go
@@ -59,7 +59,7 @@ type etcdMetricsMeasurement struct {
 // Execute supports two actions:
 // - start - Starts collecting etcd metrics.
 // - gather - Gathers and prints etcd metrics summary.
-func (e *etcdMetricsMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (e *etcdMetricsMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	// Etcd is only exposed on localhost level. We are using ssh method
 	if !config.ClusterFramework.GetClusterConfig().SSHToMasterSupported {
 		klog.Infof("not grabbing etcd metrics through master SSH: unsupported for provider, %s", config.ClusterFramework.GetClusterConfig().Provider)
diff --git a/clusterloader2/pkg/measurement/common/metrics_for_e2e.go b/clusterloader2/pkg/measurement/common/metrics_for_e2e.go
index 412eee7aa..079bc69a6 100644
--- a/clusterloader2/pkg/measurement/common/metrics_for_e2e.go
+++ b/clusterloader2/pkg/measurement/common/metrics_for_e2e.go
@@ -54,7 +54,7 @@ func createmetricsForE2EMeasurement() measurement.Measurement {
 type metricsForE2EMeasurement struct{}
 
 // Execute gathers and prints e2e metrics data.
-func (m *metricsForE2EMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (m *metricsForE2EMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	provider, err := util.GetStringOrDefault(config.Params, "provider", config.ClusterFramework.GetClusterConfig().Provider)
 	if err != nil {
 		return nil, err
diff --git a/clusterloader2/pkg/measurement/common/probes/probes.go b/clusterloader2/pkg/measurement/common/probes/probes.go
index f3b57f7d9..01feac821 100644
--- a/clusterloader2/pkg/measurement/common/probes/probes.go
+++ b/clusterloader2/pkg/measurement/common/probes/probes.go
@@ -97,7 +97,7 @@ type probesMeasurement struct {
 // Execute supports two actions:
 // - start - starts probes and sets up monitoring
 // - gather - Gathers and prints metrics.
-func (p *probesMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (p *probesMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	if config.CloudProvider == "kubemark" {
 		klog.Infof("%s: Probes cannot work in Kubemark, skipping the measurement!", p)
 		return nil, nil
@@ -146,7 +146,7 @@ func (p *probesMeasurement) String() string {
 	return p.config.Name
 }
 
-func (p *probesMeasurement) initialize(config *measurement.MeasurementConfig) error {
+func (p *probesMeasurement) initialize(config *measurement.Config) error {
 	replicasPerProbe, err := util.GetInt(config.Params, "replicasPerProbe")
 	if err != nil {
 		return err
@@ -157,7 +157,7 @@ func (p *probesMeasurement) initialize(config *measurement.MeasurementConfig) er
 	return nil
 }
 
-func (p *probesMeasurement) start(config *measurement.MeasurementConfig) error {
+func (p *probesMeasurement) start(config *measurement.Config) error {
 	klog.Infof("Starting %s probe...", p)
 	if !p.startTime.IsZero() {
 		return fmt.Errorf("measurement %s cannot be started twice", p)
@@ -224,7 +224,7 @@ func (p *probesMeasurement) createProbesObjects() error {
 	return p.framework.ApplyTemplatedManifests(path.Join(manifestsPathPrefix, p.config.Manifests), p.templateMapping)
 }
 
-func (p *probesMeasurement) waitForProbesReady(config *measurement.MeasurementConfig) error {
+func (p *probesMeasurement) waitForProbesReady(config *measurement.Config) error {
 	klog.Infof("Waiting for Probe %s to become ready...", p)
 	checkProbesReadyTimeout, err := util.GetDurationOrDefault(config.Params, "checkProbesReadyTimeout", defaultCheckProbesReadyTimeout)
 	if err != nil {
diff --git a/clusterloader2/pkg/measurement/common/profile.go b/clusterloader2/pkg/measurement/common/profile.go
index b4c1fa1bc..a8daff012 100644
--- a/clusterloader2/pkg/measurement/common/profile.go
+++ b/clusterloader2/pkg/measurement/common/profile.go
@@ -58,7 +58,7 @@ type profileConfig struct {
 	kind string
 }
 
-func (p *profileMeasurement) populateProfileConfig(config *measurement.MeasurementConfig) error {
+func (p *profileMeasurement) populateProfileConfig(config *measurement.Config) error {
 	var err error
 	if p.config.componentName, err = util.GetString(config.Params, "componentName"); err != nil {
 		return err
@@ -88,7 +88,7 @@ func createProfileMeasurementFactory(name, kind string) func() measurement.Measu
 	}
 }
 
-func (p *profileMeasurement) start(config *measurement.MeasurementConfig, SSHToMasterSupported bool) error {
+func (p *profileMeasurement) start(config *measurement.Config, SSHToMasterSupported bool) error {
 	if err := p.populateProfileConfig(config); err != nil {
 		return err
 	}
@@ -97,7 +97,7 @@ func (p *profileMeasurement) start(config *measurement.MeasurementConfig, SSHToM
 		return nil
 	}
 	k8sClient := config.ClusterFramework.GetClientSets().GetClient()
-	if p.shouldExposeApiServerDebugEndpoint() {
+	if p.shouldExposeAPIServerDebugEndpoint() {
 		if err := exposeAPIServerDebugEndpoint(k8sClient); err != nil {
 			klog.Warningf("error while exposing kube-apiserver /debug endpoint: %v", err)
 		}
@@ -143,7 +143,7 @@ func (p *profileMeasurement) stop() {
 }
 
 // Execute gathers memory profile of a given component.
-func (p *profileMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (p *profileMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	SSHToMasterSupported := config.ClusterFramework.GetClusterConfig().SSHToMasterSupported
 	APIServerPprofEnabled := config.ClusterFramework.GetClusterConfig().APIServerPprofByClientEnabled
 
@@ -180,7 +180,7 @@ func (p *profileMeasurement) String() string {
 	return p.name
 }
 
-func (p *profileMeasurement) gatherProfile(c clientset.Interface, SSHToMasterSupported bool, config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (p *profileMeasurement) gatherProfile(c clientset.Interface, SSHToMasterSupported bool, config *measurement.Config) ([]measurement.Summary, error) {
 	getCommand, err := p.getProfileCommand(config)
 	if err != nil {
 		return nil, goerrors.Errorf("profile gathering failed during retrieving profile command: %v", err)
@@ -220,11 +220,11 @@ func (p *profileMeasurement) gatherProfile(c clientset.Interface, SSHToMasterSup
 	return summaries, nil
 }
 
-func (p *profileMeasurement) shouldExposeApiServerDebugEndpoint() bool {
+func (p *profileMeasurement) shouldExposeAPIServerDebugEndpoint() bool {
 	return p.config.componentName == "kube-apiserver"
 }
 
-func (p *profileMeasurement) getProfileCommand(config *measurement.MeasurementConfig) (string, error) {
+func (p *profileMeasurement) getProfileCommand(config *measurement.Config) (string, error) {
 	profilePort, err := getPortForComponent(p.config.componentName)
 	if err != nil {
 		return "", goerrors.Errorf("get profile command failed finding component port: %v", err)
diff --git a/clusterloader2/pkg/measurement/common/resource_usage.go b/clusterloader2/pkg/measurement/common/resource_usage.go
index 05aec37d9..b60d41f67 100644
--- a/clusterloader2/pkg/measurement/common/resource_usage.go
+++ b/clusterloader2/pkg/measurement/common/resource_usage.go
@@ -54,7 +54,7 @@ type resourceUsageMetricMeasurement struct {
 // Execute supports two actions:
 // - start - Starts resource metrics collecting.
 // - gather - Gathers and prints current resource usage metrics.
-func (e *resourceUsageMetricMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (e *resourceUsageMetricMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	action, err := util.GetString(config.Params, "action")
 	if err != nil {
 		return nil, err
@@ -66,7 +66,7 @@ func (e *resourceUsageMetricMeasurement) Execute(config *measurement.Measurement
 		if err != nil {
 			return nil, err
 		}
-		host, err := util.GetStringOrDefault(config.Params, "host", config.ClusterFramework.GetClusterConfig().GetMasterIp())
+		host, err := util.GetStringOrDefault(config.Params, "host", config.ClusterFramework.GetClusterConfig().GetMasterIP())
 		if err != nil {
 			return nil, err
 		}
@@ -156,12 +156,12 @@ func (e *resourceUsageMetricMeasurement) verifySummary(summary *gatherers.Resour
 	for _, containerSummary := range summary.Get("99") {
 		containerName := strings.Split(containerSummary.Name, "/")[1]
 		if constraint, ok := e.resourceConstraints[containerName]; ok {
-			if containerSummary.Cpu > constraint.CPUConstraint {
+			if containerSummary.CPU > constraint.CPUConstraint {
 				violatedConstraints = append(
 					violatedConstraints,
 					fmt.Sprintf("container %v is using %v/%v CPU",
 						containerSummary.Name,
-						containerSummary.Cpu,
+						containerSummary.CPU,
 						constraint.CPUConstraint,
 					),
 				)
diff --git a/clusterloader2/pkg/measurement/common/scheduler_latency.go b/clusterloader2/pkg/measurement/common/scheduler_latency.go
index 4eb2bf089..085c599d9 100644
--- a/clusterloader2/pkg/measurement/common/scheduler_latency.go
+++ b/clusterloader2/pkg/measurement/common/scheduler_latency.go
@@ -75,7 +75,7 @@ type schedulerLatencyMeasurement struct{}
 // Execute supports two actions:
 // - reset - Resets latency data on api scheduler side.
 // - gather - Gathers and prints current scheduler latency data.
-func (s *schedulerLatencyMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (s *schedulerLatencyMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	SSHToMasterSupported := config.ClusterFramework.GetClusterConfig().SSHToMasterSupported
 
 	c := config.ClusterFramework.GetClientSets().GetClient()
@@ -105,7 +105,7 @@ func (s *schedulerLatencyMeasurement) Execute(config *measurement.MeasurementCon
 	if err != nil {
 		return nil, err
 	}
-	masterIP, err := util.GetStringOrDefault(config.Params, "masterIP", config.ClusterFramework.GetClusterConfig().GetMasterIp())
+	masterIP, err := util.GetStringOrDefault(config.Params, "masterIP", config.ClusterFramework.GetClusterConfig().GetMasterIP())
 	if err != nil {
 		return nil, err
 	}
diff --git a/clusterloader2/pkg/measurement/common/scheduling_throughput.go b/clusterloader2/pkg/measurement/common/scheduling_throughput.go
index 9fb153686..263b7f2e2 100644
--- a/clusterloader2/pkg/measurement/common/scheduling_throughput.go
+++ b/clusterloader2/pkg/measurement/common/scheduling_throughput.go
@@ -56,7 +56,7 @@ type schedulingThroughputMeasurement struct {
 // Pods can be specified by field and/or label selectors.
 // If namespace is not passed by parameter, all-namespace scope is assumed.
 // - gather - creates summary for observed values.
-func (s *schedulingThroughputMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (s *schedulingThroughputMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	action, err := util.GetString(config.Params, "action")
 	if err != nil {
 		return nil, err
diff --git a/clusterloader2/pkg/measurement/common/service_creation_latency.go b/clusterloader2/pkg/measurement/common/service_creation_latency.go
index d09b86a0e..578766c7e 100644
--- a/clusterloader2/pkg/measurement/common/service_creation_latency.go
+++ b/clusterloader2/pkg/measurement/common/service_creation_latency.go
@@ -58,7 +58,7 @@ func createServiceCreationLatencyMeasurement() measurement.Measurement {
 		selector:      measurementutil.NewObjectSelector(),
 		queue:         workerqueue.NewWorkerQueue(serviceCreationLatencyWorkers),
 		creationTimes: measurementutil.NewObjectTransitionTimes(serviceCreationLatencyName),
-		pingCheckers:  checker.NewCheckerMap(),
+		pingCheckers:  checker.NewMap(),
 	}
 }
 
@@ -70,7 +70,7 @@ type serviceCreationLatencyMeasurement struct {
 	queue         workerqueue.Interface
 	client        clientset.Interface
 	creationTimes *measurementutil.ObjectTransitionTimes
-	pingCheckers  checker.CheckerMap
+	pingCheckers  checker.Map
 }
 
 // Execute executes service startup latency measurement actions.
@@ -80,7 +80,7 @@ type serviceCreationLatencyMeasurement struct {
 // "waitForReady" waits until all services are reachable.
 // "gather" returns service created latency summary.
 // This measurement only works for services with ClusterIP, NodePort and LoadBalancer type.
-func (s *serviceCreationLatencyMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (s *serviceCreationLatencyMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	s.client = config.ClusterFramework.GetClientSets().GetClient()
 	action, err := util.GetString(config.Params, "action")
 	if err != nil {
diff --git a/clusterloader2/pkg/measurement/common/sleep.go b/clusterloader2/pkg/measurement/common/sleep.go
index 86cbc7e57..e02abbc1a 100644
--- a/clusterloader2/pkg/measurement/common/sleep.go
+++ b/clusterloader2/pkg/measurement/common/sleep.go
@@ -41,7 +41,7 @@ func createSleepMeasurement() measurement.Measurement {
 
 type sleepMeasurement struct{}
 
-func (s *sleepMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (s *sleepMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	durationStr, err := util.GetString(config.Params, "duration")
 	if err != nil {
 		return nil, err
diff --git a/clusterloader2/pkg/measurement/common/slos/api_responsiveness_prometheus.go b/clusterloader2/pkg/measurement/common/slos/api_responsiveness_prometheus.go
index 60ec0ce74..0ace67c88 100644
--- a/clusterloader2/pkg/measurement/common/slos/api_responsiveness_prometheus.go
+++ b/clusterloader2/pkg/measurement/common/slos/api_responsiveness_prometheus.go
@@ -100,7 +100,7 @@ func (a *apiResponsiveness) Less(i, j int) bool {
 
 type apiResponsivenessGatherer struct{}
 
-func (a *apiResponsivenessGatherer) Gather(executor QueryExecutor, startTime time.Time, config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (a *apiResponsivenessGatherer) Gather(executor QueryExecutor, startTime time.Time, config *measurement.Config) ([]measurement.Summary, error) {
 	apiCalls, err := a.gatherAPICalls(executor, startTime, config)
 	if err != nil {
 		klog.Errorf("%s: samples gathering error: %v", config.Identifier, err)
@@ -133,11 +133,11 @@ func (a *apiResponsivenessGatherer) String() string {
 	return apiResponsivenessPrometheusMeasurementName
 }
 
-func (a *apiResponsivenessGatherer) IsEnabled(config *measurement.MeasurementConfig) bool {
+func (a *apiResponsivenessGatherer) IsEnabled(config *measurement.Config) bool {
 	return true
 }
 
-func (a *apiResponsivenessGatherer) gatherAPICalls(executor QueryExecutor, startTime time.Time, config *measurement.MeasurementConfig) ([]apiCall, error) {
+func (a *apiResponsivenessGatherer) gatherAPICalls(executor QueryExecutor, startTime time.Time, config *measurement.Config) ([]apiCall, error) {
 	measurementEnd := time.Now()
 	measurementDuration := measurementEnd.Sub(startTime)
 
diff --git a/clusterloader2/pkg/measurement/common/slos/network_programming.go b/clusterloader2/pkg/measurement/common/slos/network_programming.go
index 85df05e9c..c49cb2c88 100644
--- a/clusterloader2/pkg/measurement/common/slos/network_programming.go
+++ b/clusterloader2/pkg/measurement/common/slos/network_programming.go
@@ -47,7 +47,7 @@ func init() {
 
 type netProgGatherer struct{}
 
-func (n *netProgGatherer) IsEnabled(config *measurement.MeasurementConfig) bool {
+func (n *netProgGatherer) IsEnabled(config *measurement.Config) bool {
 	// Disable NetworkProgrammingLatency measurement if scraping kube-proxy is disabled.
 	if !config.ClusterLoaderConfig.PrometheusConfig.ScrapeKubeProxy {
 		return false
@@ -55,7 +55,7 @@ func (n *netProgGatherer) IsEnabled(config *measurement.MeasurementConfig) bool
 	return config.CloudProvider != "kubemark"
 }
 
-func (n *netProgGatherer) Gather(executor QueryExecutor, startTime time.Time, config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (n *netProgGatherer) Gather(executor QueryExecutor, startTime time.Time, config *measurement.Config) ([]measurement.Summary, error) {
 	latency, err := n.query(executor, startTime)
 	if err != nil {
 		return nil, err
diff --git a/clusterloader2/pkg/measurement/common/slos/pod_startup_latency.go b/clusterloader2/pkg/measurement/common/slos/pod_startup_latency.go
index f6f5eac9b..75c804b7a 100644
--- a/clusterloader2/pkg/measurement/common/slos/pod_startup_latency.go
+++ b/clusterloader2/pkg/measurement/common/slos/pod_startup_latency.go
@@ -67,7 +67,7 @@ type podStartupLatencyMeasurement struct {
 // - gather - Gathers and prints current pod latency data.
 // Does NOT support concurrency. Multiple calls to this measurement
 // shouldn't be done within one step.
-func (p *podStartupLatencyMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (p *podStartupLatencyMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	action, err := util.GetString(config.Params, "action")
 	if err != nil {
 		return nil, err
diff --git a/clusterloader2/pkg/measurement/common/slos/prometheus_measurement.go b/clusterloader2/pkg/measurement/common/slos/prometheus_measurement.go
index a7dd9e33e..c458b11ed 100644
--- a/clusterloader2/pkg/measurement/common/slos/prometheus_measurement.go
+++ b/clusterloader2/pkg/measurement/common/slos/prometheus_measurement.go
@@ -43,8 +43,8 @@ type QueryExecutor interface {
 // It's assumed Prometheus is up, running and instructed to scrape required metrics in the test cluster
 // (please see clusterloader2/pkg/prometheus/manifests).
 type Gatherer interface {
-	Gather(executor QueryExecutor, startTime time.Time, config *measurement.MeasurementConfig) ([]measurement.Summary, error)
-	IsEnabled(config *measurement.MeasurementConfig) bool
+	Gather(executor QueryExecutor, startTime time.Time, config *measurement.Config) ([]measurement.Summary, error)
+	IsEnabled(config *measurement.Config) bool
 	String() string
 }
 
@@ -54,7 +54,7 @@ type prometheusMeasurement struct {
 	startTime time.Time
 }
 
-func (m *prometheusMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (m *prometheusMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	if config.PrometheusFramework == nil {
 		klog.Warningf("%s: Prometheus is disabled, skipping the measurement!", config.Identifier)
 		return nil, nil
diff --git a/clusterloader2/pkg/measurement/common/slos/windows_node_resource_usage.go b/clusterloader2/pkg/measurement/common/slos/windows_node_resource_usage.go
index 1771fe8a8..98c9ae848 100644
--- a/clusterloader2/pkg/measurement/common/slos/windows_node_resource_usage.go
+++ b/clusterloader2/pkg/measurement/common/slos/windows_node_resource_usage.go
@@ -43,7 +43,7 @@ const (
 type convertFunc func([]*model.Sample) *measurementutil.PerfData
 type windowsResourceUsageGatherer struct{}
 
-func (w *windowsResourceUsageGatherer) IsEnabled(config *measurement.MeasurementConfig) bool {
+func (w *windowsResourceUsageGatherer) IsEnabled(config *measurement.Config) bool {
 	return true
 }
 
@@ -92,7 +92,7 @@ func convertToMemoryPerfData(samples []*model.Sample) *measurementutil.PerfData
 	return perfData
 }
 
-func getSummary(query string, converter convertFunc, metricsName string, executor QueryExecutor, config *measurement.MeasurementConfig) (measurement.Summary, error) {
+func getSummary(query string, converter convertFunc, metricsName string, executor QueryExecutor, config *measurement.Config) (measurement.Summary, error) {
 	samples, err := executor.Query(query, time.Now())
 	if err != nil {
 		return nil, err
@@ -109,7 +109,7 @@ func getSummary(query string, converter convertFunc, metricsName string, executo
 }
 
 // Gather gathers the metrics and convert to json summary
-func (w *windowsResourceUsageGatherer) Gather(executor QueryExecutor, startTime time.Time, config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (w *windowsResourceUsageGatherer) Gather(executor QueryExecutor, startTime time.Time, config *measurement.Config) ([]measurement.Summary, error) {
 	cpuSummary, err := getSummary(cpuUsageQueryTop10, convertToCPUPerfData, cpuUsageMetricsName, executor, config)
 	if err != nil {
 		return nil, err
diff --git a/clusterloader2/pkg/measurement/common/system_pod_metrics.go b/clusterloader2/pkg/measurement/common/system_pod_metrics.go
index b735424d6..c9a902a9e 100644
--- a/clusterloader2/pkg/measurement/common/system_pod_metrics.go
+++ b/clusterloader2/pkg/measurement/common/system_pod_metrics.go
@@ -73,7 +73,7 @@ type systemPodsMetrics struct {
 }
 
 // Execute gathers and prints system pod metrics.
-func (m *systemPodMetricsMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (m *systemPodMetricsMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	systemPodMetricsEnabled, err := util.GetBoolOrDefault(config.Params, systemPodMetricsEnabledFlagName, false)
 	if err != nil {
 		return nil, err
@@ -120,7 +120,7 @@ func (m *systemPodMetricsMeasurement) Execute(config *measurement.MeasurementCon
 	}
 }
 
-func getPodMetrics(config *measurement.MeasurementConfig) (*systemPodsMetrics, error) {
+func getPodMetrics(config *measurement.Config) (*systemPodsMetrics, error) {
 	klog.Info("collecting system pod metrics...")
 	lst, err := getPodList(config.ClusterFramework.GetClientSets().GetClient())
 	if err != nil {
@@ -165,7 +165,7 @@ func subtractInitialRestartCounts(metrics *systemPodsMetrics, initMetrics *syste
 	}
 }
 
-func validateRestartCounts(metrics *systemPodsMetrics, config *measurement.MeasurementConfig, overrides map[string]int) error {
+func validateRestartCounts(metrics *systemPodsMetrics, config *measurement.Config, overrides map[string]int) error {
 	enabled, err := util.GetBoolOrDefault(config.Params, enableRestartCountCheckFlagName, false)
 	if err != nil {
 		return err
@@ -215,7 +215,7 @@ using golang map format (for example "map[c1:4 c2:8]"), but it would require imp
for such format. It would also introduce a dependency on golang map serialization
format, which might break clusterloader if format ever changes.
*/
-func getThresholdOverrides(config *measurement.MeasurementConfig) (map[string]int, error) {
+func getThresholdOverrides(config *measurement.Config) (map[string]int, error) {
 	serialized, err := util.GetStringOrDefault(config.Params, restartThresholdOverridesFlagName, "")
 	if err != nil {
 		return make(map[string]int), nil
diff --git a/clusterloader2/pkg/measurement/common/system_pod_metrics_test.go b/clusterloader2/pkg/measurement/common/system_pod_metrics_test.go
index ac703ff47..0defb0bc1 100644
--- a/clusterloader2/pkg/measurement/common/system_pod_metrics_test.go
+++ b/clusterloader2/pkg/measurement/common/system_pod_metrics_test.go
@@ -64,7 +64,7 @@ func Test_validateRestartCounts(t *testing.T) {
 	tests := []struct {
 		name    string
 		metrics *systemPodsMetrics
-		config  *measurement.MeasurementConfig
+		config  *measurement.Config
 		wantErr bool
 	}{
 		{
@@ -147,12 +147,12 @@ func generatePodMetrics(podName string, contName string, restartCount int32) *sy
 	}
 }
 
-func buildConfig(t *testing.T, checkEnabled bool, thresholdOverrides map[string]int) *measurement.MeasurementConfig {
+func buildConfig(t *testing.T, checkEnabled bool, thresholdOverrides map[string]int) *measurement.Config {
 	serializedOverrides, err := yaml.Marshal(thresholdOverrides)
 	if err != nil {
 		t.Fatal(err)
 	}
-	return &measurement.MeasurementConfig{
+	return &measurement.Config{
 		Params: map[string]interface{}{
 			"enableRestartCountCheck":        checkEnabled,
 			"restartCountThresholdOverrides": string(serializedOverrides),
diff --git a/clusterloader2/pkg/measurement/common/timer.go b/clusterloader2/pkg/measurement/common/timer.go
index b72049088..11bf1b65e 100644
--- a/clusterloader2/pkg/measurement/common/timer.go
+++ b/clusterloader2/pkg/measurement/common/timer.go
@@ -54,7 +54,7 @@ type timer struct {
 // and collects time duration between start and stop.
 // Both start and stop actions require label parameter to be provided.
 // Gather action logs a measurement for all collected phases durations.
-func (t *timer) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (t *timer) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	action, err := util.GetString(config.Params, "action")
 	if err != nil {
 		return nil, err
diff --git a/clusterloader2/pkg/measurement/common/wait_for_controlled_pods.go b/clusterloader2/pkg/measurement/common/wait_for_controlled_pods.go
index 88f919821..01d644d79 100644
--- a/clusterloader2/pkg/measurement/common/wait_for_controlled_pods.go
+++ b/clusterloader2/pkg/measurement/common/wait_for_controlled_pods.go
@@ -59,7 +59,7 @@ func createWaitForControlledPodsRunningMeasurement() measurement.Measurement {
 	return &waitForControlledPodsRunningMeasurement{
 		selector:   measurementutil.NewObjectSelector(),
 		queue:      workerqueue.NewWorkerQueue(waitForControlledPodsWorkers),
-		checkerMap: checker.NewCheckerMap(),
+		checkerMap: checker.NewMap(),
 	}
 }
 
@@ -75,7 +75,7 @@ type waitForControlledPodsRunningMeasurement struct {
 	lock                  sync.Mutex
 	opResourceVersion     uint64
 	gvr                   schema.GroupVersionResource
-	checkerMap            checker.CheckerMap
+	checkerMap            checker.Map
 	clusterFramework      *framework.Framework
 	checkIfPodsAreUpdated bool
 }
@@ -85,7 +85,7 @@ type waitForControlledPodsRunningMeasurement struct {
 // If namespace is not passed by parameter, all-namespace scope is assumed.
 // "Start" action starts observation of the controlling objects, while "gather" waits for until
 // specified number of controlling objects have all pods running.
-func (w *waitForControlledPodsRunningMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (w *waitForControlledPodsRunningMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	w.clusterFramework = config.ClusterFramework
 
 	action, err := util.GetString(config.Params, "action")
diff --git a/clusterloader2/pkg/measurement/common/wait_for_nodes.go b/clusterloader2/pkg/measurement/common/wait_for_nodes.go
index 129423424..d7d29e286 100644
--- a/clusterloader2/pkg/measurement/common/wait_for_nodes.go
+++ b/clusterloader2/pkg/measurement/common/wait_for_nodes.go
@@ -46,7 +46,7 @@ type waitForNodesMeasurement struct{}
 // Execute waits until desired number of Nodes are ready or until a
 // timeout happens. Nodes can be optionally specified by field and/or label
 // selectors.
-func (w *waitForNodesMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (w *waitForNodesMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	minNodeCount, maxNodeCount, err := getMinMaxDesiredNodeCount(config.Params)
 	if err != nil {
 		return nil, err
diff --git a/clusterloader2/pkg/measurement/common/wait_for_pods.go b/clusterloader2/pkg/measurement/common/wait_for_pods.go
index 84218b400..c5f3baf9e 100644
--- a/clusterloader2/pkg/measurement/common/wait_for_pods.go
+++ b/clusterloader2/pkg/measurement/common/wait_for_pods.go
@@ -46,7 +46,7 @@ type waitForRunningPodsMeasurement struct{}
 // Execute waits until desired number of pods are running or until timeout happens.
 // Pods can be specified by field and/or label selectors.
 // If namespace is not passed by parameter, all-namespace scope is assumed.
-func (w *waitForRunningPodsMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (w *waitForRunningPodsMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	desiredPodCount, err := util.GetInt(config.Params, "desiredPodCount")
 	if err != nil {
 		return nil, err
diff --git a/clusterloader2/pkg/measurement/common/wait_for_pvcs.go b/clusterloader2/pkg/measurement/common/wait_for_pvcs.go
index 647f9e3ba..83645cdb1 100644
--- a/clusterloader2/pkg/measurement/common/wait_for_pvcs.go
+++ b/clusterloader2/pkg/measurement/common/wait_for_pvcs.go
@@ -46,7 +46,7 @@ type waitForBoundPVCsMeasurement struct{}
 // Execute waits until desired number of PVCs are bound or until timeout happens.
 // PVCs can be specified by field and/or label selectors.
 // If namespace is not passed by parameter, all-namespace scope is assumed.
-func (w *waitForBoundPVCsMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (w *waitForBoundPVCsMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	desiredPVCCount, err := util.GetInt(config.Params, "desiredPVCCount")
 	if err != nil {
 		return nil, err
diff --git a/clusterloader2/pkg/measurement/common/wait_for_pvs.go b/clusterloader2/pkg/measurement/common/wait_for_pvs.go
index 2fe674d5d..c7ee2e789 100644
--- a/clusterloader2/pkg/measurement/common/wait_for_pvs.go
+++ b/clusterloader2/pkg/measurement/common/wait_for_pvs.go
@@ -45,7 +45,7 @@ type waitForAvailablePVsMeasurement struct{}
 
 // Execute waits until desired number of PVs are Available or until timeout happens.
 // PVs can be specified by field and/or label selectors.
-func (w *waitForAvailablePVsMeasurement) Execute(config *measurement.MeasurementConfig) ([]measurement.Summary, error) {
+func (w *waitForAvailablePVsMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
 	desiredPVCount, err := util.GetInt(config.Params, "desiredPVCount")
 	if err != nil {
 		return nil, err
diff --git a/clusterloader2/pkg/measurement/interface.go b/clusterloader2/pkg/measurement/interface.go
index d02a9ed5c..a83cea498 100644
--- a/clusterloader2/pkg/measurement/interface.go
+++ b/clusterloader2/pkg/measurement/interface.go
@@ -23,8 +23,8 @@ import (
 	"k8s.io/perf-tests/clusterloader2/pkg/framework"
 )
 
-// MeasurementConfig provides client and parameters required for the measurement execution.
-type MeasurementConfig struct {
+// Config provides client and parameters required for the measurement execution.
+type Config struct {
 	// ClusterFramework returns cluster framework.
 	ClusterFramework *framework.Framework
 	// PrometheusFramework returns prometheus framework.
@@ -45,7 +45,7 @@ type MeasurementConfig struct {
 // allow his/her measurement method to be registered in the measurement factory.
 // See https://github.com/kubernetes/perf-tests/blob/master/clusterloader2/docs/design.md for reference.
 type Measurement interface {
-	Execute(config *MeasurementConfig) ([]Summary, error)
+	Execute(config *Config) ([]Summary, error)
 	Dispose()
 	String() string
 }
diff --git a/clusterloader2/pkg/measurement/manager.go b/clusterloader2/pkg/measurement/manager.go
index 8ad7a3b8e..d7feede92 100644
--- a/clusterloader2/pkg/measurement/manager.go
+++ b/clusterloader2/pkg/measurement/manager.go
@@ -36,15 +36,15 @@ type measurementManager struct {
 	summaries []Summary
 }
 
-// MeasurementManager provides the interface for measurementManager
-type MeasurementManager interface {
+// Manager provides the interface for measurementManager
+type Manager interface {
 	Execute(methodName string, identifier string, params map[string]interface{}) error
 	GetSummaries() []Summary
 	Dispose()
 }
 
-// CreateMeasurementManager creates new instance of measurementManager.
-func CreateMeasurementManager(clusterFramework, prometheusFramework *framework.Framework, templateProvider *config.TemplateProvider, config *config.ClusterLoaderConfig) MeasurementManager {
+// CreateManager creates new instance of measurementManager.
+func CreateManager(clusterFramework, prometheusFramework *framework.Framework, templateProvider *config.TemplateProvider, config *config.ClusterLoaderConfig) Manager {
 	return &measurementManager{
 		clusterFramework:    clusterFramework,
 		clusterLoaderConfig: config,
@@ -61,7 +61,7 @@ func (mm *measurementManager) Execute(methodName string, identifier string, para
 	if err != nil {
 		return err
 	}
-	config := &MeasurementConfig{
+	config := &Config{
 		ClusterFramework:    mm.clusterFramework,
 		PrometheusFramework: mm.prometheusFramework,
 		Params:              params,
diff --git a/clusterloader2/pkg/measurement/measurement_executor.go b/clusterloader2/pkg/measurement/measurement_executor.go
index c6fa9d8cd..45f13bf3f 100644
--- a/clusterloader2/pkg/measurement/measurement_executor.go
+++ b/clusterloader2/pkg/measurement/measurement_executor.go
@@ -25,7 +25,7 @@ import (
 
 // Execute executes a measurement, which can be a single measurement or a wrapper for multiple measurements.
 // It throws an error if both identifier and Instances has been supplied in the config.
-func Execute(mm MeasurementManager, m *api.Measurement) error {
+func Execute(mm Manager, m *api.Measurement) error {
 	if len(m.Instances) != 0 && m.Identifier != "" {
 		return fmt.Errorf("only one of instances or identifier must be supplied. Measurement method - %s, identifier - %s, instances - %v", m.Identifier, m.Method, m.Instances)
 	}
@@ -35,7 +35,7 @@ func Execute(mm MeasurementManager, m *api.Measurement) error {
 	return executeWrapperMeasurement(mm, m)
 }
 
-func executeWrapperMeasurement(mm MeasurementManager, m *api.Measurement) error {
+func executeWrapperMeasurement(mm Manager, m *api.Measurement) error {
 	var wg wait.Group
 	errList := errors.NewErrorList()
 	for i := range m.Instances {
diff --git a/clusterloader2/pkg/measurement/util/checker/checker_map.go b/clusterloader2/pkg/measurement/util/checker/checker_map.go
index 3a2d3ab8b..f090dec89 100644
--- a/clusterloader2/pkg/measurement/util/checker/checker_map.go
+++ b/clusterloader2/pkg/measurement/util/checker/checker_map.go
@@ -21,16 +21,16 @@ type Checker interface {
 	Stop()
 }
 
-// CheckerMap is a map of Checkers.
-type CheckerMap map[string]Checker
+// Map is a map of Checkers.
+type Map map[string]Checker
 
-// NewCheckerMap creates new checker map.
-func NewCheckerMap() CheckerMap {
+// NewMap creates new checker map.
+func NewMap() Map {
 	return make(map[string]Checker)
 }
 
 // Dispose stops all checkers and cleans up the map.
-func (cm CheckerMap) Dispose() {
+func (cm Map) Dispose() {
 	for _, c := range cm {
 		c.Stop()
 	}
@@ -38,7 +38,7 @@
 }
 
 // Add adds checker to the checker map.
-func (cm CheckerMap) Add(key string, c Checker) {
+func (cm Map) Add(key string, c Checker) {
 	if old, exists := cm[key]; exists {
 		old.Stop()
 	}
@@ -46,7 +46,7 @@
 }
 
 // DeleteAndStop stops checker and deletes it if exists.
-func (cm CheckerMap) DeleteAndStop(key string) bool {
+func (cm Map) DeleteAndStop(key string) bool {
 	if old, exists := cm[key]; exists {
 		old.Stop()
 		delete(cm, key)
diff --git a/clusterloader2/pkg/measurement/util/gatherers/container_resource_gatherer.go b/clusterloader2/pkg/measurement/util/gatherers/container_resource_gatherer.go
index dbd25714f..57309730f 100644
--- a/clusterloader2/pkg/measurement/util/gatherers/container_resource_gatherer.go
+++ b/clusterloader2/pkg/measurement/util/gatherers/container_resource_gatherer.go
@@ -226,7 +226,7 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int) (*Resour
 			usage := data[perc][name]
 			summary[strconv.Itoa(perc)] = append(summary[strconv.Itoa(perc)], util.SingleContainerSummary{
 				Name: name,
-				Cpu:  usage.CPUUsageInCores,
+				CPU:  usage.CPUUsageInCores,
 				Mem:  usage.MemoryWorkingSetInBytes,
 			})
 		}
diff --git a/clusterloader2/pkg/measurement/util/kubemark/kubemark.go b/clusterloader2/pkg/measurement/util/kubemark/kubemark.go
index 408b6547d..730dc2be1 100644
--- a/clusterloader2/pkg/measurement/util/kubemark/kubemark.go
+++ b/clusterloader2/pkg/measurement/util/kubemark/kubemark.go
@@ -25,8 +25,8 @@ import (
 	"k8s.io/perf-tests/clusterloader2/pkg/measurement/util"
 )
 
-// KubemarkResourceUsage represents resources used by the kubemark.
-type KubemarkResourceUsage struct {
+// ResourceUsage represents resources used by the kubemark.
+type ResourceUsage struct {
 	Name                    string
 	MemoryWorkingSetInBytes uint64
 	CPUUsageInCores         float64
@@ -42,8 +42,8 @@ func getMasterUsageByPrefix(host, provider, prefix string) (string, error) {
 
 // GetKubemarkMasterComponentsResourceUsage returns resource usage of the kubemark components.
 // TODO: figure out how to move this to kubemark directory (need to factor test SSH out of e2e framework)
-func GetKubemarkMasterComponentsResourceUsage(host, provider string) map[string]*KubemarkResourceUsage {
-	result := make(map[string]*KubemarkResourceUsage)
+func GetKubemarkMasterComponentsResourceUsage(host, provider string) map[string]*ResourceUsage {
+	result := make(map[string]*ResourceUsage)
 	// Get kubernetes component resource usage
 	sshResult, err := getMasterUsageByPrefix(host, provider, "kube")
 	if err != nil {
@@ -59,7 +59,7 @@ func GetKubemarkMasterComponentsResourceUsage(host, provider string) map[string]
 		if name != "" {
 			// Gatherer expects pod_name/container_name format
 			fullName := name + "/" + name
-			result[fullName] = &KubemarkResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100}
+			result[fullName] = &ResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100}
 		}
 	}
 	// Get etcd resource usage
@@ -82,7 +82,7 @@ func GetKubemarkMasterComponentsResourceUsage(host, provider string) map[string]
 		if etcdKind != "" {
 			// Gatherer expects pod_name/container_name format
 			fullName := "etcd/" + etcdKind
-			result[fullName] = &KubemarkResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100}
+			result[fullName] = &ResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100}
 		}
 	}
 	return result
diff --git a/clusterloader2/pkg/measurement/util/resource.go b/clusterloader2/pkg/measurement/util/resource.go
index 074408cfb..e677f8e9d 100644
--- a/clusterloader2/pkg/measurement/util/resource.go
+++ b/clusterloader2/pkg/measurement/util/resource.go
@@ -39,7 +39,7 @@ type ResourceUsagePerContainer map[string]*ContainerResourceUsage
 
 // UsageDataPerContainer contains resource usage data series.
 type UsageDataPerContainer struct {
-	CpuData        []float64
+	CPUData        []float64
 	MemUseData     []uint64
 	MemWorkSetData []uint64
 }
@@ -53,7 +53,7 @@ type ResourceConstraint struct {
 // SingleContainerSummary is a resource usage summary for a single container.
 type SingleContainerSummary struct {
 	Name string
-	Cpu  float64
+	CPU  float64
 	Mem  uint64
 }
 
@@ -73,18 +73,18 @@ func ComputePercentiles(timeSeries []ResourceUsagePerContainer, percentilesToCom
 		for name, data := range timeSeries[i] {
 			if dataMap[name] == nil {
 				dataMap[name] = &UsageDataPerContainer{
-					CpuData:        make([]float64, 0, len(timeSeries)),
+					CPUData:        make([]float64, 0, len(timeSeries)),
 					MemUseData:     make([]uint64, 0, len(timeSeries)),
 					MemWorkSetData: make([]uint64, 0, len(timeSeries)),
 				}
 			}
-			dataMap[name].CpuData = append(dataMap[name].CpuData, data.CPUUsageInCores)
+			dataMap[name].CPUData = append(dataMap[name].CPUData, data.CPUUsageInCores)
 			dataMap[name].MemUseData = append(dataMap[name].MemUseData, data.MemoryUsageInBytes)
 			dataMap[name].MemWorkSetData = append(dataMap[name].MemWorkSetData, data.MemoryWorkingSetInBytes)
 		}
 	}
 	for _, v := range dataMap {
-		sort.Float64s(v.CpuData)
+		sort.Float64s(v.CPUData)
 		sort.Sort(uint64arr(v.MemUseData))
 		sort.Sort(uint64arr(v.MemWorkSetData))
 	}
@@ -93,10 +93,10 @@ func ComputePercentiles(timeSeries []ResourceUsagePerContainer, percentilesToCom
 	for _, perc := range percentilesToCompute {
 		data := make(ResourceUsagePerContainer)
 		for k, v := range dataMap {
-			percentileIndex := int(math.Ceil(float64(len(v.CpuData)*perc)/100)) - 1
+			percentileIndex := int(math.Ceil(float64(len(v.CPUData)*perc)/100)) - 1
 			data[k] = &ContainerResourceUsage{
 				Name:                    k,
-				CPUUsageInCores:         v.CpuData[percentileIndex],
+				CPUUsageInCores:         v.CPUData[percentileIndex],
 				MemoryUsageInBytes:      v.MemUseData[percentileIndex],
 				MemoryWorkingSetInBytes: v.MemWorkSetData[percentileIndex],
 			}
diff --git a/clusterloader2/pkg/measurement/util/ssh.go b/clusterloader2/pkg/measurement/util/ssh.go
index abf5ea932..511c8e1c1 100644
--- a/clusterloader2/pkg/measurement/util/ssh.go
+++ b/clusterloader2/pkg/measurement/util/ssh.go
@@ -28,11 +28,11 @@ import (
 
 // GetMasterHost turns host name (without prefix and port).
 func GetMasterHost(host string) (string, error) {
-	masterUrl, err := url.Parse(host)
+	masterURL, err := url.Parse(host)
 	if err != nil {
 		return "", err
 	}
-	return masterUrl.Hostname(), nil
+	return masterURL.Hostname(), nil
 }
 
 // SSHResult represents result of ssh command.
diff --git a/clusterloader2/pkg/prometheus/experimental.go b/clusterloader2/pkg/prometheus/experimental.go
index 188ac93cc..8e6d6a630 100644
--- a/clusterloader2/pkg/prometheus/experimental.go
+++ b/clusterloader2/pkg/prometheus/experimental.go
@@ -45,7 +45,7 @@ var (
 	prometheusDiskSnapshotName = pflag.String("experimental-prometheus-disk-snapshot-name", "", "Name of the prometheus disk snapshot that will be created if snapshots are enabled. If not set, the prometheus disk name will be used.")
 )
 
-func (pc *PrometheusController) isEnabled() (bool, error) {
+func (pc *Controller) isEnabled() (bool, error) {
 	if !*shouldSnapshotPrometheusDisk {
 		return false, nil
 	}
@@ -56,7 +56,7 @@
 	return true, nil
 }
 
-func (pc *PrometheusController) cachePrometheusDiskMetadataIfEnabled() error {
+func (pc *Controller) cachePrometheusDiskMetadataIfEnabled() error {
 	if enabled, err := pc.isEnabled(); !enabled {
 		return err
 	}
@@ -66,7 +66,7 @@
 		pc.tryRetrievePrometheusDiskMetadata)
 }
 
-func (pc *PrometheusController) tryRetrievePrometheusDiskMetadata() (bool, error) {
+func (pc *Controller) tryRetrievePrometheusDiskMetadata() (bool, error) {
 	klog.Info("Retrieving Prometheus' persistent disk metadata...")
 	k8sClient := pc.framework.GetClientSets().GetClient()
 	list, err := k8sClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
@@ -104,7 +104,7 @@
 	return true, nil
 }
 
-func (pc *PrometheusController) snapshotPrometheusDiskIfEnabled() error {
+func (pc *Controller) snapshotPrometheusDiskIfEnabled() error {
 	if enabled, err := pc.isEnabled(); !enabled {
 		return err
 	}
@@ -133,7 +133,7 @@
 	})
 }
 
-func (pc *PrometheusController) trySnapshotPrometheusDisk(pdName, snapshotName, zone string) error {
+func (pc *Controller) trySnapshotPrometheusDisk(pdName, snapshotName, zone string) error {
 	klog.Info("Trying to snapshot Prometheus' persistent disk...")
 	project := pc.clusterLoaderConfig.PrometheusConfig.SnapshotProject
 	if project == "" {
@@ -152,7 +152,7 @@
 	return err
 }
 
-func (pc *PrometheusController) deletePrometheusDiskIfEnabled() error {
+func (pc *Controller) deletePrometheusDiskIfEnabled() error {
 	if enabled, err := pc.isEnabled(); !enabled {
 		return err
 	}
@@ -172,7 +172,7 @@
 	})
 }
 
-func (pc *PrometheusController) tryDeletePrometheusDisk(pdName, zone string) error {
+func (pc *Controller) tryDeletePrometheusDisk(pdName, zone string) error {
 	klog.Info("Trying to delete Prometheus' persistent disk...")
 	project := pc.clusterLoaderConfig.PrometheusConfig.SnapshotProject
 	if project == "" {
diff --git a/clusterloader2/pkg/prometheus/prometheus.go b/clusterloader2/pkg/prometheus/prometheus.go
index e5b9d9866..85b3d87c6 100644
--- a/clusterloader2/pkg/prometheus/prometheus.go
+++ b/clusterloader2/pkg/prometheus/prometheus.go
@@ -61,9 +61,9 @@ func InitFlags(p *config.PrometheusConfig) {
 	flags.StringEnvVar(&p.SnapshotProject, "experimental-snapshot-project", "PROJECT", "", "GCP project used where disks and snapshots are located.")
 }
 
-// PrometheusController is a util for managing (setting up / tearing down) the prometheus stack in
+// Controller is a util for managing (setting up / tearing down) the prometheus stack in
 // the cluster.
-type PrometheusController struct {
+type Controller struct {
 	clusterLoaderConfig *config.ClusterLoaderConfig
 	// provider is the cloud provider derived from the --provider flag.
 	provider string
@@ -78,9 +78,9 @@ type PrometheusController struct {
 	ssh util.SSHExecutor
 }
 
-// NewPrometheusController creates a new instance of PrometheusController for the given config.
-func NewPrometheusController(clusterLoaderConfig *config.ClusterLoaderConfig) (pc *PrometheusController, err error) { - pc = &PrometheusController{ +// NewController creates a new instance of Controller for the given config. +func NewController(clusterLoaderConfig *config.ClusterLoaderConfig) (pc *Controller, err error) { + pc = &Controller{ clusterLoaderConfig: clusterLoaderConfig, provider: clusterLoaderConfig.ClusterConfig.Provider, } @@ -134,7 +134,7 @@ func NewPrometheusController(clusterLoaderConfig *config.ClusterLoaderConfig) (p // SetUpPrometheusStack sets up prometheus stack in the cluster. // This method is idempotent, if the prometheus stack is already set up applying the manifests // again will be no-op. -func (pc *PrometheusController) SetUpPrometheusStack() error { +func (pc *Controller) SetUpPrometheusStack() error { k8sClient := pc.framework.GetClientSets().GetClient() klog.Info("Setting up prometheus stack") @@ -185,7 +185,7 @@ func (pc *PrometheusController) SetUpPrometheusStack() error { } // TearDownPrometheusStack tears down prometheus stack, releasing all prometheus resources. -func (pc *PrometheusController) TearDownPrometheusStack() error { +func (pc *Controller) TearDownPrometheusStack() error { // Get disk metadata again to be sure if err := pc.cachePrometheusDiskMetadataIfEnabled(); err != nil { klog.Warningf("Error while caching prometheus disk metadata: %v", err) @@ -208,17 +208,17 @@ func (pc *PrometheusController) TearDownPrometheusStack() error { } // GetFramework returns prometheus framework. -func (pc *PrometheusController) GetFramework() *framework.Framework { +func (pc *Controller) GetFramework() *framework.Framework { return pc.framework } -func (pc *PrometheusController) applyManifests(manifestGlob string) error { +func (pc *Controller) applyManifests(manifestGlob string) error { return pc.framework.ApplyTemplatedManifests( manifestGlob, pc.templateMapping, client.Retry(apierrs.IsNotFound)) } // exposeAPIServerMetrics configures anonymous access to the apiserver metrics. -func (pc *PrometheusController) exposeAPIServerMetrics() error { +func (pc *Controller) exposeAPIServerMetrics() error { klog.Info("Exposing kube-apiserver metrics in the cluster") // We need to get a client to the cluster where the test is being executed on, // not the cluster that the prometheus is running in. Usually, there is only @@ -260,7 +260,7 @@ func (pc *PrometheusController) exposeAPIServerMetrics() error { // runNodeExporter adds node-exporter as master's static manifest pod. // TODO(mborsz): Consider migrating to something less ugly, e.g. daemonset-based approach, // when master nodes have configured networking. 
-func (pc *PrometheusController) runNodeExporter() error {
+func (pc *Controller) runNodeExporter() error {
 	klog.Infof("Starting node-exporter on master nodes.")
 	kubemarkFramework, err := framework.NewFramework(&pc.clusterLoaderConfig.ClusterConfig, numK8sClients)
 	if err != nil {
@@ -297,7 +297,7 @@ func (pc *PrometheusController) runNodeExporter() error {
 	return g.Wait()
 }
 
-func (pc *PrometheusController) waitForPrometheusToBeHealthy() error {
+func (pc *Controller) waitForPrometheusToBeHealthy() error {
 	klog.Info("Waiting for Prometheus stack to become healthy...")
 	return wait.Poll(
 		checkPrometheusReadyInterval,
@@ -305,7 +305,7 @@ func (pc *PrometheusController) waitForPrometheusToBeHealthy() error {
 		pc.isPrometheusReady)
 }
 
-func (pc *PrometheusController) isPrometheusReady() (bool, error) {
+func (pc *Controller) isPrometheusReady() (bool, error) {
 	// TODO(mm4tt): Re-enable kube-proxy monitoring and expect more targets.
 	// This is a safeguard from a race condition where the prometheus server is started before
 	// targets are registered. These 4 targets are always expected, in all possible configurations:
@@ -341,7 +341,7 @@ func retryCreateFunction(f func() error) error {
 		client.RetryFunction(f, client.Allow(apierrs.IsAlreadyExists)))
 }
 
-func (pc *PrometheusController) isKubemark() bool {
+func (pc *Controller) isKubemark() bool {
 	return pc.provider == "kubemark"
 }
 
diff --git a/clusterloader2/pkg/state/namespaces_state.go b/clusterloader2/pkg/state/namespaces_state.go
index e74542f3f..61b54a88c 100644
--- a/clusterloader2/pkg/state/namespaces_state.go
+++ b/clusterloader2/pkg/state/namespaces_state.go
@@ -34,28 +34,28 @@ type InstancesState struct {
 type InstancesIdentifier struct {
 	Basename   string
 	ObjectKind string
-	ApiGroup   string
+	APIGroup   string
 }
 
 // namespaceState represents state of a single namespace.
 type namespaceState map[InstancesIdentifier]*InstancesState
 
-// namespacesState represents state of all used namespaces.
-type namespacesState struct {
+// NamespacesState represents state of all used namespaces.
+type NamespacesState struct {
 	lock            sync.RWMutex
 	namespaceStates map[string]namespaceState
 }
 
 // newNamespacesState creates new namespaces state.
-func newNamespacesState() *namespacesState {
-	return &namespacesState{
+func newNamespacesState() *NamespacesState {
+	return &NamespacesState{
 		namespaceStates: make(map[string]namespaceState),
 	}
 }
 
 // Get returns state of object instances -
 // number of existing replicas and its configuration.
-func (ns *namespacesState) Get(namespace string, identifier InstancesIdentifier) (*InstancesState, bool) {
+func (ns *NamespacesState) Get(namespace string, identifier InstancesIdentifier) (*InstancesState, bool) {
 	ns.lock.RLock()
 	defer ns.lock.RUnlock()
 	namespaceState, exists := ns.namespaceStates[namespace]
@@ -68,7 +68,7 @@ func (ns *namespacesState) Get(namespace string, identifier InstancesIdentifier)
 
 // Set stores information about object instances state
 // to test state.
-func (ns *namespacesState) Set(namespace string, identifier InstancesIdentifier, instances *InstancesState) {
+func (ns *NamespacesState) Set(namespace string, identifier InstancesIdentifier, instances *InstancesState) {
 	ns.lock.Lock()
 	defer ns.lock.Unlock()
 	_, exists := ns.namespaceStates[namespace]
@@ -81,7 +81,7 @@ func (ns *namespacesState) Set(namespace string, identifier InstancesIdentifier,
 
 // Delete removes information about given instances.
-// It there is no information for given object it is assumed that
+// If there is no information for given object it is assumed that
 // there are no object replicas.
-func (ns *namespacesState) Delete(namespace string, identifier InstancesIdentifier) error {
+func (ns *NamespacesState) Delete(namespace string, identifier InstancesIdentifier) error {
 	ns.lock.Lock()
 	defer ns.lock.Unlock()
 	namespaceState, exists := ns.namespaceStates[namespace]
diff --git a/clusterloader2/pkg/state/resource_state.go b/clusterloader2/pkg/state/resource_state.go
index f538d5d01..554911b83 100644
--- a/clusterloader2/pkg/state/resource_state.go
+++ b/clusterloader2/pkg/state/resource_state.go
@@ -25,26 +25,26 @@ import (
 // ResourceTypeIdentifier is a unique identifier for a resource type.
 type ResourceTypeIdentifier struct {
 	ObjectKind string
-	ApiGroup   string
+	APIGroup   string
 }
 
-// resourcesVersionsState represents most recent resources versions for a given object types.
-// These versions are available only for resource types of objects that were create/updated
+// ResourcesVersionsState represents the most recent resource versions for given object types.
+// These versions are available only for resource types of objects that were created/updated
 // by the clusterloader.
-type resourcesVersionsState struct {
+type ResourcesVersionsState struct {
 	lock              sync.RWMutex
 	resourcesVersions map[ResourceTypeIdentifier]uint64
 }
 
-// newResourcesVersionState creates new resources versions state.
-func newResourcesVersionsState() *resourcesVersionsState {
-	return &resourcesVersionsState{
+// newResourcesVersionsState creates a new resources versions state.
+func newResourcesVersionsState() *ResourcesVersionsState {
+	return &ResourcesVersionsState{
 		resourcesVersions: make(map[ResourceTypeIdentifier]uint64),
 	}
 }
 
 // Get returns state of current resource version.
-func (rs *resourcesVersionsState) Get(identifier ResourceTypeIdentifier) (string, bool) {
+func (rs *ResourcesVersionsState) Get(identifier ResourceTypeIdentifier) (string, bool) {
 	rs.lock.RLock()
 	defer rs.lock.RUnlock()
 	version, exists := rs.resourcesVersions[identifier]
@@ -55,7 +55,7 @@ func (rs *resourcesVersionsState) Get(identifier ResourceTypeIdentifier) (string
 }
 
 // Set stores information about current resource version.
-func (rs *resourcesVersionsState) Set(identifier ResourceTypeIdentifier, resourceVersion string) error {
+func (rs *ResourcesVersionsState) Set(identifier ResourceTypeIdentifier, resourceVersion string) error {
 	rs.lock.Lock()
 	defer rs.lock.Unlock()
 	version, err := strconv.ParseUint(resourceVersion, 10, 64)
diff --git a/clusterloader2/pkg/state/state.go b/clusterloader2/pkg/state/state.go
index 29ddc83df..380824373 100644
--- a/clusterloader2/pkg/state/state.go
+++ b/clusterloader2/pkg/state/state.go
@@ -19,8 +19,8 @@ package state
 // State is a state of the cluster.
 // It is composed of namespaces state and resources versions state.
 type State struct {
-	namespacesState       *namespacesState
-	resourcesVersionState *resourcesVersionsState
+	namespacesState       *NamespacesState
+	resourcesVersionState *ResourcesVersionsState
 }
 
 // NewState creates new State instance.
@@ -32,11 +32,11 @@ func NewState() *State {
 	}
 }
 
 // GetNamespacesState returns namespaces state.
-func (s *State) GetNamespacesState() *namespacesState {
+func (s *State) GetNamespacesState() *NamespacesState {
 	return s.namespacesState
 }
 
 // GetResourcesVersionState returns resources versions state.
-func (s *State) GetResourcesVersionState() *resourcesVersionsState {
+func (s *State) GetResourcesVersionState() *ResourcesVersionsState {
 	return s.resourcesVersionState
 }
diff --git a/clusterloader2/pkg/test/interface.go b/clusterloader2/pkg/test/interface.go
index 913c1b72c..23308263e 100644
--- a/clusterloader2/pkg/test/interface.go
+++ b/clusterloader2/pkg/test/interface.go
@@ -34,13 +34,13 @@ type CreatContextFunc func(c *config.ClusterLoaderConfig, f *framework.Framework
 type OperationType int
 
 const (
-	// CREATE_OBJECT is create object operation.
-	CREATE_OBJECT = OperationType(0)
-	// PATCH_OBJECT is update object (using patch) operation.
+	// createObject is the create object operation.
+	createObject = OperationType(0)
+	// patchObject is the update object (using patch) operation.
 	// TODO(krzysied): Figure out how to implement UPDATE_OBJECT operation.
-	PATCH_OBJECT = OperationType(1)
-	// DELETE_OBJECT is delete object operation.
-	DELETE_OBJECT = OperationType(2)
+	patchObject = OperationType(1)
+	// deleteObject is the delete object operation.
+	deleteObject = OperationType(2)
 )
 
 // Context is an interface for test context.
@@ -52,13 +52,13 @@
 	GetState() *state.State
 	GetTemplateMappingCopy() map[string]interface{}
 	GetTemplateProvider() *config.TemplateProvider
-	GetTuningSetFactory() tuningset.TuningSetFactory
-	GetMeasurementManager() measurement.MeasurementManager
+	GetFactory() tuningset.Factory
+	GetManager() measurement.Manager
 	GetChaosMonkey() *chaos.Monkey
 }
 
-// TestExecutor is an interface for test executing object.
-type TestExecutor interface {
+// Executor is an interface for a test executing object.
+type Executor interface {
 	ExecuteTest(ctx Context, conf *api.Config) *errors.ErrorList
 	ExecuteStep(ctx Context, step *api.Step) *errors.ErrorList
 	ExecutePhase(ctx Context, phase *api.Phase) *errors.ErrorList
diff --git a/clusterloader2/pkg/test/simple_context.go b/clusterloader2/pkg/test/simple_context.go
index 5c9f1d2a2..c24a1d05f 100644
--- a/clusterloader2/pkg/test/simple_context.go
+++ b/clusterloader2/pkg/test/simple_context.go
@@ -35,8 +35,8 @@ type simpleContext struct {
 	state            *state.State
 	templateMapping  map[string]interface{}
 	templateProvider *config.TemplateProvider
-	tuningSetFactory   tuningset.TuningSetFactory
-	measurementManager measurement.MeasurementManager
+	tuningSetFactory   tuningset.Factory
+	measurementManager measurement.Manager
 	chaosMonkey *chaos.Monkey
 }
 
@@ -49,8 +49,8 @@ func createSimpleContext(c *config.ClusterLoaderConfig, f, p *framework.Framewor
 		state:            s,
 		templateMapping:  util.CloneMap(templateMapping),
 		templateProvider: templateProvider,
-		tuningSetFactory:   tuningset.NewTuningSetFactory(),
-		measurementManager: measurement.CreateMeasurementManager(f, p, templateProvider, c),
+		tuningSetFactory:   tuningset.NewFactory(),
+		measurementManager: measurement.CreateManager(f, p, templateProvider, c),
 		chaosMonkey: chaos.NewMonkey(f.GetClientSets().GetClient(), c.ClusterConfig.Provider),
 	}
 }
@@ -86,12 +86,12 @@ func (sc *simpleContext) GetTemplateMappingCopy() map[string]interface{} {
 }
 
-// GetTickerFactory returns tuning set factory.
-func (sc *simpleContext) GetTuningSetFactory() tuningset.TuningSetFactory {
+// GetFactory returns tuning set factory.
+func (sc *simpleContext) GetFactory() tuningset.Factory {
 	return sc.tuningSetFactory
 }
 
-// GetMeasurementManager returns measurement manager.
-func (sc *simpleContext) GetMeasurementManager() measurement.MeasurementManager {
+// GetManager returns measurement manager. 
+func (sc *simpleContext) GetManager() measurement.Manager { return sc.measurementManager } diff --git a/clusterloader2/pkg/test/simple_test_executor.go b/clusterloader2/pkg/test/simple_test_executor.go index ae03149e7..af6e9c6c6 100644 --- a/clusterloader2/pkg/test/simple_test_executor.go +++ b/clusterloader2/pkg/test/simple_test_executor.go @@ -44,19 +44,19 @@ const ( namespacePlaceholder = "Namespace" ) -type simpleTestExecutor struct{} +type simpleExecutor struct{} -func createSimpleTestExecutor() TestExecutor { - return &simpleTestExecutor{} +func createSimpleExecutor() Executor { + return &simpleExecutor{} } // ExecuteTest executes test based on provided configuration. -func (ste *simpleTestExecutor) ExecuteTest(ctx Context, conf *api.Config) *errors.ErrorList { +func (ste *simpleExecutor) ExecuteTest(ctx Context, conf *api.Config) *errors.ErrorList { ctx.GetClusterFramework().SetAutomanagedNamespacePrefix(conf.Namespace.Prefix) klog.Infof("AutomanagedNamespacePrefix: %s", ctx.GetClusterFramework().GetAutomanagedNamespacePrefix()) defer cleanupResources(ctx, conf) - ctx.GetTuningSetFactory().Init(conf.TuningSets) + ctx.GetFactory().Init(conf.TuningSets) stopCh := make(chan struct{}) chaosMonkeyWaitGroup, err := ctx.GetChaosMonkey().Init(conf.ChaosMonkey, stopCh) @@ -74,7 +74,7 @@ func (ste *simpleTestExecutor) ExecuteTest(ctx Context, conf *api.Config) *error klog.Info("Chaos monkey ended.") } - for _, summary := range ctx.GetMeasurementManager().GetSummaries() { + for _, summary := range ctx.GetManager().GetSummaries() { if ctx.GetClusterLoaderConfig().ReportDir == "" { klog.Infof("%v: %v", summary.SummaryName(), summary.SummaryContent()) } else { @@ -96,7 +96,7 @@ func (ste *simpleTestExecutor) ExecuteTest(ctx Context, conf *api.Config) *error } // ExecuteTestSteps executes all test steps provided in configuration -func (ste *simpleTestExecutor) ExecuteTestSteps(ctx Context, conf *api.Config) *errors.ErrorList { +func (ste *simpleExecutor) ExecuteTestSteps(ctx Context, conf *api.Config) *errors.ErrorList { automanagedNamespacesList, staleNamespaces, err := ctx.GetClusterFramework().ListAutomanagedNamespaces() if err != nil { return errors.NewErrorList(fmt.Errorf("automanaged namespaces listing failed: %v", err)) @@ -130,7 +130,7 @@ func (ste *simpleTestExecutor) ExecuteTestSteps(ctx Context, conf *api.Config) * } // ExecuteStep executes single test step based on provided step configuration. -func (ste *simpleTestExecutor) ExecuteStep(ctx Context, step *api.Step) *errors.ErrorList { +func (ste *simpleExecutor) ExecuteStep(ctx Context, step *api.Step) *errors.ErrorList { if step.Name != "" { klog.Infof("Step %q started", step.Name) } @@ -141,7 +141,7 @@ func (ste *simpleTestExecutor) ExecuteStep(ctx Context, step *api.Step) *errors. // index is created to make i value unchangeable during thread execution. index := i wg.Start(func() { - err := measurement.Execute(ctx.GetMeasurementManager(), &step.Measurements[index]) + err := measurement.Execute(ctx.GetManager(), &step.Measurements[index]) if err != nil { errList.Append(fmt.Errorf("measurement call %s - %s error: %v", step.Measurements[index].Method, step.Measurements[index].Identifier, err)) } @@ -168,11 +168,11 @@ func (ste *simpleTestExecutor) ExecuteStep(ctx Context, step *api.Step) *errors. } // ExecutePhase executes single test phase based on provided phase configuration. 
-func (ste *simpleTestExecutor) ExecutePhase(ctx Context, phase *api.Phase) *errors.ErrorList { +func (ste *simpleExecutor) ExecutePhase(ctx Context, phase *api.Phase) *errors.ErrorList { // TODO: add tuning set errList := errors.NewErrorList() nsList := createNamespacesList(ctx, phase.NamespaceRange) - tuningSet, err := ctx.GetTuningSetFactory().CreateTuningSet(phase.TuningSet) + tuningSet, err := ctx.GetFactory().CreateTuningSet(phase.TuningSet) if err != nil { return errors.NewErrorList(fmt.Errorf("tuning set creation error: %v", err)) } @@ -218,7 +218,7 @@ func (ste *simpleTestExecutor) ExecutePhase(ctx Context, phase *api.Phase) *erro actions = append(actions, func() { for j := len(phase.ObjectBundle) - 1; j >= 0; j-- { if replicaIndex < instancesStates[j].CurrentReplicaCount { - if objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, DELETE_OBJECT); !objectErrList.IsEmpty() { + if objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, deleteObject); !objectErrList.IsEmpty() { errList.Concat(objectErrList) } } @@ -232,7 +232,7 @@ func (ste *simpleTestExecutor) ExecutePhase(ctx Context, phase *api.Phase) *erro replicaIndex := replicaCounter actions = append(actions, func() { for j := range phase.ObjectBundle { - if objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, PATCH_OBJECT); !objectErrList.IsEmpty() { + if objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, patchObject); !objectErrList.IsEmpty() { errList.Concat(objectErrList) // If error then skip this bundle break @@ -247,7 +247,7 @@ func (ste *simpleTestExecutor) ExecutePhase(ctx Context, phase *api.Phase) *erro replicaIndex := replicaCounter actions = append(actions, func() { for j := range phase.ObjectBundle { - if objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, CREATE_OBJECT); !objectErrList.IsEmpty() { + if objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, createObject); !objectErrList.IsEmpty() { errList.Concat(objectErrList) // If error then skip this bundle break @@ -271,12 +271,12 @@ func (ste *simpleTestExecutor) ExecutePhase(ctx Context, phase *api.Phase) *erro } // ExecuteObject executes single test object operation based on provided object configuration. 
-func (ste *simpleTestExecutor) ExecuteObject(ctx Context, object *api.Object, namespace string, replicaIndex int32, operation OperationType) *errors.ErrorList { +func (ste *simpleExecutor) ExecuteObject(ctx Context, object *api.Object, namespace string, replicaIndex int32, operation OperationType) *errors.ErrorList { objName := fmt.Sprintf("%v-%d", object.Basename, replicaIndex) var err error var obj *unstructured.Unstructured switch operation { - case CREATE_OBJECT, PATCH_OBJECT: + case createObject, patchObject: mapping := ctx.GetTemplateMappingCopy() if object.TemplateFillMap != nil { util.CopyMap(object.TemplateFillMap, mapping) @@ -289,7 +289,7 @@ func (ste *simpleTestExecutor) ExecuteObject(ctx Context, object *api.Object, na if err != nil && err != config.ErrorEmptyFile { return errors.NewErrorList(fmt.Errorf("reading template (%v) error: %v", object.ObjectTemplatePath, err)) } - case DELETE_OBJECT: + case deleteObject: obj, err = ctx.GetTemplateProvider().RawToObject(object.ObjectTemplatePath) if err != nil && err != config.ErrorEmptyFile { return errors.NewErrorList(fmt.Errorf("reading template (%v) for deletion error: %v", object.ObjectTemplatePath, err)) @@ -303,15 +303,15 @@ func (ste *simpleTestExecutor) ExecuteObject(ctx Context, object *api.Object, na } gvk := obj.GroupVersionKind() switch operation { - case CREATE_OBJECT: + case createObject: if err := ctx.GetClusterFramework().CreateObject(namespace, objName, obj); err != nil { errList.Append(fmt.Errorf("namespace %v object %v creation error: %v", namespace, objName, err)) } - case PATCH_OBJECT: + case patchObject: if err := ctx.GetClusterFramework().PatchObject(namespace, objName, obj); err != nil { errList.Append(fmt.Errorf("namespace %v object %v updating error: %v", namespace, objName, err)) } - case DELETE_OBJECT: + case deleteObject: if err := ctx.GetClusterFramework().DeleteObject(gvk, namespace, objName); err != nil { errList.Append(fmt.Errorf("namespace %v object %v deletion error: %v", namespace, objName, err)) } @@ -345,7 +345,7 @@ func getIdentifier(ctx Context, object *api.Object) (state.InstancesIdentifier, return state.InstancesIdentifier{ Basename: object.Basename, ObjectKind: gvk.Kind, - ApiGroup: gvk.Group, + APIGroup: gvk.Group, }, nil } @@ -374,7 +374,7 @@ func isErrsCritical(*errors.ErrorList) bool { func cleanupResources(ctx Context, conf *api.Config) { cleanupStartTime := time.Now() - ctx.GetMeasurementManager().Dispose() + ctx.GetManager().Dispose() if *conf.Namespace.DeleteAutomanagedNamespaces { if errList := ctx.GetClusterFramework().DeleteAutomanagedNamespaces(); !errList.IsEmpty() { klog.Errorf("Resource cleanup error: %v", errList.String()) diff --git a/clusterloader2/pkg/test/test.go b/clusterloader2/pkg/test/test.go index ee7635efb..1e8a8ca73 100644 --- a/clusterloader2/pkg/test/test.go +++ b/clusterloader2/pkg/test/test.go @@ -32,8 +32,8 @@ var ( CreateContext = createSimpleContext // Test is a singleton for test execution object. - // This object should be set by TestExecutor implementation. - Test = createSimpleTestExecutor() + // This object should be set by Executor implementation. + Test = createSimpleExecutor() ) // RunTest runs test based on provided test configuration. 
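Note: the renames throughout this patch follow golint's initialism rule — an abbreviation such as ID, URL, API, QPS or RPC keeps a single case across the whole identifier. A minimal illustrative sketch of the rule (standalone Go, not part of the patch):

package naming

// golint flags mixed-case initialisms; the abbreviation keeps one case
// throughout the identifier.
type serverInfo struct {
	ServerID string  // not ServerId
	APIURL   string  // not ApiUrl
	MaxQPS   float64 // not MaxQps
}

// Function and variable names follow the same rule.
func buildTestID(scenario string) string { // not buildTestId
	return scenario + "-id"
}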
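A related caveat for the field renames in this patch (SingleContainerSummary above, perfdash's resourceUsages further below): when a renamed field is marshalled to JSON or YAML, pinning the old key in a struct tag keeps previously written result files parseable, which is why those fields keep `json:"Cpu"` tags here. A standalone sketch of the pattern, using a hypothetical containerUsage type rather than any type from this repository:

package main

import (
	"encoding/json"
	"fmt"
)

// containerUsage renames the Go field Cpu -> CPU for golint, while the
// tag pins the serialized key to its historical spelling.
type containerUsage struct {
	Name string
	CPU  float64 `json:"Cpu"`
	Mem  uint64
}

func main() {
	// Data written before the rename still decodes into the new field.
	old := []byte(`{"Name":"kubelet","Cpu":0.25,"Mem":1024}`)
	var u containerUsage
	if err := json.Unmarshal(old, &u); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(u)
	fmt.Println(string(out)) // {"Name":"kubelet","Cpu":0.25,"Mem":1024}
}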
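The tuningset changes below rename QpsLoad to QPSLoad without touching behaviour: qpsLoad.Execute starts one action, sleeps 1/QPS, then starts the next, so action starts are uniformly spaced in time. A self-contained sketch of that pacing, assuming a fixed rate and substituting the standard library's sync.WaitGroup for apimachinery's wait.Group:

package main

import (
	"fmt"
	"sync"
	"time"
)

// executeAtQPS paces action starts at a fixed rate, mirroring the
// qpsLoad tuning set: start an action, sleep 1/QPS, start the next.
func executeAtQPS(qps float64, actions []func()) {
	sleep := time.Duration(float64(time.Second) / qps)
	var wg sync.WaitGroup
	for i := range actions {
		wg.Add(1)
		action := actions[i]
		go func() {
			defer wg.Done()
			action()
		}()
		time.Sleep(sleep)
	}
	wg.Wait()
}

func main() {
	actions := make([]func(), 5)
	for i := range actions {
		i := i
		actions[i] = func() { fmt.Println("started action", i) }
	}
	executeAtQPS(10, actions) // 10 QPS: one start roughly every 100ms
}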
diff --git a/clusterloader2/pkg/tuningset/interface.go b/clusterloader2/pkg/tuningset/interface.go
index 206015761..226d4a6ec 100644
--- a/clusterloader2/pkg/tuningset/interface.go
+++ b/clusterloader2/pkg/tuningset/interface.go
@@ -25,8 +25,8 @@ type TuningSet interface {
 	Execute(actions []func())
 }
 
-// TuningSetFactory is a factory that creates tuning sets.
-type TuningSetFactory interface {
+// Factory is a factory that creates tuning sets.
+type Factory interface {
 	Init(tuningSets []api.TuningSet)
 	CreateTuningSet(name string) (TuningSet, error)
 }
diff --git a/clusterloader2/pkg/tuningset/qps_load.go b/clusterloader2/pkg/tuningset/qps_load.go
index 66627dad8..3f39b0c5e 100644
--- a/clusterloader2/pkg/tuningset/qps_load.go
+++ b/clusterloader2/pkg/tuningset/qps_load.go
@@ -24,17 +24,17 @@ import (
 )
 
 type qpsLoad struct {
-	params *api.QpsLoad
+	params *api.QPSLoad
 }
 
-func newQpsLoad(params *api.QpsLoad) TuningSet {
+func newQPSLoad(params *api.QPSLoad) TuningSet {
 	return &qpsLoad{
 		params: params,
 	}
 }
 
 func (ql *qpsLoad) Execute(actions []func()) {
-	sleepDuration := time.Duration(int(float64(time.Second) / ql.params.Qps))
+	sleepDuration := time.Duration(int(float64(time.Second) / ql.params.QPS))
 	var wg wait.Group
 	for i := range actions {
 		wg.Start(actions[i])
diff --git a/clusterloader2/pkg/tuningset/randomized_load.go b/clusterloader2/pkg/tuningset/randomized_load.go
index cea7c4bf2..06f79aecc 100644
--- a/clusterloader2/pkg/tuningset/randomized_load.go
+++ b/clusterloader2/pkg/tuningset/randomized_load.go
@@ -38,12 +38,12 @@ func (rl *randomizedLoad) Execute(actions []func()) {
 	var wg wait.Group
 	for i := range actions {
 		wg.Start(actions[i])
-		time.Sleep(sleepDuration(rl.params.AverageQps))
+		time.Sleep(sleepDuration(rl.params.AverageQPS))
 	}
 	wg.Wait()
 }
 
-func sleepDuration(avgQps float64) time.Duration {
+func sleepDuration(avgQPS float64) time.Duration {
 	randomFactor := 2 * rand.Float64()
-	return time.Duration(int(randomFactor * float64(time.Second) / avgQps))
+	return time.Duration(int(randomFactor * float64(time.Second) / avgQPS))
 }
diff --git a/clusterloader2/pkg/tuningset/simple_tuning_set_factory.go b/clusterloader2/pkg/tuningset/simple_tuning_set_factory.go
index a3c957f2c..148c6cb22 100644
--- a/clusterloader2/pkg/tuningset/simple_tuning_set_factory.go
+++ b/clusterloader2/pkg/tuningset/simple_tuning_set_factory.go
@@ -22,21 +22,21 @@ import (
 	"k8s.io/perf-tests/clusterloader2/api"
 )
 
-type simpleTuningSetFactory struct {
+type simpleFactory struct {
 	tuningSetMap         map[string]*api.TuningSet
 	globalQPSLoadFactory *globalQPSLoadFactory
 }
 
-// NewTuningSetFactory creates new ticker factory.
-func NewTuningSetFactory() TuningSetFactory {
-	return &simpleTuningSetFactory{
+// NewFactory creates a new tuning set factory.
+func NewFactory() Factory {
+	return &simpleFactory{
 		tuningSetMap:         make(map[string]*api.TuningSet),
 		globalQPSLoadFactory: newGlobalQPSLoadFactory(),
 	}
 }
 
 // Init sets available tuning sets.
-func (tf *simpleTuningSetFactory) Init(tuningSets []api.TuningSet) {
+func (tf *simpleFactory) Init(tuningSets []api.TuningSet) {
 	tf.tuningSetMap = make(map[string]*api.TuningSet)
 	for i := range tuningSets {
 		tf.tuningSetMap[tuningSets[i].Name] = &tuningSets[i]
@@ -44,14 +44,14 @@ func (tf *simpleTuningSetFactory) Init(tuningSets []api.TuningSet) {
 }
 
 // CreateTuningSet creates new tuning set based on provided tuning set name. 
-func (tf *simpleTuningSetFactory) CreateTuningSet(name string) (TuningSet, error) { +func (tf *simpleFactory) CreateTuningSet(name string) (TuningSet, error) { tuningSet, exists := tf.tuningSetMap[name] if !exists { return nil, fmt.Errorf("tuningset %s not found", name) } switch { - case tuningSet.QpsLoad != nil: - return newQpsLoad(tuningSet.QpsLoad), nil + case tuningSet.QPSLoad != nil: + return newQPSLoad(tuningSet.QPSLoad), nil case tuningSet.RandomizedLoad != nil: return newRandomizedLoad(tuningSet.RandomizedLoad), nil case tuningSet.SteppedLoad != nil: diff --git a/dns/jsonify/main.go b/dns/jsonify/main.go index 92c3c31a0..7ce9e13ab 100644 --- a/dns/jsonify/main.go +++ b/dns/jsonify/main.go @@ -32,8 +32,8 @@ import ( ) const ( - // S_TO_MS is a second to millisecond ratio. - S_TO_MS = float64((time.Second) / time.Millisecond) + // secToMsec is a second to millisecond ratio. + secToMsec = float64((time.Second) / time.Millisecond) ) // BenchmarkResult is a dns benchmark results structure. @@ -51,7 +51,7 @@ type BenchmarkData struct { AvgLatency float64 `yaml:"avg_latency"` MaxLatency float64 `yaml:"max_latency"` MinLatency float64 `yaml:"min_latency"` - Qps float64 `yaml:"qps"` + QPS float64 `yaml:"qps"` QueriesCompleted float64 `yaml:"queries_completed"` QueriesLost float64 `yaml:"queries_lost"` QueriesSent float64 `yaml:"queries_sent"` @@ -61,10 +61,10 @@ type BenchmarkData struct { type BenchmarkParams struct { RunLengthSeconds float64 `yaml:"run_length_seconds"` QueryFile string `yaml:"query_file"` - KubednsCpu *float64 `yaml:"kubedns_cpu"` - DnsmasqCpu *float64 `yaml:"dnsmasq_cpu"` + KubednsCPU *float64 `yaml:"kubedns_cpu"` + DnsmasqCPU *float64 `yaml:"dnsmasq_cpu"` DnsmasqCache *float64 `yaml:"dnsmasq_cache"` - MaxQps *float64 `yaml:"max_qps"` + MaxQPS *float64 `yaml:"max_qps"` PodName string `yaml:"pod_name"` } @@ -111,7 +111,7 @@ func run() error { latency.DataItems = appendLatency(latency.DataItems, labels, result) latencyPerc.DataItems = appendLatencyPerc(latencyPerc.DataItems, labels, result) queries.DataItems = appendQueries(queries.DataItems, labels, result) - qps.DataItems = appendQps(qps.DataItems, labels, result) + qps.DataItems = appendQPS(qps.DataItems, labels, result) } timeString := time.Now().Format(time.RFC3339) @@ -124,7 +124,7 @@ func run() error { if err = saveMetric(&queries, filepath.Join(jsonDirPath, "Queries_"+benchmarkName+"_"+timeString+".json")); err != nil { return err } - if err = saveMetric(&qps, filepath.Join(jsonDirPath, "Qps_"+benchmarkName+"_"+timeString+".json")); err != nil { + if err = saveMetric(&qps, filepath.Join(jsonDirPath, "QPS_"+benchmarkName+"_"+timeString+".json")); err != nil { return err } @@ -171,10 +171,10 @@ func createLabels(params *BenchmarkParams) map[string]string { labels := make(map[string]string) labels["run_length_seconds"] = fmt.Sprintf("%v", params.RunLengthSeconds) labels["query_file"] = params.QueryFile - labels["kubedns_cpu"] = toString(params.KubednsCpu) - labels["dnsmasq_cpu"] = toString(params.DnsmasqCpu) + labels["kubedns_cpu"] = toString(params.KubednsCPU) + labels["dnsmasq_cpu"] = toString(params.DnsmasqCPU) labels["dnsmasq_cache"] = toString(params.DnsmasqCache) - labels["max_qps"] = toString(params.MaxQps) + labels["max_qps"] = toString(params.MaxQPS) return labels } @@ -184,9 +184,9 @@ func appendLatency(items []perftype.DataItem, labels map[string]string, result * Unit: "ms", Labels: labels, Data: map[string]float64{ - "max_latency": result.Data.MaxLatency * S_TO_MS, - "avg_latency": result.Data.AvgLatency 
* S_TO_MS, - "min_latency": result.Data.MinLatency * S_TO_MS, + "max_latency": result.Data.MaxLatency * secToMsec, + "avg_latency": result.Data.AvgLatency * secToMsec, + "min_latency": result.Data.MinLatency * secToMsec, }, }) } @@ -215,12 +215,12 @@ func appendQueries(items []perftype.DataItem, labels map[string]string, result * }) } -func appendQps(items []perftype.DataItem, labels map[string]string, result *BenchmarkResult) []perftype.DataItem { +func appendQPS(items []perftype.DataItem, labels map[string]string, result *BenchmarkResult) []perftype.DataItem { return append(items, perftype.DataItem{ Unit: "1/s", Labels: labels, Data: map[string]float64{ - "qps": result.Data.Qps, + "qps": result.Data.QPS, }, }) } diff --git a/network/benchmarks/netperf/nptest/nptest.go b/network/benchmarks/netperf/nptest/nptest.go index aa6ba68ee..95a53797a 100644 --- a/network/benchmarks/netperf/nptest/nptest.go +++ b/network/benchmarks/netperf/nptest/nptest.go @@ -83,14 +83,14 @@ const ( ) const ( - iperfTcpTest = iota - iperfUdpTest = iota + iperfTCPTest = iota + iperfUDPTest = iota iperfSctpTest = iota netperfTest = iota ) -// NetPerfRpc service that exposes RegisterClient and ReceiveOutput for clients -type NetPerfRpc int +// NetPerfRPC service that exposes RegisterClient and ReceiveOutput for clients +type NetPerfRPC int // ClientRegistrationData stores a data about a single client type ClientRegistrationData struct { @@ -158,20 +158,20 @@ func init() { workerStateMap = make(map[string]*workerState) testcases = []*testcase{ - {SourceNode: "netperf-w1", DestinationNode: "netperf-w2", Label: "1 iperf TCP. Same VM using Pod IP", Type: iperfTcpTest, ClusterIP: false, MSS: mssMin}, - {SourceNode: "netperf-w1", DestinationNode: "netperf-w2", Label: "2 iperf TCP. Same VM using Virtual IP", Type: iperfTcpTest, ClusterIP: true, MSS: mssMin}, - {SourceNode: "netperf-w1", DestinationNode: "netperf-w3", Label: "3 iperf TCP. Remote VM using Pod IP", Type: iperfTcpTest, ClusterIP: false, MSS: mssMin}, - {SourceNode: "netperf-w3", DestinationNode: "netperf-w2", Label: "4 iperf TCP. Remote VM using Virtual IP", Type: iperfTcpTest, ClusterIP: true, MSS: mssMin}, - {SourceNode: "netperf-w2", DestinationNode: "netperf-w2", Label: "5 iperf TCP. Hairpin Pod to own Virtual IP", Type: iperfTcpTest, ClusterIP: true, MSS: mssMin}, + {SourceNode: "netperf-w1", DestinationNode: "netperf-w2", Label: "1 iperf TCP. Same VM using Pod IP", Type: iperfTCPTest, ClusterIP: false, MSS: mssMin}, + {SourceNode: "netperf-w1", DestinationNode: "netperf-w2", Label: "2 iperf TCP. Same VM using Virtual IP", Type: iperfTCPTest, ClusterIP: true, MSS: mssMin}, + {SourceNode: "netperf-w1", DestinationNode: "netperf-w3", Label: "3 iperf TCP. Remote VM using Pod IP", Type: iperfTCPTest, ClusterIP: false, MSS: mssMin}, + {SourceNode: "netperf-w3", DestinationNode: "netperf-w2", Label: "4 iperf TCP. Remote VM using Virtual IP", Type: iperfTCPTest, ClusterIP: true, MSS: mssMin}, + {SourceNode: "netperf-w2", DestinationNode: "netperf-w2", Label: "5 iperf TCP. Hairpin Pod to own Virtual IP", Type: iperfTCPTest, ClusterIP: true, MSS: mssMin}, {SourceNode: "netperf-w1", DestinationNode: "netperf-w2", Label: "6 iperf SCTP. Same VM using Pod IP", Type: iperfSctpTest, ClusterIP: false, MSS: mssMin}, {SourceNode: "netperf-w1", DestinationNode: "netperf-w2", Label: "7 iperf SCTP. Same VM using Virtual IP", Type: iperfSctpTest, ClusterIP: true, MSS: mssMin}, {SourceNode: "netperf-w1", DestinationNode: "netperf-w3", Label: "8 iperf SCTP. 
Remote VM using Pod IP", Type: iperfSctpTest, ClusterIP: false, MSS: mssMin}, {SourceNode: "netperf-w3", DestinationNode: "netperf-w2", Label: "9 iperf SCTP. Remote VM using Virtual IP", Type: iperfSctpTest, ClusterIP: true, MSS: mssMin}, {SourceNode: "netperf-w2", DestinationNode: "netperf-w2", Label: "10 iperf SCTP. Hairpin Pod to own Virtual IP", Type: iperfSctpTest, ClusterIP: true, MSS: mssMin}, - {SourceNode: "netperf-w1", DestinationNode: "netperf-w2", Label: "11 iperf UDP. Same VM using Pod IP", Type: iperfUdpTest, ClusterIP: false, MSS: mssMax}, - {SourceNode: "netperf-w1", DestinationNode: "netperf-w2", Label: "12 iperf UDP. Same VM using Virtual IP", Type: iperfUdpTest, ClusterIP: true, MSS: mssMax}, - {SourceNode: "netperf-w1", DestinationNode: "netperf-w3", Label: "13 iperf UDP. Remote VM using Pod IP", Type: iperfUdpTest, ClusterIP: false, MSS: mssMax}, - {SourceNode: "netperf-w3", DestinationNode: "netperf-w2", Label: "14 iperf UDP. Remote VM using Virtual IP", Type: iperfUdpTest, ClusterIP: true, MSS: mssMax}, + {SourceNode: "netperf-w1", DestinationNode: "netperf-w2", Label: "11 iperf UDP. Same VM using Pod IP", Type: iperfUDPTest, ClusterIP: false, MSS: mssMax}, + {SourceNode: "netperf-w1", DestinationNode: "netperf-w2", Label: "12 iperf UDP. Same VM using Virtual IP", Type: iperfUDPTest, ClusterIP: true, MSS: mssMax}, + {SourceNode: "netperf-w1", DestinationNode: "netperf-w3", Label: "13 iperf UDP. Remote VM using Pod IP", Type: iperfUDPTest, ClusterIP: false, MSS: mssMax}, + {SourceNode: "netperf-w3", DestinationNode: "netperf-w2", Label: "14 iperf UDP. Remote VM using Virtual IP", Type: iperfUDPTest, ClusterIP: true, MSS: mssMax}, {SourceNode: "netperf-w1", DestinationNode: "netperf-w2", Label: "15 netperf. Same VM using Pod IP", Type: netperfTest, ClusterIP: false}, {SourceNode: "netperf-w1", DestinationNode: "netperf-w2", Label: "16 netperf. Same VM using Virtual IP", Type: netperfTest, ClusterIP: true}, {SourceNode: "netperf-w1", DestinationNode: "netperf-w3", Label: "17 netperf. 
Remote VM using Pod IP", Type: netperfTest, ClusterIP: false}, @@ -290,7 +290,7 @@ func allocateWorkToClient(workerS *workerState, reply *WorkItem) { } switch { - case v.Type == iperfTcpTest || v.Type == iperfUdpTest || v.Type == iperfSctpTest: + case v.Type == iperfTCPTest || v.Type == iperfUDPTest || v.Type == iperfSctpTest: reply.ClientItem.Port = "5201" reply.ClientItem.MSS = v.MSS @@ -322,7 +322,7 @@ func allocateWorkToClient(workerS *workerState, reply *WorkItem) { } // RegisterClient registers a single and assign a work item to it -func (t *NetPerfRpc) RegisterClient(data *ClientRegistrationData, reply *WorkItem) error { +func (t *NetPerfRPC) RegisterClient(data *ClientRegistrationData, reply *WorkItem) error { globalLock.Lock() defer globalLock.Unlock() @@ -403,7 +403,7 @@ func flushDataPointsToCsv() { fmt.Println("END CSV DATA") } -func parseIperfTcpBandwidth(output string) string { +func parseIperfTCPBandwidth(output string) string { // Parses the output of iperf3 and grabs the group Mbits/sec from the output match := iperfTCPOutputRegexp.FindStringSubmatch(output) if match != nil && len(match) > 1 { @@ -421,7 +421,7 @@ func parseIperfSctpBandwidth(output string) string { return "0" } -func parseIperfUdpBandwidth(output string) string { +func parseIperfUDPBandwidth(output string) string { // Parses the output of iperf3 (UDP mode) and grabs the Mbits/sec from the output match := iperfUDPOutputRegexp.FindStringSubmatch(output) if match != nil && len(match) > 1 { @@ -430,7 +430,7 @@ func parseIperfUdpBandwidth(output string) string { return "0" } -func parseIperfCpuUsage(output string) (string, string) { +func parseIperfCPUUsage(output string) (string, string) { // Parses the output of iperf and grabs the CPU usage on sender and receiver side from the output match := iperfCPUOutputRegexp.FindStringSubmatch(output) if match != nil && len(match) > 1 { @@ -449,7 +449,7 @@ func parseNetperfBandwidth(output string) string { } // ReceiveOutput processes a data received from a single client -func (t *NetPerfRpc) ReceiveOutput(data *WorkerOutput, reply *int) error { +func (t *NetPerfRPC) ReceiveOutput(data *WorkerOutput, reply *int) error { globalLock.Lock() defer globalLock.Unlock() @@ -461,13 +461,13 @@ func (t *NetPerfRpc) ReceiveOutput(data *WorkerOutput, reply *int) error { var cpuReceiver string switch data.Type { - case iperfTcpTest: + case iperfTCPTest: mss := testcases[currentJobIndex].MSS - mssStepSize outputLog = outputLog + fmt.Sprintln("Received TCP output from worker", data.Worker, "for test", testcase.Label, "from", testcase.SourceNode, "to", testcase.DestinationNode, "MSS:", mss) + data.Output writeOutputFile(outputCaptureFile, outputLog) - bw = parseIperfTcpBandwidth(data.Output) - cpuSender, cpuReceiver = parseIperfCpuUsage(data.Output) + bw = parseIperfTCPBandwidth(data.Output) + cpuSender, cpuReceiver = parseIperfCPUUsage(data.Output) registerDataPoint(testcase.Label, mss, bw, currentJobIndex) case iperfSctpTest: @@ -476,15 +476,15 @@ func (t *NetPerfRpc) ReceiveOutput(data *WorkerOutput, reply *int) error { "from", testcase.SourceNode, "to", testcase.DestinationNode, "MSS:", mss) + data.Output writeOutputFile(outputCaptureFile, outputLog) bw = parseIperfSctpBandwidth(data.Output) - cpuSender, cpuReceiver = parseIperfCpuUsage(data.Output) + cpuSender, cpuReceiver = parseIperfCPUUsage(data.Output) registerDataPoint(testcase.Label, mss, bw, currentJobIndex) - case iperfUdpTest: + case iperfUDPTest: mss := testcases[currentJobIndex].MSS - mssStepSize outputLog = outputLog + 
fmt.Sprintln("Received UDP output from worker", data.Worker, "for test", testcase.Label, "from", testcase.SourceNode, "to", testcase.DestinationNode, "MSS:", mss) + data.Output writeOutputFile(outputCaptureFile, outputLog) - bw = parseIperfUdpBandwidth(data.Output) + bw = parseIperfUDPBandwidth(data.Output) registerDataPoint(testcase.Label, mss, bw, currentJobIndex) case netperfTest: @@ -498,7 +498,7 @@ func (t *NetPerfRpc) ReceiveOutput(data *WorkerOutput, reply *int) error { } switch data.Type { - case iperfTcpTest, iperfSctpTest: + case iperfTCPTest, iperfSctpTest: fmt.Println("Jobdone from worker", data.Worker, "Bandwidth was", bw, "Mbits/sec. CPU usage sender was", cpuSender, "%. CPU usage receiver was", cpuReceiver, "%.") default: fmt.Println("Jobdone from worker", data.Worker, "Bandwidth was", bw, "Mbits/sec") @@ -508,7 +508,7 @@ func (t *NetPerfRpc) ReceiveOutput(data *WorkerOutput, reply *int) error { } func serveRPCRequests(port string) { - baseObject := new(NetPerfRpc) + baseObject := new(NetPerfRPC) rpc.Register(baseObject) rpc.HandleHTTP() listener, e := net.Listen("tcp", ":"+port) @@ -552,14 +552,14 @@ func getMyIP() string { func handleClientWorkItem(client *rpc.Client, workItem *WorkItem) { fmt.Println("Orchestrator requests worker run item Type:", workItem.ClientItem.Type) switch { - case workItem.ClientItem.Type == iperfTcpTest || workItem.ClientItem.Type == iperfUdpTest || workItem.ClientItem.Type == iperfSctpTest: + case workItem.ClientItem.Type == iperfTCPTest || workItem.ClientItem.Type == iperfUDPTest || workItem.ClientItem.Type == iperfSctpTest: outputString := iperfClient(workItem.ClientItem.Host, workItem.ClientItem.Port, workItem.ClientItem.MSS, workItem.ClientItem.Type) var reply int - client.Call("NetPerfRpc.ReceiveOutput", WorkerOutput{Output: outputString, Worker: worker, Type: workItem.ClientItem.Type}, &reply) + client.Call("NetPerfRPC.ReceiveOutput", WorkerOutput{Output: outputString, Worker: worker, Type: workItem.ClientItem.Type}, &reply) case workItem.ClientItem.Type == netperfTest: outputString := netperfClient(workItem.ClientItem.Host, workItem.ClientItem.Port, workItem.ClientItem.Type) var reply int - client.Call("NetPerfRpc.ReceiveOutput", WorkerOutput{Output: outputString, Worker: worker, Type: workItem.ClientItem.Type}, &reply) + client.Call("NetPerfRPC.ReceiveOutput", WorkerOutput{Output: outputString, Worker: worker, Type: workItem.ClientItem.Type}, &reply) } // Client COOLDOWN period before asking for next work item to replenish burst allowance policers etc time.Sleep(10 * time.Second) @@ -587,7 +587,7 @@ func startWork() { clientData := ClientRegistrationData{Host: podname, KubeNode: kubenode, Worker: worker, IP: getMyIP()} var workItem WorkItem - if err := client.Call("NetPerfRpc.RegisterClient", clientData, &workItem); err != nil { + if err := client.Call("NetPerfRPC.RegisterClient", clientData, &workItem); err != nil { // RPC server has probably gone away - attempt to reconnect fmt.Println("Error attempting RPC call", err) break @@ -630,7 +630,7 @@ func netperfServer() { // Invoke and run an iperf client and return the output if successful. 
func iperfClient(serverHost, serverPort string, mss int, workItemType int) (rv string) { switch { - case workItemType == iperfTcpTest: + case workItemType == iperfTCPTest: output, success := cmdExec(iperf3Path, []string{iperf3Path, "-c", serverHost, "-V", "-N", "-i", "30", "-t", "10", "-f", "m", "-w", "512M", "-Z", "-P", parallelStreams, "-M", strconv.Itoa(mss)}, 15) if success { rv = output @@ -642,7 +642,7 @@ func iperfClient(serverHost, serverPort string, mss int, workItemType int) (rv s rv = output } - case workItemType == iperfUdpTest: + case workItemType == iperfUDPTest: output, success := cmdExec(iperf3Path, []string{iperf3Path, "-c", serverHost, "-i", "30", "-t", "10", "-f", "m", "-b", "0", "-u"}, 15) if success { rv = output diff --git a/perfdash/github-configs-fetcher.go b/perfdash/github-configs-fetcher.go index e8272f49a..4e4f2f41f 100644 --- a/perfdash/github-configs-fetcher.go +++ b/perfdash/github-configs-fetcher.go @@ -29,9 +29,9 @@ import ( type githubDirContent struct { Name string `yaml:"name"` Path string `yaml:"path"` - DownloadUrl string `yaml:"download_url"` + DownloadURL string `yaml:"download_url"` Type string `yaml:"type"` - Url string `yaml:"url"` + URL string `yaml:"url"` } // GetConfigsFromGithub gets config paths from github directory. It uses github API, @@ -51,7 +51,7 @@ func GetConfigsFromGithub(url string) ([]string, error) { // Dirs and non-yaml files are ignored; this means that there is no // recursive search, it should be good enough for now. if c.Type == "file" && strings.HasSuffix(c.Name, ".yaml") { - result = append(result, c.DownloadUrl) + result = append(result, c.DownloadURL) } } return result, nil diff --git a/perfdash/google-gcs-downloader.go b/perfdash/google-gcs-downloader.go index 1df78e7da..b6513bcb6 100644 --- a/perfdash/google-gcs-downloader.go +++ b/perfdash/google-gcs-downloader.go @@ -69,8 +69,8 @@ func NewGoogleGCSDownloader(opt *GoogleGCSDownloaderOptions) (*GoogleGCSDownload func (g *GoogleGCSDownloader) getData() (JobToCategoryData, error) { configPaths := make([]string, len(g.Options.ConfigPaths)) copy(configPaths, g.Options.ConfigPaths) - for _, githubUrl := range g.Options.GithubConfigDirs { - githubConfigPaths, err := GetConfigsFromGithub(githubUrl) + for _, githubURL := range g.Options.GithubConfigDirs { + githubConfigPaths, err := GetConfigsFromGithub(githubURL) if err != nil { return nil, err } @@ -163,8 +163,8 @@ func getResultCategory(metricsFileName string, filePrefix string, category strin } // If there are more artifacts, assume that this is a test suite run. 
	trimmed := strings.TrimPrefix(metricsFileName, filePrefix+"_")
-	suiteId := strings.Split(trimmed, "_")[0]
-	return fmt.Sprintf("%v_%v", suiteId, category)
+	suiteID := strings.Split(trimmed, "_")[0]
+	return fmt.Sprintf("%v_%v", suiteID, category)
 }
 
 func getBuildData(result JobToCategoryData, prefix string, category string, label string, job string, resultLock *sync.Mutex) *BuildData {
diff --git a/perfdash/parser.go b/perfdash/parser.go
index 7fc62f4e0..7aca2f71e 100644
--- a/perfdash/parser.go
+++ b/perfdash/parser.go
@@ -73,12 +73,12 @@ type resourceUsagePercentiles map[string][]resourceUsages
 
 type resourceUsages struct {
 	Name   string  `json:"Name"`
-	Cpu    float64 `json:"Cpu"`
+	CPU    float64 `json:"Cpu"` // keep the historical JSON key so existing result files stay parseable
 	Memory int     `json:"Mem"`
 }
 
 type resourceUsage struct {
-	Cpu    float64
+	CPU    float64
 	Memory float64
 }
 type usageAtPercentiles map[string]resourceUsage
@@ -99,12 +99,12 @@ func parseResourceUsageData(data []byte, buildNumber int, testResult *BuildData)
 			if _, ok := usage[name]; !ok {
 				usage[name] = make(usageAtPercentiles)
 			}
-			cpu, memory := float64(item.Cpu), float64(item.Memory)
+			cpu, memory := float64(item.CPU), float64(item.Memory)
 			if otherUsage, ok := usage[name][percentile]; ok {
 				// Note that we take max of each resource separately, potentially manufacturing a
 				// "franken-sample" which was never seen in the wild. We do this hoping that such result
 				// will be more stable across runs.
-				cpu = math.Max(cpu, otherUsage.Cpu)
+				cpu = math.Max(cpu, otherUsage.CPU)
 				memory = math.Max(memory, otherUsage.Memory)
 			}
 			usage[name][percentile] = resourceUsage{cpu, memory}
@@ -114,7 +114,7 @@ func parseResourceUsageData(data []byte, buildNumber int, testResult *BuildData)
 		cpu := perftype.DataItem{Unit: "cores", Labels: map[string]string{"PodName": podName, "Resource": "CPU"}, Data: make(map[string]float64)}
 		memory := perftype.DataItem{Unit: "MiB", Labels: map[string]string{"PodName": podName, "Resource": "memory"}, Data: make(map[string]float64)}
 		for percentile, usage := range usageAtPercentiles {
-			cpu.Data[percentile] = usage.Cpu
+			cpu.Data[percentile] = usage.CPU
 			memory.Data[percentile] = usage.Memory / (1024 * 1024)
 		}
 		testResult.Builds[build] = append(testResult.Builds[build], cpu)
@@ -186,7 +186,7 @@ func parseApiserverRequestCount(data []byte, buildNumber int, testResult *BuildD
 			perfData.Labels["client"] = newClient
 		}
 		perfData.Data[dataLabel] = float64(metric[i].Value)
-		key := createMapId(perfData.Labels)
+		key := createMapID(perfData.Labels)
 		if result, exists := resultMap[key]; exists {
 			result.Data[dataLabel] += perfData.Data[dataLabel]
 			continue
@@ -224,7 +224,7 @@ func parseApiserverInitEventsCount(data []byte, buildNumber int, testResult *Bui
 	}
 }
 
-func createMapId(m map[string]string) string {
+func createMapID(m map[string]string) string {
 	var keys []string
 	for key := range m {
 		keys = append(keys, key)
diff --git a/perfdash/perfdash.go b/perfdash/perfdash.go
index 3d4be6b44..286e9f5d4 100644
--- a/perfdash/perfdash.go
+++ b/perfdash/perfdash.go
@@ -40,7 +40,7 @@ var (
-	www    = pflag.Bool("www", false, "If true, start a web-server to server performance data")
+	www    = pflag.Bool("www", false, "If true, start a web-server to serve performance data")
 	wwwDir = pflag.String("dir", "www", "If non-empty, add a file server for this directory at the root of the web server")
 
-	storageUrl = pflag.String("storageUrl", "https://prow.k8s.io/view/gcs", "Name of the data bucket")
+	storageURL = pflag.String("storageURL", "https://prow.k8s.io/view/gcs", "Name of the data bucket")
 
 	globalConfig = make(map[string]string)
 )
@@ -122,7 +122,7 @@ func run() error {
 
 func initGlobalConfig() { 
globalConfig["logsBucket"] = options.LogsBucket globalConfig["logsPath"] = options.LogsPath - globalConfig["storageUrl"] = *storageUrl + globalConfig["storageURL"] = *storageURL } func serveConfig(res http.ResponseWriter, req *http.Request) { diff --git a/util-images/probes/cmd/main.go b/util-images/probes/cmd/main.go index 45825b514..6ab89e631 100644 --- a/util-images/probes/cmd/main.go +++ b/util-images/probes/cmd/main.go @@ -47,7 +47,7 @@ func main() { case "ping-client": pingclient.Run(pingclient.NewDefaultPingClientConfig()) case "ping-server": - pingserver.Run(pingserver.NewDefaultPingServerConfig()) + pingserver.Run(pingserver.NewDefaultConfig()) case "dns": dns.Run() default: diff --git a/util-images/probes/pkg/ping/server/server.go b/util-images/probes/pkg/ping/server/server.go index 2739e3a3b..a256d4c65 100644 --- a/util-images/probes/pkg/ping/server/server.go +++ b/util-images/probes/pkg/ping/server/server.go @@ -27,23 +27,23 @@ var ( pingServerBindAddress = flag.String("ping-server-bind-address", "", "The address to bind for the ping server") ) -// PingServerConfig configures the "ping-server" probe. -type PingServerConfig struct { +// Config configures the "ping-server" probe. +type Config struct { pingServerBindAddress string } -// NewDefaultPingServerConfig creates a default "ping-server" config. -func NewDefaultPingServerConfig() *PingServerConfig { +// NewDefaultConfig creates a default "ping-server" config. +func NewDefaultConfig() *Config { if *pingServerBindAddress == "" { klog.Fatal("--ping-server-bind-address not set!") } - return &PingServerConfig{ + return &Config{ pingServerBindAddress: *pingServerBindAddress, } } // Run runs the ping server. -func Run(config *PingServerConfig) { +func Run(config *Config) { klog.Infof("Listening on %s \n", config.pingServerBindAddress) http.HandleFunc("/", pong) klog.Fatal(http.ListenAndServe(config.pingServerBindAddress, nil)) diff --git a/verify/verify-golint.sh b/verify/verify-golint.sh index 87a7e5162..7c7838258 100755 --- a/verify/verify-golint.sh +++ b/verify/verify-golint.sh @@ -26,7 +26,7 @@ GOLINT=${GOLINT:-"golint"} PACKAGES=($(GO111MODULE=off go list ./... | grep -v /vendor/)) bad_files=() for package in "${PACKAGES[@]}"; do - out=$("${GOLINT}" -min_confidence=0.9 "${package}") + out=$("${GOLINT}" "${package}") if [[ -n "${out}" ]]; then bad_files+=("${out}") fi