From 99e5904c4cda64112c96fb2e6bb6718671ca2b9b Mon Sep 17 00:00:00 2001 From: Nathan Baulch Date: Wed, 7 Aug 2024 09:17:25 +1000 Subject: [PATCH 1/2] Fix typos --- SECURITY.md | 2 +- backoff/backoff.go | 2 +- balancer/grpclb/grpclb.go | 2 +- balancer/grpclb/grpclb_util_test.go | 2 +- balancer/pickfirst/pickfirst.go | 2 +- balancer/rls/cache.go | 12 ++++++------ balancer/rls/control_channel_test.go | 2 +- balancer/rls/internal/keys/builder.go | 2 +- balancer/rls/internal/test/e2e/rls_child_policy.go | 2 +- balancer/rls/picker.go | 2 +- .../weightedtarget/weightedaggregator/aggregator.go | 2 +- benchmark/benchmain/main.go | 10 +++++----- benchmark/latency/latency.go | 2 +- benchmark/latency/latency_test.go | 6 +++--- benchmark/primitives/syncmap_test.go | 2 +- benchmark/stats/stats.go | 2 +- benchmark/worker/main.go | 4 ++-- binarylog/binarylog_end2end_test.go | 2 +- clientconn.go | 2 +- clientconn_test.go | 2 +- credentials/alts/internal/conn/aeadrekey.go | 2 +- credentials/alts/internal/conn/aes128gcmrekey.go | 2 +- credentials/alts/internal/conn/record_test.go | 2 +- credentials/local/local.go | 2 +- credentials/sts/sts.go | 2 +- examples/examples_test.sh | 2 +- examples/features/load_balancing/README.md | 4 ++-- examples/features/multiplex/client/main.go | 2 +- examples/features/orca/client/main.go | 2 +- gcp/observability/logging.go | 2 +- gcp/observability/logging_test.go | 2 +- gcp/observability/observability_test.go | 2 +- grpclog/grpclog.go | 2 +- .../balancer/gracefulswitch/gracefulswitch_test.go | 4 ++-- internal/balancergroup/balancergroup.go | 6 +++--- internal/balancergroup/balancergroup_test.go | 6 +++--- internal/channelz/channelmap.go | 2 +- internal/grpcsync/callback_serializer_test.go | 2 +- internal/idle/idle_test.go | 4 ++-- internal/resolver/dns/dns_resolver_test.go | 2 +- internal/tcp_keepalive_unix.go | 2 +- internal/tcp_keepalive_windows.go | 2 +- internal/testutils/balancer.go | 2 +- internal/testutils/state.go | 2 +- internal/testutils/xds/e2e/clientresources.go | 4 ++-- .../testutils/{xds_bootsrap.go => xds_bootstrap.go} | 0 internal/transport/controlbuf.go | 2 +- internal/transport/grpchttp2/errors.go | 4 ++-- internal/transport/grpchttp2/errors_test.go | 2 +- internal/transport/http2_client.go | 6 +++--- internal/transport/transport.go | 6 +++--- internal/transport/transport_test.go | 8 ++++---- internal/xds/bootstrap/bootstrap.go | 2 +- internal/xds/rbac/rbac_engine_test.go | 4 ++-- interop/client/client.go | 2 +- interop/grpclb_fallback/client_linux.go | 2 +- mem/buffers_test.go | 2 +- metadata/metadata.go | 2 +- orca/server_metrics.go | 2 +- profiling/cmd/catapult.go | 2 +- reflection/internal/internal.go | 2 +- reflection/test/serverreflection_test.go | 4 ++-- scripts/vet.sh | 2 +- security/advancedtls/testdata/openssl-ca.cnf | 2 +- stream.go | 2 +- test/balancer_switching_test.go | 2 +- test/bufconn/bufconn.go | 2 +- test/channelz_test.go | 8 ++++---- test/end2end_test.go | 8 ++++---- test/healthcheck_test.go | 2 +- test/stream_cleanup_test.go | 2 +- test/xds/xds_client_federation_test.go | 2 +- test/xds/xds_server_rbac_test.go | 2 +- testdata/README.md | 2 +- .../balancer/cdsbalancer/aggregate_cluster_test.go | 4 ++-- xds/internal/balancer/cdsbalancer/cdsbalancer.go | 2 +- .../cdsbalancer/cdsbalancer_security_test.go | 8 ++++---- .../balancer/clusterimpl/tests/balancer_test.go | 2 +- .../clusterresolver/configbuilder_childname_test.go | 2 +- .../balancer/loadstore/load_store_wrapper.go | 2 +- .../balancer/ringhash/e2e/ringhash_balancer_test.go | 10 
+++++----- xds/internal/balancer/ringhash/ring_test.go | 2 +- xds/internal/httpfilter/rbac/rbac.go | 6 +++--- xds/internal/server/conn_wrapper.go | 2 +- xds/internal/xdsclient/authority.go | 2 +- xds/internal/xdsclient/transport/loadreport_test.go | 2 +- xds/internal/xdsclient/transport/transport.go | 4 ++-- xds/internal/xdsclient/xdsresource/unmarshal_cds.go | 4 ++-- .../xdsclient/xdsresource/unmarshal_cds_test.go | 2 +- 89 files changed, 136 insertions(+), 136 deletions(-) rename internal/testutils/{xds_bootsrap.go => xds_bootstrap.go} (100%) diff --git a/SECURITY.md b/SECURITY.md index be6e108705c4..abab279379ba 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,3 +1,3 @@ # Security Policy -For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). +For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/backoff/backoff.go b/backoff/backoff.go index 0787d0b50ce9..d7b40b7cb66f 100644 --- a/backoff/backoff.go +++ b/backoff/backoff.go @@ -39,7 +39,7 @@ type Config struct { MaxDelay time.Duration } -// DefaultConfig is a backoff configuration with the default values specfied +// DefaultConfig is a backoff configuration with the default values specified // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // // This should be useful for callers who want to configure backoff with diff --git a/balancer/grpclb/grpclb.go b/balancer/grpclb/grpclb.go index 47a3e938dcf5..c09876274131 100644 --- a/balancer/grpclb/grpclb.go +++ b/balancer/grpclb/grpclb.go @@ -219,7 +219,7 @@ type lbBalancer struct { // All backends addresses, with metadata set to nil. This list contains all // backend addresses in the same order and with the same duplicates as in // serverlist. When generating picker, a SubConn slice with the same order - // but with only READY SCs will be gerenated. + // but with only READY SCs will be generated. backendAddrsWithoutMetadata []resolver.Address // Roundrobin functionalities. state connectivity.State diff --git a/balancer/grpclb/grpclb_util_test.go b/balancer/grpclb/grpclb_util_test.go index c09edc324e15..379b6d98c00a 100644 --- a/balancer/grpclb/grpclb_util_test.go +++ b/balancer/grpclb/grpclb_util_test.go @@ -252,7 +252,7 @@ func (s) TestLBCache_ShutdownTimer_New_Race(t *testing.T) { go func() { for i := 0; i < 1000; i++ { // Shutdown starts a timer with 1 ns timeout, the NewSubConn will - // race with with the timer. + // race with the timer. sc.Shutdown() sc, _ = ccc.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{}) } diff --git a/balancer/pickfirst/pickfirst.go b/balancer/pickfirst/pickfirst.go index 07527603f1d4..5b592f48ad9d 100644 --- a/balancer/pickfirst/pickfirst.go +++ b/balancer/pickfirst/pickfirst.go @@ -155,7 +155,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Endpoints not set, process addresses until we migrate resolver // emissions fully to Endpoints. The top channel does wrap emitted // addresses with endpoints, however some balancers such as weighted - // target do not forwarrd the corresponding correct endpoints down/split + // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. 
addrs = state.ResolverState.Addresses diff --git a/balancer/rls/cache.go b/balancer/rls/cache.go index d7a6a1a436c6..ee8df6c3f3b5 100644 --- a/balancer/rls/cache.go +++ b/balancer/rls/cache.go @@ -47,7 +47,7 @@ type cacheEntry struct { // headerData is received in the RLS response and is to be sent in the // X-Google-RLS-Data header for matching RPCs. headerData string - // expiryTime is the absolute time at which this cache entry entry stops + // expiryTime is the absolute time at which this cache entry stops // being valid. When an RLS request succeeds, this is set to the current // time plus the max_age field from the LB policy config. expiryTime time.Time @@ -223,7 +223,7 @@ func (dc *dataCache) resize(size int64) (backoffCancelled bool) { backoffCancelled = true } } - dc.deleteAndcleanup(key, entry) + dc.deleteAndCleanup(key, entry) } dc.maxSize = size return backoffCancelled @@ -249,7 +249,7 @@ func (dc *dataCache) evictExpiredEntries() bool { if entry.expiryTime.After(now) || entry.backoffExpiryTime.After(now) { continue } - dc.deleteAndcleanup(key, entry) + dc.deleteAndCleanup(key, entry) evicted = true } return evicted @@ -339,7 +339,7 @@ func (dc *dataCache) removeEntryForTesting(key cacheKey) { if !ok { return } - dc.deleteAndcleanup(key, entry) + dc.deleteAndCleanup(key, entry) } // deleteAndCleanup performs actions required at the time of deleting an entry @@ -347,7 +347,7 @@ func (dc *dataCache) removeEntryForTesting(key cacheKey) { // - the entry is removed from the map of entries // - current size of the data cache is update // - the key is removed from the LRU -func (dc *dataCache) deleteAndcleanup(key cacheKey, entry *cacheEntry) { +func (dc *dataCache) deleteAndCleanup(key cacheKey, entry *cacheEntry) { delete(dc.entries, key) dc.currentSize -= entry.size dc.keys.removeEntry(key) @@ -355,7 +355,7 @@ func (dc *dataCache) deleteAndcleanup(key cacheKey, entry *cacheEntry) { func (dc *dataCache) stop() { for key, entry := range dc.entries { - dc.deleteAndcleanup(key, entry) + dc.deleteAndCleanup(key, entry) } dc.shutdown.Fire() } diff --git a/balancer/rls/control_channel_test.go b/balancer/rls/control_channel_test.go index dc7acd32c863..4552bd98aab4 100644 --- a/balancer/rls/control_channel_test.go +++ b/balancer/rls/control_channel_test.go @@ -62,7 +62,7 @@ func (s) TestControlChannelThrottled(t *testing.T) { select { case <-rlsReqCh: - t.Fatal("RouteLookup RPC invoked when control channel is throtlled") + t.Fatal("RouteLookup RPC invoked when control channel is throttled") case <-time.After(defaultTestShortTimeout): } } diff --git a/balancer/rls/internal/keys/builder.go b/balancer/rls/internal/keys/builder.go index d010f74456fe..cc5ce510ad90 100644 --- a/balancer/rls/internal/keys/builder.go +++ b/balancer/rls/internal/keys/builder.go @@ -218,7 +218,7 @@ type matcher struct { names []string } -// Equal reports if m and are are equivalent headerKeys. +// Equal reports if m and a are equivalent headerKeys. func (m matcher) Equal(a matcher) bool { if m.key != a.key { return false diff --git a/balancer/rls/internal/test/e2e/rls_child_policy.go b/balancer/rls/internal/test/e2e/rls_child_policy.go index 8742fa135886..2bae34b60887 100644 --- a/balancer/rls/internal/test/e2e/rls_child_policy.go +++ b/balancer/rls/internal/test/e2e/rls_child_policy.go @@ -125,7 +125,7 @@ func (b *bal) Close() { // run is a dummy goroutine to make sure that child policies are closed at the // end of tests. 
If they are not closed, these goroutines will be picked up by -// the leakcheker and tests will fail. +// the leak checker and tests will fail. func (b *bal) run() { <-b.done.Done() } diff --git a/balancer/rls/picker.go b/balancer/rls/picker.go index 0954c09c0637..e5c86f290687 100644 --- a/balancer/rls/picker.go +++ b/balancer/rls/picker.go @@ -190,7 +190,7 @@ func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info bala state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) // Delegate to the child policy if it is not in TRANSIENT_FAILURE, or if // it is the last one (which handles the case of delegating to the last - // child picker if all child polcies are in TRANSIENT_FAILURE). + // child picker if all child policies are in TRANSIENT_FAILURE). if state.ConnectivityState != connectivity.TransientFailure || i == len(dcEntry.childPolicyWrappers)-1 { // Any header data received from the RLS server is stored in the // cache entry and needs to be sent to the actual backend in the diff --git a/balancer/weightedtarget/weightedaggregator/aggregator.go b/balancer/weightedtarget/weightedaggregator/aggregator.go index 27279257ed13..bcc8aca8b491 100644 --- a/balancer/weightedtarget/weightedaggregator/aggregator.go +++ b/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -89,7 +89,7 @@ func New(cc balancer.ClientConn, logger *grpclog.PrefixLogger, newWRR func() wrr } // Start starts the aggregator. It can be called after Stop to restart the -// aggretator. +// aggregator. func (wbsa *Aggregator) Start() { wbsa.mu.Lock() defer wbsa.mu.Unlock() diff --git a/benchmark/benchmain/main.go b/benchmark/benchmain/main.go index cbb93b2f5f28..b1753be6dc58 100644 --- a/benchmark/benchmain/main.go +++ b/benchmark/benchmain/main.go @@ -110,11 +110,11 @@ var ( useBufconn = flag.Bool("bufconn", false, "Use in-memory connection instead of system network I/O") enableKeepalive = flag.Bool("enable_keepalive", false, "Enable client keepalive. \n"+ "Keepalive.Time is set to 10s, Keepalive.Timeout is set to 1s, Keepalive.PermitWithoutStream is set to true.") - clientReadBufferSize = flags.IntSlice("clientReadBufferSize", []int{-1}, "Configures the client read buffer size in bytes. If negative, use the default - may be a a comma-separated list") - clientWriteBufferSize = flags.IntSlice("clientWriteBufferSize", []int{-1}, "Configures the client write buffer size in bytes. If negative, use the default - may be a a comma-separated list") - serverReadBufferSize = flags.IntSlice("serverReadBufferSize", []int{-1}, "Configures the server read buffer size in bytes. If negative, use the default - may be a a comma-separated list") - serverWriteBufferSize = flags.IntSlice("serverWriteBufferSize", []int{-1}, "Configures the server write buffer size in bytes. If negative, use the default - may be a a comma-separated list") - sleepBetweenRPCs = flags.DurationSlice("sleepBetweenRPCs", []time.Duration{0}, "Configures the maximum amount of time the client should sleep between consecutive RPCs - may be a a comma-separated list") + clientReadBufferSize = flags.IntSlice("clientReadBufferSize", []int{-1}, "Configures the client read buffer size in bytes. If negative, use the default - may be a comma-separated list") + clientWriteBufferSize = flags.IntSlice("clientWriteBufferSize", []int{-1}, "Configures the client write buffer size in bytes. 
If negative, use the default - may be a comma-separated list") + serverReadBufferSize = flags.IntSlice("serverReadBufferSize", []int{-1}, "Configures the server read buffer size in bytes. If negative, use the default - may be a comma-separated list") + serverWriteBufferSize = flags.IntSlice("serverWriteBufferSize", []int{-1}, "Configures the server write buffer size in bytes. If negative, use the default - may be a comma-separated list") + sleepBetweenRPCs = flags.DurationSlice("sleepBetweenRPCs", []time.Duration{0}, "Configures the maximum amount of time the client should sleep between consecutive RPCs - may be a comma-separated list") connections = flag.Int("connections", 1, "The number of connections. Each connection will handle maxConcurrentCalls RPC streams") recvBufferPool = flags.StringWithAllowedValues("recvBufferPool", recvBufferPoolNil, "Configures the shared receive buffer pool. One of: nil, simple, all", allRecvBufferPools) sharedWriteBuffer = flags.StringWithAllowedValues("sharedWriteBuffer", toggleModeOff, diff --git a/benchmark/latency/latency.go b/benchmark/latency/latency.go index d5cc44f9b5ee..99e5bc52f568 100644 --- a/benchmark/latency/latency.go +++ b/benchmark/latency/latency.go @@ -65,7 +65,7 @@ type Network struct { var ( //Local simulates local network. Local = Network{0, 0, 0} - //LAN simulates local area network network. + //LAN simulates local area network. LAN = Network{100 * 1024, 2 * time.Millisecond, 1500} //WAN simulates wide area network. WAN = Network{20 * 1024, 30 * time.Millisecond, 1500} diff --git a/benchmark/latency/latency_test.go b/benchmark/latency/latency_test.go index 787373ca30be..c866e359e807 100644 --- a/benchmark/latency/latency_test.go +++ b/benchmark/latency/latency_test.go @@ -46,9 +46,9 @@ type bufConn struct { func (bufConn) Close() error { panic("unimplemented") } func (bufConn) LocalAddr() net.Addr { panic("unimplemented") } func (bufConn) RemoteAddr() net.Addr { panic("unimplemented") } -func (bufConn) SetDeadline(t time.Time) error { panic("unimplemneted") } -func (bufConn) SetReadDeadline(t time.Time) error { panic("unimplemneted") } -func (bufConn) SetWriteDeadline(t time.Time) error { panic("unimplemneted") } +func (bufConn) SetDeadline(t time.Time) error { panic("unimplemented") } +func (bufConn) SetReadDeadline(t time.Time) error { panic("unimplemented") } +func (bufConn) SetWriteDeadline(t time.Time) error { panic("unimplemented") } func restoreHooks() func() { s := sleep diff --git a/benchmark/primitives/syncmap_test.go b/benchmark/primitives/syncmap_test.go index 3ece8d92034b..37f9e2c6f897 100644 --- a/benchmark/primitives/syncmap_test.go +++ b/benchmark/primitives/syncmap_test.go @@ -152,7 +152,7 @@ func benchmarkIncrementUint64Map(b *testing.B, f func() incrementUint64Map) { } } -func BenchmarkMapWithSyncMutexContetion(b *testing.B) { +func BenchmarkMapWithSyncMutexContention(b *testing.B) { benchmarkIncrementUint64Map(b, newMapWithLock) } diff --git a/benchmark/stats/stats.go b/benchmark/stats/stats.go index e42c5b6c0f24..97a203010464 100644 --- a/benchmark/stats/stats.go +++ b/benchmark/stats/stats.go @@ -293,7 +293,7 @@ type RunData struct { Fiftieth time.Duration // Ninetieth is the 90th percentile latency. Ninetieth time.Duration - // Ninetyninth is the 99th percentile latency. + // NinetyNinth is the 99th percentile latency. NinetyNinth time.Duration // Average is the average latency. 
Average time.Duration diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go index 793fd76bcd40..9f0a58df7b3f 100644 --- a/benchmark/worker/main.go +++ b/benchmark/worker/main.go @@ -141,7 +141,7 @@ func (s *workerServer) RunClient(stream testgrpc.WorkerService_RunClientServer) var bc *benchmarkClient defer func() { // Shut down benchmark client when stream ends. - logger.Infof("shuting down benchmark client") + logger.Infof("shutting down benchmark client") if bc != nil { bc.shutdown() } @@ -160,7 +160,7 @@ func (s *workerServer) RunClient(stream testgrpc.WorkerService_RunClientServer) case *testpb.ClientArgs_Setup: logger.Infof("client setup received:") if bc != nil { - logger.Infof("client setup received when client already exists, shuting down the existing client") + logger.Infof("client setup received when client already exists, shutting down the existing client") bc.shutdown() } bc, err = startBenchmarkClient(t.Setup) diff --git a/binarylog/binarylog_end2end_test.go b/binarylog/binarylog_end2end_test.go index c9e3be4c0aba..3943353a57cf 100644 --- a/binarylog/binarylog_end2end_test.go +++ b/binarylog/binarylog_end2end_test.go @@ -78,7 +78,7 @@ func (s *testBinLogSink) Write(e *binlogpb.GrpcLogEntry) error { func (s *testBinLogSink) Close() error { return nil } -// Returns all client entris if client is true, otherwise return all server +// Returns all client entries if client is true, otherwise return all server // entries. func (s *testBinLogSink) logEntries(client bool) []*binlogpb.GrpcLogEntry { logger := binlogpb.GrpcLogEntry_LOGGER_SERVER diff --git a/clientconn.go b/clientconn.go index cf1a7ec6895f..7f6c24ca22ee 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1576,7 +1576,7 @@ func (ac *addrConn) tearDown(err error) { } else { // Hard close the transport when the channel is entering idle or is // being shutdown. In the case where the channel is being shutdown, - // closing of transports is also taken care of by cancelation of cc.ctx. + // closing of transports is also taken care of by cancellation of cc.ctx. // But in the case where the channel is entering idle, we need to // explicitly close the transports here. Instead of distinguishing // between these two cases, it is simpler to close the transport diff --git a/clientconn_test.go b/clientconn_test.go index 34d22c684eee..468f8752346a 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -642,7 +642,7 @@ func (s) TestConnectParamsWithMinConnectTimeout(t *testing.T) { defer conn.Close() if got := conn.dopts.minConnectTimeout(); got != mct { - t.Errorf("unexpect minConnectTimeout on the connection: %v, want %v", got, mct) + t.Errorf("unexpected minConnectTimeout on the connection: %v, want %v", got, mct) } } diff --git a/credentials/alts/internal/conn/aeadrekey.go b/credentials/alts/internal/conn/aeadrekey.go index 43726e877b8b..7e4bfee88861 100644 --- a/credentials/alts/internal/conn/aeadrekey.go +++ b/credentials/alts/internal/conn/aeadrekey.go @@ -49,7 +49,7 @@ func (k KeySizeError) Error() string { // newRekeyAEAD creates a new instance of aes128gcm with rekeying. // The key argument should be 44 bytes, the first 32 bytes are used as a key -// for HKDF-expand and the remainining 12 bytes are used as a random mask for +// for HKDF-expand and the remaining 12 bytes are used as a random mask for // the counter. 
func newRekeyAEAD(key []byte) (*rekeyAEAD, error) { k := len(key) diff --git a/credentials/alts/internal/conn/aes128gcmrekey.go b/credentials/alts/internal/conn/aes128gcmrekey.go index 6a9035ea254f..b5bbb5497aa3 100644 --- a/credentials/alts/internal/conn/aes128gcmrekey.go +++ b/credentials/alts/internal/conn/aes128gcmrekey.go @@ -51,7 +51,7 @@ type aes128gcmRekey struct { // NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying // for ALTS record. The key argument should be 44 bytes, the first 32 bytes -// are used as a key for HKDF-expand and the remainining 12 bytes are used +// are used as a key for HKDF-expand and the remaining 12 bytes are used // as a random mask for the counter. func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) { inCounter := NewInCounter(side, overflowLenAES128GCMRekey) diff --git a/credentials/alts/internal/conn/record_test.go b/credentials/alts/internal/conn/record_test.go index 0b4177a581ed..c50fb4c82251 100644 --- a/credentials/alts/internal/conn/record_test.go +++ b/credentials/alts/internal/conn/record_test.go @@ -248,7 +248,7 @@ func testWriteLargeData(t *testing.T, rp string) { // buffer size. clientConn, serverConn := newConnPair(rp, nil, nil) // Message size is intentionally chosen to not be multiple of - // payloadLengthLimtit. + // payloadLengthLimit. msgSize := altsWriteBufferMaxSize + (100 * 1024) clientMsg := make([]byte, msgSize) for i := 0; i < msgSize; i++ { diff --git a/credentials/local/local.go b/credentials/local/local.go index b628b2fb53e7..c0490f2c4edd 100644 --- a/credentials/local/local.go +++ b/credentials/local/local.go @@ -108,7 +108,7 @@ func (c *localTC) Clone() credentials.TransportCredentials { } // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. -// Since this feature is specific to TLS (SNI + hostname verification check), it does not take any effet for local credentials. +// Since this feature is specific to TLS (SNI + hostname verification check), it does not take any effect for local credentials. func (c *localTC) OverrideServerName(serverNameOverride string) error { c.info.ServerName = serverNameOverride return nil diff --git a/credentials/sts/sts.go b/credentials/sts/sts.go index 0110201a98f3..1b06b46e7d4b 100644 --- a/credentials/sts/sts.go +++ b/credentials/sts/sts.go @@ -367,7 +367,7 @@ type requestParameters struct { ActorTokenType string `json:"actor_token_type,omitempty"` } -// nesponseParameters stores all attributes sent as JSON in a successful STS +// responseParameters stores all attributes sent as JSON in a successful STS // response. These attributes are defined in // https://tools.ietf.org/html/rfc8693#section-2.2.1. 
type responseParameters struct { diff --git a/examples/examples_test.sh b/examples/examples_test.sh index 5e95120498c7..aee88e27bb5c 100755 --- a/examples/examples_test.sh +++ b/examples/examples_test.sh @@ -188,7 +188,7 @@ for example in ${EXAMPLES[@]}; do $(cat $CLIENT_LOG) " else - pass "client successfully communitcated with server" + pass "client successfully communicated with server" fi # Check server log for expected output if expecting an diff --git a/examples/features/load_balancing/README.md b/examples/features/load_balancing/README.md index f874f3b8f6fe..3c9898df7838 100644 --- a/examples/features/load_balancing/README.md +++ b/examples/features/load_balancing/README.md @@ -61,8 +61,8 @@ this is examples/load_balancing (from :50051) The second client is configured to use `round_robin`. `round_robin` connects to all the addresses it sees, and sends an RPC to each backend one at a time in -order. E.g. the first RPC will be sent to backend-1, the second RPC will be be -sent to backend-2, and the third RPC will be be sent to backend-1 again. +order. E.g. the first RPC will be sent to backend-1, the second RPC will be +sent to backend-2, and the third RPC will be sent to backend-1 again. ``` this is examples/load_balancing (from :50051) diff --git a/examples/features/multiplex/client/main.go b/examples/features/multiplex/client/main.go index 27423a3f5340..3e60c05068cd 100644 --- a/examples/features/multiplex/client/main.go +++ b/examples/features/multiplex/client/main.go @@ -72,7 +72,7 @@ func main() { fmt.Println() fmt.Println("--- calling routeguide.RouteGuide/GetFeature ---") - // Make a routeguild client with the same ClientConn. + // Make a routeguide client with the same ClientConn. rgc := ecpb.NewEchoClient(conn) callUnaryEcho(rgc, "this is examples/multiplex") } diff --git a/examples/features/orca/client/main.go b/examples/features/orca/client/main.go index 16e916ec5016..414cb94f27ce 100644 --- a/examples/features/orca/client/main.go +++ b/examples/features/orca/client/main.go @@ -59,7 +59,7 @@ func main() { ticker := time.NewTicker(time.Second) for range ticker.C { func() { - // Use an anonymous function to ensure context cancelation via defer. + // Use an anonymous function to ensure context cancellation via defer. ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if _, err := c.UnaryEcho(ctx, &pb.EchoRequest{Message: "test echo message"}); err != nil { diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go index 0ffbd93b3922..6675244c5f55 100644 --- a/gcp/observability/logging.go +++ b/gcp/observability/logging.go @@ -248,7 +248,7 @@ type binaryMethodLogger struct { clientSide bool } -// buildGCPLoggingEntry converts the binary log log entry into a gcp logging +// buildGCPLoggingEntry converts the binary log entry into a gcp logging // entry. func (bml *binaryMethodLogger) buildGCPLoggingEntry(ctx context.Context, c iblog.LogEntryConfig) gcplogging.Entry { binLogEntry := bml.mlb.Build(c) diff --git a/gcp/observability/logging_test.go b/gcp/observability/logging_test.go index 9ed55ccf2717..841acd69f9ca 100644 --- a/gcp/observability/logging_test.go +++ b/gcp/observability/logging_test.go @@ -548,7 +548,7 @@ func (s) TestServerRPCEventsLogAll(t *testing.T) { // Client and Server RPC Events configured to log. 
Both sides should log and // share the exporter, so the exporter should receive the collective amount of // calls for both a client stream (corresponding to a Client RPC Event) and a -// server stream (corresponding ot a Server RPC Event). The specificity of the +// server stream (corresponding to a Server RPC Event). The specificity of the // entries are tested in previous tests. func (s) TestBothClientAndServerRPCEvents(t *testing.T) { fle := &fakeLoggingExporter{ diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index e7da642acee6..370dc6ec484c 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -191,7 +191,7 @@ func (s) TestRefuseStartWithInvalidPatterns(t *testing.T) { } } -// TestRefuseStartWithExcludeAndWildCardAll tests the sceanrio where an +// TestRefuseStartWithExcludeAndWildCardAll tests the scenario where an // observability configuration is provided with client RPC event specifying to // exclude, and which matches on the '*' wildcard (any). This should cause an // error when trying to start the observability system. diff --git a/grpclog/grpclog.go b/grpclog/grpclog.go index 16928c9cb993..c95ed3197a61 100644 --- a/grpclog/grpclog.go +++ b/grpclog/grpclog.go @@ -103,7 +103,7 @@ func Fatalf(format string, args ...any) { } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalln(args ...any) { grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. diff --git a/internal/balancer/gracefulswitch/gracefulswitch_test.go b/internal/balancer/gracefulswitch/gracefulswitch_test.go index 2f99ad9cc372..c9b17dc150f6 100644 --- a/internal/balancer/gracefulswitch/gracefulswitch_test.go +++ b/internal/balancer/gracefulswitch/gracefulswitch_test.go @@ -604,7 +604,7 @@ func (s) TestPendingReplacedByAnotherPending(t *testing.T) { if err != nil { t.Fatalf("error constructing newSubConn in gsb: %v", err) } - // This picker never returns an error, which can help this this test verify + // This picker never returns an error, which can help this test verify // whether this cached state will get cleared on a new pending balancer // (will replace it with a picker that always errors). pendBal.updateState(balancer.State{ @@ -672,7 +672,7 @@ func (p *neverErrPicker) Pick(info balancer.PickInfo) (balancer.PickResult, erro // TestUpdateSubConnStateRace tests the race condition when the graceful switch // load balancer receives a SubConnUpdate concurrently with an UpdateState() -// call, which can cause the balancer to forward the update to to be closed and +// call, which can cause the balancer to forward the update to be closed and // cleared. The balancer API guarantees to never call any method the balancer // after a Close() call, and the test verifies that doesn't happen within the // graceful switch load balancer. diff --git a/internal/balancergroup/balancergroup.go b/internal/balancergroup/balancergroup.go index a31f6f9a49e4..31c9cdc9d026 100644 --- a/internal/balancergroup/balancergroup.go +++ b/internal/balancergroup/balancergroup.go @@ -204,7 +204,7 @@ type BalancerGroup struct { // after it's closed. // // We don't share the mutex to avoid deadlocks (e.g. a call to sub-balancer - // may call back to balancer group inline. It causes deaclock if they + // may call back to balancer group inline. It causes deadlock if they // require the same mutex). 
// // We should never need to hold multiple locks at the same time in this @@ -218,7 +218,7 @@ type BalancerGroup struct { // guards the map from SubConn to balancer ID, so updateSubConnState needs // to hold it shortly to potentially delete from the map. // - // UpdateState is called by the balancer state aggretator, and it will + // UpdateState is called by the balancer state aggregator, and it will // decide when and whether to call. // // The corresponding boolean incomingStarted is used to stop further updates @@ -292,7 +292,7 @@ func (bg *BalancerGroup) Start() { // AddWithClientConn adds a balancer with the given id to the group. The // balancer is built with a balancer builder registered with balancerName. The // given ClientConn is passed to the newly built balancer instead of the -// onepassed to balancergroup.New(). +// one passed to balancergroup.New(). // // TODO: Get rid of the existing Add() API and replace it with this. func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.ClientConn) error { diff --git a/internal/balancergroup/balancergroup_test.go b/internal/balancergroup/balancergroup_test.go index 9de47f54504f..e602bf660bdd 100644 --- a/internal/balancergroup/balancergroup_test.go +++ b/internal/balancergroup/balancergroup_test.go @@ -165,7 +165,7 @@ func (s) TestBalancerGroup_start_close(t *testing.T) { // - hold a lock and send updates to balancer (e.g. update resolved addresses) // - the balancer calls back (NewSubConn or update picker) in line // -// The callback will try to hold hte same lock again, which will cause a +// The callback will try to hold the same lock again, which will cause a // deadlock. // // This test starts the balancer group with a test balancer, will updates picker @@ -345,7 +345,7 @@ func (s) TestBalancerGroup_locality_caching_close_group(t *testing.T) { // Sub-balancers in cache will be closed if not re-added within timeout, and // subConns will be shut down. -func (s) TestBalancerGroup_locality_caching_not_readd_within_timeout(t *testing.T) { +func (s) TestBalancerGroup_locality_caching_not_re_added_within_timeout(t *testing.T) { _, _, cc, addrToSC := initBalancerGroupForCachingTest(t, time.Second) // The sub-balancer is not re-added within timeout. The subconns should be @@ -385,7 +385,7 @@ func (*noopBalancerBuilderWrapper) Name() string { // After removing a sub-balancer, re-add with same ID, but different balancer // builder. Old subconns should be shut down, and new subconns should be created. -func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *testing.T) { +func (s) TestBalancerGroup_locality_caching_re_add_with_different_builder(t *testing.T) { gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t, defaultTestTimeout) // Re-add sub-balancer-1, but with a different balancer builder. The diff --git a/internal/channelz/channelmap.go b/internal/channelz/channelmap.go index dfe18b08925d..bb531225d5f4 100644 --- a/internal/channelz/channelmap.go +++ b/internal/channelz/channelmap.go @@ -46,7 +46,7 @@ type entry interface { // channelMap is the storage data structure for channelz. // -// Methods of channelMap can be divided in two two categories with respect to +// Methods of channelMap can be divided into two categories with respect to // locking. // // 1. Methods acquire the global lock.
diff --git a/internal/grpcsync/callback_serializer_test.go b/internal/grpcsync/callback_serializer_test.go index 34aa5191612e..10f5ffca7797 100644 --- a/internal/grpcsync/callback_serializer_test.go +++ b/internal/grpcsync/callback_serializer_test.go @@ -153,7 +153,7 @@ func (s) TestCallbackSerializer_Schedule_Close(t *testing.T) { <-ctx.Done() }) - // Schedule a bunch of callbacks. These should be exeuted since the are + // Schedule a bunch of callbacks. These should be executed since they are // scheduled before the serializer is closed. const numCallbacks = 10 callbackCh := make(chan int, numCallbacks) diff --git a/internal/idle/idle_test.go b/internal/idle/idle_test.go index d0fc685d3908..a4b0e42b8582 100644 --- a/internal/idle/idle_test.go +++ b/internal/idle/idle_test.go @@ -124,7 +124,7 @@ func (s) TestManager_Disabled(t *testing.T) { // The idleness manager is explicitly not closed here. But since the manager // is disabled, it will not start the run goroutine, and hence we expect the - // leakchecker to not find any leaked goroutines. + // leak checker to not find any leaked goroutines. } // TestManager_Enabled_TimerFires tests the case where the idle manager @@ -242,7 +242,7 @@ func (s) TestManager_Enabled_ActiveSinceLastCheck(t *testing.T) { case <-time.After(defaultTestShortTimeout): } - // Since the unrary RPC terminated and we have no other active RPCs, the + // Since the unary RPC terminated and we have no other active RPCs, the // channel must move to idle eventually. select { case <-enforcer.enterIdleCh: diff --git a/internal/resolver/dns/dns_resolver_test.go b/internal/resolver/dns/dns_resolver_test.go index 57780f4d68dc..4e6a4d544629 100644 --- a/internal/resolver/dns/dns_resolver_test.go +++ b/internal/resolver/dns/dns_resolver_test.go @@ -1024,7 +1024,7 @@ func (s) TestCustomAuthority(t *testing.T) { wantAuthority: "[::1]:53", }, { - name: "ipv6 authority with brackers and non-default DNS port", + name: "ipv6 authority with brackets and non-default DNS port", authority: "[::1]:123", wantAuthority: "[::1]:123", }, diff --git a/internal/tcp_keepalive_unix.go b/internal/tcp_keepalive_unix.go index 078137b7fd70..7e7aaa546368 100644 --- a/internal/tcp_keepalive_unix.go +++ b/internal/tcp_keepalive_unix.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) diff --git a/internal/tcp_keepalive_windows.go b/internal/tcp_keepalive_windows.go index fd7d43a8907b..d5c1085eeaec 100644 --- a/internal/tcp_keepalive_windows.go +++ b/internal/tcp_keepalive_windows.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. 
Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) diff --git a/internal/testutils/balancer.go b/internal/testutils/balancer.go index c65be16be4b6..f8d3fca1682d 100644 --- a/internal/testutils/balancer.go +++ b/internal/testutils/balancer.go @@ -317,7 +317,7 @@ func (tcc *BalancerClientConn) WaitForPicker(ctx context.Context, f func(balance // iteration until where it goes wrong. // // Step 2. the return values of f should be repetitions of the same permutation. -// E.g. if want is {a,a,b}, the check failes if f returns: +// E.g. if want is {a,a,b}, the check fails if f returns: // - {a,b,a,b,a,a}: though it satisfies step 1, the second iteration is not // repeating the first iteration. // diff --git a/internal/testutils/state.go b/internal/testutils/state.go index 246b07a7ea19..79510b7cfac4 100644 --- a/internal/testutils/state.go +++ b/internal/testutils/state.go @@ -37,7 +37,7 @@ type StateChanger interface { } // StayConnected makes sc stay connected by repeatedly calling sc.Connect() -// until the state becomes Shutdown or until ithe context expires. +// until the state becomes Shutdown or until the context expires. func StayConnected(ctx context.Context, sc StateChanger) { for { state := sc.GetState() diff --git a/internal/testutils/xds/e2e/clientresources.go b/internal/testutils/xds/e2e/clientresources.go index d511b473000e..c6f77c60fa72 100644 --- a/internal/testutils/xds/e2e/clientresources.go +++ b/internal/testutils/xds/e2e/clientresources.go @@ -63,7 +63,7 @@ const ( // is required. Only the server presents an identity certificate in this // configuration. SecurityLevelTLS - // SecurityLevelMTLS is used when security ocnfiguration corresponding to + // SecurityLevelMTLS is used when security configuration corresponding to // mTLS is required. Both client and server present identity certificates in // this configuration. SecurityLevelMTLS @@ -789,7 +789,7 @@ func EndpointResourceWithOptions(opts EndpointOptions) *v3endpointpb.ClusterLoad // DefaultServerListenerWithRouteConfigName returns a basic xds Listener // resource to be used on the server side. The returned Listener resource -// contains a RouteCongiguration resource name that needs to be resolved. +// contains a RouteConfiguration resource name that needs to be resolved. func DefaultServerListenerWithRouteConfigName(host string, port uint32, secLevel SecurityLevel, routeName string) *v3listenerpb.Listener { return defaultServerListenerCommon(host, port, secLevel, routeName, false) } diff --git a/internal/testutils/xds_bootsrap.go b/internal/testutils/xds_bootstrap.go similarity index 100% rename from internal/testutils/xds_bootsrap.go rename to internal/testutils/xds_bootstrap.go diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index adafb2c1104b..63f4f1a9b4e9 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -485,7 +485,7 @@ const ( // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While +// thereby closely resembling a round-robin scheduling over all streams. 
While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control. type loopyWriter struct { diff --git a/internal/transport/grpchttp2/errors.go b/internal/transport/grpchttp2/errors.go index c9c3d18c8a38..34c4853456ca 100644 --- a/internal/transport/grpchttp2/errors.go +++ b/internal/transport/grpchttp2/errors.go @@ -44,7 +44,7 @@ const ( ErrCodeCompression ErrCode = 0x9 ErrCodeConnect ErrCode = 0xa ErrCodeEnhanceYourCalm ErrCode = 0xb - ErrCodeIndaequateSecurity ErrCode = 0xc + ErrCodeInadequateSecurity ErrCode = 0xc ErrCodeHTTP11Required ErrCode = 0xd ) @@ -61,7 +61,7 @@ var errorCodeNames = map[ErrCode]string{ ErrCodeCompression: "COMPRESSION_ERROR", ErrCodeConnect: "CONNECT_ERROR", ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", - ErrCodeIndaequateSecurity: "INADEQUATE_SECURITY", + ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", } diff --git a/internal/transport/grpchttp2/errors_test.go b/internal/transport/grpchttp2/errors_test.go index 133efdd2ffa8..8218f0ff798d 100644 --- a/internal/transport/grpchttp2/errors_test.go +++ b/internal/transport/grpchttp2/errors_test.go @@ -50,7 +50,7 @@ func (s) TestErrorCodeString(t *testing.T) { {err: ErrCodeCompression, want: "COMPRESSION_ERROR"}, {err: ErrCodeConnect, want: "CONNECT_ERROR"}, {err: ErrCodeEnhanceYourCalm, want: "ENHANCE_YOUR_CALM"}, - {err: ErrCodeIndaequateSecurity, want: "INADEQUATE_SECURITY"}, + {err: ErrCodeInadequateSecurity, want: "INADEQUATE_SECURITY"}, {err: ErrCodeHTTP11Required, want: "HTTP_1_1_REQUIRED"}, // Type casting known error case {err: ErrCode(0x1), want: "PROTOCOL_ERROR"}, diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 3c63c706986d..f62ff72d624a 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -229,7 +229,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }(conn) - // The following defer and goroutine monitor the connectCtx for cancelation + // The following defer and goroutine monitor the connectCtx for cancellation // and deadline. On context expiration, the connection is hard closed and // this function will naturally fail as a result. Otherwise, the defer // waits for the goroutine to exit to prevent the context from being @@ -1222,7 +1222,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause - // of this cancelation. Alter the status code accordingly. + // of this cancellation. Alter the status code accordingly. 
statusCode = codes.DeadlineExceeded } } @@ -1307,7 +1307,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 4b39c0ade97c..ef34e5cfff89 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -408,7 +408,7 @@ func (s *Stream) TrailersOnly() bool { return s.noHeaders } -// Trailer returns the cached trailer metedata. Note that if it is not called +// Trailer returns the cached trailer metadata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. // It can be safely read only after stream has ended that is either read @@ -509,7 +509,7 @@ func (s *Stream) Read(p []byte) (n int, err error) { return io.ReadFull(s.trReader, p) } -// tranportReader reads all the data available for this Stream from the transport and +// transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. @@ -798,7 +798,7 @@ var ( // connection is draining. This could be caused by goaway or balancer // removing the address. errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indiacte application + // errStreamDone is returned from write at the client side to indicate application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 7887c8be8647..ad0604723270 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -718,7 +718,7 @@ func (s) TestLargeMessageWithDelayRead(t *testing.T) { t.Fatalf("%v.NewStream(_, _) = _, %v, want _, ", ct, err) return } - // Wait for server's handerler to be initialized + // Wait for server's handler to be initialized select { case <-ready: case <-ctx.Done(): @@ -870,7 +870,7 @@ func (s) TestLargeMessageSuspension(t *testing.T) { if err != nil { t.Fatalf("failed to open stream: %v", err) } - // Launch a goroutine simillar to the stream monitoring goroutine in + // Launch a goroutine similar to the stream monitoring goroutine in // stream.go to keep track of context timeout and call CloseStream. go func() { <-ctx.Done() @@ -1315,7 +1315,7 @@ func (s) TestClientHonorsConnectContext(t *testing.T) { } }() - // Test context cancelation. + // Test context cancellation. timeBefore := time.Now() connectCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) time.AfterFunc(100*time.Millisecond, cancel) @@ -1328,7 +1328,7 @@ func (s) TestClientHonorsConnectContext(t *testing.T) { } t.Logf("NewClientTransport() = _, %v", err) if time.Since(timeBefore) > 3*time.Second { - t.Fatalf("NewClientTransport returned > 2.9s after context cancelation") + t.Fatalf("NewClientTransport returned > 2.9s after context cancellation") } // Test context deadline. 
diff --git a/internal/xds/bootstrap/bootstrap.go b/internal/xds/bootstrap/bootstrap.go index a981dcf49365..94aa375f83ec 100644 --- a/internal/xds/bootstrap/bootstrap.go +++ b/internal/xds/bootstrap/bootstrap.go @@ -513,7 +513,7 @@ func (c *Config) UnmarshalJSON(data []byte) error { } bc, err := parser.ParseConfig(nameAndConfig.Config) if err != nil { - return fmt.Errorf("xds: config parsing for certifcate provider plugin %q failed during bootstrap: %v", name, err) + return fmt.Errorf("xds: config parsing for certificate provider plugin %q failed during bootstrap: %v", name, err) } cpcCfgs[instance] = bc } diff --git a/internal/xds/rbac/rbac_engine_test.go b/internal/xds/rbac/rbac_engine_test.go index 44f0e495864e..847fe0c3a98e 100644 --- a/internal/xds/rbac/rbac_engine_test.go +++ b/internal/xds/rbac/rbac_engine_test.go @@ -319,7 +319,7 @@ func (s) TestNewChainEngine(t *testing.T) { }, }, { - name: "MatcherToNotPrinicipal", + name: "MatcherToNotPrincipal", policies: []*v3rbacpb.RBAC{ { Action: v3rbacpb.RBAC_ALLOW, @@ -336,7 +336,7 @@ func (s) TestNewChainEngine(t *testing.T) { }, }, }, - // PrinicpalProductViewer tests the construction of a chained engine + // PrincipalProductViewer tests the construction of a chained engine // with a policy that allows any downstream to send a GET request on a // certain path. { diff --git a/interop/client/client.go b/interop/client/client.go index 379fd017c27e..f50139fe6dbb 100644 --- a/interop/client/client.go +++ b/interop/client/client.go @@ -60,7 +60,7 @@ const ( ) var ( - caFile = flag.String("ca_file", "", "The file containning the CA root cert file") + caFile = flag.String("ca_file", "", "The file containing the CA root cert file") useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true") useALTS = flag.Bool("use_alts", false, "Connection uses ALTS if true (this option can only be used on GCP)") customCredentialsType = flag.String("custom_credentials_type", "", "Custom creds to use, excluding TLS or ALTS") diff --git a/interop/grpclb_fallback/client_linux.go b/interop/grpclb_fallback/client_linux.go index b1cfde71134e..f8a1ac37b81c 100644 --- a/interop/grpclb_fallback/client_linux.go +++ b/interop/grpclb_fallback/client_linux.go @@ -132,7 +132,7 @@ func waitForFallbackAndDoRPCs(client testgrpc.TestServiceClient, fallbackDeadlin for time.Now().Before(fallbackDeadline) { g := doRPCAndGetPath(client, 20*time.Second) if g == testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_FALLBACK { - infoLog.Println("Made one successul RPC to a fallback. Now expect the same for the rest.") + infoLog.Println("Made one successful RPC to a fallback. Now expect the same for the rest.") fellBack = true break } else if g == testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_BACKEND { diff --git a/mem/buffers_test.go b/mem/buffers_test.go index b761fda79e38..16e9a8651b9e 100644 --- a/mem/buffers_test.go +++ b/mem/buffers_test.go @@ -136,7 +136,7 @@ func newTestBufferPool() *testBufferPool { // Tests that a buffer created with Copy, which when later freed, returns the underlying // byte slice to the buffer pool. 
-func (s) TestBufer_CopyAndFree(t *testing.T) { +func (s) TestBuffer_CopyAndFree(t *testing.T) { data := "abcd" testPool := newTestBufferPool() diff --git a/metadata/metadata.go b/metadata/metadata.go index 1c6db632a7a3..d2e15253bbfb 100644 --- a/metadata/metadata.go +++ b/metadata/metadata.go @@ -223,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // Case insenitive comparison: MD is a map, and there's no guarantee + // Case insensitive comparison: MD is a map, and there's no guarantee // that the MD attached to the context is created using our helper // functions. if strings.EqualFold(k, key) { diff --git a/orca/server_metrics.go b/orca/server_metrics.go index 67d1fa9d7f2b..bb664d6a0814 100644 --- a/orca/server_metrics.go +++ b/orca/server_metrics.go @@ -108,7 +108,7 @@ type ServerMetricsRecorder interface { // SetMemoryUtilization sets the memory utilization server metric. Must be // in the range [0, 1]. SetMemoryUtilization(float64) - // DeleteMemoryUtilization deletes the memory utiliztion server metric to + // DeleteMemoryUtilization deletes the memory utilization server metric to // prevent it from being sent. DeleteMemoryUtilization() diff --git a/profiling/cmd/catapult.go b/profiling/cmd/catapult.go index 4b3848d0d198..6669b2758f96 100644 --- a/profiling/cmd/catapult.go +++ b/profiling/cmd/catapult.go @@ -300,7 +300,7 @@ func timerBeginIsBefore(ti *ppb.Timer, tj *ppb.Timer) bool { return ti.BeginSec < tj.BeginSec } -// streamStatsCatapulJSON receives a *snapshot and the name of a JSON file to +// streamStatsCatapultJSON receives a *snapshot and the name of a JSON file to // write to. The grpc-go profiling snapshot is processed and converted to a // JSON format that can be understood by trace-viewer. func streamStatsCatapultJSON(s *snapshot, streamStatsCatapultJSONFileName string) (err error) { diff --git a/reflection/internal/internal.go b/reflection/internal/internal.go index 36ee65075077..902fc6d35c27 100644 --- a/reflection/internal/internal.go +++ b/reflection/internal/internal.go @@ -18,7 +18,7 @@ // Package internal contains code that is shared by both reflection package and // the test package. The packages are split in this way inorder to avoid -// depenedency to deprecated package github.com/golang/protobuf. +// dependency to deprecated package github.com/golang/protobuf. package internal import ( diff --git a/scripts/vet.sh b/scripts/vet.sh index 92b0b6f067cb..4ca894864926 100755 --- a/scripts/vet.sh +++ b/scripts/vet.sh @@ -120,7 +120,7 @@ XXXXX PleaseIgnoreUnused' # Error for any package comments not in generated code. noret_grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" - # Ignore a false positive when operands have side affectes.
+ # Ignore a false positive when operands have side effects. # TODO(https://github.com/dominikh/go-tools/issues/54): Remove this once the issue is fixed in staticcheck. noret_grep "(SA4000)" "${SC_OUT}" | not grep -v -e "crl.go:[0-9]\+:[0-9]\+: identical expressions on the left and right side of the '||' operator (SA4000)" diff --git a/security/advancedtls/testdata/openssl-ca.cnf b/security/advancedtls/testdata/openssl-ca.cnf index 196a50c26471..0dc31e45b0f9 100644 --- a/security/advancedtls/testdata/openssl-ca.cnf +++ b/security/advancedtls/testdata/openssl-ca.cnf @@ -1,5 +1,5 @@ base_dir = . -certificate = $base_dir/cacert.pem # The CA certifcate +certificate = $base_dir/cacert.pem # The CA certificate private_key = $base_dir/cakey.pem # The CA private key new_certs_dir = $base_dir # Location for new certs after signing database = $base_dir/index.txt # Database index file diff --git a/stream.go b/stream.go index 2a9c83641fe7..e4699109c35c 100644 --- a/stream.go +++ b/stream.go @@ -1187,7 +1187,7 @@ func (a *csAttempt) finish(err error) { // // It's expected that the given transport is either the same one in addrConn, or // is already closed. To avoid race, transport is specified separately, instead -// of using ac.transpot. +// of using ac.transport. // // Main difference between this and ClientConn.NewStream: // - no retry diff --git a/test/balancer_switching_test.go b/test/balancer_switching_test.go index 34fd871b65c7..889c00432323 100644 --- a/test/balancer_switching_test.go +++ b/test/balancer_switching_test.go @@ -334,7 +334,7 @@ func (s) TestBalancerSwitch_grpclbNotRegistered(t *testing.T) { // apply the grpclb policy. But since grpclb is not registered, it should // fallback to the default LB policy which is pick_first. The ClientConn is // also expected to filter out the grpclb address when sending the addresses - // list fo pick_first. + // list for pick_first. grpclbAddr := []resolver.Address{{Addr: "non-existent-grpclb-server-address"}} grpclbConfig := parseServiceConfig(t, r, `{"loadBalancingPolicy": "grpclb"}`) state := resolver.State{ServiceConfig: grpclbConfig, Addresses: addrs} diff --git a/test/bufconn/bufconn.go b/test/bufconn/bufconn.go index 3f77f4876eb8..e6eb4feebb99 100644 --- a/test/bufconn/bufconn.go +++ b/test/bufconn/bufconn.go @@ -109,7 +109,7 @@ type pipe struct { mu sync.Mutex // buf contains the data in the pipe. It is a ring buffer of fixed capacity, - // with r and w pointing to the offset to read and write, respsectively. + // with r and w pointing to the offset to read and write, respectively. // // Data is read between [r, w) and written to [w, r), wrapping around the end // of the slice if necessary.
diff --git a/test/channelz_test.go b/test/channelz_test.go index 5bfa0f570083..0a9b88bddcc7 100644 --- a/test/channelz_test.go +++ b/test/channelz_test.go @@ -548,7 +548,7 @@ func (s) TestCZServerListenSocketDeletion(t *testing.T) { s.Stop() } -func (s) TestCZRecusivelyDeletionOfEntry(t *testing.T) { +func (s) TestCZRecursiveDeletionOfEntry(t *testing.T) { // +--+TopChan+---+ // | | // v v @@ -1525,7 +1525,7 @@ func (s) TestCZChannelTraceCreationDeletion(t *testing.T) { trace := tcs[0].Trace() for _, e := range trace.Events { if e.RefID == nestedConn && e.RefType != channelz.RefChannel { - return false, fmt.Errorf("nested channel trace event shoud have RefChannel as RefType") + return false, fmt.Errorf("nested channel trace event should have RefChannel as RefType") } } ncm := channelz.GetChannel(nestedConn) @@ -1608,7 +1608,7 @@ func (s) TestCZSubChannelTraceCreationDeletion(t *testing.T) { trace := tcs[0].Trace() for _, e := range trace.Events { if e.RefID == subConn && e.RefType != channelz.RefSubChannel { - return false, fmt.Errorf("subchannel trace event shoud have RefType to be RefSubChannel") + return false, fmt.Errorf("subchannel trace event should have RefType to be RefSubChannel") } } scm := channelz.GetSubChannel(subConn) @@ -1989,7 +1989,7 @@ func (s) TestCZChannelConnectivityState(t *testing.T) { // example: // Channel Created - // Adressses resolved (from empty address state): "localhost:40467" + // Addresses resolved (from empty address state): "localhost:40467" // SubChannel (id: 4[]) Created // Channel's connectivity state changed to CONNECTING // Channel's connectivity state changed to READY diff --git a/test/end2end_test.go b/test/end2end_test.go index b16085398946..1c80cc9b5a6b 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -511,13 +511,13 @@ type test struct { customDialOptions []grpc.DialOption resolverScheme string - // These are are set once startServer is called. The common case is to have + // These are set once startServer is called. The common case is to have // only one testServer. srv stopper hSrv healthgrpc.HealthServer srvAddr string - // These are are set once startServers is called. + // These are set once startServers is called. srvs []stopper hSrvs []healthgrpc.HealthServer srvAddrs []string @@ -2919,7 +2919,7 @@ func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) { } } -// TestMalformedHTTP2Metadata verfies the returned error when the client +// TestMalformedHTTP2Metadata verifies the returned error when the client // sends an illegal metadata. func (s) TestMalformedHTTP2Metadata(t *testing.T) { for _, e := range listTestEnv() { @@ -5849,7 +5849,7 @@ func (s) TestClientSettingsFloodCloseConn(t *testing.T) { t.Fatalf("Unexpected frame: %v", f) } - // Flood settings frames until a timeout occurs, indiciating the server has + // Flood settings frames until a timeout occurs, indicating the server has // stopped reading from the connection, then close the conn. 
for { conn.SetWriteDeadline(time.Now().Add(50 * time.Millisecond)) diff --git a/test/healthcheck_test.go b/test/healthcheck_test.go index b03c47a31426..6b77d8996dee 100644 --- a/test/healthcheck_test.go +++ b/test/healthcheck_test.go @@ -1008,7 +1008,7 @@ func testHealthWatchMultipleClients(t *testing.T, e env) { healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_NOT_SERVING) } -// TestHealthWatchSameStatusmakes a streaming Watch() RPC on the health server +// TestHealthWatchSameStatus makes a streaming Watch() RPC on the health server // and makes sure that the health status of the server is as expected after // multiple calls to SetServingStatus with the same status. func (s) TestHealthWatchSameStatus(t *testing.T) { diff --git a/test/stream_cleanup_test.go b/test/stream_cleanup_test.go index 0f705bab2507..ca0260af1453 100644 --- a/test/stream_cleanup_test.go +++ b/test/stream_cleanup_test.go @@ -114,7 +114,7 @@ func (s) TestStreamCleanupAfterSendStatus(t *testing.T) { // It will close the connection if there's no active streams. This won't // happen because of the pending stream. But if there's a bug in stream // cleanup that causes stream to be removed too aggressively, the connection - // will be closd and the stream will be broken. + // will be closed and the stream will be broken. gracefulStopDone := make(chan struct{}) go func() { defer close(gracefulStopDone) diff --git a/test/xds/xds_client_federation_test.go b/test/xds/xds_client_federation_test.go index 55d428d88928..dcecc87a4b05 100644 --- a/test/xds/xds_client_federation_test.go +++ b/test/xds/xds_client_federation_test.go @@ -150,7 +150,7 @@ func (s) TestClientSideFederation(t *testing.T) { // supported with new xdstp style names for LDS only while using the old style // for other resources. This test in addition also checks that when service name // contains escapable characters, we "fully" encode it for looking up -// VirtualHosts in xDS RouteConfigurtion. +// VirtualHosts in xDS RouteConfiguration. func (s) TestClientSideFederationWithOnlyXDSTPStyleLDS(t *testing.T) { // Start a management server as a sophisticated authority. const authority = "traffic-manager.xds.notgoogleapis.com" diff --git a/test/xds/xds_server_rbac_test.go b/test/xds/xds_server_rbac_test.go index 70000e2a8a2c..831f2512a96a 100644 --- a/test/xds/xds_server_rbac_test.go +++ b/test/xds/xds_server_rbac_test.go @@ -595,7 +595,7 @@ func (s) TestRBACHTTPFilter(t *testing.T) { // This test tests that an RBAC Config with Action.LOG configured allows // every RPC through. This maps to the line "At this time, if the // RBAC.action is Action.LOG then the policy will be completely ignored, - // as if RBAC was not configurated." from A41 + // as if RBAC was not configured." from A41 { name: "action-log", rbacCfg: &rpb.RBAC{ diff --git a/testdata/README.md b/testdata/README.md index c0cb7187098a..c621e10846ce 100644 --- a/testdata/README.md +++ b/testdata/README.md @@ -1,3 +1,3 @@ This directory contains x509 certificates used in cloud-to-prod interop tests. -For tests within gRPC-Go repo, please use the files in testsdata/x509 +For tests within gRPC-Go repo, please use the files in testdata/x509 directory. 
diff --git a/xds/internal/balancer/cdsbalancer/aggregate_cluster_test.go b/xds/internal/balancer/cdsbalancer/aggregate_cluster_test.go index a37b1609fe5c..ded8c13448d8 100644 --- a/xds/internal/balancer/cdsbalancer/aggregate_cluster_test.go +++ b/xds/internal/balancer/cdsbalancer/aggregate_cluster_test.go @@ -445,7 +445,7 @@ func (s) TestAggregatedClusterSuccess_SwitchBetweenLeafAndAggregate(t *testing.T } // Tests the scenario where an aggregate cluster exceeds the maximum depth, -// which is 16. Verfies that the channel moves to TRANSIENT_FAILURE, and the +// which is 16. Verifies that the channel moves to TRANSIENT_FAILURE, and the // error is propagated to RPC callers. The test then modifies the graph to no // longer exceed maximum depth, but be at the maximum allowed depth, and // verifies that an RPC can be made successfully. @@ -678,7 +678,7 @@ func (s) TestAggregatedClusterSuccess_IgnoreDups(t *testing.T) { // cluster (EDS or Logical DNS), no configuration should be pushed to the child // policy. The channel is expected to move to TRANSIENT_FAILURE and RPCs are // expected to fail with code UNAVAILABLE and an error message specifying that -// the aggregate cluster grpah no leaf clusters. Then the test updates A -> B, +// the aggregate cluster graph has no leaf clusters. Then the test updates A -> B, // where B is a leaf EDS cluster. Verifies that configuration is pushed to the // child policy and that an RPC can be successfully made. func (s) TestAggregatedCluster_NodeChildOfItself(t *testing.T) { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 438d08c4e190..df879722046e 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -351,7 +351,7 @@ func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } -// Closes all registered cluster wathers and removes them from the internal map. +// Closes all registered cluster watchers and removes them from the internal map. // // Only executed in the context of a serializer callback. func (b *cdsBalancer) closeAllWatchers() { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 658afebe7e56..abba4bd81c58 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -372,7 +372,7 @@ func (s) TestSecurityConfigNotFoundInBootstrap(t *testing.T) { mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{}) // Create bootstrap configuration pointing to the above management server, - // and one that does not have ceritificate providers configuration. + // and one that does not have certificate providers configuration. nodeID := uuid.New().String() bootstrapContents, err := bootstrap.NewContentsForTesting(bootstrap.ConfigOptionsForTesting{ Servers: []json.RawMessage{[]byte(fmt.Sprintf(`{ @@ -405,7 +405,7 @@ func (s) TestSecurityConfigNotFoundInBootstrap(t *testing.T) { testutils.AwaitState(ctx, t, cc, connectivity.TransientFailure) } -// A ceritificate provider builder that returns a nil Provider from the starter +// A certificate provider builder that returns a nil Provider from the starter // func passed to certprovider.NewBuildableConfig(). 
type errCertProviderBuilder struct{} @@ -433,7 +433,7 @@ func (s) TestCertproviderStoreError(t *testing.T) { mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{}) // Create bootstrap configuration pointing to the above management server - // and one that includes ceritificate providers configuration for + // and one that includes certificate providers configuration for // errCertProviderBuilder. nodeID := uuid.New().String() providerCfg := json.RawMessage(fmt.Sprintf(`{ @@ -481,7 +481,7 @@ func (s) TestGoodSecurityConfig(t *testing.T) { mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{}) // Create bootstrap configuration pointing to the above management server - // and one that includes ceritificate providers configuration. + // and one that includes certificate providers configuration. nodeID := uuid.New().String() bc := e2e.DefaultBootstrapContents(t, nodeID, mgmtServer.Address) diff --git a/xds/internal/balancer/clusterimpl/tests/balancer_test.go b/xds/internal/balancer/clusterimpl/tests/balancer_test.go index d2a6b6d7f757..f7d91d19597a 100644 --- a/xds/internal/balancer/clusterimpl/tests/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/tests/balancer_test.go @@ -70,7 +70,7 @@ func Test(t *testing.T) { // TestConfigUpdateWithSameLoadReportingServerConfig tests the scenario where // the clusterimpl LB policy receives a config update with no change in the load // reporting server configuration. The test verifies that the existing load -// repoting stream is not terminated and that a new load reporting stream is not +// reporting stream is not terminated and that a new load reporting stream is not // created. func (s) TestConfigUpdateWithSameLoadReportingServerConfig(t *testing.T) { // Create an xDS management server that serves ADS and LRS requests. diff --git a/xds/internal/balancer/clusterresolver/configbuilder_childname_test.go b/xds/internal/balancer/clusterresolver/configbuilder_childname_test.go index 6a3dbba83a4b..36106b4ad3a0 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder_childname_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_childname_test.go @@ -91,7 +91,7 @@ func Test_nameGenerator_generate(t *testing.T) { }, input2: [][]xdsresource.Locality{ {{ID: internal.LocalityID{Zone: "L0"}}}, - {{ID: internal.LocalityID{Zone: "L1"}}}, // This gets a newly generated name, sice "0-0" was already picked. + {{ID: internal.LocalityID{Zone: "L1"}}}, // This gets a newly generated name, since "0-0" was already picked. {{ID: internal.LocalityID{Zone: "L2"}}}, }, want: []string{"priority-0-0", "priority-0-2", "priority-0-1"}, diff --git a/xds/internal/balancer/loadstore/load_store_wrapper.go b/xds/internal/balancer/loadstore/load_store_wrapper.go index 8ce958d71ca8..f5605df83276 100644 --- a/xds/internal/balancer/loadstore/load_store_wrapper.go +++ b/xds/internal/balancer/loadstore/load_store_wrapper.go @@ -36,7 +36,7 @@ func NewWrapper() *Wrapper { // update its internal perCluster store so that new stats will be added to the // correct perCluster. // -// Note that this struct is a temporary walkaround before we implement graceful +// Note that this struct is a temporary workaround before we implement graceful // switch for EDS. Any update to the clusterName and serviceName is too early, // the perfect timing is when the picker is updated with the new connection. 
// This early update could cause picks for the old SubConn being reported to the diff --git a/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go index df6b84fbfb69..c317cb2f12b6 100644 --- a/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go +++ b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go @@ -148,7 +148,7 @@ func (s) TestRingHash_ReconnectToMoveOutOfTransientFailure(t *testing.T) { t.Fatal("EmptyCall RPC succeeded when the channel is in TRANSIENT_FAILURE") } - // Restart the server listener. The ring_hash LB polcy is expected to + // Restart the server listener. The ring_hash LB policy is expected to // attempt to reconnect on its own and come out of TRANSIENT_FAILURE, even // without an RPC attempt. lis.Restart() @@ -795,7 +795,7 @@ func computeIdealNumberOfRPCs(t *testing.T, p, errorTolerance float64) int { t.Fatal("p must be in (0, 1)") } numRPCs := math.Ceil(p * (1 - p) * 5. * 5. / errorTolerance / errorTolerance) - return int(numRPCs + 1000.) // add 1k as a buffer to avoid flakyness. + return int(numRPCs + 1000.) // add 1k as a buffer to avoid flakiness. } // setRingHashLBPolicyWithHighMinRingSize sets the ring hash policy with a high @@ -1394,10 +1394,10 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicks(t *testing.T) { }) defer backend.Stop() - nonExistantServerAddr := makeNonExistentBackends(t, 1)[0] + nonExistentServerAddr := makeNonExistentBackends(t, 1)[0] const clusterName = "cluster" - endpoints := endpointResource(t, clusterName, []string{backend.Address, nonExistantServerAddr}) + endpoints := endpointResource(t, clusterName, []string{backend.Address, nonExistentServerAddr}) cluster := e2e.ClusterResourceWithOptions(e2e.ClusterOptions{ ClusterName: clusterName, ServiceName: clusterName, @@ -1431,7 +1431,7 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicks(t *testing.T) { rpcCtx, rpcCancel := context.WithCancel(ctx) go func() { - rpcCtx = metadata.NewOutgoingContext(rpcCtx, metadata.Pairs("address_hash", nonExistantServerAddr+"_0")) + rpcCtx = metadata.NewOutgoingContext(rpcCtx, metadata.Pairs("address_hash", nonExistentServerAddr+"_0")) _, err := client.EmptyCall(rpcCtx, &testpb.Empty{}) if status.Code(err) != codes.Canceled { t.Errorf("Expected RPC to be canceled, got error: %v", err) diff --git a/xds/internal/balancer/ringhash/ring_test.go b/xds/internal/balancer/ringhash/ring_test.go index 1c3a1985b964..8bca19baebb6 100644 --- a/xds/internal/balancer/ringhash/ring_test.go +++ b/xds/internal/balancer/ringhash/ring_test.go @@ -55,7 +55,7 @@ func (s) TestRingNew(t *testing.T) { r := newRing(testSubConnMap, min, max, nil) totalCount := len(r.items) if totalCount < int(min) || totalCount > int(max) { - t.Fatalf("unexpect size %v, want min %v, max %v", totalCount, min, max) + t.Fatalf("unexpected size %v, want min %v, max %v", totalCount, min, max) } for _, a := range testAddrs { var count int diff --git a/xds/internal/httpfilter/rbac/rbac.go b/xds/internal/httpfilter/rbac/rbac.go index 37de3a39b64f..f17977e6d084 100644 --- a/xds/internal/httpfilter/rbac/rbac.go +++ b/xds/internal/httpfilter/rbac/rbac.go @@ -117,7 +117,7 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { // "If absent, no enforcing RBAC policy will be applied" - RBAC // Documentation for Rules field. // "At this time, if the RBAC.action is Action.LOG then the policy will be - // completely ignored, as if RBAC was not configurated." 
- A41 + // completely ignored, as if RBAC was not configured." - A41 if rbacCfg.Rules == nil || rbacCfg.GetRules().GetAction() == v3rbacpb.RBAC_LOG { return config{}, nil } @@ -128,7 +128,7 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()}, "") if err != nil { // "At this time, if the RBAC.action is Action.LOG then the policy will be - // completely ignored, as if RBAC was not configurated." - A41 + // completely ignored, as if RBAC was not configured." - A41 if rbacCfg.GetRules().GetAction() != v3rbacpb.RBAC_LOG { return nil, fmt.Errorf("rbac: error constructing matching engine: %v", err) } @@ -198,7 +198,7 @@ func (builder) BuildServerInterceptor(cfg httpfilter.FilterConfig, override http // "If absent, no enforcing RBAC policy will be applied" - RBAC // Documentation for Rules field. // "At this time, if the RBAC.action is Action.LOG then the policy will be - // completely ignored, as if RBAC was not configurated." - A41 + // completely ignored, as if RBAC was not configured." - A41 if c.chainEngine == nil { return nil, nil } diff --git a/xds/internal/server/conn_wrapper.go b/xds/internal/server/conn_wrapper.go index 0822c6f271d8..d2a13d75c591 100644 --- a/xds/internal/server/conn_wrapper.go +++ b/xds/internal/server/conn_wrapper.go @@ -47,7 +47,7 @@ type connWrapper struct { // The specific filter chain picked for handling this connection. filterChain *xdsresource.FilterChain - // A reference fo the listenerWrapper on which this connection was accepted. + // A reference to the listenerWrapper on which this connection was accepted. parent *listenerWrapper // The certificate providers created for this connection. diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 6c2450425877..0e0a4d901316 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -596,7 +596,7 @@ func (a *authority) triggerResourceNotFoundForTesting(rType xdsresource.Type, re // sendDiscoveryRequestLocked sends a discovery request for the specified // resource type and resource names. Even though this method does not directly -// access the resource cache, it is important that `resourcesMu` be beld when +// access the resource cache, it is important that `resourcesMu` be held when // calling this method to ensure that a consistent snapshot of resource names is // being requested. func (a *authority) sendDiscoveryRequestLocked(rType xdsresource.Type, resources map[string]*resourceState) { diff --git a/xds/internal/xdsclient/transport/loadreport_test.go b/xds/internal/xdsclient/transport/loadreport_test.go index b4eecf1e9e7b..24770897ea29 100644 --- a/xds/internal/xdsclient/transport/loadreport_test.go +++ b/xds/internal/xdsclient/transport/loadreport_test.go @@ -211,7 +211,7 @@ func (s) TestReportLoad(t *testing.T) { } // Cancel the first load reporting call, and ensure that the stream does not - // close (because we have aother call open). + // close (because we have another call open). 
cancelLRS1() sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() diff --git a/xds/internal/xdsclient/transport/transport.go b/xds/internal/xdsclient/transport/transport.go index 9e0ffcf82a6a..6f156398b9bd 100644 --- a/xds/internal/xdsclient/transport/transport.go +++ b/xds/internal/xdsclient/transport/transport.go @@ -64,7 +64,7 @@ const perRPCVerbosityLevel = 9 // Transport provides a resource-type agnostic implementation of the xDS // transport protocol. At this layer, resource contents are supposed to be -// opaque blobs which should be be meaningful only to the xDS data model layer +// opaque blobs which should be meaningful only to the xDS data model layer // which is implemented by the `xdsresource` package. // // Under the hood, it owns the gRPC connection to a single management server and @@ -86,7 +86,7 @@ type Transport struct { lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine. // These channels enable synchronization amongst the different goroutines - // spawned by the transport, and between asynchorous events resulting from + // spawned by the transport, and between asynchronous events resulting from // receipt of responses from the management server. adsStreamCh chan adsStream // New ADS streams are pushed here. adsRequestCh *buffer.Unbounded // Resource and ack requests are pushed here. diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index 8ede639abee6..1d649ac55180 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -278,7 +278,7 @@ func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) { // the received Cluster resource. func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) { if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 { - return nil, fmt.Errorf("unsupport transport_socket_matches field is non-empty: %+v", tsm) + return nil, fmt.Errorf("unsupported transport_socket_matches field is non-empty: %+v", tsm) } // The Cluster resource contains a `transport_socket` field, which contains // a oneof `typed_config` field of type `protobuf.Any`. 
The any proto
@@ -477,7 +477,7 @@ func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsC
 case len(validationCtx.GetVerifyCertificateHash()) != 0:
 return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common)
 case validationCtx.GetRequireSignedCertificateTimestamp().GetValue():
- return nil, fmt.Errorf("unsupported require_sugned_ceritificate_timestamp field in CommonTlsContext message: %+v", common)
+ return nil, fmt.Errorf("unsupported require_signed_certificate_timestamp field in CommonTlsContext message: %+v", common)
 case validationCtx.GetCrl() != nil:
 return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common)
 case validationCtx.GetCustomValidatorConfig() != nil:
diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go
index 5f2858674921..9c001433aa27 100644
--- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go
+++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go
@@ -355,7 +355,7 @@ func (s) TestSecurityConfigFromCommonTLSContextUsingNewFields_ErrorCases(t *test
 },
 },
 },
- wantErr: "unsupported require_sugned_ceritificate_timestamp field in CommonTlsContext message",
+ wantErr: "unsupported require_signed_certificate_timestamp field in CommonTlsContext message",
 },
 {
 name: "unsupported-field-crl-in-validation-context",
From d76d0fa45e82d4b9f42a6648c9b422ddc565aa7a Mon Sep 17 00:00:00 2001
From: Nathan Baulch
Date: Wed, 14 Aug 2024 07:12:17 +1000
Subject: [PATCH 2/2] Fix reflection tests
---
 reflection/test/serverreflection_test.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/reflection/test/serverreflection_test.go b/reflection/test/serverreflection_test.go
index f53ecced0236..29698c604a14 100644
--- a/reflection/test/serverreflection_test.go
+++ b/reflection/test/serverreflection_test.go
@@ -605,9 +605,9 @@ func testFileContainingSymbol(t *testing.T, stream v1reflectiongrpc.ServerReflec
 func testFileContainingSymbolError(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) {
 for _, test := range []string{
- "grpc.testing.SearchService",
+ "grpc.testing.SearchService_",
 "grpc.testing.SearchService.SearchE",
- "grpc.testing.SearchResponse",
+ "grpc.testing_.SearchResponse",
 "gpc.testing.ToBeExtended",
 } {
 if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{