diff --git a/Makefile b/Makefile index 2f2f7b3845c..9859dac05cc 100644 --- a/Makefile +++ b/Makefile @@ -228,7 +228,7 @@ goimports: staticcheck: @printf $(COLOR) "Run staticcheck..." - @staticcheck -fail none ./... + @staticcheck ./... errcheck: @printf $(COLOR) "Run errcheck..." diff --git a/client/clientBean.go b/client/clientBean.go index 37576237952..2599acb93a6 100644 --- a/client/clientBean.go +++ b/client/clientBean.go @@ -42,8 +42,6 @@ import ( "go.temporal.io/server/common/cluster" ) -const clientBeanCallbackID = "clientBean" - type ( // Bean is a collection of clients Bean interface { @@ -125,14 +123,10 @@ func (h *clientBeanImpl) registerClientEviction() { continue } h.remoteAdminClientsLock.Lock() - if _, ok := h.remoteAdminClients[clusterName]; ok { - delete(h.remoteAdminClients, clusterName) - } + delete(h.remoteAdminClients, clusterName) h.remoteAdminClientsLock.Unlock() h.remoteFrontendClientsLock.Lock() - if _, ok := h.remoteFrontendClients[clusterName]; ok { - delete(h.remoteFrontendClients, clusterName) - } + delete(h.remoteFrontendClients, clusterName) h.remoteFrontendClientsLock.Unlock() } }) diff --git a/cmd/server/main.go b/cmd/server/main.go index 7c61092abab..047d73a6b10 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -158,6 +158,9 @@ func buildCLI() *cli.App { authorizer, err := authorization.GetAuthorizerFromConfig( &cfg.Global.Authorization, ) + if err != nil { + return cli.Exit(fmt.Sprintf("Unable to instantiate authorizer. Error: %v", err), 1) + } claimMapper, err := authorization.GetClaimMapperFromConfig(&cfg.Global.Authorization, logger) if err != nil { diff --git a/cmd/tools/cassandra/main.go b/cmd/tools/cassandra/main.go index 2478d010c2c..5177a8a5e5e 100644 --- a/cmd/tools/cassandra/main.go +++ b/cmd/tools/cassandra/main.go @@ -31,5 +31,5 @@ import ( ) func main() { - cassandra.RunTool(os.Args) //nolint:errcheck + _ = cassandra.RunTool(os.Args) } diff --git a/cmd/tools/sql/main.go b/cmd/tools/sql/main.go index eb9d7fa41c4..268df541493 100644 --- a/cmd/tools/sql/main.go +++ b/cmd/tools/sql/main.go @@ -34,5 +34,5 @@ import ( ) func main() { - sql.RunTool(os.Args) //nolint:errcheck + _ = sql.RunTool(os.Args) } diff --git a/common/archiver/filestore/queryParser.go b/common/archiver/filestore/queryParser.go index 8b315aad946..bb8a79f4a5b 100644 --- a/common/archiver/filestore/queryParser.go +++ b/common/archiver/filestore/queryParser.go @@ -102,15 +102,15 @@ func (p *queryParser) convertWhereExpr(expr sqlparser.Expr, parsedQuery *parsedQ return errors.New("where expression is nil") } - switch expr.(type) { + switch expr := expr.(type) { case *sqlparser.ComparisonExpr: - return p.convertComparisonExpr(expr.(*sqlparser.ComparisonExpr), parsedQuery) + return p.convertComparisonExpr(expr, parsedQuery) case *sqlparser.AndExpr: - return p.convertAndExpr(expr.(*sqlparser.AndExpr), parsedQuery) + return p.convertAndExpr(expr, parsedQuery) case *sqlparser.ParenExpr: - return p.convertParenExpr(expr.(*sqlparser.ParenExpr), parsedQuery) + return p.convertParenExpr(expr, parsedQuery) default: - return errors.New("only comparsion and \"and\" expression is supported") + return errors.New("only comparison and \"and\" expression is supported") } } diff --git a/common/archiver/gcloud/connector/client.go b/common/archiver/gcloud/connector/client.go index 98dbb11f9f9..06ce3bf1b26 100644 --- a/common/archiver/gcloud/connector/client.go +++ b/common/archiver/gcloud/connector/client.go @@ -32,7 +32,6 @@ import ( "errors" "io" "os" - "regexp" 
"cloud.google.com/go/storage" "google.golang.org/api/iterator" @@ -41,15 +40,10 @@ import ( "go.temporal.io/server/common/config" ) -const ( - bucketNameRegExpRaw = "^gs:\\/\\/[^:\\/\n?]+" -) - var ( // ErrBucketNotFound is non retryable error that is thrown when the bucket doesn't exist ErrBucketNotFound = errors.New("bucket not found") errObjectNotFound = errors.New("object not found") - bucketNameRegExp = regexp.MustCompile(bucketNameRegExpRaw) ) type ( @@ -114,7 +108,6 @@ func (s *storageWrapper) Upload(ctx context.Context, URI archiver.URI, fileName // Exist check if a bucket or an object exist // If fileName is empty, then 'Exist' function will only check if the given bucket exist. func (s *storageWrapper) Exist(ctx context.Context, URI archiver.URI, fileName string) (exists bool, err error) { - err = ErrBucketNotFound bucket := s.client.Bucket(URI.Hostname()) if _, err := bucket.Attrs(ctx); err != nil { return false, err diff --git a/common/archiver/gcloud/connector/clientDelegate.go b/common/archiver/gcloud/connector/clientDelegate.go index 14e032c5f44..6e035a48a77 100644 --- a/common/archiver/gcloud/connector/clientDelegate.go +++ b/common/archiver/gcloud/connector/clientDelegate.go @@ -102,20 +102,8 @@ type ( ObjectIteratorWrapper interface { Next() (*storage.ObjectAttrs, error) } - - objectIteratorDelegate struct { - iterator *storage.ObjectIterator - } ) -func newClientDelegate() (*clientDelegate, error) { - ctx := context.Background() - if credentialsPath := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"); credentialsPath != "" { - return newClientDelegateWithCredentials(ctx, credentialsPath) - } - return newDefaultClientDelegate(ctx) -} - func newDefaultClientDelegate(ctx context.Context) (*clientDelegate, error) { nativeClient, err := storage.NewClient(ctx) return &clientDelegate{nativeClient: nativeClient}, err @@ -169,17 +157,6 @@ func (b *bucketDelegate) Attrs(ctx context.Context) (*storage.BucketAttrs, error return b.bucket.Attrs(ctx) } -// Next returns the next result. Its second return value is iterator.Done if -// there are no more results. Once Next returns iterator.Done, all subsequent -// calls will return iterator.Done. -// -// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will -// have a non-empty Prefix field, and a zero value for all other fields. These -// represent prefixes. -func (o *objectIteratorDelegate) Next() (*storage.ObjectAttrs, error) { - return o.iterator.Next() -} - // NewWriter returns a storage Writer that writes to the GCS object // associated with this ObjectHandle. 
// diff --git a/common/archiver/gcloud/connector/client_test.go b/common/archiver/gcloud/connector/client_test.go index baf2b7b518d..f2626e85120 100644 --- a/common/archiver/gcloud/connector/client_test.go +++ b/common/archiver/gcloud/connector/client_test.go @@ -89,6 +89,7 @@ func (s *clientSuite) TestUpload() { mockWriter.EXPECT().Close().Return(nil) URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/development") + s.Require().NoError(err) err = storageWrapper.Upload(ctx, URI, "myfile.history", []byte("{}")) s.Require().NoError(err) } @@ -110,6 +111,7 @@ func (s *clientSuite) TestUploadWriterCloseError() { mockWriter.EXPECT().Close().Return(errors.New("Not Found")) URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/development") + s.Require().NoError(err) err = storageWrapper.Upload(ctx, URI, "myfile.history", []byte("{}")) s.Require().EqualError(err, "Not Found") } @@ -216,6 +218,7 @@ func (s *clientSuite) TestGet() { mockReader.EXPECT().Close().Return(nil) URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/development") + s.Require().NoError(err) _, err = storageWrapper.Get(ctx, URI, "myfile.history") s.Require().NoError(err) } @@ -252,6 +255,7 @@ func (s *clientSuite) TestQuery() { var fileNames []string URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/development") + s.Require().NoError(err) fileNames, err = storageWrapper.Query(ctx, URI, "7478875943689868082123907395549832634615673687049942026838") s.Require().NoError(err) s.Equal(strings.Join(fileNames, ", "), "fileName_01") @@ -288,6 +292,7 @@ func (s *clientSuite) TestQueryWithFilter() { var fileNames []string URI, err := archiver.NewURI("gs://my-bucket-cad/temporal_archival/development") + s.Require().NoError(err) fileNames, _, _, err = storageWrapper.QueryWithFilters(ctx, URI, "closeTimeout_2020-02-27T09:42:28Z", 0, 0, []connector.Precondition{newWorkflowIDPrecondition("4418294404690464320")}) s.Require().NoError(err) diff --git a/common/archiver/gcloud/queryParser.go b/common/archiver/gcloud/queryParser.go index b891007528a..73f4bdff1fb 100644 --- a/common/archiver/gcloud/queryParser.go +++ b/common/archiver/gcloud/queryParser.go @@ -96,7 +96,7 @@ func (p *queryParser) Parse(query string) (*parsedQuery, error) { } if (parsedQuery.closeTime.IsZero() && parsedQuery.startTime.IsZero()) || (!parsedQuery.closeTime.IsZero() && !parsedQuery.startTime.IsZero()) { - return nil, errors.New("Requires a StartTime or CloseTime") + return nil, errors.New("requires a StartTime or CloseTime") } if parsedQuery.searchPrecision == nil { @@ -111,15 +111,15 @@ func (p *queryParser) convertWhereExpr(expr sqlparser.Expr, parsedQuery *parsedQ return errors.New("where expression is nil") } - switch expr.(type) { + switch expr := expr.(type) { case *sqlparser.ComparisonExpr: - return p.convertComparisonExpr(expr.(*sqlparser.ComparisonExpr), parsedQuery) + return p.convertComparisonExpr(expr, parsedQuery) case *sqlparser.AndExpr: - return p.convertAndExpr(expr.(*sqlparser.AndExpr), parsedQuery) + return p.convertAndExpr(expr, parsedQuery) case *sqlparser.ParenExpr: - return p.convertParenExpr(expr.(*sqlparser.ParenExpr), parsedQuery) + return p.convertParenExpr(expr, parsedQuery) default: - return errors.New("only comparsion and \"and\" expression is supported") + return errors.New("only comparison and \"and\" expression is supported") } } @@ -233,21 +233,6 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, return nil } -func (p *queryParser) 
convertCloseTime(timestamp time.Time, op string, parsedQuery *parsedQuery) error { - switch op { - case "=": - if err := p.convertCloseTime(timestamp, ">=", parsedQuery); err != nil { - return err - } - if err := p.convertCloseTime(timestamp, "<=", parsedQuery); err != nil { - return err - } - default: - return fmt.Errorf("operator %s is not supported for close time", op) - } - return nil -} - func convertToTime(timeStr string) (time.Time, error) { timestampStr, err := extractStringValue(timeStr) if err != nil { diff --git a/common/archiver/gcloud/util.go b/common/archiver/gcloud/util.go index f72b58f1fd0..5cd838c8a35 100644 --- a/common/archiver/gcloud/util.go +++ b/common/archiver/gcloud/util.go @@ -50,11 +50,6 @@ func encode(message proto.Message) ([]byte, error) { return encoder.Encode(message) } -func constructHistoryFilename(namespaceID, workflowID, runID string, version int64) string { - combinedHash := constructHistoryFilenamePrefix(namespaceID, workflowID, runID) - return fmt.Sprintf("%s_%v.history", combinedHash, version) -} - func constructHistoryFilenameMultipart(namespaceID, workflowID, runID string, version int64, partNumber int) string { combinedHash := constructHistoryFilenamePrefix(namespaceID, workflowID, runID) return fmt.Sprintf("%s_%v_%v.history", combinedHash, version, partNumber) diff --git a/common/archiver/s3store/historyArchiver.go b/common/archiver/s3store/historyArchiver.go index 72c77736061..d3a9e273396 100644 --- a/common/archiver/s3store/historyArchiver.go +++ b/common/archiver/s3store/historyArchiver.go @@ -75,7 +75,6 @@ type ( s3cli s3iface.S3API // only set in test code historyIterator archiver.HistoryIterator - config *config.S3Archiver } getHistoryToken struct { diff --git a/common/archiver/s3store/historyArchiver_test.go b/common/archiver/s3store/historyArchiver_test.go index 7c86e18acf3..c5520f19263 100644 --- a/common/archiver/s3store/historyArchiver_test.go +++ b/common/archiver/s3store/historyArchiver_test.go @@ -77,7 +77,6 @@ type historyArchiverSuite struct { suite.Suite s3cli *mocks.MockS3API container *archiver.HistoryBootstrapContainer - logger log.Logger testArchivalURI archiver.URI historyBatchesV1 []*archiverspb.HistoryBlob historyBatchesV100 []*archiverspb.HistoryBlob diff --git a/common/archiver/s3store/queryParser.go b/common/archiver/s3store/queryParser.go index 406aed03b33..b7e4529cc64 100644 --- a/common/archiver/s3store/queryParser.go +++ b/common/archiver/s3store/queryParser.go @@ -115,15 +115,15 @@ func (p *queryParser) convertWhereExpr(expr sqlparser.Expr, parsedQuery *parsedQ return errors.New("where expression is nil") } - switch expr.(type) { + switch expr := expr.(type) { case *sqlparser.ComparisonExpr: - return p.convertComparisonExpr(expr.(*sqlparser.ComparisonExpr), parsedQuery) + return p.convertComparisonExpr(expr, parsedQuery) case *sqlparser.AndExpr: - return p.convertAndExpr(expr.(*sqlparser.AndExpr), parsedQuery) + return p.convertAndExpr(expr, parsedQuery) case *sqlparser.ParenExpr: - return p.convertParenExpr(expr.(*sqlparser.ParenExpr), parsedQuery) + return p.convertParenExpr(expr, parsedQuery) default: - return errors.New("only comparsion and \"and\" expression is supported") + return errors.New("only comparison and \"and\" expression is supported") } } diff --git a/common/archiver/s3store/visibilityArchiver_test.go b/common/archiver/s3store/visibilityArchiver_test.go index 1dcb1fb7ff2..391de986477 100644 --- a/common/archiver/s3store/visibilityArchiver_test.go +++ 
b/common/archiver/s3store/visibilityArchiver_test.go @@ -62,7 +62,6 @@ type visibilityArchiverSuite struct { s3cli *mocks.MockS3API container *archiver.VisibilityBootstrapContainer - logger log.Logger visibilityRecords []*archiverspb.VisibilityRecord controller *gomock.Controller diff --git a/common/archiver/util.go b/common/archiver/util.go index 40d20d41860..f816b8cebb2 100644 --- a/common/archiver/util.go +++ b/common/archiver/util.go @@ -33,15 +33,15 @@ import ( ) var ( - errEmptyNamespaceID = errors.New("NamespaceId is empty") - errEmptyNamespace = errors.New("Namespace is empty") - errEmptyWorkflowID = errors.New("WorkflowId is empty") - errEmptyRunID = errors.New("RunId is empty") - errInvalidPageSize = errors.New("PageSize should be greater than 0") - errEmptyWorkflowTypeName = errors.New("WorkflowTypeName is empty") - errEmptyStartTime = errors.New("StartTime is empty") - errEmptyCloseTime = errors.New("CloseTime is empty") - errEmptyQuery = errors.New("Query string is empty") + errEmptyNamespaceID = errors.New("field NamespaceId is empty") + errEmptyNamespace = errors.New("field Namespace is empty") + errEmptyWorkflowID = errors.New("field WorkflowId is empty") + errEmptyRunID = errors.New("field RunId is empty") + errInvalidPageSize = errors.New("field PageSize should be greater than 0") + errEmptyWorkflowTypeName = errors.New("field WorkflowTypeName is empty") + errEmptyStartTime = errors.New("field StartTime is empty") + errEmptyCloseTime = errors.New("field CloseTime is empty") + errEmptyQuery = errors.New("field Query is empty") ) // TagLoggerWithArchiveHistoryRequestAndURI tags logger with fields in the archive history request and the URI diff --git a/common/authorization/default_authorizer_test.go b/common/authorization/default_authorizer_test.go index 3d6bad4aa07..e9682b8ae52 100644 --- a/common/authorization/default_authorizer_test.go +++ b/common/authorization/default_authorizer_test.go @@ -25,6 +25,7 @@ package authorization import ( + "context" "reflect" "testing" @@ -102,62 +103,62 @@ func (s *defaultAuthorizerSuite) TearDownTest() { } func (s *defaultAuthorizerSuite) TestSystemAdminAuthZ() { - result, err := s.authorizer.Authorize(nil, &claimsSystemAdmin, &targetFooBar) + result, err := s.authorizer.Authorize(context.TODO(), &claimsSystemAdmin, &targetFooBar) s.NoError(err) s.Equal(DecisionAllow, result.Decision) } func (s *defaultAuthorizerSuite) TestSystemWriterAuthZ() { - result, err := s.authorizer.Authorize(nil, &claimsSystemWriter, &targetFooBar) + result, err := s.authorizer.Authorize(context.TODO(), &claimsSystemWriter, &targetFooBar) s.NoError(err) s.Equal(DecisionAllow, result.Decision) } func (s *defaultAuthorizerSuite) TestSystemReaderAuthZ() { - result, err := s.authorizer.Authorize(nil, &claimsSystemReader, &targetFooBar) + result, err := s.authorizer.Authorize(context.TODO(), &claimsSystemReader, &targetFooBar) s.NoError(err) s.Equal(DecisionDeny, result.Decision) } func (s *defaultAuthorizerSuite) TestSystemReaderBarUndefinedAuthZ() { - result, err := s.authorizer.Authorize(nil, &claimsSystemReaderNamespaceUndefined, &targetFooBar) + result, err := s.authorizer.Authorize(context.TODO(), &claimsSystemReaderNamespaceUndefined, &targetFooBar) s.NoError(err) s.Equal(DecisionDeny, result.Decision) } func (s *defaultAuthorizerSuite) TestSystemUndefinedNamespaceReaderAuthZ() { - result, err := s.authorizer.Authorize(nil, &claimsSystemUndefinedNamespaceReader, &targetFooBar) + result, err := s.authorizer.Authorize(context.TODO(), 
&claimsSystemUndefinedNamespaceReader, &targetFooBar) s.NoError(err) s.Equal(DecisionDeny, result.Decision) } func (s *defaultAuthorizerSuite) TestSystemUndefinedNamespaceCaseMismatch() { - result, err := s.authorizer.Authorize(nil, &claimsSystemUndefinedNamespaceReader, &targetFooBAR) + result, err := s.authorizer.Authorize(context.TODO(), &claimsSystemUndefinedNamespaceReader, &targetFooBAR) s.NoError(err) s.Equal(DecisionDeny, result.Decision) } func (s *defaultAuthorizerSuite) TestSystemUndefinedNamespaceReaderListNamespaces() { - result, err := s.authorizer.Authorize(nil, &claimsSystemUndefinedNamespaceReader, &targetListNamespaces) + result, err := s.authorizer.Authorize(context.TODO(), &claimsSystemUndefinedNamespaceReader, &targetListNamespaces) s.NoError(err) s.Equal(DecisionDeny, result.Decision) } func (s *defaultAuthorizerSuite) TestSystemUndefinedNamespaceReaderDescribeNamespace() { - result, err := s.authorizer.Authorize(nil, &claimsSystemUndefinedNamespaceReader, &targetDescribeNamespace) + result, err := s.authorizer.Authorize(context.TODO(), &claimsSystemUndefinedNamespaceReader, &targetDescribeNamespace) s.NoError(err) s.Equal(DecisionAllow, result.Decision) } func (s *defaultAuthorizerSuite) TestSystemWriterDescribeNamespace() { - result, err := s.authorizer.Authorize(nil, &claimsSystemWriter, &targetDescribeNamespace) + result, err := s.authorizer.Authorize(context.TODO(), &claimsSystemWriter, &targetDescribeNamespace) s.NoError(err) s.Equal(DecisionAllow, result.Decision) } func (s *defaultAuthorizerSuite) TestSystemWriterListNamespaces() { - result, err := s.authorizer.Authorize(nil, &claimsSystemWriter, &targetListNamespaces) + result, err := s.authorizer.Authorize(context.TODO(), &claimsSystemWriter, &targetListNamespaces) s.NoError(err) s.Equal(DecisionAllow, result.Decision) } func (s *defaultAuthorizerSuite) TestSystemAdminDescribeNamespace() { - result, err := s.authorizer.Authorize(nil, &claimsSystemAdmin, &targetDescribeNamespace) + result, err := s.authorizer.Authorize(context.TODO(), &claimsSystemAdmin, &targetDescribeNamespace) s.NoError(err) s.Equal(DecisionAllow, result.Decision) } func (s *defaultAuthorizerSuite) TestSystemAdminListNamespaces() { - result, err := s.authorizer.Authorize(nil, &claimsSystemAdmin, &targetListNamespaces) + result, err := s.authorizer.Authorize(context.TODO(), &claimsSystemAdmin, &targetListNamespaces) s.NoError(err) s.Equal(DecisionAllow, result.Decision) } diff --git a/common/authorization/default_jwt_claim_mapper_test.go b/common/authorization/default_jwt_claim_mapper_test.go index 569597ac379..74fda73b1e5 100644 --- a/common/authorization/default_jwt_claim_mapper_test.go +++ b/common/authorization/default_jwt_claim_mapper_test.go @@ -306,10 +306,6 @@ func (tg *tokenGenerator) generateRSAToken(subject string, permissions []string, return tg.generateToken(RSA, subject, permissions, options) } -func (tg *tokenGenerator) generateECDSAToken(subject string, permissions []string, options errorTestOptions) (string, error) { - return tg.generateToken(ECDSA, subject, permissions, options) -} - func (tg *tokenGenerator) generateToken(alg keyAlgorithm, subject string, permissions []string, options errorTestOptions) (string, error) { claims := CustomClaims{ permissions, diff --git a/common/authorization/default_token_key_provider.go b/common/authorization/default_token_key_provider.go index 68ae095822c..fddd5c5ed91 100644 --- a/common/authorization/default_token_key_provider.go +++ b/common/authorization/default_token_key_provider.go 
@@ -119,7 +119,7 @@ func (a *defaultTokenKeyProvider) timerCallback() { for { select { case <-a.stop: - break + return case <-a.ticker.C: } if a.config.HasSourceURIsConfigured() { diff --git a/common/backoff/retry_test.go b/common/backoff/retry_test.go index 3f334aad4e7..6be75bd83f7 100644 --- a/common/backoff/retry_test.go +++ b/common/backoff/retry_test.go @@ -204,7 +204,7 @@ func (s *RetrySuite) TestRetryContextTimeout() { start := time.Now() err := RetryContext(ctx, func(ctx context.Context) error { return &someError{} }, NewExponentialRetryPolicy(1*time.Second), retryEverything) - elapsed := time.Now().Sub(start) + elapsed := time.Since(start) s.ErrorIs(err, context.DeadlineExceeded) s.GreaterOrEqual(elapsed, timeout, "Call to retry should take at least as long as the context timeout") diff --git a/common/cache/lru_test.go b/common/cache/lru_test.go index 4433bba23c5..053a9b4e4da 100644 --- a/common/cache/lru_test.go +++ b/common/cache/lru_test.go @@ -141,11 +141,9 @@ func TestLRUCacheConcurrentAccess(t *testing.T) { <-start for j := 0; j < 50; j++ { - var result []Entry it := cache.Iterator() for it.HasNext() { - entry := it.Next() - result = append(result, entry) //nolint:staticcheck + _ = it.Next() } it.Close() } diff --git a/common/cache/simple_test.go b/common/cache/simple_test.go index 09b204bdcc6..d8fc61fe8ef 100644 --- a/common/cache/simple_test.go +++ b/common/cache/simple_test.go @@ -117,11 +117,9 @@ func TestSimpleCacheConcurrentAccess(t *testing.T) { <-start for j := 0; j < 50; j++ { - var result []Entry it := cache.Iterator() for it.HasNext() { - entry := it.Next() - result = append(result, entry) //nolint:staticcheck + _ = it.Next() } it.Close() } diff --git a/common/collection/concurrent_tx_map_test.go b/common/collection/concurrent_tx_map_test.go index ba75f80760e..d99ba44e648 100644 --- a/common/collection/concurrent_tx_map_test.go +++ b/common/collection/concurrent_tx_map_test.go @@ -142,8 +142,7 @@ func (s *ConcurrentTxMapSuite) TestPutOrDo() { func (s *ConcurrentTxMapSuite) TestRemoveIf() { testMap := NewShardedConcurrentTxMap(1, UUIDHashCode) key := uuid.New() - var value intType - value = intType(1) + value := intType(1) testMap.Put(key, &value) removed := testMap.RemoveIf(key, func(key interface{}, value interface{}) bool { diff --git a/common/config/archival.go b/common/config/archival.go index e5e887fdd7f..e980701ecaa 100644 --- a/common/config/archival.go +++ b/common/config/archival.go @@ -40,11 +40,11 @@ const ( // Validate validates the archival config func (a *Archival) Validate(namespaceDefaults *ArchivalNamespaceDefaults) error { if !isArchivalConfigValid(a.History.State, a.History.EnableRead, namespaceDefaults.History.State, namespaceDefaults.History.URI, a.History.Provider != nil) { - return errors.New("Invalid history archival config") + return errors.New("invalid history archival config") } if !isArchivalConfigValid(a.Visibility.State, a.Visibility.EnableRead, namespaceDefaults.Visibility.State, namespaceDefaults.Visibility.URI, a.Visibility.Provider != nil) { - return errors.New("Invalid visibility archival config") + return errors.New("invalid visibility archival config") } return nil diff --git a/common/dynamicconfig/cmp.go b/common/dynamicconfig/cmp.go deleted file mode 100644 index 69559715c40..00000000000 --- a/common/dynamicconfig/cmp.go +++ /dev/null @@ -1,60 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package dynamicconfig - -import ( - "math" - "time" -) - -func float64CompareEquals(a, b interface{}) bool { - aVal := a.(float64) - bVal := b.(float64) - return (aVal == bVal) || math.Nextafter(aVal, bVal) == aVal -} - -func intCompareEquals(a, b interface{}) bool { - aVal := a.(int) - bVal := b.(int) - return aVal == bVal -} - -func boolCompareEquals(a, b interface{}) bool { - aVal := a.(bool) - bVal := b.(bool) - return aVal == bVal -} - -func stringCompareEquals(a, b interface{}) bool { - aVal := a.(string) - bVal := b.(string) - return aVal == bVal -} - -func durationCompareEquals(a, b interface{}) bool { - aVal := a.(time.Duration) - bVal := b.(time.Duration) - return aVal.Nanoseconds() == bVal.Nanoseconds() -} diff --git a/common/dynamicconfig/cmp_test.go b/common/dynamicconfig/cmp_test.go deleted file mode 100644 index 73046870cb8..00000000000 --- a/common/dynamicconfig/cmp_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package dynamicconfig - -import ( - "math" - "math/rand" - "reflect" - "testing" - "time" - - "github.com/stretchr/testify/suite" -) - -type compareEqualsTestSuite struct { - suite.Suite -} - -func TestCompareEqualsSuite(t *testing.T) { - suite.Run(t, new(compareEqualsTestSuite)) -} - -func (s *compareEqualsTestSuite) TestIntCompareEquals() { - s.True(intCompareEquals(0, 0)) - s.True(intCompareEquals(math.MaxInt32, math.MaxInt32)) - s.True(intCompareEquals(math.MinInt32, math.MinInt32)) - s.False(intCompareEquals(0, math.MaxInt32)) - s.False(intCompareEquals(math.MaxInt32, math.MinInt32)) -} - -func (s *compareEqualsTestSuite) TestFloat64CompareEquals() { - s.True(float64CompareEquals(0.0, 0.0)) - s.True(float64CompareEquals(0.123456, 0.123456)) - s.True(float64CompareEquals(12345.123456, 12345.123456)) - s.False(float64CompareEquals(0.0, 0.1)) - s.False(float64CompareEquals(0.123456, 0.1234561)) - s.False(float64CompareEquals(12345.123456, 12345.1234567)) -} - -func (s *compareEqualsTestSuite) TestDurationCompareEquals() { - s.True(durationCompareEquals(time.Second, time.Second)) - s.True(durationCompareEquals(time.Microsecond, time.Microsecond)) - s.True(durationCompareEquals(time.Millisecond, time.Millisecond)) - s.True(durationCompareEquals(time.Nanosecond, time.Nanosecond)) - s.True(durationCompareEquals(3600*time.Second, 3600*time.Second)) - s.False(durationCompareEquals(time.Nanosecond, time.Microsecond)) - s.False(durationCompareEquals(time.Microsecond+1, time.Microsecond)) -} - -func (s *compareEqualsTestSuite) TestMapCompareEquals() { - buildMap := func() map[int]int { - m := make(map[int]int) - for i := 0; i < 10; i++ { - m[rand.Int()] = rand.Int() - } - return m - } - - cloneMap := func(src map[int]int) map[int]int { - dst := make(map[int]int) - for k, v := range src { - dst[k] = v - } - return dst - } - - for i := 0; i < 10; i++ { - m := buildMap() - s.True(reflect.DeepEqual(m, cloneMap(m))) - } -} diff --git a/common/dynamicconfig/noop_client.go b/common/dynamicconfig/noop_client.go index 49baa519751..1dd3b05a774 100644 --- a/common/dynamicconfig/noop_client.go +++ b/common/dynamicconfig/noop_client.go @@ -47,36 +47,36 @@ func NewNoopCollection() *Collection { } func (mc *noopClient) GetValue(name Key, defaultValue interface{}) (interface{}, error) { - return nil, errors.New("Noop key search") + return nil, errors.New("noop key search") } func (mc *noopClient) GetValueWithFilters(name Key, filters []map[Filter]interface{}, defaultValue interface{}) (interface{}, error) { - return nil, errors.New("Noop key search") + return nil, errors.New("noop key search") } func (mc *noopClient) GetIntValue(name Key, filters []map[Filter]interface{}, defaultValue int) (int, error) { - return defaultValue, errors.New("Noop key search") + return defaultValue, errors.New("noop key search") } func (mc *noopClient) GetFloatValue(name Key, filters []map[Filter]interface{}, defaultValue float64) (float64, error) { - return defaultValue, errors.New("Noop key search") + return defaultValue, errors.New("noop key search") } func (mc *noopClient) GetBoolValue(name Key, filters []map[Filter]interface{}, defaultValue bool) (bool, error) { if len(filters) > 0 && filters[0][Namespace] == "TestRawHistoryNamespace" { - return true, errors.New("Noop key search") + return true, errors.New("noop key search") } - return defaultValue, errors.New("Noop key search") + return defaultValue, errors.New("noop key search") } func (mc *noopClient) GetStringValue(name Key, filters []map[Filter]interface{}, defaultValue 
string) (string, error) { - return defaultValue, errors.New("Noop key search") + return defaultValue, errors.New("noop key search") } func (mc *noopClient) GetMapValue(name Key, filters []map[Filter]interface{}, defaultValue map[string]interface{}) (map[string]interface{}, error) { - return defaultValue, errors.New("Noop key search") + return defaultValue, errors.New("noop key search") } func (mc *noopClient) GetDurationValue(name Key, filters []map[Filter]interface{}, defaultValue time.Duration) (time.Duration, error) { - return defaultValue, errors.New("Noop key search") + return defaultValue, errors.New("noop key search") } diff --git a/common/future/future_test.go b/common/future/future_test.go index a78dcddf2c0..d0abdea8b6d 100644 --- a/common/future/future_test.go +++ b/common/future/future_test.go @@ -57,7 +57,7 @@ func BenchmarkFutureAvailable(b *testing.B) { for n := 0; n < b.N; n++ { future := futures[n] future.Set(nil, nil) - _, _ = future.Get(ctx) //nolint:errcheck + _, _ = future.Get(ctx) } } @@ -68,7 +68,7 @@ func BenchmarkFutureGet(b *testing.B) { future.Set(nil, nil) ctx := context.Background() for n := 0; n < b.N; n++ { - _, _ = future.Get(ctx) //nolint:errcheck + _, _ = future.Get(ctx) } } @@ -78,7 +78,7 @@ func BenchmarkFutureReady(b *testing.B) { future := NewFuture[interface{}]() future.Set(nil, nil) for n := 0; n < b.N; n++ { - _ = future.Ready() //nolint:errcheck + _ = future.Ready() } } diff --git a/common/locks/condition_variable_test.go b/common/locks/condition_variable_test.go index ea205c1b448..61e7c53cc16 100644 --- a/common/locks/condition_variable_test.go +++ b/common/locks/condition_variable_test.go @@ -87,6 +87,7 @@ func (s *conditionVariableSuite) TestSignal() { signalWaitGroup.Wait() s.lock.Lock() + func() {}() s.lock.Unlock() s.cv.Signal() waitGroup.Wait() @@ -113,6 +114,7 @@ func (s *conditionVariableSuite) TestInterrupt() { interruptWaitGroup.Wait() s.lock.Lock() + func() {}() s.lock.Unlock() interruptChan <- struct{}{} waitGroup.Wait() @@ -142,6 +144,7 @@ func (s *conditionVariableSuite) TestBroadcast() { broadcastWaitGroup.Wait() s.lock.Lock() + func() {}() s.lock.Unlock() s.cv.Broadcast() waitGroup.Wait() diff --git a/common/locks/id_mutex_test.go b/common/locks/id_mutex_test.go index 6fc95862a60..3c248ffc84f 100644 --- a/common/locks/id_mutex_test.go +++ b/common/locks/id_mutex_test.go @@ -51,7 +51,8 @@ func BenchmarkGolangMutex(b *testing.B) { lock := &sync.Mutex{} for i := 0; i < b.N; i++ { lock.Lock() - lock.Unlock() //nolint:staticcheck + func() {}() + lock.Unlock() } } diff --git a/common/locks/priority_mutex_impl.go b/common/locks/priority_mutex_impl.go index b6ef15f0f1f..167f6c18995 100644 --- a/common/locks/priority_mutex_impl.go +++ b/common/locks/priority_mutex_impl.go @@ -173,7 +173,5 @@ func (c *PriorityMutexImpl) notify() { c.highCV.Signal() } else if c.lowWait > 0 { c.lowCV.Signal() - } else { - // noop } } diff --git a/common/locks/priority_mutex_test.go b/common/locks/priority_mutex_test.go index eb09f9049d2..b325065ef82 100644 --- a/common/locks/priority_mutex_test.go +++ b/common/locks/priority_mutex_test.go @@ -49,8 +49,8 @@ func BenchmarkPriorityMutex_High(b *testing.B) { lock := NewPriorityMutex() ctx := context.Background() for n := 0; n < b.N; n++ { - _ = lock.LockHigh(ctx) //nolint:errcheck - lock.UnlockHigh() //nolint:staticcheck + _ = lock.LockHigh(ctx) + lock.UnlockHigh() } } @@ -60,8 +60,8 @@ func BenchmarkPriorityMutex_Low(b *testing.B) { lock := NewPriorityMutex() ctx := context.Background() for n := 0; n < b.N; n++ { 
- _ = lock.LockLow(ctx) //nolint:errcheck - lock.UnlockLow() //nolint:staticcheck + _ = lock.LockLow(ctx) + lock.UnlockLow() } } diff --git a/common/membership/interfaces.go b/common/membership/interfaces.go index bd3c7f8fc0e..f77314b535d 100644 --- a/common/membership/interfaces.go +++ b/common/membership/interfaces.go @@ -35,16 +35,16 @@ import ( ) // ErrUnknownService is thrown for a service that is not tracked by this instance -var ErrUnknownService = errors.New("Service not tracked by Monitor") +var ErrUnknownService = errors.New("service not tracked by Monitor") // ErrInsufficientHosts is thrown when there are not enough hosts to serve the request var ErrInsufficientHosts = serviceerror.NewUnavailable("Not enough hosts to serve the request") // ErrListenerAlreadyExist is thrown on a duplicate AddListener call from the same listener -var ErrListenerAlreadyExist = errors.New("Listener already exist for the service") +var ErrListenerAlreadyExist = errors.New("listener already exists for the service") // ErrIncorrectAddressFormat is thrown on incorrect address format -var ErrIncorrectAddressFormat = errors.New("Incorrect address format") +var ErrIncorrectAddressFormat = errors.New("incorrect address format") type ( diff --git a/common/metrics/baggage_bench_test.go b/common/metrics/baggage_bench_test.go index de2ff67664f..507653eef44 100644 --- a/common/metrics/baggage_bench_test.go +++ b/common/metrics/baggage_bench_test.go @@ -98,7 +98,7 @@ func (b *baggageMutexMap) Add(k string, v int64) { b.Lock() defer b.Unlock() - value, _ := b.data[k] + value := b.data[k] value += v b.data[k] = value } @@ -110,9 +110,9 @@ func (b *baggageMutexMap) Get(k string) int64 { } // roughly 1.7s/7.5s for mutex/sync -//baggageCount := 1000 -//threadCount := 20 -//updatesPerThread := 1000 +// baggageCount := 1000 +// threadCount := 20 +// updatesPerThread := 1000 func testMapBaggage(createTestObj func() testBaggage) { baggageCount := 10 threadCount := 10 diff --git a/common/metrics/common.go b/common/metrics/common.go index 1881835c586..89be905b008 100644 --- a/common/metrics/common.go +++ b/common/metrics/common.go @@ -26,36 +26,12 @@ package metrics import ( "fmt" - "time" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/primitives" ) -const ( - distributionToTimerRatio = int(time.Millisecond / time.Nanosecond) -) - -func mergeMapToRight(src map[string]string, dest map[string]string) { - for k, v := range src { - dest[k] = v - } -} - -func getMetricDefs(serviceIdx ServiceIdx) map[int]metricDefinition { - defs := make(map[int]metricDefinition) - for idx, def := range MetricDefs[Common] { - defs[idx] = def - } - - for idx, def := range MetricDefs[serviceIdx] { - defs[idx] = def - } - - return defs -} - // GetMetricsServiceIdx returns service id corresponding to serviceName func GetMetricsServiceIdx(serviceName string, logger log.Logger) ServiceIdx { switch serviceName { diff --git a/common/metrics/config.go b/common/metrics/config.go index b3722243fbc..80b83b9fb8a 100644 --- a/common/metrics/config.go +++ b/common/metrics/config.go @@ -186,8 +186,6 @@ var ( ReplacementCharacter: tally.DefaultReplacementCharacter, } - defaultQuantiles = []float64{50, 75, 90, 95, 99} - defaultPerUnitHistogramBoundaries = map[string][]float64{ Dimensionless: { 1, @@ -318,7 +316,12 @@ func newStatsdScope(logger log.Logger, c *Config) tally.Scope { if len(config.HostPort) == 0 { return tally.NoopScope } - statter, err := statsd.NewBufferedClient(config.HostPort, config.Prefix,
config.FlushInterval, config.FlushBytes) + statter, err := statsd.NewClientWithConfig(&statsd.ClientConfig{ + Address: config.HostPort, + Prefix: config.Prefix, + FlushInterval: config.FlushInterval, + FlushBytes: config.FlushBytes, + }) if err != nil { logger.Fatal("error creating statsd client", tag.Error(err)) } diff --git a/common/metrics/config_test.go b/common/metrics/config_test.go index f29200950c4..62338b05338 100644 --- a/common/metrics/config_test.go +++ b/common/metrics/config_test.go @@ -36,32 +36,6 @@ import ( "go.temporal.io/server/common/log" ) -type nullStatsReporter struct{} - -func (r nullStatsReporter) Capabilities() tally.Capabilities { - panic("implement me") -} - -func (r nullStatsReporter) Flush() { - panic("implement me") -} - -func (r nullStatsReporter) AllocateCounter(name string, tags map[string]string) tally.CachedCount { - panic("implement me") -} - -func (r nullStatsReporter) AllocateGauge(name string, tags map[string]string) tally.CachedGauge { - panic("implement me") -} - -func (r nullStatsReporter) AllocateTimer(name string, tags map[string]string) tally.CachedTimer { - panic("implement me") -} - -func (r nullStatsReporter) AllocateHistogram(name string, tags map[string]string, buckets tally.Buckets) tally.CachedHistogram { - panic("implement me") -} - type MetricsSuite struct { *require.Assertions suite.Suite diff --git a/common/metrics/defs.go b/common/metrics/defs.go index 1c54e4bc01d..71d0aae0fca 100644 --- a/common/metrics/defs.go +++ b/common/metrics/defs.go @@ -24,10 +24,6 @@ package metrics -import ( - "github.com/uber-go/tally/v4" -) - // types used/defined by the package type ( // MetricName is the name of the metric @@ -40,11 +36,9 @@ type ( // metricDefinition contains the definition for a metric metricDefinition struct { - // nolint - metricType MetricType // metric type - metricName MetricName // metric name - metricRollupName MetricName // optional. if non-empty, this name must be used for rolled-up version of this metric - buckets tally.Buckets // buckets if we are emitting histograms + metricType MetricType // metric type + metricName MetricName // metric name + metricRollupName MetricName // optional. 
if non-empty, this name must be used for rolled-up version of this metric unit MetricUnit } diff --git a/common/metrics/grpc.go b/common/metrics/grpc.go index a3eb4b9b49f..104731c2c5d 100644 --- a/common/metrics/grpc.go +++ b/common/metrics/grpc.go @@ -199,6 +199,6 @@ func ContextCounterGet(ctx context.Context, name string) (int64, bool) { return 0, false } - result, _ := metricsCtx.CountersInt[name] + result := metricsCtx.CountersInt[name] return result, true } diff --git a/common/metrics/tags.go b/common/metrics/tags.go index d3d4e01bd0b..e838888c9f3 100644 --- a/common/metrics/tags.go +++ b/common/metrics/tags.go @@ -29,7 +29,6 @@ import ( "strconv" "strings" - "go.temporal.io/api/enums/v1" enumspb "go.temporal.io/api/enums/v1" ) @@ -74,13 +73,6 @@ type ( } ) -func newExcludedTag(key string) Tag { - return &tagImpl{ - key: key, - value: tagExcludedValue, - } -} - func (v *tagImpl) Key() string { return v.key } @@ -137,7 +129,7 @@ func TaskQueueTag(value string) Tag { return &tagImpl{key: taskQueue, value: sanitizer.Value(value)} } -func TaskQueueTypeTag(tqType enums.TaskQueueType) Tag { +func TaskQueueTypeTag(tqType enumspb.TaskQueueType) Tag { return &tagImpl{key: TaskTypeTagName, value: tqType.String()} } diff --git a/common/namespace/errors.go b/common/namespace/errors.go index 3cadd564b41..7058cff56f5 100644 --- a/common/namespace/errors.go +++ b/common/namespace/errors.go @@ -31,7 +31,6 @@ import ( var ( // err indicating that this cluster is not the master, so cannot do namespace registration or update errNotMasterCluster = serviceerror.NewInvalidArgument("Cluster is not master cluster, cannot do namespace registration or namespace update.") - errCannotRemoveClustersFromNamespace = serviceerror.NewInvalidArgument("Cannot remove existing replicated clusters from a namespace.") errActiveClusterNotInClusters = serviceerror.NewInvalidArgument("Active cluster is not contained in all clusters.") errCannotDoNamespaceFailoverAndUpdate = serviceerror.NewInvalidArgument("Cannot set active cluster to current cluster when other parameters are set.") errInvalidRetentionPeriod = serviceerror.NewInvalidArgument("A valid retention period is not set on request.") diff --git a/common/persistence/cassandra/execution_store.go b/common/persistence/cassandra/execution_store.go index 2a4b9196a52..dfba4d5dee1 100644 --- a/common/persistence/cassandra/execution_store.go +++ b/common/persistence/cassandra/execution_store.go @@ -40,10 +40,7 @@ import ( // R represents row type in executions table, valid values are: // R = {Shard = 1, Execution = 2, Transfer = 3, Timer = 4, Replication = 5} const ( - // Special Namespaces related constants - emptyNamespaceID = "10000000-0000-f000-f000-000000000000" // Special Run IDs - emptyRunID = "30000000-0000-f000-f000-000000000000" permanentRunID = "30000000-0000-f000-f000-000000000001" // Row Constants for Shard Row rowTypeShardNamespaceID = "10000000-1000-f000-f000-000000000000" @@ -75,7 +72,6 @@ const ( // Special TaskId constants rowTypeExecutionTaskID = int64(-10) rowTypeShardTaskID = int64(-11) - emptyInitiatedID = int64(-7) ) const ( diff --git a/common/persistence/cassandra/history_store.go b/common/persistence/cassandra/history_store.go index 6c72e705fc8..d7af757762f 100644 --- a/common/persistence/cassandra/history_store.go +++ b/common/persistence/cassandra/history_store.go @@ -146,7 +146,7 @@ func (h *HistoryStore) DeleteHistoryNodes( if nodeID < p.GetBeginNodeID(branchInfo) { return &p.InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("cannot delete from 
ancestors' nodes"), + Msg: "cannot delete from ancestors' nodes", } } diff --git a/common/persistence/cassandra/matching_task_store.go b/common/persistence/cassandra/matching_task_store.go index 03af7395cf9..ad3a9f7e48d 100644 --- a/common/persistence/cassandra/matching_task_store.go +++ b/common/persistence/cassandra/matching_task_store.go @@ -283,7 +283,7 @@ func (d *MatchingTaskStore) ListTaskQueue( _ context.Context, _ *p.ListTaskQueueRequest, ) (*p.InternalListTaskQueueResponse, error) { - return nil, serviceerror.NewUnavailable(fmt.Sprintf("unsupported operation")) + return nil, serviceerror.NewUnavailable("unsupported operation") } func (d *MatchingTaskStore) DeleteTaskQueue( diff --git a/common/persistence/cassandra/metadata_store.go b/common/persistence/cassandra/metadata_store.go index 9a2f3d58e63..6e26d0816ff 100644 --- a/common/persistence/cassandra/metadata_store.go +++ b/common/persistence/cassandra/metadata_store.go @@ -177,7 +177,7 @@ func (m *MetadataStore) CreateNamespaceInV2Table( return nil, serviceerror.NewNamespaceAlreadyExists(msg) } - return nil, serviceerror.NewNamespaceAlreadyExists(fmt.Sprintf("CreateNamespace operation failed because of conditional failure.")) + return nil, serviceerror.NewNamespaceAlreadyExists("CreateNamespace operation failed because of conditional failure.") } return &p.CreateNamespaceResponse{ID: request.ID}, nil @@ -206,7 +206,7 @@ func (m *MetadataStore) UpdateNamespace( defer func() { _ = iter.Close() }() if !applied { - return serviceerror.NewUnavailable(fmt.Sprintf("UpdateNamespace operation failed because of conditional failure.")) + return serviceerror.NewUnavailable("UpdateNamespace operation failed because of conditional failure.") } return nil @@ -259,7 +259,7 @@ func (m *MetadataStore) RenameNamespace( defer func() { _ = iter.Close() }() if !applied { - return serviceerror.NewUnavailable(fmt.Sprintf("RenameNamespace operation failed because of conditional failure.")) + return serviceerror.NewUnavailable("RenameNamespace operation failed because of conditional failure.") } return nil diff --git a/common/persistence/cassandra/mutable_state_store.go b/common/persistence/cassandra/mutable_state_store.go index e17b2d0cc79..43a02103ca0 100644 --- a/common/persistence/cassandra/mutable_state_store.go +++ b/common/persistence/cassandra/mutable_state_store.go @@ -604,7 +604,7 @@ func (d *MutableStateStore) UpdateWorkflowExecution( newRunID := newWorkflow.RunID if namespaceID != newNamespaceID { - return serviceerror.NewInternal(fmt.Sprintf("UpdateWorkflowExecution: cannot continue as new to another namespace")) + return serviceerror.NewInternal("UpdateWorkflowExecution: cannot continue as new to another namespace") } batch.Query(templateUpdateCurrentWorkflowExecutionQuery, diff --git a/common/persistence/cassandra/queue_store.go b/common/persistence/cassandra/queue_store.go index 59681fa8bee..73103481744 100644 --- a/common/persistence/cassandra/queue_store.go +++ b/common/persistence/cassandra/queue_store.go @@ -340,6 +340,9 @@ func (q *QueueStore) updateAckLevel( // TODO: remove this once cluster_ack_level is removed from DB metadataStruct, err := serialization.QueueMetadataFromBlob(metadata.Blob.Data, metadata.Blob.EncodingType.String()) + if err != nil { + return gocql.ConvertError("updateAckLevel", err) + } query := q.session.Query(templateUpdateQueueMetadataQuery, metadataStruct.ClusterAckLevels, @@ -351,7 +354,7 @@ func (q *QueueStore) updateAckLevel( ).WithContext(ctx) applied, err := query.MapScanCAS(make(map[string]interface{})) 
if err != nil { - gocql.ConvertError("updateAckLevel", err) + return gocql.ConvertError("updateAckLevel", err) } if !applied { return &persistence.ConditionFailedError{Msg: "UpdateAckLevel operation encountered concurrent write."} diff --git a/common/persistence/data_blob.go b/common/persistence/data_blob.go index b8a98fc26e0..3ed1aa50852 100644 --- a/common/persistence/data_blob.go +++ b/common/persistence/data_blob.go @@ -40,7 +40,7 @@ func NewDataBlob(data []byte, encodingTypeStr string) *commonpb.DataBlob { encodingType, ok := enumspb.EncodingType_value[encodingTypeStr] if !ok || (enumspb.EncodingType(encodingType) != enumspb.ENCODING_TYPE_PROTO3 && enumspb.EncodingType(encodingType) != enumspb.ENCODING_TYPE_JSON) { - panic(fmt.Sprintf("Invalid encoding: \"%v\"", encodingTypeStr)) + panic(fmt.Sprintf("Invalid encoding: %v", encodingTypeStr)) } return &commonpb.DataBlob{ diff --git a/common/persistence/history_manager.go b/common/persistence/history_manager.go index 0199cd3a189..b4dcbbccdf0 100644 --- a/common/persistence/history_manager.go +++ b/common/persistence/history_manager.go @@ -59,7 +59,7 @@ func (m *executionManagerImpl) ForkHistoryBranch( if request.ForkNodeID <= 1 { return nil, &InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("ForkNodeID must be > 1"), + Msg: "ForkNodeID must be > 1", } } @@ -350,7 +350,7 @@ func (m *executionManagerImpl) serializeAppendHistoryNodesRequest( if len(request.Events) == 0 { return nil, &InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("events to be appended cannot be empty"), + Msg: "events to be appended cannot be empty", } } sortAncestors(branch.Ancestors) @@ -361,18 +361,18 @@ func (m *executionManagerImpl) serializeAppendHistoryNodesRequest( if nodeID <= 0 { return nil, &InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("eventID cannot be less than 1"), + Msg: "eventID cannot be less than 1", } } for _, e := range request.Events { if e.Version != version { return nil, &InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("event version must be the same inside a batch"), + Msg: "event version must be the same inside a batch", } } if e.EventId != lastID+1 { return nil, &InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("event ID must be continous"), + Msg: "event ID must be continuous", } } lastID++ @@ -419,7 +419,7 @@ func (m *executionManagerImpl) serializeAppendHistoryNodesRequest( if nodeID < GetBeginNodeID(branch) { return nil, &InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("cannot append to ancestors' nodes"), + Msg: "cannot append to ancestors' nodes", } } @@ -436,7 +436,7 @@ func (m *executionManagerImpl) serializeAppendRawHistoryNodesRequest( if len(request.History.Data) == 0 { return nil, &InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("events to be appended cannot be empty"), + Msg: "events to be appended cannot be empty", } } sortAncestors(branch.Ancestors) @@ -444,7 +444,7 @@ func (m *executionManagerImpl) serializeAppendRawHistoryNodesRequest( nodeID := request.NodeID if nodeID <= 0 { return nil, &InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("eventID cannot be less than 1"), + Msg: "eventID cannot be less than 1", } } // nodeID will be the first eventID @@ -484,7 +484,7 @@ func (m *executionManagerImpl) serializeAppendRawHistoryNodesRequest( if nodeID < GetBeginNodeID(branch) { return nil, &InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("cannot append to ancestors' nodes"), + Msg: "cannot append to ancestors' nodes", } } @@ -934,7 +934,7 @@ func (m *executionManagerImpl) readHistoryBranch( } if len(events)
== 0 { m.logger.Error("Empty events in a batch") - return nil, nil, nil, nil, dataSize, serviceerror.NewDataLoss(fmt.Sprintf("corrupted history event batch, empty events")) + return nil, nil, nil, nil, dataSize, serviceerror.NewDataLoss("corrupted history event batch, empty events") } firstEvent := events[0] // first @@ -992,7 +992,7 @@ func (m *executionManagerImpl) readHistoryBranchReverse( } if len(events) == 0 { m.logger.Error("Empty events in a batch") - return nil, nil, nil, dataSize, serviceerror.NewDataLoss(fmt.Sprintf("corrupted history event batch, empty events")) + return nil, nil, nil, dataSize, serviceerror.NewDataLoss("corrupted history event batch, empty events") } firstEvent := events[0] // first @@ -1059,9 +1059,9 @@ func (m *executionManagerImpl) filterHistoryNodes( switch { case node.NodeID < lastNodeID: - return nil, serviceerror.NewUnavailable(fmt.Sprintf("corrupted data, nodeID cannot decrease")) + return nil, serviceerror.NewUnavailable("corrupted data, nodeID cannot decrease") case node.NodeID == lastNodeID: - return nil, serviceerror.NewUnavailable(fmt.Sprintf("corrupted data, same nodeID must have smaller txnID")) + return nil, serviceerror.NewUnavailable("corrupted data, same nodeID must have smaller txnID") default: // row.NodeID > lastNodeID: // NOTE: when row.nodeID > lastNodeID, we expect the one with largest txnID comes first lastTransactionID = node.TransactionID @@ -1088,7 +1088,7 @@ func (m *executionManagerImpl) filterHistoryNodesReverse( switch { case node.NodeID > lastNodeID: - return nil, serviceerror.NewUnavailable(fmt.Sprintf("corrupted data, nodeID cannot decrease")) + return nil, serviceerror.NewUnavailable("corrupted data, nodeID cannot increase") default: lastTransactionID = node.PrevTransactionID lastNodeID = node.NodeID diff --git a/common/persistence/namespaceReplicationQueue.go b/common/persistence/namespaceReplicationQueue.go index aa57a954a61..4470d60c112 100644 --- a/common/persistence/namespaceReplicationQueue.go +++ b/common/persistence/namespaceReplicationQueue.go @@ -232,19 +232,22 @@ func (q *namespaceReplicationQueueImpl) updateAckLevel( clusterName string, isDLQ bool, ) error { - var err error + var ackLevelErr error var internalMetadata *InternalQueueMetadata if isDLQ { - internalMetadata, err = q.queue.GetDLQAckLevels(ctx) + internalMetadata, ackLevelErr = q.queue.GetDLQAckLevels(ctx) } else { - internalMetadata, err = q.queue.GetAckLevels(ctx) + internalMetadata, ackLevelErr = q.queue.GetAckLevels(ctx) } - if err != nil { - return err + if ackLevelErr != nil { + return ackLevelErr } ackLevels, err := q.ackLevelsFromBlob(internalMetadata.Blob) + if err != nil { + return err + } // Ignore possibly delayed message if ack, ok := ackLevels[clusterName]; ok && ack > lastProcessedMessageID { diff --git a/common/persistence/nosql/nosqlplugin/cassandra/gocql/client.go b/common/persistence/nosql/nosqlplugin/cassandra/gocql/client.go index db7586b158b..3f03b2e955d 100644 --- a/common/persistence/nosql/nosqlplugin/cassandra/gocql/client.go +++ b/common/persistence/nosql/nosqlplugin/cassandra/gocql/client.go @@ -68,15 +68,15 @@ func NewCassandraCluster( } if cfg.TLS != nil && cfg.TLS.Enabled { if cfg.TLS.CertData != "" && cfg.TLS.CertFile != "" { - return nil, errors.New("Cannot specify both certData and certFile properties") + return nil, errors.New("only one of certData or certFile properties should be specified") } if cfg.TLS.KeyData != "" && cfg.TLS.KeyFile != "" { - return nil, errors.New("Cannot specify both keyData and keyFile
properties") + return nil, errors.New("only one of keyData or keyFile properties should be specified") } if cfg.TLS.CaData != "" && cfg.TLS.CaFile != "" { - return nil, errors.New("Cannot specify both caData and caFile properties") + return nil, errors.New("only one of caData or caFile properties should be specified") } cluster.SslOpts = &gocql.SslOptions{ @@ -161,17 +161,6 @@ func NewCassandraCluster( return cluster, nil } -// regionHostFilter returns a gocql host filter for the given region name -func regionHostFilter(region string) gocql.HostFilter { - return gocql.HostFilterFunc(func(host *gocql.HostInfo) bool { - applicationRegion := region - if len(host.DataCenter()) < 3 { - return false - } - return host.DataCenter()[:3] == applicationRegion - }) -} - // parseHosts returns parses a list of hosts separated by comma func parseHosts(input string) []string { var hosts []string diff --git a/common/persistence/nosql/nosqlplugin/cassandra/gocql/client_test.go b/common/persistence/nosql/nosqlplugin/cassandra/gocql/client_test.go index 3251b394048..c99b8e44125 100644 --- a/common/persistence/nosql/nosqlplugin/cassandra/gocql/client_test.go +++ b/common/persistence/nosql/nosqlplugin/cassandra/gocql/client_test.go @@ -106,7 +106,7 @@ func TestNewCassandraCluster(t *testing.T) { CertFile: "/a/b/c", }, }, - err: errors.New("Cannot specify both certData and certFile properties"), + err: errors.New("only one of certData or certFile properties should be specified"), }, "clientCert_duplicate_key": { cfg: config.Cassandra{ @@ -116,7 +116,7 @@ func TestNewCassandraCluster(t *testing.T) { KeyFile: "/a/b/c", }, }, - err: errors.New("Cannot specify both keyData and keyFile properties"), + err: errors.New("only one of keyData or keyFile properties should be specified"), }, "clientCert_duplicate_ca": { cfg: config.Cassandra{ @@ -126,7 +126,7 @@ func TestNewCassandraCluster(t *testing.T) { CaFile: "/a/b/c", }, }, - err: errors.New("Cannot specify both caData and caFile properties"), + err: errors.New("only one of caData or caFile properties should be specified"), }, } diff --git a/common/persistence/nosql/nosqlplugin/cassandra/gocql/consistency.go b/common/persistence/nosql/nosqlplugin/cassandra/gocql/consistency.go index b0f31f882e3..88bc446dcd0 100644 --- a/common/persistence/nosql/nosqlplugin/cassandra/gocql/consistency.go +++ b/common/persistence/nosql/nosqlplugin/cassandra/gocql/consistency.go @@ -73,14 +73,3 @@ func mustConvertConsistency(c Consistency) gocql.Consistency { panic(fmt.Sprintf("Unknown gocql Consistency level: %v", c)) } } - -func mustConvertSerialConsistency(c SerialConsistency) gocql.SerialConsistency { - switch c { - case Serial: - return gocql.Serial - case LocalSerial: - return gocql.LocalSerial - default: - panic(fmt.Sprintf("Unknown gocql SerialConsistency level: %v", c)) - } -} diff --git a/common/persistence/persistence-tests/clusterMetadataManagerTest.go b/common/persistence/persistence-tests/clusterMetadataManagerTest.go index a096458394c..99d43c1f11f 100644 --- a/common/persistence/persistence-tests/clusterMetadataManagerTest.go +++ b/common/persistence/persistence-tests/clusterMetadataManagerTest.go @@ -458,6 +458,7 @@ func (s *ClusterMetadataManagerSuite) TestInitImmutableMetadataReadWrite() { // Case 10 - Get, data persisted // Fetch the persisted values getResp, err = s.ClusterMetadataManager.GetClusterMetadata(s.ctx, &p.GetClusterMetadataRequest{ClusterName: clusterNameToPersist}) + s.NoError(err) s.Equal("2.0", getResp.ClusterMetadata.VersionInfo.Current.Version) // 
Case 11 - List diff --git a/common/persistence/persistence-tests/historyV2PersistenceTest.go b/common/persistence/persistence-tests/historyV2PersistenceTest.go index 503c3246cd1..f51eee3eb9e 100644 --- a/common/persistence/persistence-tests/historyV2PersistenceTest.go +++ b/common/persistence/persistence-tests/historyV2PersistenceTest.go @@ -159,8 +159,8 @@ func (s *HistoryV2PersistenceSuite) TestScanAllTrees() { s.Nil(err) for _, br := range resp.Branches { uuidTreeId := br.TreeID - if trees[string(uuidTreeId)] == true { - delete(trees, string(uuidTreeId)) + if trees[uuidTreeId] { + delete(trees, uuidTreeId) s.True(br.ForkTime.UnixNano() > 0) s.True(len(br.BranchID) > 0) @@ -718,12 +718,10 @@ func (s *HistoryV2PersistenceSuite) newHistoryBranch(treeID string) ([]byte, err func (s *HistoryV2PersistenceSuite) deleteHistoryBranch(branch []byte) error { op := func() error { - var err error - err = s.ExecutionManager.DeleteHistoryBranch(s.ctx, &p.DeleteHistoryBranchRequest{ + return s.ExecutionManager.DeleteHistoryBranch(s.ctx, &p.DeleteHistoryBranchRequest{ BranchToken: branch, ShardID: s.ShardInfo.GetShardId(), }) - return err } return backoff.ThrottleRetry(op, historyTestRetryPolicy, isConditionFail) diff --git a/common/persistence/persistence-tests/metadataPersistenceV2Test.go b/common/persistence/persistence-tests/metadataPersistenceV2Test.go index 78dbbea0a15..c6ece67b235 100644 --- a/common/persistence/persistence-tests/metadataPersistenceV2Test.go +++ b/common/persistence/persistence-tests/metadataPersistenceV2Test.go @@ -1177,9 +1177,7 @@ func (m *MetadataPersistenceSuiteV2) TestListNamespaces_DeletedNamespace() { resp, err := m.ListNamespaces(2, token) m.NoError(err) token = resp.NextPageToken - for _, namespace := range resp.Namespaces { - listNamespacesPageSize2 = append(listNamespacesPageSize2, namespace) - } + listNamespacesPageSize2 = append(listNamespacesPageSize2, resp.Namespaces...) pageCount++ if len(token) == 0 { break @@ -1199,9 +1197,7 @@ func (m *MetadataPersistenceSuiteV2) TestListNamespaces_DeletedNamespace() { resp, err := m.ListNamespaces(1, token) m.NoError(err) token = resp.NextPageToken - for _, namespace := range resp.Namespaces { - listNamespacesPageSize1 = append(listNamespacesPageSize1, namespace) - } + listNamespacesPageSize1 = append(listNamespacesPageSize1, resp.Namespaces...) 
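// ---------------------------------------------------------------------------
// Editor's illustration, not part of the diff: the two hunks above replace
// element-by-element copy loops with a single variadic append, the idiomatic Go
// form. A minimal runnable sketch of the equivalence, using hypothetical
// []string values in place of the real *Namespace slices:
package main

import "fmt"

func main() {
	src := []string{"ns2", "ns3"}

	// Before: one append call per element.
	byLoop := []string{"ns1"}
	for _, v := range src {
		byLoop = append(byLoop, v)
	}

	// After: a single variadic call producing the same contents; append can
	// also grow capacity for all of src at once instead of per iteration.
	byVariadic := append([]string{"ns1"}, src...)

	fmt.Println(byLoop)     // [ns1 ns2 ns3]
	fmt.Println(byVariadic) // [ns1 ns2 ns3]
}
// ---------------------------------------------------------------------------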
pageCount++ if len(token) == 0 { break diff --git a/common/persistence/serialization/blob.go b/common/persistence/serialization/blob.go index 7fa01208a42..31e2b4d7377 100644 --- a/common/persistence/serialization/blob.go +++ b/common/persistence/serialization/blob.go @@ -117,7 +117,7 @@ func encode( switch encoding { case enumspb.ENCODING_TYPE_JSON: - blob, err := codec.NewJSONPBEncoder().Encode(object.(proto.Message)) + blob, err := codec.NewJSONPBEncoder().Encode(object) if err != nil { return commonpb.DataBlob{}, err } diff --git a/common/persistence/serialization/serializer.go b/common/persistence/serialization/serializer.go index b0815ed41c7..366903693a2 100644 --- a/common/persistence/serialization/serializer.go +++ b/common/persistence/serialization/serializer.go @@ -497,7 +497,7 @@ func encodeBlob(o proto.Message, encoding enumspb.EncodingType) (*commonpb.DataB switch encoding { case enumspb.ENCODING_TYPE_JSON: - blob, err := codec.NewJSONPBEncoder().Encode(o.(proto.Message)) + blob, err := codec.NewJSONPBEncoder().Encode(o) if err != nil { return nil, err } diff --git a/common/persistence/shard_manager.go b/common/persistence/shard_manager.go index daeff476ea7..0a7993a81bc 100644 --- a/common/persistence/shard_manager.go +++ b/common/persistence/shard_manager.go @@ -63,8 +63,7 @@ func (m *shardManagerImpl) GetOrCreateShard( ctx context.Context, request *GetOrCreateShardRequest, ) (*GetOrCreateShardResponse, error) { - var createShardInfo func() (int64, *commonpb.DataBlob, error) - createShardInfo = func() (int64, *commonpb.DataBlob, error) { + createShardInfo := func() (int64, *commonpb.DataBlob, error) { shardInfo := request.InitialShardInfo if shardInfo == nil { shardInfo = &persistencespb.ShardInfo{} diff --git a/common/persistence/sql/common.go b/common/persistence/sql/common.go index 65059d9c508..1dc5f085655 100644 --- a/common/persistence/sql/common.go +++ b/common/persistence/sql/common.go @@ -121,7 +121,7 @@ func serializePageToken(offset int64) []byte { func deserializePageToken(payload []byte) (int64, error) { if len(payload) != 8 { - return 0, fmt.Errorf("Invalid token of %v length", len(payload)) + return 0, fmt.Errorf("invalid token of %v length", len(payload)) } return int64(binary.LittleEndian.Uint64(payload)), nil } diff --git a/common/persistence/sql/execution.go b/common/persistence/sql/execution.go index c1d3b824a2c..e47f23a4c25 100644 --- a/common/persistence/sql/execution.go +++ b/common/persistence/sql/execution.go @@ -394,7 +394,7 @@ func (m *sqlExecutionStore) updateWorkflowExecutionTx( newRunID := primitives.MustParseUUID(newWorkflow.ExecutionState.RunId) if !bytes.Equal(namespaceID, newNamespaceID) { - return serviceerror.NewUnavailable(fmt.Sprintf("UpdateWorkflowExecution: cannot continue as new to another namespace")) + return serviceerror.NewUnavailable("UpdateWorkflowExecution: cannot continue as new to another namespace") } if err := assertRunIDAndUpdateCurrentExecution(ctx, diff --git a/common/persistence/sql/factory.go b/common/persistence/sql/factory.go index 45c0b96f03e..aff808e3ad4 100644 --- a/common/persistence/sql/factory.go +++ b/common/persistence/sql/factory.go @@ -27,7 +27,6 @@ package sql import ( "fmt" "sync" - "time" "go.temporal.io/server/common/config" "go.temporal.io/server/common/log" @@ -36,10 +35,6 @@ import ( "go.temporal.io/server/common/resolver" ) -const ( - executionTimeout = 8 * time.Second -) - type ( // Factory vends store objects backed by MySQL Factory struct { diff --git a/common/persistence/sql/history_store.go 
b/common/persistence/sql/history_store.go index 0c604a156ee..ae70ee6999e 100644 --- a/common/persistence/sql/history_store.go +++ b/common/persistence/sql/history_store.go @@ -131,7 +131,7 @@ func (m *sqlExecutionStore) DeleteHistoryNodes( if nodeID < p.GetBeginNodeID(branchInfo) { return &p.InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("cannot append to ancestors' nodes"), + Msg: "cannot append to ancestors' nodes", } } @@ -268,7 +268,7 @@ func (m *sqlExecutionStore) ReadHistoryBranch( // \ // 8[8] // -//Now we want to fork a new branch B3 from B2. +// Now we want to fork a new branch B3 from B2. // The only valid forking nodeIDs are 3,6 or 8. // 1 is not valid because we can't fork from first node. // 2/4/5 is NOT valid either because they are inside a batch. diff --git a/common/persistence/sql/queue.go b/common/persistence/sql/queue.go index 7f4bc1ca784..267216d797d 100644 --- a/common/persistence/sql/queue.go +++ b/common/persistence/sql/queue.go @@ -222,7 +222,7 @@ func (q *sqlQueue) ReadMessagesFromDLQ( pageSize int, pageToken []byte, ) ([]*persistence.QueueMessage, []byte, error) { - if pageToken != nil && len(pageToken) != 0 { + if len(pageToken) != 0 { lastReadMessageID, err := deserializePageToken(pageToken) if err != nil { return nil, nil, serviceerror.NewInternal(fmt.Sprintf("invalid next page token %v", pageToken)) diff --git a/common/persistence/sql/shard.go b/common/persistence/sql/shard.go index 2805b5399c7..f60b9c347e4 100644 --- a/common/persistence/sql/shard.go +++ b/common/persistence/sql/shard.go @@ -70,7 +70,6 @@ func (m *sqlShardStore) GetOrCreateShard( ShardInfo: persistence.NewDataBlob(row.Data, row.DataEncoding), }, nil case sql.ErrNoRows: - break default: return nil, serviceerror.NewUnavailable(fmt.Sprintf("GetOrCreateShard: failed to get ShardID %v. Error: %v", request.ShardID, err)) } @@ -80,6 +79,9 @@ func (m *sqlShardStore) GetOrCreateShard( } rangeID, shardInfo, err := request.CreateShardInfo() + if err != nil { + return nil, serviceerror.NewUnavailable(fmt.Sprintf("GetOrCreateShard: failed to encode shard info for ShardID %v. 
Error: %v", request.ShardID, err)) + } row = &sqlplugin.ShardsRow{ ShardID: request.ShardID, RangeID: rangeID, diff --git a/common/persistence/sql/sqlplugin/sqlite/conn_pool.go b/common/persistence/sql/sqlplugin/sqlite/conn_pool.go index 380e6399304..14ee28bbd02 100644 --- a/common/persistence/sql/sqlplugin/sqlite/conn_pool.go +++ b/common/persistence/sql/sqlplugin/sqlite/conn_pool.go @@ -99,11 +99,11 @@ func (cp *connPool) Close(cfg *config.SQL) { } e.refCount-- - if e.refCount == 0 { - // todo: at the moment pool will persist a single connection to the DB for the whole duration of application - // temporal will start and stop DB connections multiple times, which will cause the loss of the cache - // and "db is closed" error - // e.db.Close() - // delete(cp.pool, dsn) - } + // todo: at the moment pool will persist a single connection to the DB for the whole duration of application + // temporal will start and stop DB connections multiple times, which will cause the loss of the cache + // and "db is closed" error + // if e.refCount == 0 { + // e.db.Close() + // delete(cp.pool, dsn) + // } } diff --git a/common/persistence/sql/sqlplugin/tests/history_node_test.go b/common/persistence/sql/sqlplugin/tests/history_node_test.go index dac5b9b9d7c..31aa51acfbd 100644 --- a/common/persistence/sql/sqlplugin/tests/history_node_test.go +++ b/common/persistence/sql/sqlplugin/tests/history_node_test.go @@ -334,7 +334,6 @@ func (s *historyNodeSuite) TestInsertDeleteSelect_Multiple() { nodeID := int64(1) minNodeID := nodeID - var nodes []sqlplugin.HistoryNodeRow for i := 0; i < numNodeIDs; i++ { for j := 0; j < nodePerNodeID; j++ { node := s.newRandomNodeRow(shardID, treeID, branchID, nodeID, rand.Int63(), rand.Int63()) @@ -343,7 +342,6 @@ func (s *historyNodeSuite) TestInsertDeleteSelect_Multiple() { rowsAffected, err := result.RowsAffected() s.NoError(err) s.Equal(1, int(rowsAffected)) - nodes = append(nodes, node) } nodeID++ } diff --git a/common/persistence/sql/sqlplugin/tests/history_shard_test.go b/common/persistence/sql/sqlplugin/tests/history_shard_test.go index afe88bc8845..a6089845071 100644 --- a/common/persistence/sql/sqlplugin/tests/history_shard_test.go +++ b/common/persistence/sql/sqlplugin/tests/history_shard_test.go @@ -141,6 +141,7 @@ func (s *historyShardSuite) TestInsertUpdate_Success() { result, err = s.store.UpdateShards(newExecutionContext(), &shard) s.NoError(err) rowsAffected, err = result.RowsAffected() + s.NoError(err) s.Equal(1, int(rowsAffected)) } @@ -152,6 +153,7 @@ func (s *historyShardSuite) TestUpdate_Fail() { result, err := s.store.UpdateShards(newExecutionContext(), &shard) s.NoError(err) rowsAffected, err := result.RowsAffected() + s.NoError(err) s.Equal(0, int(rowsAffected)) } @@ -171,6 +173,7 @@ func (s *historyShardSuite) TestInsertUpdateSelect() { result, err = s.store.UpdateShards(newExecutionContext(), &shard) s.NoError(err) rowsAffected, err = result.RowsAffected() + s.NoError(err) s.Equal(1, int(rowsAffected)) filter := sqlplugin.ShardsFilter{ diff --git a/common/persistence/sql/sqlplugin/tests/matching_task_queue_test.go b/common/persistence/sql/sqlplugin/tests/matching_task_queue_test.go index 4950a4645a4..702075213dc 100644 --- a/common/persistence/sql/sqlplugin/tests/matching_task_queue_test.go +++ b/common/persistence/sql/sqlplugin/tests/matching_task_queue_test.go @@ -147,6 +147,7 @@ func (s *matchingTaskQueueSuite) TestInsertUpdate_Success() { result, err = s.store.UpdateTaskQueues(newExecutionContext(), &taskQueue) s.NoError(err) rowsAffected, err = 
result.RowsAffected() + s.NoError(err) s.Equal(1, int(rowsAffected)) } @@ -158,6 +159,7 @@ func (s *matchingTaskQueueSuite) TestUpdate_Fail() { result, err := s.store.UpdateTaskQueues(newExecutionContext(), &taskQueue) s.NoError(err) rowsAffected, err := result.RowsAffected() + s.NoError(err) s.Equal(0, int(rowsAffected)) } @@ -177,6 +179,7 @@ func (s *matchingTaskQueueSuite) TestInsertUpdateSelect() { result, err = s.store.UpdateTaskQueues(newExecutionContext(), &taskQueue) s.NoError(err) rowsAffected, err = result.RowsAffected() + s.NoError(err) s.Equal(1, int(rowsAffected)) filter := sqlplugin.TaskQueuesFilter{ @@ -200,6 +203,7 @@ func (s *matchingTaskQueueSuite) TestDeleteSelect() { result, err := s.store.DeleteFromTaskQueues(newExecutionContext(), filter) s.NoError(err) rowsAffected, err := result.RowsAffected() + s.NoError(err) s.Equal(0, int(rowsAffected)) filter = sqlplugin.TaskQueuesFilter{ @@ -230,6 +234,7 @@ func (s *matchingTaskQueueSuite) TestInsertDeleteSelect_Success() { result, err = s.store.DeleteFromTaskQueues(newExecutionContext(), filter) s.NoError(err) rowsAffected, err = result.RowsAffected() + s.NoError(err) s.Equal(1, int(rowsAffected)) filter = sqlplugin.TaskQueuesFilter{ @@ -260,6 +265,7 @@ func (s *matchingTaskQueueSuite) TestInsertDeleteSelect_Fail() { result, err = s.store.DeleteFromTaskQueues(newExecutionContext(), filter) s.NoError(err) rowsAffected, err = result.RowsAffected() + s.NoError(err) s.Equal(0, int(rowsAffected)) filter = sqlplugin.TaskQueuesFilter{ diff --git a/common/persistence/sql/sqlplugin/tests/namespace_test.go b/common/persistence/sql/sqlplugin/tests/namespace_test.go index b5e3dc32aaf..0e882e428a4 100644 --- a/common/persistence/sql/sqlplugin/tests/namespace_test.go +++ b/common/persistence/sql/sqlplugin/tests/namespace_test.go @@ -175,6 +175,7 @@ func (s *namespaceSuite) TestUpdate_Fail() { result, err := s.store.UpdateNamespace(newExecutionContext(), &namespace) s.NoError(err) rowsAffected, err := result.RowsAffected() + s.NoError(err) s.Equal(0, int(rowsAffected)) } diff --git a/common/persistence/sql/sqlplugin/tests/queue_metadata_test.go b/common/persistence/sql/sqlplugin/tests/queue_metadata_test.go index 05dd6c01226..0b480be932c 100644 --- a/common/persistence/sql/sqlplugin/tests/queue_metadata_test.go +++ b/common/persistence/sql/sqlplugin/tests/queue_metadata_test.go @@ -86,6 +86,7 @@ func (s *queueMetadataSuite) TestInsert_Success() { result, err := s.store.InsertIntoQueueMetadata(newExecutionContext(), &queueMetadata) s.NoError(err) rowsAffected, err := result.RowsAffected() + s.NoError(err) s.Equal(1, int(rowsAffected)) } @@ -96,6 +97,7 @@ func (s *queueMetadataSuite) TestInsert_Fail_Duplicate() { result, err := s.store.InsertIntoQueueMetadata(newExecutionContext(), &queueMetadata) s.NoError(err) rowsAffected, err := result.RowsAffected() + s.NoError(err) s.Equal(1, int(rowsAffected)) queueMetadata = s.newRandomQueueMetadataRow(queueType) @@ -110,6 +112,7 @@ func (s *queueMetadataSuite) TestInsertSelect() { result, err := s.store.InsertIntoQueueMetadata(newExecutionContext(), &queueMetadata) s.NoError(err) rowsAffected, err := result.RowsAffected() + s.NoError(err) s.Equal(1, int(rowsAffected)) filter := sqlplugin.QueueMetadataFilter{ @@ -128,12 +131,14 @@ func (s *queueMetadataSuite) TestInsertUpdate_Success() { result, err := s.store.InsertIntoQueueMetadata(newExecutionContext(), &queueMetadata) s.NoError(err) rowsAffected, err := result.RowsAffected() + s.NoError(err) s.Equal(1, int(rowsAffected)) queueMetadata = 
s.newRandomQueueMetadataRow(queueType) result, err = s.store.UpdateQueueMetadata(newExecutionContext(), &queueMetadata) s.NoError(err) rowsAffected, err = result.RowsAffected() + s.NoError(err) s.Equal(1, int(rowsAffected)) } @@ -144,6 +149,7 @@ func (s *queueMetadataSuite) TestUpdate_Fail() { result, err := s.store.UpdateQueueMetadata(newExecutionContext(), &queueMetadata) s.NoError(err) rowsAffected, err := result.RowsAffected() + s.NoError(err) s.Equal(0, int(rowsAffected)) } @@ -154,12 +160,14 @@ func (s *queueMetadataSuite) TestInsertUpdateSelect() { result, err := s.store.InsertIntoQueueMetadata(newExecutionContext(), &queueMetadata) s.NoError(err) rowsAffected, err := result.RowsAffected() + s.NoError(err) s.Equal(1, int(rowsAffected)) queueMetadata = s.newRandomQueueMetadataRow(queueType) result, err = s.store.UpdateQueueMetadata(newExecutionContext(), &queueMetadata) s.NoError(err) rowsAffected, err = result.RowsAffected() + s.NoError(err) s.Equal(1, int(rowsAffected)) filter := sqlplugin.QueueMetadataFilter{ @@ -179,6 +187,7 @@ func (s *queueMetadataSuite) TestSelectReadLock() { result, err := s.store.InsertIntoQueueMetadata(newExecutionContext(), &queueMetadata) s.NoError(err) rowsAffected, err := result.RowsAffected() + s.NoError(err) s.Equal(1, int(rowsAffected)) // NOTE: lock without transaction is equivalent to select diff --git a/common/persistence/sql/sqlplugin/tests/visibility_test.go b/common/persistence/sql/sqlplugin/tests/visibility_test.go index b546f051a76..edc22f470e2 100644 --- a/common/persistence/sql/sqlplugin/tests/visibility_test.go +++ b/common/persistence/sql/sqlplugin/tests/visibility_test.go @@ -60,10 +60,6 @@ var ( testVisibilityData = []byte("random history execution activity data") ) -var testVisibilityOpenStatus = []enumspb.WorkflowExecutionStatus{ - enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, -} - var testVisibilityCloseStatus = []enumspb.WorkflowExecutionStatus{ enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, diff --git a/common/persistence/sql/task.go b/common/persistence/sql/task.go index 470a062b79a..fd2d0b758a7 100644 --- a/common/persistence/sql/task.go +++ b/common/persistence/sql/task.go @@ -57,7 +57,7 @@ type ( var ( // minUUID = primitives.MustParseUUID("00000000-0000-0000-0000-000000000000") - minTaskQueueId = make([]byte, 0, 0) + minTaskQueueId = make([]byte, 0) ) // newTaskPersistence creates a new instance of TaskManager @@ -477,15 +477,6 @@ func (m *sqlTaskManager) CompleteTasksLessThan( return int(nRows), nil } -// Returns uint32 hash for a particular TaskQueue/Task given a Namespace, TaskQueueName and TaskQueueType -func (m *sqlTaskManager) calculateTaskQueueHash( - namespaceID primitives.UUID, - name string, - taskType enumspb.TaskQueueType, -) uint32 { - return farm.Fingerprint32(m.taskQueueId(namespaceID, name, taskType)) -} - // Returns uint32 hash for a particular TaskQueue/Task given a Namespace, TaskQueueName and TaskQueueType func (m *sqlTaskManager) taskQueueIdAndHash( namespaceID primitives.UUID, diff --git a/common/persistence/tests/task_queue.go b/common/persistence/tests/task_queue.go index 56773ac1a41..053fa3740f2 100644 --- a/common/persistence/tests/task_queue.go +++ b/common/persistence/tests/task_queue.go @@ -142,10 +142,10 @@ func (s *TaskQueueSuite) TestCreate_Sticky_Dup() { func (s *TaskQueueSuite) TestUpdate_Normal() { prevRangeID := rand.Int63() - taskQueue := s.createTaskQueue(prevRangeID, enumspb.TASK_QUEUE_KIND_NORMAL) + _ = s.createTaskQueue(prevRangeID, 
enumspb.TASK_QUEUE_KIND_NORMAL) rangID := rand.Int63() - taskQueue = s.randomTaskQueueInfo(enumspb.TASK_QUEUE_KIND_NORMAL) + taskQueue := s.randomTaskQueueInfo(enumspb.TASK_QUEUE_KIND_NORMAL) _, err := s.taskManager.UpdateTaskQueue(s.ctx, &p.UpdateTaskQueueRequest{ RangeID: rangID, TaskQueueInfo: taskQueue, @@ -175,10 +175,10 @@ func (s *TaskQueueSuite) TestUpdate_Normal_Conflict() { func (s *TaskQueueSuite) TestUpdate_Sticky() { prevRangeID := rand.Int63() - taskQueue := s.createTaskQueue(prevRangeID, enumspb.TASK_QUEUE_KIND_STICKY) + _ = s.createTaskQueue(prevRangeID, enumspb.TASK_QUEUE_KIND_STICKY) rangID := rand.Int63() - taskQueue = s.randomTaskQueueInfo(enumspb.TASK_QUEUE_KIND_STICKY) + taskQueue := s.randomTaskQueueInfo(enumspb.TASK_QUEUE_KIND_STICKY) _, err := s.taskManager.UpdateTaskQueue(s.ctx, &p.UpdateTaskQueueRequest{ RangeID: rangID, TaskQueueInfo: taskQueue, diff --git a/common/persistence/visibility/persistence-tests/visibility_persistence_suite_test.go b/common/persistence/visibility/persistence-tests/visibility_persistence_suite_test.go index 5ce7a1e226a..7b190b776d5 100644 --- a/common/persistence/visibility/persistence-tests/visibility_persistence_suite_test.go +++ b/common/persistence/visibility/persistence-tests/visibility_persistence_suite_test.go @@ -331,7 +331,7 @@ func (s *VisibilityPersistenceSuite) TestFilteringByType() { resp, err := s.VisibilityMgr.ListWorkflowExecutions(s.ctx, &manager.ListWorkflowExecutionsRequestV2{ NamespaceID: testNamespaceUUID, PageSize: 2, - Query: fmt.Sprintf(`WorkflowType = "visibility-workflow-1"`), + Query: `WorkflowType = "visibility-workflow-1"`, }) s.Nil(err) s.Equal(1, len(resp.Executions)) @@ -359,7 +359,7 @@ func (s *VisibilityPersistenceSuite) TestFilteringByType() { resp, err = s.VisibilityMgr.ListWorkflowExecutions(s.ctx, &manager.ListWorkflowExecutionsRequestV2{ NamespaceID: testNamespaceUUID, PageSize: 2, - Query: fmt.Sprintf(`WorkflowType = "visibility-workflow-2"`), + Query: `WorkflowType = "visibility-workflow-2"`, }) s.Nil(err) s.Equal(1, len(resp.Executions)) @@ -393,7 +393,7 @@ func (s *VisibilityPersistenceSuite) TestFilteringByWorkflowID() { resp, err := s.VisibilityMgr.ListWorkflowExecutions(s.ctx, &manager.ListWorkflowExecutionsRequestV2{ NamespaceID: testNamespaceUUID, PageSize: 2, - Query: fmt.Sprintf(`WorkflowId = "visibility-filtering-test1"`), + Query: `WorkflowId = "visibility-filtering-test1"`, }) s.Nil(err) s.Equal(1, len(resp.Executions)) @@ -421,7 +421,7 @@ func (s *VisibilityPersistenceSuite) TestFilteringByWorkflowID() { resp, err = s.VisibilityMgr.ListWorkflowExecutions(s.ctx, &manager.ListWorkflowExecutionsRequestV2{ NamespaceID: testNamespaceUUID, PageSize: 2, - Query: fmt.Sprintf(`WorkflowId = "visibility-filtering-test2"`), + Query: `WorkflowId = "visibility-filtering-test2"`, }) s.Nil(err) s.Equal(1, len(resp.Executions)) diff --git a/common/persistence/visibility/store/elasticsearch/client/client_v7.go b/common/persistence/visibility/store/elasticsearch/client/client_v7.go index 424594077ac..5dcca3d2046 100644 --- a/common/persistence/visibility/store/elasticsearch/client/client_v7.go +++ b/common/persistence/visibility/store/elasticsearch/client/client_v7.go @@ -277,6 +277,7 @@ func (c *clientV7) Bulk() BulkService { } func (c *clientV7) IndexPutTemplate(ctx context.Context, templateName string, bodyString string) (bool, error) { + //lint:ignore SA1019 Changing to IndexPutIndexTemplate requires template changes and will be done separately. 
resp, err := c.esClient.IndexPutTemplate(templateName).BodyString(bodyString).Do(ctx) if err != nil { return false, err diff --git a/common/persistence/visibility/store/elasticsearch/client/config.go b/common/persistence/visibility/store/elasticsearch/client/config.go index 376711c2245..07bc6836801 100644 --- a/common/persistence/visibility/store/elasticsearch/client/config.go +++ b/common/persistence/visibility/store/elasticsearch/client/config.go @@ -40,10 +40,10 @@ const ( type ( Config struct { Version string `yaml:"version"` - URL url.URL `yaml:"url"` //nolint:govet + URL url.URL `yaml:"url"` Username string `yaml:"username"` Password string `yaml:"password"` - Indices map[string]string `yaml:"indices"` //nolint:govet + Indices map[string]string `yaml:"indices"` LogLevel string `yaml:"logLevel"` AWSRequestSigning ESAWSRequestSigningConfig `yaml:"aws-request-signing"` CloseIdleConnectionsInterval time.Duration `yaml:"closeIdleConnectionsInterval"` diff --git a/common/persistence/visibility/store/elasticsearch/visibility_store.go b/common/persistence/visibility/store/elasticsearch/visibility_store.go index 02285e38f15..0a82ca754a7 100644 --- a/common/persistence/visibility/store/elasticsearch/visibility_store.go +++ b/common/persistence/visibility/store/elasticsearch/visibility_store.go @@ -58,8 +58,6 @@ const ( delimiter = "~" pointInTimeKeepAliveInterval = "1m" scrollKeepAliveInterval = "1m" - - readTimeout = 16 * time.Second ) // Default sort by uses the sorting order defined in the index template, so no diff --git a/common/persistence/visibility/store/elasticsearch/visibility_store_read_test.go b/common/persistence/visibility/store/elasticsearch/visibility_store_read_test.go index 21d52794ddc..399cf0f209e 100644 --- a/common/persistence/visibility/store/elasticsearch/visibility_store_read_test.go +++ b/common/persistence/visibility/store/elasticsearch/visibility_store_read_test.go @@ -73,7 +73,6 @@ var ( testLatestTime = time.Unix(0, 2547596872371000000).UTC() testWorkflowType = "test-wf-type" testWorkflowID = "test-wid" - testRunID = "1601da05-4db9-4eeb-89e4-da99481bdfc9" testStatus = enumspb.WORKFLOW_EXECUTION_STATUS_FAILED testSearchResult = &elastic.SearchResult{ @@ -85,7 +84,6 @@ var ( filterClose = fmt.Sprintf("must_not:map[term:map[ExecutionStatus:%s]]", enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING.String()) filterByType = fmt.Sprintf("map[term:map[WorkflowType:%s]", testWorkflowType) filterByWID = fmt.Sprintf("map[term:map[WorkflowId:%s]", testWorkflowID) - filterByRunID = fmt.Sprintf("map[term:map[RunId:%s]", testRunID) filterByExecutionStatus = fmt.Sprintf("map[term:map[ExecutionStatus:%s]", testStatus.String()) ) @@ -531,7 +529,7 @@ func (s *ESVisibilitySuite) Test_convertQuery() { s.Equal(`[{"StartTime":{"order":"desc"}},{"CloseTime":{"order":"asc"}}]`, s.sorterToJSON(srt)) query = `order by CustomTextField desc` - qry, srt, err = s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) + _, _, err = s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) s.Error(err) s.IsType(&serviceerror.InvalidArgument{}, err) s.Equal(err.(*serviceerror.InvalidArgument).Error(), "invalid query: unable to convert 'order by' column name: unable to sort by field of Text type, use field of type Keyword") @@ -581,14 +579,14 @@ func (s *ESVisibilitySuite) Test_convertQuery_Mapper() { s.Nil(srt) query = `CustomKeywordField = 'pid'` - qry, srt, err = s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) + _, _, err = 
s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) s.Error(err) var invalidArgumentErr *serviceerror.InvalidArgument s.ErrorAs(err, &invalidArgumentErr) s.EqualError(err, "mapper error") query = `AliasForUnknownField = 'pid'` - qry, srt, err = s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) + _, _, err = s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) s.Error(err) s.ErrorAs(err, &invalidArgumentErr) s.EqualError(err, "invalid query: unable to convert filter expression: unable to convert left part of comparison expression: invalid search attribute: AliasForUnknownField") @@ -606,13 +604,13 @@ func (s *ESVisibilitySuite) Test_convertQuery_Mapper() { s.Equal(`[{"CustomKeywordField":{"order":"asc"}}]`, s.sorterToJSON(srt)) query = `order by CustomKeywordField asc` - qry, srt, err = s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) + _, _, err = s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) s.Error(err) s.ErrorAs(err, &invalidArgumentErr) s.EqualError(err, "mapper error") query = `order by AliasForUnknownField asc` - qry, srt, err = s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) + _, _, err = s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) s.Error(err) s.ErrorAs(err, &invalidArgumentErr) s.EqualError(err, "invalid query: unable to convert 'order by' column name: invalid search attribute: AliasForUnknownField") @@ -634,7 +632,7 @@ func (s *ESVisibilitySuite) Test_convertQuery_Mapper_Error() { s.Nil(srt) query = `ProductId = 'pid'` - qry, srt, err = s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) + _, _, err = s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) s.Error(err) var invalidArgumentErr *serviceerror.InvalidArgument s.ErrorAs(err, &invalidArgumentErr) @@ -647,7 +645,7 @@ func (s *ESVisibilitySuite) Test_convertQuery_Mapper_Error() { s.Equal(`[{"ExecutionTime":{"order":"asc"}}]`, s.sorterToJSON(srt)) query = `order by CustomIntField asc` - qry, srt, err = s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) + _, _, err = s.visibilityStore.convertQuery(testNamespace, testNamespaceID, query) s.Error(err) s.ErrorAs(err, &invalidArgumentErr) s.EqualError(err, "mapper error") diff --git a/common/persistence/visibility/store/query/interceptors_test.go b/common/persistence/visibility/store/query/interceptors_test.go index c779947dc26..bca4246164d 100644 --- a/common/persistence/visibility/store/query/interceptors_test.go +++ b/common/persistence/visibility/store/query/interceptors_test.go @@ -83,7 +83,7 @@ func TestNameInterceptor(t *testing.T) { actualSorterJson, _ := json.Marshal(actualSorterMaps) assert.Equal(t, `[{"StartTime1":{"order":"asc"}}]`, string(actualSorterJson)) - query, sorters, err = c.ConvertWhereOrderBy("error='Running' order by StartTime") + _, _, err = c.ConvertWhereOrderBy("error='Running' order by StartTime") assert.Error(t, err) assert.Contains(t, err.Error(), "interceptor error") } @@ -108,7 +108,7 @@ func TestValuesInterceptor(t *testing.T) { actualQueryJson, _ = json.Marshal(actualQueryMap) assert.Equal(t, `{"bool":{"filter":{"range":{"ExecutionStatus":{"from":"Status5","include_lower":true,"include_upper":true,"to":"Status7"}}}}}`, string(actualQueryJson)) - query, _, err = c.ConvertWhereOrderBy("error='Running'") + _, _, err = c.ConvertWhereOrderBy("error='Running'") assert.Error(t, err) assert.Contains(t, err.Error(), "interceptor error") } diff --git 
a/common/persistence/visibility/store/standard/sql/visibility_store.go b/common/persistence/visibility/store/standard/sql/visibility_store.go index 07c312a499f..e131ded2354 100644 --- a/common/persistence/visibility/store/standard/sql/visibility_store.go +++ b/common/persistence/visibility/store/standard/sql/visibility_store.go @@ -43,10 +43,6 @@ import ( "go.temporal.io/server/common/resolver" ) -const ( - visibilityTimeout = 16 * time.Second -) - type ( visibilityStore struct { sqlStore persistencesql.SqlStore diff --git a/common/persistence/visibility/visibility_manager_test.go b/common/persistence/visibility/visibility_manager_test.go index ec78473724d..2438819916e 100644 --- a/common/persistence/visibility/visibility_manager_test.go +++ b/common/persistence/visibility/visibility_manager_test.go @@ -63,8 +63,6 @@ var ( RunId: "843f6fc7-102a-4c63-a2d4-7c653b01bf52", } testWorkflowTypeName = "visibility-workflow" - - listErrMsg = "Persistence Max QPS Reached." ) func TestVisibilityManagerSuite(t *testing.T) { diff --git a/common/primitives/timestamp/parseDuration.go b/common/primitives/timestamp/parseDuration.go index 7e4a2392591..3a95fbfa015 100644 --- a/common/primitives/timestamp/parseDuration.go +++ b/common/primitives/timestamp/parseDuration.go @@ -31,8 +31,8 @@ import ( ) var ( - reUnitless = regexp.MustCompile("^(\\d+(\\.\\d*)?|(\\.\\d+))$") - reDays = regexp.MustCompile("(\\d+(\\.\\d*)?|(\\.\\d+))d") + reUnitless = regexp.MustCompile(`^(\d+(\.\d*)?|(\.\d+))$`) + reDays = regexp.MustCompile(`(\d+(\.\d*)?|(\.\d+))d`) ) // ParseDuration is like time.ParseDuration, but supports unit "d" for days diff --git a/common/primitives/timestamp/time.go b/common/primitives/timestamp/time.go index b38d4a50565..5c5c013e77c 100644 --- a/common/primitives/timestamp/time.go +++ b/common/primitives/timestamp/time.go @@ -32,14 +32,6 @@ import ( "go.temporal.io/server/common/util" ) -var ( - // This is the maximal time value we support - maxValidTimeGo = time.Unix(0, MaxValidTimeNanoseconds).UTC() - maxValidTimestamp = TimestampFromTimePtr(&maxValidTimeGo) -) - -const MaxValidTimeNanoseconds = (2 ^ (64 - 1)) - 1 - // Timestamp provides easy conversions and utility comparison functions // making go to proto time comparison straightforward type Timestamp struct { diff --git a/common/ringpop/ringpop_test.go b/common/ringpop/ringpop_test.go index 2c484b6cfcf..6ada7e27244 100644 --- a/common/ringpop/ringpop_test.go +++ b/common/ringpop/ringpop_test.go @@ -27,7 +27,6 @@ package ringpop import ( "context" "crypto/tls" - "fmt" "os" "testing" "time" @@ -144,18 +143,6 @@ func (s *RingpopSuite) TestHostsMode() { s.NotNil(f) } -type mockResolver struct { - Hosts map[string][]string -} - -func (resolver *mockResolver) LookupHost(ctx context.Context, host string) ([]string, error) { - addrs, ok := resolver.Hosts[host] - if !ok { - return nil, fmt.Errorf("Host was not resolved: %s", host) - } - return addrs, nil -} - func (s *RingpopSuite) TestInvalidConfig() { var cfg config.Membership cfg.MaxJoinDuration = time.Minute diff --git a/common/rpc/encryption/localStoreCertProvider.go b/common/rpc/encryption/localStoreCertProvider.go index 35dce858f08..55c448e3c12 100644 --- a/common/rpc/encryption/localStoreCertProvider.go +++ b/common/rpc/encryption/localStoreCertProvider.go @@ -286,7 +286,7 @@ func (s *localStoreCertProvider) fetchCertificate( } if certFile != "" && certData != "" { - return nil, errors.New("Cannot specify both certFile and certData properties") + return nil, errors.New("only one of certFile or 
certData properties should be specified") } var certBytes []byte @@ -387,14 +387,6 @@ func (s *localStoreCertProvider) fetchCAs( return certPool, certs, nil } -func (s *localStoreCertProvider) fetchClientCert() (*tls.Certificate, error) { - return s.FetchClientCertificate(false) -} - -func (s *localStoreCertProvider) fetchWorkerCert() (*tls.Certificate, error) { - return s.FetchClientCertificate(true) -} - func checkTLSCertForExpiration( cert *tls.Certificate, when time.Time, @@ -533,7 +525,7 @@ func (s *localStoreCertProvider) refreshCerts() { for { select { case <-s.stop: - break + return case <-s.ticker.C: } @@ -585,7 +577,7 @@ func equal(a, b [][]byte) bool { return false } for i := range a { - if bytes.Compare(a[i], b[i]) != 0 { + if !bytes.Equal(a[i], b[i]) { return false } } diff --git a/common/rpc/encryption/localStoreTlsProvider.go b/common/rpc/encryption/localStoreTlsProvider.go index 2561132972a..b7d403abb45 100644 --- a/common/rpc/encryption/localStoreTlsProvider.go +++ b/common/rpc/encryption/localStoreTlsProvider.go @@ -426,7 +426,7 @@ func (s *localStoreTlsProvider) timerCallback() { for { select { case <-s.stop: - break + return case <-s.ticker.C: } diff --git a/common/rpc/encryption/testDynamicCertProvider.go b/common/rpc/encryption/testDynamicCertProvider.go index 036a740c158..4ad95e69b58 100644 --- a/common/rpc/encryption/testDynamicCertProvider.go +++ b/common/rpc/encryption/testDynamicCertProvider.go @@ -37,7 +37,6 @@ type TestDynamicCertProvider struct { caCerts *x509.CertPool wrongCACerts *x509.CertPool serverCertIndex int - caCertIndex int config *config.GroupTLS serverName string } diff --git a/common/rpc/encryption/testDynamicTLSConfigProvider.go b/common/rpc/encryption/testDynamicTLSConfigProvider.go index 279ad413433..a6caf160128 100644 --- a/common/rpc/encryption/testDynamicTLSConfigProvider.go +++ b/common/rpc/encryption/testDynamicTLSConfigProvider.go @@ -44,11 +44,6 @@ type TestDynamicTLSConfigProvider struct { FrontendPerHostCertProviderMap PerHostCertProviderMap - internodeServerConfig *tls.Config - internodeClientConfig *tls.Config - frontendServerConfig *tls.Config - frontendClientConfig *tls.Config - logger log.Logger } @@ -73,7 +68,6 @@ func (t *TestDynamicTLSConfigProvider) GetExpiringCerts(timeWindow time.Duration } func (t *TestDynamicTLSConfigProvider) GetRemoteClusterClientConfig(hostName string) (*tls.Config, error) { - //TODO implement me panic("implement me") } diff --git a/common/rpc/encryption/tls_config_test.go b/common/rpc/encryption/tls_config_test.go index 99f7245ea57..9c89f5542d4 100644 --- a/common/rpc/encryption/tls_config_test.go +++ b/common/rpc/encryption/tls_config_test.go @@ -125,22 +125,6 @@ func (s *tlsConfigTest) testGroupTLS(f func(*config.RootTLS, *config.GroupTLS)) f(cfg, &cfg.Frontend) } -func (s *tlsConfigTest) testClientTLS(f func(*config.RootTLS, *config.ClientTLS)) { - - cfg := &config.RootTLS{Internode: config.GroupTLS{}} - f(cfg, &cfg.Internode.Client) - cfg = &config.RootTLS{Frontend: config.GroupTLS{}} - f(cfg, &cfg.Frontend.Client) -} - -func (s *tlsConfigTest) testServerTLS(f func(*config.RootTLS, *config.ServerTLS)) { - - cfg := &config.RootTLS{Internode: config.GroupTLS{}} - f(cfg, &cfg.Internode.Server) - cfg = &config.RootTLS{Frontend: config.GroupTLS{}} - f(cfg, &cfg.Frontend.Server) -} - func (s *tlsConfigTest) testCertFileAndData(cfg *config.RootTLS, group *config.GroupTLS) { group.Server = config.ServerTLS{} diff --git a/common/rpc/grpc.go b/common/rpc/grpc.go index fabdbc5c3c6..c1afbe29902 100644 ---
a/common/rpc/grpc.go +++ b/common/rpc/grpc.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "go.temporal.io/server/common/headers" "go.temporal.io/server/common/log" @@ -62,9 +63,10 @@ const ( // https://github.com/grpc/grpc/blob/master/doc/naming.md. // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func Dial(hostName string, tlsConfig *tls.Config, logger log.Logger, interceptors ...grpc.UnaryClientInterceptor) (*grpc.ClientConn, error) { - // Default to insecure - grpcSecureOpt := grpc.WithInsecure() - if tlsConfig != nil { + var grpcSecureOpt grpc.DialOption + if tlsConfig == nil { + grpcSecureOpt = grpc.WithTransportCredentials(insecure.NewCredentials()) + } else { grpcSecureOpt = grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)) } diff --git a/common/rpc/interceptor/namespace_count_limit.go b/common/rpc/interceptor/namespace_count_limit.go index e0307f5f431..dd32a0443bd 100644 --- a/common/rpc/interceptor/namespace_count_limit.go +++ b/common/rpc/interceptor/namespace_count_limit.go @@ -81,17 +81,17 @@ func (ni *NamespaceCountLimitInterceptor) Intercept( ) (interface{}, error) { _, methodName := splitMethodName(info.FullMethod) // token will default to 0 - token, _ := ni.tokens[methodName] + token := ni.tokens[methodName] if token != 0 { - namespace := GetNamespace(ni.namespaceRegistry, req) - counter := ni.counter(namespace) + nsName := GetNamespace(ni.namespaceRegistry, req) + counter := ni.counter(nsName) count := atomic.AddInt32(counter, int32(token)) defer atomic.AddInt32(counter, -int32(token)) scope := MetricsScope(ctx, ni.logger) scope.UpdateGauge(metrics.ServicePendingRequests, float64(count)) - if int(count) > ni.countFn(namespace.String()) { + if int(count) > ni.countFn(nsName.String()) { return nil, ErrNamespaceCountLimitServerBusy } } diff --git a/common/rpc/interceptor/telemetry.go b/common/rpc/interceptor/telemetry.go index 8597c72ddd3..0bfabed0dc8 100644 --- a/common/rpc/interceptor/telemetry.go +++ b/common/rpc/interceptor/telemetry.go @@ -214,16 +214,16 @@ func (ti *TelemetryInterceptor) metricsScopeLogTags( // if the method name is not defined, will default to // unknown scope, which carries value 0 - scopeDef, _ := ti.scopes[methodName] + scopeDef := ti.scopes[methodName] scopeDef = ti.overrideScope(scopeDef, req) - namespace := GetNamespace(ti.namespaceRegistry, req) - if namespace == "" { + nsName := GetNamespace(ti.namespaceRegistry, req) + if nsName == "" { return ti.metricsClient.Scope(scopeDef).Tagged(metrics.NamespaceUnknownTag()), []tag.Tag{tag.Operation(methodName)} } - return ti.metricsClient.Scope(scopeDef).Tagged(metrics.NamespaceTag(namespace.String())), []tag.Tag{ + return ti.metricsClient.Scope(scopeDef).Tagged(metrics.NamespaceTag(nsName.String())), []tag.Tag{ tag.Operation(methodName), - tag.WorkflowNamespace(namespace.String()), + tag.WorkflowNamespace(nsName.String()), } } diff --git a/common/rpc/test/rpc_common_test.go b/common/rpc/test/rpc_common_test.go index b8dcb0e4df4..6b2d610d5a8 100644 --- a/common/rpc/test/rpc_common_test.go +++ b/common/rpc/test/rpc_common_test.go @@ -90,7 +90,7 @@ var ( } ) -func startHelloWorldServer(s suite.Suite, factory *TestFactory) (*grpc.Server, string) { +func startHelloWorldServer(s *suite.Suite, factory *TestFactory) (*grpc.Server, string) { var opts []grpc.ServerOption var err error if factory.serverUsage == Internode { @@ -109,13 +109,13 @@ 
func startHelloWorldServer(s *suite.Suite, factory *TestFactory) (*grpc.Server, s port := strings.Split(listener.Addr().String(), ":")[1] s.NoError(err) go func() { - err := server.Serve(listener) - s.NoError(err) + // Serve blocks until the server stops; asserting on its result outside the goroutine would be racy, so the error is deliberately dropped. + _ = server.Serve(listener) }() return server, port } -func runHelloWorldTest(s suite.Suite, host string, serverFactory *TestFactory, clientFactory *TestFactory, isValid bool) { +func runHelloWorldTest(s *suite.Suite, host string, serverFactory *TestFactory, clientFactory *TestFactory, isValid bool) { server, port := startHelloWorldServer(s, serverFactory) defer server.Stop() err := dialHello(s, host+":"+port, clientFactory, serverFactory.serverUsage) @@ -128,7 +128,7 @@ func runHelloWorldTest(s suite.Suite, host string, serverFactory *TestFactory, c } func runHelloWorldMultipleDials( - s suite.Suite, + s *suite.Suite, host string, serverFactory *TestFactory, clientFactory *TestFactory, @@ -145,28 +145,13 @@ func runHelloWorldMultipleDials( } } -func runHelloWorldWithRefresh( - s suite.Suite, - host string, - serverFactory *TestFactory, - clientFactory *TestFactory, - validator func(*credentials.TLSInfo, error), -) { - - server, port := startHelloWorldServer(s, serverFactory) - defer server.Stop() - - tlsInfo, err := dialHelloAndGetTLSInfo(s, host+":"+port, clientFactory, serverFactory.serverUsage) - validator(tlsInfo, err) -} - -func dialHello(s suite.Suite, hostport string, clientFactory *TestFactory, serverType ServerUsageType) error { +func dialHello(s *suite.Suite, hostport string, clientFactory *TestFactory, serverType ServerUsageType) error { _, err := dialHelloAndGetTLSInfo(s, hostport, clientFactory, serverType) return err } func dialHelloAndGetTLSInfo( - s suite.Suite, + s *suite.Suite, hostport string, clientFactory *TestFactory, serverType ServerUsageType, @@ -178,14 +163,16 @@ func dialHelloAndGetTLSInfo( switch serverType { case Internode: cfg, err = clientFactory.GetInternodeClientTlsConfig() + s.NoError(err) case Frontend: cfg, err = clientFactory.GetFrontendClientTlsConfig() + s.NoError(err) case RemoteCluster: host, _, err := net.SplitHostPort(hostport) s.NoError(err) cfg, err = clientFactory.GetRemoteClusterClientConfig(host) + s.NoError(err) } - s.NoError(err) clientConn, err := rpc.Dial(hostport, cfg, logger) s.NoError(err) diff --git a/common/rpc/test/rpc_localstore_tls_test.go b/common/rpc/test/rpc_localstore_tls_test.go index e913c8d99d3..579a8b4e77a 100644 --- a/common/rpc/test/rpc_localstore_tls_test.go +++ b/common/rpc/test/rpc_localstore_tls_test.go @@ -55,7 +55,7 @@ var noExtraInterceptors = []grpc.UnaryClientInterceptor{} type localStoreRPCSuite struct { *require.Assertions - suite.Suite + *suite.Suite controller *gomock.Controller @@ -117,7 +117,9 @@ type localStoreRPCSuite struct { } func TestLocalStoreTLSSuite(t *testing.T) { - suite.Run(t, &localStoreRPCSuite{}) + suite.Run(t, &localStoreRPCSuite{ + Suite: &suite.Suite{}, + }) } func (s *localStoreRPCSuite) TearDownSuite() { @@ -368,6 +370,7 @@ func (s *localStoreRPCSuite) setupFrontend() { s.frontendRollingCerts, s.dynamicCACertPool, s.wrongCACertPool) + s.NoError(err) dynamicServerTLSFactory := rpc.NewFactory(rpcTestCfgDefault, "tester", s.logger, s.dynamicConfigProvider, dynamicconfig.NewNoopCollection(), clusterMetadata, noExtraInterceptors) s.frontendDynamicTLSFactory = f(dynamicServerTLSFactory) s.internodeDynamicTLSFactory = i(dynamicServerTLSFactory) @@ -560,10 +563,9 @@ func (s *localStoreRPCSuite)
testCertExpiration(factory *TestFactory, timeWindow time.Duration, nExpiring int) { expiring, expired, err := factory.GetTLSConfigProvider().GetExpiringCerts(timeWindow) - if len(expired) > 0 { - } s.NotNil(expiring) - s.Nil(err) + s.Empty(expired) + s.NoError(err) s.Equal(nExpiring, len(expiring)) } diff --git a/common/searchattribute/encode_value_test.go b/common/searchattribute/encode_value_test.go index cc611efc71e..6ca7652f6b7 100644 --- a/common/searchattribute/encode_value_test.go +++ b/common/searchattribute/encode_value_test.go @@ -74,6 +74,7 @@ func Test_DecodeValue_FromMetadata_Success(t *testing.T) { var expectedEncodedRepresentation = "2022-03-07T21:27:35.986848-05:00" timeValue, err := time.Parse(time.RFC3339, expectedEncodedRepresentation) + assert.NoError(err) payloadDatetime, err := payload.Encode(timeValue) assert.NoError(err) payloadDatetime.Metadata["type"] = []byte("Datetime") @@ -116,6 +117,7 @@ func Test_DecodeValue_FromParameter_Success(t *testing.T) { var expectedEncodedRepresentation = "2022-03-07T21:27:35.986848-05:00" timeValue, err := time.Parse(time.RFC3339, expectedEncodedRepresentation) + assert.NoError(err) payloadDatetime, err := payload.Encode(timeValue) assert.NoError(err) decodedDatetime, err := DecodeValue(payloadDatetime, enumspb.INDEXED_VALUE_TYPE_DATETIME) diff --git a/common/testing/event_generator.go b/common/testing/event_generator.go index fb9bf26c48a..4a064967bf4 100644 --- a/common/testing/event_generator.go +++ b/common/testing/event_generator.go @@ -81,8 +81,6 @@ type ( // RevokeFunc is the condition inside edge // The function used to check if the edge is accessible at a certain state RevokeFunc struct { - methodName string //nolint - input []interface{} //nolint } ) @@ -360,12 +358,6 @@ func (g *EventGenerator) pickRandomVertex( return endVertex.DeepCopy() } -func (g *EventGenerator) shouldBumpVersion() bool { - // 1//1000 to bump the version - //return g.dice.Intn(1000) == 500 - return false -} - // NewHistoryEventEdge initials a new edge between two HistoryEventVertexes func NewHistoryEventEdge( start Vertex, @@ -419,7 +411,7 @@ func (c HistoryEventEdge) GetCondition() func(...interface{}) bool { } // SetAction sets an action to perform when the end vertex hits -func (c HistoryEventEdge) SetAction(action func()) { +func (c *HistoryEventEdge) SetAction(action func()) { c.action = action } @@ -468,12 +460,12 @@ func (he *HistoryEventVertex) SetName( } // Equals compares two vertex -//func (he *HistoryEventVertex) Equals( +// func (he *HistoryEventVertex) Equals( // v Vertex, -//) bool { +// ) bool { // // return strings.EqualFold(he.name, v.GetName()) && he.data == v.GetData() -//} +// } // SetIsStrictOnNextVertex sets if a vertex can be added between the current vertex and its child Vertices func (he *HistoryEventVertex) SetIsStrictOnNextVertex( diff --git a/common/timer/local_gate_test.go b/common/timer/local_gate_test.go index 4c95a354b40..8ef05fb67ec 100644 --- a/common/timer/local_gate_test.go +++ b/common/timer/local_gate_test.go @@ -145,9 +145,8 @@ func (s *localGateSuite) TestTimerFireAfterUpdate_NotActive_Updated() { deadline := now.Add(3 * time.Second) s.localTimerGate.Update(newTimer) - select { // this is to drain existing signal - case <-s.localTimerGate.FireChan(): - } + // this is to drain existing signal + <-s.localTimerGate.FireChan() // test setup up complete s.True(s.localTimerGate.Update(updatedNewTimer)) @@ -165,9 +164,8 @@ func (s *localGateSuite) TestTimerFireAfterUpdate_NotActive_NotUpdated() { deadline := now.Add(1 
* time.Second) s.localTimerGate.Update(newTimer) - select { // this is to drain existing signal - case <-s.localTimerGate.FireChan(): - } + // this is to drain existing signal + <-s.localTimerGate.FireChan() // test setup up complete s.True(s.localTimerGate.Update(updatedNewTimer)) diff --git a/common/timer/remote_gate_test.go b/common/timer/remote_gate_test.go index 50f65e8fa6f..ce4877c3e7d 100644 --- a/common/timer/remote_gate_test.go +++ b/common/timer/remote_gate_test.go @@ -159,9 +159,8 @@ func (s *remoteGateSuite) TestTimerFireAfterUpdate_NotActive_Updated() { deadline := now.Add(2 * time.Second) s.remoteTimerGate.Update(newTimer) - select { // this is to drain existing signal - case <-s.remoteTimerGate.FireChan(): - } + // this is to drain existing signal + <-s.remoteTimerGate.FireChan() // test setup up complete s.True(s.remoteTimerGate.Update(updatedNewTimer)) @@ -185,9 +184,8 @@ func (s *remoteGateSuite) TestTimerFireAfterUpdate_NotActive_NotUpdated() { updatedNewTimer := now.Add(-1 * time.Second) s.remoteTimerGate.Update(newTimer) - select { // this is to drain existing signal - case <-s.remoteTimerGate.FireChan(): - } + // this is to drain existing signal + <-s.remoteTimerGate.FireChan() // test setup up complete s.True(s.remoteTimerGate.Update(updatedNewTimer)) @@ -215,9 +213,8 @@ func (s *remoteGateSuite) TestTimerSetCurrentTime_Update_TimerAlreadyFired() { newCurrentTime := now.Add(1 * time.Second) s.remoteTimerGate.Update(newTimer) - select { // this is to drain existing signal - case <-s.remoteTimerGate.FireChan(): - } + // this is to drain existing signal + <-s.remoteTimerGate.FireChan() // test setup up complete s.True(s.remoteTimerGate.SetCurrentTime(newCurrentTime)) diff --git a/host/activity_test.go b/host/activity_test.go index 0bf6fb3eeb1..b76448ef802 100644 --- a/host/activity_test.go +++ b/host/activity_test.go @@ -36,6 +36,7 @@ import ( "go.temporal.io/sdk/workflow" "go.temporal.io/server/common/convert" + "go.temporal.io/server/service/history/consts" "github.com/pborman/uuid" enumspb "go.temporal.io/api/enums/v1" @@ -588,6 +589,9 @@ func (s *integrationSuite) TestActivityHeartBeatWorkflow_Timeout() { s.True(err == nil || err == matching.ErrNoTasks) err = poller.PollAndProcessActivityTask(false) + // Not s.ErrorIs() because error goes through RPC. 
+ s.IsType(consts.ErrActivityTaskNotFound, err) + s.Equal(consts.ErrActivityTaskNotFound.Error(), err.Error()) s.Logger.Info("Waiting for workflow to complete", tag.WorkflowRunID(we.RunId)) diff --git a/host/client_integration_test.go b/host/client_integration_test.go index 7a9a20f6b6f..4bd997c18f5 100644 --- a/host/client_integration_test.go +++ b/host/client_integration_test.go @@ -41,7 +41,6 @@ import ( "github.com/stretchr/testify/suite" commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" - historypb "go.temporal.io/api/history/v1" "go.temporal.io/sdk/activity" sdkclient "go.temporal.io/sdk/client" "go.temporal.io/sdk/converter" @@ -50,7 +49,6 @@ import ( "go.temporal.io/sdk/workflow" "go.temporal.io/server/api/adminservice/v1" - "go.temporal.io/server/common" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/rpc" ) @@ -960,16 +958,17 @@ func (s *clientIntegrationSuite) Test_BufferedQuery() { s.NoError(err) } -func (s *clientIntegrationSuite) printHistory(workflowID string, runID string) { - iter := s.sdkClient.GetWorkflowHistory(context.Background(), workflowID, runID, false, 0) - history := &historypb.History{} - for iter.HasNext() { - event, err := iter.Next() - s.NoError(err) - history.Events = append(history.Events, event) - } - common.PrettyPrintHistory(history, s.Logger) -} +// Uncomment if you need to debug history. +// func (s *clientIntegrationSuite) printHistory(workflowID string, runID string) { +// iter := s.sdkClient.GetWorkflowHistory(context.Background(), workflowID, runID, false, 0) +// history := &historypb.History{} +// for iter.HasNext() { +// event, err := iter.Next() +// s.NoError(err) +// history.Events = append(history.Events, event) +// } +// common.PrettyPrintHistory(history, s.Logger) +// } func (s *clientIntegrationSuite) assertHistory(wid, rid string, expected []enumspb.EventType) { iter := s.sdkClient.GetWorkflowHistory(context.Background(), wid, rid, false, 0) diff --git a/host/continue_as_new_test.go b/host/continue_as_new_test.go index 864e9521035..89b6eff54e2 100644 --- a/host/continue_as_new_test.go +++ b/host/continue_as_new_test.go @@ -351,7 +351,7 @@ func (s *integrationSuite) TestContinueAsNewWorkflow_Timeout() { // Only PollForWorkflowTask if the last event is WorkflowTaskScheduled and we have at least 2 seconds left // (to account for potential delay from queueing and matching task forwarding). expiration := firstEvent.GetWorkflowExecutionStartedEventAttributes().GetWorkflowExecutionExpirationTime() - timeLeft := expiration.Sub(time.Now()) + timeLeft := time.Until(*expiration) if lastEvent.GetEventType() == enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED && timeLeft > 2*time.Second { s.Logger.Info(fmt.Sprintf("Execution not timed out yet. PollForWorkflowTask. 
Last event is %v", lastEvent)) _, err := poller.PollAndProcessWorkflowTaskWithoutRetry(false, false) diff --git a/host/cron_test.go b/host/cron_test.go index 9ba86796587..ac3d0457758 100644 --- a/host/cron_test.go +++ b/host/cron_test.go @@ -506,7 +506,6 @@ func (s *clientIntegrationSuite) TestCronWorkflowCompletionStates() { // wait for fifth run s.Equal(<-wfCh, 5) s.DurationNear(time.Since(ts), targetBackoffDuration, tolerance) - ts = time.Now() // let fifth run finish and sixth get scheduled time.Sleep(500 * time.Millisecond) diff --git a/host/onebox.go b/host/onebox.go index c3126d83229..1a9c08bdac6 100644 --- a/host/onebox.go +++ b/host/onebox.go @@ -72,8 +72,6 @@ import ( "go.temporal.io/server/service/history/workflow" "go.temporal.io/server/service/matching" "go.temporal.io/server/service/worker" - "go.temporal.io/server/service/worker/archiver" - "go.temporal.io/server/service/worker/replicator" "go.temporal.io/server/temporal" ) @@ -122,8 +120,6 @@ type ( shutdownCh chan struct{} shutdownWG sync.WaitGroup clusterNo int // cluster number - replicator *replicator.Replicator - clientWorker archiver.ClientWorker archiverMetadata carchiver.ArchivalMetadata archiverProvider provider.ArchiverProvider historyConfig *HistoryConfig @@ -806,7 +802,6 @@ type rpcFactoryImpl struct { sync.RWMutex listener net.Listener ringpopChannel *tchannel.Channel - serverCfg config.GroupTLS } func (c *rpcFactoryImpl) GetFrontendGRPCServerOptions() ([]grpc.ServerOption, error) { diff --git a/host/rate_limit_test.go b/host/rate_limit_test.go index ca7fcffa6ff..4690e268d4a 100644 --- a/host/rate_limit_test.go +++ b/host/rate_limit_test.go @@ -123,7 +123,8 @@ func (s *integrationSuite) TestTaskProcessingProtectionForRateLimitError() { // Send one signal to create a new workflow task buf := new(bytes.Buffer) - binary.Write(buf, binary.LittleEndian, 0) + err = binary.Write(buf, binary.LittleEndian, byte(0)) + s.NoError(err) s.Nil(s.sendSignal(s.namespace, workflowExecution, "SignalName", payloads.EncodeBytes(buf.Bytes()), identity)) // Drop workflow task to cause all events to be buffered from now on @@ -134,15 +135,17 @@ func (s *integrationSuite) TestTaskProcessingProtectionForRateLimitError() { // Buffered 100 Signals for i := 1; i < 101; i++ { buf := new(bytes.Buffer) - binary.Write(buf, binary.LittleEndian, i) + err := binary.Write(buf, binary.LittleEndian, byte(i)) + s.NoError(err) s.Nil(s.sendSignal(s.namespace, workflowExecution, "SignalName", payloads.EncodeBytes(buf.Bytes()), identity)) } // 101 signal, which will fail the workflow task buf = new(bytes.Buffer) - binary.Write(buf, binary.LittleEndian, 101) + err = binary.Write(buf, binary.LittleEndian, byte(101)) + s.NoError(err) signalErr := s.sendSignal(s.namespace, workflowExecution, "SignalName", payloads.EncodeBytes(buf.Bytes()), identity) - s.Nil(signalErr) + s.NoError(signalErr) // Process signal in workflow _, err = poller.PollAndProcessWorkflowTaskWithAttempt(true, false, false, false, 1) diff --git a/host/stickytq_test.go b/host/stickytq_test.go index 4ed095c7887..bbf69a37cf4 100644 --- a/host/stickytq_test.go +++ b/host/stickytq_test.go @@ -143,6 +143,7 @@ func (s *integrationSuite) TestStickyTimeout_NonTransientWorkflowTask() { Identity: identity, RequestId: uuid.New(), }) + s.NoError(err) // Wait for workflow task timeout stickyTimeout := false @@ -194,6 +195,7 @@ WaitForStickyTimeoutLoop: // Complete workflow execution _, err = poller.PollAndProcessWorkflowTaskWithAttempt(true, false, false, true, 3) + s.NoError(err) // Assert for single 
workflow task failed and workflow completion failedWorkflowTasks := 0 @@ -301,12 +303,14 @@ func (s *integrationSuite) TestStickyTaskqueueResetThenTimeout() { Identity: identity, RequestId: uuid.New(), }) + s.NoError(err) // Reset sticky taskqueue before sticky workflow task starts - s.engine.ResetStickyTaskQueue(NewContext(), &workflowservice.ResetStickyTaskQueueRequest{ + _, err = s.engine.ResetStickyTaskQueue(NewContext(), &workflowservice.ResetStickyTaskQueueRequest{ Namespace: s.namespace, Execution: workflowExecution, }) + s.NoError(err) // Wait for workflow task timeout stickyTimeout := false @@ -358,6 +362,7 @@ WaitForStickyTimeoutLoop: // Complete workflow execution _, err = poller.PollAndProcessWorkflowTaskWithAttempt(true, false, false, true, 3) + s.NoError(err) // Assert for single workflow task failed and workflow completion failedWorkflowTasks := 0 diff --git a/host/taskpoller.go b/host/taskpoller.go index 25d4c8dbdc6..f400b6dda1f 100644 --- a/host/taskpoller.go +++ b/host/taskpoller.go @@ -178,11 +178,13 @@ Loop: history := response.History if history == nil { p.Logger.Fatal("History is nil") + return false, nil, errors.New("history is nil") } events = history.Events - if events == nil || len(events) == 0 { + if len(events) == 0 { p.Logger.Fatal("History Events are empty") + return false, nil, errors.New("history events are empty") } nextPageToken := response.NextPageToken @@ -310,11 +312,13 @@ func (p *TaskPoller) HandlePartialWorkflowTask(response *workflowservice.PollWor history := response.History if history == nil { p.Logger.Fatal("History is nil") + return nil, errors.New("history is nil") } events = history.Events - if events == nil || len(events) == 0 { + if len(events) == 0 { p.Logger.Fatal("History Events are empty") + return nil, errors.New("history events are empty") } commands, err := p.WorkflowTaskHandler(response.WorkflowExecution, response.WorkflowType, diff --git a/host/workflow_buffered_events_test.go b/host/workflow_buffered_events_test.go index b33189cc285..38550a8ad5a 100644 --- a/host/workflow_buffered_events_test.go +++ b/host/workflow_buffered_events_test.go @@ -91,16 +91,18 @@ func (s *integrationSuite) TestRateLimitBufferedEvents() { // Buffered Signals for i := 0; i < 100; i++ { buf := new(bytes.Buffer) - binary.Write(buf, binary.LittleEndian, i) + err := binary.Write(buf, binary.LittleEndian, byte(i)) + s.NoError(err) s.Nil(s.sendSignal(s.namespace, workflowExecution, "SignalName", payloads.EncodeBytes(buf.Bytes()), identity)) } buf := new(bytes.Buffer) - binary.Write(buf, binary.LittleEndian, 101) + err := binary.Write(buf, binary.LittleEndian, byte(101)) + s.NoError(err) signalErr := s.sendSignal(s.namespace, workflowExecution, "SignalName", payloads.EncodeBytes(buf.Bytes()), identity) - s.Nil(signalErr) + s.NoError(signalErr) - // this command will be ignored as he workflow task is already failed + // this command will be ignored as workflow task has already failed return []*commandpb.Command{}, nil } diff --git a/host/xdc/integration_failover_test.go b/host/xdc/integration_failover_test.go index ca8813b0cb8..f281ebcf541 100644 --- a/host/xdc/integration_failover_test.go +++ b/host/xdc/integration_failover_test.go @@ -1972,14 +1972,15 @@ func (s *integrationClustersTestSuite) TestActivityHeartbeatFailover() { s.Equal(2, lastAttemptCount) } -func (s *integrationClustersTestSuite) printHistory(frontendClient workflowservice.WorkflowServiceClient, namespace, workflowID, runID string) { - events := s.getHistory(frontendClient, namespace, 
&commonpb.WorkflowExecution{ - WorkflowId: workflowID, - RunId: runID, - }) - history := &historypb.History{Events: events} - common.PrettyPrintHistory(history, s.logger) -} +// Uncomment if you need to debug history. +// func (s *integrationClustersTestSuite) printHistory(frontendClient workflowservice.WorkflowServiceClient, namespace, workflowID, runID string) { +// events := s.getHistory(frontendClient, namespace, &commonpb.WorkflowExecution{ +// WorkflowId: workflowID, +// RunId: runID, +// }) +// history := &historypb.History{Events: events} +// common.PrettyPrintHistory(history, s.logger) +// } func (s *integrationClustersTestSuite) TestLocalNamespaceMigration() { testCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second) @@ -2237,6 +2238,7 @@ func (s *integrationClustersTestSuite) TestLocalNamespaceMigration() { HostPort: s.cluster1.GetHost().FrontendGRPCAddress(), Namespace: "temporal-system", }) + s.NoError(err) workflowID4 := "force-replication-wf-4" run4, err := sysClient.ExecuteWorkflow(testCtx, sdkclient.StartWorkflowOptions{ ID: workflowID4, @@ -2353,6 +2355,7 @@ func (s *integrationClustersTestSuite) TestForceMigration_ClosedWorkflow() { HostPort: s.cluster1.GetHost().FrontendGRPCAddress(), Namespace: "temporal-system", }) + s.NoError(err) forceReplicationWorkflowID := "force-replication-wf" sysWfRun, err := sysClient.ExecuteWorkflow(testCtx, sdkclient.StartWorkflowOptions{ ID: forceReplicationWorkflowID, @@ -2491,6 +2494,7 @@ func (s *integrationClustersTestSuite) TestForceMigration_ResetWorkflow() { HostPort: s.cluster1.GetHost().FrontendGRPCAddress(), Namespace: "temporal-system", }) + s.NoError(err) forceReplicationWorkflowID := "force-replication-wf" sysWfRun, err := sysClient.ExecuteWorkflow(testCtx, sdkclient.StartWorkflowOptions{ ID: forceReplicationWorkflowID, diff --git a/schema/sqlite/setup.go b/schema/sqlite/setup.go index 3a13b4c714b..abfaf099720 100644 --- a/schema/sqlite/setup.go +++ b/schema/sqlite/setup.go @@ -121,7 +121,7 @@ func CreateNamespaces(cfg *config.SQL, namespaces ...*NamespaceConfig) error { for _, ns := range namespaces { if err := createNamespaceIfNotExists(db, ns); err != nil { - return fmt.Errorf("error creating namespace %q: %w", ns, err) + return fmt.Errorf("error creating namespace %q: %w", ns.Detail.Info.Name, err) } } diff --git a/service/frontend/dcRedirectionHandler.go b/service/frontend/dcRedirectionHandler.go index f2ebaaf76b7..364c2e93ef1 100644 --- a/service/frontend/dcRedirectionHandler.go +++ b/service/frontend/dcRedirectionHandler.go @@ -1589,14 +1589,15 @@ func (handler *DCRedirectionHandlerImpl) UpdateWorkerBuildIdOrdering( if targetDC == handler.currentClusterName { resp, err = handler.frontendHandler.UpdateWorkerBuildIdOrdering(ctx, request) + return err } else { remoteClient, err := handler.clientBean.GetRemoteFrontendClient(targetDC) if err != nil { return err } resp, err = remoteClient.UpdateWorkerBuildIdOrdering(ctx, request) + return err } - return err }) return resp, err @@ -1621,14 +1622,15 @@ func (handler *DCRedirectionHandlerImpl) GetWorkerBuildIdOrdering( switch { case targetDC == handler.currentClusterName: resp, err = handler.frontendHandler.GetWorkerBuildIdOrdering(ctx, request) + return err default: remoteClient, err := handler.clientBean.GetRemoteFrontendClient(targetDC) if err != nil { return err } resp, err = remoteClient.GetWorkerBuildIdOrdering(ctx, request) + return err } - return err }) return resp, err diff --git a/service/frontend/operator_handler.go 
b/service/frontend/operator_handler.go index 5f98ad37d09..6d86270002d 100644 --- a/service/frontend/operator_handler.go +++ b/service/frontend/operator_handler.go @@ -60,7 +60,6 @@ type ( OperatorHandlerImpl struct { status int32 - healthStatus int32 logger log.Logger config *Config esConfig *esclient.Config diff --git a/service/frontend/workflowHandler.go b/service/frontend/workflowHandler.go index 6f7d1d1c9ee..0adcf8c1ab2 100644 --- a/service/frontend/workflowHandler.go +++ b/service/frontend/workflowHandler.go @@ -3349,6 +3349,9 @@ func (wh *WorkflowHandler) UpdateSchedule(ctx context.Context, request *workflow input.ConflictToken = int64(binary.BigEndian.Uint64(request.ConflictToken)) } inputPayloads, err := payloads.Encode(input) + if err != nil { + return nil, err + } sizeLimitError := wh.config.BlobSizeLimitError(request.GetNamespace()) sizeLimitWarn := wh.config.BlobSizeLimitWarn(request.GetNamespace()) @@ -3419,6 +3422,9 @@ func (wh *WorkflowHandler) PatchSchedule(ctx context.Context, request *workflows } inputPayloads, err := payloads.Encode(request.Patch) + if err != nil { + return nil, err + } sizeLimitError := wh.config.BlobSizeLimitError(request.GetNamespace()) sizeLimitWarn := wh.config.BlobSizeLimitWarn(request.GetNamespace()) @@ -4174,10 +4180,6 @@ func (wh *WorkflowHandler) verifyHistoryIsComplete( pageSize)) } -func (wh *WorkflowHandler) isFailoverRequest(updateRequest *workflowservice.UpdateNamespaceRequest) bool { - return updateRequest.ReplicationConfig != nil && updateRequest.ReplicationConfig.GetActiveClusterName() != "" -} - func (wh *WorkflowHandler) historyArchived(ctx context.Context, request *workflowservice.GetWorkflowExecutionHistoryRequest, namespaceID namespace.ID) bool { if request.GetExecution() == nil || request.GetExecution().GetRunId() == "" { return false diff --git a/service/history/api/update_workflow_util.go b/service/history/api/update_workflow_util.go index 44c117e8d85..c06e0de487e 100644 --- a/service/history/api/update_workflow_util.go +++ b/service/history/api/update_workflow_util.go @@ -44,7 +44,6 @@ func UpdateWorkflowWithNew( newWorkflowFn func() (workflow.Context, workflow.MutableState, error), ) (retError error) { -UpdateHistoryLoop: for attempt := 1; attempt <= conditionalRetryCount; attempt++ { // conduct caller action postActions, err := action(workflowContext) @@ -59,7 +58,7 @@ UpdateHistoryLoop: return err } } - continue UpdateHistoryLoop + continue } // Returned error back to the caller @@ -81,10 +80,9 @@ UpdateHistoryLoop: } } + var updateErr error if newWorkflowFn != nil { - var newContext workflow.Context - var newMutableState workflow.MutableState - newContext, newMutableState, err = newWorkflowFn() + newContext, newMutableState, err := newWorkflowFn() if err != nil { return err } @@ -96,29 +94,29 @@ UpdateHistoryLoop: return err } - err = workflowContext.GetContext().UpdateWorkflowExecutionWithNewAsActive( + updateErr = workflowContext.GetContext().UpdateWorkflowExecutionWithNewAsActive( ctx, shard.GetTimeSource().Now(), newContext, newMutableState, ) } else { - err = workflowContext.GetContext().UpdateWorkflowExecutionAsActive( + updateErr = workflowContext.GetContext().UpdateWorkflowExecutionAsActive( ctx, shard.GetTimeSource().Now(), ) } - if err == consts.ErrConflict { + if updateErr == consts.ErrConflict { if attempt != conditionalRetryCount { _, err = workflowContext.ReloadMutableState(ctx) if err != nil { return err } } - continue UpdateHistoryLoop + continue } - return err + return updateErr } return 
consts.ErrMaxAttemptsExceeded } diff --git a/service/history/events/notifier.go b/service/history/events/notifier.go index ca65cd77b89..ccfda851ca3 100644 --- a/service/history/events/notifier.go +++ b/service/history/events/notifier.go @@ -199,7 +199,7 @@ func (notifier *NotifierImpl) dispatchHistoryEventNotification(event *Notificati timer := notifier.metrics.StartTimer(metrics.HistoryEventNotificationScope, metrics.HistoryEventNotificationFanoutLatency) defer timer.Stop() - notifier.eventsPubsubs.GetAndDo(identifier, func(key interface{}, value interface{}) error { //nolint:errcheck + _, _, _ = notifier.eventsPubsubs.GetAndDo(identifier, func(key interface{}, value interface{}) error { subscribers := value.(map[string]chan *Notification) for _, channel := range subscribers { diff --git a/service/history/events/notifier_test.go b/service/history/events/notifier_test.go index a921138f3f2..0bd2003cd26 100644 --- a/service/history/events/notifier_test.go +++ b/service/history/events/notifier_test.go @@ -105,10 +105,8 @@ func (s *notifierSuite) TestSingleSubscriberWatchingEvents() { s.notifier.NotifyNewHistoryEvent(historyEvent) }() - select { - case msg := <-channel: - s.Equal(historyEvent, msg) - } + msg := <-channel + s.Equal(historyEvent, msg) err = s.notifier.UnwatchHistoryEvent(definition.NewWorkflowKey(namespaceID, execution.GetWorkflowId(), execution.GetRunId()), subscriberID) s.Nil(err) diff --git a/service/history/handler.go b/service/history/handler.go index 1bb0344f4cd..6888fad3d6f 100644 --- a/service/history/handler.go +++ b/service/history/handler.go @@ -143,7 +143,6 @@ var ( errSourceClusterNotSet = serviceerror.NewInvalidArgument("Source Cluster not set on request.") errShardIDNotSet = serviceerror.NewInvalidArgument("ShardId not set on request.") errTimestampNotSet = serviceerror.NewInvalidArgument("Timestamp not set on request.") - errInvalidTaskType = serviceerror.NewInvalidArgument("Invalid task type") errDeserializeTaskTokenMessage = "Error to deserialize task token. Error: %v." 
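A recurring pattern in this change set is replacing //nolint:errcheck suppressions with an explicit discard of the returned error. As a minimal standalone sketch of the idiom (the flush function here is hypothetical, not part of this patch): assigning the result to the blank identifier records at the call site that the error is dropped on purpose, which errcheck accepts without a lint directive, and a deferred discard needs a wrapping closure because defer takes a call expression, not an assignment.

package main

import (
	"errors"
	"fmt"
)

// flush is a hypothetical stand-in for any best-effort call
// whose error the caller deliberately ignores.
func flush() error { return errors.New("transient") }

func main() {
	// Explicit discard: errcheck treats "_ =" as an intentional
	// decision, so no //nolint:errcheck directive is needed.
	_ = flush()

	// defer requires a call expression, not an assignment, so the
	// discard is wrapped in a closure, as in the historyEngine.go
	// hunk below.
	defer func() { _ = flush() }()

	fmt.Println("done")
}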
diff --git a/service/history/historyEngine.go b/service/history/historyEngine.go index e49217c4c22..5c153a7e91a 100644 --- a/service/history/historyEngine.go +++ b/service/history/historyEngine.go @@ -398,8 +398,7 @@ func (e *historyEngineImpl) registerNamespaceFailoverCallback() { e.NotifyNewTasks(e.currentClusterName, fakeTasks) } - // nolint:errcheck - e.shard.UpdateNamespaceNotificationVersion(newNotificationVersion) + _ = e.shard.UpdateNamespaceNotificationVersion(newNotificationVersion) }, ) } @@ -651,7 +650,7 @@ func (e *historyEngineImpl) getMutableStateOrPolling( if err != nil { return nil, err } - defer e.eventNotifier.UnwatchHistoryEvent(workflowKey, subscriberID) // nolint:errcheck + defer func() { _ = e.eventNotifier.UnwatchHistoryEvent(workflowKey, subscriberID) }() // check again in case the next event ID is updated response, err = e.getMutableState(ctx, workflowKey) if err != nil { diff --git a/service/history/historyEngine3_eventsv2_test.go b/service/history/historyEngine3_eventsv2_test.go index 5aeae41ce6b..9450384908c 100644 --- a/service/history/historyEngine3_eventsv2_test.go +++ b/service/history/historyEngine3_eventsv2_test.go @@ -50,7 +50,6 @@ import ( "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/payloads" "go.temporal.io/server/common/persistence" - p "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/service/history/api" "go.temporal.io/server/service/history/configs" @@ -113,7 +112,7 @@ func (s *engine3Suite) SetupTest() { s.mockShard = shard.NewTestContext( s.controller, - &p.ShardInfoWithFailover{ShardInfo: &persistencespb.ShardInfo{ + &persistence.ShardInfoWithFailover{ShardInfo: &persistencespb.ShardInfo{ ShardId: 1, RangeId: 1, }}, @@ -190,7 +189,7 @@ func (s *engine3Suite) TestRecordWorkflowTaskStartedSuccessStickyEnabled() { ms := workflow.TestCloneToProto(msBuilder) - gwmsResponse := &p.GetWorkflowExecutionResponse{State: ms} + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) @@ -312,8 +311,8 @@ func (s *engine3Suite) TestSignalWithStartWorkflowExecution_JustSignal() { }, "wType", "testTaskQueue", payloads.EncodeString("input"), 25*time.Second, 20*time.Second, 200*time.Second, identity) _ = addWorkflowTaskScheduledEvent(msBuilder) ms := workflow.TestCloneToProto(msBuilder) - gwmsResponse := &p.GetWorkflowExecutionResponse{State: ms} - gceResponse := &p.GetCurrentExecutionResponse{RunID: runID} + gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} + gceResponse := &persistence.GetCurrentExecutionResponse{RunID: runID} s.mockExecutionMgr.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()).Return(gceResponse, nil) s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) diff --git a/service/history/historyEngine_test.go b/service/history/historyEngine_test.go index 281e31b1a9d..e0163273bc3 100644 --- a/service/history/historyEngine_test.go +++ b/service/history/historyEngine_test.go @@ -5340,7 +5340,7 @@ func addWorkflowTaskCompletedEvent(builder workflow.MutableState, scheduledEvent Identity: identity, }, configs.DefaultHistoryMaxAutoResetPoints) - builder.FlushBufferedEvents() // nolint:errcheck + builder.FlushBufferedEvents() return event } diff 
--git a/service/history/nDCBranchMgr_test.go b/service/history/nDCBranchMgr_test.go index ba38fa7eb26..33562e523a0 100644 --- a/service/history/nDCBranchMgr_test.go +++ b/service/history/nDCBranchMgr_test.go @@ -263,6 +263,7 @@ func (s *nDCBranchMgrSuite) TestFlushBufferedEvents() { ctx := context.Background() _, _, err = s.nDCBranchMgr.flushBufferedEvents(ctx, incomingVersionHistory) + s.NoError(err) } func (s *nDCBranchMgrSuite) TestPrepareVersionHistory_BranchAppendable_NoMissingEventInBetween() { diff --git a/service/history/nDCHistoryReplicator.go b/service/history/nDCHistoryReplicator.go index 92f780e247e..843a0ea71bc 100644 --- a/service/history/nDCHistoryReplicator.go +++ b/service/history/nDCHistoryReplicator.go @@ -61,7 +61,6 @@ import ( var ( workflowTerminationReason = "Terminate Workflow Due To Version Conflict." workflowTerminationIdentity = "worker-service" - workflowResetReason = "Reset Workflow Due To Events Re-application." ) const ( diff --git a/service/history/nDCHistoryReplicator_test.go b/service/history/nDCHistoryReplicator_test.go index 3d4ac1d3339..f8005b54657 100644 --- a/service/history/nDCHistoryReplicator_test.go +++ b/service/history/nDCHistoryReplicator_test.go @@ -30,6 +30,7 @@ import ( "time" historypb "go.temporal.io/api/history/v1" + "go.temporal.io/server/common" "go.temporal.io/server/service/history/events" @@ -40,6 +41,7 @@ import ( commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" + "go.temporal.io/server/api/adminservice/v1" "go.temporal.io/server/api/adminservicemock/v1" enumsspb "go.temporal.io/server/api/enums/v1" @@ -70,10 +72,9 @@ type ( mockExecutionManager *persistence.MockExecutionManager logger log.Logger - namespaceID namespace.ID - workflowID string - runID string - now time.Time + workflowID string + runID string + now time.Time historyReplicator *nDCHistoryReplicatorImpl } @@ -88,7 +89,7 @@ func (s *nDCHistoryReplicatorSuite) SetupTest() { s.Assertions = require.New(s.T()) s.controller = gomock.NewController(s.T()) - //s.mockTaskRefresher = workflow.NewMockTaskRefresher(s.controller) + // s.mockTaskRefresher = workflow.NewMockTaskRefresher(s.controller) s.mockShard = shard.NewTestContext( s.controller, diff --git a/service/history/nDCReplicationTask.go b/service/history/nDCReplicationTask.go index 8d1b67deb91..c014fc3cc26 100644 --- a/service/history/nDCReplicationTask.go +++ b/service/history/nDCReplicationTask.go @@ -346,10 +346,7 @@ func validateReplicateEventsRequest( } func validateUUID(input string) bool { - if uuid.Parse(input) == nil { - return false - } - return true + return uuid.Parse(input) != nil } func validateEvents(events []*historypb.HistoryEvent) (int64, error) { diff --git a/service/history/nDCWorkflowResetter.go b/service/history/nDCWorkflowResetter.go index 6051875bbea..d65a1add404 100644 --- a/service/history/nDCWorkflowResetter.go +++ b/service/history/nDCWorkflowResetter.go @@ -126,6 +126,9 @@ func (r *nDCWorkflowResetterImpl) resetWorkflow( } resetBranchToken, err := r.getResetBranchToken(ctx, baseBranchToken, baseLastEventID) + if err != nil { + return nil, err + } requestID := uuid.New() rebuildMutableState, rebuiltHistorySize, err := r.stateRebuilder.rebuild( diff --git a/service/history/replication/task_processor_manager.go b/service/history/replication/task_processor_manager.go index 87eeb1ad260..9d165f807b1 100644 --- a/service/history/replication/task_processor_manager.go +++ b/service/history/replication/task_processor_manager.go @@ -28,13 
+28,11 @@ import ( "context" "sync" "sync/atomic" - "time" "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/client" "go.temporal.io/server/client/history" "go.temporal.io/server/common" - "go.temporal.io/server/common/backoff" "go.temporal.io/server/common/cluster" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" @@ -154,30 +152,6 @@ func (r *taskProcessorManagerImpl) Stop() { r.taskProcessorLock.Unlock() } -func (r *taskProcessorManagerImpl) completeReplicationTaskLoop() { - shardID := r.shard.GetShardID() - cleanupTimer := time.NewTimer(backoff.JitDuration( - r.config.ReplicationTaskProcessorCleanupInterval(shardID), - r.config.ReplicationTaskProcessorCleanupJitterCoefficient(shardID), - )) - defer cleanupTimer.Stop() - for { - select { - case <-cleanupTimer.C: - if err := r.cleanupReplicationTasks(); err != nil { - r.logger.Error("Failed to clean up replication messages.", tag.Error(err)) - r.metricsClient.Scope(metrics.ReplicationTaskCleanupScope).IncCounter(metrics.ReplicationTaskCleanupFailure) - } - cleanupTimer.Reset(backoff.JitDuration( - r.config.ReplicationTaskProcessorCleanupInterval(shardID), - r.config.ReplicationTaskProcessorCleanupJitterCoefficient(shardID), - )) - case <-r.shutdownChan: - return - } - } -} - func (r *taskProcessorManagerImpl) listenToClusterMetadataChange() { clusterMetadata := r.shard.GetClusterMetadata() clusterMetadata.RegisterMetadataChangeCallback( diff --git a/service/history/replication/task_processor_manager_test.go b/service/history/replication/task_processor_manager_test.go index 364b274a99e..29f12f34598 100644 --- a/service/history/replication/task_processor_manager_test.go +++ b/service/history/replication/task_processor_manager_test.go @@ -33,7 +33,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "go.temporal.io/server/api/adminservicemock/v1" "go.temporal.io/server/api/historyservicemock/v1" "go.temporal.io/server/client" "go.temporal.io/server/common/cluster" @@ -56,9 +55,7 @@ type ( controller *gomock.Controller mockShard *shard.MockContext mockEngine *shard.MockEngine - mockNamespaceCache *namespace.MockRegistry mockClientBean *client.MockBean - mockAdminClient *adminservicemock.MockAdminServiceClient mockClusterMetadata *cluster.MockMetadata mockHistoryClient *historyservicemock.MockHistoryServiceClient mockReplicationTaskExecutor *MockTaskExecutor diff --git a/service/history/service.go b/service/history/service.go index acca04e7fbe..eddd0ba901a 100644 --- a/service/history/service.go +++ b/service/history/service.go @@ -147,7 +147,7 @@ func (s *Service) Stop() { remainingTime = s.sleep(shardOwnershipTransferDelay, remainingTime) logger.Info("ShutdownHandler: No longer taking rpc requests") - remainingTime = s.sleep(gracePeriod, remainingTime) + _ = s.sleep(gracePeriod, remainingTime) // TODO: Change this to GracefulStop when integration tests are refactored. 
s.server.Stop() diff --git a/service/history/timerQueueProcessor.go b/service/history/timerQueueProcessor.go index bf080b0b1be..7ba82472da5 100644 --- a/service/history/timerQueueProcessor.go +++ b/service/history/timerQueueProcessor.go @@ -289,8 +289,8 @@ func (t *timerQueueProcessorImpl) completeTimersLoop() { for { select { case <-t.shutdownChan: - // before shutdown, make sure the ack level is up to date - t.completeTimers() //nolint:errcheck + // before shutdown, make sure the ack level is up-to-date + _ = t.completeTimers() return case <-timer.C: CompleteLoop: diff --git a/service/history/timerQueueStandbyProcessor.go b/service/history/timerQueueStandbyProcessor.go index afbd0d56a3f..2701e801678 100644 --- a/service/history/timerQueueStandbyProcessor.go +++ b/service/history/timerQueueStandbyProcessor.go @@ -201,11 +201,6 @@ func (t *timerQueueStandbyProcessorImpl) getAckLevel() tasks.Key { return t.timerQueueProcessorBase.timerQueueAckMgr.getAckLevel() } -//nolint:unused -func (t *timerQueueStandbyProcessorImpl) getReadLevel() tasks.Key { - return t.timerQueueProcessorBase.timerQueueAckMgr.getReadLevel() -} - // NotifyNewTimers - Notify the processor about the new standby timer events arrival. // This should be called each time new timer events arrives, otherwise timers maybe fired unexpected. func (t *timerQueueStandbyProcessorImpl) notifyNewTimers( diff --git a/service/history/timerQueueStandbyTaskExecutor.go b/service/history/timerQueueStandbyTaskExecutor.go index b0c1f9061da..1057e362af3 100644 --- a/service/history/timerQueueStandbyTaskExecutor.go +++ b/service/history/timerQueueStandbyTaskExecutor.go @@ -123,9 +123,9 @@ func (t *timerQueueStandbyTaskExecutor) executeUserTimerTimeoutTask( ) error { actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { timerSequence := t.getTimerSequence(mutableState) - - Loop: - for _, timerSequenceID := range timerSequence.LoadAndSortUserTimers() { + timerSequenceIDs := timerSequence.LoadAndSortUserTimers() + if len(timerSequenceIDs) > 0 { + timerSequenceID := timerSequenceIDs[0] _, ok := mutableState.GetUserTimerInfoByEventID(timerSequenceID.EventID) if !ok { errString := fmt.Sprintf("failed to find in user timer event ID: %v", timerSequenceID.EventID) @@ -139,11 +139,10 @@ func (t *timerQueueStandbyTaskExecutor) executeUserTimerTimeoutTask( ); isExpired { return getHistoryResendInfo(mutableState) } - // since the user timer are already sorted, so if there is one timer which will not expired - // all user timer after this timer will not expired - break Loop //nolint:staticcheck + // Since the user timers are already sorted, if the first timer has not expired, + // none of the user timers after it have expired either. } - // if there is no user timer expired, then we are good + // If there is no user timer expired, then we are good.
return nil, nil } @@ -181,9 +180,9 @@ func (t *timerQueueStandbyTaskExecutor) executeActivityTimeoutTask( actionFn := func(ctx context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { timerSequence := t.getTimerSequence(mutableState) updateMutableState := false - - Loop: - for _, timerSequenceID := range timerSequence.LoadAndSortActivityTimers() { + timerSequenceIDs := timerSequence.LoadAndSortActivityTimers() + if len(timerSequenceIDs) > 0 { + timerSequenceID := timerSequenceIDs[0] _, ok := mutableState.GetActivityInfo(timerSequenceID.EventID) if !ok { errString := fmt.Sprintf("failed to find in memory activity timer: %v", timerSequenceID.EventID) @@ -197,9 +196,8 @@ func (t *timerQueueStandbyTaskExecutor) executeActivityTimeoutTask( ); isExpired { return getHistoryResendInfo(mutableState) } - // since the activity timer are already sorted, so if there is one timer which will not expired - // all activity timer after this timer will not expire - break Loop //nolint:staticcheck + // Since the activity timers are already sorted, if the first timer has not expired, + // none of the activity timers after it have expired either. } // for reason to update mutable state & generate a new activity task, diff --git a/service/history/visibilityQueueTaskExecutor_test.go b/service/history/visibilityQueueTaskExecutor_test.go index 47d588fbde2..a8052a51eb1 100644 --- a/service/history/visibilityQueueTaskExecutor_test.go +++ b/service/history/visibilityQueueTaskExecutor_test.go @@ -347,7 +347,7 @@ func (s *visibilityQueueTaskExecutorSuite) createRecordWorkflowExecutionStartedR startEvent *historypb.HistoryEvent, task *tasks.StartExecutionVisibilityTask, mutableState workflow.MutableState, - backoffSeconds time.Duration, + backoff time.Duration, taskQueueName string, ) *manager.RecordWorkflowExecutionStartedRequest { execution := &commonpb.WorkflowExecution{ @@ -355,7 +355,7 @@ func (s *visibilityQueueTaskExecutorSuite) createRecordWorkflowExecutionStartedR RunId: task.RunID, } executionInfo := mutableState.GetExecutionInfo() - executionTimestamp := timestamp.TimeValue(startEvent.GetEventTime()).Add(backoffSeconds) + executionTimestamp := timestamp.TimeValue(startEvent.GetEventTime()).Add(backoff) return &manager.RecordWorkflowExecutionStartedRequest{ VisibilityRequestBase: &manager.VisibilityRequestBase{ diff --git a/service/history/workflow/history_builder.go b/service/history/workflow/history_builder.go index 373c3e93cb3..9b1779597d9 100644 --- a/service/history/workflow/history_builder.go +++ b/service/history/workflow/history_builder.go @@ -25,7 +25,6 @@ package workflow import ( - "fmt" "time" commandpb "go.temporal.io/api/command/v1" @@ -44,9 +43,9 @@ import ( ) const ( - HistoryBuilderStateMutable HistoryBuilderState = 0 - HistoryBuilderStateImmutable = 1 - HistoryBuilderStateSealed = 2 + HistoryBuilderStateMutable HistoryBuilderState = iota + HistoryBuilderStateImmutable + HistoryBuilderStateSealed ) // TODO should the reorderFunc functionality be ported?
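The constant block above swaps hand-numbered values for iota. In the old form only the first constant carried the HistoryBuilderState type; the follow-ups were plain untyped constants. With iota, an entry without an expression repeats both the type and the expression of the previous line, so every constant is typed and renumbering is automatic when an entry is inserted. A small self-contained sketch of the pattern (the BuilderState type and names are illustrative, not taken from the patch):

package main

import "fmt"

// BuilderState is an illustrative enum type, not the one from history_builder.go.
type BuilderState int

const (
	// The first entry fixes the type and starts iota at 0; the bare
	// entries below repeat "BuilderState = iota", so each constant is
	// typed BuilderState and increments automatically.
	StateMutable BuilderState = iota // 0
	StateImmutable                   // 1
	StateSealed                      // 2
)

func main() {
	fmt.Println(StateMutable, StateImmutable, StateSealed) // 0 1 2
}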
@@ -1172,13 +1171,13 @@ func (b *HistoryBuilder) assignTaskIDs( func (b *HistoryBuilder) assertMutable() { if b.state != HistoryBuilderStateMutable { - panic(fmt.Sprintf("history builder is mutated while not in mutable state")) + panic("history builder is mutated while not in mutable state") } } func (b *HistoryBuilder) assertNotSealed() { if b.state == HistoryBuilderStateSealed { - panic(fmt.Sprintf("history builder is in sealed state")) + panic("history builder is in sealed state") } } @@ -1189,7 +1188,7 @@ func (b *HistoryBuilder) createNewHistoryEvent( b.assertMutable() if b.workflowFinished { - panic(fmt.Sprintf("history builder unable to create new event after workflow finish")) + panic("history builder unable to create new event after workflow finish") } if b.finishEvent(eventType) { b.workflowFinished = true diff --git a/service/history/workflow/mutable_state_impl.go b/service/history/workflow/mutable_state_impl.go index 2ddb2bd0a82..1a5c41ce53a 100644 --- a/service/history/workflow/mutable_state_impl.go +++ b/service/history/workflow/mutable_state_impl.go @@ -645,6 +645,9 @@ func (e *MutableStateImpl) GetActivityInfoWithTimerHeartbeat( scheduledEventID int64, ) (*persistencespb.ActivityInfo, time.Time, bool) { ai, ok := e.pendingActivityInfoIDs[scheduledEventID] + if !ok { + return nil, time.Time{}, false + } timerVis, ok := e.pendingActivityTimerHeartbeats[scheduledEventID] return ai, timerVis, ok @@ -1209,32 +1212,6 @@ func (e *MutableStateImpl) DeleteUserTimer( return nil } -// nolint:unused -func (e *MutableStateImpl) getWorkflowTaskInfo() *WorkflowTaskInfo { - - taskQueue := &taskqueuepb.TaskQueue{} - if e.IsStickyTaskQueueEnabled() { - taskQueue.Name = e.executionInfo.StickyTaskQueue - taskQueue.Kind = enumspb.TASK_QUEUE_KIND_STICKY - } else { - taskQueue.Name = e.executionInfo.TaskQueue - taskQueue.Kind = enumspb.TASK_QUEUE_KIND_NORMAL - } - - return &WorkflowTaskInfo{ - Version: e.executionInfo.WorkflowTaskVersion, - ScheduledEventID: e.executionInfo.WorkflowTaskScheduledEventId, - StartedEventID: e.executionInfo.WorkflowTaskStartedEventId, - RequestID: e.executionInfo.WorkflowTaskRequestId, - WorkflowTaskTimeout: e.executionInfo.WorkflowTaskTimeout, - Attempt: e.executionInfo.WorkflowTaskAttempt, - StartedTime: e.executionInfo.WorkflowTaskStartedTime, - ScheduledTime: e.executionInfo.WorkflowTaskScheduledTime, - TaskQueue: taskQueue, - OriginalScheduledTime: e.executionInfo.WorkflowTaskOriginalScheduledTime, - } -} - // GetWorkflowTaskInfo returns details about the in-progress workflow task func (e *MutableStateImpl) GetWorkflowTaskInfo( scheduledEventID int64, @@ -1832,13 +1809,13 @@ func (e *MutableStateImpl) addBinaryCheckSumIfNotExists( // CheckResettable check if workflow can be reset func (e *MutableStateImpl) CheckResettable() error { if len(e.GetPendingChildExecutionInfos()) > 0 { - return serviceerror.NewInvalidArgument(fmt.Sprintf("it is not allowed resetting to a point that workflow has pending child workflow.")) + return serviceerror.NewInvalidArgument("it is not allowed resetting to a point that workflow has pending child workflow.") } if len(e.GetPendingRequestCancelExternalInfos()) > 0 { - return serviceerror.NewInvalidArgument(fmt.Sprintf("it is not allowed resetting to a point that workflow has pending request cancel.")) + return serviceerror.NewInvalidArgument("it is not allowed resetting to a point that workflow has pending request cancel.") } if len(e.GetPendingSignalExternalInfos()) > 0 { - return serviceerror.NewInvalidArgument(fmt.Sprintf("it is not 
allowed resetting to a point that workflow has pending signals to send.")) + return serviceerror.NewInvalidArgument("it is not allowed resetting to a point that workflow has pending signals to send.") } return nil } @@ -4462,7 +4439,7 @@ func (e *MutableStateImpl) closeTransactionHandleWorkflowReset( e.GetExecutionInfo().AutoResetPoints, ); pt != nil { if err := e.taskGenerator.GenerateWorkflowResetTasks( - e.unixNanoToTime(now.UnixNano()), + now, ); err != nil { return err } @@ -4489,13 +4466,13 @@ func (e *MutableStateImpl) closeTransactionHandleActivityUserTimerTasks( } if err := e.taskGenerator.GenerateActivityTimerTasks( - e.unixNanoToTime(now.UnixNano()), + now, ); err != nil { return err } return e.taskGenerator.GenerateUserTimerTasks( - e.unixNanoToTime(now.UnixNano()), + now, ) } @@ -4566,13 +4543,6 @@ func (e *MutableStateImpl) createCallerError( return serviceerror.NewInvalidArgument(msg) } -func (_ *MutableStateImpl) unixNanoToTime( - timestampNanos int64, -) time.Time { - - return time.Unix(0, timestampNanos).UTC() -} - func (e *MutableStateImpl) logInfo(msg string, tags ...tag.Tag) { tags = append(tags, tag.WorkflowID(e.executionInfo.WorkflowId)) tags = append(tags, tag.WorkflowRunID(e.executionState.RunId)) diff --git a/service/history/workflow/mutable_state_rebuilder.go b/service/history/workflow/mutable_state_rebuilder.go index db77c2f1346..a74b11a1db1 100644 --- a/service/history/workflow/mutable_state_rebuilder.go +++ b/service/history/workflow/mutable_state_rebuilder.go @@ -28,7 +28,6 @@ package workflow import ( "fmt" - "time" "github.com/pborman/uuid" commonpb "go.temporal.io/api/common/v1" @@ -658,10 +657,3 @@ func (b *MutableStateRebuilderImpl) ApplyEvents( return newRunMutableStateBuilder, nil } - -func (b *MutableStateRebuilderImpl) unixNanoToTime( - unixNano int64, -) time.Time { - - return time.Unix(0, unixNano).UTC() -} diff --git a/service/history/workflow/query_registry.go b/service/history/workflow/query_registry.go index fae6dad3e7b..d7d29a8ea25 100644 --- a/service/history/workflow/query_registry.go +++ b/service/history/workflow/query_registry.go @@ -225,7 +225,7 @@ func (r *queryRegistryImpl) getQueryNoLock(id string) (query, error) { } func (r *queryRegistryImpl) getIDs(m map[string]query) []string { - result := make([]string, len(m), len(m)) + result := make([]string, len(m)) index := 0 for id := range m { result[index] = id diff --git a/service/history/workflow/query_registry_test.go b/service/history/workflow/query_registry_test.go index e5696f86ccc..2c3e48156b6 100644 --- a/service/history/workflow/query_registry_test.go +++ b/service/history/workflow/query_registry_test.go @@ -51,8 +51,8 @@ func (s *QueryRegistrySuite) SetupTest() { func (s *QueryRegistrySuite) TestQueryRegistry() { qr := NewQueryRegistry() - ids := make([]string, 100, 100) - completionChs := make([]<-chan struct{}, 100, 100) + ids := make([]string, 100) + completionChs := make([]<-chan struct{}, 100) for i := 0; i < 100; i++ { ids[i], completionChs[i] = qr.BufferQuery(&querypb.WorkflowQuery{}) } diff --git a/service/history/workflow/retry.go b/service/history/workflow/retry.go index a13ad4605a5..843af8b6cc7 100644 --- a/service/history/workflow/retry.go +++ b/service/history/workflow/retry.go @@ -98,7 +98,7 @@ func getBackoffInterval( interval = *maxInterval } else if maxInterval == nil && interval <= 0 { return backoff.NoBackoff, enumspb.RETRY_STATE_TIMEOUT - } else { + // } else { // maxInterval != nil && (0 < interval && interval <= *maxInterval) // or // maxInterval == nil && 
interval > 0 diff --git a/service/history/workflowRebuilder.go b/service/history/workflowRebuilder.go index 1ec2bf0b9ea..9ec383dae7a 100644 --- a/service/history/workflowRebuilder.go +++ b/service/history/workflowRebuilder.go @@ -28,7 +28,6 @@ package history import ( "context", - "fmt" "math" "go.temporal.io/api/serviceerror" @@ -36,7 +35,6 @@ import ( persistencespb "go.temporal.io/server/api/persistence/v1" "go.temporal.io/server/common/definition" "go.temporal.io/server/common/log" - "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/persistence/versionhistory" "go.temporal.io/server/service/history/api" "go.temporal.io/server/service/history/shard" @@ -184,24 +182,3 @@ func (r *workflowRebuilderImpl) persistToDB( } return nil } - -func (r *workflowRebuilderImpl) getMutableState( - ctx context.Context, - workflowKey definition.WorkflowKey, -) (*persistencespb.WorkflowMutableState, int64, error) { - record, err := r.shard.GetWorkflowExecution(ctx, &persistence.GetWorkflowExecutionRequest{ - ShardID: r.shard.GetShardID(), - NamespaceID: workflowKey.NamespaceID, - WorkflowID: workflowKey.WorkflowID, - RunID: workflowKey.RunID, - }) - if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { - return nil, 0, err - } - // only check whether the execution is nil, do as much as we can - if record == nil { - return nil, 0, serviceerror.NewUnavailable(fmt.Sprintf("workflowRebuilder encountered error when loading execution record: %v", err)) - } - - return record.State, record.DBRecordVersion, nil -} diff --git a/service/history/workflowResetter.go b/service/history/workflowResetter.go index 50b5c53dbdc..cb0f5e19ce2 100644 --- a/service/history/workflowResetter.go +++ b/service/history/workflowResetter.go @@ -288,7 +288,7 @@ func (r *workflowResetterImpl) prepareResetWorkflow( } if len(resetMutableState.GetPendingChildExecutionInfos()) > 0 { - return nil, serviceerror.NewInvalidArgument(fmt.Sprintf("Can only reset workflow with pending child workflows")) + return nil, serviceerror.NewInvalidArgument("Cannot reset a workflow with pending child workflows") } if err := r.failWorkflowTask( diff --git a/service/history/workflowTaskHandlerCallbacks_test.go b/service/history/workflowTaskHandlerCallbacks_test.go index 8548c24497a..e65ab05b50a 100644 --- a/service/history/workflowTaskHandlerCallbacks_test.go +++ b/service/history/workflowTaskHandlerCallbacks_test.go @@ -312,7 +312,7 @@ func (s *WorkflowTaskHandlerCallbackSuite) constructQueryResults(ids []string, r for _, id := range ids { results[id] = &querypb.WorkflowQueryResult{ ResultType: enumspb.QUERY_RESULT_TYPE_ANSWERED, - Answer: payloads.EncodeBytes(make([]byte, resultSize, resultSize)), + Answer: payloads.EncodeBytes(make([]byte, resultSize)), } } return results diff --git a/service/matching/db_task_queue_ownership.go b/service/matching/db_task_queue_ownership.go index 02168eb2192..519a5ff612e 100644 --- a/service/matching/db_task_queue_ownership.go +++ b/service/matching/db_task_queue_ownership.go @@ -253,14 +253,14 @@ func (m *dbTaskQueueOwnershipImpl) generatedTaskIDsLocked( } } if m.ownershipState.maxTaskIDInclusive-m.ownershipState.lastAllocatedTaskID < int64(numTasks) { - panic(fmt.Sprintf("dbTaskQueueOwnershipImpl generatedTaskIDsLocked unable to allocate task IDs")) + panic("dbTaskQueueOwnershipImpl generatedTaskIDsLocked unable to allocate task IDs") } allocatedTaskIDs := make([]int64, numTasks) for i := 0; i < numTasks; i++ { m.ownershipState.lastAllocatedTaskID++ if m.ownershipState.lastAllocatedTaskID >
m.ownershipState.maxTaskIDInclusive { - panic(fmt.Sprintf("dbTaskQueueOwnershipImpl generatedTaskIDsLocked encountered task ID overflow")) + panic("dbTaskQueueOwnershipImpl generatedTaskIDsLocked encountered task ID overflow") } allocatedTaskIDs[i] = m.ownershipState.lastAllocatedTaskID } @@ -305,9 +305,9 @@ func (m *dbTaskQueueOwnershipImpl) updateStateLocked( } } else { if rangeID < m.ownershipState.rangeID { - panic(fmt.Sprintf("dbTaskQueueOwnershipImpl updateStateLocked encountered smaller range ID")) + panic("dbTaskQueueOwnershipImpl updateStateLocked encountered smaller range ID") } else if ackedTaskID < m.ownershipState.ackedTaskID { - panic(fmt.Sprintf("dbTaskQueueOwnershipImpl updateStateLocked encountered acked task ID")) + panic("dbTaskQueueOwnershipImpl updateStateLocked encountered acked task ID") } m.ownershipState.rangeID = rangeID m.ownershipState.ackedTaskID = ackedTaskID diff --git a/service/matching/handler.go b/service/matching/handler.go index e7a7597bb67..f71a66a8b8f 100644 --- a/service/matching/handler.go +++ b/service/matching/handler.go @@ -48,14 +48,13 @@ import ( type ( // Handler - gRPC handler interface for matchingservice Handler struct { - engine Engine - config *Config - metricsClient metrics.Client - logger log.Logger - startWG sync.WaitGroup - throttledLogger log.Logger - matchingServiceResolver membership.ServiceResolver - namespaceRegistry namespace.Registry + engine Engine + config *Config + metricsClient metrics.Client + logger log.Logger + startWG sync.WaitGroup + throttledLogger log.Logger + namespaceRegistry namespace.Registry } ) diff --git a/service/matching/matcher_test.go b/service/matching/matcher_test.go index f08a74c1b5c..055c8b46356 100644 --- a/service/matching/matcher_test.go +++ b/service/matching/matcher_test.go @@ -475,16 +475,6 @@ func (t *MatcherTestSuite) TestRemotePollForQuery() { t.True(task.isStarted()) } -func (t *MatcherTestSuite) newNamespaceCache() namespace.Registry { - entry := namespace.NewLocalNamespaceForTest( - &persistencespb.NamespaceInfo{Name: "test-namespace"}, - &persistencespb.NamespaceConfig{}, - "") - dc := namespace.NewMockRegistry(t.controller) - dc.EXPECT().GetNamespaceByID(gomock.Any()).Return(entry, nil).AnyTimes() - return dc -} - func randomTaskInfo() *persistencespb.AllocatedTaskInfo { rt1 := time.Date(rand.Intn(9999), time.Month(rand.Intn(12)+1), rand.Intn(28)+1, rand.Intn(24)+1, rand.Intn(60), rand.Intn(60), rand.Intn(1e9), time.UTC) rt2 := time.Date(rand.Intn(5000)+3000, time.Month(rand.Intn(12)+1), rand.Intn(28)+1, rand.Intn(24)+1, rand.Intn(60), rand.Intn(60), rand.Intn(1e9), time.UTC) diff --git a/service/matching/matchingEngine.go b/service/matching/matchingEngine.go index ec45bdd15a6..62228ebb9fe 100644 --- a/service/matching/matchingEngine.go +++ b/service/matching/matchingEngine.go @@ -111,8 +111,8 @@ var ( historyServiceOperationRetryPolicy = common.CreateHistoryServiceRetryPolicy() // ErrNoTasks is exported temporarily for integration test - ErrNoTasks = errors.New("No tasks") - errPumpClosed = errors.New("Task queue pump closed its channel") + ErrNoTasks = errors.New("no tasks") + errPumpClosed = errors.New("task queue pump closed its channel") pollerIDKey pollerIDCtxKey = "pollerID" identityKey identityCtxKey = "identity" @@ -700,6 +700,9 @@ func (e *matchingEngineImpl) UpdateWorkerBuildIdOrdering( namespaceID := namespace.ID(req.GetNamespaceId()) taskQueueName := req.GetRequest().GetTaskQueue() taskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + 
if err != nil { + return nil, err + } tqMgr, err := e.getTaskQueueManager(hCtx, taskQueue, enumspb.TASK_QUEUE_KIND_NORMAL, true) if err != nil { return nil, err @@ -720,6 +723,9 @@ func (e *matchingEngineImpl) GetWorkerBuildIdOrdering( namespaceID := namespace.ID(req.GetNamespaceId()) taskQueueName := req.GetRequest().GetTaskQueue() taskQueue, err := newTaskQueueID(namespaceID, taskQueueName, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } tqMgr, err := e.getTaskQueueManager(hCtx, taskQueue, enumspb.TASK_QUEUE_KIND_NORMAL, true) if err != nil { if _, ok := err.(*serviceerror.NotFound); ok { @@ -758,6 +764,9 @@ func (e *matchingEngineImpl) getAllPartitions( return partitionKeys, err } taskQueueID, err := newTaskQueueID(namespaceID, taskQueue.GetName(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return partitionKeys, err + } rootPartition := taskQueueID.GetRoot() partitionKeys = append(partitionKeys, rootPartition) diff --git a/service/matching/matchingEngine_test.go b/service/matching/matchingEngine_test.go index a0efe89da96..b8b05e8e36b 100644 --- a/service/matching/matchingEngine_test.go +++ b/service/matching/matchingEngine_test.go @@ -1927,7 +1927,7 @@ func (s *matchingEngineSuite) TestGetVersioningData() { s.NoError(err) s.NotNil(res.GetResponse().GetCurrentDefault()) lastNode = res.GetResponse().GetCurrentDefault() - for true { + for { if lastNode.GetPreviousIncompatible() == nil { break } @@ -1935,7 +1935,7 @@ func (s *matchingEngineSuite) TestGetVersioningData() { } s.Equal(mkVerId("95"), lastNode.GetVersion()) lastNode = res.GetResponse().GetCompatibleLeaves()[0] - for true { + for { if lastNode.GetPreviousCompatible() == nil { break } @@ -2031,7 +2031,6 @@ func (m *testTaskManager) GetName() string { } func (m *testTaskManager) Close() { - return } func (m *testTaskManager) getTaskQueueManager(id *taskQueueID) *testTaskQueueManager { diff --git a/service/matching/taskQueueManager_test.go b/service/matching/taskQueueManager_test.go index 9d9ab39aaf6..192c9b6f35c 100644 --- a/service/matching/taskQueueManager_test.go +++ b/service/matching/taskQueueManager_test.go @@ -217,6 +217,7 @@ func TestForeignPartitionOwnerCausesUnload(t *testing.T) { taskInfo: &persistencespb.TaskInfo{}, source: enumsspb.TASK_SOURCE_HISTORY, }) + require.NoError(t, err) require.False(t, sync) } diff --git a/service/matching/version_graph_test.go b/service/matching/version_graph_test.go index a9d5d1b93cf..439144b848b 100644 --- a/service/matching/version_graph_test.go +++ b/service/matching/version_graph_test.go @@ -32,6 +32,7 @@ import ( "go.temporal.io/api/serviceerror" taskqueuepb "go.temporal.io/api/taskqueue/v1" "go.temporal.io/api/workflowservice/v1" + persistencepb "go.temporal.io/server/api/persistence/v1" ) @@ -363,7 +364,7 @@ func TestLimitsMaxSize(t *testing.T) { } lastNode := data.GetCurrentDefault() - for true { + for { if lastNode.GetPreviousIncompatible() == nil { break } @@ -372,7 +373,7 @@ func TestLimitsMaxSize(t *testing.T) { assert.Equal(t, mkVerId("24"), lastNode.GetVersion()) assert.Equal(t, 1, len(data.GetCompatibleLeaves())) lastNode = data.GetCompatibleLeaves()[0] - for true { + for { if lastNode.GetPreviousCompatible() == nil { break } diff --git a/service/worker/archiver/handler_test.go b/service/worker/archiver/handler_test.go index eba835dd0af..233850a68cd 100644 --- a/service/worker/archiver/handler_test.go +++ b/service/worker/archiver/handler_test.go @@ -233,7 +233,7 @@ func startAndFinishArchiverWorkflow(ctx workflow.Context, concurrency 
int, numRe requestCh := workflow.NewBufferedChannel(ctx, numRequests) handler := NewHandler(ctx, handlerTestLogger, handlerTestMetrics, concurrency, requestCh) handler.Start() - sentHashes := make([]uint64, numRequests, numRequests) + sentHashes := make([]uint64, numRequests) workflow.Go(ctx, func(ctx workflow.Context) { for i := 0; i < numRequests; i++ { ar, hash := randomArchiveRequest() diff --git a/service/worker/archiver/pump_test.go b/service/worker/archiver/pump_test.go index f556e4c8719..c518fdbaefc 100644 --- a/service/worker/archiver/pump_test.go +++ b/service/worker/archiver/pump_test.go @@ -244,8 +244,8 @@ func signalAndCarryoverPumpWorkflow(ctx workflow.Context, requestLimit int, carr } func sendRequestsToChannel(ctx workflow.Context, ch workflow.Channel, numRequests int) ([]ArchiveRequest, []uint64) { - requests := make([]ArchiveRequest, numRequests, numRequests) - hashes := make([]uint64, numRequests, numRequests) + requests := make([]ArchiveRequest, numRequests) + hashes := make([]uint64, numRequests) workflow.Go(ctx, func(ctx workflow.Context) { for i := 0; i < numRequests; i++ { requests[i], hashes[i] = randomArchiveRequest() @@ -256,8 +256,8 @@ func sendRequestsToChannel(ctx workflow.Context, ch workflow.Channel, numRequest } func sendRequestsToChannelBlocking(ctx workflow.Context, ch workflow.Channel, numRequests int) ([]ArchiveRequest, []uint64) { - requests := make([]ArchiveRequest, numRequests, numRequests) - hashes := make([]uint64, numRequests, numRequests) + requests := make([]ArchiveRequest, numRequests) + hashes := make([]uint64, numRequests) for i := 0; i < numRequests; i++ { requests[i], hashes[i] = randomArchiveRequest() ch.Send(ctx, requests[i]) @@ -275,15 +275,12 @@ func channelContainsExpected(ctx workflow.Context, ch workflow.Channel, expected return false } } - if ch.Receive(ctx, nil) { - return false - } - return true + return !ch.Receive(ctx, nil) } func randomCarryover(count int) ([]ArchiveRequest, []uint64) { - carryover := make([]ArchiveRequest, count, count) - hashes := make([]uint64, count, count) + carryover := make([]ArchiveRequest, count) + hashes := make([]uint64, count) for i := 0; i < count; i++ { carryover[i], hashes[i] = randomArchiveRequest() } diff --git a/service/worker/archiver/util.go b/service/worker/archiver/util.go index b099f5d13f2..83908bf4d9b 100644 --- a/service/worker/archiver/util.go +++ b/service/worker/archiver/util.go @@ -45,7 +45,7 @@ func hash(i interface{}) uint64 { var b bytes.Buffer // please make sure encoder is deterministic (especially when encoding map objects) // use json not gob here as json will sort map keys, while gob is non-deterministic - json.NewEncoder(&b).Encode(i) //nolint:errcheck + _ = json.NewEncoder(&b).Encode(i) return farm.Fingerprint64(b.Bytes()) } diff --git a/service/worker/batcher/workflow.go b/service/worker/batcher/workflow.go index f2105907c3e..a026be81f43 100644 --- a/service/worker/batcher/workflow.go +++ b/service/worker/batcher/workflow.go @@ -45,7 +45,6 @@ import ( ) const ( - batcherContextKey = "batcherContext" // BatcherTaskQueueName is the taskqueue name BatcherTaskQueueName = "temporal-sys-batcher-taskqueue" // BatchWFTypeName is the workflow type @@ -74,9 +73,6 @@ const ( BatchTypeSignal = "signal" ) -// AllBatchTypes is the batch types we supported -var AllBatchTypes = []string{BatchTypeTerminate, BatchTypeCancel, BatchTypeSignal} - type ( // TerminateParams is the parameters for terminating workflow TerminateParams struct { @@ -151,9 +147,13 @@ type ( // passing along the 
current heartbeat details to make heartbeat within a task so that it won't timeout hbd HeartBeatDetails } + + batcherContextKeyType struct{} ) var ( + batcherContextKey = batcherContextKeyType{} + batchActivityRetryPolicy = temporal.RetryPolicy{ InitialInterval: 10 * time.Second, BackoffCoefficient: 1.7, diff --git a/service/worker/migration/activities.go b/service/worker/migration/activities.go index 1c254ecb04f..42634027fb3 100644 --- a/service/worker/migration/activities.go +++ b/service/worker/migration/activities.go @@ -36,6 +36,7 @@ import ( "go.temporal.io/api/serviceerror" "go.temporal.io/api/workflowservice/v1" "go.temporal.io/sdk/activity" + "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/backoff" @@ -255,11 +256,9 @@ func (a *activities) generateWorkflowReplicationTask(ctx context.Context, wKey d } err := backoff.ThrottleRetryContext(ctx, op, historyServiceRetryPolicy, common.IsServiceTransientError) - if err != nil { - if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { - // ignore NotFound error - return nil - } + if _, isNotFound := err.(*serviceerror.NotFound); isNotFound { + // ignore NotFound error + return nil } return err diff --git a/service/worker/parentclosepolicy/workflow.go b/service/worker/parentclosepolicy/workflow.go index 4eca435b680..03194c986d0 100644 --- a/service/worker/parentclosepolicy/workflow.go +++ b/service/worker/parentclosepolicy/workflow.go @@ -49,7 +49,6 @@ import ( ) const ( - processorContextKey = "processorContext" // processorTaskQueueName is the taskqueue name processorTaskQueueName = "temporal-sys-processor-parent-close-policy" // processorWFTypeName is the workflow type @@ -77,9 +76,13 @@ type ( ParentExecution commonpb.WorkflowExecution Executions []RequestDetail } + + processorContextKeyType struct{} ) var ( + processorContextKey = processorContextKeyType{} + retryPolicy = temporal.RetryPolicy{ InitialInterval: 10 * time.Second, BackoffCoefficient: 1.7, diff --git a/service/worker/scanner/history/scavenger_test.go b/service/worker/scanner/history/scavenger_test.go index 3c0568896ca..9379aef38b3 100644 --- a/service/worker/scanner/history/scavenger_test.go +++ b/service/worker/scanner/history/scavenger_test.go @@ -40,7 +40,6 @@ import ( "go.temporal.io/server/common/log" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/persistence" - p "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/primitives" "go.temporal.io/server/common/primitives/timestamp" ) @@ -94,42 +93,42 @@ func (s *ScavengerTestSuite) createTestScavenger( func (s *ScavengerTestSuite) TestAllSkipTasksTwoPages() { db, _, scvgr, controller := s.createTestScavenger(100) defer controller.Finish() - db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &p.GetAllHistoryTreeBranchesRequest{ + db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &persistence.GetAllHistoryTreeBranchesRequest{ PageSize: pageSize, - }).Return(&p.GetAllHistoryTreeBranchesResponse{ + }).Return(&persistence.GetAllHistoryTreeBranchesResponse{ NextPageToken: []byte("page1"), - Branches: []p.HistoryBranchDetail{ + Branches: []persistence.HistoryBranchDetail{ { TreeID: "treeID1", BranchID: "branchID1", ForkTime: timestamp.TimeNowPtrUtc(), - Info: p.BuildHistoryGarbageCleanupInfo("namespaceID1", "workflowID1", "runID1"), + Info: persistence.BuildHistoryGarbageCleanupInfo("namespaceID1", "workflowID1", "runID1"), }, { TreeID: "treeID2", BranchID: "branchID2", ForkTime: timestamp.TimeNowPtrUtc(), - Info: 
p.BuildHistoryGarbageCleanupInfo("namespaceID2", "workflowID2", "runID2"), + Info: persistence.BuildHistoryGarbageCleanupInfo("namespaceID2", "workflowID2", "runID2"), }, }, }, nil) - db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &p.GetAllHistoryTreeBranchesRequest{ + db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &persistence.GetAllHistoryTreeBranchesRequest{ PageSize: pageSize, NextPageToken: []byte("page1"), - }).Return(&p.GetAllHistoryTreeBranchesResponse{ - Branches: []p.HistoryBranchDetail{ + }).Return(&persistence.GetAllHistoryTreeBranchesResponse{ + Branches: []persistence.HistoryBranchDetail{ { TreeID: "treeID3", BranchID: "branchID3", ForkTime: timestamp.TimeNowPtrUtc(), - Info: p.BuildHistoryGarbageCleanupInfo("namespaceID3", "workflowID3", "runID3"), + Info: persistence.BuildHistoryGarbageCleanupInfo("namespaceID3", "workflowID3", "runID3"), }, { TreeID: "treeID4", BranchID: "branchID4", ForkTime: timestamp.TimeNowPtrUtc(), - Info: p.BuildHistoryGarbageCleanupInfo("namespaceID4", "workflowID4", "runID4"), + Info: persistence.BuildHistoryGarbageCleanupInfo("namespaceID4", "workflowID4", "runID4"), }, }, }, nil) @@ -146,11 +145,11 @@ func (s *ScavengerTestSuite) TestAllSkipTasksTwoPages() { func (s *ScavengerTestSuite) TestAllErrorSplittingTasksTwoPages() { db, _, scvgr, controller := s.createTestScavenger(100) defer controller.Finish() - db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &p.GetAllHistoryTreeBranchesRequest{ + db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &persistence.GetAllHistoryTreeBranchesRequest{ PageSize: pageSize, - }).Return(&p.GetAllHistoryTreeBranchesResponse{ + }).Return(&persistence.GetAllHistoryTreeBranchesResponse{ NextPageToken: []byte("page1"), - Branches: []p.HistoryBranchDetail{ + Branches: []persistence.HistoryBranchDetail{ { TreeID: "treeID1", BranchID: "branchID1", @@ -166,11 +165,11 @@ func (s *ScavengerTestSuite) TestAllErrorSplittingTasksTwoPages() { }, }, nil) - db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &p.GetAllHistoryTreeBranchesRequest{ + db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &persistence.GetAllHistoryTreeBranchesRequest{ PageSize: pageSize, NextPageToken: []byte("page1"), - }).Return(&p.GetAllHistoryTreeBranchesResponse{ - Branches: []p.HistoryBranchDetail{ + }).Return(&persistence.GetAllHistoryTreeBranchesResponse{ + Branches: []persistence.HistoryBranchDetail{ { TreeID: "treeID3", BranchID: "branchID3", @@ -198,42 +197,42 @@ func (s *ScavengerTestSuite) TestAllErrorSplittingTasksTwoPages() { func (s *ScavengerTestSuite) TestNoGarbageTwoPages() { db, client, scvgr, controller := s.createTestScavenger(100) defer controller.Finish() - db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &p.GetAllHistoryTreeBranchesRequest{ + db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &persistence.GetAllHistoryTreeBranchesRequest{ PageSize: pageSize, - }).Return(&p.GetAllHistoryTreeBranchesResponse{ + }).Return(&persistence.GetAllHistoryTreeBranchesResponse{ NextPageToken: []byte("page1"), - Branches: []p.HistoryBranchDetail{ + Branches: []persistence.HistoryBranchDetail{ { TreeID: "treeID1", BranchID: "branchID1", ForkTime: timestamp.TimeNowPtrUtcAddDuration(-cleanUpThreshold * 2), - Info: p.BuildHistoryGarbageCleanupInfo("namespaceID1", "workflowID1", "runID1"), + Info: persistence.BuildHistoryGarbageCleanupInfo("namespaceID1", "workflowID1", "runID1"), }, { TreeID: "treeID2", BranchID: "branchID2", ForkTime: timestamp.TimeNowPtrUtcAddDuration(-cleanUpThreshold * 2), - Info: 
p.BuildHistoryGarbageCleanupInfo("namespaceID2", "workflowID2", "runID2"), + Info: persistence.BuildHistoryGarbageCleanupInfo("namespaceID2", "workflowID2", "runID2"), }, }, }, nil) - db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &p.GetAllHistoryTreeBranchesRequest{ + db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &persistence.GetAllHistoryTreeBranchesRequest{ PageSize: pageSize, NextPageToken: []byte("page1"), - }).Return(&p.GetAllHistoryTreeBranchesResponse{ - Branches: []p.HistoryBranchDetail{ + }).Return(&persistence.GetAllHistoryTreeBranchesResponse{ + Branches: []persistence.HistoryBranchDetail{ { TreeID: "treeID3", BranchID: "branchID3", ForkTime: timestamp.TimeNowPtrUtcAddDuration(-cleanUpThreshold * 2), - Info: p.BuildHistoryGarbageCleanupInfo("namespaceID3", "workflowID3", "runID3"), + Info: persistence.BuildHistoryGarbageCleanupInfo("namespaceID3", "workflowID3", "runID3"), }, { TreeID: "treeID4", BranchID: "branchID4", ForkTime: timestamp.TimeNowPtrUtcAddDuration(-cleanUpThreshold * 2), - Info: p.BuildHistoryGarbageCleanupInfo("namespaceID4", "workflowID4", "runID4"), + Info: persistence.BuildHistoryGarbageCleanupInfo("namespaceID4", "workflowID4", "runID4"), }, }, }, nil) @@ -279,41 +278,41 @@ func (s *ScavengerTestSuite) TestNoGarbageTwoPages() { func (s *ScavengerTestSuite) TestDeletingBranchesTwoPages() { db, client, scvgr, controller := s.createTestScavenger(100) defer controller.Finish() - db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &p.GetAllHistoryTreeBranchesRequest{ + db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &persistence.GetAllHistoryTreeBranchesRequest{ PageSize: pageSize, - }).Return(&p.GetAllHistoryTreeBranchesResponse{ + }).Return(&persistence.GetAllHistoryTreeBranchesResponse{ NextPageToken: []byte("page1"), - Branches: []p.HistoryBranchDetail{ + Branches: []persistence.HistoryBranchDetail{ { TreeID: treeID1, BranchID: branchID1, ForkTime: timestamp.TimeNowPtrUtcAddDuration(-cleanUpThreshold * 2), - Info: p.BuildHistoryGarbageCleanupInfo("namespaceID1", "workflowID1", "runID1"), + Info: persistence.BuildHistoryGarbageCleanupInfo("namespaceID1", "workflowID1", "runID1"), }, { TreeID: treeID2, BranchID: branchID2, ForkTime: timestamp.TimeNowPtrUtcAddDuration(-cleanUpThreshold * 2), - Info: p.BuildHistoryGarbageCleanupInfo("namespaceID2", "workflowID2", "runID2"), + Info: persistence.BuildHistoryGarbageCleanupInfo("namespaceID2", "workflowID2", "runID2"), }, }, }, nil) - db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &p.GetAllHistoryTreeBranchesRequest{ + db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &persistence.GetAllHistoryTreeBranchesRequest{ PageSize: pageSize, NextPageToken: []byte("page1"), - }).Return(&p.GetAllHistoryTreeBranchesResponse{ - Branches: []p.HistoryBranchDetail{ + }).Return(&persistence.GetAllHistoryTreeBranchesResponse{ + Branches: []persistence.HistoryBranchDetail{ { TreeID: treeID3, BranchID: branchID3, ForkTime: timestamp.TimeNowPtrUtcAddDuration(-cleanUpThreshold * 2), - Info: p.BuildHistoryGarbageCleanupInfo("namespaceID3", "workflowID3", "runID3"), + Info: persistence.BuildHistoryGarbageCleanupInfo("namespaceID3", "workflowID3", "runID3"), }, { TreeID: treeID4, BranchID: branchID4, ForkTime: timestamp.TimeNowPtrUtcAddDuration(-cleanUpThreshold * 2), - Info: p.BuildHistoryGarbageCleanupInfo("namespaceID4", "workflowID4", "runID4"), + Info: persistence.BuildHistoryGarbageCleanupInfo("namespaceID4", "workflowID4", "runID4"), }, }, }, nil) @@ -347,27 +346,27 @@ func (s *ScavengerTestSuite) 
 		},
 	}).Return(nil, serviceerror.NewNotFound(""))
 
-	branchToken1, err := p.NewHistoryBranchTokenByBranchID(treeID1, branchID1)
+	branchToken1, err := persistence.NewHistoryBranchTokenByBranchID(treeID1, branchID1)
 	s.Nil(err)
-	db.EXPECT().DeleteHistoryBranch(gomock.Any(), &p.DeleteHistoryBranchRequest{
+	db.EXPECT().DeleteHistoryBranch(gomock.Any(), &persistence.DeleteHistoryBranchRequest{
 		BranchToken: branchToken1,
 		ShardID:     common.WorkflowIDToHistoryShard("namespaceID1", "workflowID1", s.numShards),
 	}).Return(nil)
-	branchToken2, err := p.NewHistoryBranchTokenByBranchID(treeID2, branchID2)
+	branchToken2, err := persistence.NewHistoryBranchTokenByBranchID(treeID2, branchID2)
 	s.Nil(err)
-	db.EXPECT().DeleteHistoryBranch(gomock.Any(), &p.DeleteHistoryBranchRequest{
+	db.EXPECT().DeleteHistoryBranch(gomock.Any(), &persistence.DeleteHistoryBranchRequest{
 		BranchToken: branchToken2,
 		ShardID:     common.WorkflowIDToHistoryShard("namespaceID2", "workflowID2", s.numShards),
 	}).Return(nil)
-	branchToken3, err := p.NewHistoryBranchTokenByBranchID(treeID3, branchID3)
+	branchToken3, err := persistence.NewHistoryBranchTokenByBranchID(treeID3, branchID3)
 	s.Nil(err)
-	db.EXPECT().DeleteHistoryBranch(gomock.Any(), &p.DeleteHistoryBranchRequest{
+	db.EXPECT().DeleteHistoryBranch(gomock.Any(), &persistence.DeleteHistoryBranchRequest{
 		BranchToken: branchToken3,
 		ShardID:     common.WorkflowIDToHistoryShard("namespaceID3", "workflowID3", s.numShards),
 	}).Return(nil)
-	branchToken4, err := p.NewHistoryBranchTokenByBranchID(treeID4, branchID4)
+	branchToken4, err := persistence.NewHistoryBranchTokenByBranchID(treeID4, branchID4)
 	s.Nil(err)
-	db.EXPECT().DeleteHistoryBranch(gomock.Any(), &p.DeleteHistoryBranchRequest{
+	db.EXPECT().DeleteHistoryBranch(gomock.Any(), &persistence.DeleteHistoryBranchRequest{
 		BranchToken: branchToken4,
 		ShardID:     common.WorkflowIDToHistoryShard("namespaceID4", "workflowID4", s.numShards),
 	}).Return(nil)
@@ -384,17 +383,17 @@ func (s *ScavengerTestSuite) TestDeletingBranchesTwoPages() {
 func (s *ScavengerTestSuite) TestMixesTwoPages() {
 	db, client, scvgr, controller := s.createTestScavenger(100)
 	defer controller.Finish()
-	db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &p.GetAllHistoryTreeBranchesRequest{
+	db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &persistence.GetAllHistoryTreeBranchesRequest{
 		PageSize: pageSize,
-	}).Return(&p.GetAllHistoryTreeBranchesResponse{
+	}).Return(&persistence.GetAllHistoryTreeBranchesResponse{
 		NextPageToken: []byte("page1"),
-		Branches: []p.HistoryBranchDetail{
+		Branches: []persistence.HistoryBranchDetail{
 			{
 				// skip
 				TreeID:   treeID1,
 				BranchID: branchID1,
 				ForkTime: timestamp.TimeNowPtrUtc(),
-				Info:     p.BuildHistoryGarbageCleanupInfo("namespaceID1", "workflowID1", "runID1"),
+				Info:     persistence.BuildHistoryGarbageCleanupInfo("namespaceID1", "workflowID1", "runID1"),
 			},
 			{
 				// split error
@@ -405,31 +404,31 @@ func (s *ScavengerTestSuite) TestMixesTwoPages() {
 			},
 		},
 	}, nil)
-	db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &p.GetAllHistoryTreeBranchesRequest{
+	db.EXPECT().GetAllHistoryTreeBranches(gomock.Any(), &persistence.GetAllHistoryTreeBranchesRequest{
 		PageSize:      pageSize,
 		NextPageToken: []byte("page1"),
-	}).Return(&p.GetAllHistoryTreeBranchesResponse{
-		Branches: []p.HistoryBranchDetail{
+	}).Return(&persistence.GetAllHistoryTreeBranchesResponse{
+		Branches: []persistence.HistoryBranchDetail{
 			{
 				// delete succ
 				TreeID:   treeID3,
 				BranchID: branchID3,
 				ForkTime: timestamp.TimeNowPtrUtcAddDuration(-cleanUpThreshold * 2),
-				Info:     p.BuildHistoryGarbageCleanupInfo("namespaceID3", "workflowID3", "runID3"),
+				Info:     persistence.BuildHistoryGarbageCleanupInfo("namespaceID3", "workflowID3", "runID3"),
 			},
 			{
 				// delete fail
 				TreeID:   treeID4,
 				BranchID: branchID4,
 				ForkTime: timestamp.TimeNowPtrUtcAddDuration(-cleanUpThreshold * 2),
-				Info:     p.BuildHistoryGarbageCleanupInfo("namespaceID4", "workflowID4", "runID4"),
+				Info:     persistence.BuildHistoryGarbageCleanupInfo("namespaceID4", "workflowID4", "runID4"),
 			},
 			{
 				// not delete
 				TreeID:   treeID5,
 				BranchID: branchID5,
 				ForkTime: timestamp.TimeNowPtrUtcAddDuration(-cleanUpThreshold * 2),
-				Info:     p.BuildHistoryGarbageCleanupInfo("namespaceID5", "workflowID5", "runID5"),
+				Info:     persistence.BuildHistoryGarbageCleanupInfo("namespaceID5", "workflowID5", "runID5"),
 			},
 		},
 	}, nil)
@@ -457,16 +456,16 @@ func (s *ScavengerTestSuite) TestMixesTwoPages() {
 		},
 	}).Return(nil, nil)
 
-	branchToken3, err := p.NewHistoryBranchTokenByBranchID(treeID3, branchID3)
+	branchToken3, err := persistence.NewHistoryBranchTokenByBranchID(treeID3, branchID3)
 	s.Nil(err)
-	db.EXPECT().DeleteHistoryBranch(gomock.Any(), &p.DeleteHistoryBranchRequest{
+	db.EXPECT().DeleteHistoryBranch(gomock.Any(), &persistence.DeleteHistoryBranchRequest{
 		BranchToken: branchToken3,
 		ShardID:     common.WorkflowIDToHistoryShard("namespaceID3", "workflowID3", s.numShards),
 	}).Return(nil)
-	branchToken4, err := p.NewHistoryBranchTokenByBranchID(treeID4, branchID4)
+	branchToken4, err := persistence.NewHistoryBranchTokenByBranchID(treeID4, branchID4)
 	s.Nil(err)
-	db.EXPECT().DeleteHistoryBranch(gomock.Any(), &p.DeleteHistoryBranchRequest{
+	db.EXPECT().DeleteHistoryBranch(gomock.Any(), &persistence.DeleteHistoryBranchRequest{
 		BranchToken: branchToken4,
 		ShardID:     common.WorkflowIDToHistoryShard("namespaceID4", "workflowID4", s.numShards),
 	}).Return(fmt.Errorf("failed to delete history"))
diff --git a/service/worker/scanner/workflow.go b/service/worker/scanner/workflow.go
index c351d625e61..dee734d043b 100644
--- a/service/worker/scanner/workflow.go
+++ b/service/worker/scanner/workflow.go
@@ -40,19 +40,7 @@ import (
 	"go.temporal.io/server/service/worker/scanner/taskqueue"
 )
 
-type (
-	contextKey int
-
-	scannerCtxExecMgrFactory struct {
-		ctx scannerContext
-	}
-)
-
-func (s scannerCtxExecMgrFactory) Close() {}
-
 const (
-	scannerContextKey = contextKey(0)
-
 	infiniteDuration = 20 * 365 * 24 * time.Hour
 
 	tqScannerWFID = "temporal-sys-tq-scanner"
@@ -71,7 +59,12 @@ const (
 	executionsScavengerActivityName = "temporal-sys-executions-scanner-scvg-activity"
 )
 
+type (
+	scannerContextKeyType struct{}
+)
+
 var (
+	scannerContextKey = scannerContextKeyType{}
 	tlScavengerHBInterval         = 10 * time.Second
 	executionsScavengerHBInterval = 10 * time.Second
diff --git a/service/worker/scheduler/calendar_test.go b/service/worker/scheduler/calendar_test.go
index dacde718583..5476460b3f5 100644
--- a/service/worker/scheduler/calendar_test.go
+++ b/service/worker/scheduler/calendar_test.go
@@ -269,10 +269,10 @@ func (s *calendarSuite) TestParseValue() {
 	s.NoError(err)
 	s.Equal(29, i)
 
-	i, err = parseValue("29", 1, 12, parseModeInt)
+	_, err = parseValue("29", 1, 12, parseModeInt)
 	s.Error(err)
 
-	i, err = parseValue("random text", 1, 31, parseModeInt)
+	_, err = parseValue("random text", 1, 31, parseModeInt)
 	s.Error(err)
 
 	i, err = parseValue("fri", 0, 7, parseModeDow)
diff --git a/tests/integration/cassandra_test.go b/tests/integration/cassandra_test.go
index a664dc381e5..b9e91f52106 100644
--- a/tests/integration/cassandra_test.go
+++ b/tests/integration/cassandra_test.go
@@ -44,8 +44,6 @@ import (
 
 // TODO merge the initialization with existing persistence setup
 const (
-	testCassandraClusterName = "temporal_cassandra_cluster"
-
 	testCassandraUser               = "temporal"
 	testCassandraPassword           = "temporal"
 	testCassandraDatabaseNamePrefix = "test_"
diff --git a/tests/integration/mysql_test.go b/tests/integration/mysql_test.go
index e4453e426fd..6723413ba23 100644
--- a/tests/integration/mysql_test.go
+++ b/tests/integration/mysql_test.go
@@ -46,8 +46,6 @@ import (
 
 // TODO merge the initialization with existing persistence setup
 const (
-	testMySQLClusterName = "temporal_mysql_cluster"
-
 	testMySQLUser               = "temporal"
 	testMySQLPassword           = "temporal"
 	testMySQLConnectionProtocol = "tcp"
diff --git a/tests/integration/postgresql_test.go b/tests/integration/postgresql_test.go
index 26999693c31..0320732570d 100644
--- a/tests/integration/postgresql_test.go
+++ b/tests/integration/postgresql_test.go
@@ -46,8 +46,6 @@ import (
 
 // TODO merge the initialization with existing persistence setup
 const (
-	testPostgreSQLClusterName = "temporal_postgresql_cluster"
-
 	testPostgreSQLUser               = "temporal"
 	testPostgreSQLPassword           = "temporal"
 	testPostgreSQLConnectionProtocol = "tcp"
diff --git a/tests/testhelper/certificate.go b/tests/testhelper/certificate.go
index 0008a170d56..9e132405365 100644
--- a/tests/testhelper/certificate.go
+++ b/tests/testhelper/certificate.go
@@ -37,11 +37,6 @@ import (
 	"time"
 )
 
-// GenerateSelfSignedUseEverywhereX509 generates a TLS serverCert that is self-signed
-func generateSelfSignedUseEverywhereX509(commonName string, keyLengthBits int) (*tls.Certificate, error) {
-	return generateSelfSignedX509CA(commonName, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, keyLengthBits)
-}
-
 // GenerateSelfSignedX509CA generates a TLS serverCert that is self-signed
 func generateSelfSignedX509CA(commonName string, extUsage []x509.ExtKeyUsage, keyLengthBits int) (*tls.Certificate, error) {
 	now := time.Now().UTC()
@@ -149,8 +144,3 @@ func generateServerX509UsingCAAndSerialNumber(commonName string, serialNumber in
 
 	return &tlsCert, privateKey, err
 }
-
-// GenerateServerX509UsingCA generates a TLS serverCert that is self-signed
-func generateServerX509UsingCA(commonName string, ca *tls.Certificate) (*tls.Certificate, *rsa.PrivateKey, error) {
-	return generateServerX509UsingCAAndSerialNumber(commonName, 0, ca)
-}
diff --git a/tests/testhelper/tls.go b/tests/testhelper/tls.go
index 4742aed8c27..670fb0b89aa 100644
--- a/tests/testhelper/tls.go
+++ b/tests/testhelper/tls.go
@@ -89,7 +89,13 @@ func GenerateTestChainWithSN(tempDir string, commonName string, serialNumber int
 
 func GenerateTestCerts(tempDir string, commonName string, num int) ([]*tls.Certificate, *x509.CertPool, *x509.CertPool, error) {
 	caCert, err := GenerateSelfSignedCA(CAFilePath(tempDir))
+	if err != nil {
+		return nil, nil, nil, err
+	}
 	caPool, err := GenerateSelfSignedCAPool(caCert)
+	if err != nil {
+		return nil, nil, nil, err
+	}
 
 	chains := make([]*tls.Certificate, num)
 	for i := 0; i < num; i++ {
@@ -109,7 +115,7 @@ func GenerateTestCerts(tempDir string, commonName string, num int) ([]*tls.Certi
 
 	wrongCAPool, err := GenerateSelfSignedCAPool(wrongCACert)
 
-	return chains, caPool, wrongCAPool, nil
+	return chains, caPool, wrongCAPool, err
 }
 
 func GenerateSelfSignedCAPool(caCert *tls.Certificate) (*x509.CertPool, error) {
diff --git a/tools/cassandra/cqlclient.go b/tools/cassandra/cqlclient.go
index b1333ec5fef..c2ea07959ac 100644
--- a/tools/cassandra/cqlclient.go
+++ b/tools/cassandra/cqlclient.go
@@ -64,8 +64,7 @@ type (
 	}
 )
 
-var errNoHosts = errors.New("Cassandra Hosts list is empty or malformed")
-var errGetSchemaVersion = errors.New("Failed to get current schema version from cassandra")
+var errGetSchemaVersion = errors.New("unable to get current schema version from cassandra")
 
 const (
 	defaultTimeout = 30 // Timeout in seconds
diff --git a/tools/cassandra/version_test.go b/tools/cassandra/version_test.go
index aa399cf42be..d88c12b9f60 100644
--- a/tools/cassandra/version_test.go
+++ b/tools/cassandra/version_test.go
@@ -25,7 +25,6 @@ package cassandra
 
 import (
-	"os"
 	"path"
 	"runtime"
 	"testing"
@@ -117,10 +116,3 @@ func (s *VersionTestSuite) createKeyspace(keyspace string) func() {
 		client.Close()
 	}
 }
-
-func (s *VersionTestSuite) createSchemaForVersion(subdir string, v string) {
-	vDir := subdir + "/v" + v
-	s.NoError(os.Mkdir(vDir, os.FileMode(0744)))
-	cqlFile := vDir + "/tmp.cql"
-	s.NoError(os.WriteFile(cqlFile, []byte{}, os.FileMode(0644)))
-}
diff --git a/tools/sql/clitest/updatetaskTest.go b/tools/sql/clitest/updatetaskTest.go
index 508569442e9..8e5de3714e0 100644
--- a/tools/sql/clitest/updatetaskTest.go
+++ b/tools/sql/clitest/updatetaskTest.go
@@ -103,6 +103,7 @@ func (s *UpdateSchemaTestSuite) TestDryrun() {
 // TestVisibilityDryrun test
 func (s *UpdateSchemaTestSuite) TestVisibilityDryrun() {
 	conn, err := newTestConn(s.DBName, s.host, s.port, s.pluginName)
+	s.NoError(err)
 	defer conn.Close()
 	dir, err := filepath.Abs(s.visibilitySchemaVersionDir)
 	s.NoError(err)
diff --git a/tools/sql/clitest/versionTest.go b/tools/sql/clitest/versionTest.go
index 21610976f03..b3d5ccd6f90 100644
--- a/tools/sql/clitest/versionTest.go
+++ b/tools/sql/clitest/versionTest.go
@@ -26,7 +26,6 @@ package clitest
 
 import (
 	"fmt"
-	"os"
 
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
@@ -146,9 +145,3 @@ func (s *VersionTestSuite) createDatabase(database string) func() {
 		connection.Close()
 	}
 }
-func (s *VersionTestSuite) createSchemaForVersion(subdir string, v string) {
-	vDir := subdir + "/v" + v
-	s.NoError(os.Mkdir(vDir, os.FileMode(0744)))
-	cqlFile := vDir + "/tmp.sql"
-	s.NoError(os.WriteFile(cqlFile, []byte{}, os.FileMode(0644)))
-}