
Merge branch 'main' into main
MioOgbeni authored Oct 18, 2021
2 parents b19740f + fa476b5 commit d847f0f
Showing 14 changed files with 259 additions and 97 deletions.
8 changes: 4 additions & 4 deletions CHANGELOG.md
@@ -10,20 +10,20 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re

 ## Unreleased

-### Fixed
-
-- [#4663](https://github.com/thanos-io/thanos/pull/4663) Fetcher: Fix discovered data races
-
 ### Added

 - [#4680](https://github.com/thanos-io/thanos/pull/4680) Query: add `exemplar.partial-response` flag to control partial response.
 - [#4679](https://github.com/thanos-io/thanos/pull/4679) Added `enable-feature` flag to enable negative offsets and @ modifier, similar to Prometheus.
 - [#4696](https://github.com/thanos-io/thanos/pull/4696) Query: add cache name to tracing spans.
 - [#4736](https://github.com/thanos-io/thanos/pull/4736) S3: Add capability to use custom AWS STS Endpoint
+- [#4764](https://github.com/thanos-io/thanos/pull/4764) Compactor: add `block-viewer.global.sync-block-timeout` flag to set the timeout of synchronization block metas.

 ### Fixed

 - [#4508](https://github.com/thanos-io/thanos/pull/4508) Adjust and rename `ThanosSidecarUnhealthy` to `ThanosSidecarNoConnectionToStartedPrometheus`; Remove `ThanosSidecarPrometheusDown` alert; Remove unused `thanos_sidecar_last_heartbeat_success_time_seconds` metrics.
+- [#4663](https://github.com/thanos-io/thanos/pull/4663) Fetcher: Fix discovered data races.
+- [#4754](https://github.com/thanos-io/thanos/pull/4754) Query: Fix possible panic on stores endpoint.
+- [#4753](https://github.com/thanos-io/thanos/pull/4753) Store: validate block sync concurrency parameter

 ## [v0.23.1](https://github.com/thanos-io/thanos/tree/release-0.23) - 2021.10.1

7 changes: 5 additions & 2 deletions cmd/thanos/compact.go
@@ -537,14 +537,14 @@ func runCompact(
 	}

 	g.Add(func() error {
-		iterCtx, iterCancel := context.WithTimeout(ctx, conf.waitInterval)
+		iterCtx, iterCancel := context.WithTimeout(ctx, conf.blockViewerSyncBlockTimeout)
 		_, _, _ = f.Fetch(iterCtx)
 		iterCancel()

 		// For /global state make sure to fetch periodically.
 		return runutil.Repeat(conf.blockViewerSyncBlockInterval, ctx.Done(), func() error {
 			return runutil.RetryWithLog(logger, time.Minute, ctx.Done(), func() error {
-				iterCtx, iterCancel := context.WithTimeout(ctx, conf.waitInterval)
+				iterCtx, iterCancel := context.WithTimeout(ctx, conf.blockViewerSyncBlockTimeout)
 				defer iterCancel()

 				_, _, err := f.Fetch(iterCtx)
@@ -576,6 +576,7 @@ type compactConfig struct {
 	blockSyncConcurrency         int
 	blockMetaFetchConcurrency    int
 	blockViewerSyncBlockInterval time.Duration
+	blockViewerSyncBlockTimeout  time.Duration
 	cleanupBlocksInterval        time.Duration
 	compactionConcurrency        int
 	downsampleConcurrency        int
@@ -634,6 +635,8 @@ func (cc *compactConfig) registerFlag(cmd extkingpin.FlagClause) {
 		Default("32").IntVar(&cc.blockMetaFetchConcurrency)
 	cmd.Flag("block-viewer.global.sync-block-interval", "Repeat interval for syncing the blocks between local and remote view for /global Block Viewer UI.").
 		Default("1m").DurationVar(&cc.blockViewerSyncBlockInterval)
+	cmd.Flag("block-viewer.global.sync-block-timeout", "Maximum time for syncing the blocks between local and remote view for /global Block Viewer UI.").
+		Default("5m").DurationVar(&cc.blockViewerSyncBlockTimeout)
	cmd.Flag("compact.cleanup-interval", "How often we should clean up partially uploaded blocks and blocks with deletion mark in the background when --wait has been enabled. Setting it to \"0s\" disables it - the cleaning will only happen at the end of an iteration.").
 		Default("5m").DurationVar(&cc.cleanupBlocksInterval)

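The change above gives each /global block-viewer sync iteration its own deadline, so one slow object-storage fetch can no longer stall the loop past --block-viewer.global.sync-block-timeout. Below is a minimal, self-contained sketch of that pattern; fetch, syncInterval, and syncTimeout are illustrative stand-ins, not the real Thanos helpers.

package main

import (
	"context"
	"fmt"
	"time"
)

// fetch stands in for f.Fetch: it does some work but respects cancellation.
func fetch(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx := context.Background()
	syncInterval := 200 * time.Millisecond // cf. --block-viewer.global.sync-block-interval
	syncTimeout := 100 * time.Millisecond  // cf. --block-viewer.global.sync-block-timeout

	ticker := time.NewTicker(syncInterval)
	defer ticker.Stop()
	for i := 0; i < 3; i++ {
		<-ticker.C
		// Each iteration gets its own context.WithTimeout, mirroring the diff above,
		// so a hung fetch is abandoned after syncTimeout instead of blocking forever.
		iterCtx, iterCancel := context.WithTimeout(ctx, syncTimeout)
		err := fetch(iterCtx)
		iterCancel()
		fmt.Println("sync attempt:", err)
	}
}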
2 changes: 1 addition & 1 deletion cmd/thanos/query.go
@@ -569,7 +569,7 @@ func runQuery(

 	api := v1.NewQueryAPI(
 		logger,
-		endpoints,
+		endpoints.GetEndpointStatus,
 		engineFactory(promql.NewEngine, engineOpts, dynamicLookbackDelta),
 		queryableCreator,
 		// NOTE: Will share the same replica label as the query for now.
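Passing endpoints.GetEndpointStatus (a method value) instead of the whole *query.EndpointSet narrows the API's dependency to exactly one capability and makes it trivial to stub, as the new TestStoresEndpoint further down demonstrates. A small sketch of the method-value technique, using hypothetical Registry/Status types rather than the Thanos ones:

package main

import "fmt"

type Status struct{ Name string }

type Registry struct{ statuses []Status }

func (r *Registry) List() []Status { return r.statuses }

// API depends only on a func() []Status, not on *Registry.
type API struct{ list func() []Status }

func main() {
	r := &Registry{statuses: []Status{{Name: "endpoint-1"}}}
	api := API{list: r.List} // method value: the function is bound to r
	fmt.Println(api.list())  // [{endpoint-1}]

	// In tests, a plain closure replaces the real registry.
	stub := API{list: func() []Status { return nil }}
	fmt.Println(stub.list()) // []
}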
2 changes: 1 addition & 1 deletion cmd/thanos/store.go
@@ -113,7 +113,7 @@ func (sc *storeConfig) registerFlag(cmd extkingpin.FlagClause) {
 	cmd.Flag("sync-block-duration", "Repeat interval for syncing the blocks between local and remote view.").
 		Default("3m").DurationVar(&sc.syncInterval)

-	cmd.Flag("block-sync-concurrency", "Number of goroutines to use when constructing index-cache.json blocks from object storage.").
+	cmd.Flag("block-sync-concurrency", "Number of goroutines to use when constructing index-cache.json blocks from object storage. Must be equal or greater than 1.").
 		Default("20").IntVar(&sc.blockSyncConcurrency)

 	cmd.Flag("block-meta-fetch-concurrency", "Number of goroutines to use when fetching block metadata from object storage.").
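This hunk only updates the flag's help text; the guard itself comes from #4753 in the store startup path. A hedged sketch of what such a check typically looks like — the function name and error wording here are illustrative, not the actual Thanos code:

package main

import (
	"errors"
	"fmt"
)

// validateBlockSyncConcurrency rejects values the sync worker pool cannot use.
func validateBlockSyncConcurrency(n int) error {
	if n < 1 {
		return errors.New("block-sync-concurrency must be equal or greater than 1")
	}
	return nil
}

func main() {
	fmt.Println(validateBlockSyncConcurrency(20)) // <nil>
	fmt.Println(validateBlockSyncConcurrency(0))  // error: rejected at startup
}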
4 changes: 4 additions & 0 deletions docs/components/compact.md
@@ -284,6 +284,10 @@ Flags:
                                  Repeat interval for syncing the blocks between
                                  local and remote view for /global Block Viewer
                                  UI.
+      --block-viewer.global.sync-block-timeout=5m
+                                 Maximum time for syncing the blocks between
+                                 local and remote view for /global Block Viewer
+                                 UI.
       --bucket-web-label=BUCKET-WEB-LABEL
                                  Prometheus label to use as timeline title in the
                                  bucket web UI
1 change: 1 addition & 0 deletions docs/components/store.md
@@ -34,6 +34,7 @@ Flags:
       --block-sync-concurrency=20
                                  Number of goroutines to use when constructing
                                  index-cache.json blocks from object storage.
+                                 Must be equal or greater than 1.
       --chunk-pool-size=2GB      Maximum size of concurrently allocatable bytes
                                  reserved strictly to reuse for chunks in
                                  memory.
14 changes: 9 additions & 5 deletions pkg/api/query/v1.go
@@ -93,8 +93,8 @@ type QueryAPI struct {
 	enableExemplarPartialResponse bool
 	disableCORS                   bool

-	replicaLabels []string
-	endpointSet   *query.EndpointSet
+	replicaLabels  []string
+	endpointStatus func() []query.EndpointStatus

 	defaultRangeQueryStep                  time.Duration
 	defaultInstantQueryMaxSourceResolution time.Duration
@@ -106,7 +106,7 @@ type QueryAPI struct {
 // NewQueryAPI returns an initialized QueryAPI type.
 func NewQueryAPI(
 	logger log.Logger,
-	endpointSet *query.EndpointSet,
+	endpointStatus func() []query.EndpointStatus,
 	qe func(int64) *promql.Engine,
 	c query.QueryableCreator,
 	ruleGroups rules.UnaryClient,
@@ -146,7 +146,7 @@
 		enableMetricMetadataPartialResponse:    enableMetricMetadataPartialResponse,
 		enableExemplarPartialResponse:          enableExemplarPartialResponse,
 		replicaLabels:                          replicaLabels,
-		endpointSet:                            endpointSet,
+		endpointStatus:                         endpointStatus,
 		defaultRangeQueryStep:                  defaultRangeQueryStep,
 		defaultInstantQueryMaxSourceResolution: defaultInstantQueryMaxSourceResolution,
 		defaultMetadataTimeRange:               defaultMetadataTimeRange,
@@ -715,7 +715,11 @@ func (qapi *QueryAPI) labelNames(r *http.Request) (interface{}, []error, *api.ApiError) {

 func (qapi *QueryAPI) stores(_ *http.Request) (interface{}, []error, *api.ApiError) {
 	statuses := make(map[string][]query.EndpointStatus)
-	for _, status := range qapi.endpointSet.GetEndpointStatus() {
+	for _, status := range qapi.endpointStatus() {
+		// Don't consider an endpoint if we cannot retrieve component type.
+		if status.ComponentType == nil {
+			continue
+		}
 		statuses[status.ComponentType.String()] = append(statuses[status.ComponentType.String()], status)
 	}
 	return statuses, nil, nil
87 changes: 87 additions & 0 deletions pkg/api/query/v1_test.go
@@ -1201,6 +1201,93 @@ func TestMetadataEndpoints(t *testing.T) {
 	}
 }

+func TestStoresEndpoint(t *testing.T) {
+	apiWithNotEndpoints := &QueryAPI{
+		endpointStatus: func() []query.EndpointStatus {
+			return []query.EndpointStatus{}
+		},
+	}
+	apiWithValidEndpoints := &QueryAPI{
+		endpointStatus: func() []query.EndpointStatus {
+			return []query.EndpointStatus{
+				{
+					Name:          "endpoint-1",
+					ComponentType: component.Store,
+				},
+				{
+					Name:          "endpoint-2",
+					ComponentType: component.Store,
+				},
+				{
+					Name:          "endpoint-3",
+					ComponentType: component.Sidecar,
+				},
+			}
+		},
+	}
+	apiWithInvalidEndpoint := &QueryAPI{
+		endpointStatus: func() []query.EndpointStatus {
+			return []query.EndpointStatus{
+				{
+					Name:          "endpoint-1",
+					ComponentType: component.Store,
+				},
+				{
+					Name: "endpoint-2",
+				},
+			}
+		},
+	}
+
+	testCases := []endpointTestCase{
+		{
+			endpoint: apiWithNotEndpoints.stores,
+			method:   http.MethodGet,
+			response: map[string][]query.EndpointStatus{},
+		},
+		{
+			endpoint: apiWithValidEndpoints.stores,
+			method:   http.MethodGet,
+			response: map[string][]query.EndpointStatus{
+				"store": {
+					{
+						Name:          "endpoint-1",
+						ComponentType: component.Store,
+					},
+					{
+						Name:          "endpoint-2",
+						ComponentType: component.Store,
+					},
+				},
+				"sidecar": {
+					{
+						Name:          "endpoint-3",
+						ComponentType: component.Sidecar,
+					},
+				},
+			},
+		},
+		{
+			endpoint: apiWithInvalidEndpoint.stores,
+			method:   http.MethodGet,
+			response: map[string][]query.EndpointStatus{
+				"store": {
+					{
+						Name:          "endpoint-1",
+						ComponentType: component.Store,
+					},
+				},
+			},
+		},
+	}
+
+	for i, test := range testCases {
+		if ok := testEndpoint(t, test, strings.TrimSpace(fmt.Sprintf("#%d %s", i, test.query.Encode())), reflect.DeepEqual); !ok {
+			return
+		}
+	}
+}
+
 func TestParseTime(t *testing.T) {
 	ts, err := time.Parse(time.RFC3339Nano, "2015-06-03T13:21:58.555Z")
 	if err != nil {
2 changes: 1 addition & 1 deletion pkg/block/fetcher.go
@@ -660,9 +660,9 @@ func (f *DeduplicateFilter) DuplicateIDs() []ulid.ULID {

 func addNodeBySources(root, add *Node) bool {
 	var rootNode *Node
+	childSources := add.Compaction.Sources
 	for _, node := range root.Children {
 		parentSources := node.Compaction.Sources
-		childSources := add.Compaction.Sources

 		// Block exists with same sources, add as child.
 		if contains(parentSources, childSources) && contains(childSources, parentSources) {
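Two things are worth noting above: add.Compaction.Sources is loop-invariant, so hoisting it computes the slice once instead of on every iteration, and the mutual contains check tests that the parent's and child's source-ULID sets are equal. A sketch of that equality idea, where contains is a hypothetical stand-in over ints rather than the real Thanos helper over ULIDs:

package main

import "fmt"

// contains reports whether every element of sub appears in super.
func contains(super, sub []int) bool {
	set := make(map[int]bool, len(super))
	for _, v := range super {
		set[v] = true
	}
	for _, v := range sub {
		if !set[v] {
			return false
		}
	}
	return true
}

func main() {
	a, b := []int{1, 2, 3}, []int{3, 2, 1}
	// Mutual containment means the two slices hold the same set,
	// regardless of element order.
	fmt.Println(contains(a, b) && contains(b, a)) // true
}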