From 0438b230535f6870f306f634e941762934eece25 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Wed, 19 Apr 2023 00:13:43 +0800 Subject: [PATCH 1/4] Update mimir-prometheus vendor to d162bb51b6ef --- go.mod | 2 +- go.sum | 6 +- .../prometheus/prometheus/config/config.go | 18 +- .../prometheus/model/labels/labels.go | 24 +- .../prometheus/model/labels/labels_string.go | 21 +- .../prometheus/model/labels/matcher.go | 8 + .../prometheus/model/labels/regexp.go | 6 +- .../prometheus/model/relabel/relabel.go | 2 +- .../prometheus/notifier/notifier.go | 2 +- .../prometheus/prometheus/promql/engine.go | 408 ++++++++----- .../prometheus/prometheus/promql/functions.go | 485 ++++++++------- .../promql/parser/generated_parser.y | 2 +- .../promql/parser/generated_parser.y.go | 2 +- .../prometheus/prometheus/promql/quantile.go | 2 +- .../prometheus/prometheus/promql/test.go | 30 +- .../prometheus/prometheus/promql/value.go | 239 +++++--- .../prometheus/prometheus/rules/alerting.go | 16 +- .../prometheus/prometheus/rules/manager.go | 101 ++-- .../prometheus/prometheus/rules/recording.go | 2 +- .../prometheus/prometheus/scrape/scrape.go | 4 +- .../prometheus/prometheus/scrape/target.go | 4 +- .../prometheus/prometheus/storage/buffer.go | 558 +++++++++++++++--- .../prometheus/storage/remote/codec.go | 197 +++++-- .../prometheus/storage/remote/read.go | 2 +- .../storage/remote/write_handler.go | 2 +- .../prometheus/prometheus/storage/series.go | 23 +- .../prometheus/template/template.go | 2 +- .../prometheus/tsdb/chunkenc/chunk.go | 2 +- .../prometheus/prometheus/tsdb/compact.go | 128 ++-- .../prometheus/prometheus/tsdb/head.go | 49 +- .../prometheus/prometheus/tsdb/head_append.go | 4 +- .../prometheus/tsdb/index/postings.go | 14 +- .../prometheus/prometheus/tsdb/ooo_head.go | 4 +- .../prometheus/prometheus/tsdb/querier.go | 6 +- .../prometheus/tsdb/tsdbutil/chunks.go | 14 +- .../prometheus/util/jsonutil/marshal.go | 83 ++- .../prometheus/prometheus/web/api/v1/api.go | 8 +- .../prometheus/web/api/v1/json_codec.go | 163 ++--- vendor/modules.txt | 4 +- 39 files changed, 1759 insertions(+), 888 deletions(-) diff --git a/go.mod b/go.mod index 527b9e53e3f..8cbd38f841b 100644 --- a/go.mod +++ b/go.mod @@ -244,7 +244,7 @@ require ( ) // Using a fork of Prometheus with Mimir-specific changes. 
-replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20230417132058-c461e223418b +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20230418130726-d162bb51b6ef // Replace memberlist with our fork which includes some fixes that haven't been // merged upstream yet: diff --git a/go.sum b/go.sum index f75af07f697..bdc88b48b20 100644 --- a/go.sum +++ b/go.sum @@ -81,7 +81,7 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 h1:BMTdr+ib5ljLa9MxT github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= @@ -529,8 +529,8 @@ github.com/grafana/gomemcache v0.0.0-20230316202710-a081dae0aba9 h1:WB3bGH2f1UN6 github.com/grafana/gomemcache v0.0.0-20230316202710-a081dae0aba9/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/mimir-prometheus v0.0.0-20230417132058-c461e223418b h1:El+0EImX0ZSFKzy0NnvuFILGv3khlEnJmRSK58p5F9M= -github.com/grafana/mimir-prometheus v0.0.0-20230417132058-c461e223418b/go.mod h1:nOnMC6vOTtyXwYaSWAUtfqtdlxfxkQvW7niVgJqcJxY= +github.com/grafana/mimir-prometheus v0.0.0-20230418130726-d162bb51b6ef h1:jsrCY75xwUUrt9Xd2V6pP8lqhdlGrQSNzMw6dEYc2Ew= +github.com/grafana/mimir-prometheus v0.0.0-20230418130726-d162bb51b6ef/go.mod h1:7cY9a98yoTl0qTVsrHKAPzSurtekpMpjvOsG3T7OON4= github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 h1:A3dhViTeFDSQcGOXuUi6ukCQSMyDtDISBp2z6OOo2YM= github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index a29c98eed22..5c51d5a0d85 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -173,16 +173,16 @@ var ( // DefaultQueueConfig is the default remote queue configuration. DefaultQueueConfig = QueueConfig{ - // With a maximum of 200 shards, assuming an average of 100ms remote write - // time and 500 samples per batch, we will be able to push 1M samples/s. - MaxShards: 200, + // With a maximum of 50 shards, assuming an average of 100ms remote write + // time and 2000 samples per batch, we will be able to push 1M samples/s. 
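As a sanity check of the arithmetic in the comment above, a tiny standalone Go program (not part of the vendored patch; the 100ms send time is the assumption stated in the comment, not a measured value):

package main

import "fmt"

func main() {
	const (
		maxShards      = 50
		samplesPerSend = 2000.0
		sendSeconds    = 0.1 // assumed average time for one remote-write request
	)
	perShard := samplesPerSend / sendSeconds // 20,000 samples/s per shard
	fmt.Printf("%.0f samples/s at %d shards\n", perShard*maxShards, maxShards)
	// Prints: 1000000 samples/s at 50 shards
}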
+ MaxShards: 50, MinShards: 1, - MaxSamplesPerSend: 500, + MaxSamplesPerSend: 2000, - // Each shard will have a max of 2500 samples pending in its channel, plus the pending - // samples that have been enqueued. Theoretically we should only ever have about 3000 samples - // per shard pending. At 200 shards that's 600k. - Capacity: 2500, + // Each shard will have a max of 10,000 samples pending in its channel, plus the pending + // samples that have been enqueued. Theoretically we should only ever have about 12,000 samples + // per shard pending. At 50 shards that's 600k. + Capacity: 10000, BatchSendDeadline: model.Duration(5 * time.Second), // Backoff times for retrying a batch of samples on recoverable errors. @@ -194,7 +194,7 @@ var ( DefaultMetadataConfig = MetadataConfig{ Send: true, SendInterval: model.Duration(1 * time.Minute), - MaxSamplesPerSend: 500, + MaxSamplesPerSend: 2000, } // DefaultRemoteReadConfig is the default remote read configuration. diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index 8fc401564b0..b7398d17f97 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -570,24 +570,18 @@ func contains(s []Label, n string) bool { return false } -// Labels returns the labels from the builder, adding them to res if non-nil. -// Argument res can be the same as b.base, if caller wants to overwrite that slice. +// Labels returns the labels from the builder. // If no modifications were made, the original labels are returned. -func (b *Builder) Labels(res Labels) Labels { +func (b *Builder) Labels() Labels { if len(b.del) == 0 && len(b.add) == 0 { return b.base } - if res == nil { - // In the general case, labels are removed, modified or moved - // rather than added. - res = make(Labels, 0, len(b.base)) - } else { - res = res[:0] + expectedSize := len(b.base) + len(b.add) - len(b.del) + if expectedSize < 1 { + expectedSize = 1 } - // Justification that res can be the same slice as base: in this loop - // we move forward through base, and either skip an element or assign - // it to res at its current position or an earlier position. + res := make(Labels, 0, expectedSize) for _, l := range b.base { if slices.Contains(b.del, l.Name) || contains(b.add, l.Name) { continue @@ -637,3 +631,9 @@ func (b *ScratchBuilder) Labels() Labels { // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. return append([]Label{}, b.add...) } + +// Write the newly-built Labels out to ls. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. +func (b *ScratchBuilder) Overwrite(ls *Labels) { + *ls = append((*ls)[:0], b.add...) +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go index 560b643db31..ff46103ebcd 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go @@ -56,8 +56,14 @@ func (ls labelSlice) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } func (ls labelSlice) Less(i, j int) bool { return ls[i].Name < ls[j].Name } func decodeSize(data string, index int) (int, int) { - var size int - for shift := uint(0); ; shift += 7 { + // Fast-path for common case of a single byte, value 0..127. 
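The fast path added above works because the size prefix is a base-128 varint: seven bits per byte, least-significant group first, with the high bit marking continuation. A self-contained sketch of the scheme (encodeSize is hypothetical, included only so the example runs; the real decoder deliberately omits bounds checks and panics on malformed data):

package main

import "fmt"

// encodeSize writes n as a base-128 varint, low 7-bit group first.
func encodeSize(n int) []byte {
	var out []byte
	for n >= 0x80 {
		out = append(out, byte(n&0x7F)|0x80) // continuation bit set
		n >>= 7
	}
	return append(out, byte(n)) // final byte, high bit clear
}

// decodeSize mirrors the vendored function, including its single-byte fast path.
func decodeSize(data []byte, index int) (int, int) {
	b := data[index]
	index++
	if b < 0x80 { // fast path: values 0..127 fit in one byte
		return int(b), index
	}
	size := int(b & 0x7F)
	for shift := uint(7); ; shift += 7 {
		b := data[index]
		index++
		if b < 0x80 {
			return size | int(b)<<shift, index
		}
		size |= int(b&0x7F) << shift
	}
}

func main() {
	buf := encodeSize(300)
	n, next := decodeSize(buf, 0)
	fmt.Println(n, next) // 300 2
}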
+ b := data[index] + index++ + if b < 0x80 { + return int(b), index + } + size := int(b & 0x7F) + for shift := uint(7); ; shift += 7 { // Just panic if we go off the end of data, since all Labels strings are constructed internally and // malformed data indicates a bug, or memory corruption. b := data[index] @@ -158,7 +164,7 @@ func (ls Labels) MatchLabels(on bool, names ...string) Labels { b.Del(MetricName) b.Del(names...) } - return b.Labels(EmptyLabels()) + return b.Labels() } // Hash returns a hash value for the label set. @@ -624,10 +630,9 @@ func contains(s []Label, n string) bool { return false } -// Labels returns the labels from the builder, adding them to res if non-nil. -// Argument res can be the same as b.base, if caller wants to overwrite that slice. +// Labels returns the labels from the builder. // If no modifications were made, the original labels are returned. -func (b *Builder) Labels(res Labels) Labels { +func (b *Builder) Labels() Labels { if len(b.del) == 0 && len(b.add) == 0 { return b.base } @@ -637,7 +642,7 @@ func (b *Builder) Labels(res Labels) Labels { a, d := 0, 0 bufSize := len(b.base.data) + labelsSize(b.add) - buf := make([]byte, 0, bufSize) // TODO: see if we can re-use the buffer from res. + buf := make([]byte, 0, bufSize) for pos := 0; pos < len(b.base.data); { oldPos := pos var lName string @@ -812,7 +817,7 @@ func (b *ScratchBuilder) Labels() Labels { } // Write the newly-built Labels out to ls, reusing an internal buffer. -// Callers must ensure that there are no other references to ls. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. func (b *ScratchBuilder) Overwrite(ls *Labels) { size := labelsSize(b.add) if size <= cap(b.overwriteBuffer) { diff --git a/vendor/github.com/prometheus/prometheus/model/labels/matcher.go b/vendor/github.com/prometheus/prometheus/model/labels/matcher.go index 8a449af8c21..1282f80d639 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/matcher.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/matcher.go @@ -137,3 +137,11 @@ func (m *Matcher) Prefix() string { } return m.re.prefix } + +// IsRegexOptimized returns whether the regex is optimized. +func (m *Matcher) IsRegexOptimized() bool { + if m.re == nil { + return false + } + return m.re.IsOptimized() +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go index 980bd733242..52729c26c9a 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go @@ -151,11 +151,9 @@ func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool { } } -// isOptimized returns true if any fast-path optimization is applied to the +// IsOptimized returns true if any fast-path optimization is applied to the // regex matcher.
-// -//nolint:unused -func (m *FastRegexMatcher) isOptimized() bool { +func (m *FastRegexMatcher) IsOptimized() bool { return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || m.contains != "" } diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index 5ef79b4a7db..5027c3963cb 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -211,7 +211,7 @@ func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) if !ProcessBuilder(lb, cfgs...) { return labels.EmptyLabels(), false } - return lb.Labels(lbls), true + return lb.Labels(), true } // ProcessBuilder is like Process, but the caller passes a labels.Builder diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index 79697d07966..c3b2e5c7e0f 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -359,7 +359,7 @@ func (n *Manager) Send(alerts ...*Alert) { } }) - a.Labels = lb.Labels(a.Labels) + a.Labels = lb.Labels() } alerts = n.relabelAlerts(alerts) diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index ad30b155098..fae4f093201 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -189,7 +189,8 @@ func (q *query) Cancel() { // Close implements the Query interface. func (q *query) Close() { for _, s := range q.matrix { - putPointSlice(s.Points) + putFPointSlice(s.Floats) + putHPointSlice(s.Histograms) } } @@ -399,7 +400,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { } // NewInstantQuery returns an evaluation query for the given expression at the given time. -func (ng *Engine) NewInstantQuery(q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { +func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { expr, err := parser.ParseExpr(qs) if err != nil { return nil, err @@ -415,7 +416,7 @@ func (ng *Engine) NewInstantQuery(q storage.Queryable, opts *QueryOpts, qs strin // NewRangeQuery returns an evaluation query for the given time range and with // the resolution set by the interval. -func (ng *Engine) NewRangeQuery(q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { +func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { expr, err := parser.ParseExpr(qs) if err != nil { return nil, err @@ -680,11 +681,15 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval for i, s := range mat { // Point might have a different timestamp, force it to the evaluation // timestamp as that is when we ran the evaluation. 
- vector[i] = Sample{Metric: s.Metric, Point: Point{V: s.Points[0].V, H: s.Points[0].H, T: start}} + if len(s.Histograms) > 0 { + vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start} + } else { + vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start} + } } return vector, warnings, nil case parser.ValueTypeScalar: - return Scalar{V: mat[0].Points[0].V, T: start}, warnings, nil + return Scalar{V: mat[0].Floats[0].F, T: start}, warnings, nil case parser.ValueTypeMatrix: return mat, warnings, nil default: @@ -940,9 +945,10 @@ type errWithWarnings struct { func (e errWithWarnings) Error() string { return e.err.Error() } -// An evaluator evaluates given expressions over given fixed timestamps. It -// is attached to an engine through which it connects to a querier and reports -// errors. On timeout or cancellation of its context it terminates. +// An evaluator evaluates the given expressions over the given fixed +// timestamps. It is attached to an engine through which it connects to a +// querier and reports errors. On timeout or cancellation of its context it +// terminates. type evaluator struct { ctx context.Context @@ -1137,17 +1143,35 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } for si, series := range matrixes[i] { - for _, point := range series.Points { + for _, point := range series.Floats { + if point.T == ts { + if ev.currentSamples < ev.maxSamples { + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: point.F, T: ts}) + if prepSeries != nil { + bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) + } + + // Move input vectors forward so we don't have to re-scan the same + // past points at the next step. + matrixes[i][si].Floats = series.Floats[1:] + ev.currentSamples++ + } else { + ev.error(ErrTooManySamples(env)) + } + } + break + } + for _, point := range series.Histograms { if point.T == ts { if ev.currentSamples < ev.maxSamples { - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, Point: point}) + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: point.H, T: ts}) if prepSeries != nil { bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) } // Move input vectors forward so we don't have to re-scan the same // past points at the next step. 
- matrixes[i][si].Points = series.Points[1:] + matrixes[i][si].Histograms = series.Histograms[1:] ev.currentSamples++ } else { ev.error(ErrTooManySamples(env)) @@ -1184,8 +1208,11 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) if ev.endTimestamp == ev.startTimestamp { mat := make(Matrix, len(result)) for i, s := range result { - s.Point.T = ts - mat[i] = Series{Metric: s.Metric, Points: []Point{s.Point}} + if s.H == nil { + mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}} + } else { + mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}} + } } ev.currentSamples = originalNumSamples + mat.TotalSamples() ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -1197,22 +1224,28 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) h := sample.Metric.Hash() ss, ok := seriess[h] if !ok { - ss = Series{ - Metric: sample.Metric, - Points: getPointSlice(numSteps), + ss = Series{Metric: sample.Metric} + } + if sample.H == nil { + if ss.Floats == nil { + ss.Floats = getFPointSlice(numSteps) } + ss.Floats = append(ss.Floats, FPoint{T: ts, F: sample.F}) + } else { + if ss.Histograms == nil { + ss.Histograms = getHPointSlice(numSteps) + } + ss.Histograms = append(ss.Histograms, HPoint{T: ts, H: sample.H}) } - sample.Point.T = ts - ss.Points = append(ss.Points, sample.Point) seriess[h] = ss - } } // Reuse the original point slices. for _, m := range origMatrixes { for _, s := range m { - putPointSlice(s.Points) + putFPointSlice(s.Floats) + putHPointSlice(s.Histograms) } } // Assemble the output matrix. By the time we get here we know we don't have too many samples. @@ -1253,7 +1286,7 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele } totalSamples := 0 for _, s := range mat { - totalSamples += len(s.Points) + totalSamples += len(s.Floats) + len(s.Histograms) vs.Series = append(vs.Series, NewStorageSeries(s)) } return ms, totalSamples, ws @@ -1297,7 +1330,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { var param float64 if e.Param != nil { - param = v[0].(Vector)[0].V + param = v[0].(Vector)[0].F } return ev.aggregation(e.Op, sortedGrouping, e.Without, param, v[1].(Vector), sh[1], enh), nil }, e.Param, e.Expr) @@ -1396,7 +1429,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { stepRange = ev.interval } // Reuse objects across steps to save memory allocations. 
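Splitting the former Points buffer into float and histogram buffers keeps the pooling pattern used throughout this file. For intuition, a simplified version of that get/put pattern, with the standard library's sync.Pool standing in for the zeropool.Pool the patch uses (zeropool exists to avoid the allocation sync.Pool incurs when boxing slice headers):

package main

import (
	"fmt"
	"sync"
)

type FPoint struct {
	T int64
	F float64
}

var fPointPool = sync.Pool{New: func() any { return new([]FPoint) }}

// getFPointSlice returns a pooled slice if one is available, else allocates.
func getFPointSlice(sz int) []FPoint {
	p := fPointPool.Get().(*[]FPoint)
	if *p != nil {
		return *p
	}
	return make([]FPoint, 0, sz)
}

// putFPointSlice resets the slice and returns its backing array to the pool.
func putFPointSlice(p []FPoint) {
	if p != nil {
		p = p[:0]
		fPointPool.Put(&p)
	}
}

func main() {
	s := getFPointSlice(16)
	s = append(s, FPoint{T: 1, F: 2.5})
	putFPointSlice(s)
	s = getFPointSlice(16) // typically reuses the same backing array
	fmt.Println(len(s), cap(s) > 0)
}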
- points := getPointSlice(16) + var floats []FPoint + var histograms []HPoint inMatrix := make(Matrix, 1) inArgs[matrixArgIndex] = inMatrix enh := &EvalNodeHelper{Out: make(Vector, 0, 1)} @@ -1404,8 +1438,13 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { it := storage.NewBuffer(selRange) var chkIter chunkenc.Iterator for i, s := range selVS.Series { - ev.currentSamples -= len(points) - points = points[:0] + ev.currentSamples -= len(floats) + len(histograms) + if floats != nil { + floats = floats[:0] + } + if histograms != nil { + histograms = histograms[:0] + } chkIter = s.Iterator(chkIter) it.Reset(chkIter) metric := selVS.Series[i].Labels() @@ -1418,7 +1457,6 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } ss := Series{ Metric: metric, - Points: getPointSlice(numSteps), } inMatrix[0].Metric = selVS.Series[i].Labels() for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { @@ -1428,44 +1466,54 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { // when looking up the argument, as there will be no gaps. for j := range e.Args { if j != matrixArgIndex { - otherInArgs[j][0].V = otherArgs[j][0].Points[step].V + otherInArgs[j][0].F = otherArgs[j][0].Floats[step].F } } maxt := ts - offset mint := maxt - selRange // Evaluate the matrix selector for this series for this step. - points = ev.matrixIterSlice(it, mint, maxt, points) - if len(points) == 0 { + floats, histograms = ev.matrixIterSlice(it, mint, maxt, floats, histograms) + if len(floats)+len(histograms) == 0 { continue } - inMatrix[0].Points = points + inMatrix[0].Floats = floats + inMatrix[0].Histograms = histograms enh.Ts = ts // Make the function call. outVec := call(inArgs, e.Args, enh) - ev.samplesStats.IncrementSamplesAtStep(step, int64(len(points))) + ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+len(histograms))) enh.Out = outVec[:0] if len(outVec) > 0 { - ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, H: outVec[0].Point.H, T: ts}) + if outVec[0].H == nil { + if ss.Floats == nil { + ss.Floats = getFPointSlice(numSteps) + } + ss.Floats = append(ss.Floats, FPoint{F: outVec[0].F, T: ts}) + } else { + if ss.Histograms == nil { + ss.Histograms = getHPointSlice(numSteps) + } + ss.Histograms = append(ss.Histograms, HPoint{H: outVec[0].H, T: ts}) + } } // Only buffer stepRange milliseconds from the second step on. it.ReduceDelta(stepRange) } - if len(ss.Points) > 0 { - if ev.currentSamples+len(ss.Points) <= ev.maxSamples { + if len(ss.Floats)+len(ss.Histograms) > 0 { + if ev.currentSamples+len(ss.Floats)+len(ss.Histograms) <= ev.maxSamples { mat = append(mat, ss) - ev.currentSamples += len(ss.Points) + ev.currentSamples += len(ss.Floats) + len(ss.Histograms) } else { ev.error(ErrTooManySamples(env)) } - } else { - putPointSlice(ss.Points) } ev.samplesStats.UpdatePeak(ev.currentSamples) } ev.samplesStats.UpdatePeak(ev.currentSamples) - ev.currentSamples -= len(points) - putPointSlice(points) + ev.currentSamples -= len(floats) + len(histograms) + putFPointSlice(floats) + putHPointSlice(histograms) // The absent_over_time function returns 0 or 1 series. So far, the matrix // contains multiple series. The following code will create a new series @@ -1474,7 +1522,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { steps := int(1 + (ev.endTimestamp-ev.startTimestamp)/ev.interval) // Iterate once to look for a complete series. 
for _, s := range mat { - if len(s.Points) == steps { + if len(s.Floats)+len(s.Histograms) == steps { return Matrix{}, warnings } } @@ -1482,7 +1530,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { found := map[int64]struct{}{} for i, s := range mat { - for _, p := range s.Points { + for _, p := range s.Floats { + found[p.T] = struct{}{} + } + for _, p := range s.Histograms { found[p.T] = struct{}{} } if i > 0 && len(found) == steps { @@ -1490,17 +1541,17 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } } - newp := make([]Point, 0, steps-len(found)) + newp := make([]FPoint, 0, steps-len(found)) for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { if _, ok := found[ts]; !ok { - newp = append(newp, Point{T: ts, V: 1}) + newp = append(newp, FPoint{T: ts, F: 1}) } } return Matrix{ Series{ Metric: createLabelsForAbsentFunction(e.Args[0]), - Points: newp, + Floats: newp, }, }, warnings } @@ -1520,8 +1571,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { if e.Op == parser.SUB { for i := range mat { mat[i].Metric = dropMetricName(mat[i].Metric) - for j := range mat[i].Points { - mat[i].Points[j].V = -mat[i].Points[j].V + for j := range mat[i].Floats { + mat[i].Floats[j].F = -mat[i].Floats[j].F } } if mat.ContainsSameLabelset() { @@ -1534,8 +1585,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { switch lt, rt := e.LHS.Type(), e.RHS.Type(); { case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - val := scalarBinop(e.Op, v[0].(Vector)[0].Point.V, v[1].(Vector)[0].Point.V) - return append(enh.Out, Sample{Point: Point{V: val}}), nil + val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F) + return append(enh.Out, Sample{F: val}), nil }, e.LHS, e.RHS) case lt == parser.ValueTypeVector && rt == parser.ValueTypeVector: // Function to compute the join signature for each series. 
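The join signature is just a key derived from the labels listed in the matching clause: two sides of a binary operation match when those labels agree. A minimal illustration of the idea (plain maps stand in for labels.Labels; this mirrors the concept behind the engine's signature function, not its actual hashing code):

package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

// signature hashes the values of the given label names into a join key.
func signature(lbls map[string]string, on []string) uint64 {
	sort.Strings(on)
	h := fnv.New64a()
	for _, name := range on {
		h.Write([]byte(name))
		h.Write([]byte{0xff}) // separator, so "ab"+"c" != "a"+"bc"
		h.Write([]byte(lbls[name]))
		h.Write([]byte{0xff})
	}
	return h.Sum64()
}

func main() {
	lhs := map[string]string{"job": "api", "instance": "a", "code": "500"}
	rhs := map[string]string{"job": "api", "instance": "a", "method": "GET"}
	on := []string{"job", "instance"}
	fmt.Println(signature(lhs, on) == signature(rhs, on)) // true: the series join
}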
@@ -1565,18 +1616,18 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].Point.V}, false, e.ReturnBool, enh), nil + return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh), nil }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].Point.V}, true, e.ReturnBool, enh), nil + return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh), nil }, e.LHS, e.RHS) } case *parser.NumberLiteral: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return append(enh.Out, Sample{Point: Point{V: e.Val}, Metric: labels.EmptyLabels()}), nil + return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) case *parser.StringLiteral: @@ -1595,15 +1646,24 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { it.Reset(chkIter) ss := Series{ Metric: e.Series[i].Labels(), - Points: getPointSlice(numSteps), } for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { step++ - _, v, h, ok := ev.vectorSelectorSingle(it, e, ts) + _, f, h, ok := ev.vectorSelectorSingle(it, e, ts) if ok { if ev.currentSamples < ev.maxSamples { - ss.Points = append(ss.Points, Point{V: v, H: h, T: ts}) + if h == nil { + if ss.Floats == nil { + ss.Floats = getFPointSlice(numSteps) + } + ss.Floats = append(ss.Floats, FPoint{F: f, T: ts}) + } else { + if ss.Histograms == nil { + ss.Histograms = getHPointSlice(numSteps) + } + ss.Histograms = append(ss.Histograms, HPoint{H: h, T: ts}) + } ev.samplesStats.IncrementSamplesAtStep(step, 1) ev.currentSamples++ } else { @@ -1612,10 +1672,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } } - if len(ss.Points) > 0 { + if len(ss.Floats)+len(ss.Histograms) > 0 { mat = append(mat, ss) - } else { - putPointSlice(ss.Points) } } ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -1706,15 +1764,21 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { panic(fmt.Errorf("unexpected result in StepInvariantExpr evaluation: %T", expr)) } for i := range mat { - if len(mat[i].Points) != 1 { + if len(mat[i].Floats)+len(mat[i].Histograms) != 1 { panic(fmt.Errorf("unexpected number of samples")) } for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts = ts + ev.interval { - mat[i].Points = append(mat[i].Points, Point{ - T: ts, - V: mat[i].Points[0].V, - H: mat[i].Points[0].H, - }) + if len(mat[i].Floats) > 0 { + mat[i].Floats = append(mat[i].Floats, FPoint{ + T: ts, + F: mat[i].Floats[0].F, + }) + } else { + mat[i].Histograms = append(mat[i].Histograms, HPoint{ + T: ts, + H: mat[i].Histograms[0].H, + }) + } ev.currentSamples++ if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) @@ -1741,11 +1805,13 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect chkIter = s.Iterator(chkIter) it.Reset(chkIter) - t, v, h, ok := ev.vectorSelectorSingle(it, node, 
ts) + t, f, h, ok := ev.vectorSelectorSingle(it, node, ts) if ok { vec = append(vec, Sample{ Metric: node.Series[i].Labels(), - Point: Point{V: v, H: h, T: t}, + T: t, + F: f, + H: h, }) ev.currentSamples++ @@ -1795,17 +1861,35 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no return t, v, h, true } -var pointPool zeropool.Pool[[]Point] +var ( + fPointPool zeropool.Pool[[]FPoint] + hPointPool zeropool.Pool[[]HPoint] +) + +func getFPointSlice(sz int) []FPoint { + if p := fPointPool.Get(); p != nil { + return p + } + return make([]FPoint, 0, sz) +} + +func putFPointSlice(p []FPoint) { + if p != nil { + fPointPool.Put(p[:0]) + } +} -func getPointSlice(sz int) []Point { - if p := pointPool.Get(); p != nil { +func getHPointSlice(sz int) []HPoint { + if p := hPointPool.Get(); p != nil { return p } - return make([]Point, 0, sz) + return make([]HPoint, 0, sz) } -func putPointSlice(p []Point) { - pointPool.Put(p[:0]) +func putHPointSlice(p []HPoint) { + if p != nil { + hPointPool.Put(p[:0]) + } } // matrixSelector evaluates a *parser.MatrixSelector expression. @@ -1837,13 +1921,15 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag Metric: series[i].Labels(), } - ss.Points = ev.matrixIterSlice(it, mint, maxt, getPointSlice(16)) - ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, int64(len(ss.Points))) + ss.Floats, ss.Histograms = ev.matrixIterSlice(it, mint, maxt, nil, nil) + totalLen := int64(len(ss.Floats)) + int64(len(ss.Histograms)) + ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, totalLen) - if len(ss.Points) > 0 { + if totalLen > 0 { matrix = append(matrix, ss) } else { - putPointSlice(ss.Points) + putFPointSlice(ss.Floats) + putHPointSlice(ss.Histograms) } } return matrix, ws @@ -1857,24 +1943,54 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag // values). Any such points falling before mint are discarded; points that fall // into the [mint, maxt] range are retained; only points with later timestamps // are populated from the iterator. -func (ev *evaluator) matrixIterSlice(it *storage.BufferedSeriesIterator, mint, maxt int64, out []Point) []Point { - if len(out) > 0 && out[len(out)-1].T >= mint { +func (ev *evaluator) matrixIterSlice( + it *storage.BufferedSeriesIterator, mint, maxt int64, + floats []FPoint, histograms []HPoint, +) ([]FPoint, []HPoint) { + mintFloats, mintHistograms := mint, mint + + // First floats... + if len(floats) > 0 && floats[len(floats)-1].T >= mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; out[drop].T < mint; drop++ { + for drop = 0; floats[drop].T < mint; drop++ { } ev.currentSamples -= drop - copy(out, out[drop:]) - out = out[:len(out)-drop] + copy(floats, floats[drop:]) + floats = floats[:len(floats)-drop] // Only append points with timestamps after the last timestamp we have. - mint = out[len(out)-1].T + 1 + mintFloats = floats[len(floats)-1].T + 1 } else { - ev.currentSamples -= len(out) - out = out[:0] + ev.currentSamples -= len(floats) + if floats != nil { + floats = floats[:0] + } + } + + // ...then the same for histograms. TODO(beorn7): Use generics? 
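A sketch of what the TODO above could look like with Go generics: one helper that trims points before mint from either buffer, given a way to read a point's timestamp (all names here are hypothetical):

package main

import "fmt"

type FPoint struct {
	T int64
	F float64
}

// trimBefore drops leading points with T < mint, reusing the backing array,
// and reports how many were dropped (so sample counts can be adjusted).
func trimBefore[P any](pts []P, mint int64, ts func(P) int64) ([]P, int) {
	var drop int
	for drop < len(pts) && ts(pts[drop]) < mint {
		drop++
	}
	copy(pts, pts[drop:])
	return pts[:len(pts)-drop], drop
}

func main() {
	pts := []FPoint{{T: 1}, {T: 2}, {T: 5}, {T: 9}}
	pts, dropped := trimBefore(pts, 4, func(p FPoint) int64 { return p.T })
	fmt.Println(pts, dropped) // [{5 0} {9 0}] 2
}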
+ if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint { + // There is an overlap between previous and current ranges, retain common + // points. In most such cases: + // (a) the overlap is significantly larger than the eval step; and/or + // (b) the number of samples is relatively small. + // so a linear search will be as fast as a binary search. + var drop int + for drop = 0; histograms[drop].T < mint; drop++ { + } + ev.currentSamples -= drop + copy(histograms, histograms[drop:]) + histograms = histograms[:len(histograms)-drop] + // Only append points with timestamps after the last timestamp we have. + mintHistograms = histograms[len(histograms)-1].T + 1 + } else { + ev.currentSamples -= len(histograms) + if histograms != nil { + histograms = histograms[:0] + } } soughtValueType := it.Seek(maxt) @@ -1896,25 +2012,31 @@ loop: continue loop } // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mint { + if t >= mintHistograms { if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } ev.currentSamples++ - out = append(out, Point{T: t, H: h}) + if histograms == nil { + histograms = getHPointSlice(16) + } + histograms = append(histograms, HPoint{T: t, H: h}) } case chunkenc.ValFloat: - t, v := buf.At() - if value.IsStaleNaN(v) { + t, f := buf.At() + if value.IsStaleNaN(f) { continue loop } // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mint { + if t >= mintFloats { if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } ev.currentSamples++ - out = append(out, Point{T: t, V: v}) + if floats == nil { + floats = getFPointSlice(16) + } + floats = append(floats, FPoint{T: t, F: f}) } } } @@ -1926,21 +2048,27 @@ loop: if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } - out = append(out, Point{T: t, H: h}) + if histograms == nil { + histograms = getHPointSlice(16) + } + histograms = append(histograms, HPoint{T: t, H: h}) ev.currentSamples++ } case chunkenc.ValFloat: - t, v := it.At() - if t == maxt && !value.IsStaleNaN(v) { + t, f := it.At() + if t == maxt && !value.IsStaleNaN(f) { if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } - out = append(out, Point{T: t, V: v}) + if floats == nil { + floats = getFPointSlice(16) + } + floats = append(floats, FPoint{T: t, F: f}) ev.currentSamples++ } } ev.samplesStats.UpdatePeak(ev.currentSamples) - return out + return floats, histograms } func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { @@ -2086,18 +2214,18 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * } // Account for potentially swapped sidedness. - vl, vr := ls.V, rs.V + fl, fr := ls.F, rs.F hl, hr := ls.H, rs.H if matching.Card == parser.CardOneToMany { - vl, vr = vr, vl + fl, fr = fr, fl hl, hr = hr, hl } - value, histogramValue, keep := vectorElemBinop(op, vl, vr, hl, hr) + floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr) if returnBool { if keep { - value = 1.0 + floatValue = 1.0 } else { - value = 0.0 + floatValue = 0.0 } } else if !keep { continue @@ -2131,7 +2259,8 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * // Both lhs and rhs are of same type. 
enh.Out = append(enh.Out, Sample{ Metric: metric, - Point: Point{V: value, H: histogramValue}, + F: floatValue, + H: histogramValue, }) } } @@ -2192,7 +2321,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } } - ret := enh.lb.Labels(labels.EmptyLabels()) + ret := enh.lb.Labels() enh.resultMetric[str] = ret return ret } @@ -2200,7 +2329,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { for _, lhsSample := range lhs { - lv, rv := lhsSample.V, rhs.V + lv, rv := lhsSample.F, rhs.V // lhs always contains the Vector. If the original position was different // swap for calculating the value. if swap { @@ -2221,7 +2350,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala keep = true } if keep { - lhsSample.V = value + lhsSample.F = value if shouldDropMetricName(op) || returnBool { lhsSample.Metric = enh.DropMetricName(lhsSample.Metric) } @@ -2232,7 +2361,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala } func dropMetricName(l labels.Labels) labels.Labels { - return labels.NewBuilder(l).Del(labels.MetricName).Labels(labels.EmptyLabels()) + return labels.NewBuilder(l).Del(labels.MetricName).Labels() } // scalarBinop evaluates a binary operation between two Scalars. @@ -2313,7 +2442,7 @@ type groupedAggregation struct { hasFloat bool // Has at least 1 float64 sample aggregated. hasHistogram bool // Has at least 1 histogram sample aggregated. labels labels.Labels - value float64 + floatValue float64 histogramValue *histogram.FloatHistogram mean float64 groupCount int @@ -2365,8 +2494,8 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if op == parser.COUNT_VALUES { enh.resetBuilder(metric) - enh.lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) - metric = enh.lb.Labels(labels.EmptyLabels()) + enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64)) + metric = enh.lb.Labels() // We've changed the metric so we have to recompute the grouping key. recomputeGroupingKey = true @@ -2388,17 +2517,17 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if without { enh.lb.Del(grouping...) enh.lb.Del(labels.MetricName) - m = enh.lb.Labels(labels.EmptyLabels()) + m = enh.lb.Labels() } else if len(grouping) > 0 { enh.lb.Keep(grouping...) 
- m = enh.lb.Labels(labels.EmptyLabels()) + m = enh.lb.Labels() } else { m = labels.EmptyLabels() } newAgg := &groupedAggregation{ labels: m, - value: s.V, - mean: s.V, + floatValue: s.F, + mean: s.F, groupCount: 1, } if s.H == nil { @@ -2420,21 +2549,21 @@ } switch op { case parser.STDVAR, parser.STDDEV: - result[groupingKey].value = 0 + result[groupingKey].floatValue = 0 case parser.TOPK, parser.QUANTILE: result[groupingKey].heap = make(vectorByValueHeap, 1, resultSize) result[groupingKey].heap[0] = Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, } case parser.BOTTOMK: result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 1, resultSize) result[groupingKey].reverseHeap[0] = Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, } case parser.GROUP: - result[groupingKey].value = 1 + result[groupingKey].floatValue = 1 } continue } @@ -2459,19 +2588,19 @@ // point in copying the histogram in that case. } else { group.hasFloat = true - group.value += s.V + group.floatValue += s.F } case parser.AVG: group.groupCount++ if math.IsInf(group.mean, 0) { - if math.IsInf(s.V, 0) && (group.mean > 0) == (s.V > 0) { + if math.IsInf(s.F, 0) && (group.mean > 0) == (s.F > 0) { // The `mean` and `s.F` values are `Inf` of the same sign. They // can't be subtracted, but the value of `mean` is correct // already. break } - if !math.IsInf(s.V, 0) && !math.IsNaN(s.V) { + if !math.IsInf(s.F, 0) && !math.IsNaN(s.F) { // At this stage, the mean is infinite. If the added // value is neither an Inf nor a NaN, we can keep that mean // value. @@ -2482,19 +2611,19 @@ } } // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. - group.mean += s.V/float64(group.groupCount) - group.mean/float64(group.groupCount) + group.mean += s.F/float64(group.groupCount) - group.mean/float64(group.groupCount) case parser.GROUP: // Do nothing. Required to avoid the panic in `default:` below. case parser.MAX: - if group.value < s.V || math.IsNaN(group.value) { - group.value = s.V + if group.floatValue < s.F || math.IsNaN(group.floatValue) { + group.floatValue = s.F } case parser.MIN: - if group.value > s.V || math.IsNaN(group.value) { - group.value = s.V + if group.floatValue > s.F || math.IsNaN(group.floatValue) { + group.floatValue = s.F } case parser.COUNT, parser.COUNT_VALUES: @@ -2502,21 +2631,21 @@ case parser.STDVAR, parser.STDDEV: group.groupCount++ - delta := s.V - group.mean + delta := s.F - group.mean group.mean += delta / float64(group.groupCount) - group.value += delta * (s.V - group.mean) + group.floatValue += delta * (s.F - group.mean) case parser.TOPK: // We build a heap of up to k elements, with the smallest element at heap[0]. if int64(len(group.heap)) < k { heap.Push(&group.heap, &Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, }) - } else if group.heap[0].V < s.V || (math.IsNaN(group.heap[0].V) && !math.IsNaN(s.V)) { + } else if group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)) { // This new element is bigger than the previous smallest element - overwrite that.
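The TOPK branch keeps a bounded min-heap: the smallest retained sample sits at heap[0], so a new sample enters only by overwriting that root and restoring the heap invariant. A standalone illustration with plain float64s and the standard library's container/heap:

package main

import (
	"container/heap"
	"fmt"
)

type minHeap []float64

func (h minHeap) Len() int           { return len(h) }
func (h minHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h minHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *minHeap) Push(x any)        { *h = append(*h, x.(float64)) }
func (h *minHeap) Pop() any {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// topk retains the k largest values seen, in heap (not sorted) order.
func topk(k int, vals []float64) []float64 {
	h := make(minHeap, 0, k)
	for _, v := range vals {
		switch {
		case len(h) < k:
			heap.Push(&h, v)
		case h[0] < v:
			h[0] = v        // overwrite the smallest retained value
			heap.Fix(&h, 0) // restore the heap invariant
		}
	}
	return h
}

func main() {
	fmt.Println(topk(3, []float64{5, 1, 9, 3, 7, 2})) // [5 7 9] (heap order)
}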
group.heap[0] = Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, } if k > 1 { @@ -2528,13 +2657,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without // We build a heap of up to k elements, with the biggest element at heap[0]. if int64(len(group.reverseHeap)) < k { heap.Push(&group.reverseHeap, &Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, }) - } else if group.reverseHeap[0].V > s.V || (math.IsNaN(group.reverseHeap[0].V) && !math.IsNaN(s.V)) { + } else if group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)) { // This new element is smaller than the previous biggest element - overwrite that. group.reverseHeap[0] = Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, } if k > 1 { @@ -2554,16 +2683,16 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without for _, aggr := range orderedResult { switch op { case parser.AVG: - aggr.value = aggr.mean + aggr.floatValue = aggr.mean case parser.COUNT, parser.COUNT_VALUES: - aggr.value = float64(aggr.groupCount) + aggr.floatValue = float64(aggr.groupCount) case parser.STDVAR: - aggr.value = aggr.value / float64(aggr.groupCount) + aggr.floatValue = aggr.floatValue / float64(aggr.groupCount) case parser.STDDEV: - aggr.value = math.Sqrt(aggr.value / float64(aggr.groupCount)) + aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount)) case parser.TOPK: // The heap keeps the lowest value on top, so reverse it. @@ -2573,7 +2702,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without for _, v := range aggr.heap { enh.Out = append(enh.Out, Sample{ Metric: v.Metric, - Point: Point{V: v.V}, + F: v.F, }) } continue // Bypass default append. @@ -2586,13 +2715,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without for _, v := range aggr.reverseHeap { enh.Out = append(enh.Out, Sample{ Metric: v.Metric, - Point: Point{V: v.V}, + F: v.F, }) } continue // Bypass default append. case parser.QUANTILE: - aggr.value = quantile(q, aggr.heap) + aggr.floatValue = quantile(q, aggr.heap) case parser.SUM: if aggr.hasFloat && aggr.hasHistogram { @@ -2605,7 +2734,8 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without enh.Out = append(enh.Out, Sample{ Metric: aggr.labels, - Point: Point{V: aggr.value, H: aggr.histogramValue}, + F: aggr.floatValue, + H: aggr.histogramValue, }) } return enh.Out diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index 3da38ea0f3a..fd99703df22 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -54,9 +54,9 @@ type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNo // === time() float64 === func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return Vector{Sample{Point: Point{ - V: float64(enh.Ts) / 1000, - }}} + return Vector{Sample{ + F: float64(enh.Ts) / 1000, + }} } // extrapolatedRate is a utility function for rate/increase/delta. 
@@ -67,65 +67,71 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( - samples = vals[0].(Matrix)[0] - rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) - rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) - resultValue float64 - resultHistogram *histogram.FloatHistogram + samples = vals[0].(Matrix)[0] + rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) + rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) + resultFloat float64 + resultHistogram *histogram.FloatHistogram + firstT, lastT int64 + numSamplesMinusOne int ) - // No sense in trying to compute a rate without at least two points. Drop - // this Vector element. - if len(samples.Points) < 2 { + // We need either at least two Histograms and no Floats, or at least two + // Floats and no Histograms to calculate a rate. Otherwise, drop this + // Vector element. + if len(samples.Histograms) > 0 && len(samples.Floats) > 0 { + // Mix of histograms and floats. TODO(beorn7): Communicate this failure reason. return enh.Out } - if samples.Points[0].H != nil { - resultHistogram = histogramRate(samples.Points, isCounter) + switch { + case len(samples.Histograms) > 1: + numSamplesMinusOne = len(samples.Histograms) - 1 + firstT = samples.Histograms[0].T + lastT = samples.Histograms[numSamplesMinusOne].T + resultHistogram = histogramRate(samples.Histograms, isCounter) if resultHistogram == nil { - // Points are a mix of floats and histograms, or the histograms - // are not compatible with each other. - // TODO(beorn7): find a way of communicating the exact reason + // The histograms are not compatible with each other. + // TODO(beorn7): Communicate this failure reason. return enh.Out } - } else { - resultValue = samples.Points[len(samples.Points)-1].V - samples.Points[0].V - prevValue := samples.Points[0].V - // We have to iterate through everything even in the non-counter - // case because we have to check that everything is a float. - // TODO(beorn7): Find a way to check that earlier, e.g. by - // handing in a []FloatPoint and a []HistogramPoint separately. - for _, currPoint := range samples.Points[1:] { - if currPoint.H != nil { - return nil // Range contains a mix of histograms and floats. - } - if !isCounter { - continue - } - if currPoint.V < prevValue { - resultValue += prevValue + case len(samples.Floats) > 1: + numSamplesMinusOne = len(samples.Floats) - 1 + firstT = samples.Floats[0].T + lastT = samples.Floats[numSamplesMinusOne].T + resultFloat = samples.Floats[numSamplesMinusOne].F - samples.Floats[0].F + if !isCounter { + break + } + // Handle counter resets: + prevValue := samples.Floats[0].F + for _, currPoint := range samples.Floats[1:] { + if currPoint.F < prevValue { + resultFloat += prevValue } - prevValue = currPoint.V + prevValue = currPoint.F } + default: + // Not enough samples. TODO(beorn7): Communicate this failure reason. + return enh.Out } // Duration between first/last samples and boundary of range. 
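A worked sketch of the extrapolation arithmetic that follows, with made-up numbers: six samples spanning 50s of a 60s window, 5s gaps at both boundaries, and a raw increase of 100. The 1.1x cutoff mirrors the threshold the real function uses to decide whether a boundary gap looks like a regular scrape gap:

package main

import "fmt"

func main() {
	var (
		rangeSeconds  = 60.0
		firstT, lastT = 5.0, 55.0 // sample timestamps, relative to range start
		rawIncrease   = 100.0
		numSamples    = 6.0
	)
	sampledInterval := lastT - firstT                // 50s covered by samples
	avgBetween := sampledInterval / (numSamples - 1) // 10s between samples
	durationToStart := firstT                        // 5s gap before first sample
	durationToEnd := rangeSeconds - lastT            // 5s gap after last sample

	// Extrapolate all the way to a boundary only when the gap is at most
	// roughly one scrape interval; otherwise assume half an interval.
	extend := func(gap float64) float64 {
		if gap > avgBetween*1.1 {
			return avgBetween / 2
		}
		return gap
	}
	extrapolated := sampledInterval + extend(durationToStart) + extend(durationToEnd)
	rate := rawIncrease / sampledInterval * extrapolated / rangeSeconds
	fmt.Printf("rate = %g/s\n", rate) // 2: slope 100/50s, extrapolated to the full 60s
}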
- durationToStart := float64(samples.Points[0].T-rangeStart) / 1000 - durationToEnd := float64(rangeEnd-samples.Points[len(samples.Points)-1].T) / 1000 + durationToStart := float64(firstT-rangeStart) / 1000 + durationToEnd := float64(rangeEnd-lastT) / 1000 - sampledInterval := float64(samples.Points[len(samples.Points)-1].T-samples.Points[0].T) / 1000 - averageDurationBetweenSamples := sampledInterval / float64(len(samples.Points)-1) + sampledInterval := float64(lastT-firstT) / 1000 + averageDurationBetweenSamples := sampledInterval / float64(numSamplesMinusOne) // TODO(beorn7): Do this for histograms, too. - if isCounter && resultValue > 0 && samples.Points[0].V >= 0 { - // Counters cannot be negative. If we have any slope at - // all (i.e. resultValue went up), we can extrapolate - // the zero point of the counter. If the duration to the - // zero point is shorter than the durationToStart, we - // take the zero point as the start of the series, - // thereby avoiding extrapolation to negative counter - // values. - durationToZero := sampledInterval * (samples.Points[0].V / resultValue) + if isCounter && resultFloat > 0 && len(samples.Floats) > 0 && samples.Floats[0].F >= 0 { + // Counters cannot be negative. If we have any slope at all + // (i.e. resultFloat went up), we can extrapolate the zero point + // of the counter. If the duration to the zero point is shorter + // than the durationToStart, we take the zero point as the start + // of the series, thereby avoiding extrapolation to negative + // counter values. + durationToZero := sampledInterval * (samples.Floats[0].F / resultFloat) if durationToZero < durationToStart { durationToStart = durationToZero } @@ -153,21 +159,19 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod factor /= ms.Range.Seconds() } if resultHistogram == nil { - resultValue *= factor + resultFloat *= factor } else { resultHistogram.Scale(factor) } - return append(enh.Out, Sample{ - Point: Point{V: resultValue, H: resultHistogram}, - }) + return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}) } // histogramRate is a helper function for extrapolatedRate. It requires // points[0] to be a histogram. It returns nil if any other Point in points is // not a histogram. -func histogramRate(points []Point, isCounter bool) *histogram.FloatHistogram { - prev := points[0].H // We already know that this is a histogram. +func histogramRate(points []HPoint, isCounter bool) *histogram.FloatHistogram { + prev := points[0].H last := points[len(points)-1].H if last == nil { return nil // Range contains a mix of histograms and floats. @@ -243,19 +247,19 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { samples := vals[0].(Matrix)[0] // No sense in trying to compute a rate without at least two points. Drop // this Vector element. - if len(samples.Points) < 2 { + if len(samples.Floats) < 2 { return out } - lastSample := samples.Points[len(samples.Points)-1] - previousSample := samples.Points[len(samples.Points)-2] + lastSample := samples.Floats[len(samples.Floats)-1] + previousSample := samples.Floats[len(samples.Floats)-2] var resultValue float64 - if isRate && lastSample.V < previousSample.V { + if isRate && lastSample.F < previousSample.F { // Counter reset. 
- resultValue = lastSample.V + resultValue = lastSample.F } else { - resultValue = lastSample.V - previousSample.V + resultValue = lastSample.F - previousSample.F } sampledInterval := lastSample.T - previousSample.T @@ -269,9 +273,7 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { resultValue /= float64(sampledInterval) / 1000 } - return append(out, Sample{ - Point: Point{V: resultValue}, - }) + return append(out, Sample{F: resultValue}) } // Calculate the trend value at the given index i in raw data d. @@ -300,10 +302,10 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode samples := vals[0].(Matrix)[0] // The smoothing factor argument. - sf := vals[1].(Vector)[0].V + sf := vals[1].(Vector)[0].F // The trend factor argument. - tf := vals[2].(Vector)[0].V + tf := vals[2].(Vector)[0].F // Check that the input parameters are valid. if sf <= 0 || sf >= 1 { @@ -313,7 +315,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode panic(fmt.Errorf("invalid trend factor. Expected: 0 < tf < 1, got: %f", tf)) } - l := len(samples.Points) + l := len(samples.Floats) // Can't do the smoothing operation with less than two points. if l < 2 { @@ -322,15 +324,15 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode var s0, s1, b float64 // Set initial values. - s1 = samples.Points[0].V - b = samples.Points[1].V - samples.Points[0].V + s1 = samples.Floats[0].F + b = samples.Floats[1].F - samples.Floats[0].F // Run the smoothing operation. var x, y float64 for i := 1; i < l; i++ { // Scale the raw value against the smoothing factor. - x = sf * samples.Points[i].V + x = sf * samples.Floats[i].F // Scale the last smoothed value with the trend at this point. 
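For intuition, a compact standalone version of the double exponential smoothing this function implements: sf blends each observation into the level, tf blends each level change into the trend (an equivalent recurrence with simplified bookkeeping, not the vendored code):

package main

import "fmt"

// holtWinters returns the smoothed value after the last sample.
func holtWinters(data []float64, sf, tf float64) float64 {
	s := data[0]
	b := data[1] - data[0]
	for i := 1; i < len(data); i++ {
		prevS := s
		s = sf*data[i] + (1-sf)*(s+b) // level: observation vs. trend-adjusted forecast
		b = tf*(s-prevS) + (1-tf)*b   // trend: latest level change vs. previous trend
	}
	return s
}

func main() {
	// A perfectly linear series is tracked exactly: the result is 5.
	fmt.Println(holtWinters([]float64{1, 2, 3, 4, 5}, 0.5, 0.5))
}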
b = calcTrendValue(i-1, tf, s0, s1, b) @@ -339,9 +341,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode s0, s1 = s1, x+y } - return append(enh.Out, Sample{ - Point: Point{V: s1}, - }) + return append(enh.Out, Sample{F: s1}) } // === sort(node parser.ValueTypeVector) Vector === @@ -365,15 +365,15 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === clamp(Vector parser.ValueTypeVector, min, max Scalar) Vector === func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].Point.V - max := vals[2].(Vector)[0].Point.V + min := vals[1].(Vector)[0].F + max := vals[2].(Vector)[0].F if max < min { return enh.Out } for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: math.Max(min, math.Min(max, el.V))}, + F: math.Max(min, math.Min(max, el.F)), }) } return enh.Out @@ -382,11 +382,11 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // === clamp_max(Vector parser.ValueTypeVector, max Scalar) Vector === func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { vec := vals[0].(Vector) - max := vals[1].(Vector)[0].Point.V + max := vals[1].(Vector)[0].F for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: math.Min(max, el.V)}, + F: math.Min(max, el.F), }) } return enh.Out @@ -395,11 +395,11 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === clamp_min(Vector parser.ValueTypeVector, min Scalar) Vector === func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].Point.V + min := vals[1].(Vector)[0].F for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: math.Max(min, el.V)}, + F: math.Max(min, el.F), }) } return enh.Out @@ -412,16 +412,16 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // Ties are solved by rounding up. toNearest := float64(1) if len(args) >= 2 { - toNearest = vals[1].(Vector)[0].Point.V + toNearest = vals[1].(Vector)[0].F } // Invert as it seems to cause fewer floating point accuracy issues. 
toNearestInverse := 1.0 / toNearest for _, el := range vec { - v := math.Floor(el.V*toNearestInverse+0.5) / toNearestInverse + f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: v}, + F: f, }) } return enh.Out @@ -431,37 +431,38 @@ func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { v := vals[0].(Vector) if len(v) != 1 { - return append(enh.Out, Sample{ - Point: Point{V: math.NaN()}, - }) + return append(enh.Out, Sample{F: math.NaN()}) } - return append(enh.Out, Sample{ - Point: Point{V: v[0].V}, - }) + return append(enh.Out, Sample{F: v[0].F}) } -func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func([]Point) float64) Vector { +func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { el := vals[0].(Matrix)[0] - return append(enh.Out, Sample{ - Point: Point{V: aggrFn(el.Points)}, - }) + return append(enh.Out, Sample{F: aggrFn(el)}) } // === avg_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. avg_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { var mean, count, c float64 - for _, v := range values { + for _, f := range s.Floats { count++ if math.IsInf(mean, 0) { - if math.IsInf(v.V, 0) && (mean > 0) == (v.V > 0) { - // The `mean` and `v.V` values are `Inf` of the same sign. They + if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) { + // The `mean` and `f.F` values are `Inf` of the same sign. They // can't be subtracted, but the value of `mean` is correct // already. continue } - if !math.IsInf(v.V, 0) && !math.IsNaN(v.V) { + if !math.IsInf(f.F, 0) && !math.IsNaN(f.F) { // At this stage, the mean is infinite. If the added // value is neither an Inf nor a NaN, we can keep that mean // value.
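The compensated addition above comes from kahanSumInc, which several *_over_time functions in this file share. A standalone sketch showing why the correction term matters (the helper body mirrors the vendored one; the driver is illustrative):

package main

import (
	"fmt"
	"math"
)

// kahanSumInc adds inc to sum while carrying a compensation term c that
// captures the low-order bits lost to floating-point rounding.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	if math.Abs(sum) >= math.Abs(inc) {
		c += (sum - t) + inc
	} else {
		c += (inc - t) + sum
	}
	return t, c
}

func main() {
	var sum, c float64
	sum, c = kahanSumInc(1e16, sum, c)
	for i := 0; i < 10; i++ {
		sum, c = kahanSumInc(1, sum, c)
	}
	// Naively, each +1 is lost because the spacing between floats at 1e16 is 2.
	fmt.Printf("naive: %.0f compensated: %.0f\n", sum, sum+c)
}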
@@ -471,7 +472,7 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode continue } } - mean, c = kahanSumInc(v.V/count-mean/count, mean, c) + mean, c = kahanSumInc(f.F/count-mean/count, mean, c) } if math.IsInf(mean, 0) { @@ -483,8 +484,8 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === count_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - return float64(len(values)) + return aggrOverTime(vals, enh, func(s Series) float64 { + return float64(len(s.Floats) + len(s.Histograms)) }) } @@ -492,19 +493,42 @@ func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNo func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { el := vals[0].(Matrix)[0] + var f FPoint + if len(el.Floats) > 0 { + f = el.Floats[len(el.Floats)-1] + } + + var h HPoint + if len(el.Histograms) > 0 { + h = el.Histograms[len(el.Histograms)-1] + } + + if h.H == nil || h.T < f.T { + return append(enh.Out, Sample{ + Metric: el.Metric, + F: f.F, + }) + } return append(enh.Out, Sample{ Metric: el.Metric, - Point: Point{V: el.Points[len(el.Points)-1].V}, + H: h.H, }) } // === max_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - max := values[0].V - for _, v := range values { - if v.V > max || math.IsNaN(max) { - max = v.V + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. max_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { + max := s.Floats[0].F + for _, f := range s.Floats { + if f.F > max || math.IsNaN(max) { + max = f.F } } return max @@ -513,11 +537,18 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === min_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - min := values[0].V - for _, v := range values { - if v.V < min || math.IsNaN(min) { - min = v.V + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. min_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { + min := s.Floats[0].F + for _, f := range s.Floats { + if f.F < min || math.IsNaN(min) { + min = f.F } } return min @@ -526,10 +557,17 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === sum_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. sum_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. 
+ return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { var sum, c float64 - for _, v := range values { - sum, c = kahanSumInc(v.V, sum, c) + for _, f := range s.Floats { + sum, c = kahanSumInc(f.F, sum, c) } if math.IsInf(sum, 0) { return sum @@ -540,29 +578,41 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === quantile_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - q := vals[0].(Vector)[0].V + q := vals[0].(Vector)[0].F el := vals[1].(Matrix)[0] + if len(el.Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. quantile_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out + } - values := make(vectorByValueHeap, 0, len(el.Points)) - for _, v := range el.Points { - values = append(values, Sample{Point: Point{V: v.V}}) + values := make(vectorByValueHeap, 0, len(el.Floats)) + for _, f := range el.Floats { + values = append(values, Sample{F: f.F}) } - return append(enh.Out, Sample{ - Point: Point{V: quantile(q, values)}, - }) + return append(enh.Out, Sample{F: quantile(q, values)}) } // === stddev_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. stddev_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 var mean, cMean float64 var aux, cAux float64 - for _, v := range values { + for _, f := range s.Floats { count++ - delta := v.V - (mean + cMean) + delta := f.F - (mean + cMean) mean, cMean = kahanSumInc(delta/count, mean, cMean) - aux, cAux = kahanSumInc(delta*(v.V-(mean+cMean)), aux, cAux) + aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return math.Sqrt((aux + cAux) / count) }) @@ -570,15 +620,22 @@ func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN // === stdvar_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. stdvar_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. 
+ return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 var mean, cMean float64 var aux, cAux float64 - for _, v := range values { + for _, f := range s.Floats { count++ - delta := v.V - (mean + cMean) + delta := f.F - (mean + cMean) mean, cMean = kahanSumInc(delta/count, mean, cMean) - aux, cAux = kahanSumInc(delta*(v.V-(mean+cMean)), aux, cAux) + aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return (aux + cAux) / count }) @@ -592,7 +649,7 @@ func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe return append(enh.Out, Sample{ Metric: createLabelsForAbsentFunction(args[0]), - Point: Point{V: 1}, + F: 1, }) } @@ -602,25 +659,24 @@ func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe // Due to engine optimization, this function is only called when this condition is true. // Then, the engine post-processes the results to get the expected output. func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return append(enh.Out, - Sample{ - Point: Point{V: 1}, - }) + return append(enh.Out, Sample{F: 1}) } // === present_over_time(Vector parser.ValueTypeMatrix) Vector === func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + return aggrOverTime(vals, enh, func(s Series) float64 { return 1 }) } func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { for _, el := range vals[0].(Vector) { - enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(el.Metric), - Point: Point{V: f(el.V)}, - }) + if el.H == nil { // Process only float samples. + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(el.Metric), + F: f(el.F), + }) + } } return enh.Out } @@ -741,9 +797,7 @@ func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) // === pi() Scalar === func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return Vector{Sample{Point: Point{ - V: math.Pi, - }}} + return Vector{Sample{F: math.Pi}} } // === sgn(Vector parser.ValueTypeVector) Vector === @@ -764,7 +818,7 @@ func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: float64(el.T) / 1000}, + F: float64(el.T) / 1000, }) } return enh.Out @@ -793,7 +847,7 @@ func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { // linearRegression performs a least-square linear regression analysis on the // provided SamplePairs. It returns the slope, and the intercept value at the // provided time. -func linearRegression(samples []Point, interceptTime int64) (slope, intercept float64) { +func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept float64) { var ( n float64 sumX, cX float64 @@ -803,18 +857,18 @@ func linearRegression(samples []Point, interceptTime int64) (slope, intercept fl initY float64 constY bool ) - initY = samples[0].V + initY = samples[0].F constY = true for i, sample := range samples { // Set constY to false if any new y values are encountered. 
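		// (constY is used after the loop to return a slope of exactly 0
		// for a constant series, where floating point noise in the
		// accumulated sums could otherwise produce a tiny spurious
		// slope.)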
- if constY && i > 0 && sample.V != initY { + if constY && i > 0 && sample.F != initY { constY = false } n += 1.0 x := float64(sample.T-interceptTime) / 1e3 sumX, cX = kahanSumInc(x, sumX, cX) - sumY, cY = kahanSumInc(sample.V, sumY, cY) - sumXY, cXY = kahanSumInc(x*sample.V, sumXY, cXY) + sumY, cY = kahanSumInc(sample.F, sumY, cY) + sumXY, cXY = kahanSumInc(x*sample.F, sumXY, cXY) sumX2, cX2 = kahanSumInc(x*x, sumX2, cX2) } if constY { @@ -842,33 +896,29 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // No sense in trying to compute a derivative without at least two points. // Drop this Vector element. - if len(samples.Points) < 2 { + if len(samples.Floats) < 2 { return enh.Out } // We pass in an arbitrary timestamp that is near the values in use // to avoid floating point accuracy issues, see // https://github.com/prometheus/prometheus/issues/2674 - slope, _ := linearRegression(samples.Points, samples.Points[0].T) - return append(enh.Out, Sample{ - Point: Point{V: slope}, - }) + slope, _ := linearRegression(samples.Floats, samples.Floats[0].T) + return append(enh.Out, Sample{F: slope}) } // === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) Vector === func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { samples := vals[0].(Matrix)[0] - duration := vals[1].(Vector)[0].V + duration := vals[1].(Vector)[0].F // No sense in trying to predict anything without at least two points. // Drop this Vector element. - if len(samples.Points) < 2 { + if len(samples.Floats) < 2 { return enh.Out } - slope, intercept := linearRegression(samples.Points, enh.Ts) + slope, intercept := linearRegression(samples.Floats, enh.Ts) - return append(enh.Out, Sample{ - Point: Point{V: slope*duration + intercept}, - }) + return append(enh.Out, Sample{F: slope*duration + intercept}) } // === histogram_count(Vector parser.ValueTypeVector) Vector === @@ -882,7 +932,7 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN } enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(sample.Metric), - Point: Point{V: sample.H.Count}, + F: sample.H.Count, }) } return enh.Out @@ -899,7 +949,7 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod } enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(sample.Metric), - Point: Point{V: sample.H.Sum}, + F: sample.H.Sum, }) } return enh.Out @@ -907,8 +957,8 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod // === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - lower := vals[0].(Vector)[0].V - upper := vals[1].(Vector)[0].V + lower := vals[0].(Vector)[0].F + upper := vals[1].(Vector)[0].F inVec := vals[2].(Vector) for _, sample := range inVec { @@ -918,7 +968,7 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev } enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(sample.Metric), - Point: Point{V: histogramFraction(lower, upper, sample.H)}, + F: histogramFraction(lower, upper, sample.H), }) } return enh.Out @@ -926,7 +976,7 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev // === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh 
*EvalNodeHelper) Vector { - q := vals[0].(Vector)[0].V + q := vals[0].(Vector)[0].F inVec := vals[1].(Vector) if enh.signatureToMetricWithBuckets == nil { @@ -960,12 +1010,12 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev if !ok { sample.Metric = labels.NewBuilder(sample.Metric). Del(excludedLabels...). - Labels(labels.EmptyLabels()) + Labels() mb = &metricWithBuckets{sample.Metric, nil} enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb } - mb.buckets = append(mb.buckets, bucket{upperBound, sample.V}) + mb.buckets = append(mb.buckets, bucket{upperBound, sample.F}) } @@ -985,7 +1035,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(sample.Metric), - Point: Point{V: histogramQuantile(q, sample.H)}, + F: histogramQuantile(q, sample.H), }) } @@ -993,7 +1043,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev if len(mb.buckets) > 0 { enh.Out = append(enh.Out, Sample{ Metric: mb.metric, - Point: Point{V: bucketQuantile(q, mb.buckets)}, + F: bucketQuantile(q, mb.buckets), }) } } @@ -1003,40 +1053,55 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev // === resets(Matrix parser.ValueTypeMatrix) Vector === func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - samples := vals[0].(Matrix)[0] - + floats := vals[0].(Matrix)[0].Floats + histograms := vals[0].(Matrix)[0].Histograms resets := 0 - prev := samples.Points[0].V - for _, sample := range samples.Points[1:] { - current := sample.V - if current < prev { - resets++ + + if len(floats) > 1 { + prev := floats[0].F + for _, sample := range floats[1:] { + current := sample.F + if current < prev { + resets++ + } + prev = current } - prev = current } - return append(enh.Out, Sample{ - Point: Point{V: float64(resets)}, - }) + if len(histograms) > 1 { + prev := histograms[0].H + for _, sample := range histograms[1:] { + current := sample.H + if current.DetectReset(prev) { + resets++ + } + prev = current + } + } + + return append(enh.Out, Sample{F: float64(resets)}) } // === changes(Matrix parser.ValueTypeMatrix) Vector === func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - samples := vals[0].(Matrix)[0] - + floats := vals[0].(Matrix)[0].Floats changes := 0 - prev := samples.Points[0].V - for _, sample := range samples.Points[1:] { - current := sample.V + + if len(floats) == 0 { + // TODO(beorn7): Only histogram values, still need to add support. 
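+		// (Note the asymmetry with resets above: a counter reset can be
+		// detected on histograms via DetectReset, while a histogram-only
+		// series currently produces no changes() result at all.)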
+ return enh.Out + } + + prev := floats[0].F + for _, sample := range floats[1:] { + current := sample.F if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) { changes++ } prev = current } - return append(enh.Out, Sample{ - Point: Point{V: float64(changes)}, - }) + return append(enh.Out, Sample{F: float64(changes)}) } // === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) Vector === @@ -1080,14 +1145,15 @@ func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNod if len(res) > 0 { lb.Set(dst, string(res)) } - outMetric = lb.Labels(labels.EmptyLabels()) + outMetric = lb.Labels() enh.Dmn[h] = outMetric } } enh.Out = append(enh.Out, Sample{ Metric: outMetric, - Point: Point{V: el.Point.V}, + F: el.F, + H: el.H, }) } return enh.Out @@ -1098,7 +1164,7 @@ func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe return append(enh.Out, Sample{ Metric: labels.Labels{}, - Point: Point{V: vals[0].(Vector)[0].V}, + F: vals[0].(Vector)[0].F, }) } @@ -1148,13 +1214,14 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe lb.Set(dst, strval) } - outMetric = lb.Labels(labels.EmptyLabels()) + outMetric = lb.Labels() enh.Dmn[h] = outMetric } enh.Out = append(enh.Out, Sample{ Metric: outMetric, - Point: Point{V: el.Point.V}, + F: el.F, + H: el.H, }) } return enh.Out @@ -1166,15 +1233,15 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo return append(enh.Out, Sample{ Metric: labels.Labels{}, - Point: Point{V: f(time.Unix(enh.Ts/1000, 0).UTC())}, + F: f(time.Unix(enh.Ts/1000, 0).UTC()), }) } for _, el := range vals[0].(Vector) { - t := time.Unix(int64(el.V), 0).UTC() + t := time.Unix(int64(el.F), 0).UTC() enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: f(t)}, + F: f(t), }) } return enh.Out @@ -1332,10 +1399,20 @@ func (s vectorByValueHeap) Len() int { } func (s vectorByValueHeap) Less(i, j int) bool { - if math.IsNaN(s[i].V) { + // We compare histograms based on their sum of observations. + // TODO(beorn7): Is that what we want? + vi, vj := s[i].F, s[j].F + if s[i].H != nil { + vi = s[i].H.Sum + } + if s[j].H != nil { + vj = s[j].H.Sum + } + + if math.IsNaN(vi) { return true } - return s[i].V < s[j].V + return vi < vj } func (s vectorByValueHeap) Swap(i, j int) { @@ -1361,10 +1438,20 @@ func (s vectorByReverseValueHeap) Len() int { } func (s vectorByReverseValueHeap) Less(i, j int) bool { - if math.IsNaN(s[i].V) { + // We compare histograms based on their sum of observations. + // TODO(beorn7): Is that what we want? 
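+	// For example, under this ordering a histogram sample with Sum 5 sorts
+	// between float samples with values 4 and 6, and NaN still sorts first.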
+ vi, vj := s[i].F, s[j].F + if s[i].H != nil { + vi = s[i].H.Sum + } + if s[j].H != nil { + vj = s[j].H.Sum + } + + if math.IsNaN(vi) { return true } - return s[i].V > s[j].V + return vi > vj } func (s vectorByReverseValueHeap) Swap(i, j int) { @@ -1414,7 +1501,7 @@ func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels { } } - return b.Labels(labels.EmptyLabels()) + return b.Labels() } func stringFromArg(e parser.Expr) string { diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index 461e854ac1a..b1c604eeca5 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -567,7 +567,7 @@ label_matcher : IDENTIFIER match_op STRING */ metric : metric_identifier label_set - { b := labels.NewBuilder($2); b.Set(labels.MetricName, $1.Val); $$ = b.Labels(labels.EmptyLabels()) } + { b := labels.NewBuilder($2); b.Set(labels.MetricName, $1.Val); $$ = b.Labels() } | label_set {$$ = $1} ; diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go index 3a0e8bf69bf..2cf3e06b9a7 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go @@ -1494,7 +1494,7 @@ yydefault: { b := labels.NewBuilder(yyDollar[2].labels) b.Set(labels.MetricName, yyDollar[1].item.Val) - yyVAL.labels = b.Labels(labels.EmptyLabels()) + yyVAL.labels = b.Labels() } case 96: yyDollar = yyS[yypt-1 : yypt+1] diff --git a/vendor/github.com/prometheus/prometheus/promql/quantile.go b/vendor/github.com/prometheus/prometheus/promql/quantile.go index 1561a2ce880..aaead671c7a 100644 --- a/vendor/github.com/prometheus/prometheus/promql/quantile.go +++ b/vendor/github.com/prometheus/prometheus/promql/quantile.go @@ -382,5 +382,5 @@ func quantile(q float64, values vectorByValueHeap) float64 { upperIndex := math.Min(n-1, lowerIndex+1) weight := rank - math.Floor(rank) - return values[int(lowerIndex)].V*(1-weight) + values[int(upperIndex)].V*weight + return values[int(lowerIndex)].F*(1-weight) + values[int(upperIndex)].F*weight } diff --git a/vendor/github.com/prometheus/prometheus/promql/test.go b/vendor/github.com/prometheus/prometheus/promql/test.go index 78cc1e9fbb1..64fa66d5d3f 100644 --- a/vendor/github.com/prometheus/prometheus/promql/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/test.go @@ -281,7 +281,7 @@ func (*evalCmd) testCmd() {} type loadCmd struct { gap time.Duration metrics map[uint64]labels.Labels - defs map[uint64][]Point + defs map[uint64][]FPoint exemplars map[uint64][]exemplar.Exemplar } @@ -289,7 +289,7 @@ func newLoadCmd(gap time.Duration) *loadCmd { return &loadCmd{ gap: gap, metrics: map[uint64]labels.Labels{}, - defs: map[uint64][]Point{}, + defs: map[uint64][]FPoint{}, exemplars: map[uint64][]exemplar.Exemplar{}, } } @@ -302,13 +302,13 @@ func (cmd loadCmd) String() string { func (cmd *loadCmd) set(m labels.Labels, vals ...parser.SequenceValue) { h := m.Hash() - samples := make([]Point, 0, len(vals)) + samples := make([]FPoint, 0, len(vals)) ts := testStartTime for _, v := range vals { if !v.Omitted { - samples = append(samples, Point{ + samples = append(samples, FPoint{ T: ts.UnixNano() / int64(time.Millisecond/time.Nanosecond), - V: v.Value, + F: v.Value, }) } ts = 
ts.Add(cmd.gap) @@ -323,7 +323,7 @@ func (cmd *loadCmd) append(a storage.Appender) error { m := cmd.metrics[h] for _, s := range smpls { - if _, err := a.Append(0, m, s.T, s.V); err != nil { + if _, err := a.Append(0, m, s.T, s.F); err != nil { return err } } @@ -399,8 +399,8 @@ func (ev *evalCmd) compareResult(result parser.Value) error { if ev.ordered && exp.pos != pos+1 { return fmt.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric, exp.vals, exp.pos, pos+1) } - if !almostEqual(exp.vals[0].Value, v.V) { - return fmt.Errorf("expected %v for %s but got %v", exp.vals[0].Value, v.Metric, v.V) + if !almostEqual(exp.vals[0].Value, v.F) { + return fmt.Errorf("expected %v for %s but got %v", exp.vals[0].Value, v.Metric, v.F) } seen[fp] = true @@ -409,7 +409,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error { if !seen[fp] { fmt.Println("vector result", len(val), ev.expr) for _, ss := range val { - fmt.Println(" ", ss.Metric, ss.Point) + fmt.Println(" ", ss.Metric, ss.T, ss.F) } return fmt.Errorf("expected metric %s with %v not found", ev.metrics[fp], expVals) } @@ -538,7 +538,7 @@ func (t *Test) exec(tc testCommand) error { } queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...) for _, iq := range queries { - q, err := t.QueryEngine().NewInstantQuery(t.storage, nil, iq.expr, iq.evalTime) + q, err := t.QueryEngine().NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime) if err != nil { return err } @@ -560,7 +560,7 @@ func (t *Test) exec(tc testCommand) error { // Check query returns same result in range mode, // by checking against the middle step. - q, err = t.queryEngine.NewRangeQuery(t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute) + q, err = t.queryEngine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute) if err != nil { return err } @@ -576,15 +576,15 @@ func (t *Test) exec(tc testCommand) error { mat := rangeRes.Value.(Matrix) vec := make(Vector, 0, len(mat)) for _, series := range mat { - for _, point := range series.Points { + for _, point := range series.Floats { if point.T == timeMilliseconds(iq.evalTime) { - vec = append(vec, Sample{Metric: series.Metric, Point: point}) + vec = append(vec, Sample{Metric: series.Metric, T: point.T, F: point.F}) break } } } if _, ok := res.Value.(Scalar); ok { - err = cmd.compareResult(Scalar{V: vec[0].Point.V}) + err = cmd.compareResult(Scalar{V: vec[0].F}) } else { err = cmd.compareResult(vec) } @@ -763,7 +763,7 @@ func (ll *LazyLoader) appendTill(ts int64) error { ll.loadCmd.defs[h] = smpls[i:] break } - if _, err := app.Append(0, m, s.T, s.V); err != nil { + if _, err := app.Append(0, m, s.T, s.F); err != nil { return err } if i == len(smpls)-1 { diff --git a/vendor/github.com/prometheus/prometheus/promql/value.go b/vendor/github.com/prometheus/prometheus/promql/value.go index 91904dda25a..f59a25112b4 100644 --- a/vendor/github.com/prometheus/prometheus/promql/value.go +++ b/vendor/github.com/prometheus/prometheus/promql/value.go @@ -17,6 +17,7 @@ import ( "encoding/json" "errors" "fmt" + "math" "strconv" "strings" @@ -64,76 +65,72 @@ func (s Scalar) MarshalJSON() ([]byte, error) { // Series is a stream of data points belonging to a metric. 
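+// For instance, a mixed series holding two float samples and one histogram
+// sample could be constructed as (a sketch using the fields defined below):
+//
+//	Series{
+//		Metric:     labels.FromStrings("__name__", "requests"),
+//		Floats:     []FPoint{{T: 1000, F: 1}, {T: 2000, F: 2}},
+//		Histograms: []HPoint{{T: 3000, H: &histogram.FloatHistogram{Count: 3, Sum: 9}}},
+//	}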
type Series struct { - Metric labels.Labels - Points []Point + Metric labels.Labels `json:"metric"` + Floats []FPoint `json:"values,omitempty"` + Histograms []HPoint `json:"histograms,omitempty"` } func (s Series) String() string { - vals := make([]string, len(s.Points)) - for i, v := range s.Points { - vals[i] = v.String() + // TODO(beorn7): This currently renders floats first and then + // histograms, each sorted by timestamp. Maybe, in mixed series, that's + // fine. Maybe, however, primary sorting by timestamp is preferred, in + // which case this has to be changed. + vals := make([]string, 0, len(s.Floats)+len(s.Histograms)) + for _, f := range s.Floats { + vals = append(vals, f.String()) + } + for _, h := range s.Histograms { + vals = append(vals, h.String()) } return fmt.Sprintf("%s =>\n%s", s.Metric, strings.Join(vals, "\n")) } -// MarshalJSON is mirrored in web/api/v1/api.go for efficiency reasons. -// This implementation is still provided for debug purposes and usage -// without jsoniter. -func (s Series) MarshalJSON() ([]byte, error) { - // Note that this is rather inefficient because it re-creates the whole - // series, just separated by Histogram Points and Value Points. For API - // purposes, there is a more efficient jsoniter implementation in - // web/api/v1/api.go. - series := struct { - M labels.Labels `json:"metric"` - V []Point `json:"values,omitempty"` - H []Point `json:"histograms,omitempty"` - }{ - M: s.Metric, - } - for _, p := range s.Points { - if p.H == nil { - series.V = append(series.V, p) - continue - } - series.H = append(series.H, p) - } - return json.Marshal(series) +// FPoint represents a single float data point for a given timestamp. +type FPoint struct { + T int64 + F float64 +} + +func (p FPoint) String() string { + s := strconv.FormatFloat(p.F, 'f', -1, 64) + return fmt.Sprintf("%s @[%v]", s, p.T) +} + +// MarshalJSON implements json.Marshaler. +// +// JSON marshaling is only needed for the HTTP API. Since FPoint is such a +// frequently marshaled type, it gets an optimized treatment directly in +// web/api/v1/api.go. Therefore, this method is unused within Prometheus. It is +// still provided here as convenience for debugging and for other users of this +// code. Also note that the different marshaling implementations might lead to +// slightly different results in terms of formatting and rounding of the +// timestamp. +func (p FPoint) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(p.F, 'f', -1, 64) + return json.Marshal([...]interface{}{float64(p.T) / 1000, v}) } -// Point represents a single data point for a given timestamp. -// If H is not nil, then this is a histogram point and only (T, H) is valid. -// If H is nil, then only (T, V) is valid. -type Point struct { +// HPoint represents a single histogram data point for a given timestamp. +// H must never be nil. +type HPoint struct { T int64 - V float64 H *histogram.FloatHistogram } -func (p Point) String() string { - var s string - if p.H != nil { - s = p.H.String() - } else { - s = strconv.FormatFloat(p.V, 'f', -1, 64) - } - return fmt.Sprintf("%s @[%v]", s, p.T) +func (p HPoint) String() string { + return fmt.Sprintf("%s @[%v]", p.H.String(), p.T) } // MarshalJSON implements json.Marshaler. // -// JSON marshaling is only needed for the HTTP API. Since Point is such a +// JSON marshaling is only needed for the HTTP API. Since HPoint is such a // frequently marshaled type, it gets an optimized treatment directly in // web/api/v1/api.go. 
Therefore, this method is unused within Prometheus. It is
// still provided here as convenience for debugging and for other users of this
// code. Also note that the different marshaling implementations might lead to
// slightly different results in terms of formatting and rounding of the
// timestamp.
-func (p Point) MarshalJSON() ([]byte, error) {
-	if p.H == nil {
-		v := strconv.FormatFloat(p.V, 'f', -1, 64)
-		return json.Marshal([...]interface{}{float64(p.T) / 1000, v})
-	}
+func (p HPoint) MarshalJSON() ([]byte, error) {
 	h := struct {
 		Count string `json:"count"`
 		Sum   string `json:"sum"`
@@ -171,42 +168,54 @@ func (p Point) MarshalJSON() ([]byte, error) {
 	return json.Marshal([...]interface{}{float64(p.T) / 1000, h})
 }
 
-// Sample is a single sample belonging to a metric.
+// Sample is a single sample belonging to a metric. It represents either a float
+// sample or a histogram sample. If H is nil, it is a float sample. Otherwise,
+// it is a histogram sample.
 type Sample struct {
-	Point
+	T int64
+	F float64
+	H *histogram.FloatHistogram
 
 	Metric labels.Labels
 }
 
 func (s Sample) String() string {
-	return fmt.Sprintf("%s => %s", s.Metric, s.Point)
+	var str string
+	if s.H == nil {
+		p := FPoint{T: s.T, F: s.F}
+		str = p.String()
+	} else {
+		p := HPoint{T: s.T, H: s.H}
+		str = p.String()
+	}
+	return fmt.Sprintf("%s => %s", s.Metric, str)
 }
 
-// MarshalJSON is mirrored in web/api/v1/api.go with jsoniter because Point
-// wouldn't be marshaled with jsoniter in all cases otherwise.
+// MarshalJSON is mirrored in web/api/v1/api.go with jsoniter because FPoint and
+// HPoint wouldn't be marshaled with jsoniter otherwise.
 func (s Sample) MarshalJSON() ([]byte, error) {
-	if s.Point.H == nil {
-		v := struct {
+	if s.H == nil {
+		f := struct {
 			M labels.Labels `json:"metric"`
-			V Point         `json:"value"`
+			F FPoint        `json:"value"`
 		}{
 			M: s.Metric,
-			V: s.Point,
+			F: FPoint{T: s.T, F: s.F},
 		}
-		return json.Marshal(v)
+		return json.Marshal(f)
 	}
 	h := struct {
 		M labels.Labels `json:"metric"`
-		H Point         `json:"histogram"`
+		H HPoint        `json:"histogram"`
 	}{
 		M: s.Metric,
-		H: s.Point,
+		H: HPoint{T: s.T, H: s.H},
 	}
 	return json.Marshal(h)
 }
 
-// Vector is basically only an alias for model.Samples, but the
-// contract is that in a Vector, all Samples have the same timestamp.
+// Vector is basically only an alias for []Sample, but the contract is that
+// in a Vector, all Samples have the same timestamp.
 type Vector []Sample
 
 func (vec Vector) String() string {
@@ -258,7 +267,7 @@ func (m Matrix) String() string {
 func (m Matrix) TotalSamples() int {
 	numSamples := 0
 	for _, series := range m {
-		numSamples += len(series.Points)
+		numSamples += len(series.Floats) + len(series.Histograms)
 	}
 	return numSamples
 }
@@ -362,7 +371,8 @@ func (ss *StorageSeries) Labels() labels.Labels {
 	return ss.series.Metric
 }
 
-// Iterator returns a new iterator of the data of the series.
+// Iterator returns a new iterator of the data of the series. In case of
+// multiple samples with the same timestamp, it returns the float samples first.
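+// For example, with floats at t=1 and t=2 and histograms at t=2 and t=3, the
+// iterator yields the float at 1, the float at 2, the histogram at 2, and
+// finally the histogram at 3.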
func (ss *StorageSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { if ssi, ok := it.(*storageSeriesIterator); ok { ssi.reset(ss.series) @@ -372,44 +382,51 @@ func (ss *StorageSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { } type storageSeriesIterator struct { - points []Point - curr int + floats []FPoint + histograms []HPoint + iFloats, iHistograms int + currT int64 + currF float64 + currH *histogram.FloatHistogram } func newStorageSeriesIterator(series Series) *storageSeriesIterator { return &storageSeriesIterator{ - points: series.Points, - curr: -1, + floats: series.Floats, + histograms: series.Histograms, + iFloats: -1, + iHistograms: 0, + currT: math.MinInt64, } } func (ssi *storageSeriesIterator) reset(series Series) { - ssi.points = series.Points - ssi.curr = -1 + ssi.floats = series.Floats + ssi.histograms = series.Histograms + ssi.iFloats = -1 + ssi.iHistograms = 0 + ssi.currT = math.MinInt64 + ssi.currF = 0 + ssi.currH = nil } func (ssi *storageSeriesIterator) Seek(t int64) chunkenc.ValueType { - i := ssi.curr - if i < 0 { - i = 0 + if ssi.iFloats >= len(ssi.floats) && ssi.iHistograms >= len(ssi.histograms) { + return chunkenc.ValNone } - for ; i < len(ssi.points); i++ { - p := ssi.points[i] - if p.T >= t { - ssi.curr = i - if p.H != nil { - return chunkenc.ValFloatHistogram - } - return chunkenc.ValFloat + for ssi.currT < t { + if ssi.Next() == chunkenc.ValNone { + return chunkenc.ValNone } } - ssi.curr = len(ssi.points) - 1 - return chunkenc.ValNone + if ssi.currH != nil { + return chunkenc.ValFloatHistogram + } + return chunkenc.ValFloat } func (ssi *storageSeriesIterator) At() (t int64, v float64) { - p := ssi.points[ssi.curr] - return p.T, p.V + return ssi.currT, ssi.currF } func (ssi *storageSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { @@ -417,25 +434,59 @@ func (ssi *storageSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { } func (ssi *storageSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { - p := ssi.points[ssi.curr] - return p.T, p.H + return ssi.currT, ssi.currH } func (ssi *storageSeriesIterator) AtT() int64 { - p := ssi.points[ssi.curr] - return p.T + return ssi.currT } func (ssi *storageSeriesIterator) Next() chunkenc.ValueType { - ssi.curr++ - if ssi.curr >= len(ssi.points) { - return chunkenc.ValNone + if ssi.currH != nil { + ssi.iHistograms++ + } else { + ssi.iFloats++ } - p := ssi.points[ssi.curr] - if p.H != nil { + var ( + pickH, pickF = false, false + floatsExhausted = ssi.iFloats >= len(ssi.floats) + histogramsExhausted = ssi.iHistograms >= len(ssi.histograms) + ) + + switch { + case floatsExhausted: + if histogramsExhausted { // Both exhausted! + return chunkenc.ValNone + } + pickH = true + case histogramsExhausted: // and floats not exhausted. + pickF = true + // From here on, we have to look at timestamps. + case ssi.histograms[ssi.iHistograms].T < ssi.floats[ssi.iFloats].T: + // Next histogram comes before next float. + pickH = true + default: + // In all other cases, we pick float so that we first iterate + // through floats if the timestamp is the same. 
+		pickF = true
+	}
+
+	switch {
+	case pickF:
+		p := ssi.floats[ssi.iFloats]
+		ssi.currT = p.T
+		ssi.currF = p.F
+		ssi.currH = nil
+		return chunkenc.ValFloat
+	case pickH:
+		p := ssi.histograms[ssi.iHistograms]
+		ssi.currT = p.T
+		ssi.currF = 0
+		ssi.currH = p.H
 		return chunkenc.ValFloatHistogram
+	default:
+		panic("storageSeriesIterator.Next failed to pick value type")
 	}
-	return chunkenc.ValFloat
 }
 
 func (ssi *storageSeriesIterator) Err() error {
diff --git a/vendor/github.com/prometheus/prometheus/rules/alerting.go b/vendor/github.com/prometheus/prometheus/rules/alerting.go
index 2f4de0f3d9d..e517753a8ec 100644
--- a/vendor/github.com/prometheus/prometheus/rules/alerting.go
+++ b/vendor/github.com/prometheus/prometheus/rules/alerting.go
@@ -234,8 +234,9 @@ func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample {
 	lb.Set(alertStateLabel, alert.State.String())
 
 	s := promql.Sample{
-		Metric: lb.Labels(labels.EmptyLabels()),
-		Point:  promql.Point{T: timestamp.FromTime(ts), V: 1},
+		Metric: lb.Labels(),
+		T:      timestamp.FromTime(ts),
+		F:      1,
 	}
 	return s
 }
@@ -252,8 +253,9 @@ func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) pro
 	lb.Set(labels.AlertName, r.name)
 
 	s := promql.Sample{
-		Metric: lb.Labels(labels.EmptyLabels()),
-		Point:  promql.Point{T: timestamp.FromTime(ts), V: v},
+		Metric: lb.Labels(),
+		T:      timestamp.FromTime(ts),
+		F:      v,
 	}
 	return s
 }
@@ -339,7 +341,7 @@ func (r *AlertingRule) Eval(ctx context.Context, evalDelay time.Duration, ts tim
 		// Provide the alert information to the template.
 		l := smpl.Metric.Map()
 
-		tmplData := template.AlertTemplateData(l, r.externalLabels, r.externalURL, smpl.V)
+		tmplData := template.AlertTemplateData(l, r.externalLabels, r.externalURL, smpl.F)
 		// Inject some convenience variables that are easier to remember for users
 		// who are not used to Go's templating system.
 		defs := []string{
@@ -381,7 +383,7 @@ func (r *AlertingRule) Eval(ctx context.Context, evalDelay time.Duration, ts tim
 		})
 		annotations := sb.Labels()
 
-		lbs := lb.Labels(labels.EmptyLabels())
+		lbs := lb.Labels()
 		h := lbs.Hash()
 		resultFPs[h] = struct{}{}
 
@@ -394,7 +396,7 @@ func (r *AlertingRule) Eval(ctx context.Context, evalDelay time.Duration, ts tim
 			Annotations: annotations,
 			ActiveAt:    ts,
 			State:       StatePending,
-			Value:       smpl.V,
+			Value:       smpl.F,
 		}
 	}
diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go
index 9845d7c78d2..578ee38a2e3 100644
--- a/vendor/github.com/prometheus/prometheus/rules/manager.go
+++ b/vendor/github.com/prometheus/prometheus/rules/manager.go
@@ -189,7 +189,7 @@ type QueryFunc func(ctx context.Context, q string, t time.Time) (promql.Vector,
 // It converts scalar into vector results.
 func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc {
 	return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
-		q, err := engine.NewInstantQuery(q, nil, qs, t)
+		q, err := engine.NewInstantQuery(ctx, q, nil, qs, t)
 		if err != nil {
 			return nil, err
 		}
@@ -202,7 +202,8 @@ func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc {
 			return v, nil
 		case promql.Scalar:
 			return promql.Vector{promql.Sample{
-				Point:  promql.Point{T: v.T, V: v.V},
+				T:      v.T,
+				F:      v.V,
 				Metric: labels.Labels{},
 			}}, nil
 		default:
@@ -256,7 +257,8 @@ type Group struct {
 	opts                 *ManagerOptions
 	mtx                  sync.Mutex
 	evaluationTime       time.Duration
-	lastEvaluation       time.Time
+	lastEvaluation       time.Time // Wall-clock time of most recent evaluation.
+ lastEvalTimestamp time.Time // Time slot used for most recent evaluation. shouldRestore bool @@ -269,13 +271,19 @@ type Group struct { metrics *Metrics - ruleGroupPostProcessFunc RuleGroupPostProcessFunc + // Rule group evaluation iteration function, + // defaults to DefaultEvalIterationFunc. + evalIterationFunc GroupEvalIterationFunc + alignEvaluationTimeOnInterval bool } -// This function will be used before each rule group evaluation if not nil. -// Use this function type if the rule group post processing is needed. -type RuleGroupPostProcessFunc func(g *Group, lastEvalTimestamp time.Time, log log.Logger) error +// GroupEvalIterationFunc is used to implement and extend rule group +// evaluation iteration logic. It is configured in Group.evalIterationFunc, +// and periodically invoked at each group evaluation interval to +// evaluate the rules in the group at that point in time. +// DefaultEvalIterationFunc is the default implementation. +type GroupEvalIterationFunc func(ctx context.Context, g *Group, evalTimestamp time.Time) type GroupOptions struct { Name, File string @@ -287,7 +295,7 @@ type GroupOptions struct { Opts *ManagerOptions EvaluationDelay *time.Duration done chan struct{} - RuleGroupPostProcessFunc RuleGroupPostProcessFunc + EvalIterationFunc GroupEvalIterationFunc AlignEvaluationTimeOnInterval bool } @@ -309,6 +317,11 @@ func NewGroup(o GroupOptions) *Group { metrics.GroupSamples.WithLabelValues(key) metrics.GroupInterval.WithLabelValues(key).Set(o.Interval.Seconds()) + evalIterationFunc := o.EvalIterationFunc + if evalIterationFunc == nil { + evalIterationFunc = DefaultEvalIterationFunc + } + return &Group{ name: o.Name, file: o.File, @@ -325,7 +338,7 @@ func NewGroup(o GroupOptions) *Group { terminated: make(chan struct{}), logger: log.With(o.Opts.Logger, "file", o.File, "group", o.Name), metrics: metrics, - ruleGroupPostProcessFunc: o.RuleGroupPostProcessFunc, + evalIterationFunc: evalIterationFunc, alignEvaluationTimeOnInterval: o.AlignEvaluationTimeOnInterval, } } @@ -355,6 +368,8 @@ func (g *Group) Limit() int { return g.limit } // If it's empty or nil, then the owning user/tenant is considered to be the source tenant. func (g *Group) SourceTenants() []string { return g.sourceTenants } +func (g *Group) Logger() log.Logger { return g.logger } + func (g *Group) run(ctx context.Context) { defer close(g.terminated) @@ -373,18 +388,6 @@ func (g *Group) run(ctx context.Context) { }, }) - iter := func() { - g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Inc() - - start := time.Now() - g.Eval(ctx, evalTimestamp) - timeSinceStart := time.Since(start) - - g.metrics.IterationDuration.Observe(timeSinceStart.Seconds()) - g.setEvaluationTime(timeSinceStart) - g.setLastEvaluation(start) - } - // The assumption here is that since the ticker was started after having // waited for `evalTimestamp` to pass, the ticks will trigger soon // after each `evalTimestamp + N * g.interval` occurrence. @@ -414,7 +417,7 @@ func (g *Group) run(ctx context.Context) { }(time.Now()) }() - iter() + g.evalIterationFunc(ctx, g, evalTimestamp) if g.shouldRestore { // If we have to restore, we wait for another Eval to finish. 
// The reason behind this is, during first eval (or before it) @@ -430,7 +433,7 @@ func (g *Group) run(ctx context.Context) { g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed)) } evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval) - iter() + g.evalIterationFunc(ctx, g, evalTimestamp) } g.RestoreForState(time.Now()) @@ -453,21 +456,29 @@ func (g *Group) run(ctx context.Context) { } evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval) - useRuleGroupPostProcessFunc(g, evalTimestamp.Add(-(missed+1)*g.interval)) - - iter() + g.evalIterationFunc(ctx, g, evalTimestamp) } } } } -func useRuleGroupPostProcessFunc(g *Group, lastEvalTimestamp time.Time) { - if g.ruleGroupPostProcessFunc != nil { - err := g.ruleGroupPostProcessFunc(g, lastEvalTimestamp, g.logger) - if err != nil { - level.Warn(g.logger).Log("msg", "ruleGroupPostProcessFunc failed", "err", err) - } - } +// DefaultEvalIterationFunc is the default implementation of +// GroupEvalIterationFunc that is periodically invoked to evaluate the rules +// in a group at a given point in time and updates Group state and metrics +// accordingly. Custom GroupEvalIterationFunc implementations are recommended +// to invoke this function as well, to ensure correct Group state and metrics +// are maintained. +func DefaultEvalIterationFunc(ctx context.Context, g *Group, evalTimestamp time.Time) { + g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Inc() + + start := time.Now() + g.Eval(ctx, evalTimestamp) + timeSinceStart := time.Since(start) + + g.metrics.IterationDuration.Observe(timeSinceStart.Seconds()) + g.setEvaluationTime(timeSinceStart) + g.setLastEvaluation(start) + g.setLastEvalTimestamp(evalTimestamp) } func (g *Group) stop() { @@ -547,6 +558,20 @@ func (g *Group) setLastEvaluation(ts time.Time) { g.lastEvaluation = ts } +// GetLastEvalTimestamp returns the timestamp of the last evaluation. +func (g *Group) GetLastEvalTimestamp() time.Time { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.lastEvalTimestamp +} + +// setLastEvalTimestamp updates lastEvalTimestamp to the timestamp of the last evaluation. +func (g *Group) setLastEvalTimestamp(ts time.Time) { + g.mtx.Lock() + defer g.mtx.Unlock() + g.lastEvalTimestamp = ts +} + // EvalTimestamp returns the immediately preceding consistently slotted evaluation time. func (g *Group) EvalTimestamp(startTime int64) time.Time { var offset int64 @@ -689,7 +714,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { if s.H != nil { _, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H) } else { - _, err = app.Append(0, s.Metric, s.T, s.V) + _, err = app.Append(0, s.Metric, s.T, s.F) } if err != nil { @@ -1060,11 +1085,11 @@ func (m *Manager) Stop() { // Update the rule manager's state as the config requires. If // loading the new rules failed the old rule set is restored. -func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, ruleGroupPostProcessFunc RuleGroupPostProcessFunc) error { +func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc) error { m.mtx.Lock() defer m.mtx.Unlock() - groups, errs := m.LoadGroups(interval, externalLabels, externalURL, ruleGroupPostProcessFunc, files...) + groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, files...) 
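	// A hypothetical caller-supplied iteration function (a sketch, not part
	// of this package) would typically wrap the default behavior, e.g.:
	//
	//	func loggingIteration(ctx context.Context, g *rules.Group, ts time.Time) {
	//		level.Info(g.Logger()).Log("msg", "group evaluation starting", "group", g.Name())
	//		rules.DefaultEvalIterationFunc(ctx, g, ts)
	//	}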
if errs != nil { for _, e := range errs { @@ -1154,7 +1179,7 @@ func (FileLoader) Parse(query string) (parser.Expr, error) { return parser.Parse // LoadGroups reads groups from a list of files. func (m *Manager) LoadGroups( - interval time.Duration, externalLabels labels.Labels, externalURL string, ruleGroupPostProcessFunc RuleGroupPostProcessFunc, filenames ...string, + interval time.Duration, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc, filenames ...string, ) (map[string]*Group, []error) { groups := make(map[string]*Group) @@ -1212,7 +1237,7 @@ func (m *Manager) LoadGroups( Opts: m.opts, EvaluationDelay: (*time.Duration)(rg.EvaluationDelay), done: m.done, - RuleGroupPostProcessFunc: ruleGroupPostProcessFunc, + EvalIterationFunc: groupEvalIterationFunc, AlignEvaluationTimeOnInterval: rg.AlignEvaluationTimeOnInterval, }) } diff --git a/vendor/github.com/prometheus/prometheus/rules/recording.go b/vendor/github.com/prometheus/prometheus/rules/recording.go index ca88beb1ceb..8abf8fa5f2f 100644 --- a/vendor/github.com/prometheus/prometheus/rules/recording.go +++ b/vendor/github.com/prometheus/prometheus/rules/recording.go @@ -93,7 +93,7 @@ func (rule *RecordingRule) Eval(ctx context.Context, evalDelay time.Duration, ts lb.Set(l.Name, l.Value) }) - sample.Metric = lb.Labels(sample.Metric) + sample.Metric = lb.Labels() } // Check that the rule does not produce identical metrics after applying diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 01c66ca81dd..f38527ff306 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -692,7 +692,7 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re } } - res := lb.Labels(labels.EmptyLabels()) + res := lb.Labels() if len(rc) > 0 { res, _ = relabel.Process(res, rc...) @@ -726,7 +726,7 @@ func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels lb.Set(l.Name, l.Value) }) - return lb.Labels(labels.EmptyLabels()) + return lb.Labels() } // appender returns an appender for ingested samples from the target. diff --git a/vendor/github.com/prometheus/prometheus/scrape/target.go b/vendor/github.com/prometheus/prometheus/scrape/target.go index 59f6e2873ea..f250910c10c 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/target.go +++ b/vendor/github.com/prometheus/prometheus/scrape/target.go @@ -380,7 +380,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort } } - preRelabelLabels := lb.Labels(labels.EmptyLabels()) + preRelabelLabels := lb.Labels() keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) // Check if the target was dropped. @@ -476,7 +476,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort lb.Set(model.InstanceLabel, addr) } - res = lb.Labels(labels.EmptyLabels()) + res = lb.Labels() err = res.Validate(func(l labels.Label) error { // Check label values are valid, drop the target if not. 
if !model.LabelValue(l.Value).IsValid() { diff --git a/vendor/github.com/prometheus/prometheus/storage/buffer.go b/vendor/github.com/prometheus/prometheus/storage/buffer.go index 92767cdd781..27ac21661bc 100644 --- a/vendor/github.com/prometheus/prometheus/storage/buffer.go +++ b/vendor/github.com/prometheus/prometheus/storage/buffer.go @@ -19,6 +19,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/tsdbutil" ) // BufferedSeriesIterator wraps an iterator with a look-back buffer. @@ -43,7 +44,7 @@ func NewBuffer(delta int64) *BufferedSeriesIterator { func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator { // TODO(codesome): based on encoding, allocate different buffer. bit := &BufferedSeriesIterator{ - buf: newSampleRing(delta, 16), + buf: newSampleRing(delta, 0, chunkenc.ValNone), delta: delta, } bit.Reset(it) @@ -68,11 +69,8 @@ func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool { // PeekBack returns the nth previous element of the iterator. If there is none buffered, // ok is false. -func (b *BufferedSeriesIterator) PeekBack(n int) ( - t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, ok bool, -) { - s, ok := b.buf.nthLast(n) - return s.t, s.v, s.h, s.fh, ok +func (b *BufferedSeriesIterator) PeekBack(n int) (sample tsdbutil.Sample, ok bool) { + return b.buf.nthLast(n) } // Buffer returns an iterator over the buffered data. Invalidates previously @@ -122,14 +120,14 @@ func (b *BufferedSeriesIterator) Next() chunkenc.ValueType { case chunkenc.ValNone: return chunkenc.ValNone case chunkenc.ValFloat: - t, v := b.it.At() - b.buf.add(sample{t: t, v: v}) + t, f := b.it.At() + b.buf.addF(fSample{t: t, f: f}) case chunkenc.ValHistogram: t, h := b.it.AtHistogram() - b.buf.add(sample{t: t, h: h}) + b.buf.addH(hSample{t: t, h: h}) case chunkenc.ValFloatHistogram: t, fh := b.it.AtFloatHistogram() - b.buf.add(sample{t: t, fh: fh}) + b.buf.addFH(fhSample{t: t, fh: fh}) default: panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType)) } @@ -166,56 +164,122 @@ func (b *BufferedSeriesIterator) Err() error { return b.it.Err() } -// TODO(beorn7): Consider having different sample types for different value types. 
-type sample struct {
+type fSample struct {
+	t int64
+	f float64
+}
+
+func (s fSample) T() int64 {
+	return s.t
+}
+
+func (s fSample) F() float64 {
+	return s.f
+}
+
+func (s fSample) H() *histogram.Histogram {
+	panic("H() called for fSample")
+}
+
+func (s fSample) FH() *histogram.FloatHistogram {
+	panic("FH() called for fSample")
+}
+
+func (s fSample) Type() chunkenc.ValueType {
+	return chunkenc.ValFloat
+}
+
+type hSample struct {
+	t int64
+	h *histogram.Histogram
+}
+
+func (s hSample) T() int64 {
+	return s.t
+}
+
+func (s hSample) F() float64 {
+	panic("F() called for hSample")
+}
+
+func (s hSample) H() *histogram.Histogram {
+	return s.h
+}
+
+func (s hSample) FH() *histogram.FloatHistogram {
+	return s.h.ToFloat()
+}
+
+func (s hSample) Type() chunkenc.ValueType {
+	return chunkenc.ValHistogram
+}
+
+type fhSample struct {
 	t  int64
-	v  float64
-	h  *histogram.Histogram
 	fh *histogram.FloatHistogram
 }
 
-func (s sample) T() int64 {
+func (s fhSample) T() int64 {
 	return s.t
 }
 
-func (s sample) V() float64 {
-	return s.v
+func (s fhSample) F() float64 {
+	panic("F() called for fhSample")
 }
 
-func (s sample) H() *histogram.Histogram {
-	return s.h
+func (s fhSample) H() *histogram.Histogram {
+	panic("H() called for fhSample")
 }
 
-func (s sample) FH() *histogram.FloatHistogram {
+func (s fhSample) FH() *histogram.FloatHistogram {
 	return s.fh
 }
 
-func (s sample) Type() chunkenc.ValueType {
-	switch {
-	case s.h != nil:
-		return chunkenc.ValHistogram
-	case s.fh != nil:
-		return chunkenc.ValFloatHistogram
-	default:
-		return chunkenc.ValFloat
-	}
+func (s fhSample) Type() chunkenc.ValueType {
+	return chunkenc.ValFloatHistogram
 }
 
 type sampleRing struct {
 	delta int64
 
-	buf []sample // lookback buffer
-	i   int      // position of most recent element in ring buffer
-	f   int      // position of first element in ring buffer
-	l   int      // number of elements in buffer
+	// Lookback buffers. We use buf for mixed samples, but one of the three
+	// concrete ones for homogenous samples. (Only one of the four bufs is
+	// allowed to be populated!) This avoids the overhead of the interface
+	// wrapper for the happy (and by far most common) case of homogenous
+	// samples.
+	buf   []tsdbutil.Sample
+	fBuf  []fSample
+	hBuf  []hSample
+	fhBuf []fhSample
+
+	i int // Position of most recent element in ring buffer.
+	f int // Position of first element in ring buffer.
+	l int // Number of elements in buffer.
 
 	it sampleRingIterator
 }
 
-func newSampleRing(delta int64, sz int) *sampleRing {
-	r := &sampleRing{delta: delta, buf: make([]sample, sz)}
+// newSampleRing creates a new sampleRing. If you do not know the preferred
+// value type yet, use a size of 0 (in which case the provided typ doesn't
+// matter). On the first add, a buffer of size 16 will be allocated with the
+// preferred type being the type of the first added sample.
+func newSampleRing(delta int64, size int, typ chunkenc.ValueType) *sampleRing {
+	r := &sampleRing{delta: delta}
 	r.reset()
-
+	if size <= 0 {
+		// Will initialize on first add.
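+		// (A caller that cannot know the sample type up front, such as
+		// NewBufferIterator above, passes size 0 and chunkenc.ValNone;
+		// the first add then allocates a buffer of size 16 typed after
+		// that first sample.)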
+ return r + } + switch typ { + case chunkenc.ValFloat: + r.fBuf = make([]fSample, size) + case chunkenc.ValHistogram: + r.hBuf = make([]hSample, size) + case chunkenc.ValFloatHistogram: + r.fhBuf = make([]fhSample, size) + default: + r.buf = make([]tsdbutil.Sample, size) + } return r } @@ -236,7 +300,7 @@ type sampleRingIterator struct { r *sampleRing i int t int64 - v float64 + f float64 h *histogram.Histogram fh *histogram.FloatHistogram } @@ -246,17 +310,34 @@ func (it *sampleRingIterator) Next() chunkenc.ValueType { if it.i >= it.r.l { return chunkenc.ValNone } - s := it.r.at(it.i) - it.t = s.t switch { - case s.h != nil: + case len(it.r.fBuf) > 0: + s := it.r.atF(it.i) + it.t = s.t + it.f = s.f + return chunkenc.ValFloat + case len(it.r.hBuf) > 0: + s := it.r.atH(it.i) + it.t = s.t it.h = s.h return chunkenc.ValHistogram - case s.fh != nil: + case len(it.r.fhBuf) > 0: + s := it.r.atFH(it.i) + it.t = s.t it.fh = s.fh return chunkenc.ValFloatHistogram + } + s := it.r.at(it.i) + it.t = s.T() + switch s.Type() { + case chunkenc.ValHistogram: + it.h = s.H() + return chunkenc.ValHistogram + case chunkenc.ValFloatHistogram: + it.fh = s.FH() + return chunkenc.ValFloatHistogram default: - it.v = s.v + it.f = s.F() return chunkenc.ValFloat } } @@ -270,7 +351,7 @@ func (it *sampleRingIterator) Err() error { } func (it *sampleRingIterator) At() (int64, float64) { - return it.t, it.v + return it.t, it.f } func (it *sampleRingIterator) AtHistogram() (int64, *histogram.Histogram) { @@ -288,22 +369,182 @@ func (it *sampleRingIterator) AtT() int64 { return it.t } -func (r *sampleRing) at(i int) sample { +func (r *sampleRing) at(i int) tsdbutil.Sample { j := (r.f + i) % len(r.buf) return r.buf[j] } -// add adds a sample to the ring buffer and frees all samples that fall -// out of the delta range. -func (r *sampleRing) add(s sample) { - l := len(r.buf) +func (r *sampleRing) atF(i int) fSample { + j := (r.f + i) % len(r.fBuf) + return r.fBuf[j] +} + +func (r *sampleRing) atH(i int) hSample { + j := (r.f + i) % len(r.hBuf) + return r.hBuf[j] +} + +func (r *sampleRing) atFH(i int) fhSample { + j := (r.f + i) % len(r.fhBuf) + return r.fhBuf[j] +} + +// add adds a sample to the ring buffer and frees all samples that fall out of +// the delta range. Note that this method works for any sample +// implementation. If you know you are dealing with one of the implementations +// from this package (fSample, hSample, fhSample), call one of the specialized +// methods addF, addH, or addFH for better performance. +func (r *sampleRing) add(s tsdbutil.Sample) { + if len(r.buf) == 0 { + // Nothing added to the interface buf yet. Let's check if we can + // stay specialized. + switch s := s.(type) { + case fSample: + if len(r.hBuf)+len(r.fhBuf) == 0 { + r.fBuf = addF(s, r.fBuf, r) + return + } + case hSample: + if len(r.fBuf)+len(r.fhBuf) == 0 { + r.hBuf = addH(s, r.hBuf, r) + return + } + case fhSample: + if len(r.fBuf)+len(r.hBuf) == 0 { + r.fhBuf = addFH(s, r.fhBuf, r) + return + } + } + // The new sample isn't a fit for the already existing + // ones. Copy the latter into the interface buffer where needed. 
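+		// For example, if fBuf currently holds fSamples and an hSample
+		// arrives, every fSample is appended index-for-index to the
+		// generic buf (so the ring offsets i, f, and l stay valid), fBuf
+		// is dropped, and this and all later samples go through buf.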
+ switch { + case len(r.fBuf) > 0: + for _, s := range r.fBuf { + r.buf = append(r.buf, s) + } + r.fBuf = nil + case len(r.hBuf) > 0: + for _, s := range r.hBuf { + r.buf = append(r.buf, s) + } + r.hBuf = nil + case len(r.fhBuf) > 0: + for _, s := range r.fhBuf { + r.buf = append(r.buf, s) + } + r.fhBuf = nil + } + } + r.buf = addSample(s, r.buf, r) +} + +// addF is a version of the add method specialized for fSample. +func (r *sampleRing) addF(s fSample) { + switch { + case len(r.buf) > 0: + // Already have interface samples. Add to the interface buf. + r.buf = addSample(s, r.buf, r) + case len(r.hBuf)+len(r.fhBuf) > 0: + // Already have specialized samples that are not fSamples. + // Need to call the checked add method for conversion. + r.add(s) + default: + r.fBuf = addF(s, r.fBuf, r) + } +} + +// addH is a version of the add method specialized for hSample. +func (r *sampleRing) addH(s hSample) { + switch { + case len(r.buf) > 0: + // Already have interface samples. Add to the interface buf. + r.buf = addSample(s, r.buf, r) + case len(r.fBuf)+len(r.fhBuf) > 0: + // Already have samples that are not hSamples. + // Need to call the checked add method for conversion. + r.add(s) + default: + r.hBuf = addH(s, r.hBuf, r) + } +} + +// addFH is a version of the add method specialized for fhSample. +func (r *sampleRing) addFH(s fhSample) { + switch { + case len(r.buf) > 0: + // Already have interface samples. Add to the interface buf. + r.buf = addSample(s, r.buf, r) + case len(r.fBuf)+len(r.hBuf) > 0: + // Already have samples that are not fhSamples. + // Need to call the checked add method for conversion. + r.add(s) + default: + r.fhBuf = addFH(s, r.fhBuf, r) + } +} + +// genericAdd is a generic implementation of adding a tsdbutil.Sample +// implementation to a buffer of a sample ring. However, the Go compiler +// currently (go1.20) decides to not expand the code during compile time, but +// creates dynamic code to handle the different types. That has a significant +// overhead during runtime, noticeable in PromQL benchmarks. For example, the +// "RangeQuery/expr=rate(a_hundred[1d]),steps=.*" benchmarks show about 7% +// longer runtime, 9% higher allocation size, and 10% more allocations. +// Therefore, genericAdd has been manually implemented for all the types +// (addSample, addF, addH, addFH) below. +// +// func genericAdd[T tsdbutil.Sample](s T, buf []T, r *sampleRing) []T { +// l := len(buf) +// // Grow the ring buffer if it fits no more elements. +// if l == 0 { +// buf = make([]T, 16) +// l = 16 +// } +// if l == r.l { +// newBuf := make([]T, 2*l) +// copy(newBuf[l+r.f:], buf[r.f:]) +// copy(newBuf, buf[:r.f]) +// +// buf = newBuf +// r.i = r.f +// r.f += l +// l = 2 * l +// } else { +// r.i++ +// if r.i >= l { +// r.i -= l +// } +// } +// +// buf[r.i] = s +// r.l++ +// +// // Free head of the buffer of samples that just fell out of the range. +// tmin := s.T() - r.delta +// for buf[r.f].T() < tmin { +// r.f++ +// if r.f >= l { +// r.f -= l +// } +// r.l-- +// } +// return buf +// } + +// addSample is a handcoded specialization of genericAdd (see above). +func addSample(s tsdbutil.Sample, buf []tsdbutil.Sample, r *sampleRing) []tsdbutil.Sample { + l := len(buf) // Grow the ring buffer if it fits no more elements. 
+ if l == 0 { + buf = make([]tsdbutil.Sample, 16) + l = 16 + } if l == r.l { - buf := make([]sample, 2*l) - copy(buf[l+r.f:], r.buf[r.f:]) - copy(buf, r.buf[:r.f]) + newBuf := make([]tsdbutil.Sample, 2*l) + copy(newBuf[l+r.f:], buf[r.f:]) + copy(newBuf, buf[:r.f]) - r.buf = buf + buf = newBuf r.i = r.f r.f += l l = 2 * l @@ -314,18 +555,136 @@ func (r *sampleRing) add(s sample) { } } - r.buf[r.i] = s + buf[r.i] = s r.l++ // Free head of the buffer of samples that just fell out of the range. - tmin := s.t - r.delta - for r.buf[r.f].t < tmin { + tmin := s.T() - r.delta + for buf[r.f].T() < tmin { r.f++ if r.f >= l { r.f -= l } r.l-- } + return buf +} + +// addF is a handcoded specialization of genericAdd (see above). +func addF(s fSample, buf []fSample, r *sampleRing) []fSample { + l := len(buf) + // Grow the ring buffer if it fits no more elements. + if l == 0 { + buf = make([]fSample, 16) + l = 16 + } + if l == r.l { + newBuf := make([]fSample, 2*l) + copy(newBuf[l+r.f:], buf[r.f:]) + copy(newBuf, buf[:r.f]) + + buf = newBuf + r.i = r.f + r.f += l + l = 2 * l + } else { + r.i++ + if r.i >= l { + r.i -= l + } + } + + buf[r.i] = s + r.l++ + + // Free head of the buffer of samples that just fell out of the range. + tmin := s.T() - r.delta + for buf[r.f].T() < tmin { + r.f++ + if r.f >= l { + r.f -= l + } + r.l-- + } + return buf +} + +// addH is a handcoded specialization of genericAdd (see above). +func addH(s hSample, buf []hSample, r *sampleRing) []hSample { + l := len(buf) + // Grow the ring buffer if it fits no more elements. + if l == 0 { + buf = make([]hSample, 16) + l = 16 + } + if l == r.l { + newBuf := make([]hSample, 2*l) + copy(newBuf[l+r.f:], buf[r.f:]) + copy(newBuf, buf[:r.f]) + + buf = newBuf + r.i = r.f + r.f += l + l = 2 * l + } else { + r.i++ + if r.i >= l { + r.i -= l + } + } + + buf[r.i] = s + r.l++ + + // Free head of the buffer of samples that just fell out of the range. + tmin := s.T() - r.delta + for buf[r.f].T() < tmin { + r.f++ + if r.f >= l { + r.f -= l + } + r.l-- + } + return buf +} + +// addFH is a handcoded specialization of genericAdd (see above). +func addFH(s fhSample, buf []fhSample, r *sampleRing) []fhSample { + l := len(buf) + // Grow the ring buffer if it fits no more elements. + if l == 0 { + buf = make([]fhSample, 16) + l = 16 + } + if l == r.l { + newBuf := make([]fhSample, 2*l) + copy(newBuf[l+r.f:], buf[r.f:]) + copy(newBuf, buf[:r.f]) + + buf = newBuf + r.i = r.f + r.f += l + l = 2 * l + } else { + r.i++ + if r.i >= l { + r.i -= l + } + } + + buf[r.i] = s + r.l++ + + // Free head of the buffer of samples that just fell out of the range. + tmin := s.T() - r.delta + for buf[r.f].T() < tmin { + r.f++ + if r.f >= l { + r.f -= l + } + r.l-- + } + return buf } // reduceDelta lowers the buffered time delta, dropping any samples that are @@ -340,39 +699,98 @@ func (r *sampleRing) reduceDelta(delta int64) bool { return true } + switch { + case len(r.fBuf) > 0: + genericReduceDelta(r.fBuf, r) + case len(r.hBuf) > 0: + genericReduceDelta(r.hBuf, r) + case len(r.fhBuf) > 0: + genericReduceDelta(r.fhBuf, r) + default: + genericReduceDelta(r.buf, r) + } + return true +} + +func genericReduceDelta[T tsdbutil.Sample](buf []T, r *sampleRing) { // Free head of the buffer of samples that just fell out of the range. 
- l := len(r.buf) - tmin := r.buf[r.i].t - delta - for r.buf[r.f].t < tmin { + l := len(buf) + tmin := buf[r.i].T() - r.delta + for buf[r.f].T() < tmin { r.f++ if r.f >= l { r.f -= l } r.l-- } - return true } // nthLast returns the nth most recent element added to the ring. -func (r *sampleRing) nthLast(n int) (sample, bool) { +func (r *sampleRing) nthLast(n int) (tsdbutil.Sample, bool) { if n > r.l { - return sample{}, false + return fSample{}, false + } + i := r.l - n + switch { + case len(r.fBuf) > 0: + return r.atF(i), true + case len(r.hBuf) > 0: + return r.atH(i), true + case len(r.fhBuf) > 0: + return r.atFH(i), true + default: + return r.at(i), true } - return r.at(r.l - n), true } -func (r *sampleRing) samples() []sample { - res := make([]sample, r.l) +func (r *sampleRing) samples() []tsdbutil.Sample { + res := make([]tsdbutil.Sample, r.l) k := r.f + r.l var j int - if k > len(r.buf) { - k = len(r.buf) - j = r.l - k + r.f - } - n := copy(res, r.buf[r.f:k]) - copy(res[n:], r.buf[:j]) + switch { + case len(r.buf) > 0: + if k > len(r.buf) { + k = len(r.buf) + j = r.l - k + r.f + } + n := copy(res, r.buf[r.f:k]) + copy(res[n:], r.buf[:j]) + case len(r.fBuf) > 0: + if k > len(r.fBuf) { + k = len(r.fBuf) + j = r.l - k + r.f + } + resF := make([]fSample, r.l) + n := copy(resF, r.fBuf[r.f:k]) + copy(resF[n:], r.fBuf[:j]) + for i, s := range resF { + res[i] = s + } + case len(r.hBuf) > 0: + if k > len(r.hBuf) { + k = len(r.hBuf) + j = r.l - k + r.f + } + resH := make([]hSample, r.l) + n := copy(resH, r.hBuf[r.f:k]) + copy(resH[n:], r.hBuf[:j]) + for i, s := range resH { + res[i] = s + } + case len(r.fhBuf) > 0: + if k > len(r.fhBuf) { + k = len(r.fhBuf) + j = r.l - k + r.f + } + resFH := make([]fhSample, r.l) + n := copy(resFH, r.fhBuf[r.f:k]) + copy(resFH[n:], r.fhBuf[:j]) + for i, s := range resFH { + res[i] = s + } + } return res } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go index ba802bccb3f..bfbd08d24ba 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -17,6 +17,7 @@ import ( "errors" "fmt" "io" + "math" "net/http" "sort" "strings" @@ -173,7 +174,7 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet return errSeriesSet{err: err} } lbls := labelProtosToLabels(ts.Labels) - series = append(series, &concreteSeries{labels: lbls, samples: ts.Samples}) + series = append(series, &concreteSeries{labels: lbls, floats: ts.Samples, histograms: ts.Histograms}) } if sortSeries { @@ -359,8 +360,9 @@ func (c *concreteSeriesSet) Warnings() storage.Warnings { return nil } // concreteSeries implements storage.Series. type concreteSeries struct { - labels labels.Labels - samples []prompb.Sample + labels labels.Labels + floats []prompb.Sample + histograms []prompb.Histogram } func (c *concreteSeries) Labels() labels.Labels { @@ -372,84 +374,167 @@ func (c *concreteSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { csi.reset(c) return csi } - return newConcreteSeriersIterator(c) + return newConcreteSeriesIterator(c) } // concreteSeriesIterator implements storage.SeriesIterator. 
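+// It merges the floats and histograms lists by timestamp. For example
+// (illustrative): floats at t=1 and t=3 plus a histogram at t=2 yield
+// ValFloat, ValHistogram, ValFloat from successive Next() calls; when both
+// lists carry the same timestamp, the float sample wins.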
type concreteSeriesIterator struct {
-	cur    int
-	series *concreteSeries
+	floatsCur     int
+	histogramsCur int
+	curValType    chunkenc.ValueType
+	series        *concreteSeries
 }
 
-func newConcreteSeriersIterator(series *concreteSeries) chunkenc.Iterator {
+func newConcreteSeriesIterator(series *concreteSeries) chunkenc.Iterator {
 	return &concreteSeriesIterator{
-		cur:    -1,
-		series: series,
+		floatsCur:     -1,
+		histogramsCur: -1,
+		curValType:    chunkenc.ValNone,
+		series:        series,
 	}
 }
 
 func (c *concreteSeriesIterator) reset(series *concreteSeries) {
-	c.cur = -1
+	c.floatsCur = -1
+	c.histogramsCur = -1
+	c.curValType = chunkenc.ValNone
 	c.series = series
 }
 
 // Seek implements storage.SeriesIterator.
 func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
-	if c.cur == -1 {
-		c.cur = 0
+	if c.floatsCur == -1 {
+		c.floatsCur = 0
+	}
+	if c.histogramsCur == -1 {
+		c.histogramsCur = 0
 	}
-	if c.cur >= len(c.series.samples) {
+	if c.floatsCur >= len(c.series.floats) && c.histogramsCur >= len(c.series.histograms) {
 		return chunkenc.ValNone
 	}
+
 	// No-op check.
-	if s := c.series.samples[c.cur]; s.Timestamp >= t {
-		return chunkenc.ValFloat
+	if (c.curValType == chunkenc.ValFloat && c.series.floats[c.floatsCur].Timestamp >= t) ||
+		((c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram) && c.series.histograms[c.histogramsCur].Timestamp >= t) {
+		return c.curValType
 	}
-	// Do binary search between current position and end.
-	c.cur += sort.Search(len(c.series.samples)-c.cur, func(n int) bool {
-		return c.series.samples[n+c.cur].Timestamp >= t
+
+	c.curValType = chunkenc.ValNone
+
+	// Binary search between current position and end for both float and histogram samples.
+	c.floatsCur += sort.Search(len(c.series.floats)-c.floatsCur, func(n int) bool {
+		return c.series.floats[n+c.floatsCur].Timestamp >= t
+	})
+	c.histogramsCur += sort.Search(len(c.series.histograms)-c.histogramsCur, func(n int) bool {
+		return c.series.histograms[n+c.histogramsCur].Timestamp >= t
 	})
-	if c.cur < len(c.series.samples) {
-		return chunkenc.ValFloat
+
+	if c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms) {
+		// If float samples and histogram samples have overlapping timestamps, prefer the float samples.
+		if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp {
+			c.curValType = chunkenc.ValFloat
+		} else {
+			c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur])
+		}
+		// When the timestamps do not overlap, the cursor for the non-selected sample type has advanced too
+		// far; we decrement it back down here.
+		if c.series.floats[c.floatsCur].Timestamp != c.series.histograms[c.histogramsCur].Timestamp {
+			if c.curValType == chunkenc.ValFloat {
+				c.histogramsCur--
+			} else {
+				c.floatsCur--
+			}
+		}
+	} else if c.floatsCur < len(c.series.floats) {
+		c.curValType = chunkenc.ValFloat
+	} else if c.histogramsCur < len(c.series.histograms) {
+		c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur])
 	}
-	return chunkenc.ValNone
-	// TODO(beorn7): Add histogram support.
+
+	return c.curValType
+}
+
+func getHistogramValType(h *prompb.Histogram) chunkenc.ValueType {
+	if _, isInt := h.GetCount().(*prompb.Histogram_CountInt); isInt {
+		return chunkenc.ValHistogram
+	}
+	return chunkenc.ValFloatHistogram
 }
 
 // At implements chunkenc.Iterator.
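+// It panics if the iterator does not currently point at a float sample, in
+// line with the other typed accessors below.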
func (c *concreteSeriesIterator) At() (t int64, v float64) {
-	s := c.series.samples[c.cur]
+	if c.curValType != chunkenc.ValFloat {
+		panic("iterator is not on a float sample")
+	}
+	s := c.series.floats[c.floatsCur]
 	return s.Timestamp, s.Value
 }
 
-// AtHistogram always returns (0, nil) because there is no support for histogram
-// values yet.
-// TODO(beorn7): Fix that for histogram support in remote storage.
+// AtHistogram implements chunkenc.Iterator.
 func (c *concreteSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
-	return 0, nil
+	if c.curValType != chunkenc.ValHistogram {
+		panic("iterator is not on an integer histogram sample")
+	}
+	h := c.series.histograms[c.histogramsCur]
+	return h.Timestamp, HistogramProtoToHistogram(h)
 }
 
-// AtFloatHistogram always returns (0, nil) because there is no support for histogram
-// values yet.
-// TODO(beorn7): Fix that for histogram support in remote storage.
+// AtFloatHistogram implements chunkenc.Iterator.
 func (c *concreteSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
-	return 0, nil
+	switch c.curValType {
+	case chunkenc.ValHistogram:
+		fh := c.series.histograms[c.histogramsCur]
+		return fh.Timestamp, HistogramProtoToFloatHistogram(fh)
+	case chunkenc.ValFloatHistogram:
+		fh := c.series.histograms[c.histogramsCur]
+		return fh.Timestamp, FloatHistogramProtoToFloatHistogram(fh)
+	default:
+		panic("iterator is not on a histogram sample")
+	}
 }
 
 // AtT implements chunkenc.Iterator.
 func (c *concreteSeriesIterator) AtT() int64 {
-	s := c.series.samples[c.cur]
-	return s.Timestamp
+	if c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram {
+		return c.series.histograms[c.histogramsCur].Timestamp
+	}
+	return c.series.floats[c.floatsCur].Timestamp
 }
 
+const noTS = int64(math.MaxInt64)
+
 // Next implements chunkenc.Iterator.
 func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
-	c.cur++
-	if c.cur < len(c.series.samples) {
-		return chunkenc.ValFloat
-	}
-	return chunkenc.ValNone
-	// TODO(beorn7): Add histogram support.
+	peekFloatTS := noTS
+	if c.floatsCur+1 < len(c.series.floats) {
+		peekFloatTS = c.series.floats[c.floatsCur+1].Timestamp
+	}
+	peekHistTS := noTS
+	if c.histogramsCur+1 < len(c.series.histograms) {
+		peekHistTS = c.series.histograms[c.histogramsCur+1].Timestamp
+	}
+	c.curValType = chunkenc.ValNone
+
+	if peekFloatTS < peekHistTS {
+		c.floatsCur++
+		c.curValType = chunkenc.ValFloat
+	} else if peekHistTS < peekFloatTS {
+		c.histogramsCur++
+		c.curValType = chunkenc.ValHistogram
+	} else if peekFloatTS == noTS && peekHistTS == noTS {
+		// This only happens when the iterator is exhausted; we set the cursors off the end to prevent
+		// Seek() from returning anything afterwards.
+		c.floatsCur = len(c.series.floats)
+		c.histogramsCur = len(c.series.histograms)
+	} else {
+		// Prefer float samples to histogram samples if there's a conflict. We advance the cursor for
+		// histograms anyway; otherwise the histogram sample would get selected on the next call to Next().
+		c.floatsCur++
+		c.histogramsCur++
+		c.curValType = chunkenc.ValFloat
+	}
+
+	return c.curValType
 }
 
 // Err implements chunkenc.Iterator.
@@ -557,10 +642,10 @@ func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
 	}
 }
 
-// HistogramProtoToFloatHistogram extracts a (normal integer) Histogram from the
+// FloatHistogramProtoToFloatHistogram extracts a float Histogram from the
 // provided proto message to a Float Histogram.
The caller has to make sure that -// the proto message represents an float histogram and not a integer histogram. -func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { +// the proto message represents a float histogram and not an integer histogram. +func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { return &histogram.FloatHistogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), Schema: hp.Schema, @@ -575,6 +660,24 @@ func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogr } } +// HistogramProtoToFloatHistogram extracts and converts a (normal integer) histogram from the provided proto message +// to a float histogram. The caller has to make sure that the proto message represents an integer histogram and not a +// float histogram. +func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { + return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(hp.ResetHint), + Schema: hp.Schema, + ZeroThreshold: hp.ZeroThreshold, + ZeroCount: float64(hp.GetZeroCountInt()), + Count: float64(hp.GetCountInt()), + Sum: hp.Sum, + PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()), + PositiveBuckets: deltasToCounts(hp.GetPositiveDeltas()), + NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()), + NegativeBuckets: deltasToCounts(hp.GetNegativeDeltas()), + } +} + func spansProtoToSpans(s []prompb.BucketSpan) []histogram.Span { spans := make([]histogram.Span, len(s)) for i := 0; i < len(s); i++ { @@ -584,6 +687,16 @@ func spansProtoToSpans(s []prompb.BucketSpan) []histogram.Span { return spans } +func deltasToCounts(deltas []int64) []float64 { + counts := make([]float64, len(deltas)) + var cur float64 + for i, d := range deltas { + cur += float64(d) + counts[i] = cur + } + return counts +} + func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) prompb.Histogram { return prompb.Histogram{ Count: &prompb.Histogram_CountInt{CountInt: h.Count}, diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read.go b/vendor/github.com/prometheus/prometheus/storage/remote/read.go index 21524d70dbd..af61334f480 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/read.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read.go @@ -278,5 +278,5 @@ func (sf seriesFilter) Labels() labels.Labels { b := labels.NewBuilder(sf.Series.Labels()) // todo: check if this is too inefficient. b.Del(sf.toFilter...) - return b.Labels(labels.EmptyLabels()) + return b.Labels() } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index 45304c43c48..5aa11308887 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -126,7 +126,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err for _, hp := range ts.Histograms { if hp.GetCountFloat() > 0 || hp.GetZeroCountFloat() > 0 { // It is a float histogram. 
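+				// (Float histograms are detected via their float count fields;
+				// integer histograms carry CountInt/ZeroCountInt instead.)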
- fhs := HistogramProtoToFloatHistogram(hp) + fhs := FloatHistogramProtoToFloatHistogram(hp) _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, fhs) } else { hs := HistogramProtoToHistogram(hp) diff --git a/vendor/github.com/prometheus/prometheus/storage/series.go b/vendor/github.com/prometheus/prometheus/storage/series.go index dcb6dd82e6c..f609df3f021 100644 --- a/vendor/github.com/prometheus/prometheus/storage/series.go +++ b/vendor/github.com/prometheus/prometheus/storage/series.go @@ -109,7 +109,7 @@ func (it *listSeriesIterator) Reset(samples Samples) { func (it *listSeriesIterator) At() (int64, float64) { s := it.samples.Get(it.idx) - return s.T(), s.V() + return s.T(), s.F() } func (it *listSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { @@ -376,10 +376,17 @@ func (e errChunksIterator) Err() error { return e.err } // ExpandSamples iterates over all samples in the iterator, buffering all in slice. // Optionally it takes samples constructor, useful when you want to compare sample slices with different // sample implementations. if nil, sample type from this package will be used. -func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample) ([]tsdbutil.Sample, error) { +func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample) ([]tsdbutil.Sample, error) { if newSampleFn == nil { - newSampleFn = func(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample { - return sample{t, v, h, fh} + newSampleFn = func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample { + switch { + case h != nil: + return hSample{t, h} + case fh != nil: + return fhSample{t, fh} + default: + return fSample{t, f} + } } } @@ -389,12 +396,12 @@ func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64, case chunkenc.ValNone: return result, iter.Err() case chunkenc.ValFloat: - t, v := iter.At() + t, f := iter.At() // NaNs can't be compared normally, so substitute for another value. - if math.IsNaN(v) { - v = -42 + if math.IsNaN(f) { + f = -42 } - result = append(result, newSampleFn(t, v, nil, nil)) + result = append(result, newSampleFn(t, f, nil, nil)) case chunkenc.ValHistogram: t, h := iter.AtHistogram() result = append(result, newSampleFn(t, 0, h, nil)) diff --git a/vendor/github.com/prometheus/prometheus/template/template.go b/vendor/github.com/prometheus/prometheus/template/template.go index d5f80006fc2..d61a880a22d 100644 --- a/vendor/github.com/prometheus/prometheus/template/template.go +++ b/vendor/github.com/prometheus/prometheus/template/template.go @@ -93,7 +93,7 @@ func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (quer result := make(queryResult, len(vector)) for n, v := range vector { s := sample{ - Value: v.V, + Value: v.F, Labels: v.Metric.Map(), } result[n] = &s diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go index c550cbc78ef..1ebef3eb106 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go @@ -96,7 +96,7 @@ type Iterator interface { // timestamp equal or greater than t. If the current sample found by a // previous `Next` or `Seek` operation already has this property, Seek // has no effect. 
If a sample has been found, Seek returns the type of - // its value. Otherwise, it returns ValNone, after with the iterator is + // its value. Otherwise, it returns ValNone, after which the iterator is // exhausted. Seek(t int64) ValueType // At returns the current timestamp/value pair if the value is a float. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 7c721de3ee7..216036cfca9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -83,7 +83,7 @@ type Compactor interface { // LeveledCompactor implements the Compactor interface. type LeveledCompactor struct { - metrics *compactorMetrics + metrics *CompactorMetrics logger log.Logger ranges []int64 chunkPool chunkenc.Pool @@ -95,47 +95,47 @@ type LeveledCompactor struct { concurrencyOpts LeveledCompactorConcurrencyOptions } -type compactorMetrics struct { - ran prometheus.Counter - populatingBlocks prometheus.Gauge - overlappingBlocks prometheus.Counter - duration prometheus.Histogram - chunkSize prometheus.Histogram - chunkSamples prometheus.Histogram - chunkRange prometheus.Histogram +type CompactorMetrics struct { + Ran prometheus.Counter + PopulatingBlocks prometheus.Gauge + OverlappingBlocks prometheus.Counter + Duration prometheus.Histogram + ChunkSize prometheus.Histogram + ChunkSamples prometheus.Histogram + ChunkRange prometheus.Histogram } -func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics { - m := &compactorMetrics{} +func newCompactorMetrics(r prometheus.Registerer) *CompactorMetrics { + m := &CompactorMetrics{} - m.ran = prometheus.NewCounter(prometheus.CounterOpts{ + m.Ran = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_compactions_total", Help: "Total number of compactions that were executed for the partition.", }) - m.populatingBlocks = prometheus.NewGauge(prometheus.GaugeOpts{ + m.PopulatingBlocks = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "prometheus_tsdb_compaction_populating_block", Help: "Set to 1 when a block is currently being written to the disk.", }) - m.overlappingBlocks = prometheus.NewCounter(prometheus.CounterOpts{ + m.OverlappingBlocks = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_vertical_compactions_total", Help: "Total number of compactions done on overlapping blocks.", }) - m.duration = prometheus.NewHistogram(prometheus.HistogramOpts{ + m.Duration = prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "prometheus_tsdb_compaction_duration_seconds", Help: "Duration of compaction runs", Buckets: prometheus.ExponentialBuckets(1, 2, 14), }) - m.chunkSize = prometheus.NewHistogram(prometheus.HistogramOpts{ + m.ChunkSize = prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "prometheus_tsdb_compaction_chunk_size_bytes", Help: "Final size of chunks on their first compaction", Buckets: prometheus.ExponentialBuckets(32, 1.5, 12), }) - m.chunkSamples = prometheus.NewHistogram(prometheus.HistogramOpts{ + m.ChunkSamples = prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "prometheus_tsdb_compaction_chunk_samples", Help: "Final number of samples on their first compaction", Buckets: prometheus.ExponentialBuckets(4, 1.5, 12), }) - m.chunkRange = prometheus.NewHistogram(prometheus.HistogramOpts{ + m.ChunkRange = prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "prometheus_tsdb_compaction_chunk_range_seconds", Help: "Final time range of chunks on their first 
compaction", Buckets: prometheus.ExponentialBuckets(100, 4, 10), @@ -143,13 +143,13 @@ func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics { if r != nil { r.MustRegister( - m.ran, - m.populatingBlocks, - m.overlappingBlocks, - m.duration, - m.chunkRange, - m.chunkSamples, - m.chunkSize, + m.Ran, + m.PopulatingBlocks, + m.OverlappingBlocks, + m.Duration, + m.ChunkRange, + m.ChunkSamples, + m.ChunkSize, ) } return m @@ -428,13 +428,13 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta { // and returns slice of block IDs. Position of returned block ID in the result slice corresponds to the shard index. // If given output block has no series, corresponding block ID will be zero ULID value. func (c *LeveledCompactor) CompactWithSplitting(dest string, dirs []string, open []*Block, shardCount uint64) (result []ulid.ULID, _ error) { - return c.compact(dest, dirs, open, shardCount) + return c.CompactWithBlockPopulator(dest, dirs, open, DefaultBlockPopulator{}, shardCount) } // Compact creates a new block in the compactor's directory from the blocks in the // provided directories. func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (uid ulid.ULID, err error) { - ulids, err := c.compact(dest, dirs, open, 1) + ulids, err := c.CompactWithBlockPopulator(dest, dirs, open, DefaultBlockPopulator{}, 1) if err != nil { return ulid.ULID{}, err } @@ -453,7 +453,7 @@ type shardedBlock struct { indexw IndexWriter } -func (c *LeveledCompactor) compact(dest string, dirs []string, open []*Block, shardCount uint64) (_ []ulid.ULID, err error) { +func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator, shardCount uint64) (_ []ulid.ULID, err error) { if shardCount == 0 { shardCount = 1 } @@ -487,7 +487,7 @@ func (c *LeveledCompactor) compact(dest string, dirs []string, open []*Block, sh outBlocks[ix] = shardedBlock{meta: CompactBlockMetas(ulid.MustNew(outBlocksTime, rand.Reader), metas...)} } - err = c.write(dest, outBlocks, blocks...) + err = c.write(dest, outBlocks, blockPopulator, blocks...) if err == nil { ulids := make([]ulid.ULID, len(outBlocks)) allOutputBlocksAreEmpty := true @@ -619,7 +619,7 @@ func (c *LeveledCompactor) compactOOO(dest string, oooHead *OOOCompactionHead, s } // Block intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes. - err := c.write(dest, outBlocks[ix], oooHead.CloneForTimeRange(mint, maxt-1)) + err := c.write(dest, outBlocks[ix], DefaultBlockPopulator{}, oooHead.CloneForTimeRange(mint, maxt-1)) if err != nil { // We need to delete all blocks in case there was an error. for _, obs := range outBlocks { @@ -691,7 +691,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, p } } - err := c.write(dest, []shardedBlock{{meta: meta}}, b) + err := c.write(dest, []shardedBlock{{meta: meta}}, DefaultBlockPopulator{}, b) if err != nil { return uid, err } @@ -736,7 +736,7 @@ func (w *instrumentedChunkWriter) WriteChunks(chunks ...chunks.Meta) error { } // write creates new output blocks that are the union of the provided blocks into dir. 
-func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blocks ...BlockReader) (err error) {
+func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPopulator BlockPopulator, blocks ...BlockReader) (err error) {
 	var closers []io.Closer
 
 	defer func(t time.Time) {
@@ -760,8 +760,8 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blocks .
 			}
 		}
 	}
-		c.metrics.ran.Inc()
-		c.metrics.duration.Observe(time.Since(t).Seconds())
+		c.metrics.Ran.Inc()
+		c.metrics.Duration.Observe(time.Since(t).Seconds())
 	}(time.Now())
 
 	for ix := range outBlocks {
@@ -794,9 +794,9 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blocks .
 		if outBlocks[ix].meta.Compaction.Level == 1 {
 			chunkw = &instrumentedChunkWriter{
 				ChunkWriter: chunkw,
-				size:        c.metrics.chunkSize,
-				samples:     c.metrics.chunkSamples,
-				trange:      c.metrics.chunkRange,
+				size:        c.metrics.ChunkSize,
+				samples:     c.metrics.ChunkSamples,
+				trange:      c.metrics.ChunkRange,
 			}
 		}
 
@@ -814,7 +814,7 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blocks .
 	}
 
 	// We use MinTime and MaxTime from first output block, because ALL output blocks have the same min/max times set.
-	if err := c.populateBlock(blocks, outBlocks[0].meta.MinTime, outBlocks[0].meta.MaxTime, outBlocks); err != nil {
+	if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, c.concurrencyOpts, blocks, outBlocks[0].meta.MinTime, outBlocks[0].meta.MaxTime, outBlocks); err != nil {
 		return errors.Wrap(err, "populate block")
 	}
 
@@ -938,12 +938,18 @@ func timeFromMillis(ms int64) time.Time {
 	return time.Unix(0, ms*int64(time.Millisecond))
 }
 
-// populateBlock fills the index and chunk writers of output blocks with new data gathered as the union
-// of the provided blocks.
+type BlockPopulator interface {
+	PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, concurrencyOpts LeveledCompactorConcurrencyOptions, blocks []BlockReader, minT, maxT int64, outBlocks []shardedBlock) error
+}
+
+type DefaultBlockPopulator struct{}
+
+// PopulateBlock fills the index and chunk writers with new data gathered as the union
+// of the provided blocks.
 // It expects sorted blocks input by mint.
 // If there is more than 1 output block, each output block will only contain series that hash into its shard
 // (based on total number of output blocks).
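+//
+// A custom implementation can be plugged in through the new
+// CompactWithBlockPopulator entry point, e.g. (illustrative):
+//
+//	ulids, err := compactor.CompactWithBlockPopulator(dest, dirs, open, DefaultBlockPopulator{}, shardCount)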
-func (c *LeveledCompactor) populateBlock(blocks []BlockReader, minT, maxT int64, outBlocks []shardedBlock) (err error) { +func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, concurrencyOpts LeveledCompactorConcurrencyOptions, blocks []BlockReader, minT, maxT int64, outBlocks []shardedBlock) (err error) { if len(blocks) == 0 { return errors.New("cannot populate block(s) from no readers") } @@ -961,23 +967,23 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, minT, maxT int64, errs.Add(errors.Wrap(cerr, "close")) } err = errs.Err() - c.metrics.populatingBlocks.Set(0) + metrics.PopulatingBlocks.Set(0) }() - c.metrics.populatingBlocks.Set(1) + metrics.PopulatingBlocks.Set(1) globalMaxt := blocks[0].Meta().MaxTime for i, b := range blocks { select { - case <-c.ctx.Done(): - return c.ctx.Err() + case <-ctx.Done(): + return ctx.Err() default: } if !overlapping { if i > 0 && b.Meta().MinTime < globalMaxt { - c.metrics.overlappingBlocks.Inc() + metrics.OverlappingBlocks.Inc() overlapping = true - level.Info(c.logger).Log("msg", "Found overlapping blocks during compaction") + level.Info(logger).Log("msg", "Found overlapping blocks during compaction") } if b.Meta().MaxTime > globalMaxt { globalMaxt = b.Meta().MaxTime @@ -1009,7 +1015,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, minT, maxT int64, } all = indexr.SortedPostings(all) // Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp. - sets = append(sets, newBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, minT, maxT-1, false)) + sets = append(sets, NewBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, minT, maxT-1, false)) if len(outBlocks) > 1 { // To iterate series when populating symbols, we cannot reuse postings we just got, but need to get a new copy. @@ -1021,7 +1027,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, minT, maxT int64, } all = indexr.SortedPostings(all) // Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp. - symbolsSets = append(symbolsSets, newBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, minT, maxT-1, false)) + symbolsSets = append(symbolsSets, NewBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, minT, maxT-1, false)) } else { syms := indexr.Symbols() if i == 0 { @@ -1042,17 +1048,17 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, minT, maxT int64, return errors.Wrap(symbols.Err(), "next symbol") } } else { - if err := c.populateSymbols(symbolsSets, outBlocks); err != nil { + if err := populateSymbols(ctx, mergeFunc, concurrencyOpts, symbolsSets, outBlocks); err != nil { return err } } // Semaphore for number of blocks that can be closed at once. - sema := semaphore.NewWeighted(int64(c.concurrencyOpts.MaxClosingBlocks)) + sema := semaphore.NewWeighted(int64(concurrencyOpts.MaxClosingBlocks)) blockWriters := make([]*asyncBlockWriter, len(outBlocks)) for ix := range outBlocks { - blockWriters[ix] = newAsyncBlockWriter(c.chunkPool, outBlocks[ix].chunkw, outBlocks[ix].indexw, sema) + blockWriters[ix] = newAsyncBlockWriter(chunkPool, outBlocks[ix].chunkw, outBlocks[ix].indexw, sema) } defer func() { // Stop all async writers. 
@@ -1072,14 +1078,14 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, minT, maxT int64, if len(sets) > 1 { // Merge series using specified chunk series merger. // The default one is the compacting series merger. - set = storage.NewMergeChunkSeriesSet(sets, c.mergeFunc) + set = storage.NewMergeChunkSeriesSet(sets, mergeFunc) } // Iterate over all sorted chunk series. for set.Next() { select { - case <-c.ctx.Done(): - return c.ctx.Err() + case <-ctx.Done(): + return ctx.Err() default: } s := set.At() @@ -1101,7 +1107,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, minT, maxT int64, continue } - debugOutOfOrderChunks(chks, c.logger) + debugOutOfOrderChunks(chks, logger) obIx := uint64(0) if len(outBlocks) > 1 { @@ -1139,12 +1145,12 @@ const inMemorySymbolsLimit = 1_000_000 // populateSymbols writes symbols to output blocks. We need to iterate through all series to find // which series belongs to what block. We collect symbols per sharded block, and then add sorted symbols to // block's index. -func (c *LeveledCompactor) populateSymbols(sets []storage.ChunkSeriesSet, outBlocks []shardedBlock) error { +func populateSymbols(ctx context.Context, mergeFunc storage.VerticalChunkSeriesMergeFunc, concurrencyOpts LeveledCompactorConcurrencyOptions, sets []storage.ChunkSeriesSet, outBlocks []shardedBlock) error { if len(outBlocks) == 0 { return errors.New("no output block") } - flushers := newSymbolFlushers(c.concurrencyOpts.SymbolsFlushersCount) + flushers := newSymbolFlushers(concurrencyOpts.SymbolsFlushersCount) defer flushers.close() // Make sure to stop flushers before exiting to avoid leaking goroutines. batchers := make([]*symbolsBatcher, len(outBlocks)) @@ -1161,11 +1167,11 @@ func (c *LeveledCompactor) populateSymbols(sets []storage.ChunkSeriesSet, outBlo seriesSet := sets[0] if len(sets) > 1 { - seriesSet = storage.NewMergeChunkSeriesSet(sets, c.mergeFunc) + seriesSet = storage.NewMergeChunkSeriesSet(sets, mergeFunc) } for seriesSet.Next() { - if err := c.ctx.Err(); err != nil { + if err := ctx.Err(); err != nil { return err } @@ -1203,7 +1209,7 @@ func (c *LeveledCompactor) populateSymbols(sets []storage.ChunkSeriesSet, outBlo } for ix := range outBlocks { - if err := c.ctx.Err(); err != nil { + if err := ctx.Err(); err != nil { return err } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 74c3a0ddffe..30b85cc8b78 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -601,7 +601,7 @@ const cardinalityCacheExpirationTime = time.Duration(30) * time.Second func (h *Head) Init(minValidTime int64) error { h.minValidTime.Store(minValidTime) defer func() { - h.postings.EnsureOrder() + h.postings.EnsureOrder(h.opts.WALReplayConcurrency) }() defer h.gc() // After loading the wal remove the obsolete data from the head. 
	defer func() {
@@ -618,6 +618,7 @@ func (h *Head) Init(minValidTime int64) error {
 
 	snapIdx, snapOffset := -1, 0
 	refSeries := make(map[chunks.HeadSeriesRef]*memSeries)
 
+	snapshotLoaded := false
 	if h.opts.EnableMemorySnapshotOnShutdown {
 		level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot")
 		// If there are any WAL files, there should be at least one WAL file with an index that is current or newer
@@ -647,6 +648,7 @@ func (h *Head) Init(minValidTime int64) error {
 			var err error
 			snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot()
 			if err == nil {
+				snapshotLoaded = true
 				level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", time.Since(start).String())
 			}
 			if err != nil {
@@ -664,26 +666,36 @@ func (h *Head) Init(minValidTime int64) error {
 	}
 
 	mmapChunkReplayStart := time.Now()
-	mmappedChunks, oooMmappedChunks, lastMmapRef, err := h.loadMmappedChunks(refSeries)
-	if err != nil {
-		// TODO(codesome): clear out all m-map chunks here for refSeries.
-		level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err)
-		if _, ok := errors.Cause(err).(*chunks.CorruptionErr); ok {
-			h.metrics.mmapChunkCorruptionTotal.Inc()
-		}
+	var (
+		mmappedChunks    map[chunks.HeadSeriesRef][]*mmappedChunk
+		oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
+		lastMmapRef      chunks.ChunkDiskMapperRef
+		err              error
+	)
+	if snapshotLoaded || h.wal != nil {
+		// If the snapshot was not loaded and there is no WAL, the m-map chunks will be discarded
+		// anyway. So we only load m-map chunks when they won't be discarded.
+		mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.loadMmappedChunks(refSeries)
+		if err != nil {
+			// TODO(codesome): clear out all m-map chunks here for refSeries.
+			level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err)
+			if _, ok := errors.Cause(err).(*chunks.CorruptionErr); ok {
+				h.metrics.mmapChunkCorruptionTotal.Inc()
+			}
 
-		// Discard snapshot data since we need to replay the WAL for the missed m-map chunks data.
-		snapIdx, snapOffset = -1, 0
+			// Discard snapshot data since we need to replay the WAL for the missed m-map chunks data.
+			snapIdx, snapOffset = -1, 0
 
-		// If this fails, data will be recovered from WAL.
-		// Hence we wont lose any data (given WAL is not corrupt).
+			// If this fails, data will be recovered from WAL.
+			// Hence we won't lose any data (given WAL is not corrupt).
+ mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.removeCorruptedMmappedChunks(err) + if err != nil { + return err + } } + level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", time.Since(mmapChunkReplayStart).String()) } - level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", time.Since(mmapChunkReplayStart).String()) if h.wal == nil { level.Info(h.logger).Log("msg", "WAL not found") return nil @@ -851,6 +863,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) numSamples: numSamples, }) + h.updateMinOOOMaxOOOTime(mint, maxt) return nil } @@ -1878,7 +1891,7 @@ func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries fu type sample struct { t int64 - v float64 + f float64 h *histogram.Histogram fh *histogram.FloatHistogram } @@ -1888,7 +1901,7 @@ func newSample(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHi } func (s sample) T() int64 { return s.t } -func (s sample) V() float64 { return s.v } +func (s sample) F() float64 { return s.f } func (s sample) H() *histogram.Histogram { return s.h } func (s sample) FH() *histogram.FloatHistogram { return s.fh } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index 8076942e35e..fbe2e884fda 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -437,7 +437,7 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi return false, headMaxt - t, storage.ErrOutOfOrderSample } -// appendableHistogram checks whether the given sample is valid for appending to the series. +// appendableHistogram checks whether the given histogram is valid for appending to the series. func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error { c := s.head() if c == nil { @@ -459,7 +459,7 @@ func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error { return nil } -// appendableFloatHistogram checks whether the given sample is valid for appending to the series. +// appendableFloatHistogram checks whether the given float histogram is valid for appending to the series. func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram) error { c := s.head() if c == nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index 1006aefa7e9..507aacd73df 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -224,7 +224,10 @@ func (p *MemPostings) All() Postings { // EnsureOrder ensures that all postings lists are sorted. After it returns all further // calls to add and addFor will insert new IDs in a sorted manner. -func (p *MemPostings) EnsureOrder() { +// Parameter numberOfConcurrentProcesses is used to specify the maximal number of +// CPU cores used for this operation. If it is <= 0, GOMAXPROCS is used. +// GOMAXPROCS was the default before introducing this parameter. 
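+// For example (illustrative), Head.Init now calls
+// h.postings.EnsureOrder(h.opts.WALReplayConcurrency), so postings sorting
+// reuses the WAL replay concurrency setting.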
+func (p *MemPostings) EnsureOrder(numberOfConcurrentProcesses int) { p.mtx.Lock() defer p.mtx.Unlock() @@ -232,13 +235,16 @@ func (p *MemPostings) EnsureOrder() { return } - n := runtime.GOMAXPROCS(0) + concurrency := numberOfConcurrentProcesses + if concurrency <= 0 { + concurrency = runtime.GOMAXPROCS(0) + } workc := make(chan *[][]storage.SeriesRef) var wg sync.WaitGroup - wg.Add(n) + wg.Add(concurrency) - for i := 0; i < n; i++ { + for i := 0; i < concurrency; i++ { go func() { for job := range workc { for _, l := range *job { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go index 63d0b37127c..45827889e69 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go @@ -78,7 +78,7 @@ func (o *OOOChunk) ToXOR() (*chunkenc.XORChunk, error) { return nil, err } for _, s := range o.samples { - app.Append(s.t, s.v) + app.Append(s.t, s.f) } return x, nil } @@ -96,7 +96,7 @@ func (o *OOOChunk) ToXORBetweenTimestamps(mint, maxt int64) (*chunkenc.XORChunk, if s.t > maxt { break } - app.Append(s.t, s.v) + app.Append(s.t, s.f) } return x, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index aed4952ac0e..a8fcd22ec94 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -171,7 +171,7 @@ func (q *blockChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, if sortSeries { p = q.index.SortedPostings(p) } - return newBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) + return NewBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) } // PostingsForMatchers assembles a single postings iterator against the index reader @@ -387,7 +387,7 @@ func (s *seriesData) Labels() labels.Labels { return s.labels } // blockBaseSeriesSet allows to iterate over all series in the single block. // Iterated series are trimmed with given min and max time as well as tombstones. -// See newBlockSeriesSet and newBlockChunkSeriesSet to use it for either sample or chunk iterating. +// See newBlockSeriesSet and NewBlockChunkSeriesSet to use it for either sample or chunk iterating. 
type blockBaseSeriesSet struct { blockID ulid.ULID p index.Postings @@ -873,7 +873,7 @@ type blockChunkSeriesSet struct { blockBaseSeriesSet } -func newBlockChunkSeriesSet(id ulid.ULID, i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.ChunkSeriesSet { +func NewBlockChunkSeriesSet(id ulid.ULID, i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.ChunkSeriesSet { return &blockChunkSeriesSet{ blockBaseSeriesSet{ blockID: id, diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go index f9981ffe16d..02a7dd61982 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go @@ -28,7 +28,7 @@ type Samples interface { type Sample interface { T() int64 - V() float64 + F() float64 H() *histogram.Histogram FH() *histogram.FloatHistogram Type() chunkenc.ValueType @@ -69,7 +69,7 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta { for i := 0; i < s.Len(); i++ { switch sampleType { case chunkenc.ValFloat: - ca.Append(s.Get(i).T(), s.Get(i).V()) + ca.Append(s.Get(i).T(), s.Get(i).F()) case chunkenc.ValHistogram: ca.AppendHistogram(s.Get(i).T(), s.Get(i).H()) case chunkenc.ValFloatHistogram: @@ -87,7 +87,7 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta { type sample struct { t int64 - v float64 + f float64 h *histogram.Histogram fh *histogram.FloatHistogram } @@ -96,8 +96,8 @@ func (s sample) T() int64 { return s.t } -func (s sample) V() float64 { - return s.v +func (s sample) F() float64 { + return s.f } func (s sample) H() *histogram.Histogram { @@ -123,7 +123,7 @@ func (s sample) Type() chunkenc.ValueType { func PopulatedChunk(numSamples int, minTime int64) chunks.Meta { samples := make([]Sample, numSamples) for i := 0; i < numSamples; i++ { - samples[i] = sample{t: minTime + int64(i*1000), v: 1.0} + samples[i] = sample{t: minTime + int64(i*1000), f: 1.0} } return ChunkFromSamples(samples) } @@ -133,7 +133,7 @@ func GenerateSamples(start, numSamples int) []Sample { return generateSamples(start, numSamples, func(i int) Sample { return sample{ t: int64(i), - v: float64(i), + f: float64(i), } }) } diff --git a/vendor/github.com/prometheus/prometheus/util/jsonutil/marshal.go b/vendor/github.com/prometheus/prometheus/util/jsonutil/marshal.go index a82ae100d7f..d715eabe68d 100644 --- a/vendor/github.com/prometheus/prometheus/util/jsonutil/marshal.go +++ b/vendor/github.com/prometheus/prometheus/util/jsonutil/marshal.go @@ -18,6 +18,8 @@ import ( "strconv" jsoniter "github.com/json-iterator/go" + + "github.com/prometheus/prometheus/model/histogram" ) // MarshalTimestamp marshals a point timestamp using the passed jsoniter stream. @@ -42,13 +44,13 @@ func MarshalTimestamp(t int64, stream *jsoniter.Stream) { } } -// MarshalValue marshals a point value using the passed jsoniter stream. -func MarshalValue(v float64, stream *jsoniter.Stream) { +// MarshalFloat marshals a float value using the passed jsoniter stream. +func MarshalFloat(f float64, stream *jsoniter.Stream) { stream.WriteRaw(`"`) // Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround // to https://github.com/json-iterator/go/issues/365 (jsoniter, to follow json standard, doesn't allow inf/nan). 
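+	// (The float is written as a quoted string, matching how the Prometheus
+	// API encodes sample values and keeping Inf/NaN representable in JSON.)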
buf := stream.Buffer() - abs := math.Abs(v) + abs := math.Abs(f) fmt := byte('f') // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. if abs != 0 { @@ -56,7 +58,80 @@ func MarshalValue(v float64, stream *jsoniter.Stream) { fmt = 'e' } } - buf = strconv.AppendFloat(buf, v, fmt, -1, 64) + buf = strconv.AppendFloat(buf, f, fmt, -1, 64) stream.SetBuffer(buf) stream.WriteRaw(`"`) } + +// MarshalHistogram marshals a histogram value using the passed jsoniter stream. +// It writes something like: +// +// { +// "count": "42", +// "sum": "34593.34", +// "buckets": [ +// [ 3, "-0.25", "0.25", "3"], +// [ 0, "0.25", "0.5", "12"], +// [ 0, "0.5", "1", "21"], +// [ 0, "2", "4", "6"] +// ] +// } +// +// The 1st element in each bucket array determines if the boundaries are +// inclusive (AKA closed) or exclusive (AKA open): +// +// 0: lower exclusive, upper inclusive +// 1: lower inclusive, upper exclusive +// 2: both exclusive +// 3: both inclusive +// +// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is +// the bucket count. +func MarshalHistogram(h *histogram.FloatHistogram, stream *jsoniter.Stream) { + stream.WriteObjectStart() + stream.WriteObjectField(`count`) + MarshalFloat(h.Count, stream) + stream.WriteMore() + stream.WriteObjectField(`sum`) + MarshalFloat(h.Sum, stream) + + bucketFound := false + it := h.AllBucketIterator() + for it.Next() { + bucket := it.At() + if bucket.Count == 0 { + continue // No need to expose empty buckets in JSON. + } + stream.WriteMore() + if !bucketFound { + stream.WriteObjectField(`buckets`) + stream.WriteArrayStart() + } + bucketFound = true + boundaries := 2 // Exclusive on both sides AKA open interval. + if bucket.LowerInclusive { + if bucket.UpperInclusive { + boundaries = 3 // Inclusive on both sides AKA closed interval. + } else { + boundaries = 1 // Inclusive only on lower end AKA right open. + } + } else { + if bucket.UpperInclusive { + boundaries = 0 // Inclusive only on upper end AKA left open. + } + } + stream.WriteArrayStart() + stream.WriteInt(boundaries) + stream.WriteMore() + MarshalFloat(bucket.Lower, stream) + stream.WriteMore() + MarshalFloat(bucket.Upper, stream) + stream.WriteMore() + MarshalFloat(bucket.Count, stream) + stream.WriteArrayEnd() + } + if bucketFound { + stream.WriteArrayEnd() + } + stream.WriteObjectEnd() +} diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index c4af7821dcd..349821ea240 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -177,8 +177,8 @@ type TSDBAdminStats interface { // QueryEngine defines the interface for the *promql.Engine, so it can be replaced, wrapped or mocked. 
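+//
+// Both constructors now take a context as their first argument, e.g.
+// (illustrative):
+//
+//	qry, err := api.QueryEngine.NewInstantQuery(ctx, api.Queryable, opts, r.FormValue("query"), ts)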
type QueryEngine interface { SetQueryLogger(l promql.QueryLogger) - NewInstantQuery(q storage.Queryable, opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) - NewRangeQuery(q storage.Queryable, opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) + NewInstantQuery(ctx context.Context, q storage.Queryable, opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) + NewRangeQuery(ctx context.Context, q storage.Queryable, opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) } // API can register a set of endpoints in a router and handle @@ -425,7 +425,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { if err != nil { return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} } - qry, err := api.QueryEngine.NewInstantQuery(api.Queryable, opts, r.FormValue("query"), ts) + qry, err := api.QueryEngine.NewInstantQuery(ctx, api.Queryable, opts, r.FormValue("query"), ts) if err != nil { return invalidParamError(err, "query") } @@ -528,7 +528,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { if err != nil { return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} } - qry, err := api.QueryEngine.NewRangeQuery(api.Queryable, opts, r.FormValue("query"), start, end, step) + qry, err := api.QueryEngine.NewRangeQuery(ctx, api.Queryable, opts, r.FormValue("query"), start, end, step) if err != nil { return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} } diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/json_codec.go b/vendor/github.com/prometheus/prometheus/web/api/v1/json_codec.go index 79ebfee182b..fb2b1c04693 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/json_codec.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/json_codec.go @@ -19,7 +19,6 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/prometheus/prometheus/model/exemplar" - "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/util/jsonutil" ) @@ -27,7 +26,8 @@ import ( func init() { jsoniter.RegisterTypeEncoderFunc("promql.Series", marshalSeriesJSON, marshalSeriesJSONIsEmpty) jsoniter.RegisterTypeEncoderFunc("promql.Sample", marshalSampleJSON, marshalSampleJSONIsEmpty) - jsoniter.RegisterTypeEncoderFunc("promql.Point", marshalPointJSON, marshalPointJSONIsEmpty) + jsoniter.RegisterTypeEncoderFunc("promql.FPoint", marshalFPointJSON, marshalPointJSONIsEmpty) + jsoniter.RegisterTypeEncoderFunc("promql.HPoint", marshalHPointJSON, marshalPointJSONIsEmpty) jsoniter.RegisterTypeEncoderFunc("exemplar.Exemplar", marshalExemplarJSON, marshalExemplarJSONEmpty) } @@ -60,7 +60,7 @@ func (j JSONCodec) Encode(resp *Response) ([]byte, error) { // < more values> // ], // "histograms": [ -// [ 1435781451.781, { < histogram, see below > } ], +// [ 1435781451.781, { < histogram, see jsonutil.MarshalHistogram > } ], // < more histograms > // ], // }, @@ -75,41 +75,26 @@ func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { } stream.SetBuffer(append(stream.Buffer(), m...)) - // We make two passes through the series here: In the first marshaling - // all value points, in the second marshaling all histogram - // points. That's probably cheaper than just one pass in which we copy - // out histogram Points into a newly allocated slice for separate - // marshaling. (Could be benchmarked, though.) 
- var foundValue, foundHistogram bool - for _, p := range s.Points { - if p.H == nil { - stream.WriteMore() - if !foundValue { - stream.WriteObjectField(`values`) - stream.WriteArrayStart() - } - foundValue = true - marshalPointJSON(unsafe.Pointer(&p), stream) - } else { - foundHistogram = true + for i, p := range s.Floats { + stream.WriteMore() + if i == 0 { + stream.WriteObjectField(`values`) + stream.WriteArrayStart() } + marshalFPointJSON(unsafe.Pointer(&p), stream) } - if foundValue { + if len(s.Floats) > 0 { stream.WriteArrayEnd() } - if foundHistogram { - firstHistogram := true - for _, p := range s.Points { - if p.H != nil { - stream.WriteMore() - if firstHistogram { - stream.WriteObjectField(`histograms`) - stream.WriteArrayStart() - } - firstHistogram = false - marshalPointJSON(unsafe.Pointer(&p), stream) - } + for i, p := range s.Histograms { + stream.WriteMore() + if i == 0 { + stream.WriteObjectField(`histograms`) + stream.WriteArrayStart() } + marshalHPointJSON(unsafe.Pointer(&p), stream) + } + if len(s.Histograms) > 0 { stream.WriteArrayEnd() } stream.WriteObjectEnd() @@ -127,7 +112,7 @@ func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool { // "job" : "prometheus", // "instance" : "localhost:9090" // }, -// "value": [ 1435781451.781, "1" ] +// "value": [ 1435781451.781, "1.234" ] // }, // // For histogram samples, it writes something like this: @@ -138,7 +123,7 @@ func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool { // "job" : "prometheus", // "instance" : "localhost:9090" // }, -// "histogram": [ 1435781451.781, { < histogram, see below > } ] +// "histogram": [ 1435781451.781, { < histogram, see jsonutil.MarshalHistogram > } ] // }, func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { s := *((*promql.Sample)(ptr)) @@ -151,12 +136,20 @@ func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { } stream.SetBuffer(append(stream.Buffer(), m...)) stream.WriteMore() - if s.Point.H == nil { + if s.H == nil { stream.WriteObjectField(`value`) } else { stream.WriteObjectField(`histogram`) } - marshalPointJSON(unsafe.Pointer(&s.Point), stream) + stream.WriteArrayStart() + jsonutil.MarshalTimestamp(s.T, stream) + stream.WriteMore() + if s.H == nil { + jsonutil.MarshalFloat(s.F, stream) + } else { + jsonutil.MarshalHistogram(s.H, stream) + } + stream.WriteArrayEnd() stream.WriteObjectEnd() } @@ -164,94 +157,28 @@ func marshalSampleJSONIsEmpty(ptr unsafe.Pointer) bool { return false } -// marshalPointJSON writes `[ts, "val"]`. -func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { - p := *((*promql.Point)(ptr)) +// marshalFPointJSON writes `[ts, "1.234"]`. 
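+// Histogram points are handled separately by marshalHPointJSON below.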
+func marshalFPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + p := *((*promql.FPoint)(ptr)) stream.WriteArrayStart() jsonutil.MarshalTimestamp(p.T, stream) stream.WriteMore() - if p.H == nil { - jsonutil.MarshalValue(p.V, stream) - } else { - marshalHistogram(p.H, stream) - } + jsonutil.MarshalFloat(p.F, stream) stream.WriteArrayEnd() } -func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { - return false -} - -// marshalHistogramJSON writes something like: -// -// { -// "count": "42", -// "sum": "34593.34", -// "buckets": [ -// [ 3, "-0.25", "0.25", "3"], -// [ 0, "0.25", "0.5", "12"], -// [ 0, "0.5", "1", "21"], -// [ 0, "2", "4", "6"] -// ] -// } -// -// The 1st element in each bucket array determines if the boundaries are -// inclusive (AKA closed) or exclusive (AKA open): -// -// 0: lower exclusive, upper inclusive -// 1: lower inclusive, upper exclusive -// 2: both exclusive -// 3: both inclusive -// -// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is -// the bucket count. -func marshalHistogram(h *histogram.FloatHistogram, stream *jsoniter.Stream) { - stream.WriteObjectStart() - stream.WriteObjectField(`count`) - jsonutil.MarshalValue(h.Count, stream) +// marshalHPointJSON writes `[ts, { < histogram, see jsonutil.MarshalHistogram > } ]`. +func marshalHPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + p := *((*promql.HPoint)(ptr)) + stream.WriteArrayStart() + jsonutil.MarshalTimestamp(p.T, stream) stream.WriteMore() - stream.WriteObjectField(`sum`) - jsonutil.MarshalValue(h.Sum, stream) + jsonutil.MarshalHistogram(p.H, stream) + stream.WriteArrayEnd() +} - bucketFound := false - it := h.AllBucketIterator() - for it.Next() { - bucket := it.At() - if bucket.Count == 0 { - continue // No need to expose empty buckets in JSON. - } - stream.WriteMore() - if !bucketFound { - stream.WriteObjectField(`buckets`) - stream.WriteArrayStart() - } - bucketFound = true - boundaries := 2 // Exclusive on both sides AKA open interval. - if bucket.LowerInclusive { - if bucket.UpperInclusive { - boundaries = 3 // Inclusive on both sides AKA closed interval. - } else { - boundaries = 1 // Inclusive only on lower end AKA right open. - } - } else { - if bucket.UpperInclusive { - boundaries = 0 // Inclusive only on upper end AKA left open. - } - } - stream.WriteArrayStart() - stream.WriteInt(boundaries) - stream.WriteMore() - jsonutil.MarshalValue(bucket.Lower, stream) - stream.WriteMore() - jsonutil.MarshalValue(bucket.Upper, stream) - stream.WriteMore() - jsonutil.MarshalValue(bucket.Count, stream) - stream.WriteArrayEnd() - } - if bucketFound { - stream.WriteArrayEnd() - } - stream.WriteObjectEnd() +func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { + return false } // marshalExemplarJSON writes. @@ -277,7 +204,7 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { // "value" key. stream.WriteMore() stream.WriteObjectField(`value`) - jsonutil.MarshalValue(p.Value, stream) + jsonutil.MarshalFloat(p.Value, stream) // "timestamp" key. 
stream.WriteMore() diff --git a/vendor/modules.txt b/vendor/modules.txt index be5fc056f23..1276124f245 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -844,7 +844,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.8.2-0.20220620125440-d7e7b8e04b5e => github.com/grafana/mimir-prometheus v0.0.0-20230417132058-c461e223418b +# github.com/prometheus/prometheus v1.8.2-0.20220620125440-d7e7b8e04b5e => github.com/grafana/mimir-prometheus v0.0.0-20230418130726-d162bb51b6ef ## explicit; go 1.19 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -1433,7 +1433,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20230417132058-c461e223418b +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20230418130726-d162bb51b6ef # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe # google.golang.org/grpc => google.golang.org/grpc v1.47.0 # gopkg.in/yaml.v3 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 From 6ead00964f99b6d8d0e44553dbfd10f5c138d5e4 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Wed, 19 Apr 2023 00:15:19 +0800 Subject: [PATCH 2/4] Update code to work with mimir-prometheus --- integration/compactor_test.go | 2 +- integration/querier_remote_read_test.go | 2 +- pkg/api/protobuf_codec.go | 37 ++- pkg/api/protobuf_codec_test.go | 235 ++++++++---------- pkg/compactor/compactor_test.go | 2 +- pkg/compactor/split_merge_grouper.go | 2 +- pkg/compactor/split_merge_job.go | 4 +- pkg/distributor/distributor_test.go | 12 +- pkg/distributor/ha_tracker_test.go | 2 +- pkg/distributor/query_test.go | 2 +- pkg/frontend/querymiddleware/querysharding.go | 10 +- .../querymiddleware/querysharding_test.go | 73 +++--- .../activeseries/active_series_test.go | 4 +- pkg/mimirpb/compat.go | 15 +- pkg/mimirpb/compat_test.go | 12 +- pkg/mimirpb/mimir.pb.go | 1 - pkg/mimirpb/mimir.proto | 1 - pkg/querier/block_test.go | 10 +- pkg/querier/blocks_store_queryable_test.go | 28 +-- pkg/querier/distributor_queryable_test.go | 4 +- pkg/querier/querier_test.go | 81 +++--- .../tenantfederation/tenant_federation.go | 2 +- pkg/ruler/compat.go | 2 +- pkg/ruler/manager_test.go | 2 +- pkg/ruler/remotequerier_decoder.go | 30 +-- pkg/ruler/remotequerier_test.go | 36 ++- pkg/storegateway/bucket_test.go | 2 +- pkg/storegateway/gateway_test.go | 2 +- tools/tsdb-chunks/main_test.go | 2 +- tools/tsdb-index-health/main_test.go | 2 +- tools/tsdb-print-chunk/main_test.go | 2 +- 31 files changed, 298 insertions(+), 323 deletions(-) diff --git a/integration/compactor_test.go b/integration/compactor_test.go index 822a5be00fa..3a278f1ee39 100644 --- a/integration/compactor_test.go +++ b/integration/compactor_test.go @@ -217,7 +217,7 @@ type sample struct { } func (s sample) T() int64 { return s.t } -func (s sample) V() float64 { return s.v } +func (s sample) F() float64 { return s.v } func (s sample) H() *histogram.Histogram { return s.h } func (s sample) FH() *histogram.FloatHistogram { return s.fh } diff --git a/integration/querier_remote_read_test.go b/integration/querier_remote_read_test.go index 4ffdda9a326..02075ce4fcd 100644 --- a/integration/querier_remote_read_test.go +++ b/integration/querier_remote_read_test.go @@ 
-341,7 +341,7 @@ func TestQuerierStreamingRemoteRead(t *testing.T) { ts, fh := chkItr.AtFloatHistogram() require.Equal(t, histograms[sampleIdx].Timestamp, ts) - expected := remote.HistogramProtoToFloatHistogram(histograms[sampleIdx]) + expected := remote.FloatHistogramProtoToFloatHistogram(histograms[sampleIdx]) test.RequireFloatHistogramEqual(t, expected, fh) default: require.Fail(t, "unrecognized value type") diff --git a/pkg/api/protobuf_codec.go b/pkg/api/protobuf_codec.go index 195ebe9f69f..237bba061c7 100644 --- a/pkg/api/protobuf_codec.go +++ b/pkg/api/protobuf_codec.go @@ -106,13 +106,13 @@ func (c protobufCodec) encodeVector(v promql.Vector) mimirpb.VectorData { samples = append(samples, mimirpb.VectorSample{ Metric: metric, TimestampMs: s.T, - Value: s.V, + Value: s.F, }) } else { histograms = append(histograms, mimirpb.VectorHistogram{ Metric: metric, TimestampMs: s.T, - Histogram: *mimirpb.FloatHistogramFromPrometheusModel(s.Point.H), + Histogram: *mimirpb.FloatHistogramFromPrometheusModel(s.H), }) } } @@ -136,29 +136,20 @@ func (c protobufCodec) encodeMatrix(m promql.Matrix) mimirpb.MatrixData { } func (c protobufCodec) encodeMatrixSeries(s promql.Series) mimirpb.MatrixSeries { - histogramCount := 0 - - for _, p := range s.Points { - if p.H != nil { - histogramCount++ - } + samples := make([]mimirpb.Sample, 0, len(s.Floats)) + for _, p := range s.Floats { + samples = append(samples, mimirpb.Sample{ + TimestampMs: p.T, + Value: p.F, + }) } - samples := make([]mimirpb.Sample, 0, len(s.Points)-histogramCount) - histograms := make([]mimirpb.FloatHistogramPair, 0, histogramCount) - - for _, p := range s.Points { - if p.H == nil { - samples = append(samples, mimirpb.Sample{ - TimestampMs: p.T, - Value: p.V, - }) - } else { - histograms = append(histograms, mimirpb.FloatHistogramPair{ - TimestampMs: p.T, - Histogram: *mimirpb.FloatHistogramFromPrometheusModel(p.H), - }) - } + histograms := make([]mimirpb.FloatHistogramPair, 0, len(s.Histograms)) + for _, p := range s.Histograms { + histograms = append(histograms, mimirpb.FloatHistogramPair{ + TimestampMs: p.T, + Histogram: *mimirpb.FloatHistogramFromPrometheusModel(p.H), + }) } return mimirpb.MatrixSeries{ diff --git a/pkg/api/protobuf_codec_test.go b/pkg/api/protobuf_codec_test.go index 7719b49239d..36613653a63 100644 --- a/pkg/api/protobuf_codec_test.go +++ b/pkg/api/protobuf_codec_test.go @@ -134,10 +134,8 @@ var protobufCodecScenarios = map[string]struct { Result: promql.Vector{ { Metric: labels.EmptyLabels(), - Point: promql.Point{ - T: 1234, - V: 5.67, - }, + T: 1234, + F: 5.67, }, }, }, @@ -165,10 +163,8 @@ var protobufCodecScenarios = map[string]struct { Result: promql.Vector{ { Metric: labels.FromStrings("name-1", "value-1"), - Point: promql.Point{ - T: 1234, - V: 5.67, - }, + T: 1234, + F: 5.67, }, }, }, @@ -196,10 +192,8 @@ var protobufCodecScenarios = map[string]struct { Result: promql.Vector{ { Metric: labels.FromStrings("name-1", "value-1", "name-2", "value-2"), - Point: promql.Point{ - T: 1234, - V: 5.67, - }, + T: 1234, + F: 5.67, }, }, }, @@ -227,17 +221,13 @@ var protobufCodecScenarios = map[string]struct { Result: promql.Vector{ { Metric: labels.FromStrings("name-1", "value-1", "name-2", "value-2"), - Point: promql.Point{ - T: 1234, - V: 5.67, - }, + T: 1234, + F: 5.67, }, { Metric: labels.FromStrings("name-3", "value-3", "name-4", "value-4"), - Point: promql.Point{ - T: 2345, - V: 6.78, - }, + T: 2345, + F: 6.78, }, }, }, @@ -270,26 +260,24 @@ var protobufCodecScenarios = map[string]struct { Result: promql.Vector{ 
{ Metric: labels.FromStrings("name-1", "value-1"), - Point: promql.Point{ - T: 1234, - H: &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 3, - ZeroThreshold: 1.23, - ZeroCount: 456, - Count: 9001, - Sum: 789.1, - PositiveSpans: []histogram.Span{ - {Offset: 4, Length: 1}, - {Offset: 3, Length: 2}, - }, - NegativeSpans: []histogram.Span{ - {Offset: 7, Length: 3}, - {Offset: 9, Length: 1}, - }, - PositiveBuckets: []float64{100, 200, 300}, - NegativeBuckets: []float64{400, 500, 600, 700}, + T: 1234, + H: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 3, + ZeroThreshold: 1.23, + ZeroCount: 456, + Count: 9001, + Sum: 789.1, + PositiveSpans: []histogram.Span{ + {Offset: 4, Length: 1}, + {Offset: 3, Length: 2}, }, + NegativeSpans: []histogram.Span{ + {Offset: 7, Length: 3}, + {Offset: 9, Length: 1}, + }, + PositiveBuckets: []float64{100, 200, 300}, + NegativeBuckets: []float64{400, 500, 600, 700}, }, }, }, @@ -335,65 +323,57 @@ var protobufCodecScenarios = map[string]struct { Result: promql.Vector{ { Metric: labels.FromStrings("name-1", "value-1"), - Point: promql.Point{ - T: 1234, - H: &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 3, - ZeroThreshold: 1.23, - ZeroCount: 456, - Count: 9001, - Sum: 789.1, - PositiveSpans: []histogram.Span{ - {Offset: 4, Length: 1}, - {Offset: 3, Length: 2}, - }, - NegativeSpans: []histogram.Span{ - {Offset: 7, Length: 3}, - {Offset: 9, Length: 1}, - }, - PositiveBuckets: []float64{100, 200, 300}, - NegativeBuckets: []float64{400, 500, 600, 700}, + T: 1234, + H: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 3, + ZeroThreshold: 1.23, + ZeroCount: 456, + Count: 9001, + Sum: 789.1, + PositiveSpans: []histogram.Span{ + {Offset: 4, Length: 1}, + {Offset: 3, Length: 2}, + }, + NegativeSpans: []histogram.Span{ + {Offset: 7, Length: 3}, + {Offset: 9, Length: 1}, }, + PositiveBuckets: []float64{100, 200, 300}, + NegativeBuckets: []float64{400, 500, 600, 700}, }, }, { Metric: labels.FromStrings("name-2", "value-2"), - Point: promql.Point{ - T: 5678, - V: 9.01, - }, + T: 5678, + F: 9.01, }, { Metric: labels.FromStrings("name-3", "value-3"), - Point: promql.Point{ - T: 12340, - H: &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 4, - ZeroThreshold: 1.203, - ZeroCount: 4560, - Count: 90010, - Sum: 7890.1, - PositiveSpans: []histogram.Span{ - {Offset: 40, Length: 1}, - {Offset: 30, Length: 2}, - }, - NegativeSpans: []histogram.Span{ - {Offset: 70, Length: 3}, - {Offset: 90, Length: 1}, - }, - PositiveBuckets: []float64{1000, 2000, 3000}, - NegativeBuckets: []float64{4000, 5000, 6000, 7000}, + T: 12340, + H: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 4, + ZeroThreshold: 1.203, + ZeroCount: 4560, + Count: 90010, + Sum: 7890.1, + PositiveSpans: []histogram.Span{ + {Offset: 40, Length: 1}, + {Offset: 30, Length: 2}, }, + NegativeSpans: []histogram.Span{ + {Offset: 70, Length: 3}, + {Offset: 90, Length: 1}, + }, + PositiveBuckets: []float64{1000, 2000, 3000}, + NegativeBuckets: []float64{4000, 5000, 6000, 7000}, }, }, { Metric: labels.FromStrings("name-4", "value-4"), - Point: promql.Point{ - T: 56780, - V: 90.01, - }, + T: 56780, + F: 90.01, }, }, }, @@ -487,7 +467,6 @@ var protobufCodecScenarios = map[string]struct { Result: promql.Matrix{ { Metric: labels.EmptyLabels(), - Points: nil, }, }, }, @@ -514,7 +493,6 @@ var protobufCodecScenarios = map[string]struct { Result: promql.Matrix{ { Metric: 
labels.FromStrings("name-1", "value-1"), - Points: nil, }, }, }, @@ -541,7 +519,6 @@ var protobufCodecScenarios = map[string]struct { Result: promql.Matrix{ { Metric: labels.FromStrings("name-1", "value-1", "name-2", "value-2"), - Points: nil, }, }, }, @@ -568,10 +545,10 @@ var protobufCodecScenarios = map[string]struct { Result: promql.Matrix{ { Metric: labels.FromStrings("name-1", "value-1", "name-2", "value-2"), - Points: []promql.Point{ + Floats: []promql.FPoint{ { T: 1234, - V: 5.67, + F: 5.67, }, }, }, @@ -605,9 +582,9 @@ var protobufCodecScenarios = map[string]struct { Result: promql.Matrix{ { Metric: labels.FromStrings("name-1", "value-1", "name-2", "value-2"), - Points: []promql.Point{ - {T: 1234, V: 5.67}, - {T: 5678, V: 9.01}, + Floats: []promql.FPoint{ + {T: 1234, F: 5.67}, + {T: 5678, F: 9.01}, }, }, }, @@ -644,16 +621,16 @@ var protobufCodecScenarios = map[string]struct { Result: promql.Matrix{ { Metric: labels.FromStrings("name-1", "value-1"), - Points: []promql.Point{ - {T: 1234, V: 5.67}, - {T: 5678, V: 9.01}, + Floats: []promql.FPoint{ + {T: 1234, F: 5.67}, + {T: 5678, F: 9.01}, }, }, { Metric: labels.FromStrings("name-2", "value-2"), - Points: []promql.Point{ - {T: 12340, V: 50.67}, - {T: 56780, V: 90.01}, + Floats: []promql.FPoint{ + {T: 12340, F: 50.67}, + {T: 56780, F: 90.01}, }, }, }, @@ -703,29 +680,27 @@ var protobufCodecScenarios = map[string]struct { Result: promql.Matrix{ { Metric: labels.FromStrings("name-1", "value-1", "name-2", "value-2"), - Points: []promql.Point{ - { - T: 1234, - H: &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 3, - ZeroThreshold: 1.23, - ZeroCount: 456, - Count: 9001, - Sum: 789.1, - PositiveSpans: []histogram.Span{ - {Offset: 4, Length: 1}, - {Offset: 3, Length: 2}, - }, - NegativeSpans: []histogram.Span{ - {Offset: 7, Length: 3}, - {Offset: 9, Length: 1}, - }, - PositiveBuckets: []float64{100, 200, 300}, - NegativeBuckets: []float64{400, 500, 600, 700}, + Histograms: []promql.HPoint{{ + T: 1234, + H: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 3, + ZeroThreshold: 1.23, + ZeroCount: 456, + Count: 9001, + Sum: 789.1, + PositiveSpans: []histogram.Span{ + {Offset: 4, Length: 1}, + {Offset: 3, Length: 2}, }, + NegativeSpans: []histogram.Span{ + {Offset: 7, Length: 3}, + {Offset: 9, Length: 1}, + }, + PositiveBuckets: []float64{100, 200, 300}, + NegativeBuckets: []float64{400, 500, 600, 700}, }, - }, + }}, }, }, }, @@ -774,7 +749,17 @@ var protobufCodecScenarios = map[string]struct { Result: promql.Matrix{ { Metric: labels.FromStrings("name-1", "value-1", "name-2", "value-2"), - Points: []promql.Point{ + Floats: []promql.FPoint{ + { + T: 5678, + F: 9.01, + }, + { + T: 56780, + F: 90.01, + }, + }, + Histograms: []promql.HPoint{ { T: 1234, H: &histogram.FloatHistogram{ @@ -796,10 +781,6 @@ var protobufCodecScenarios = map[string]struct { NegativeBuckets: []float64{400, 500, 600, 700}, }, }, - { - T: 5678, - V: 9.01, - }, { T: 12340, H: &histogram.FloatHistogram{ @@ -821,10 +802,6 @@ var protobufCodecScenarios = map[string]struct { NegativeBuckets: []float64{4000, 5000, 6000, 7000}, }, }, - { - T: 56780, - V: 90.01, - }, }, }, }, diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index 9688b215c22..5bb136e634e 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -2174,7 +2174,7 @@ func newSample(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHi return sample{t, v, h, fh} } func (s sample) T() int64 { 
return s.t } -func (s sample) V() float64 { return s.v } +func (s sample) F() float64 { return s.v } func (s sample) H() *histogram.Histogram { return s.h } func (s sample) FH() *histogram.FloatHistogram { return s.fh } diff --git a/pkg/compactor/split_merge_grouper.go b/pkg/compactor/split_merge_grouper.go index 442acfee625..1059748b0dc 100644 --- a/pkg/compactor/split_merge_grouper.go +++ b/pkg/compactor/split_merge_grouper.go @@ -361,5 +361,5 @@ func getMaxTime(blocks []*metadata.Meta) int64 { // defaultGroupKeyWithoutShardID returns the default group key excluding ShardIDLabelName // when computing it. func defaultGroupKeyWithoutShardID(meta metadata.Thanos) string { - return defaultGroupKey(meta.Downsample.Resolution, labels.NewBuilder(labels.FromMap(meta.Labels)).Del(mimir_tsdb.CompactorShardIDExternalLabel).Labels(nil)) + return defaultGroupKey(meta.Downsample.Resolution, labels.NewBuilder(labels.FromMap(meta.Labels)).Del(mimir_tsdb.CompactorShardIDExternalLabel).Labels()) } diff --git a/pkg/compactor/split_merge_job.go b/pkg/compactor/split_merge_job.go index 7188faba7e1..441397feb1a 100644 --- a/pkg/compactor/split_merge_job.go +++ b/pkg/compactor/split_merge_job.go @@ -56,8 +56,8 @@ func (j *job) conflicts(other *job) bool { // are never merged together, so they can't conflict. Since all blocks within the same job are expected to have the same // downsample resolution and external labels, we just check the 1st block of each job. if len(j.blocks) > 0 && len(other.blocks) > 0 { - myLabels := labels.NewBuilder(labels.FromMap(j.blocksGroup.blocks[0].Thanos.Labels)).Del(tsdb.CompactorShardIDExternalLabel).Labels(nil) - otherLabels := labels.NewBuilder(labels.FromMap(other.blocksGroup.blocks[0].Thanos.Labels)).Del(tsdb.CompactorShardIDExternalLabel).Labels(nil) + myLabels := labels.NewBuilder(labels.FromMap(j.blocksGroup.blocks[0].Thanos.Labels)).Del(tsdb.CompactorShardIDExternalLabel).Labels() + otherLabels := labels.NewBuilder(labels.FromMap(other.blocksGroup.blocks[0].Thanos.Labels)).Del(tsdb.CompactorShardIDExternalLabel).Labels() if !labels.Equal(myLabels, otherLabels) { return false } diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 115fec08009..6db2fcf05db 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -1701,7 +1701,7 @@ func BenchmarkDistributor_Push(b *testing.B) { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } - metrics[i] = lbls.Labels(nil) + metrics[i] = lbls.Labels() samples[i] = mimirpb.Sample{ Value: float64(i), TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), @@ -1727,7 +1727,7 @@ func BenchmarkDistributor_Push(b *testing.B) { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } - metrics[i] = lbls.Labels(nil) + metrics[i] = lbls.Labels() samples[i] = mimirpb.Sample{ Value: float64(i), TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), @@ -1752,7 +1752,7 @@ func BenchmarkDistributor_Push(b *testing.B) { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } - metrics[i] = lbls.Labels(nil) + metrics[i] = lbls.Labels() samples[i] = mimirpb.Sample{ Value: float64(i), TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), @@ -1780,7 +1780,7 @@ func BenchmarkDistributor_Push(b *testing.B) { // Add a label with a very long name. 
lbls.Set(fmt.Sprintf("xxx_%0.2000d", 1), "xxx") - metrics[i] = lbls.Labels(nil) + metrics[i] = lbls.Labels() samples[i] = mimirpb.Sample{ Value: float64(i), TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), @@ -1808,7 +1808,7 @@ func BenchmarkDistributor_Push(b *testing.B) { // Add a label with a very long value. lbls.Set("xxx", fmt.Sprintf("xxx_%0.2000d", 1)) - metrics[i] = lbls.Labels(nil) + metrics[i] = lbls.Labels() samples[i] = mimirpb.Sample{ Value: float64(i), TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), @@ -1833,7 +1833,7 @@ func BenchmarkDistributor_Push(b *testing.B) { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } - metrics[i] = lbls.Labels(nil) + metrics[i] = lbls.Labels() samples[i] = mimirpb.Sample{ Value: float64(i), TimestampMs: time.Now().Add(time.Hour).UnixNano() / int64(time.Millisecond), diff --git a/pkg/distributor/ha_tracker_test.go b/pkg/distributor/ha_tracker_test.go index 179fbbfc24b..991236efca4 100644 --- a/pkg/distributor/ha_tracker_test.go +++ b/pkg/distributor/ha_tracker_test.go @@ -867,7 +867,7 @@ func fromLabelPairsToLabels(pairs []*dto.LabelPair) labels.Labels { for _, pair := range pairs { builder.Set(pair.GetName(), pair.GetValue()) } - return builder.Labels(nil) + return builder.Labels() } // getSumOfHistogramSampleCount returns the sum of samples count of histograms matching the provided metric name diff --git a/pkg/distributor/query_test.go b/pkg/distributor/query_test.go index 49dedbe9502..fc496a0b81c 100644 --- a/pkg/distributor/query_test.go +++ b/pkg/distributor/query_test.go @@ -201,7 +201,7 @@ func makeExemplarQueryResponse(numSeries int) *ingester_client.ExemplarQueryResp for i := 0; i < 10; i++ { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d_%d", i, rand.Intn(10))) } - ts[i].Labels = mimirpb.FromLabelsToLabelAdapters(lbls.Labels(nil)) + ts[i].Labels = mimirpb.FromLabelsToLabelAdapters(lbls.Labels()) ts[i].Exemplars = []mimirpb.Exemplar{{ Labels: []mimirpb.LabelAdapter{{Name: "traceid", Value: "trace1"}}, Value: float64(i), diff --git a/pkg/frontend/querymiddleware/querysharding.go b/pkg/frontend/querymiddleware/querysharding.go index 76c25460b69..a892608eff9 100644 --- a/pkg/frontend/querymiddleware/querysharding.go +++ b/pkg/frontend/querymiddleware/querysharding.go @@ -409,10 +409,10 @@ func promqlResultToSamples(res *promql.Result) ([]SampleStream, error) { ss := SampleStream{ Labels: mimirpb.FromLabelsToLabelAdapters(sample.Metric), } - if sample.Point.H != nil { - ss.Histograms = mimirpb.FromPointsToHistograms([]promql.Point{sample.Point}) + if sample.H != nil { + ss.Histograms = mimirpb.FromHPointsToHistograms([]promql.HPoint{{T: sample.T, H: sample.H}}) } else { - ss.Samples = mimirpb.FromPointsToSamples([]promql.Point{sample.Point}) + ss.Samples = mimirpb.FromFPointsToSamples([]promql.FPoint{{T: sample.T, F: sample.F}}) } res = append(res, ss) } @@ -424,11 +424,11 @@ func promqlResultToSamples(res *promql.Result) ([]SampleStream, error) { ss := SampleStream{ Labels: mimirpb.FromLabelsToLabelAdapters(series.Metric), } - samples := mimirpb.FromPointsToSamples(series.Points) + samples := mimirpb.FromFPointsToSamples(series.Floats) if len(samples) > 0 { ss.Samples = samples } - histograms := mimirpb.FromPointsToHistograms(series.Points) + histograms := mimirpb.FromHPointsToHistograms(series.Histograms) if len(histograms) > 0 { ss.Histograms = histograms } diff --git a/pkg/frontend/querymiddleware/querysharding_test.go b/pkg/frontend/querymiddleware/querysharding_test.go index 
8ebd7c2fee0..94cb897abc2 100644 --- a/pkg/frontend/querymiddleware/querysharding_test.go +++ b/pkg/frontend/querymiddleware/querysharding_test.go @@ -123,7 +123,7 @@ func compareExpectedAndActual(t *testing.T, expectedTs, actualTs int64, expected } } -func TestQueryShardingCorrectness(t *testing.T) { +func TestQuerySharding_Correctness(t *testing.T) { var ( numSeries = 1000 numStaleSeries = 100 @@ -867,22 +867,23 @@ func TestQuerySharding_FunctionCorrectness(t *testing.T) { newSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "buzz"), start.Add(-lookbackDelta), end, step, factor(8)), newSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "bazz"), start.Add(-lookbackDelta), end, step, arithmeticSequence(10)), }) - queryableNativeHistograms := storageSeriesQueryable([]*promql.StorageSeries{ - newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "barr"), start.Add(-lookbackDelta), end, step, factor(5)), - newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "bazz"), start.Add(-lookbackDelta), end, step, factor(7)), - newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "buzz"), start.Add(-lookbackDelta), end, step, factor(12)), - newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "bozz"), start.Add(-lookbackDelta), end, step, factor(11)), - newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "buzz"), start.Add(-lookbackDelta), end, step, factor(8)), - newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "bazz"), start.Add(-lookbackDelta), end, step, arithmeticSequence(10)), - }) - testQueryShardingFunctionCorrectness(t, queryableFloats, []queryShardingFunctionCorrectnessTest{}) - testQueryShardingFunctionCorrectness(t, queryableNativeHistograms, []queryShardingFunctionCorrectnessTest{ - {fn: "histogram_count"}, - {fn: "histogram_sum"}, - {fn: "histogram_fraction", tpl: `((0,0.5,bar1{}))`}, - {fn: "histogram_quantile", tpl: `((0.5,bar1{}))`}, - }) + /* + queryableNativeHistograms := storageSeriesQueryable([]*promql.StorageSeries{ + newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "barr"), start.Add(-lookbackDelta), end, step, factor(5)), + newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "bazz"), start.Add(-lookbackDelta), end, step, factor(7)), + newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "buzz"), start.Add(-lookbackDelta), end, step, factor(12)), + newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "bozz"), start.Add(-lookbackDelta), end, step, factor(11)), + newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "buzz"), start.Add(-lookbackDelta), end, step, factor(8)), + newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "bazz"), start.Add(-lookbackDelta), end, step, arithmeticSequence(10)), + }) + testQueryShardingFunctionCorrectness(t, queryableNativeHistograms, []queryShardingFunctionCorrectnessTest{ + {fn: "histogram_count"}, + {fn: "histogram_sum"}, + {fn: "histogram_fraction", tpl: `((0,0.5,bar1{}))`}, + {fn: "histogram_quantile", 
tpl: `((0.5,bar1{}))`}, + }) + */ } func testQueryShardingFunctionCorrectness(t *testing.T, queryable storage.Queryable, extraTests []queryShardingFunctionCorrectnessTest) { @@ -1785,11 +1786,13 @@ func TestPromqlResultToSampleStreams(t *testing.T) { input: &promql.Result{ Value: promql.Vector{ promql.Sample{ - Point: promql.Point{T: 1, V: 1}, + T: 1, + F: 1, Metric: labels.FromStrings("a", "a1", "b", "b1"), }, promql.Sample{ - Point: promql.Point{T: 2, V: 2}, + T: 2, + F: 2, Metric: labels.FromStrings("a", "a2", "b", "b2"), }, }, @@ -1828,16 +1831,16 @@ func TestPromqlResultToSampleStreams(t *testing.T) { Value: promql.Matrix{ { Metric: labels.FromStrings("a", "a1", "b", "b1"), - Points: []promql.Point{ - {T: 1, V: 1}, - {T: 2, V: 2}, + Floats: []promql.FPoint{ + {T: 1, F: 1}, + {T: 2, F: 2}, }, }, { Metric: labels.FromStrings("a", "a2", "b", "b2"), - Points: []promql.Point{ - {T: 1, V: 8}, - {T: 2, V: 9}, + Floats: []promql.FPoint{ + {T: 1, F: 8}, + {T: 2, F: 9}, }, }, }, @@ -2037,8 +2040,9 @@ func newNativeHistogramSeries(metric labels.Labels, from, to time.Time, step tim func newSeriesInner(metric labels.Labels, from, to time.Time, step time.Duration, gen generator, histogram bool) *promql.StorageSeries { var ( - points []promql.Point - prevValue *float64 + floats []promql.FPoint + histograms []promql.HPoint + prevValue *float64 ) for ts := from; ts.Unix() <= to.Unix(); ts = ts.Add(step) { @@ -2053,27 +2057,26 @@ func newSeriesInner(metric labels.Labels, from, to time.Time, step time.Duration continue } - var point promql.Point if histogram { - point = promql.Point{ + histograms = append(histograms, promql.HPoint{ T: t, H: generateTestHistogram(v), - } + }) } else { - point = promql.Point{ + floats = append(floats, promql.FPoint{ T: t, - V: v, - } + F: v, + }) } - points = append(points, point) } // Ensure series labels are sorted. sort.Sort(metric) return promql.NewStorageSeries(promql.Series{ - Metric: metric, - Points: points, + Metric: metric, + Floats: floats, + Histograms: histograms, }) } diff --git a/pkg/ingester/activeseries/active_series_test.go b/pkg/ingester/activeseries/active_series_test.go index 14944fa895e..402b0234f0e 100644 --- a/pkg/ingester/activeseries/active_series_test.go +++ b/pkg/ingester/activeseries/active_series_test.go @@ -179,8 +179,8 @@ func TestActiveSeries_Purge_WithMatchers(t *testing.T) { func TestActiveSeries_PurgeOpt(t *testing.T) { metric := labels.NewBuilder(labels.FromStrings("__name__", "logs")) - ls1 := metric.Set("_", "ypfajYg2lsv").Labels(nil) - ls2 := metric.Set("_", "KiqbryhzUpn").Labels(nil) + ls1 := metric.Set("_", "ypfajYg2lsv").Labels() + ls2 := metric.Set("_", "KiqbryhzUpn").Labels() currentTime := time.Now() c := NewActiveSeries(&Matchers{}, 59*time.Second) diff --git a/pkg/mimirpb/compat.go b/pkg/mimirpb/compat.go index a846b89df5c..cd6036e9fbb 100644 --- a/pkg/mimirpb/compat.go +++ b/pkg/mimirpb/compat.go @@ -341,23 +341,20 @@ func fromSpansToSpansProto(s []histogram.Span) []BucketSpan { return spans } -// FromPointsToSamples converts []promql.Point to []Sample. -func FromPointsToSamples(points []promql.Point) []Sample { +// FromFPointsToSamples converts []promql.FPoint to []Sample. 
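+// Unlike the removed FromPointsToSamples, it no longer needs to skip
+// histogram points: histograms now arrive separately as []promql.HPoint
+// and are converted by FromHPointsToHistograms.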
+func FromFPointsToSamples(points []promql.FPoint) []Sample { samples := make([]Sample, 0, len(points)) for _, point := range points { - if point.H != nil { - continue - } samples = append(samples, Sample{ TimestampMs: point.T, - Value: point.V, + Value: point.F, }) } return samples } -// FromPointsToHistograms converts []promql.Point to []FloatHistogramPair. -func FromPointsToHistograms(points []promql.Point) []FloatHistogramPair { +// FromHPointsToHistograms converts []promql.HPoint to []FloatHistogramPair. +func FromHPointsToHistograms(points []promql.HPoint) []FloatHistogramPair { samples := make([]FloatHistogramPair, 0, len(points)) for _, point := range points { h := point.H @@ -482,7 +479,7 @@ func SampleJsoniterEncode(ptr unsafe.Pointer, stream *jsoniter.Stream) { stream.WriteArrayStart() jsonutil.MarshalTimestamp(sample.TimestampMs, stream) stream.WriteMore() - jsonutil.MarshalValue(sample.Value, stream) + jsonutil.MarshalFloat(sample.Value, stream) stream.WriteArrayEnd() } diff --git a/pkg/mimirpb/compat_test.go b/pkg/mimirpb/compat_test.go index 98695a8ba11..8f84a97a16e 100644 --- a/pkg/mimirpb/compat_test.go +++ b/pkg/mimirpb/compat_test.go @@ -216,21 +216,21 @@ func BenchmarkFromLabelAdaptersToLabelsWithCopy(b *testing.B) { } } -func TestFromPointsToSamples(t *testing.T) { - input := []promql.Point{{T: 1, V: 2}, {T: 3, V: 4}, {T: 5, H: test.GenerateTestFloatHistogram(0)}} +func TestFromFPointsToSamples(t *testing.T) { + input := []promql.FPoint{{T: 1, F: 2}, {T: 3, F: 4}} expected := []Sample{{TimestampMs: 1, Value: 2}, {TimestampMs: 3, Value: 4}} - assert.Equal(t, expected, FromPointsToSamples(input)) + assert.Equal(t, expected, FromFPointsToSamples(input)) } -func TestFromPointsToHistograms(t *testing.T) { - input := []promql.Point{{T: 1, V: 2}, {T: 3, H: test.GenerateTestFloatHistogram(0)}, {T: 5, H: test.GenerateTestFloatHistogram(1)}} +func TestFromHPointsToHistograms(t *testing.T) { + input := []promql.HPoint{{T: 3, H: test.GenerateTestFloatHistogram(0)}, {T: 5, H: test.GenerateTestFloatHistogram(1)}} expected := []FloatHistogramPair{ {TimestampMs: 3, Histogram: *FloatHistogramFromPrometheusModel(test.GenerateTestFloatHistogram(0))}, {TimestampMs: 5, Histogram: *FloatHistogramFromPrometheusModel(test.GenerateTestFloatHistogram(1))}, } - assert.Equal(t, expected, FromPointsToHistograms(input)) + assert.Equal(t, expected, FromHPointsToHistograms(input)) } func TestPreallocatingMetric(t *testing.T) { diff --git a/pkg/mimirpb/mimir.pb.go b/pkg/mimirpb/mimir.pb.go index 6bf303475e4..86d665e8492 100644 --- a/pkg/mimirpb/mimir.pb.go +++ b/pkg/mimirpb/mimir.pb.go @@ -390,7 +390,6 @@ func (m *LabelPair) GetValue() []byte { } type Sample struct { - // Fields order MUST match promql.Point so that we can cast types between them. TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` } diff --git a/pkg/mimirpb/mimir.proto b/pkg/mimirpb/mimir.proto index b3e79b63a84..634874e88a6 100644 --- a/pkg/mimirpb/mimir.proto +++ b/pkg/mimirpb/mimir.proto @@ -48,7 +48,6 @@ message LabelPair { } message Sample { - // Fields order MUST match promql.Point so that we can cast types between them. 
int64 timestamp_ms = 2; double value = 1; } diff --git a/pkg/querier/block_test.go b/pkg/querier/block_test.go index c03b08a12b3..cbea1aea3bd 100644 --- a/pkg/querier/block_test.go +++ b/pkg/querier/block_test.go @@ -430,24 +430,24 @@ func verifyNextSeries(t *testing.T, ss storage.SeriesSet, labels labels.Labels, // createAggrChunkWithSineSamples takes a min/maxTime and a step duration, it generates a chunk given these specs. // The minTime and maxTime are both inclusive. func createAggrChunkWithSineSamples(minTime, maxTime time.Time, step time.Duration) storepb.AggrChunk { - var samples []promql.Point + var samples []promql.FPoint minT := minTime.UnixNano() / 1000000 maxT := maxTime.UnixNano() / 1000000 stepMillis := step.Milliseconds() for t := minT; t <= maxT; t += stepMillis { - samples = append(samples, promql.Point{T: t, V: math.Sin(float64(t))}) + samples = append(samples, promql.FPoint{T: t, F: math.Sin(float64(t))}) } return createAggrChunk(minT, maxT, samples...) } -func createAggrChunkWithSamples(samples ...promql.Point) storepb.AggrChunk { +func createAggrChunkWithSamples(samples ...promql.FPoint) storepb.AggrChunk { return createAggrChunk(samples[0].T, samples[len(samples)-1].T, samples...) } -func createAggrChunk(minTime, maxTime int64, samples ...promql.Point) storepb.AggrChunk { +func createAggrChunk(minTime, maxTime int64, samples ...promql.FPoint) storepb.AggrChunk { // Ensure samples are sorted by timestamp. sort.Slice(samples, func(i, j int) bool { return samples[i].T < samples[j].T @@ -460,7 +460,7 @@ func createAggrChunk(minTime, maxTime int64, samples ...promql.Point) storepb.Ag } for _, s := range samples { - appender.Append(s.T, s.V) + appender.Append(s.T, s.F) } return storepb.AggrChunk{ diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 3eb05495def..72125dead88 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -1640,14 +1640,14 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { series3 := labels.FromStrings("__name__", "metric_3_ooo") series4 := labels.FromStrings("__name__", "metric_4_ooo_and_overlapping") - generateSeriesSamples := func(value float64) []promql.Point { - return []promql.Point{ - {T: 1589759955000, V: value}, - {T: 1589759970000, V: value}, - {T: 1589759985000, V: value}, - {T: 1589760000000, V: value}, - {T: 1589760015000, V: value}, - {T: 1589760030000, V: value}, + generateSeriesSamples := func(value float64) []promql.FPoint { + return []promql.FPoint{ + {T: 1589759955000, F: value}, + {T: 1589759970000, F: value}, + {T: 1589759985000, F: value}, + {T: 1589760000000, F: value}, + {T: 1589760015000, F: value}, + {T: 1589760030000, F: value}, } } @@ -1675,8 +1675,8 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { mockSeriesResponseWithSamples(series2, series2Samples[3:]...), // Second half. 
}, expected: promql.Matrix{ - {Metric: series1, Points: series1Samples}, - {Metric: series2, Points: series2Samples}, + {Metric: series1, Floats: series1Samples}, + {Metric: series2, Floats: series2Samples}, }, }, "should query metrics with out-of-order chunks": { @@ -1696,8 +1696,8 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { mockSeriesResponseWithSamples(series4, series4Samples[4:6]...), }, expected: promql.Matrix{ - {Metric: series3, Points: series3Samples}, - {Metric: series4, Points: series4Samples}, + {Metric: series3, Floats: series3Samples}, + {Metric: series4, Floats: series4Samples}, }, }, } @@ -2017,10 +2017,10 @@ func (m *blocksStoreLimitsMock) S3SSEKMSEncryptionContext(_ string) string { } func mockSeriesResponse(lbls labels.Labels, timeMillis int64, value float64) *storepb.SeriesResponse { - return mockSeriesResponseWithSamples(lbls, promql.Point{T: timeMillis, V: value}) + return mockSeriesResponseWithSamples(lbls, promql.FPoint{T: timeMillis, F: value}) } -func mockSeriesResponseWithSamples(lbls labels.Labels, samples ...promql.Point) *storepb.SeriesResponse { +func mockSeriesResponseWithSamples(lbls labels.Labels, samples ...promql.FPoint) *storepb.SeriesResponse { return mockSeriesResponseWithChunks(lbls, createAggrChunkWithSamples(samples...)) } diff --git a/pkg/querier/distributor_queryable_test.go b/pkg/querier/distributor_queryable_test.go index ccd5082fa76..abc37d82e16 100644 --- a/pkg/querier/distributor_queryable_test.go +++ b/pkg/querier/distributor_queryable_test.go @@ -487,7 +487,7 @@ func BenchmarkDistributorQueryable_Select(b *testing.B) { for i := 0; i < numLabelsPerSeries-1; i++ { commonLabelsBuilder.Set(fmt.Sprintf("label_%d", i), fmt.Sprintf("value_%d", i)) } - commonLabels := commonLabelsBuilder.Labels(nil) + commonLabels := commonLabelsBuilder.Labels() response := &client.QueryStreamResponse{Chunkseries: make([]client.TimeSeriesChunk, 0, numSeries)} for i := 0; i < numSeries; i++ { @@ -495,7 +495,7 @@ func BenchmarkDistributorQueryable_Select(b *testing.B) { lbls.Set("series_id", strconv.Itoa(i)) response.Chunkseries = append(response.Chunkseries, client.TimeSeriesChunk{ - Labels: mimirpb.FromLabelsToLabelAdapters(lbls.Labels(nil)), + Labels: mimirpb.FromLabelsToLabelAdapters(lbls.Labels()), Chunks: clientChunks, }) } diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 5dddb642f27..f9fcb944a03 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -46,8 +46,9 @@ type query struct { samples func(from, through time.Time, step time.Duration) int step time.Duration - valueType func(ts model.Time) chunkenc.ValueType - assertPoint func(t testing.TB, ts int64, point promql.Point) + valueType func(ts model.Time) chunkenc.ValueType + assertFPoint func(t testing.TB, ts int64, point promql.FPoint) + assertHPoint func(t testing.TB, ts int64, point promql.HPoint) } func TestQuerier(t *testing.T) { @@ -68,10 +69,10 @@ func TestQuerier(t *testing.T) { return int(through.Sub(from) / step) }, valueType: func(_ model.Time) chunkenc.ValueType { return chunkenc.ValFloat }, - assertPoint: func(t testing.TB, ts int64, point promql.Point) { - require.Equal(t, promql.Point{ + assertFPoint: func(t testing.TB, ts int64, point promql.FPoint) { + require.Equal(t, promql.FPoint{ T: ts + int64((sampleRate*4)/time.Millisecond), - V: 1000.0, + F: 1000.0, }, point) }, }, @@ -86,10 +87,10 @@ func TestQuerier(t *testing.T) { return int(through.Sub(from)/step) + 1 }, valueType: func(_ model.Time) chunkenc.ValueType { return 
chunkenc.ValFloat }, - assertPoint: func(t testing.TB, ts int64, point promql.Point) { - require.Equal(t, promql.Point{ + assertFPoint: func(t testing.TB, ts int64, point promql.FPoint) { + require.Equal(t, promql.FPoint{ T: ts, - V: float64(ts), + F: float64(ts), }, point) }, }, @@ -103,10 +104,10 @@ func TestQuerier(t *testing.T) { return int(through.Sub(from) / step) }, valueType: func(_ model.Time) chunkenc.ValueType { return chunkenc.ValFloat }, - assertPoint: func(t testing.TB, ts int64, point promql.Point) { - require.Equal(t, promql.Point{ + assertFPoint: func(t testing.TB, ts int64, point promql.FPoint) { + require.Equal(t, promql.FPoint{ T: ts + int64((sampleRate*4)/time.Millisecond)*10, - V: 1000.0, + F: 1000.0, }, point) }, }, @@ -120,10 +121,10 @@ func TestQuerier(t *testing.T) { return int(through.Sub(from)/step) + 1 }, valueType: func(_ model.Time) chunkenc.ValueType { return chunkenc.ValFloat }, - assertPoint: func(t testing.TB, ts int64, point promql.Point) { - require.Equal(t, promql.Point{ + assertFPoint: func(t testing.TB, ts int64, point promql.FPoint) { + require.Equal(t, promql.FPoint{ T: ts, - V: float64(ts), + F: float64(ts), }, point) }, }, @@ -136,7 +137,7 @@ func TestQuerier(t *testing.T) { return int(through.Sub(from)/step) + 1 }, valueType: func(_ model.Time) chunkenc.ValueType { return chunkenc.ValFloatHistogram }, - assertPoint: func(t testing.TB, ts int64, point promql.Point) { + assertHPoint: func(t testing.TB, ts int64, point promql.HPoint) { require.Equal(t, ts, point.T) test.RequireFloatHistogramEqual(t, test.GenerateTestHistogram(int(ts)).ToFloat(), point.H) }, @@ -150,7 +151,7 @@ func TestQuerier(t *testing.T) { return int(through.Sub(from)/step) + 1 }, valueType: func(_ model.Time) chunkenc.ValueType { return chunkenc.ValFloatHistogram }, - assertPoint: func(t testing.TB, ts int64, point promql.Point) { + assertHPoint: func(t testing.TB, ts int64, point promql.HPoint) { require.Equal(t, ts, point.T) test.RequireFloatHistogramEqual(t, test.GenerateTestFloatHistogram(int(ts)), point.H) }, @@ -164,7 +165,7 @@ func TestQuerier(t *testing.T) { return int(through.Sub(from)/step) + 1 }, valueType: func(_ model.Time) chunkenc.ValueType { return chunkenc.ValFloatHistogram }, - assertPoint: func(t testing.TB, ts int64, point promql.Point) { + assertHPoint: func(t testing.TB, ts int64, point promql.HPoint) { require.Equal(t, ts, point.T) test.RequireFloatHistogramEqual(t, test.GenerateTestFloatHistogram(int(ts)), point.H) }, @@ -183,17 +184,18 @@ func TestQuerier(t *testing.T) { } return chunkenc.ValFloat }, - assertPoint: func(t testing.TB, ts int64, point promql.Point) { - if ts > int64(secondChunkStart) { - require.Equal(t, ts, point.T) - test.RequireFloatHistogramEqual(t, test.GenerateTestFloatHistogram(int(ts)), point.H) - return - } - require.Equal(t, promql.Point{ + assertFPoint: func(t testing.TB, ts int64, point promql.FPoint) { + require.True(t, ts <= int64(secondChunkStart)) + require.Equal(t, promql.FPoint{ T: ts, - V: float64(ts), + F: float64(ts), }, point) }, + assertHPoint: func(t testing.TB, ts int64, point promql.HPoint) { + require.True(t, ts > int64(secondChunkStart)) + require.Equal(t, ts, point.T) + test.RequireFloatHistogramEqual(t, test.GenerateTestFloatHistogram(int(ts)), point.H) + }, }, } @@ -300,14 +302,14 @@ func TestQuerier_QueryableReturnsChunksOutsideQueriedRange(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, m.Len()) - assert.Equal(t, []promql.Point{ - {T: 1635746400000, V: 29}, - {T: 1635746460000, V: 31}, - {T: 
1635746520000, V: 37}, - {T: 1635746580000, V: 37}, - {T: 1635746640000, V: 78}, - {T: 1635746700000, V: 80}, - }, m[0].Points) + assert.Equal(t, []promql.FPoint{ + {T: 1635746400000, F: 29}, + {T: 1635746460000, F: 31}, + {T: 1635746520000, F: 37}, + {T: 1635746580000, F: 37}, + {T: 1635746640000, F: 78}, + {T: 1635746700000, F: 80}, + }, m[0].Floats) } // TestBatchMergeChunks is a regression test to catch one particular case @@ -385,7 +387,8 @@ func TestBatchMergeChunks(t *testing.T) { require.NoError(t, err) require.Equal(t, 2, m.Len()) - require.ElementsMatch(t, m[0].Points, m[1].Points) + require.ElementsMatch(t, m[0].Floats, m[1].Floats) + require.ElementsMatch(t, m[0].Histograms, m[1].Histograms) } func mockTSDB(t *testing.T, mint model.Time, samples int, step, chunkOffset time.Duration, samplesPerChunk int, valueType func(model.Time) chunkenc.ValueType) (storage.Queryable, model.Time) { @@ -994,10 +997,14 @@ func testRangeQuery(t testing.TB, queryable storage.Queryable, end model.Time, q require.Len(t, m, 1) series := m[0] require.Equal(t, q.labels, series.Metric) - require.Equal(t, q.samples(from, through, step), len(series.Points)) + require.Equal(t, q.samples(from, through, step), len(series.Floats)+len(series.Histograms)) var ts int64 - for _, point := range series.Points { - q.assertPoint(t, ts, point) + for _, point := range series.Floats { + q.assertFPoint(t, ts, point) + ts += int64(step / time.Millisecond) + } + for _, point := range series.Histograms { + q.assertHPoint(t, ts, point) ts += int64(step / time.Millisecond) } return r diff --git a/pkg/querier/tenantfederation/tenant_federation.go b/pkg/querier/tenantfederation/tenant_federation.go index 47ceb2a21bc..e5d3a65e6e9 100644 --- a/pkg/querier/tenantfederation/tenant_federation.go +++ b/pkg/querier/tenantfederation/tenant_federation.go @@ -83,7 +83,7 @@ func setLabelsRetainExisting(src labels.Labels, additionalLabels ...labels.Label lb.Set(additionalL.Name, additionalL.Value) } - return lb.Labels(labels.EmptyLabels()) + return lb.Labels() } func sliceToSet(values []string) map[string]struct{} { diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 0d3e4c6b797..3a67252c656 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -236,7 +236,7 @@ type RulesManager interface { Stop() // Update rules manager state. - Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, ruleGroupPostProcessFunc rules.RuleGroupPostProcessFunc) error + Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc rules.GroupEvalIterationFunc) error // RuleGroups returns current rules groups. 
RuleGroups() []*rules.Group diff --git a/pkg/ruler/manager_test.go b/pkg/ruler/manager_test.go index 4fc14a1d214..20539da3562 100644 --- a/pkg/ruler/manager_test.go +++ b/pkg/ruler/manager_test.go @@ -173,7 +173,7 @@ func (m *mockRulesManager) Stop() { close(m.done) } -func (m *mockRulesManager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, ruleGroupPostProcessFunc rules.RuleGroupPostProcessFunc) error { +func (m *mockRulesManager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc rules.GroupEvalIterationFunc) error { return nil } diff --git a/pkg/ruler/remotequerier_decoder.go b/pkg/ruler/remotequerier_decoder.go index db93006ce81..6a973fa25f2 100644 --- a/pkg/ruler/remotequerier_decoder.go +++ b/pkg/ruler/remotequerier_decoder.go @@ -82,10 +82,8 @@ func (jsonDecoder) vectorToPromQLVector(vec model.Vector) promql.Vector { retVal = append(retVal, promql.Sample{ Metric: b.Labels(), - Point: promql.Point{ - V: float64(p.Value), - T: int64(p.Timestamp), - }, + F: float64(p.Value), + T: int64(p.Timestamp), }) } return retVal @@ -93,10 +91,8 @@ func (jsonDecoder) vectorToPromQLVector(vec model.Vector) promql.Vector { func (jsonDecoder) scalarToPromQLVector(sc *model.Scalar) promql.Vector { return promql.Vector{promql.Sample{ - Point: promql.Point{ - V: float64(sc.Value), - T: int64(sc.Timestamp), - }, + F: float64(sc.Value), + T: int64(sc.Timestamp), Metric: labels.Labels{}, }} } @@ -129,10 +125,8 @@ func (d protobufDecoder) Decode(body []byte) (promql.Vector, error) { func (d protobufDecoder) decodeScalar(s *mimirpb.ScalarData) promql.Vector { return promql.Vector{promql.Sample{ - Point: promql.Point{ - V: s.Value, - T: s.TimestampMs, - }, + F: s.Value, + T: s.TimestampMs, Metric: labels.Labels{}, }} } @@ -149,10 +143,8 @@ func (d protobufDecoder) decodeVector(v *mimirpb.VectorData) (promql.Vector, err samples[i] = promql.Sample{ Metric: m, - Point: promql.Point{ - V: s.Value, - T: s.TimestampMs, - }, + F: s.Value, + T: s.TimestampMs, } } @@ -164,10 +156,8 @@ func (d protobufDecoder) decodeVector(v *mimirpb.VectorData) (promql.Vector, err samples[floatSampleCount+i] = promql.Sample{ Metric: m, - Point: promql.Point{ - H: s.Histogram.ToPrometheusModel(), - T: s.TimestampMs, - }, + H: s.Histogram.ToPrometheusModel(), + T: s.TimestampMs, } } diff --git a/pkg/ruler/remotequerier_test.go b/pkg/ruler/remotequerier_test.go index be5f290b65e..b8b581a0e8c 100644 --- a/pkg/ruler/remotequerier_test.go +++ b/pkg/ruler/remotequerier_test.go @@ -146,7 +146,8 @@ func TestRemoteQuerier_QueryJSONDecoding(t *testing.T) { expected: promql.Vector{ { Metric: labels.FromStrings("foo", "bar"), - Point: promql.Point{T: 1649092025515, V: 1.23}, + T: 1649092025515, + F: 1.23, }, }, }, @@ -170,11 +171,13 @@ func TestRemoteQuerier_QueryJSONDecoding(t *testing.T) { expected: promql.Vector{ { Metric: labels.FromStrings("foo", "bar"), - Point: promql.Point{T: 1649092025515, V: 1.23}, + T: 1649092025515, + F: 1.23, }, { Metric: labels.FromStrings("bar", "baz"), - Point: promql.Point{T: 1649092025515, V: 4.56}, + T: 1649092025515, + F: 4.56, }, }, }, @@ -186,7 +189,8 @@ func TestRemoteQuerier_QueryJSONDecoding(t *testing.T) { expected: promql.Vector{ { Metric: labels.EmptyLabels(), - Point: promql.Point{T: 1649092025515, V: 1.23}, + T: 1649092025515, + F: 1.23, }, }, }, @@ -302,7 +306,8 @@ func TestRemoteQuerier_QueryProtobufDecoding(t *testing.T) { expected: promql.Vector{ { Metric: 
labels.FromStrings("foo", "bar"), - Point: promql.Point{T: 1649092025515, V: 1.23}, + T: 1649092025515, + F: 1.23, }, }, }, @@ -329,11 +334,13 @@ func TestRemoteQuerier_QueryProtobufDecoding(t *testing.T) { expected: promql.Vector{ { Metric: labels.FromStrings("foo", "bar"), - Point: promql.Point{T: 1649092025515, V: 1.23}, + T: 1649092025515, + F: 1.23, }, { Metric: labels.FromStrings("bar", "baz"), - Point: promql.Point{T: 1649092025515, V: 4.56}, + T: 1649092025515, + F: 4.56, }, }, }, @@ -355,7 +362,8 @@ func TestRemoteQuerier_QueryProtobufDecoding(t *testing.T) { expected: promql.Vector{ { Metric: labels.FromStrings("bar", "baz", "foo", "blah"), - Point: promql.Point{T: 1649092025515, V: 1.23}, + T: 1649092025515, + F: 1.23, }, }, }, @@ -377,7 +385,8 @@ func TestRemoteQuerier_QueryProtobufDecoding(t *testing.T) { expected: promql.Vector{ { Metric: labels.FromStrings("foo", "baz"), - Point: promql.Point{T: 1649092025515, H: &promqlHistogram}, + T: 1649092025515, + H: &promqlHistogram, }, }, }, @@ -406,11 +415,13 @@ func TestRemoteQuerier_QueryProtobufDecoding(t *testing.T) { expected: promql.Vector{ { Metric: labels.FromStrings("foo", "baz"), - Point: promql.Point{T: 1649092025515, V: 1.23}, + T: 1649092025515, + F: 1.23, }, { Metric: labels.FromStrings("foo", "bar"), - Point: promql.Point{T: 1649092025515, H: &promqlHistogram}, + T: 1649092025515, + H: &promqlHistogram, }, }, }, @@ -444,7 +455,8 @@ func TestRemoteQuerier_QueryProtobufDecoding(t *testing.T) { expected: promql.Vector{ { Metric: labels.EmptyLabels(), - Point: promql.Point{T: 1649092025515, V: 1.23}, + T: 1649092025515, + F: 1.23, }, }, }, diff --git a/pkg/storegateway/bucket_test.go b/pkg/storegateway/bucket_test.go index 647fdbd014a..54bcd349a87 100644 --- a/pkg/storegateway/bucket_test.go +++ b/pkg/storegateway/bucket_test.go @@ -2671,7 +2671,7 @@ func createHeadWithSeries(t testing.TB, j int, opts headGenOptions) (*tsdb.Head, lbls.Set("i", fmt.Sprintf("%07d%s", tsLabel, labelLongSuffix)) ref, err := app.Append( 0, - lbls.Labels(nil), + lbls.Labels(), int64(tsLabel)*opts.ScrapeInterval.Milliseconds(), opts.Random.Float64(), ) diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go index dde2398e8aa..bd56fae4aaf 100644 --- a/pkg/storegateway/gateway_test.go +++ b/pkg/storegateway/gateway_test.go @@ -1618,7 +1618,7 @@ func (s sample) T() int64 { return s.t } -func (s sample) V() float64 { +func (s sample) F() float64 { return s.v } diff --git a/tools/tsdb-chunks/main_test.go b/tools/tsdb-chunks/main_test.go index 606a5b6b054..34aa0bde554 100644 --- a/tools/tsdb-chunks/main_test.go +++ b/tools/tsdb-chunks/main_test.go @@ -73,7 +73,7 @@ type sample struct { } func (s sample) T() int64 { return s.t } -func (s sample) V() float64 { return s.v } +func (s sample) F() float64 { return s.v } func (s sample) H() *histogram.Histogram { return s.h } func (s sample) FH() *histogram.FloatHistogram { return s.fh } diff --git a/tools/tsdb-index-health/main_test.go b/tools/tsdb-index-health/main_test.go index 899e3780ab5..0c9dfe606fb 100644 --- a/tools/tsdb-index-health/main_test.go +++ b/tools/tsdb-index-health/main_test.go @@ -74,7 +74,7 @@ type sample struct { } func (s sample) T() int64 { return s.t } -func (s sample) V() float64 { return s.v } +func (s sample) F() float64 { return s.v } func (s sample) H() *histogram.Histogram { return s.h } func (s sample) FH() *histogram.FloatHistogram { return s.fh } diff --git a/tools/tsdb-print-chunk/main_test.go b/tools/tsdb-print-chunk/main_test.go index 
f8522695b0c..f86409b41a7 100644 --- a/tools/tsdb-print-chunk/main_test.go +++ b/tools/tsdb-print-chunk/main_test.go @@ -73,7 +73,7 @@ type sample struct { } func (s sample) T() int64 { return s.t } -func (s sample) V() float64 { return s.v } +func (s sample) F() float64 { return s.v } func (s sample) H() *histogram.Histogram { return s.h } func (s sample) FH() *histogram.FloatHistogram { return s.fh } From dff7206eede85a08cce5888b4d94ff6df822db32 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Wed, 19 Apr 2023 00:29:40 +0800 Subject: [PATCH 3/4] Update code to work with latest mimir-prometheus commits --- pkg/frontend/querymiddleware/querysharding.go | 6 ++-- .../querymiddleware/querysharding_test.go | 2 +- .../split_by_instant_interval.go | 2 +- pkg/querier/blocks_store_queryable_test.go | 4 +-- pkg/querier/duplicates_test.go | 5 ++-- pkg/querier/querier_test.go | 30 +++++++++---------- 6 files changed, 26 insertions(+), 23 deletions(-) diff --git a/pkg/frontend/querymiddleware/querysharding.go b/pkg/frontend/querymiddleware/querysharding.go index a892608eff9..1e807bd01b1 100644 --- a/pkg/frontend/querymiddleware/querysharding.go +++ b/pkg/frontend/querymiddleware/querysharding.go @@ -148,7 +148,7 @@ func (s *querySharding) Do(ctx context.Context, r Request) (Response, error) { r = r.WithQuery(shardedQuery) shardedQueryable := newShardedQueryable(r, s.next) - qry, err := newQuery(r, s.engine, lazyquery.NewLazyQueryable(shardedQueryable)) + qry, err := newQuery(ctx, r, s.engine, lazyquery.NewLazyQueryable(shardedQueryable)) if err != nil { return nil, apierror.New(apierror.TypeBadData, err.Error()) } @@ -168,10 +168,11 @@ func (s *querySharding) Do(ctx context.Context, r Request) (Response, error) { }, nil } -func newQuery(r Request, engine *promql.Engine, queryable storage.Queryable) (promql.Query, error) { +func newQuery(ctx context.Context, r Request, engine *promql.Engine, queryable storage.Queryable) (promql.Query, error) { switch r := r.(type) { case *PrometheusRangeQueryRequest: return engine.NewRangeQuery( + ctx, queryable, nil, r.GetQuery(), @@ -181,6 +182,7 @@ func newQuery(r Request, engine *promql.Engine, queryable storage.Queryable) (pr ) case *PrometheusInstantQueryRequest: return engine.NewInstantQuery( + ctx, queryable, nil, r.GetQuery(), diff --git a/pkg/frontend/querymiddleware/querysharding_test.go b/pkg/frontend/querymiddleware/querysharding_test.go index 94cb897abc2..5271fc38ce5 100644 --- a/pkg/frontend/querymiddleware/querysharding_test.go +++ b/pkg/frontend/querymiddleware/querysharding_test.go @@ -1936,7 +1936,7 @@ type downstreamHandler struct { } func (h *downstreamHandler) Do(ctx context.Context, r Request) (Response, error) { - qry, err := newQuery(r, h.engine, h.queryable) + qry, err := newQuery(ctx, r, h.engine, h.queryable) if err != nil { return nil, err } diff --git a/pkg/frontend/querymiddleware/split_by_instant_interval.go b/pkg/frontend/querymiddleware/split_by_instant_interval.go index d659eea9070..5c327a2efa5 100644 --- a/pkg/frontend/querymiddleware/split_by_instant_interval.go +++ b/pkg/frontend/querymiddleware/split_by_instant_interval.go @@ -174,7 +174,7 @@ func (s *splitInstantQueryByIntervalMiddleware) Do(ctx context.Context, req Requ req = req.WithQuery(instantSplitQuery.String()).WithTotalQueriesHint(int32(mapperStats.GetSplitQueries())) shardedQueryable := newShardedQueryable(req, s.next) - qry, err := newQuery(req, s.engine, lazyquery.NewLazyQueryable(shardedQueryable)) + qry, err := newQuery(ctx, req, s.engine, 
diff --git a/pkg/frontend/querymiddleware/split_by_instant_interval.go b/pkg/frontend/querymiddleware/split_by_instant_interval.go
index d659eea9070..5c327a2efa5 100644
--- a/pkg/frontend/querymiddleware/split_by_instant_interval.go
+++ b/pkg/frontend/querymiddleware/split_by_instant_interval.go
@@ -174,7 +174,7 @@ func (s *splitInstantQueryByIntervalMiddleware) Do(ctx context.Context, req Requ
 	req = req.WithQuery(instantSplitQuery.String()).WithTotalQueriesHint(int32(mapperStats.GetSplitQueries()))
 	shardedQueryable := newShardedQueryable(req, s.next)
 
-	qry, err := newQuery(req, s.engine, lazyquery.NewLazyQueryable(shardedQueryable))
+	qry, err := newQuery(ctx, req, s.engine, lazyquery.NewLazyQueryable(shardedQueryable))
 	if err != nil {
 		level.Warn(spanLog).Log("msg", "failed to create new query from splittable request", "err", err)
 		return nil, apierror.New(apierror.TypeBadData, err.Error())
diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go
index 72125dead88..9975de60eae 100644
--- a/pkg/querier/blocks_store_queryable_test.go
+++ b/pkg/querier/blocks_store_queryable_test.go
@@ -1744,10 +1744,10 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) {
 			})
 
 			// Query metrics.
-			q, err := engine.NewRangeQuery(queryable, nil, testData.query, queryStart, queryEnd, 15*time.Second)
+			ctx := user.InjectOrgID(context.Background(), "user-1")
+			q, err := engine.NewRangeQuery(ctx, queryable, nil, testData.query, queryStart, queryEnd, 15*time.Second)
 			require.NoError(t, err)
 
-			ctx := user.InjectOrgID(context.Background(), "user-1")
 			res := q.Exec(ctx)
 			require.NoError(t, err)
 			require.NoError(t, res.Err)
diff --git a/pkg/querier/duplicates_test.go b/pkg/querier/duplicates_test.go
index b0e4beb033d..3edf6a0e966 100644
--- a/pkg/querier/duplicates_test.go
+++ b/pkg/querier/duplicates_test.go
@@ -101,10 +101,11 @@ func runPromQLAndGetJSONResult(t *testing.T, query string, ts mimirpb.TimeSeries
 	start := model.Time(ts.Samples[0].TimestampMs).Time()
 	end := model.Time(ts.Samples[len(ts.Samples)-1].TimestampMs).Time()
 
-	q, err := engine.NewRangeQuery(tq, nil, query, start, end, step)
+	ctx := context.Background()
+	q, err := engine.NewRangeQuery(ctx, tq, nil, query, start, end, step)
 	require.NoError(t, err)
 
-	res := q.Exec(context.Background())
+	res := q.Exec(ctx)
 	require.NoError(t, err)
 
 	out, err := json.Marshal(res)
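The same constructor change explains a recurring reshuffle in the test diffs that follow: because the engine now receives the context at query creation, the tenant-scoped context has to be built before `NewRangeQuery` rather than just before `Exec`. Sketched pattern, assuming the `user` package these tests import for tenant injection (import path hedged):

```go
package querier

import (
	"context"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/storage"
	"github.com/weaveworks/common/user" // assumed to be the user package these tests import
)

// execTenantQuery is illustrative: the tenant-scoped ctx now feeds both
// query construction and execution, which forces InjectOrgID above NewRangeQuery.
func execTenantQuery(engine *promql.Engine, queryable storage.Queryable, start, end time.Time) (*promql.Result, error) {
	ctx := user.InjectOrgID(context.Background(), "user-1")
	q, err := engine.NewRangeQuery(ctx, queryable, nil, `sum({__name__=~".+"})`, start, end, 15*time.Second)
	if err != nil {
		return nil, err
	}
	defer q.Close()
	return q.Exec(ctx), nil
}
```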
diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go
index f9fcb944a03..fed338363c1 100644
--- a/pkg/querier/querier_test.go
+++ b/pkg/querier/querier_test.go
@@ -293,10 +293,10 @@ func TestQuerier_QueryableReturnsChunksOutsideQueriedRange(t *testing.T) {
 	})
 	queryable, _, _ := New(cfg, overrides, distributor, nil, nil, logger, nil)
 
-	query, err := engine.NewRangeQuery(queryable, nil, `sum({__name__=~".+"})`, queryStart, queryEnd, queryStep)
+	ctx := user.InjectOrgID(context.Background(), "user-1")
+	query, err := engine.NewRangeQuery(ctx, queryable, nil, `sum({__name__=~".+"})`, queryStart, queryEnd, queryStep)
 	require.NoError(t, err)
 
-	ctx := user.InjectOrgID(context.Background(), "user-1")
 	r := query.Exec(ctx)
 	m, err := r.Matrix()
 	require.NoError(t, err)
@@ -378,10 +378,10 @@ func TestBatchMergeChunks(t *testing.T) {
 	})
 	queryable, _, _ := New(cfg, overrides, distributor, nil, nil, logger, nil)
 
-	query, err := engine.NewRangeQuery(queryable, nil, `rate({__name__=~".+"}[10s])`, queryStart, queryEnd, queryStep)
+	ctx := user.InjectOrgID(context.Background(), "user-1")
+	query, err := engine.NewRangeQuery(ctx, queryable, nil, `rate({__name__=~".+"}[10s])`, queryStart, queryEnd, queryStep)
 	require.NoError(t, err)
 
-	ctx := user.InjectOrgID(context.Background(), "user-1")
 	r := query.Exec(ctx)
 	m, err := r.Matrix()
 	require.NoError(t, err)
@@ -513,10 +513,10 @@ func TestQuerier_QueryIngestersWithinConfig(t *testing.T) {
 			var storeQueryables []QueryableWithFilter
 			queryable, _, _ := New(cfg, overrides, distributor, storeQueryables, nil, log.NewNopLogger(), nil)
 
-			query, err := engine.NewRangeQuery(queryable, nil, "dummy", c.mint, c.maxt, 1*time.Minute)
+			ctx := user.InjectOrgID(context.Background(), "0")
+			query, err := engine.NewRangeQuery(ctx, queryable, nil, "dummy", c.mint, c.maxt, 1*time.Minute)
 			require.NoError(t, err)
 
-			ctx := user.InjectOrgID(context.Background(), "0")
 			r := query.Exec(ctx)
 
 			_, err = r.Matrix()
@@ -597,10 +597,10 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryIntoFuture(t *testing.T) {
 			require.NoError(t, err)
 
 			queryable, _, _ := New(cfg, overrides, distributor, nil, nil, log.NewNopLogger(), nil)
-			query, err := engine.NewRangeQuery(queryable, nil, "dummy", c.queryStartTime, c.queryEndTime, time.Minute)
+			ctx := user.InjectOrgID(context.Background(), "0")
+			query, err := engine.NewRangeQuery(ctx, queryable, nil, "dummy", c.queryStartTime, c.queryEndTime, time.Minute)
 			require.NoError(t, err)
 
-			ctx := user.InjectOrgID(context.Background(), "0")
 			r := query.Exec(ctx)
 			require.Nil(t, r.Err)
 
@@ -678,10 +678,10 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLength(t *testing.T) {
 				Timeout: 1 * time.Minute,
 			})
 
-			query, err := engine.NewRangeQuery(queryable, nil, testData.query, testData.queryStartTime, testData.queryEndTime, time.Minute)
+			ctx := user.InjectOrgID(context.Background(), "test")
+			query, err := engine.NewRangeQuery(ctx, queryable, nil, testData.query, testData.queryStartTime, testData.queryEndTime, time.Minute)
 			require.NoError(t, err)
 
-			ctx := user.InjectOrgID(context.Background(), "test")
 			r := query.Exec(ctx)
 
 			if testData.expected != nil {
@@ -792,7 +792,7 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) {
 			queryable, _, _ := New(cfg, overrides, distributor, nil, nil, log.NewNopLogger(), nil)
 			require.NoError(t, err)
 
-			query, err := engine.NewRangeQuery(queryable, nil, testData.query, testData.queryStartTime, testData.queryEndTime, time.Minute)
+			query, err := engine.NewRangeQuery(ctx, queryable, nil, testData.query, testData.queryStartTime, testData.queryEndTime, time.Minute)
 			require.NoError(t, err)
 
 			r := query.Exec(ctx)
@@ -986,10 +986,10 @@ func testRangeQuery(t testing.TB, queryable storage.Queryable, end model.Time, q
 		MaxSamples: 1e6,
 		Timeout: 1 * time.Minute,
 	})
-	query, err := engine.NewRangeQuery(queryable, nil, q.query, from, through, step)
+	ctx := user.InjectOrgID(context.Background(), "0")
+	query, err := engine.NewRangeQuery(ctx, queryable, nil, q.query, from, through, step)
 	require.NoError(t, err)
 
-	ctx := user.InjectOrgID(context.Background(), "0")
 	r := query.Exec(ctx)
 	m, err := r.Matrix()
 	require.NoError(t, err)
@@ -1150,10 +1150,10 @@ func TestQuerier_QueryStoreAfterConfig(t *testing.T) {
 			querier.On("Select", true, mock.Anything, expectedMatchers).Return(storage.EmptySeriesSet())
 
 			queryable, _, _ := New(cfg, overrides, distributor, []QueryableWithFilter{UseAlwaysQueryable(newMockBlocksStorageQueryable(querier))}, nil, log.NewNopLogger(), nil)
-			query, err := engine.NewRangeQuery(queryable, nil, "metric", c.mint, c.maxt, 1*time.Minute)
+			ctx := user.InjectOrgID(context.Background(), "0")
+			query, err := engine.NewRangeQuery(ctx, queryable, nil, "metric", c.mint, c.maxt, 1*time.Minute)
 			require.NoError(t, err)
 
-			ctx := user.InjectOrgID(context.Background(), "0")
 			r := query.Exec(ctx)
 
 			_, err = r.Matrix()

From 320c1f241402e72f146912a6ac7130e469b21685 Mon Sep 17 00:00:00 2001
From: Jeanette Tan
Date: Wed, 19 Apr 2023 17:23:37 +0800
Subject: [PATCH 4/4] Update TestQuerySharding_FunctionCorrectness

---
 .../querymiddleware/querysharding_test.go | 163 +++++++++---------
 1 file changed, 81 insertions(+), 82 deletions(-)
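Patch 4 restructures the function-correctness cases into three groups: cases valid for both sample types, float-only cases, and native-histogram-only cases. Each queryable then runs the shared set plus its own, passing the complementary group as cases to deliberately skip. The shape of the resulting calls, trimmed to one entry per slice for illustration (the types and helpers are the test's own, shown in full in the diff below):

```go
testsForBoth := []queryShardingFunctionCorrectnessTest{{fn: "rate", rangeQuery: true}}
testsForFloatsOnly := []queryShardingFunctionCorrectnessTest{{fn: "abs"}}
testsForNativeHistogramsOnly := []queryShardingFunctionCorrectnessTest{{fn: "histogram_sum"}}

// Each queryable runs the shared cases plus its own, and reports the other
// group as intentionally ignored so the coverage check still passes.
testQueryShardingFunctionCorrectness(t, queryableFloats, append(testsForBoth, testsForFloatsOnly...), testsForNativeHistogramsOnly)
testQueryShardingFunctionCorrectness(t, queryableNativeHistograms, append(testsForBoth, testsForNativeHistogramsOnly...), testsForFloatsOnly)
```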
diff --git a/pkg/frontend/querymiddleware/querysharding_test.go b/pkg/frontend/querymiddleware/querysharding_test.go
index 5271fc38ce5..847fcfa469c 100644
--- a/pkg/frontend/querymiddleware/querysharding_test.go
+++ b/pkg/frontend/querymiddleware/querysharding_test.go
@@ -859,89 +859,47 @@ type queryShardingFunctionCorrectnessTest struct {
 
 // TestQuerySharding_FunctionCorrectness is the old test that probably at some point inspired the TestQuerySharding_Correctness,
 // we keep it here since it adds more test cases.
 func TestQuerySharding_FunctionCorrectness(t *testing.T) {
-	queryableFloats := storageSeriesQueryable([]*promql.StorageSeries{
-		newSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "barr"), start.Add(-lookbackDelta), end, step, factor(5)),
-		newSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "bazz"), start.Add(-lookbackDelta), end, step, factor(7)),
-		newSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "buzz"), start.Add(-lookbackDelta), end, step, factor(12)),
-		newSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "bozz"), start.Add(-lookbackDelta), end, step, factor(11)),
-		newSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "buzz"), start.Add(-lookbackDelta), end, step, factor(8)),
-		newSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "bazz"), start.Add(-lookbackDelta), end, step, arithmeticSequence(10)),
-	})
-	testQueryShardingFunctionCorrectness(t, queryableFloats, []queryShardingFunctionCorrectnessTest{})
-	/*
-		queryableNativeHistograms := storageSeriesQueryable([]*promql.StorageSeries{
-			newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "barr"), start.Add(-lookbackDelta), end, step, factor(5)),
-			newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "bazz"), start.Add(-lookbackDelta), end, step, factor(7)),
-			newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "buzz"), start.Add(-lookbackDelta), end, step, factor(12)),
-			newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "bozz"), start.Add(-lookbackDelta), end, step, factor(11)),
-			newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "buzz"), start.Add(-lookbackDelta), end, step, factor(8)),
-			newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "bazz"), start.Add(-lookbackDelta), end, step, arithmeticSequence(10)),
-		})
-		testQueryShardingFunctionCorrectness(t, queryableNativeHistograms, []queryShardingFunctionCorrectnessTest{
-			{fn: "histogram_count"},
-			{fn: "histogram_sum"},
-			{fn: "histogram_fraction", tpl: `(<fn>(0,0.5,bar1{}<args>))`},
-			{fn: "histogram_quantile", tpl: `(<fn>(0.5,bar1{}<args>))`},
-		})
-	*/
-}
-
-func testQueryShardingFunctionCorrectness(t *testing.T, queryable storage.Queryable, extraTests []queryShardingFunctionCorrectnessTest) {
-	mkQueries := func(tpl, fn string, testMatrix bool, fArgs []string) []string {
-		if tpl == "" {
-			tpl = `(<fn>(bar1{}<args>))`
-		}
-		result := strings.Replace(tpl, "<fn>", fn, -1)
-
-		if testMatrix {
-			// turn selectors into ranges
-			result = strings.Replace(result, "}", "}[1m]", -1)
-		}
-
-		if len(fArgs) > 0 {
-			args := "," + strings.Join(fArgs, ",")
-			result = strings.Replace(result, "<args>", args, -1)
-		} else {
-			result = strings.Replace(result, "<args>", "", -1)
-		}
-
-		return []string{
-			result,
-			"sum" + result,
-			"sum by (bar)" + result,
-			"count" + result,
-			"count by (bar)" + result,
-		}
-	}
-	tests := append([]queryShardingFunctionCorrectnessTest{
-		{fn: "abs"},
-		{fn: "avg_over_time", rangeQuery: true},
-		{fn: "ceil"},
-		{fn: "changes", rangeQuery: true},
+	testsForBoth := []queryShardingFunctionCorrectnessTest{
 		{fn: "count_over_time", rangeQuery: true},
 		{fn: "days_in_month"},
 		{fn: "day_of_month"},
 		{fn: "day_of_week"},
 		{fn: "day_of_year"},
 		{fn: "delta", rangeQuery: true},
+		{fn: "hour"},
+		{fn: "increase", rangeQuery: true},
+		{fn: "minute"},
+		{fn: "month"},
+		{fn: "rate", rangeQuery: true},
+		{fn: "resets", rangeQuery: true},
+		{fn: "sort"},
+		{fn: "sort_desc"},
+		{fn: "last_over_time", rangeQuery: true},
+		{fn: "present_over_time", rangeQuery: true},
+		{fn: "timestamp"},
+		{fn: "year"},
+		{fn: "clamp", args: []string{"5", "10"}},
+		{fn: "clamp_max", args: []string{"5"}},
+		{fn: "clamp_min", args: []string{"5"}},
+		{fn: "round", args: []string{"20"}},
+		{fn: "label_replace", args: []string{`"fuzz"`, `"$1"`, `"foo"`, `"b(.*)"`}},
+		{fn: "label_join", args: []string{`"fuzz"`, `","`, `"foo"`, `"bar"`}},
+	}
+	testsForFloatsOnly := []queryShardingFunctionCorrectnessTest{
+		{fn: "abs"},
+		{fn: "avg_over_time", rangeQuery: true},
+		{fn: "ceil"},
+		{fn: "changes", rangeQuery: true},
 		{fn: "deriv", rangeQuery: true},
 		{fn: "exp"},
 		{fn: "floor"},
-		{fn: "hour"},
 		{fn: "idelta", rangeQuery: true},
-		{fn: "increase", rangeQuery: true},
 		{fn: "irate", rangeQuery: true},
 		{fn: "ln"},
 		{fn: "log10"},
 		{fn: "log2"},
 		{fn: "max_over_time", rangeQuery: true},
 		{fn: "min_over_time", rangeQuery: true},
-		{fn: "minute"},
-		{fn: "month"},
-		{fn: "rate", rangeQuery: true},
-		{fn: "resets", rangeQuery: true},
-		{fn: "sort"},
-		{fn: "sort_desc"},
 		{fn: "sqrt"},
 		{fn: "deg"},
 		{fn: "asinh"},
@@ -960,23 +918,65 @@ func testQueryShardingFunctionCorrectness(t *testing.T, queryable storage.Querya
 		{fn: "stddev_over_time", rangeQuery: true},
 		{fn: "stdvar_over_time", rangeQuery: true},
 		{fn: "sum_over_time", rangeQuery: true},
-		{fn: "last_over_time", rangeQuery: true},
-		{fn: "present_over_time", rangeQuery: true},
 		{fn: "quantile_over_time", rangeQuery: true, tpl: `(<fn>(0.5,bar1{}<args>))`},
 		{fn: "quantile_over_time", rangeQuery: true, tpl: `(<fn>(0.99,bar1{}<args>))`},
-		{fn: "timestamp"},
-		{fn: "year"},
 		{fn: "sgn"},
-		{fn: "clamp", args: []string{"5", "10"}},
-		{fn: "clamp_max", args: []string{"5"}},
-		{fn: "clamp_min", args: []string{"5"}},
 		{fn: "predict_linear", args: []string{"1"}, rangeQuery: true},
-		{fn: "round", args: []string{"20"}},
 		{fn: "holt_winters", args: []string{"0.5", "0.7"}, rangeQuery: true},
-		{fn: "label_replace", args: []string{`"fuzz"`, `"$1"`, `"foo"`, `"b(.*)"`}},
-		{fn: "label_join", args: []string{`"fuzz"`, `","`, `"foo"`, `"bar"`}},
-	}, extraTests...)
+	}
+	testsForNativeHistogramsOnly := []queryShardingFunctionCorrectnessTest{
+		{fn: "histogram_count"},
+		{fn: "histogram_sum"},
+		{fn: "histogram_fraction", tpl: `(<fn>(0,0.5,bar1{}<args>))`},
+		{fn: "histogram_quantile", tpl: `(<fn>(0.5,bar1{}<args>))`},
+	}
+	queryableFloats := storageSeriesQueryable([]*promql.StorageSeries{
+		newSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "barr"), start.Add(-lookbackDelta), end, step, factor(5)),
+		newSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "bazz"), start.Add(-lookbackDelta), end, step, factor(7)),
+		newSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "buzz"), start.Add(-lookbackDelta), end, step, factor(12)),
+		newSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "bozz"), start.Add(-lookbackDelta), end, step, factor(11)),
+		newSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "buzz"), start.Add(-lookbackDelta), end, step, factor(8)),
+		newSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "bazz"), start.Add(-lookbackDelta), end, step, arithmeticSequence(10)),
+	})
+	testQueryShardingFunctionCorrectness(t, queryableFloats, append(testsForBoth, testsForFloatsOnly...), testsForNativeHistogramsOnly)
+	queryableNativeHistograms := storageSeriesQueryable([]*promql.StorageSeries{
+		newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "barr"), start.Add(-lookbackDelta), end, step, factor(5)),
+		newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "bazz"), start.Add(-lookbackDelta), end, step, factor(7)),
+		newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "buzz"), start.Add(-lookbackDelta), end, step, factor(12)),
+		newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "bozz"), start.Add(-lookbackDelta), end, step, factor(11)),
+		newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blop", "foo", "buzz"), start.Add(-lookbackDelta), end, step, factor(8)),
+		newNativeHistogramSeries(labels.FromStrings("__name__", "bar1", "baz", "blip", "bar", "blap", "foo", "bazz"), start.Add(-lookbackDelta), end, step, arithmeticSequence(10)),
+	})
+	testQueryShardingFunctionCorrectness(t, queryableNativeHistograms, append(testsForBoth, testsForNativeHistogramsOnly...), testsForFloatsOnly)
+}
+
+func testQueryShardingFunctionCorrectness(t *testing.T, queryable storage.Queryable, tests []queryShardingFunctionCorrectnessTest, testsToIgnore []queryShardingFunctionCorrectnessTest) {
+	mkQueries := func(tpl, fn string, testMatrix bool, fArgs []string) []string {
+		if tpl == "" {
+			tpl = `(<fn>(bar1{}<args>))`
+		}
+		result := strings.Replace(tpl, "<fn>", fn, -1)
+
+		if testMatrix {
+			// turn selectors into ranges
+			result = strings.Replace(result, "}", "}[1m]", -1)
+		}
+
+		if len(fArgs) > 0 {
+			args := "," + strings.Join(fArgs, ",")
+			result = strings.Replace(result, "<args>", args, -1)
+		} else {
+			result = strings.Replace(result, "<args>", "", -1)
+		}
+		return []string{
+			result,
+			"sum" + result,
+			"sum by (bar)" + result,
+			"count" + result,
+			"count by (bar)" + result,
+		}
+	}
 	for _, tc := range tests {
 		const numShards = 4
 		for _, query := range mkQueries(tc.tpl, tc.fn, tc.rangeQuery, tc.args) {
@@ -1032,10 +1032,9 @@ func testQueryShardingFunctionCorrectness(t *testing.T, queryable storage.Querya
 		"scalar": {},
 		"vector": {},
 		"pi": {},
-		// These are tested only for native histograms but not for floats:
-		"histogram_count": {},
-		"histogram_sum": {},
-		"histogram_fraction": {},
+	}
+	for _, tc := range testsToIgnore {
+		fnToIgnore[tc.fn] = struct{}{}
+	}
 	for expectedFn := range promql.FunctionCalls {
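The hunk above wires `testsToIgnore` into the test's closing coverage check, which walks `promql.FunctionCalls` and fails on any registered PromQL function that is neither exercised nor explicitly skipped. A hedged sketch of that mechanism (`testedFns` and the assertion style are assumptions, not the actual test code):

```go
fnToIgnore := map[string]struct{}{
	"scalar": {},
	"vector": {},
	"pi":     {},
}
// Functions covered only by the other sample type are skipped, not failed.
for _, tc := range testsToIgnore {
	fnToIgnore[tc.fn] = struct{}{}
}
for expectedFn := range promql.FunctionCalls {
	if _, ok := fnToIgnore[expectedFn]; ok {
		continue
	}
	if _, tested := testedFns[expectedFn]; !tested { // testedFns is hypothetical
		t.Errorf("function %q is not tested", expectedFn)
	}
}
```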