From d542483e8c03bb58332c277ad7fd8e9d8eb547cb Mon Sep 17 00:00:00 2001 From: Aaron George Date: Wed, 30 Nov 2022 00:08:01 +0000 Subject: [PATCH 01/93] k8s discovery: Ensure that the pod IP is in the status before adding to target group Signed-off-by: Aaron George Signed-off-by: Aaron George --- discovery/kubernetes/endpoints.go | 25 +++++++------ discovery/kubernetes/endpoints_test.go | 43 ++++++++++++++++++++++ discovery/kubernetes/endpointslice.go | 25 +++++++------ discovery/kubernetes/endpointslice_test.go | 43 ++++++++++++++++++++++ 4 files changed, 114 insertions(+), 22 deletions(-) diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 1f39c23e71..8c26af3c40 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -375,18 +375,21 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { continue } - a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) - ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) - - target := model.LabelSet{ - model.AddressLabel: lv(a), - podContainerNameLabel: lv(c.Name), - podContainerImageLabel: lv(c.Image), - podContainerPortNameLabel: lv(cport.Name), - podContainerPortNumberLabel: lv(ports), - podContainerPortProtocolLabel: lv(string(cport.Protocol)), + // PodIP can be empty when a pod is starting or has been evicted. + if len(pe.pod.Status.PodIP) != 0 { + a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) + ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + + target := model.LabelSet{ + model.AddressLabel: lv(a), + podContainerNameLabel: lv(c.Name), + podContainerImageLabel: lv(c.Image), + podContainerPortNameLabel: lv(cport.Name), + podContainerPortNumberLabel: lv(ports), + podContainerPortProtocolLabel: lv(string(cport.Protocol)), + } + tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } - tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } } } diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index 91b1b0c676..21190b905f 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -825,3 +825,46 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) { }, }.Run(t) } + +func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) { + ep := makeEndpoints() + ep.Namespace = "ns" + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "ns", + UID: types.UID("deadbeef"), + }, + Spec: v1.PodSpec{ + NodeName: "testnode", + Containers: []v1.Container{ + { + Name: "p1", + Image: "p1:latest", + Ports: []v1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: v1.PodStatus{}, + } + + objs := []runtime.Object{ + ep, + pod, + } + + n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{IncludeOwnNamespace: true}, objs...) 
+ + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 0, + expectedRes: map[string]*targetgroup.Group{}, + }.Run(t) +} diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 594759f454..c6e7394968 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -393,18 +393,21 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou continue } - a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) - ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) - - target := model.LabelSet{ - model.AddressLabel: lv(a), - podContainerNameLabel: lv(c.Name), - podContainerImageLabel: lv(c.Image), - podContainerPortNameLabel: lv(cport.Name), - podContainerPortNumberLabel: lv(ports), - podContainerPortProtocolLabel: lv(string(cport.Protocol)), + // PodIP can be empty when a pod is starting or has been evicted. + if len(pe.pod.Status.PodIP) != 0 { + a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) + ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + + target := model.LabelSet{ + model.AddressLabel: lv(a), + podContainerNameLabel: lv(c.Name), + podContainerImageLabel: lv(c.Image), + podContainerPortNameLabel: lv(cport.Name), + podContainerPortNumberLabel: lv(ports), + podContainerPortProtocolLabel: lv(string(cport.Protocol)), + } + tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } - tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } } } diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index a0ae543fc9..a1fcffaba2 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -1074,3 +1074,46 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { }, }.Run(t) } + +func TestEndpointSliceDiscoveryEmptyPodStatus(t *testing.T) { + ep := makeEndpointSliceV1() + ep.Namespace = "ns" + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "ns", + UID: types.UID("deadbeef"), + }, + Spec: corev1.PodSpec{ + NodeName: "testnode", + Containers: []corev1.Container{ + { + Name: "p1", + Image: "p1:latest", + Ports: []corev1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: corev1.PodStatus{}, + } + + objs := []runtime.Object{ + ep, + pod, + } + + n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{IncludeOwnNamespace: true}, objs...) 
+ + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 0, + expectedRes: map[string]*targetgroup.Group{}, + }.Run(t) +} From 0032ce06449781d0fc0bb5a4731e87e59aa3efae Mon Sep 17 00:00:00 2001 From: Patryk Prus Date: Sun, 26 Feb 2023 21:05:27 -0500 Subject: [PATCH 02/93] Render background in images to play nicely with dark mode Signed-off-by: Patryk Prus --- README.md | 2 +- documentation/images/architecture.svg | 4 +++- documentation/images/internal_architecture.svg | 4 +++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 9becf71aa1..8b89bb01e5 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ The features that distinguish Prometheus from other metrics and monitoring syste ## Architecture overview -![Architecture overview](https://cdn.jsdelivr.net/gh/prometheus/prometheus@c34257d069c630685da35bcef084632ffd5d6209/documentation/images/architecture.svg) +![Architecture overview](documentation/images/architecture.svg) ## Install diff --git a/documentation/images/architecture.svg b/documentation/images/architecture.svg index df93e13cb2..4e1e85995d 100644 --- a/documentation/images/architecture.svg +++ b/documentation/images/architecture.svg @@ -1,2 +1,4 @@ + + -
[architecture.svg, previous draw.io export: only the diagram text labels survived extraction; SVG markup not preserved. Labels: Prometheus Server (Retrieval, Storage, PromQL, HDD / SSD), pull metrics, Pushgateway, Short-lived jobs, Jobs / Exporters, Service Discovery (DNS, Kubernetes, Consul, ..., custom integration), find targets, Alertmanager, push alerts, notify, PagerDuty, Email, Web UI, Grafana, API clients, Node]
\ No newline at end of file +
[architecture.svg, re-exported draw.io version with the added background rectangle: same diagram text labels as the previous export; SVG markup not preserved]
\ No newline at end of file diff --git a/documentation/images/internal_architecture.svg b/documentation/images/internal_architecture.svg index 5948186a7d..1242548ddb 100644 --- a/documentation/images/internal_architecture.svg +++ b/documentation/images/internal_architecture.svg @@ -1,2 +1,4 @@ + + -
[internal_architecture.svg, previous draw.io export: only the diagram text labels survived extraction; SVG markup not preserved. Labels: Prometheus Server, Fanout Storage, Local Storage, disk, Remote Storage, Remote Read Endpoints, Remote Write Endpoints, Scrape Manager, Scrape Discovery, Service Discovery, Targets, Rule Manager, Notifier, Notifier Discovery, Alertmanagers, PromQL, Web API/UI, Web Clients, Reload Handler, Termination Handler, (most other components), plus connecting actions (read/write series data, write series data, scrape metrics, append samples, append rule results, send alerts, discover scrape/Alertmanager targets, update targets, query, read series data, reload, terminate, view/control)]
\ No newline at end of file +
[internal_architecture.svg, re-exported draw.io version with the added background rectangle: same diagram text labels as the previous export; SVG markup not preserved]
\ No newline at end of file From 9e5cc340c3d67fb8c1ff8d1e701e68de6859b268 Mon Sep 17 00:00:00 2001 From: Francis Begyn Date: Sat, 12 Nov 2022 20:19:33 +0100 Subject: [PATCH 03/93] stop github actions from sending me mail Currently github actions keep sending me mails about things that should only run on the prometheus organisation actions. This change makes sure to check who owns the repository before running the CI workflow. Signed-off-by: Francis Begyn --- .github/workflows/buf.yml | 1 + .github/workflows/lock.yml | 1 + .github/workflows/repo_sync.yml | 1 + 3 files changed, 3 insertions(+) diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 2bfee71383..567ecc000e 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -7,6 +7,7 @@ jobs: buf: name: lint and publish runs-on: ubuntu-latest + if: github.repository_owner == 'prometheus' steps: - uses: actions/checkout@v3 - uses: bufbuild/buf-setup-action@v1.13.1 diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml index 1725fb4f2a..c6ac3d74ef 100644 --- a/.github/workflows/lock.yml +++ b/.github/workflows/lock.yml @@ -14,6 +14,7 @@ concurrency: jobs: action: runs-on: ubuntu-latest + if: github.repository_owner == 'prometheus' steps: - uses: dessant/lock-threads@v4 with: diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml index 37be80d45c..9526cd2fec 100644 --- a/.github/workflows/repo_sync.yml +++ b/.github/workflows/repo_sync.yml @@ -6,6 +6,7 @@ on: jobs: repo_sync: runs-on: ubuntu-latest + if: github.repository_owner == 'prometheus' container: image: quay.io/prometheus/golang-builder steps: From 1cc28ce9ca52453d6fc73a398158c4c135f90a47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Scheibe?= Date: Sat, 18 Mar 2023 20:11:35 +0100 Subject: [PATCH 04/93] chore: Fix documentation on signal to shut down instance MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: René Scheibe --- docs/getting_started.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting_started.md b/docs/getting_started.md index 11d8d0fb82..e89ac705ee 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -264,4 +264,4 @@ process ID. While Prometheus does have recovery mechanisms in the case that there is an abrupt process failure it is recommend to use the `SIGTERM` signal to cleanly shutdown a Prometheus instance. If you're running on Linux this can be performed -by using `kill -s SIGHUP `, replacing `` with your Prometheus process ID. +by using `kill -s SIGTERM `, replacing `` with your Prometheus process ID. From 97c7fffbb870a83625a7846e84e60c577d4575e4 Mon Sep 17 00:00:00 2001 From: Filip Petkovski Date: Wed, 22 Mar 2023 10:02:10 +0100 Subject: [PATCH 05/93] parser: Allow parsing arbitrary functions In Thanos we would like to start experimenting with custom functions that are currently not part of the PromQL spec. We would do this by adding an implementation for those functions in the Thanos engine: https://github.com/thanos-community/promql-engine and allow users to decide which engine they want to use on a per-query basis. Since we use the PromQL parser from Prometheus, injecting functions in the global `Functions` variable would mean they also become available for the Prometheus engine. To avoid this side-effect, this commit exposes a Parser interface in which the supported functions can be injected as an option. 
If not functions are injected, the parser implementation will default to the functions defined in the global Functions variable. Signed-off-by: Filip Petkovski --- promql/parser/functions.go | 4 +- promql/parser/generated_parser.y | 2 +- promql/parser/generated_parser.y.go | 2 +- promql/parser/parse.go | 117 ++++++++++++++++++---------- promql/parser/parse_test.go | 21 ++++- 5 files changed, 97 insertions(+), 49 deletions(-) diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 450021328b..479c7f635d 100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -387,7 +387,7 @@ var Functions = map[string]*Function{ } // getFunction returns a predefined Function object for the given name. -func getFunction(name string) (*Function, bool) { - function, ok := Functions[name] +func getFunction(name string, functions map[string]*Function) (*Function, bool) { + function, ok := functions[name] return function, ok } diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index 461e854ac1..fe3adfb9a0 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -339,7 +339,7 @@ grouping_label : maybe_label function_call : IDENTIFIER function_call_body { - fn, exist := getFunction($1.Val) + fn, exist := getFunction($1.Val, yylex.(*parser).functions) if !exist{ yylex.(*parser).addParseErrf($1.PositionRange(),"unknown function with name %q", $1.Val) } diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 3a0e8bf69b..1ac95036af 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -1210,7 +1210,7 @@ yydefault: yyDollar = yyS[yypt-2 : yypt+1] //line promql/parser/generated_parser.y:341 { - fn, exist := getFunction(yyDollar[1].item.Val) + fn, exist := getFunction(yyDollar[1].item.Val, yylex.(*parser).functions) if !exist { yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "unknown function with name %q", yyDollar[1].item.Val) } diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 6c37ce6fc6..7e7ec71f55 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -37,12 +37,20 @@ var parserPool = sync.Pool{ }, } +type Parser interface { + ParseExpr() (Expr, error) + Close() +} + type parser struct { lex Lexer inject ItemType injecting bool + // functions contains all functions supported by the parser instance. + functions map[string]*Function + // Everytime an Item is lexed that could be the end // of certain expressions its end position is stored here. lastClosing Pos @@ -53,6 +61,62 @@ type parser struct { parseErrors ParseErrors } +type Opt func(p *parser) + +func WithFunctions(functions map[string]*Function) Opt { + return func(p *parser) { + p.functions = functions + } +} + +// NewParser returns a new parser. +func NewParser(input string, opts ...Opt) *parser { + p := parserPool.Get().(*parser) + + p.functions = Functions + p.injecting = false + p.parseErrors = nil + p.generatedParserResult = nil + + // Clear lexer struct before reusing. + p.lex = Lexer{ + input: input, + state: lexStatements, + } + + // Apply user define options. + for _, opt := range opts { + opt(p) + } + + return p +} + +func (p *parser) ParseExpr() (expr Expr, err error) { + defer p.recover(&err) + + parseResult := p.parseGenerated(START_EXPRESSION) + + if parseResult != nil { + expr = parseResult.(Expr) + } + + // Only typecheck when there are no syntax errors. 
+ if len(p.parseErrors) == 0 { + p.checkAST(expr) + } + + if len(p.parseErrors) != 0 { + err = p.parseErrors + } + + return expr, err +} + +func (p *parser) Close() { + defer parserPool.Put(p) +} + // ParseErr wraps a parsing error with line and position context. type ParseErr struct { PositionRange PositionRange @@ -105,32 +169,15 @@ func (errs ParseErrors) Error() string { // ParseExpr returns the expression parsed from the input. func ParseExpr(input string) (expr Expr, err error) { - p := newParser(input) - defer parserPool.Put(p) - defer p.recover(&err) - - parseResult := p.parseGenerated(START_EXPRESSION) - - if parseResult != nil { - expr = parseResult.(Expr) - } - - // Only typecheck when there are no syntax errors. - if len(p.parseErrors) == 0 { - p.checkAST(expr) - } - - if len(p.parseErrors) != 0 { - err = p.parseErrors - } - - return expr, err + p := NewParser(input) + defer p.Close() + return p.ParseExpr() } // ParseMetric parses the input into a metric func ParseMetric(input string) (m labels.Labels, err error) { - p := newParser(input) - defer parserPool.Put(p) + p := NewParser(input) + defer p.Close() defer p.recover(&err) parseResult := p.parseGenerated(START_METRIC) @@ -148,8 +195,8 @@ func ParseMetric(input string) (m labels.Labels, err error) { // ParseMetricSelector parses the provided textual metric selector into a list of // label matchers. func ParseMetricSelector(input string) (m []*labels.Matcher, err error) { - p := newParser(input) - defer parserPool.Put(p) + p := NewParser(input) + defer p.Close() defer p.recover(&err) parseResult := p.parseGenerated(START_METRIC_SELECTOR) @@ -164,22 +211,6 @@ func ParseMetricSelector(input string) (m []*labels.Matcher, err error) { return m, err } -// newParser returns a new parser. -func newParser(input string) *parser { - p := parserPool.Get().(*parser) - - p.injecting = false - p.parseErrors = nil - p.generatedParserResult = nil - - // Clear lexer struct before reusing. - p.lex = Lexer{ - input: input, - state: lexStatements, - } - return p -} - // SequenceValue is an omittable value in a sequence of time series values. type SequenceValue struct { Value float64 @@ -200,10 +231,10 @@ type seriesDescription struct { // ParseSeriesDesc parses the description of a time series. 
func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue, err error) { - p := newParser(input) + p := NewParser(input) p.lex.seriesDesc = true - defer parserPool.Put(p) + defer p.Close() defer p.recover(&err) parseResult := p.parseGenerated(START_SERIES_DESCRIPTION) @@ -799,7 +830,7 @@ func MustLabelMatcher(mt labels.MatchType, name, val string) *labels.Matcher { } func MustGetFunction(name string) *Function { - f, ok := getFunction(name) + f, ok := getFunction(name, Functions) if !ok { panic(fmt.Errorf("function %q does not exist", name)) } diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index df66d9381a..ce609a4eea 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -3714,7 +3714,7 @@ func TestParseSeries(t *testing.T) { } func TestRecoverParserRuntime(t *testing.T) { - p := newParser("foo bar") + p := NewParser("foo bar") var err error defer func() { @@ -3728,7 +3728,7 @@ func TestRecoverParserRuntime(t *testing.T) { } func TestRecoverParserError(t *testing.T) { - p := newParser("foo bar") + p := NewParser("foo bar") var err error e := errors.New("custom error") @@ -3776,3 +3776,20 @@ func TestExtractSelectors(t *testing.T) { require.Equal(t, expected, ExtractSelectors(expr)) } } + +func TestParseCustomFunctions(t *testing.T) { + funcs := Functions + funcs["custom_func"] = &Function{ + Name: "custom_func", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + } + input := "custom_func(metric[1m])" + p := NewParser(input, WithFunctions(funcs)) + expr, err := p.ParseExpr() + require.NoError(t, err) + + call, ok := expr.(*Call) + require.True(t, ok) + require.Equal(t, "custom_func", call.Func.Name) +} From 3d7783e6633dcfd79078ac30e100873c6f53fca3 Mon Sep 17 00:00:00 2001 From: Filip Petkovski Date: Wed, 22 Mar 2023 10:20:16 +0100 Subject: [PATCH 06/93] Add nolint for NewParser function Signed-off-by: Filip Petkovski --- promql/parser/parse.go | 1 + 1 file changed, 1 insertion(+) diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 7e7ec71f55..ad8890f29f 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -70,6 +70,7 @@ func WithFunctions(functions map[string]*Function) Opt { } // NewParser returns a new parser. +// nolint:revive func NewParser(input string, opts ...Opt) *parser { p := parserPool.Get().(*parser) From 034eb2b3f2b31589ce1ebd6402e1fb6465b0afd4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Tue, 4 Apr 2023 13:28:02 +0200 Subject: [PATCH 07/93] promtool: read from stdin if no filenames are provided in check rules MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 21 +++++++++++++++++---- docs/command-line/promtool.md | 4 ++-- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 064e7a04f7..abac38d5fa 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -131,8 +131,8 @@ func main() { checkRulesCmd := checkCmd.Command("rules", "Check if the rule files are valid or not.") ruleFiles := checkRulesCmd.Arg( "rule-files", - "The rule files to check.", - ).Required().ExistingFiles() + "The rule files to check, default is read from standard input (STDIN).", + ).Default("-").Strings() checkRulesLint := checkRulesCmd.Flag( "lint", "Linting checks to apply. Available options are: "+strings.Join(lintOptions, ", ")+". 
Use --lint=none to disable linting", @@ -715,9 +715,22 @@ func CheckRules(ls lintConfig, files ...string) int { } func checkRules(filename string, lintSettings lintConfig) (int, []error) { - fmt.Println("Checking", filename) + var rgs *rulefmt.RuleGroups + var errs []error + if filename == "-" || filename == "" { + var buf bytes.Buffer + tee := io.TeeReader(os.Stdin, &buf) + if _, err := buf.ReadFrom(tee); err != nil { + errs = append(errs, err) + return failureExitCode, errs + } + fmt.Println("Checking stdin") + rgs, errs = rulefmt.Parse(buf.Bytes()) + } else { + fmt.Println("Checking", filename) + rgs, errs = rulefmt.ParseFile(filename) + } - rgs, errs := rulefmt.ParseFile(filename) if errs != nil { return successExitCode, errs } diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 543034f157..6b67408c9f 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -179,9 +179,9 @@ Check if the rule files are valid or not. ###### Arguments -| Argument | Description | Required | +| Argument | Description | Default | | --- | --- | --- | -| rule-files | The rule files to check. | Yes | +| rule-files | The rule files to check, default is read from standard input (STDIN). | `-` | From 2b7202c4ccf3ff8e61d4c509c6f6b4283ef1d9b5 Mon Sep 17 00:00:00 2001 From: "Xiaochao Dong (@damnever)" Date: Fri, 9 Dec 2022 15:27:56 +0800 Subject: [PATCH 08/93] Validate the metric names and labels in the remote write handler Signed-off-by: Xiaochao Dong (@damnever) --- storage/remote/write_handler.go | 28 +++++++++++++++++-- storage/remote/write_handler_test.go | 42 ++++++++++++++++++++++++---- web/api/v1/api.go | 2 +- 3 files changed, 63 insertions(+), 9 deletions(-) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 45304c43c4..590a55adb0 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -22,6 +22,8 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" @@ -30,15 +32,28 @@ import ( type writeHandler struct { logger log.Logger appendable storage.Appendable + + samplesWithInvalidLabelsTotal prometheus.Counter } // NewWriteHandler creates a http.Handler that accepts remote write requests and // writes them to the provided appendable. 
-func NewWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler { - return &writeHandler{ +func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable) http.Handler { + h := &writeHandler{ logger: logger, appendable: appendable, + + samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "remote_write_invalid_labels_samples_total", + Help: "The total number of remote write samples which contains invalid labels.", + }), } + if reg != nil { + reg.MustRegister(h.samplesWithInvalidLabelsTotal) + } + return h } func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -85,6 +100,7 @@ func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) { outOfOrderExemplarErrs := 0 + samplesWithInvalidLabels := 0 app := h.appendable.Appender(ctx) defer func() { @@ -98,6 +114,11 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err var exemplarErr error for _, ts := range req.Timeseries { labels := labelProtosToLabels(ts.Labels) + if !labels.IsValid() { + level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", labels.String()) + samplesWithInvalidLabels++ + continue + } for _, s := range ts.Samples { _, err = app.Append(0, labels, s.Timestamp, s.Value) if err != nil { @@ -150,6 +171,9 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err if outOfOrderExemplarErrs > 0 { _ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) } + if samplesWithInvalidLabels > 0 { + h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) + } return nil } diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 58c4439fa8..8774946ead 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -20,6 +20,8 @@ import ( "io" "net/http" "net/http/httptest" + "strconv" + "strings" "testing" "time" @@ -43,7 +45,7 @@ func TestRemoteWriteHandler(t *testing.T) { require.NoError(t, err) appendable := &mockAppendable{} - handler := NewWriteHandler(nil, appendable) + handler := NewWriteHandler(nil, nil, appendable) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -94,7 +96,7 @@ func TestOutOfOrderSample(t *testing.T) { appendable := &mockAppendable{ latestSample: 100, } - handler := NewWriteHandler(log.NewNopLogger(), appendable) + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -119,7 +121,7 @@ func TestOutOfOrderExemplar(t *testing.T) { appendable := &mockAppendable{ latestExemplar: 100, } - handler := NewWriteHandler(log.NewNopLogger(), appendable) + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -142,7 +144,7 @@ func TestOutOfOrderHistogram(t *testing.T) { appendable := &mockAppendable{ latestHistogram: 100, } - handler := NewWriteHandler(log.NewNopLogger(), appendable) + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -151,6 +153,34 @@ func TestOutOfOrderHistogram(t *testing.T) { require.Equal(t, http.StatusBadRequest, resp.StatusCode) } +func BenchmarkRemoteWritehandler(b 
*testing.B) { + const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte" + reqs := []*http.Request{} + for i := 0; i < b.N; i++ { + num := strings.Repeat(strconv.Itoa(i), 16) + buf, _, err := buildWriteRequest([]prompb.TimeSeries{{ + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric"}, + {Name: "test_label_name_" + num, Value: labelValue + num}, + }, + Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)}, + }}, nil, nil, nil) + require.NoError(b, err) + req, err := http.NewRequest("", "", bytes.NewReader(buf)) + require.NoError(b, err) + reqs = append(reqs, req) + } + + appendable := &mockAppendable{} + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable) + recorder := httptest.NewRecorder() + + b.ResetTimer() + for _, req := range reqs { + handler.ServeHTTP(recorder, req) + } +} + func TestCommitErr(t *testing.T) { buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil) require.NoError(t, err) @@ -161,7 +191,7 @@ func TestCommitErr(t *testing.T) { appendable := &mockAppendable{ commitErr: fmt.Errorf("commit error"), } - handler := NewWriteHandler(log.NewNopLogger(), appendable) + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -187,7 +217,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) { require.NoError(b, db.Close()) }) - handler := NewWriteHandler(log.NewNopLogger(), db.Head()) + handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head()) buf, _, err := buildWriteRequest(genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil) require.NoError(b, err) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 3b6ade562a..dcc520ab1d 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -284,7 +284,7 @@ func NewAPI( } if ap != nil { - a.remoteWriteHandler = remote.NewWriteHandler(logger, ap) + a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap) } return a From 8472596fd0673b972ca8d1e957ff997fe26db401 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Wed, 5 Apr 2023 11:24:49 +0200 Subject: [PATCH 09/93] fix: apply suggested changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 18 ++++++++++++------ docs/command-line/promtool.md | 6 +++--- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index abac38d5fa..b3d743e441 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -132,7 +132,7 @@ func main() { ruleFiles := checkRulesCmd.Arg( "rule-files", "The rule files to check, default is read from standard input (STDIN).", - ).Default("-").Strings() + ).ExistingFiles() checkRulesLint := checkRulesCmd.Flag( "lint", "Linting checks to apply. Available options are: "+strings.Join(lintOptions, ", ")+". 
Use --lint=none to disable linting", @@ -690,6 +690,11 @@ func CheckRules(ls lintConfig, files ...string) int { failed := false hasErrors := false + // add empty string to avoid matching filename + if len(files) == 0 { + files = append(files, "") + } + for _, f := range files { if n, errs := checkRules(f, ls); errs != nil { fmt.Fprintln(os.Stderr, " FAILED:") @@ -717,15 +722,16 @@ func CheckRules(ls lintConfig, files ...string) int { func checkRules(filename string, lintSettings lintConfig) (int, []error) { var rgs *rulefmt.RuleGroups var errs []error - if filename == "-" || filename == "" { - var buf bytes.Buffer - tee := io.TeeReader(os.Stdin, &buf) - if _, err := buf.ReadFrom(tee); err != nil { + + // if filename is an empty string it is a stdin + if filename == "" { + data, err := io.ReadAll(os.Stdin) + if err != nil { errs = append(errs, err) return failureExitCode, errs } fmt.Println("Checking stdin") - rgs, errs = rulefmt.Parse(buf.Bytes()) + rgs, errs = rulefmt.Parse(data) } else { fmt.Println("Checking", filename) rgs, errs = rulefmt.ParseFile(filename) diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 6b67408c9f..59c46dd79b 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -179,9 +179,9 @@ Check if the rule files are valid or not. ###### Arguments -| Argument | Description | Default | -| --- | --- | --- | -| rule-files | The rule files to check, default is read from standard input (STDIN). | `-` | +| Argument | Description | +| --- | --- | +| rule-files | The rule files to check, default is read from standard input (STDIN). | From 0c75f76193b71f4860e95e5b9dd108361a124380 Mon Sep 17 00:00:00 2001 From: znley Date: Thu, 6 Apr 2023 11:07:10 +0800 Subject: [PATCH 10/93] fix: RLIM_INFINITY type is uint64 on loong64 Signed-off-by: znley --- util/runtime/limits_default.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/util/runtime/limits_default.go b/util/runtime/limits_default.go index c3e0b4701a..d49b3bc1ed 100644 --- a/util/runtime/limits_default.go +++ b/util/runtime/limits_default.go @@ -18,16 +18,18 @@ package runtime import ( "fmt" + "math" "syscall" ) -// syscall.RLIM_INFINITY is a constant and its default type is int. -// It needs to be converted to an int64 variable to be compared with uint64 values. -// See https://golang.org/ref/spec#Conversions -var unlimited int64 = syscall.RLIM_INFINITY +// syscall.RLIM_INFINITY is a constant. +// Its type is int on most architectures but there are exceptions such as loong64. +// Uniform it to uint accorind to the standard. 
+// https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_resource.h.html +var unlimited uint64 = syscall.RLIM_INFINITY & math.MaxUint64 func limitToString(v uint64, unit string) string { - if v == uint64(unlimited) { + if v == unlimited { return "unlimited" } return fmt.Sprintf("%d%s", v, unit) From d5b33e1419b0fdc70719a45b747e155fa1a0781c Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sat, 8 Apr 2023 10:26:06 +0200 Subject: [PATCH 11/93] =?UTF-8?q?yamllint:=20don=E2=80=99t=20check=20keys?= =?UTF-8?q?=20and=20add=20file=20extension=20to=20config=20file?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Matthieu MOREL Signed-off-by: Matthieu MOREL --- .yamllint => .yamllint.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) rename .yamllint => .yamllint.yml (90%) diff --git a/.yamllint b/.yamllint.yml similarity index 90% rename from .yamllint rename to .yamllint.yml index 19552574b2..955a5a6270 100644 --- a/.yamllint +++ b/.yamllint.yml @@ -20,5 +20,4 @@ rules: config/testdata/section_key_dup.bad.yml line-length: disable truthy: - ignore: | - .github/workflows/*.yml + check-keys: false From ecf6bfa619009255d3cb615d649cb66ca4c217fc Mon Sep 17 00:00:00 2001 From: John Losito Date: Fri, 21 Apr 2023 09:26:16 -0400 Subject: [PATCH 12/93] Update configuration.md Signed-off-by: John Losito --- docs/configuration/configuration.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index f27f8256a5..c5ecc292cf 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -350,6 +350,7 @@ metric_relabel_configs: # This is an experimental feature, this behaviour could # change or be removed in the future. [ body_size_limit: | default = 0 ] + # Per-scrape limit on number of scraped samples that will be accepted. # If more than this number of samples are present after metric relabeling # the entire scrape will be treated as failed. 0 means no limit. From b1bab7bc54ed06f9a8619e7dd09b7b2ed55d3ee4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Thu, 27 Apr 2023 13:23:52 +0200 Subject: [PATCH 13/93] feat(promtool): add push metrics command MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 15 +++ cmd/promtool/metrics.go | 234 ++++++++++++++++++++++++++++++++++ docs/command-line/promtool.md | 43 +++++++ 3 files changed, 292 insertions(+) create mode 100644 cmd/promtool/metrics.go diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index de002a0b28..c4077954ea 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -178,6 +178,18 @@ func main() { queryLabelsEnd := queryLabelsCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String() queryLabelsMatch := queryLabelsCmd.Flag("match", "Series selector. 
Can be specified multiple times.").Strings() + pushCmd := app.Command("push", "Push to a Prometheus server.") + pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath) + pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write.") + pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write url to push metrics.").Required().URLVar(&serverURL) + metricFiles := pushMetricsCmd.Arg( + "metric-files", + "The metric files to push.", + ).Required().ExistingFiles() + metricJobLabel := pushMetricsCmd.Flag("job-label", "Job label to attach to metrics.").Default("promtool").String() + pushMetricsTimeout := pushMetricsCmd.Flag("timeout", "The time to wait for pushing metrics.").Default("30s").Duration() + pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap() + testCmd := app.Command("test", "Unit testing.") testRulesCmd := testCmd.Command("rules", "Unit tests for rules.") testRulesFiles := testRulesCmd.Arg( @@ -301,6 +313,9 @@ func main() { case checkMetricsCmd.FullCommand(): os.Exit(CheckMetrics(*checkMetricsExtended)) + case pushMetricsCmd.FullCommand(): + os.Exit(PushMetrics(serverURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *metricJobLabel, *metricFiles...)) + case queryInstantCmd.FullCommand(): os.Exit(QueryInstant(serverURL, httpRoundTripper, *queryInstantExpr, *queryInstantTime, p)) diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go new file mode 100644 index 0000000000..21fcd3e662 --- /dev/null +++ b/cmd/promtool/metrics.go @@ -0,0 +1,234 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/url" + "os" + "sort" + "time" + + "github.com/golang/snappy" + dto "github.com/prometheus/client_model/go" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage/remote" +) + +// Push metrics to a prometheus remote write. 
+func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, jobLabel string, files ...string) int { + // remote write should respect specification: https://prometheus.io/docs/concepts/remote_write_spec/ + failed := false + + addressURL, err := url.Parse(url.String()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return failureExitCode + } + + // build remote write client + writeClient, err := remote.NewWriteClient("remote-write", &remote.ClientConfig{ + URL: &config_util.URL{URL: addressURL}, + Timeout: model.Duration(timeout), + }) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return failureExitCode + } + + // set custom tls config from httpConfigFilePath + // set custom headers to every request + client, ok := writeClient.(*remote.Client) + if !ok { + fmt.Fprintln(os.Stderr, fmt.Errorf("unexpected type %T", writeClient)) + return failureExitCode + } + client.Client.Transport = &setHeadersTransport{ + RoundTripper: roundTripper, + headers: headers, + } + + for _, f := range files { + var data []byte + var err error + data, err = os.ReadFile(f) + if err != nil { + fmt.Fprintln(os.Stderr, err) + failed = true + continue + } + + fmt.Printf("Parsing metric file %s\n", f) + metricsData, err := parseMetricsTextAndFormat(bytes.NewReader(data), jobLabel) + if err != nil { + fmt.Fprintln(os.Stderr, err) + failed = true + continue + } + + raw, err := metricsData.Marshal() + if err != nil { + fmt.Fprintln(os.Stderr, err) + failed = true + continue + } + + // Encode the request body into snappy encoding. + compressed := snappy.Encode(nil, raw) + err = client.Store(context.Background(), compressed) + if err != nil { + fmt.Fprintln(os.Stderr, err) + failed = true + continue + } + fmt.Printf("Successfully pushed metric file %s\n", f) + } + + if failed { + return failureExitCode + } + + return successExitCode +} + +type setHeadersTransport struct { + http.RoundTripper + headers map[string]string +} + +func (s *setHeadersTransport) RoundTrip(req *http.Request) (*http.Response, error) { + for key, value := range s.headers { + req.Header.Set(key, value) + } + return s.RoundTripper.RoundTrip(req) +} + +var MetricMetadataTypeValue = map[string]int32{ + "UNKNOWN": 0, + "COUNTER": 1, + "GAUGE": 2, + "HISTOGRAM": 3, + "GAUGEHISTOGRAM": 4, + "SUMMARY": 5, + "INFO": 6, + "STATESET": 7, +} + +// formatMetrics convert metric family to a writerequest +func formatMetrics(mf map[string]*dto.MetricFamily, jobLabel string) (*prompb.WriteRequest, error) { + wr := &prompb.WriteRequest{} + + // build metric list + sortedMetricNames := make([]string, 0, len(mf)) + for metric := range mf { + sortedMetricNames = append(sortedMetricNames, metric) + } + // sort metrics name in lexicographical order + sort.Strings(sortedMetricNames) + + for _, metricName := range sortedMetricNames { + // Set metadata writerequest + mtype := MetricMetadataTypeValue[mf[metricName].Type.String()] + metadata := prompb.MetricMetadata{ + MetricFamilyName: mf[metricName].GetName(), + Type: prompb.MetricMetadata_MetricType(mtype), + Help: mf[metricName].GetHelp(), + } + wr.Metadata = append(wr.Metadata, metadata) + + for _, metric := range mf[metricName].Metric { + var timeserie prompb.TimeSeries + + // build labels map + labels := make(map[string]string, len(metric.Label)+2) + labels[model.MetricNameLabel] = metricName + labels[model.JobLabel] = jobLabel + + for _, label := range metric.Label { + labelname := label.GetName() + if labelname == model.JobLabel { + labelname = 
fmt.Sprintf("%s%s", model.ExportedLabelPrefix, labelname) + } + labels[labelname] = label.GetValue() + } + + // build labels name list + sortedLabelNames := make([]string, 0, len(labels)) + for label := range labels { + sortedLabelNames = append(sortedLabelNames, label) + } + // sort labels name in lexicographical order + sort.Strings(sortedLabelNames) + + for _, label := range sortedLabelNames { + timeserie.Labels = append(timeserie.Labels, prompb.Label{ + Name: label, + Value: labels[label], + }) + } + + timeserie.Samples = []prompb.Sample{ + { + Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + Value: getMetricsValue(metric), + }, + } + + wr.Timeseries = append(wr.Timeseries, timeserie) + } + } + return wr, nil +} + +// parseMetricsTextReader consumes an io.Reader and returns the MetricFamily +func parseMetricsTextReader(input io.Reader) (map[string]*dto.MetricFamily, error) { + var parser expfmt.TextParser + mf, err := parser.TextToMetricFamilies(input) + if err != nil { + return nil, err + } + return mf, nil +} + +// getMetricsValue return the value of a timeserie without the need to give value type +func getMetricsValue(m *dto.Metric) float64 { + switch { + case m.Gauge != nil: + return m.GetGauge().GetValue() + case m.Counter != nil: + return m.GetCounter().GetValue() + case m.Untyped != nil: + return m.GetUntyped().GetValue() + default: + return 0. + } +} + +// parseMetricsTextAndFormat return the data in the expected prometheus metrics write request format +func parseMetricsTextAndFormat(input io.Reader, jobLabel string) (*prompb.WriteRequest, error) { + mf, err := parseMetricsTextReader(input) + if err != nil { + return nil, err + } + + return formatMetrics(mf, jobLabel) +} diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index e149d374a0..ac159a9214 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -27,6 +27,7 @@ Tooling for the Prometheus monitoring system. | check | Check the resources for validity. | | query | Run query against a Prometheus server. | | debug | Fetch debug information. | +| push | Push to a Prometheus server. | | test | Unit testing. | | tsdb | Run tsdb commands. | @@ -372,6 +373,48 @@ Fetch all debug information. +### `promtool push` + +Push to a Prometheus server. + + + +#### Flags + +| Flag | Description | +| --- | --- | +| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. | + + + + +##### `promtool push metrics` + +Push metrics to a prometheus remote write. + + + +###### Flags + +| Flag | Description | Default | +| --- | --- | --- | +| --job-label | Job label to attach to metrics. | `promtool` | +| --timeout | The time to wait for pushing metrics. | `30s` | +| --header | Prometheus remote write header. | | + + + + +###### Arguments + +| Argument | Description | Required | +| --- | --- | --- | +| remote-write-url | Prometheus remote write url to push metrics. | Yes | +| metric-files | The metric files to push. | Yes | + + + + ### `promtool test` Unit testing. From c3f267d862cf91b6d55a5f2d218c5c6c0bd07ce3 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 22 Mar 2023 12:11:49 +0000 Subject: [PATCH 14/93] Alerts: more efficient generation of target labels Use a label builder instead of a slice when creating labels for the target alertmanagers. This can be passed directly to `relabel.ProcessBuilder`, skipping a copy. 
Signed-off-by: Bryan Boreham --- notifier/notifier.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/notifier/notifier.go b/notifier/notifier.go index c3b2e5c7e0..d6705edd3e 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -701,36 +701,38 @@ func postPath(pre string, v config.AlertmanagerAPIVersion) string { func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) { var res []alertmanager var droppedAlertManagers []alertmanager + lb := labels.NewBuilder(labels.EmptyLabels()) for _, tlset := range tg.Targets { - lbls := make([]labels.Label, 0, len(tlset)+2+len(tg.Labels)) + lb.Reset(labels.EmptyLabels()) for ln, lv := range tlset { - lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)}) + lb.Set(string(ln), string(lv)) } // Set configured scheme as the initial scheme label for overwrite. - lbls = append(lbls, labels.Label{Name: model.SchemeLabel, Value: cfg.Scheme}) - lbls = append(lbls, labels.Label{Name: pathLabel, Value: postPath(cfg.PathPrefix, cfg.APIVersion)}) + lb.Set(model.SchemeLabel, cfg.Scheme) + lb.Set(pathLabel, postPath(cfg.PathPrefix, cfg.APIVersion)) // Combine target labels with target group labels. for ln, lv := range tg.Labels { if _, ok := tlset[ln]; !ok { - lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)}) + lb.Set(string(ln), string(lv)) } } - lset, keep := relabel.Process(labels.New(lbls...), cfg.RelabelConfigs...) + preRelabel := lb.Labels() + keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) if !keep { - droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{labels.New(lbls...)}) + droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{preRelabel}) continue } - addr := lset.Get(model.AddressLabel) + addr := lb.Get(model.AddressLabel) if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { return nil, nil, err } - res = append(res, alertmanagerLabels{lset}) + res = append(res, alertmanagerLabels{lb.Labels()}) } return res, droppedAlertManagers, nil } From 3711339a7d4f787b136a2ef1b282dd9b9ea161b9 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 22 Mar 2023 12:26:17 +0000 Subject: [PATCH 15/93] Alerts: more efficient relabel on Send Re-use `labels.Builder` and use `relabel.ProcessBuilder` to skip a conversion step. Signed-off-by: Bryan Boreham --- notifier/notifier.go | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/notifier/notifier.go b/notifier/notifier.go index d6705edd3e..891372c43e 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -349,19 +349,6 @@ func (n *Manager) Send(alerts ...*Alert) { n.mtx.Lock() defer n.mtx.Unlock() - // Attach external labels before relabelling and sending. - for _, a := range alerts { - lb := labels.NewBuilder(a.Labels) - - n.opts.ExternalLabels.Range(func(l labels.Label) { - if a.Labels.Get(l.Name) == "" { - lb.Set(l.Name, l.Value) - } - }) - - a.Labels = lb.Labels() - } - alerts = n.relabelAlerts(alerts) if len(alerts) == 0 { return @@ -390,15 +377,25 @@ func (n *Manager) Send(alerts ...*Alert) { n.setMore() } +// Attach external labels and process relabelling rules. func (n *Manager) relabelAlerts(alerts []*Alert) []*Alert { + lb := labels.NewBuilder(labels.EmptyLabels()) var relabeledAlerts []*Alert - for _, alert := range alerts { - labels, keep := relabel.Process(alert.Labels, n.opts.RelabelConfigs...) 
- if keep { - alert.Labels = labels - relabeledAlerts = append(relabeledAlerts, alert) + for _, a := range alerts { + lb.Reset(a.Labels) + n.opts.ExternalLabels.Range(func(l labels.Label) { + if a.Labels.Get(l.Name) == "" { + lb.Set(l.Name, l.Value) + } + }) + + keep := relabel.ProcessBuilder(lb, n.opts.RelabelConfigs...) + if !keep { + continue } + a.Labels = lb.Labels() + relabeledAlerts = append(relabeledAlerts, a) } return relabeledAlerts } From c8e7f95a3cbfbb26eb48e86155ca1f4cfe20dc8f Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sun, 21 May 2023 09:20:07 +0200 Subject: [PATCH 16/93] ci(lint): enable predeclared linter Signed-off-by: Matthieu MOREL --- .golangci.yml | 3 ++- tsdb/exemplar.go | 6 +++--- tsdb/isolation.go | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index d1cd86ed59..fc2721455c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -13,8 +13,9 @@ linters: - gocritic - gofumpt - goimports - - revive - misspell + - predeclared + - revive - unconvert - unused diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index ad3b2ef39b..01718bb57d 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -216,9 +216,9 @@ func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar. return ce.validateExemplar(seriesLabels, e, false) } -// Not thread safe. The append parameters tells us whether this is an external validation, or internal +// Not thread safe. The appended parameters tells us whether this is an external validation, or internal // as a result of an AddExemplar call, in which case we should update any relevant metrics. -func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, append bool) error { +func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, appended bool) error { if len(ce.exemplars) == 0 { return storage.ErrExemplarsDisabled } @@ -250,7 +250,7 @@ func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemp } if e.Ts <= ce.exemplars[idx.newest].exemplar.Ts { - if append { + if appended { ce.metrics.outOfOrderExemplars.Inc() } return storage.ErrOutOfOrderExemplar diff --git a/tsdb/isolation.go b/tsdb/isolation.go index 401e5885a0..e436884a8d 100644 --- a/tsdb/isolation.go +++ b/tsdb/isolation.go @@ -244,9 +244,9 @@ type txRing struct { txIDCount int // How many ids in the ring. 
} -func newTxRing(cap int) *txRing { +func newTxRing(capacity int) *txRing { return &txRing{ - txIDs: make([]uint64, cap), + txIDs: make([]uint64, capacity), } } From 3524a16aa03d7b91a6d0484aa4ed6f1d443837f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Tue, 23 May 2023 10:29:17 +0200 Subject: [PATCH 17/93] feat: add suggested changes, tests, and stdin support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 11 +-- cmd/promtool/metrics.go | 157 +++++++--------------------------- docs/command-line/promtool.md | 4 +- util/fmtutil/format.go | 142 ++++++++++++++++++++++++++++++ util/fmtutil/format_test.go | 71 +++++++++++++++ 5 files changed, 251 insertions(+), 134 deletions(-) create mode 100644 util/fmtutil/format.go create mode 100644 util/fmtutil/format_test.go diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index c4077954ea..3b5ba78e4c 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -81,6 +81,7 @@ func main() { var ( httpRoundTripper = api.DefaultRoundTripper serverURL *url.URL + remoteWriteURL *url.URL httpConfigFilePath string ) @@ -180,12 +181,12 @@ func main() { pushCmd := app.Command("push", "Push to a Prometheus server.") pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath) - pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write.") - pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write url to push metrics.").Required().URLVar(&serverURL) + pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write (for testing purpose only).") + pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write url to push metrics.").Required().URLVar(&remoteWriteURL) metricFiles := pushMetricsCmd.Arg( "metric-files", - "The metric files to push.", - ).Required().ExistingFiles() + "The metric files to push, default is read from standard input (STDIN).", + ).ExistingFiles() metricJobLabel := pushMetricsCmd.Flag("job-label", "Job label to attach to metrics.").Default("promtool").String() pushMetricsTimeout := pushMetricsCmd.Flag("timeout", "The time to wait for pushing metrics.").Default("30s").Duration() pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap() @@ -314,7 +315,7 @@ func main() { os.Exit(CheckMetrics(*checkMetricsExtended)) case pushMetricsCmd.FullCommand(): - os.Exit(PushMetrics(serverURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *metricJobLabel, *metricFiles...)) + os.Exit(PushMetrics(remoteWriteURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *metricJobLabel, *metricFiles...)) case queryInstantCmd.FullCommand(): os.Exit(QueryInstant(serverURL, httpRoundTripper, *queryInstantExpr, *queryInstantTime, p)) diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go index 21fcd3e662..c845b5a587 100644 --- a/cmd/promtool/metrics.go +++ b/cmd/promtool/metrics.go @@ -21,22 +21,18 @@ import ( "net/http" "net/url" "os" - "sort" "time" "github.com/golang/snappy" - dto "github.com/prometheus/client_model/go" config_util "github.com/prometheus/common/config" - "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage/remote" + "github.com/prometheus/prometheus/util/fmtutil" ) -// Push 
metrics to a prometheus remote write. +// Push metrics to a prometheus remote write (for testing purpose only). func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, jobLabel string, files ...string) int { - // remote write should respect specification: https://prometheus.io/docs/concepts/remote_write_spec/ failed := false addressURL, err := url.Parse(url.String()) @@ -67,18 +63,36 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin headers: headers, } - for _, f := range files { + // add empty string to avoid matching filename + if len(files) == 0 { + files = append(files, "") + } + + for _, file := range files { var data []byte var err error - data, err = os.ReadFile(f) - if err != nil { - fmt.Fprintln(os.Stderr, err) - failed = true - continue - } - fmt.Printf("Parsing metric file %s\n", f) - metricsData, err := parseMetricsTextAndFormat(bytes.NewReader(data), jobLabel) + // if file is an empty string it is a stdin + if file == "" { + data, err = io.ReadAll(os.Stdin) + if err != nil { + fmt.Fprintln(os.Stderr, err) + failed = true + break + } + + fmt.Printf("Parsing stdin\n") + } else { + data, err = os.ReadFile(file) + if err != nil { + fmt.Fprintln(os.Stderr, err) + failed = true + continue + } + + fmt.Printf("Parsing metric file %s\n", file) + } + metricsData, err := fmtutil.ParseMetricsTextAndFormat(bytes.NewReader(data), jobLabel) if err != nil { fmt.Fprintln(os.Stderr, err) failed = true @@ -100,7 +114,7 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin failed = true continue } - fmt.Printf("Successfully pushed metric file %s\n", f) + fmt.Printf("Successfully pushed metric file %s\n", file) } if failed { @@ -121,114 +135,3 @@ func (s *setHeadersTransport) RoundTrip(req *http.Request) (*http.Response, erro } return s.RoundTripper.RoundTrip(req) } - -var MetricMetadataTypeValue = map[string]int32{ - "UNKNOWN": 0, - "COUNTER": 1, - "GAUGE": 2, - "HISTOGRAM": 3, - "GAUGEHISTOGRAM": 4, - "SUMMARY": 5, - "INFO": 6, - "STATESET": 7, -} - -// formatMetrics convert metric family to a writerequest -func formatMetrics(mf map[string]*dto.MetricFamily, jobLabel string) (*prompb.WriteRequest, error) { - wr := &prompb.WriteRequest{} - - // build metric list - sortedMetricNames := make([]string, 0, len(mf)) - for metric := range mf { - sortedMetricNames = append(sortedMetricNames, metric) - } - // sort metrics name in lexicographical order - sort.Strings(sortedMetricNames) - - for _, metricName := range sortedMetricNames { - // Set metadata writerequest - mtype := MetricMetadataTypeValue[mf[metricName].Type.String()] - metadata := prompb.MetricMetadata{ - MetricFamilyName: mf[metricName].GetName(), - Type: prompb.MetricMetadata_MetricType(mtype), - Help: mf[metricName].GetHelp(), - } - wr.Metadata = append(wr.Metadata, metadata) - - for _, metric := range mf[metricName].Metric { - var timeserie prompb.TimeSeries - - // build labels map - labels := make(map[string]string, len(metric.Label)+2) - labels[model.MetricNameLabel] = metricName - labels[model.JobLabel] = jobLabel - - for _, label := range metric.Label { - labelname := label.GetName() - if labelname == model.JobLabel { - labelname = fmt.Sprintf("%s%s", model.ExportedLabelPrefix, labelname) - } - labels[labelname] = label.GetValue() - } - - // build labels name list - sortedLabelNames := make([]string, 0, len(labels)) - for label := range labels { - sortedLabelNames = append(sortedLabelNames, label) - } - // sort 
labels name in lexicographical order - sort.Strings(sortedLabelNames) - - for _, label := range sortedLabelNames { - timeserie.Labels = append(timeserie.Labels, prompb.Label{ - Name: label, - Value: labels[label], - }) - } - - timeserie.Samples = []prompb.Sample{ - { - Timestamp: time.Now().UnixNano() / int64(time.Millisecond), - Value: getMetricsValue(metric), - }, - } - - wr.Timeseries = append(wr.Timeseries, timeserie) - } - } - return wr, nil -} - -// parseMetricsTextReader consumes an io.Reader and returns the MetricFamily -func parseMetricsTextReader(input io.Reader) (map[string]*dto.MetricFamily, error) { - var parser expfmt.TextParser - mf, err := parser.TextToMetricFamilies(input) - if err != nil { - return nil, err - } - return mf, nil -} - -// getMetricsValue return the value of a timeserie without the need to give value type -func getMetricsValue(m *dto.Metric) float64 { - switch { - case m.Gauge != nil: - return m.GetGauge().GetValue() - case m.Counter != nil: - return m.GetCounter().GetValue() - case m.Untyped != nil: - return m.GetUntyped().GetValue() - default: - return 0. - } -} - -// parseMetricsTextAndFormat return the data in the expected prometheus metrics write request format -func parseMetricsTextAndFormat(input io.Reader, jobLabel string) (*prompb.WriteRequest, error) { - mf, err := parseMetricsTextReader(input) - if err != nil { - return nil, err - } - - return formatMetrics(mf, jobLabel) -} diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index ac159a9214..024c71e51a 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -390,7 +390,7 @@ Push to a Prometheus server. ##### `promtool push metrics` -Push metrics to a prometheus remote write. +Push metrics to a prometheus remote write (for testing purpose only). @@ -410,7 +410,7 @@ Push metrics to a prometheus remote write. | Argument | Description | Required | | --- | --- | --- | | remote-write-url | Prometheus remote write url to push metrics. | Yes | -| metric-files | The metric files to push. | Yes | +| metric-files | The metric files to push, default is read from standard input (STDIN). | | diff --git a/util/fmtutil/format.go b/util/fmtutil/format.go new file mode 100644 index 0000000000..9a06d6bb15 --- /dev/null +++ b/util/fmtutil/format.go @@ -0,0 +1,142 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fmtutil + +import ( + "fmt" + "io" + "sort" + "time" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/prompb" +) + +var MetricMetadataTypeValue = map[string]int32{ + "UNKNOWN": 0, + "COUNTER": 1, + "GAUGE": 2, + "HISTOGRAM": 3, + "GAUGEHISTOGRAM": 4, + "SUMMARY": 5, + "INFO": 6, + "STATESET": 7, +} + +// FormatMetrics convert metric family to a writerequest. 
+func FormatMetrics(mf map[string]*dto.MetricFamily, jobLabel string) (*prompb.WriteRequest, error) { + wr := &prompb.WriteRequest{} + + // build metric list + sortedMetricNames := make([]string, 0, len(mf)) + for metric := range mf { + sortedMetricNames = append(sortedMetricNames, metric) + } + // sort metrics name in lexicographical order + sort.Strings(sortedMetricNames) + + for _, metricName := range sortedMetricNames { + // Set metadata writerequest + mtype := MetricMetadataTypeValue[mf[metricName].Type.String()] + metadata := prompb.MetricMetadata{ + MetricFamilyName: mf[metricName].GetName(), + Type: prompb.MetricMetadata_MetricType(mtype), + Help: mf[metricName].GetHelp(), + } + wr.Metadata = append(wr.Metadata, metadata) + + for _, metric := range mf[metricName].Metric { + var timeserie prompb.TimeSeries + + // build labels map + labels := make(map[string]string, len(metric.Label)+2) + labels[model.MetricNameLabel] = metricName + labels[model.JobLabel] = jobLabel + + for _, label := range metric.Label { + labelname := label.GetName() + if labelname == model.JobLabel { + labelname = fmt.Sprintf("%s%s", model.ExportedLabelPrefix, labelname) + } + labels[labelname] = label.GetValue() + } + + // build labels name list + sortedLabelNames := make([]string, 0, len(labels)) + for label := range labels { + sortedLabelNames = append(sortedLabelNames, label) + } + // sort labels name in lexicographical order + sort.Strings(sortedLabelNames) + + for _, label := range sortedLabelNames { + timeserie.Labels = append(timeserie.Labels, prompb.Label{ + Name: label, + Value: labels[label], + }) + } + + timestamp := metric.GetTimestampMs() + if timestamp == 0 { + timestamp = time.Now().UnixNano() / int64(time.Millisecond) + } + + timeserie.Samples = []prompb.Sample{ + { + Timestamp: timestamp, + Value: getMetricsValue(metric), + }, + } + + wr.Timeseries = append(wr.Timeseries, timeserie) + } + } + return wr, nil +} + +// getMetricsValue return the value of a timeserie without the need to give value type +func getMetricsValue(m *dto.Metric) float64 { + switch { + case m.Gauge != nil: + return m.GetGauge().GetValue() + case m.Counter != nil: + return m.GetCounter().GetValue() + case m.Untyped != nil: + return m.GetUntyped().GetValue() + default: + return 0. + } +} + +// ParseMetricsTextReader consumes an io.Reader and returns the MetricFamily. +func ParseMetricsTextReader(input io.Reader) (map[string]*dto.MetricFamily, error) { + var parser expfmt.TextParser + mf, err := parser.TextToMetricFamilies(input) + if err != nil { + return nil, err + } + return mf, nil +} + +// ParseMetricsTextAndFormat return the data in the expected prometheus metrics write request format. +func ParseMetricsTextAndFormat(input io.Reader, jobLabel string) (*prompb.WriteRequest, error) { + mf, err := ParseMetricsTextReader(input) + if err != nil { + return nil, err + } + return FormatMetrics(mf, jobLabel) +} diff --git a/util/fmtutil/format_test.go b/util/fmtutil/format_test.go new file mode 100644 index 0000000000..ef3b7fcd40 --- /dev/null +++ b/util/fmtutil/format_test.go @@ -0,0 +1,71 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fmtutil + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/prompb" +) + +var writeRequestFixture = &prompb.WriteRequest{ + Metadata: []prompb.MetricMetadata{ + { + MetricFamilyName: "test_metric1", + Type: 2, + Help: "this is a test metric", + }, + }, + Timeseries: []prompb.TimeSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric1"}, + {Name: "b", Value: "c"}, + {Name: "baz", Value: "qux"}, + {Name: "d", Value: "e"}, + {Name: "foo", Value: "bar"}, + {Name: "job", Value: "promtool"}, + }, + Samples: []prompb.Sample{{Value: 1, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric1"}, + {Name: "b", Value: "c"}, + {Name: "baz", Value: "qux"}, + {Name: "d", Value: "e"}, + {Name: "foo", Value: "bar"}, + {Name: "job", Value: "promtool"}, + }, + Samples: []prompb.Sample{{Value: 2, Timestamp: 1}}, + }, + }, +} + +func TestParseMetricsTextAndFormat(t *testing.T) { + input := bytes.NewReader([]byte(` + # HELP test_metric1 this is a test metric + # TYPE test_metric1 gauge + test_metric1{b="c",baz="qux",d="e",foo="bar"} 1 1 + test_metric1{b="c",baz="qux",d="e",foo="bar"} 2 1 + `)) + + expected, err := ParseMetricsTextAndFormat(input, "promtool") + require.NoError(t, err) + + require.Equal(t, writeRequestFixture, expected) +} From 934c5ddb8d223c6363033a48919e014c63e15ccb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Wed, 24 May 2023 10:55:49 +0200 Subject: [PATCH 18/93] feat: make push metrics labels generic and repeatable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 4 ++-- cmd/promtool/metrics.go | 25 +++++++++++++++---------- docs/command-line/promtool.md | 2 +- util/fmtutil/format.go | 15 ++++++++++----- util/fmtutil/format_test.go | 3 ++- 5 files changed, 30 insertions(+), 19 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 3b5ba78e4c..c76790e13b 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -187,7 +187,7 @@ func main() { "metric-files", "The metric files to push, default is read from standard input (STDIN).", ).ExistingFiles() - metricJobLabel := pushMetricsCmd.Flag("job-label", "Job label to attach to metrics.").Default("promtool").String() + pushMetricsLabels := pushMetricsCmd.Flag("label", "Label to attach to metrics. 
Can be specified multiple times.").Default("job=promtool").StringMap() pushMetricsTimeout := pushMetricsCmd.Flag("timeout", "The time to wait for pushing metrics.").Default("30s").Duration() pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap() @@ -315,7 +315,7 @@ func main() { os.Exit(CheckMetrics(*checkMetricsExtended)) case pushMetricsCmd.FullCommand(): - os.Exit(PushMetrics(remoteWriteURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *metricJobLabel, *metricFiles...)) + os.Exit(PushMetrics(remoteWriteURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *pushMetricsLabels, *metricFiles...)) case queryInstantCmd.FullCommand(): os.Exit(QueryInstant(serverURL, httpRoundTripper, *queryInstantExpr, *queryInstantTime, p)) diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go index c845b5a587..8abe32cf41 100644 --- a/cmd/promtool/metrics.go +++ b/cmd/promtool/metrics.go @@ -32,7 +32,7 @@ import ( ) // Push metrics to a prometheus remote write (for testing purpose only). -func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, jobLabel string, files ...string) int { +func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int { failed := false addressURL, err := url.Parse(url.String()) @@ -76,32 +76,32 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin if file == "" { data, err = io.ReadAll(os.Stdin) if err != nil { - fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, " FAILED:", err) failed = true break } - fmt.Printf("Parsing stdin\n") + fmt.Printf("Parsing input from stdin\n") } else { data, err = os.ReadFile(file) if err != nil { - fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, " FAILED:", err) failed = true continue } - fmt.Printf("Parsing metric file %s\n", file) + fmt.Printf("Parsing input from metric file %s\n", file) } - metricsData, err := fmtutil.ParseMetricsTextAndFormat(bytes.NewReader(data), jobLabel) + metricsData, err := fmtutil.ParseMetricsTextAndFormat(bytes.NewReader(data), labels) if err != nil { - fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, " FAILED:", err) failed = true continue } raw, err := metricsData.Marshal() if err != nil { - fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, " FAILED:", err) failed = true continue } @@ -110,11 +110,16 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin compressed := snappy.Encode(nil, raw) err = client.Store(context.Background(), compressed) if err != nil { - fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, " FAILED:", err) failed = true continue } - fmt.Printf("Successfully pushed metric file %s\n", file) + + if file == "" { + fmt.Printf(" SUCCESS: metric pushed to remote write.\n") + } else { + fmt.Printf(" SUCCESS: metric file %s pushed to remote write.\n", file) + } } if failed { diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 024c71e51a..c78900b991 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -398,7 +398,7 @@ Push metrics to a prometheus remote write (for testing purpose only). | Flag | Description | Default | | --- | --- | --- | -| --job-label | Job label to attach to metrics. | `promtool` | +| --label | Label to attach to metrics. Can be specified multiple times. 
| `job=promtool` | | --timeout | The time to wait for pushing metrics. | `30s` | | --header | Prometheus remote write header. | | diff --git a/util/fmtutil/format.go b/util/fmtutil/format.go index 9a06d6bb15..b5bb9469ce 100644 --- a/util/fmtutil/format.go +++ b/util/fmtutil/format.go @@ -38,7 +38,7 @@ var MetricMetadataTypeValue = map[string]int32{ } // FormatMetrics convert metric family to a writerequest. -func FormatMetrics(mf map[string]*dto.MetricFamily, jobLabel string) (*prompb.WriteRequest, error) { +func FormatMetrics(mf map[string]*dto.MetricFamily, extraLabels map[string]string) (*prompb.WriteRequest, error) { wr := &prompb.WriteRequest{} // build metric list @@ -63,10 +63,15 @@ func FormatMetrics(mf map[string]*dto.MetricFamily, jobLabel string) (*prompb.Wr var timeserie prompb.TimeSeries // build labels map - labels := make(map[string]string, len(metric.Label)+2) + labels := make(map[string]string, len(metric.Label)+len(extraLabels)) labels[model.MetricNameLabel] = metricName - labels[model.JobLabel] = jobLabel + // add extra labels + for key, value := range extraLabels { + labels[key] = value + } + + // add metric labels for _, label := range metric.Label { labelname := label.GetName() if labelname == model.JobLabel { @@ -133,10 +138,10 @@ func ParseMetricsTextReader(input io.Reader) (map[string]*dto.MetricFamily, erro } // ParseMetricsTextAndFormat return the data in the expected prometheus metrics write request format. -func ParseMetricsTextAndFormat(input io.Reader, jobLabel string) (*prompb.WriteRequest, error) { +func ParseMetricsTextAndFormat(input io.Reader, labels map[string]string) (*prompb.WriteRequest, error) { mf, err := ParseMetricsTextReader(input) if err != nil { return nil, err } - return FormatMetrics(mf, jobLabel) + return FormatMetrics(mf, labels) } diff --git a/util/fmtutil/format_test.go b/util/fmtutil/format_test.go index ef3b7fcd40..9deed2de90 100644 --- a/util/fmtutil/format_test.go +++ b/util/fmtutil/format_test.go @@ -63,8 +63,9 @@ func TestParseMetricsTextAndFormat(t *testing.T) { test_metric1{b="c",baz="qux",d="e",foo="bar"} 1 1 test_metric1{b="c",baz="qux",d="e",foo="bar"} 2 1 `)) + labels := map[string]string{"job": "promtool"} - expected, err := ParseMetricsTextAndFormat(input, "promtool") + expected, err := ParseMetricsTextAndFormat(input, labels) require.NoError(t, err) require.Equal(t, writeRequestFixture, expected) From 37e5249e33e4c2d7bc49c4ac587faf12eb6785b7 Mon Sep 17 00:00:00 2001 From: zenador Date: Wed, 24 May 2023 19:00:21 +0800 Subject: [PATCH 19/93] Use DefaultSamplesPerChunk in tsdb (#12387) Signed-off-by: Jeanette Tan --- tsdb/db.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tsdb/db.go b/tsdb/db.go index a0d0a4b260..12974150be 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -640,6 +640,9 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { if opts.HeadChunksWriteQueueSize < 0 { opts.HeadChunksWriteQueueSize = chunks.DefaultWriteQueueSize } + if opts.SamplesPerChunk <= 0 { + opts.SamplesPerChunk = DefaultSamplesPerChunk + } if opts.MaxBlockChunkSegmentSize <= 0 { opts.MaxBlockChunkSegmentSize = chunks.DefaultChunkSegmentSize } From 89af3517304bcd48a8f20686c0e3317544b6985d Mon Sep 17 00:00:00 2001 From: Justin Lei <97976793+leizor@users.noreply.github.com> Date: Thu, 25 May 2023 02:18:41 -0700 Subject: [PATCH 20/93] Remove samplesPerChunk from memSeries (#12390) Signed-off-by: Justin Lei --- tsdb/head.go | 14 +++++----- tsdb/head_append.go | 24 ++++++++--------- tsdb/head_test.go | 66 
++++++++++++++++++++++----------------------- tsdb/head_wal.go | 6 ++--- 4 files changed, 54 insertions(+), 56 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index f094b3662c..a1d61fd6ab 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1614,7 +1614,7 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels) (*memSeries, bool, error) { s, created, err := h.series.getOrSet(hash, lset, func() *memSeries { - return newMemSeries(lset, id, h.opts.IsolationDisabled, h.opts.SamplesPerChunk) + return newMemSeries(lset, id, h.opts.IsolationDisabled) }) if err != nil { return nil, false, err @@ -1922,8 +1922,7 @@ type memSeries struct { mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay. - samplesPerChunk int // Target number of samples per chunk. - nextAt int64 // Timestamp at which to cut the next chunk. + nextAt int64 // Timestamp at which to cut the next chunk. // We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates. lastValue float64 @@ -1951,12 +1950,11 @@ type memSeriesOOOFields struct { firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0]. } -func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, isolationDisabled bool, samplesPerChunk int) *memSeries { +func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, isolationDisabled bool) *memSeries { s := &memSeries{ - lset: lset, - ref: id, - nextAt: math.MinInt64, - samplesPerChunk: samplesPerChunk, + lset: lset, + ref: id, + nextAt: math.MinInt64, } if !isolationDisabled { s.txs = newTxRing(4) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 060d32b7f7..44847dceb2 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -987,7 +987,7 @@ func (a *headAppender) Commit() (err error) { samplesAppended-- } default: - ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange) + ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk) if ok { if s.T < inOrderMint { inOrderMint = s.T @@ -1016,7 +1016,7 @@ func (a *headAppender) Commit() (err error) { for i, s := range a.histograms { series = a.histogramSeries[i] series.Lock() - ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, a.head.chunkDiskMapper, chunkRange) + ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk) series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) series.pendingCommit = false series.Unlock() @@ -1042,7 +1042,7 @@ func (a *headAppender) Commit() (err error) { for i, s := range a.floatHistograms { series = a.floatHistogramSeries[i] series.Lock() - ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, a.head.chunkDiskMapper, chunkRange) + ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk) series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) series.pendingCommit = false series.Unlock() @@ -1122,8 +1122,8 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk // the appendID for isolation. (The appendID can be zero, which results in no // isolation for this append.) // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. 
-func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) { - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, chunkDiskMapper, chunkRange) +func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) { + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, chunkDiskMapper, chunkRange, samplesPerChunk) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1144,7 +1144,7 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper // appendHistogram adds the histogram. // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. -func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) { +func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) { // Head controls the execution of recoding, so that we own the proper // chunk reference afterwards. We check for Appendable from appender before // appendPreprocessor because in case it ends up creating a new chunk, @@ -1157,7 +1157,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui pMergedSpans, nMergedSpans []histogram.Span okToAppend, counterReset, gauge bool ) - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange, samplesPerChunk) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1238,7 +1238,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui // appendFloatHistogram adds the float histogram. // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. -func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) { +func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) { // Head controls the execution of recoding, so that we own the proper // chunk reference afterwards. We check for Appendable from appender before // appendPreprocessor because in case it ends up creating a new chunk, @@ -1251,7 +1251,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, pMergedSpans, nMergedSpans []histogram.Span okToAppend, counterReset, gauge bool ) - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange, samplesPerChunk) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1334,7 +1334,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. 
// This should be called only when appending data. func (s *memSeries) appendPreprocessor( - t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, + t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int, ) (c *memChunk, sampleInOrder, chunkCreated bool) { c = s.head() @@ -1372,7 +1372,7 @@ func (s *memSeries) appendPreprocessor( // for this chunk that will try to make samples equally distributed within // the remaining chunks in the current chunk range. // At latest it must happen at the timestamp set when the chunk was cut. - if numSamples == s.samplesPerChunk/4 { + if numSamples == samplesPerChunk/4 { s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt) } // If numSamples > samplesPerChunk*2 then our previous prediction was invalid, @@ -1380,7 +1380,7 @@ func (s *memSeries) appendPreprocessor( // Since we assume that the rate is higher, we're being conservative and cutting at 2*samplesPerChunk // as we expect more chunks to come. // Note that next chunk will have its nextAt recalculated for the new rate. - if t >= s.nextAt || numSamples >= s.samplesPerChunk*2 { + if t >= s.nextAt || numSamples >= samplesPerChunk*2 { c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange) chunkCreated = true } diff --git a/tsdb/head_test.go b/tsdb/head_test.go index af3df378e7..14468e0716 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -285,8 +285,8 @@ func BenchmarkLoadWAL(b *testing.B) { require.NoError(b, err) for k := 0; k < c.batches*c.seriesPerBatch; k++ { // Create one mmapped chunk per series, with one sample at the given time. - s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, defaultIsolationDisabled, DefaultSamplesPerChunk) - s.append(c.mmappedChunkT, 42, 0, chunkDiskMapper, c.mmappedChunkT) + s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, defaultIsolationDisabled) + s.append(c.mmappedChunkT, 42, 0, chunkDiskMapper, c.mmappedChunkT, DefaultSamplesPerChunk) s.mmapCurrentHeadChunk(chunkDiskMapper) } require.NoError(b, chunkDiskMapper.Close()) @@ -807,10 +807,10 @@ func TestMemSeries_truncateChunks(t *testing.T) { }, } - s := newMemSeries(labels.FromStrings("a", "b"), 1, defaultIsolationDisabled, DefaultSamplesPerChunk) + s := newMemSeries(labels.FromStrings("a", "b"), 1, defaultIsolationDisabled) for i := 0; i < 4000; i += 5 { - ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange) + ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "sample append failed") } @@ -1338,24 +1338,24 @@ func TestMemSeries_append(t *testing.T) { }() const chunkRange = 500 - s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk) + s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) // Add first two samples at the very end of a chunk range and the next two // on and after it. // New chunk must correctly be cut at 1000. 
- ok, chunkCreated := s.append(998, 1, 0, chunkDiskMapper, chunkRange) + ok, chunkCreated := s.append(998, 1, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.True(t, chunkCreated, "first sample created chunk") - ok, chunkCreated = s.append(999, 2, 0, chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(999, 2, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") - ok, chunkCreated = s.append(1000, 3, 0, chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(1000, 3, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.True(t, chunkCreated, "expected new chunk on boundary") - ok, chunkCreated = s.append(1001, 4, 0, chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(1001, 4, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") @@ -1368,7 +1368,7 @@ func TestMemSeries_append(t *testing.T) { // Fill the range [1000,2000) with many samples. Intermediate chunks should be cut // at approximately 120 samples per chunk. for i := 1; i < 1000; i++ { - ok, _ := s.append(1001+int64(i), float64(i), 0, chunkDiskMapper, chunkRange) + ok, _ := s.append(1001+int64(i), float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") } @@ -1392,7 +1392,7 @@ func TestMemSeries_appendHistogram(t *testing.T) { }() chunkRange := int64(1000) - s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk) + s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) histograms := tsdbutil.GenerateTestHistograms(4) histogramWithOneMoreBucket := histograms[3].Copy() @@ -1404,19 +1404,19 @@ func TestMemSeries_appendHistogram(t *testing.T) { // Add first two samples at the very end of a chunk range and the next two // on and after it. // New chunk must correctly be cut at 1000. 
- ok, chunkCreated := s.appendHistogram(998, histograms[0], 0, chunkDiskMapper, chunkRange) + ok, chunkCreated := s.appendHistogram(998, histograms[0], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.True(t, chunkCreated, "first sample created chunk") - ok, chunkCreated = s.appendHistogram(999, histograms[1], 0, chunkDiskMapper, chunkRange) + ok, chunkCreated = s.appendHistogram(999, histograms[1], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") - ok, chunkCreated = s.appendHistogram(1000, histograms[2], 0, chunkDiskMapper, chunkRange) + ok, chunkCreated = s.appendHistogram(1000, histograms[2], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.True(t, chunkCreated, "expected new chunk on boundary") - ok, chunkCreated = s.appendHistogram(1001, histograms[3], 0, chunkDiskMapper, chunkRange) + ok, chunkCreated = s.appendHistogram(1001, histograms[3], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") @@ -1426,7 +1426,7 @@ func TestMemSeries_appendHistogram(t *testing.T) { require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range") require.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range") - ok, chunkCreated = s.appendHistogram(1002, histogramWithOneMoreBucket, 0, chunkDiskMapper, chunkRange) + ok, chunkCreated = s.appendHistogram(1002, histogramWithOneMoreBucket, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.False(t, chunkCreated, "third sample should trigger a re-encoded chunk") @@ -1448,7 +1448,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { }) chunkRange := DefaultBlockDuration - s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk) + s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) // At this slow rate, we will fill the chunk in two block durations. slowRate := (DefaultBlockDuration * 2) / samplesPerChunk @@ -1456,7 +1456,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { var nextTs int64 var totalAppendedSamples int for i := 0; i < samplesPerChunk/4; i++ { - ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper, chunkRange) + ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.Truef(t, ok, "slow sample %d was not appended", i) nextTs += slowRate totalAppendedSamples++ @@ -1465,12 +1465,12 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { // Suddenly, the rate increases and we receive a sample every millisecond. 
for i := 0; i < math.MaxUint16; i++ { - ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper, chunkRange) + ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.Truef(t, ok, "quick sample %d was not appended", i) nextTs++ totalAppendedSamples++ } - ok, chunkCreated := s.append(DefaultBlockDuration, float64(0), 0, chunkDiskMapper, chunkRange) + ok, chunkCreated := s.append(DefaultBlockDuration, float64(0), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "new chunk sample was not appended") require.True(t, chunkCreated, "sample at block duration timestamp should create a new chunk") @@ -1495,18 +1495,18 @@ func TestGCChunkAccess(t *testing.T) { s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1")) // Appending 2 samples for the first chunk. - ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") // A new chunks should be created here as it's beyond the chunk range. - ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") @@ -1548,18 +1548,18 @@ func TestGCSeriesAccess(t *testing.T) { s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1")) // Appending 2 samples for the first chunk. - ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") // A new chunks should be created here as it's beyond the chunk range. 
- ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") @@ -1795,10 +1795,10 @@ func TestHeadReadWriterRepair(t *testing.T) { require.True(t, created, "series was not created") for i := 0; i < 7; i++ { - ok, chunkCreated := s.append(int64(i*chunkRange), float64(i*chunkRange), 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated := s.append(int64(i*chunkRange), float64(i*chunkRange), 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunk was not created") - ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunk was created") h.chunkDiskMapper.CutNewFile() @@ -2146,7 +2146,7 @@ func TestIsolationAppendIDZeroIsNoop(t *testing.T) { s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1")) - ok, _ := s.append(0, 0, 0, h.chunkDiskMapper, h.chunkRange.Load()) + ok, _ := s.append(0, 0, 0, h.chunkDiskMapper, h.chunkRange.Load(), DefaultSamplesPerChunk) require.True(t, ok, "Series append failed.") require.Equal(t, 0, s.txs.txIDCount, "Series should not have an appendID after append with appendID=0.") } @@ -2610,10 +2610,10 @@ func TestIteratorSeekIntoBuffer(t *testing.T) { }() const chunkRange = 500 - s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk) + s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) for i := 0; i < 7; i++ { - ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange) + ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "sample append failed") } diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 6e81f17932..9741d1da04 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -588,7 +588,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp if s.T <= ms.mmMaxTime { continue } - if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper, chunkRange); chunkCreated { + if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper, chunkRange, h.opts.SamplesPerChunk); chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() } @@ -618,9 +618,9 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp } var chunkCreated bool if s.h != nil { - _, chunkCreated = ms.appendHistogram(s.t, s.h, 0, h.chunkDiskMapper, chunkRange) + _, chunkCreated = ms.appendHistogram(s.t, s.h, 0, h.chunkDiskMapper, chunkRange, h.opts.SamplesPerChunk) } else { - _, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, h.chunkDiskMapper, chunkRange) + _, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, h.chunkDiskMapper, chunkRange, h.opts.SamplesPerChunk) } if chunkCreated { h.metrics.chunksCreated.Inc() From cb045c0e4b94bbf3eee174d91b5ef2b8553948d5 Mon Sep 
17 00:00:00 2001 From: Julius Volz Date: Thu, 25 May 2023 11:49:43 +0200 Subject: [PATCH 21/93] Fix wording from "jitterSeed" -> "offsetSeed" for server-wide scrape offsets In digital communication, "jitter" usually refers to how much a signal deviates from true periodicity, see https://en.wikipedia.org/wiki/Jitter. The way we are using the "jitterSeed" in Prometheus does not affect the true periodicity at all, but just introduces a constant phase shift (or offset) within the period. So it would be more correct and less confusing to call the "jitterSeed" an "offsetSeed" instead. Signed-off-by: Julius Volz --- scrape/manager.go | 12 ++++++------ scrape/manager_test.go | 18 +++++++++--------- scrape/scrape.go | 14 +++++++------- scrape/target_test.go | 4 ++-- 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/scrape/manager.go b/scrape/manager.go index d75fe30cf5..d7cf6792c2 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -150,7 +150,7 @@ type Manager struct { append storage.Appendable graceShut chan struct{} - jitterSeed uint64 // Global jitterSeed seed is used to spread scrape workload across HA setup. + offsetSeed uint64 // Global offsetSeed seed is used to spread scrape workload across HA setup. mtxScrape sync.Mutex // Guards the fields below. scrapeConfigs map[string]*config.ScrapeConfig scrapePools map[string]*scrapePool @@ -214,7 +214,7 @@ func (m *Manager) reload() { level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName) continue } - sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts) + sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, log.With(m.logger, "scrape_pool", setName), m.opts) if err != nil { level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName) continue @@ -234,8 +234,8 @@ func (m *Manager) reload() { wg.Wait() } -// setJitterSeed calculates a global jitterSeed per server relying on extra label set. -func (m *Manager) setJitterSeed(labels labels.Labels) error { +// setOffsetSeed calculates a global offsetSeed per server relying on extra label set. +func (m *Manager) setOffsetSeed(labels labels.Labels) error { h := fnv.New64a() hostname, err := osutil.GetFQDN() if err != nil { @@ -244,7 +244,7 @@ func (m *Manager) setJitterSeed(labels labels.Labels) error { if _, err := fmt.Fprintf(h, "%s%s", hostname, labels.String()); err != nil { return err } - m.jitterSeed = h.Sum64() + m.offsetSeed = h.Sum64() return nil } @@ -281,7 +281,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { } m.scrapeConfigs = c - if err := m.setJitterSeed(cfg.GlobalConfig.ExternalLabels); err != nil { + if err := m.setOffsetSeed(cfg.GlobalConfig.ExternalLabels); err != nil { return err } diff --git a/scrape/manager_test.go b/scrape/manager_test.go index d05d25fa2c..50f6320137 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -596,7 +596,7 @@ func TestManagerTargetsUpdates(t *testing.T) { } } -func TestSetJitter(t *testing.T) { +func TestSetOffsetSeed(t *testing.T) { getConfig := func(prometheus string) *config.Config { cfgText := ` global: @@ -617,24 +617,24 @@ global: // Load the first config. 
cfg1 := getConfig("ha1") - if err := scrapeManager.setJitterSeed(cfg1.GlobalConfig.ExternalLabels); err != nil { + if err := scrapeManager.setOffsetSeed(cfg1.GlobalConfig.ExternalLabels); err != nil { t.Error(err) } - jitter1 := scrapeManager.jitterSeed + offsetSeed1 := scrapeManager.offsetSeed - if jitter1 == 0 { - t.Error("Jitter has to be a hash of uint64") + if offsetSeed1 == 0 { + t.Error("Offset seed has to be a hash of uint64") } // Load the first config. cfg2 := getConfig("ha2") - if err := scrapeManager.setJitterSeed(cfg2.GlobalConfig.ExternalLabels); err != nil { + if err := scrapeManager.setOffsetSeed(cfg2.GlobalConfig.ExternalLabels); err != nil { t.Error(err) } - jitter2 := scrapeManager.jitterSeed + offsetSeed2 := scrapeManager.offsetSeed - if jitter1 == jitter2 { - t.Error("Jitter should not be the same on different set of external labels") + if offsetSeed1 == offsetSeed2 { + t.Error("Offset seed should not be the same on different set of external labels") } } diff --git a/scrape/scrape.go b/scrape/scrape.go index a97cbf539f..8c4cc51e74 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -279,7 +279,7 @@ const maxAheadTime = 10 * time.Minute // returning an empty label set is interpreted as "drop" type labelsMutator func(labels.Labels) labels.Labels -func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) { +func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) { targetScrapePools.Inc() if logger == nil { logger = log.NewNopLogger() @@ -325,7 +325,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) }, func(ctx context.Context) storage.Appender { return app.Appender(ctx) }, cache, - jitterSeed, + offsetSeed, opts.honorTimestamps, opts.sampleLimit, opts.bucketLimit, @@ -775,7 +775,7 @@ func appender(app storage.Appender, sampleLimit, bucketLimit int) storage.Append type scraper interface { scrape(ctx context.Context, w io.Writer) (string, error) Report(start time.Time, dur time.Duration, err error) - offset(interval time.Duration, jitterSeed uint64) time.Duration + offset(interval time.Duration, offsetSeed uint64) time.Duration } // targetScraper implements the scraper interface for a target. @@ -891,7 +891,7 @@ type scrapeLoop struct { cache *scrapeCache lastScrapeSize int buffers *pool.Pool - jitterSeed uint64 + offsetSeed uint64 honorTimestamps bool forcedErr error forcedErrMtx sync.Mutex @@ -1175,7 +1175,7 @@ func newScrapeLoop(ctx context.Context, reportSampleMutator labelsMutator, appender func(ctx context.Context) storage.Appender, cache *scrapeCache, - jitterSeed uint64, + offsetSeed uint64, honorTimestamps bool, sampleLimit int, bucketLimit int, @@ -1217,7 +1217,7 @@ func newScrapeLoop(ctx context.Context, sampleMutator: sampleMutator, reportSampleMutator: reportSampleMutator, stopped: make(chan struct{}), - jitterSeed: jitterSeed, + offsetSeed: offsetSeed, l: l, parentCtx: ctx, appenderCtx: appenderCtx, @@ -1238,7 +1238,7 @@ func newScrapeLoop(ctx context.Context, func (sl *scrapeLoop) run(errc chan<- error) { select { - case <-time.After(sl.scraper.offset(sl.interval, sl.jitterSeed)): + case <-time.After(sl.scraper.offset(sl.interval, sl.offsetSeed)): // Continue after a scraping offset. 
case <-sl.ctx.Done(): close(sl.stopped) diff --git a/scrape/target_test.go b/scrape/target_test.go index 12d3b5a4d7..4f0c840cd0 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -59,7 +59,7 @@ func TestTargetLabels(t *testing.T) { func TestTargetOffset(t *testing.T) { interval := 10 * time.Second - jitter := uint64(0) + offsetSeed := uint64(0) offsets := make([]time.Duration, 10000) @@ -68,7 +68,7 @@ func TestTargetOffset(t *testing.T) { target := newTestTarget("example.com:80", 0, labels.FromStrings( "label", fmt.Sprintf("%d", i), )) - offsets[i] = target.offset(interval, jitter) + offsets[i] = target.offset(interval, offsetSeed) } // Put the offsets into buckets and validate that they are all From ce236c61ab2e0cb6a9d5ef6a2e2ad720746861a0 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Thu, 25 May 2023 13:47:34 +0200 Subject: [PATCH 22/93] Mark 2.45 as LTS As the 2.37 LTS is going EOL in July 2023, let's mark 2.45 as LTS. I have synced with Jesus about this. He will bootstrap the release and after a few week I will do the maintenance for the lifetime of the LTS. Signed-off-by: Julien Pivotto --- RELEASE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index d7f24dabd5..f5c907fe99 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -49,7 +49,7 @@ Release cadence of first pre-releases being cut is 6 weeks. | v2.42 | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun) | | v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) | | v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) | -| v2.45 | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) | +| v2.45 LTS | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) | | v2.46 | 2023-07-12 | **searching for volunteer** | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. 
From 4c4454e4c9e12ad08e765335ba11199f296ad986 Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Thu, 25 May 2023 13:12:32 -0700 Subject: [PATCH 23/93] Group args to append to memSeries in chunkOpts Signed-off-by: Justin Lei --- tsdb/head_append.go | 37 ++++++++++----- tsdb/head_test.go | 113 +++++++++++++++++++++++++++++++------------- tsdb/head_wal.go | 12 +++-- 3 files changed, 113 insertions(+), 49 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 44847dceb2..a77c8a4ebc 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -881,9 +881,13 @@ func (a *headAppender) Commit() (err error) { oooMmapMarkers map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef oooRecords [][]byte oooCapMax = a.head.opts.OutOfOrderCapMax.Load() - chunkRange = a.head.chunkRange.Load() series *memSeries - enc record.Encoder + appendChunkOpts = chunkOpts{ + chunkDiskMapper: a.head.chunkDiskMapper, + chunkRange: a.head.chunkRange.Load(), + samplesPerChunk: a.head.opts.SamplesPerChunk, + } + enc record.Encoder ) defer func() { for i := range oooRecords { @@ -987,7 +991,7 @@ func (a *headAppender) Commit() (err error) { samplesAppended-- } default: - ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk) + ok, chunkCreated = series.append(s.T, s.V, a.appendID, appendChunkOpts) if ok { if s.T < inOrderMint { inOrderMint = s.T @@ -1016,7 +1020,7 @@ func (a *headAppender) Commit() (err error) { for i, s := range a.histograms { series = a.histogramSeries[i] series.Lock() - ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk) + ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, appendChunkOpts) series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) series.pendingCommit = false series.Unlock() @@ -1042,7 +1046,7 @@ func (a *headAppender) Commit() (err error) { for i, s := range a.floatHistograms { series = a.floatHistogramSeries[i] series.Lock() - ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk) + ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, appendChunkOpts) series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) series.pendingCommit = false series.Unlock() @@ -1118,12 +1122,19 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk return ok, chunkCreated, mmapRef } +// chunkOpts are chunk-level options that are passed when appending to a memSeries. +type chunkOpts struct { + chunkDiskMapper *chunks.ChunkDiskMapper + chunkRange int64 + samplesPerChunk int +} + // append adds the sample (t, v) to the series. The caller also has to provide // the appendID for isolation. (The appendID can be zero, which results in no // isolation for this append.) // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. 
-func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) { - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, chunkDiskMapper, chunkRange, samplesPerChunk) +func (s *memSeries) append(t int64, v float64, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) { + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, o.chunkDiskMapper, o.chunkRange, o.samplesPerChunk) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1144,7 +1155,7 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper // appendHistogram adds the histogram. // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. -func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) { +func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) { // Head controls the execution of recoding, so that we own the proper // chunk reference afterwards. We check for Appendable from appender before // appendPreprocessor because in case it ends up creating a new chunk, @@ -1157,7 +1168,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui pMergedSpans, nMergedSpans []histogram.Span okToAppend, counterReset, gauge bool ) - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange, samplesPerChunk) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, o.chunkDiskMapper, o.chunkRange, o.samplesPerChunk) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1193,7 +1204,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui // - okToAppend and no inserts → Chunk is ready to support our histogram. switch { case !okToAppend || counterReset: - c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange) + c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, o.chunkDiskMapper, o.chunkRange) chunkCreated = true case len(pForwardInserts) > 0 || len(nForwardInserts) > 0: // New buckets have appeared. We need to recode all @@ -1238,7 +1249,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui // appendFloatHistogram adds the float histogram. // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. -func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) { +func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) { // Head controls the execution of recoding, so that we own the proper // chunk reference afterwards. 
We check for Appendable from appender before // appendPreprocessor because in case it ends up creating a new chunk, @@ -1251,7 +1262,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, pMergedSpans, nMergedSpans []histogram.Span okToAppend, counterReset, gauge bool ) - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange, samplesPerChunk) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, o.chunkDiskMapper, o.chunkRange, o.samplesPerChunk) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1287,7 +1298,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, // - okToAppend and no inserts → Chunk is ready to support our histogram. switch { case !okToAppend || counterReset: - c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange) + c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, o.chunkDiskMapper, o.chunkRange) chunkCreated = true case len(pForwardInserts) > 0 || len(nForwardInserts) > 0: // New buckets have appeared. We need to recode all diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 14468e0716..8eb218b5ac 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -283,10 +283,15 @@ func BenchmarkLoadWAL(b *testing.B) { if c.mmappedChunkT != 0 { chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, mmappedChunksDir(dir), chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize) require.NoError(b, err) + cOpts := chunkOpts{ + chunkDiskMapper: chunkDiskMapper, + chunkRange: c.mmappedChunkT, + samplesPerChunk: DefaultSamplesPerChunk, + } for k := 0; k < c.batches*c.seriesPerBatch; k++ { // Create one mmapped chunk per series, with one sample at the given time. s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, defaultIsolationDisabled) - s.append(c.mmappedChunkT, 42, 0, chunkDiskMapper, c.mmappedChunkT, DefaultSamplesPerChunk) + s.append(c.mmappedChunkT, 42, 0, cOpts) s.mmapCurrentHeadChunk(chunkDiskMapper) } require.NoError(b, chunkDiskMapper.Close()) @@ -799,7 +804,11 @@ func TestMemSeries_truncateChunks(t *testing.T) { defer func() { require.NoError(t, chunkDiskMapper.Close()) }() - const chunkRange = 2000 + cOpts := chunkOpts{ + chunkDiskMapper: chunkDiskMapper, + chunkRange: 2000, + samplesPerChunk: DefaultSamplesPerChunk, + } memChunkPool := sync.Pool{ New: func() interface{} { @@ -810,7 +819,7 @@ func TestMemSeries_truncateChunks(t *testing.T) { s := newMemSeries(labels.FromStrings("a", "b"), 1, defaultIsolationDisabled) for i := 0; i < 4000; i += 5 { - ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, _ := s.append(int64(i), float64(i), 0, cOpts) require.True(t, ok, "sample append failed") } @@ -1336,26 +1345,30 @@ func TestMemSeries_append(t *testing.T) { defer func() { require.NoError(t, chunkDiskMapper.Close()) }() - const chunkRange = 500 + cOpts := chunkOpts{ + chunkDiskMapper: chunkDiskMapper, + chunkRange: 500, + samplesPerChunk: DefaultSamplesPerChunk, + } s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) // Add first two samples at the very end of a chunk range and the next two // on and after it. // New chunk must correctly be cut at 1000. 
- ok, chunkCreated := s.append(998, 1, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated := s.append(998, 1, 0, cOpts) require.True(t, ok, "append failed") require.True(t, chunkCreated, "first sample created chunk") - ok, chunkCreated = s.append(999, 2, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(999, 2, 0, cOpts) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") - ok, chunkCreated = s.append(1000, 3, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(1000, 3, 0, cOpts) require.True(t, ok, "append failed") require.True(t, chunkCreated, "expected new chunk on boundary") - ok, chunkCreated = s.append(1001, 4, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(1001, 4, 0, cOpts) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") @@ -1368,7 +1381,7 @@ func TestMemSeries_append(t *testing.T) { // Fill the range [1000,2000) with many samples. Intermediate chunks should be cut // at approximately 120 samples per chunk. for i := 1; i < 1000; i++ { - ok, _ := s.append(1001+int64(i), float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, _ := s.append(1001+int64(i), float64(i), 0, cOpts) require.True(t, ok, "append failed") } @@ -1390,7 +1403,11 @@ func TestMemSeries_appendHistogram(t *testing.T) { defer func() { require.NoError(t, chunkDiskMapper.Close()) }() - chunkRange := int64(1000) + cOpts := chunkOpts{ + chunkDiskMapper: chunkDiskMapper, + chunkRange: int64(1000), + samplesPerChunk: DefaultSamplesPerChunk, + } s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) @@ -1404,19 +1421,19 @@ func TestMemSeries_appendHistogram(t *testing.T) { // Add first two samples at the very end of a chunk range and the next two // on and after it. // New chunk must correctly be cut at 1000. 
- ok, chunkCreated := s.appendHistogram(998, histograms[0], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated := s.appendHistogram(998, histograms[0], 0, cOpts) require.True(t, ok, "append failed") require.True(t, chunkCreated, "first sample created chunk") - ok, chunkCreated = s.appendHistogram(999, histograms[1], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.appendHistogram(999, histograms[1], 0, cOpts) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") - ok, chunkCreated = s.appendHistogram(1000, histograms[2], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.appendHistogram(1000, histograms[2], 0, cOpts) require.True(t, ok, "append failed") require.True(t, chunkCreated, "expected new chunk on boundary") - ok, chunkCreated = s.appendHistogram(1001, histograms[3], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.appendHistogram(1001, histograms[3], 0, cOpts) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") @@ -1426,7 +1443,7 @@ func TestMemSeries_appendHistogram(t *testing.T) { require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range") require.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range") - ok, chunkCreated = s.appendHistogram(1002, histogramWithOneMoreBucket, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.appendHistogram(1002, histogramWithOneMoreBucket, 0, cOpts) require.True(t, ok, "append failed") require.False(t, chunkCreated, "third sample should trigger a re-encoded chunk") @@ -1446,7 +1463,11 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { t.Cleanup(func() { require.NoError(t, chunkDiskMapper.Close()) }) - chunkRange := DefaultBlockDuration + cOpts := chunkOpts{ + chunkDiskMapper: chunkDiskMapper, + chunkRange: DefaultBlockDuration, + samplesPerChunk: samplesPerChunk, + } s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) @@ -1456,7 +1477,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { var nextTs int64 var totalAppendedSamples int for i := 0; i < samplesPerChunk/4; i++ { - ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, _ := s.append(nextTs, float64(i), 0, cOpts) require.Truef(t, ok, "slow sample %d was not appended", i) nextTs += slowRate totalAppendedSamples++ @@ -1465,12 +1486,12 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { // Suddenly, the rate increases and we receive a sample every millisecond. 
for i := 0; i < math.MaxUint16; i++ { - ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, _ := s.append(nextTs, float64(i), 0, cOpts) require.Truef(t, ok, "quick sample %d was not appended", i) nextTs++ totalAppendedSamples++ } - ok, chunkCreated := s.append(DefaultBlockDuration, float64(0), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated := s.append(DefaultBlockDuration, float64(0), 0, cOpts) require.True(t, ok, "new chunk sample was not appended") require.True(t, chunkCreated, "sample at block duration timestamp should create a new chunk") @@ -1490,23 +1511,29 @@ func TestGCChunkAccess(t *testing.T) { require.NoError(t, h.Close()) }() + cOpts := chunkOpts{ + chunkDiskMapper: h.chunkDiskMapper, + chunkRange: chunkRange, + samplesPerChunk: DefaultSamplesPerChunk, + } + h.initTime(0) s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1")) // Appending 2 samples for the first chunk. - ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated := s.append(0, 0, 0, cOpts) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(999, 999, 0, cOpts) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") // A new chunks should be created here as it's beyond the chunk range. - ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(1000, 1000, 0, cOpts) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(1999, 1999, 0, cOpts) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") @@ -1543,23 +1570,29 @@ func TestGCSeriesAccess(t *testing.T) { require.NoError(t, h.Close()) }() + cOpts := chunkOpts{ + chunkDiskMapper: h.chunkDiskMapper, + chunkRange: chunkRange, + samplesPerChunk: DefaultSamplesPerChunk, + } + h.initTime(0) s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1")) // Appending 2 samples for the first chunk. - ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated := s.append(0, 0, 0, cOpts) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(999, 999, 0, cOpts) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") // A new chunks should be created here as it's beyond the chunk range. 
- ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(1000, 1000, 0, cOpts) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(1999, 1999, 0, cOpts) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") @@ -1791,14 +1824,20 @@ func TestHeadReadWriterRepair(t *testing.T) { require.Equal(t, 0.0, prom_testutil.ToFloat64(h.metrics.mmapChunkCorruptionTotal)) require.NoError(t, h.Init(math.MinInt64)) + cOpts := chunkOpts{ + chunkDiskMapper: h.chunkDiskMapper, + chunkRange: chunkRange, + samplesPerChunk: DefaultSamplesPerChunk, + } + s, created, _ := h.getOrCreate(1, labels.FromStrings("a", "1")) require.True(t, created, "series was not created") for i := 0; i < 7; i++ { - ok, chunkCreated := s.append(int64(i*chunkRange), float64(i*chunkRange), 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated := s.append(int64(i*chunkRange), float64(i*chunkRange), 0, cOpts) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunk was not created") - ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, cOpts) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunk was created") h.chunkDiskMapper.CutNewFile() @@ -2144,9 +2183,15 @@ func TestIsolationAppendIDZeroIsNoop(t *testing.T) { h.initTime(0) + cOpts := chunkOpts{ + chunkDiskMapper: h.chunkDiskMapper, + chunkRange: h.chunkRange.Load(), + samplesPerChunk: DefaultSamplesPerChunk, + } + s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1")) - ok, _ := s.append(0, 0, 0, h.chunkDiskMapper, h.chunkRange.Load(), DefaultSamplesPerChunk) + ok, _ := s.append(0, 0, 0, cOpts) require.True(t, ok, "Series append failed.") require.Equal(t, 0, s.txs.txIDCount, "Series should not have an appendID after append with appendID=0.") } @@ -2608,12 +2653,16 @@ func TestIteratorSeekIntoBuffer(t *testing.T) { defer func() { require.NoError(t, chunkDiskMapper.Close()) }() - const chunkRange = 500 + cOpts := chunkOpts{ + chunkDiskMapper: chunkDiskMapper, + chunkRange: 500, + samplesPerChunk: DefaultSamplesPerChunk, + } s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) for i := 0; i < 7; i++ { - ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, _ := s.append(int64(i), float64(i), 0, cOpts) require.True(t, ok, "sample append failed") } diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 9741d1da04..71120c55e1 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -564,7 +564,11 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp minValidTime := h.minValidTime.Load() mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) - chunkRange := h.chunkRange.Load() + appendChunkOpts := chunkOpts{ + chunkDiskMapper: h.chunkDiskMapper, + chunkRange: h.chunkRange.Load(), + samplesPerChunk: h.opts.SamplesPerChunk, + } for in := range wp.input { if in.existingSeries != nil { @@ -588,7 +592,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp if s.T <= ms.mmMaxTime { continue } - if _, chunkCreated := ms.append(s.T, s.V, 
0, h.chunkDiskMapper, chunkRange, h.opts.SamplesPerChunk); chunkCreated { + if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() } @@ -618,9 +622,9 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp } var chunkCreated bool if s.h != nil { - _, chunkCreated = ms.appendHistogram(s.t, s.h, 0, h.chunkDiskMapper, chunkRange, h.opts.SamplesPerChunk) + _, chunkCreated = ms.appendHistogram(s.t, s.h, 0, appendChunkOpts) } else { - _, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, h.chunkDiskMapper, chunkRange, h.opts.SamplesPerChunk) + _, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, appendChunkOpts) } if chunkCreated { h.metrics.chunksCreated.Inc() From e73d8b208408cf5566541a68c610776935f8789d Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Thu, 25 May 2023 13:35:09 -0700 Subject: [PATCH 24/93] Also pass chunkOpts into appendPreprocessor Signed-off-by: Justin Lei --- tsdb/head_append.go | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index a77c8a4ebc..3d828d0667 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -1134,7 +1134,7 @@ type chunkOpts struct { // isolation for this append.) // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. func (s *memSeries) append(t int64, v float64, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) { - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, o.chunkDiskMapper, o.chunkRange, o.samplesPerChunk) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, o) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1168,7 +1168,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui pMergedSpans, nMergedSpans []histogram.Span okToAppend, counterReset, gauge bool ) - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, o.chunkDiskMapper, o.chunkRange, o.samplesPerChunk) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, o) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1262,7 +1262,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, pMergedSpans, nMergedSpans []histogram.Span okToAppend, counterReset, gauge bool ) - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, o.chunkDiskMapper, o.chunkRange, o.samplesPerChunk) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, o) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1344,9 +1344,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, // appendPreprocessor takes care of cutting new chunks and m-mapping old chunks. // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. // This should be called only when appending data. 
-func (s *memSeries) appendPreprocessor( - t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int, -) (c *memChunk, sampleInOrder, chunkCreated bool) { +func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts) (c *memChunk, sampleInOrder, chunkCreated bool) { c = s.head() if c == nil { @@ -1355,7 +1353,7 @@ func (s *memSeries) appendPreprocessor( return c, false, false } // There is no head chunk in this series yet, create the first chunk for the sample. - c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange) + c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange) chunkCreated = true } @@ -1367,7 +1365,7 @@ func (s *memSeries) appendPreprocessor( if c.chunk.Encoding() != e { // The chunk encoding expected by this append is different than the head chunk's // encoding. So we cut a new chunk with the expected encoding. - c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange) + c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange) chunkCreated = true } @@ -1376,14 +1374,14 @@ func (s *memSeries) appendPreprocessor( // It could be the new chunk created after reading the chunk snapshot, // hence we fix the minTime of the chunk here. c.minTime = t - s.nextAt = rangeForTimestamp(c.minTime, chunkRange) + s.nextAt = rangeForTimestamp(c.minTime, o.chunkRange) } // If we reach 25% of a chunk's desired sample count, predict an end time // for this chunk that will try to make samples equally distributed within // the remaining chunks in the current chunk range. // At latest it must happen at the timestamp set when the chunk was cut. - if numSamples == samplesPerChunk/4 { + if numSamples == o.samplesPerChunk/4 { s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt) } // If numSamples > samplesPerChunk*2 then our previous prediction was invalid, @@ -1391,8 +1389,8 @@ func (s *memSeries) appendPreprocessor( // Since we assume that the rate is higher, we're being conservative and cutting at 2*samplesPerChunk // as we expect more chunks to come. // Note that next chunk will have its nextAt recalculated for the new rate. - if t >= s.nextAt || numSamples >= samplesPerChunk*2 { - c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange) + if t >= s.nextAt || numSamples >= o.samplesPerChunk*2 { + c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange) chunkCreated = true } From a308ea773d124de39958e0b50282115c341dc247 Mon Sep 17 00:00:00 2001 From: marcoderama Date: Fri, 26 May 2023 16:39:55 -0700 Subject: [PATCH 25/93] Update functions.md Fix small typo Signed-off-by: marcoderama --- docs/querying/functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index c8831c0792..e1a0b4a769 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -359,7 +359,7 @@ increase(http_requests_total{job="api-server"}[5m]) ``` `increase` acts on native histograms by calculating a new histogram where each -compononent (sum and count of observations, buckets) is the increase between +component (sum and count of observations, buckets) is the increase between the respective component in the first and last native histogram in `v`. However, each element in `v` that contains a mix of float and native histogram samples within the range, will be missing from the result vector. 
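
For the native-histogram behaviour of `increase` described in the hunk above, a minimal query sketch may help; the metric name `http_request_duration_seconds` is assumed to be a native histogram here and is purely illustrative, not part of the patch:

```
# Per-series increase of a native histogram over the last 5 minutes.
# The result is itself a histogram: its count, sum and buckets are the
# component-wise increase between the first and last sample in the window.
# Series mixing float and native-histogram samples in the window are dropped
# from the result vector.
increase(http_request_duration_seconds[5m])
```
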
From 4c27fa0c1d718894a1a584f75f18a881950c93a3 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Sun, 28 May 2023 15:00:48 +0200 Subject: [PATCH 26/93] Update OpenTelemetry dependencies Signed-off-by: Julien Pivotto --- go.mod | 42 ++++++++++++++++-------------- go.sum | 82 +++++++++++++++++++++++++++++++--------------------------- 2 files changed, 66 insertions(+), 58 deletions(-) diff --git a/go.mod b/go.mod index c902854388..998c173cda 100644 --- a/go.mod +++ b/go.mod @@ -49,27 +49,27 @@ require ( github.com/prometheus/exporter-toolkit v0.9.1 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.3 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 - go.opentelemetry.io/otel v1.14.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 - go.opentelemetry.io/otel/sdk v1.14.0 - go.opentelemetry.io/otel/trace v1.14.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 + go.opentelemetry.io/otel v1.16.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 + go.opentelemetry.io/otel/sdk v1.16.0 + go.opentelemetry.io/otel/trace v1.16.0 go.uber.org/atomic v1.10.0 go.uber.org/automaxprocs v1.5.2 go.uber.org/goleak v1.2.1 - golang.org/x/net v0.9.0 + golang.org/x/net v0.10.0 golang.org/x/oauth2 v0.7.0 golang.org/x/sync v0.1.0 - golang.org/x/sys v0.7.0 + golang.org/x/sys v0.8.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.8.0 google.golang.org/api v0.114.0 - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 - google.golang.org/grpc v1.53.0 + google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e + google.golang.org/grpc v1.55.0 google.golang.org/protobuf v1.30.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -87,10 +87,12 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect + google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e // indirect ) require ( - cloud.google.com/go/compute v1.18.0 // indirect + cloud.google.com/go/compute v1.19.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect @@ -101,8 +103,8 @@ require ( github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.2.0 // indirect - github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect @@ -113,7 +115,7 @@ require ( github.com/felixge/httpsnoop v1.0.3 // indirect github.com/ghodss/yaml v1.0.0 
// indirect github.com/go-kit/kit v0.12.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/errors v0.20.3 // indirect @@ -125,7 +127,7 @@ require ( github.com/go-openapi/validate v0.22.1 // indirect github.com/go-resty/resty/v2 v2.7.0 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/glog v1.0.0 // indirect + github.com/golang/glog v1.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.6.9 // indirect @@ -168,13 +170,13 @@ require ( github.com/spf13/pflag v1.0.5 // indirect go.mongodb.org/mongo-driver v1.11.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect - go.opentelemetry.io/otel/metric v0.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect golang.org/x/crypto v0.7.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 golang.org/x/mod v0.10.0 // indirect - golang.org/x/term v0.7.0 // indirect + golang.org/x/term v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 9ecc5c7275..545c85a9e1 100644 --- a/go.sum +++ b/go.sum @@ -19,8 +19,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= @@ -109,8 +109,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= -github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -130,8 +130,8 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43 h1:XP+uhjN0yBCN/tPkr8Z0BNDc5rZam9RG6UWyf2FrSQ0= -github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -216,8 +216,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= @@ -297,8 +297,9 @@ github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -737,8 +738,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod 
h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -780,24 +782,24 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 h1:lE9EJyw3/JhrjWH/hEy9FptnalDQgj7vpbgC2KCCCxE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0/go.mod h1:pcQ3MM3SWvrA71U4GDqv9UFDJ3HQsW7y5ZO3tDTlUdI= -go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= -go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 h1:/fXHZHGvro6MVqV34fJzDhi7sHGpX3Ej/Qjmfn003ho= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 h1:TKf2uAs2ueguzLaxOCBXNpHxfO/aC7PAdDsSH0IbeRQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 h1:ap+y8RXX3Mu9apKVtOkM6WSFESLM8K3wNQyOU8sWHcc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 h1:3jAYbRHQAqzLjd9I4tzxwJ8Pk/N6AqBcF6m1ZHrxG94= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0/go.mod h1:+N7zNjIJv4K+DeX67XXET0P+eIciESgaFDBqh+ZJFS4= -go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs= -go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s= -go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= -go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= -go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= -go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= 
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo5x2wazq10SKz8hEbtCRPcU78= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 h1:TVQp/bboR4mhZSav+MdgXB8FaRho1RC8UwVn3T0vjVc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0/go.mod h1:I33vtIe0sR96wfrUcilIzLoA3mLHhRmz9S9Te0S3gDo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 h1:iqjq9LAB8aK++sKVcELezzn655JnBNdsDhghU4G/So8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0/go.mod h1:hGXzO5bhhSHZnKvrDaXB82Y9DRFour0Nz/KrBh7reWw= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= +go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -914,8 +916,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1006,14 +1008,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1148,8 +1150,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e h1:Ao9GzfUMPH3zjVfzXG5rlWlk+Q8MXWKwWpwVQE1MXfw= +google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e h1:AZX1ra8YbFMSb7+1pI8S9v4rrgRR7jU1FmuFSSjTVcQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e h1:NumxXLPfHSndr3wBBdeKiVHjGVFzi9RX2HwwQke94iY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1171,8 +1177,8 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 4341b98eb2f4dc3d41d5afa9726feef7ea35e3c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Sun, 28 May 2023 19:55:00 +0200 Subject: [PATCH 27/93] fix: apply suggested changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index b3d743e441..c55e5be1ef 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -131,7 +131,7 @@ func main() { checkRulesCmd := checkCmd.Command("rules", "Check if the rule files are valid or not.") ruleFiles := checkRulesCmd.Arg( "rule-files", - "The rule files to check, default is read from standard input (STDIN).", + "The rule files to check, default is read from standard input.", ).ExistingFiles() checkRulesLint := checkRulesCmd.Flag( "lint", @@ -690,7 +690,7 @@ func CheckRules(ls lintConfig, files ...string) int { failed := false hasErrors := false - // add empty string to avoid matching filename + // Add empty string to avoid matching filename. if len(files) == 0 { files = append(files, "") } @@ -723,7 +723,7 @@ func checkRules(filename string, lintSettings lintConfig) (int, []error) { var rgs *rulefmt.RuleGroups var errs []error - // if filename is an empty string it is a stdin + // Empty string is stdin input. if filename == "" { data, err := io.ReadAll(os.Stdin) if err != nil { From 9cf1b4a2e635f6a959118fd5a19e3cba7ed54308 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Sun, 28 May 2023 22:31:57 +0200 Subject: [PATCH 28/93] fix: update doc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- docs/command-line/promtool.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 59c46dd79b..6784fb99fd 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -181,7 +181,7 @@ Check if the rule files are valid or not. | Argument | Description | | --- | --- | -| rule-files | The rule files to check, default is read from standard input (STDIN). | +| rule-files | The rule files to check, default is read from standard input. | From 6e7ac76981a53f6727da04f3a360494dc6696fb0 Mon Sep 17 00:00:00 2001 From: cui fliter Date: Mon, 29 May 2023 16:26:11 +0800 Subject: [PATCH 29/93] fix problematic link (#12405) Signed-off-by: cui fliter --- web/ui/module/codemirror-promql/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/ui/module/codemirror-promql/README.md b/web/ui/module/codemirror-promql/README.md index 441a81315b..627e4fe15a 100644 --- a/web/ui/module/codemirror-promql/README.md +++ b/web/ui/module/codemirror-promql/README.md @@ -1,7 +1,7 @@ CodeMirror-promql ================= -This project provides a mode for [CodeMirror Next](https://codemirror.net/6) that handles syntax highlighting, linting +This project provides a mode for [CodeMirror](https://codemirror.net/6/) that handles syntax highlighting, linting and autocompletion for PromQL ([Prometheus Query Language](https://prometheus.io/docs/introduction/overview/)). 
![preview](https://user-images.githubusercontent.com/4548045/95660829-d5e4b680-0b2a-11eb-9ecb-41dca6396273.gif) @@ -15,7 +15,7 @@ npm install --save @prometheus-io/codemirror-promql ``` **Note:** You will have to manually install different packages that are part -of [CodeMirror Next](https://codemirror.net/6), as they are a peer dependency to this package. Here are the different +of [CodeMirror](https://codemirror.net/6/), as they are a peer dependency to this package. Here are the different packages you need to install: * **@codemirror/autocomplete** From 044e004a81157c681ca1c9512d1101bb05c6ff88 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Tue, 30 May 2023 09:08:00 +0200 Subject: [PATCH 30/93] Update exporter-toolkit Adds web config option `client_allowed_sans`. This enables Prometheus to limit the Subject Alternate Name (SAN) allowed to connect. Signed-off-by: SuperQ --- docs/configuration/https.md | 7 +++++++ go.mod | 4 ++-- go.sum | 8 ++++---- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/docs/configuration/https.md b/docs/configuration/https.md index d31aca7b4b..bc83e07a38 100644 --- a/docs/configuration/https.md +++ b/docs/configuration/https.md @@ -44,6 +44,13 @@ tls_server_config: # CA certificate for client certificate authentication to the server. [ client_ca_file: ] + # Verify that the client certificate has a Subject Alternate Name (SAN) + # which is an exact match to an entry in this list, else terminate the + # connection. SAN match can be one or multiple of the following: DNS, + # IP, e-mail, or URI address from https://pkg.go.dev/crypto/x509#Certificate. + [ client_allowed_sans: + [ - ] ] + # Minimum TLS version that is acceptable. [ min_version: | default = "TLS12" ] diff --git a/go.mod b/go.mod index 998c173cda..15daecc2cf 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( github.com/prometheus/common v0.42.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 - github.com/prometheus/exporter-toolkit v0.9.1 + github.com/prometheus/exporter-toolkit v0.10.0 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/stretchr/testify v1.8.3 @@ -173,7 +173,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - golang.org/x/crypto v0.7.0 // indirect + golang.org/x/crypto v0.8.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 golang.org/x/mod v0.10.0 // indirect golang.org/x/term v0.8.0 // indirect diff --git a/go.sum b/go.sum index 545c85a9e1..18d98dfaeb 100644 --- a/go.sum +++ b/go.sum @@ -671,8 +671,8 @@ github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/ github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.9.1 h1:cNkC01riqiOS+kh3zdnNwRsbe/Blh0WwK3ij5rPJ9Sw= -github.com/prometheus/exporter-toolkit v0.9.1/go.mod h1:iFlTmFISCix0vyuyBmm0UqOUCTao9+RsAsKJP3YM9ec= +github.com/prometheus/exporter-toolkit v0.10.0 h1:yOAzZTi4M22ZzVxD+fhy1URTuNRj/36uQJJ5S8IPza8= +github.com/prometheus/exporter-toolkit v0.10.0/go.mod h1:+sVFzuvV5JDyw+Ih6p3zFxZNVnKQa3x5qPmDSiPu4ZY= github.com/prometheus/procfs 
v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -832,8 +832,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= From a16b876a05faef651f6fd4f3dd71a32a996ba367 Mon Sep 17 00:00:00 2001 From: Arianna Vespri <36129782+vesari@users.noreply.github.com> Date: Tue, 30 May 2023 10:22:23 +0200 Subject: [PATCH 31/93] Add limits to global config (#12126) * Add limits to global config Signed-off-by: Arianna Vespri * Move changes into Validate func Signed-off-by: Arianna Vespri * Make comments consistent wrt 0 meaning no limit Signed-off-by: Arianna Vespri * Document global limits Signed-off-by: Arianna Vespri --------- Signed-off-by: Arianna Vespri --- config/config.go | 61 ++++- config/config_test.go | 399 +++++++++++++++++++++------- config/testdata/conf.good.yml | 11 + docs/configuration/configuration.md | 33 +++ 4 files changed, 394 insertions(+), 110 deletions(-) diff --git a/config/config.go b/config/config.go index d0ba03ab29..9f81bbfd57 100644 --- a/config/config.go +++ b/config/config.go @@ -267,7 +267,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { for i, scfg := range c.ScrapeConfigs { // We do these checks for library users that would not call Validate in // Unmarshal. - if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + if err := scfg.Validate(c.GlobalConfig); err != nil { return nil, err } @@ -294,7 +294,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { return nil, fileErr(filename, err) } for _, scfg := range cfg.ScrapeConfigs { - if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + if err := scfg.Validate(c.GlobalConfig); err != nil { return nil, fileErr(filename, err) } @@ -343,7 +343,7 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { // Do global overrides and validate unique names. jobNames := map[string]struct{}{} for _, scfg := range c.ScrapeConfigs { - if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + if err := scfg.Validate(c.GlobalConfig); err != nil { return err } @@ -390,6 +390,24 @@ type GlobalConfig struct { QueryLogFile string `yaml:"query_log_file,omitempty"` // The labels to add to any timeseries that this Prometheus instance scrapes. 
ExternalLabels labels.Labels `yaml:"external_labels,omitempty"` + // An uncompressed response body larger than this many bytes will cause the + // scrape to fail. 0 means no limit. + BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"` + // More than this many samples post metric-relabeling will cause the scrape to + // fail. 0 means no limit. + SampleLimit uint `yaml:"sample_limit,omitempty"` + // More than this many targets after the target relabeling will cause the + // scrapes to fail. 0 means no limit. + TargetLimit uint `yaml:"target_limit,omitempty"` + // More than this many labels post metric-relabeling will cause the scrape to + // fail. 0 means no limit. + LabelLimit uint `yaml:"label_limit,omitempty"` + // More than this label name length post metric-relabeling will cause the + // scrape to fail. 0 means no limit. + LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"` + // More than this label value length post metric-relabeling will cause the + // scrape to fail. 0 means no limit. + LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -478,19 +496,19 @@ type ScrapeConfig struct { // scrape to fail. 0 means no limit. BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"` // More than this many samples post metric-relabeling will cause the scrape to - // fail. + // fail. 0 means no limit. SampleLimit uint `yaml:"sample_limit,omitempty"` // More than this many targets after the target relabeling will cause the - // scrapes to fail. + // scrapes to fail. 0 means no limit. TargetLimit uint `yaml:"target_limit,omitempty"` // More than this many labels post metric-relabeling will cause the scrape to - // fail. + // fail. 0 means no limit. LabelLimit uint `yaml:"label_limit,omitempty"` // More than this label name length post metric-relabeling will cause the - // scrape to fail. + // scrape to fail. 0 means no limit. LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"` // More than this label value length post metric-relabeling will cause the - // scrape to fail. + // scrape to fail. 0 means no limit. LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` // More than this many buckets in a native histogram will cause the scrape to // fail. @@ -552,25 +570,44 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } -func (c *ScrapeConfig) Validate(defaultInterval, defaultTimeout model.Duration) error { +func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { if c == nil { return errors.New("empty or null scrape config section") } // First set the correct scrape interval, then check that the timeout // (inferred or explicit) is not greater than that. 
if c.ScrapeInterval == 0 { - c.ScrapeInterval = defaultInterval + c.ScrapeInterval = globalConfig.ScrapeInterval } if c.ScrapeTimeout > c.ScrapeInterval { return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName) } if c.ScrapeTimeout == 0 { - if defaultTimeout > c.ScrapeInterval { + if globalConfig.ScrapeTimeout > c.ScrapeInterval { c.ScrapeTimeout = c.ScrapeInterval } else { - c.ScrapeTimeout = defaultTimeout + c.ScrapeTimeout = globalConfig.ScrapeTimeout } } + if c.BodySizeLimit == 0 { + c.BodySizeLimit = globalConfig.BodySizeLimit + } + if c.SampleLimit == 0 { + c.SampleLimit = globalConfig.SampleLimit + } + if c.TargetLimit == 0 { + c.TargetLimit = globalConfig.TargetLimit + } + if c.LabelLimit == 0 { + c.LabelLimit = globalConfig.LabelLimit + } + if c.LabelNameLengthLimit == 0 { + c.LabelNameLengthLimit = globalConfig.LabelNameLengthLimit + } + if c.LabelValueLengthLimit == 0 { + c.LabelValueLengthLimit = globalConfig.LabelValueLengthLimit + } + return nil } diff --git a/config/config_test.go b/config/config_test.go index bde09dfece..d243d687c4 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -68,6 +68,15 @@ func mustParseURL(u string) *config.URL { return &config.URL{URL: parsed} } +const ( + globBodySizeLimit = 15 * units.MiB + globSampleLimit = 1500 + globTargetLimit = 30 + globLabelLimit = 30 + globLabelNameLengthLimit = 200 + globLabelValueLengthLimit = 200 +) + var expectedConf = &Config{ GlobalConfig: GlobalConfig{ ScrapeInterval: model.Duration(15 * time.Second), @@ -76,6 +85,13 @@ var expectedConf = &Config{ QueryLogFile: "", ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"), + + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, }, RuleFiles: []string{ @@ -165,10 +181,16 @@ var expectedConf = &Config{ { JobName: "prometheus", - HonorLabels: true, - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorLabels: true, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -261,11 +283,15 @@ var expectedConf = &Config{ { JobName: "service-x", - HonorTimestamps: true, - ScrapeInterval: model.Duration(50 * time.Second), - ScrapeTimeout: model.Duration(5 * time.Second), - BodySizeLimit: 10 * units.MiB, - SampleLimit: 1000, + HonorTimestamps: true, + ScrapeInterval: model.Duration(50 * time.Second), + ScrapeTimeout: model.Duration(5 * time.Second), + BodySizeLimit: 10 * units.MiB, + SampleLimit: 1000, + TargetLimit: 35, + LabelLimit: 35, + LabelNameLengthLimit: 210, + LabelValueLengthLimit: 210, HTTPClientConfig: config.HTTPClientConfig{ BasicAuth: &config.BasicAuth{ @@ -352,9 +378,15 @@ var expectedConf = &Config{ { JobName: "service-y", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + 
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -399,9 +431,15 @@ var expectedConf = &Config{ { JobName: "service-z", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: "/metrics", Scheme: "http", @@ -424,9 +462,15 @@ var expectedConf = &Config{ { JobName: "service-kubernetes", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -455,9 +499,15 @@ var expectedConf = &Config{ { JobName: "service-kubernetes-namespaces", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -486,9 +536,15 @@ var expectedConf = &Config{ { JobName: "service-kuma", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -506,9 +562,15 @@ var expectedConf = &Config{ { JobName: "service-marathon", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, 
Scheme: DefaultScrapeConfig.Scheme, @@ -535,9 +597,15 @@ var expectedConf = &Config{ { JobName: "service-nomad", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -561,9 +629,15 @@ var expectedConf = &Config{ { JobName: "service-ec2", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -594,9 +668,15 @@ var expectedConf = &Config{ { JobName: "service-lightsail", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -617,9 +697,15 @@ var expectedConf = &Config{ { JobName: "service-azure", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -643,9 +729,15 @@ var expectedConf = &Config{ { JobName: "service-nerve", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -662,9 +754,15 @@ var expectedConf = &Config{ { JobName: "0123service-xxx", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * 
time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -684,9 +782,15 @@ var expectedConf = &Config{ { JobName: "badfederation", - HonorTimestamps: false, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: false, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: "/federate", Scheme: DefaultScrapeConfig.Scheme, @@ -706,9 +810,15 @@ var expectedConf = &Config{ { JobName: "測試", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -728,9 +838,15 @@ var expectedConf = &Config{ { JobName: "httpsd", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -747,9 +863,15 @@ var expectedConf = &Config{ { JobName: "service-triton", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -774,9 +896,15 @@ var expectedConf = &Config{ { JobName: "digitalocean-droplets", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: 
DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -800,9 +928,15 @@ var expectedConf = &Config{ { JobName: "docker", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -822,9 +956,15 @@ var expectedConf = &Config{ { JobName: "dockerswarm", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -844,9 +984,15 @@ var expectedConf = &Config{ { JobName: "service-openstack", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -870,9 +1016,15 @@ var expectedConf = &Config{ { JobName: "service-puppetdb", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -898,10 +1050,16 @@ var expectedConf = &Config{ }, }, { - JobName: "hetzner", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + JobName: "hetzner", + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -947,9 +1105,15 @@ var expectedConf = &Config{ { JobName: "service-eureka", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + 
HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -966,9 +1130,16 @@ var expectedConf = &Config{ { JobName: "ovhcloud", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -995,9 +1166,16 @@ var expectedConf = &Config{ { JobName: "scaleway", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1030,9 +1208,15 @@ var expectedConf = &Config{ { JobName: "linode-instances", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1057,9 +1241,16 @@ var expectedConf = &Config{ { JobName: "uyuni", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1076,10 +1267,16 @@ var expectedConf = &Config{ }, }, { - JobName: "ionos", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + JobName: "ionos", + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + 
BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1101,9 +1298,15 @@ var expectedConf = &Config{ { JobName: "vultr", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 388b9de32d..19cfe1eb5d 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -2,6 +2,12 @@ global: scrape_interval: 15s evaluation_interval: 30s + body_size_limit: 15MB + sample_limit: 1500 + target_limit: 30 + label_limit: 30 + label_name_length_limit: 200 + label_value_length_limit: 200 # scrape_timeout is set to the global default (10s). external_labels: @@ -111,6 +117,11 @@ scrape_configs: body_size_limit: 10MB sample_limit: 1000 + target_limit: 35 + label_limit: 35 + label_name_length_limit: 210 + label_value_length_limit: 210 + metrics_path: /my_path scheme: https diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index b0b587e02a..ff1449e34a 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -73,6 +73,39 @@ global: # Reloading the configuration will reopen the file. [ query_log_file: ] + # An uncompressed response body larger than this many bytes will cause the + # scrape to fail. 0 means no limit. Example: 100MB. + # This is an experimental feature, this behaviour could + # change or be removed in the future. + [ body_size_limit: | default = 0 ] + + # Per-scrape limit on number of scraped samples that will be accepted. + # If more than this number of samples are present after metric relabeling + # the entire scrape will be treated as failed. 0 means no limit. + [ sample_limit: | default = 0 ] + + # Per-scrape limit on number of labels that will be accepted for a sample. If + # more than this number of labels are present post metric-relabeling, the + # entire scrape will be treated as failed. 0 means no limit. + [ label_limit: | default = 0 ] + + # Per-scrape limit on length of labels name that will be accepted for a sample. + # If a label name is longer than this number post metric-relabeling, the entire + # scrape will be treated as failed. 0 means no limit. + [ label_name_length_limit: | default = 0 ] + + # Per-scrape limit on length of labels value that will be accepted for a sample. + # If a label value is longer than this number post metric-relabeling, the + # entire scrape will be treated as failed. 0 means no limit. + [ label_value_length_limit: | default = 0 ] + + # Per-scrape config limit on number of unique targets that will be + # accepted. If more than this number of targets are present after target + # relabeling, Prometheus will mark the targets as failed without scraping them. + # 0 means no limit. 
This is an experimental feature, this behaviour could + # change in the future. + [ target_limit: | default = 0 ] + # Rule files specifies a list of globs. Rules and alerts are read from # all matching files. rule_files: From 73078bf73870be23142d3c6703d52b5da3279d17 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Tue, 30 May 2023 04:49:22 -0700 Subject: [PATCH 32/93] Opmizing Group Regex (#12375) Signed-off-by: Alan Protasio --- tsdb/querier.go | 9 ++++++++- tsdb/querier_bench_test.go | 4 ++++ tsdb/querier_test.go | 21 +++++++++++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/tsdb/querier.go b/tsdb/querier.go index 9baf3f2429..72b6b51414 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -190,7 +190,14 @@ func findSetMatches(pattern string) []string { } escaped := false sets := []*strings.Builder{{}} - for i := 4; i < len(pattern)-2; i++ { + init := 4 + end := len(pattern) - 2 + // If the regex is wrapped in a group we can remove the first and last parentheses + if pattern[init] == '(' && pattern[end-1] == ')' { + init++ + end-- + } + for i := init; i < end; i++ { if escaped { switch { case isRegexMetaCharacter(pattern[i]): diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index 19619e35bb..c6deaeb44c 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -113,6 +113,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) { jXplus := labels.MustNewMatcher(labels.MatchRegexp, "j", "X.+") iCharSet := labels.MustNewMatcher(labels.MatchRegexp, "i", "1[0-9]") iAlternate := labels.MustNewMatcher(labels.MatchRegexp, "i", "(1|2|3|4|5|6|20|55)") + iNotAlternate := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "(1|2|3|4|5|6|20|55)") iXYZ := labels.MustNewMatcher(labels.MatchRegexp, "i", "X|Y|Z") iNotXYZ := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "X|Y|Z") cases := []struct { @@ -132,6 +133,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) { {`j=~"XXX|YYY"`, []*labels.Matcher{jXXXYYY}}, {`j=~"X.+"`, []*labels.Matcher{jXplus}}, {`i=~"(1|2|3|4|5|6|20|55)"`, []*labels.Matcher{iAlternate}}, + {`i!~"(1|2|3|4|5|6|20|55)"`, []*labels.Matcher{iNotAlternate}}, {`i=~"X|Y|Z"`, []*labels.Matcher{iXYZ}}, {`i!~"X|Y|Z"`, []*labels.Matcher{iNotXYZ}}, {`i=~".*"`, []*labels.Matcher{iStar}}, @@ -161,6 +163,8 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) { for _, c := range cases { b.Run(c.name, func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() for i := 0; i < b.N; i++ { _, err := PostingsForMatchers(ir, c.matchers...) 
require.NoError(b, err) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 5bf721a620..e9dd3b75f4 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -2051,6 +2051,12 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "2"), }, }, + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "(1|2.5)")}, + exp: []labels.Labels{ + labels.FromStrings("n", "2"), + }, + }, { matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^a$")}, exp: []labels.Labels{ @@ -2112,6 +2118,13 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "1", "i", "b"), }, }, + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", "(a|b)")}, + exp: []labels.Labels{ + labels.FromStrings("n", "1", "i", "a"), + labels.FromStrings("n", "1", "i", "b"), + }, + }, { matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "x1|2")}, exp: []labels.Labels{ @@ -2134,6 +2147,14 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "2.5"), }, }, + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", "(c||d)")}, + exp: []labels.Labels{ + labels.FromStrings("n", "1"), + labels.FromStrings("n", "2"), + labels.FromStrings("n", "2.5"), + }, + }, } ir, err := h.Index() From dfae954dc1137568f33564e8cffda321f2867925 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Tue, 30 May 2023 08:42:38 -0700 Subject: [PATCH 33/93] Improving Performance on the API Gzip Handler (#12363) Using github.com/klauspost/compress package to replace the current Gzip Handler on the API. We see significant improvements using this handler over the current one as shown in the benchmark added. Also: * move selection of compression from `newCompressedResponseWriter` to `*CompressionHandler.ServeHTTP`. * renaming `compressedResponseWriter` since it now only does one kind of compression. 
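For illustration only (not part of the diff below), a minimal sketch of wiring an API handler through CompressionHandler after this change. The import path, port, and endpoint are assumptions; the wrapping itself mirrors the new compression_test.go setup:

    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/prometheus/util/httputil"
    )

    func main() {
        // Hypothetical upstream handler standing in for the real API handler.
        apiHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("ok"))
        })
        // CompressionHandler now selects the codec per request inside ServeHTTP:
        // "gzip" is delegated to gzhttp.GzipHandler, "deflate" uses the new
        // deflatedResponseWriter, and anything else is served uncompressed.
        http.Handle("/api/v1/query", httputil.CompressionHandler{Handler: apiHandler})
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
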
Signed-off-by: Alan Protasio --- go.mod | 1 + go.sum | 1 + util/httputil/compression.go | 74 ++++++++----------- util/httputil/compression_test.go | 115 +++++++++++++++++++++++++++--- 4 files changed, 139 insertions(+), 52 deletions(-) diff --git a/go.mod b/go.mod index 15daecc2cf..9b826ad334 100644 --- a/go.mod +++ b/go.mod @@ -32,6 +32,7 @@ require ( github.com/hetznercloud/hcloud-go v1.45.1 github.com/ionos-cloud/sdk-go/v6 v6.1.6 github.com/json-iterator/go v1.1.12 + github.com/klauspost/compress v1.13.6 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/linode/linodego v1.16.1 github.com/miekg/dns v1.1.53 diff --git a/go.sum b/go.sum index 18d98dfaeb..3a472c99a0 100644 --- a/go.sum +++ b/go.sum @@ -499,6 +499,7 @@ github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0Lh github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= diff --git a/util/httputil/compression.go b/util/httputil/compression.go index b96c088cbb..5e9276958d 100644 --- a/util/httputil/compression.go +++ b/util/httputil/compression.go @@ -14,11 +14,11 @@ package httputil import ( - "compress/gzip" - "compress/zlib" - "io" "net/http" "strings" + + "github.com/klauspost/compress/gzhttp" + "github.com/klauspost/compress/zlib" ) const ( @@ -28,53 +28,27 @@ const ( deflateEncoding = "deflate" ) -// Wrapper around http.Handler which adds suitable response compression based -// on the client's Accept-Encoding headers. -type compressedResponseWriter struct { +// Wrapper around http.ResponseWriter which adds deflate compression +type deflatedResponseWriter struct { http.ResponseWriter - writer io.Writer + writer *zlib.Writer } // Writes HTTP response content data. -func (c *compressedResponseWriter) Write(p []byte) (int, error) { +func (c *deflatedResponseWriter) Write(p []byte) (int, error) { return c.writer.Write(p) } -// Closes the compressedResponseWriter and ensures to flush all data before. -func (c *compressedResponseWriter) Close() { - if zlibWriter, ok := c.writer.(*zlib.Writer); ok { - zlibWriter.Flush() - } - if gzipWriter, ok := c.writer.(*gzip.Writer); ok { - gzipWriter.Flush() - } - if closer, ok := c.writer.(io.Closer); ok { - defer closer.Close() - } +// Close Closes the deflatedResponseWriter and ensures to flush all data before. +func (c *deflatedResponseWriter) Close() { + c.writer.Close() } -// Constructs a new compressedResponseWriter based on client request headers. 
-func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter { - encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",") - for _, encoding := range encodings { - switch strings.TrimSpace(encoding) { - case gzipEncoding: - writer.Header().Set(contentEncodingHeader, gzipEncoding) - return &compressedResponseWriter{ - ResponseWriter: writer, - writer: gzip.NewWriter(writer), - } - case deflateEncoding: - writer.Header().Set(contentEncodingHeader, deflateEncoding) - return &compressedResponseWriter{ - ResponseWriter: writer, - writer: zlib.NewWriter(writer), - } - } - } - return &compressedResponseWriter{ +// Constructs a new deflatedResponseWriter to compress the original writer using 'deflate' compression. +func newDeflateResponseWriter(writer http.ResponseWriter) *deflatedResponseWriter { + return &deflatedResponseWriter{ ResponseWriter: writer, - writer: writer, + writer: zlib.NewWriter(writer), } } @@ -86,7 +60,21 @@ type CompressionHandler struct { // ServeHTTP adds compression to the original http.Handler's ServeHTTP() method. func (c CompressionHandler) ServeHTTP(writer http.ResponseWriter, req *http.Request) { - compWriter := newCompressedResponseWriter(writer, req) - c.Handler.ServeHTTP(compWriter, req) - compWriter.Close() + encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",") + for _, encoding := range encodings { + switch strings.TrimSpace(encoding) { + case gzipEncoding: + gzhttp.GzipHandler(c.Handler).ServeHTTP(writer, req) + return + case deflateEncoding: + compWriter := newDeflateResponseWriter(writer) + writer.Header().Set(contentEncodingHeader, deflateEncoding) + c.Handler.ServeHTTP(compWriter, req) + compWriter.Close() + return + default: + c.Handler.ServeHTTP(writer, req) + return + } + } } diff --git a/util/httputil/compression_test.go b/util/httputil/compression_test.go index 8512797613..b7148fc1cc 100644 --- a/util/httputil/compression_test.go +++ b/util/httputil/compression_test.go @@ -17,23 +17,30 @@ import ( "bytes" "compress/gzip" "compress/zlib" + "encoding/json" + "fmt" "io" "net/http" "net/http/httptest" + "strings" "testing" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/labels" ) var ( - mux *http.ServeMux - server *httptest.Server + mux *http.ServeMux + server *httptest.Server + respBody = strings.Repeat("Hello World!", 500) ) func setup() func() { mux = http.NewServeMux() server = httptest.NewServer(mux) return func() { + server.CloseClientConnections() server.Close() } } @@ -41,7 +48,7 @@ func setup() func() { func getCompressionHandlerFunc() CompressionHandler { hf := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte("Hello World!")) + w.Write([]byte(respBody)) } return CompressionHandler{ Handler: http.HandlerFunc(hf), @@ -67,9 +74,8 @@ func TestCompressionHandler_PlainText(t *testing.T) { contents, err := io.ReadAll(resp.Body) require.NoError(t, err, "unexpected error while creating the response body reader") - expected := "Hello World!" actual := string(contents) - require.Equal(t, expected, actual, "expected response with content") + require.Equal(t, respBody, actual, "expected response with content") } func TestCompressionHandler_Gzip(t *testing.T) { @@ -103,8 +109,7 @@ func TestCompressionHandler_Gzip(t *testing.T) { require.NoError(t, err, "unexpected error while reading the response body") actual := buf.String() - expected := "Hello World!" 
- require.Equal(t, expected, actual, "unexpected response content") + require.Equal(t, respBody, actual, "unexpected response content") } func TestCompressionHandler_Deflate(t *testing.T) { @@ -138,6 +143,98 @@ func TestCompressionHandler_Deflate(t *testing.T) { require.NoError(t, err, "unexpected error while reading the response body") actual := buf.String() - expected := "Hello World!" - require.Equal(t, expected, actual, "expected response with content") + require.Equal(t, respBody, actual, "expected response with content") +} + +func Benchmark_compression(b *testing.B) { + client := &http.Client{ + Transport: &http.Transport{ + DisableCompression: true, + }, + } + + cases := map[string]struct { + enc string + numberOfLabels int + }{ + "gzip-10-labels": { + enc: gzipEncoding, + numberOfLabels: 10, + }, + "gzip-100-labels": { + enc: gzipEncoding, + numberOfLabels: 100, + }, + "gzip-1K-labels": { + enc: gzipEncoding, + numberOfLabels: 1000, + }, + "gzip-10K-labels": { + enc: gzipEncoding, + numberOfLabels: 10000, + }, + "gzip-100K-labels": { + enc: gzipEncoding, + numberOfLabels: 100000, + }, + "gzip-1M-labels": { + enc: gzipEncoding, + numberOfLabels: 1000000, + }, + } + + for name, tc := range cases { + b.Run(name, func(b *testing.B) { + tearDown := setup() + defer tearDown() + labels := labels.ScratchBuilder{} + + for i := 0; i < tc.numberOfLabels; i++ { + labels.Add(fmt.Sprintf("Name%v", i), fmt.Sprintf("Value%v", i)) + } + + respBody, err := json.Marshal(labels.Labels()) + require.NoError(b, err) + + hf := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write(respBody) + } + h := CompressionHandler{ + Handler: http.HandlerFunc(hf), + } + + mux.Handle("/foo_endpoint", h) + + req, _ := http.NewRequest("GET", server.URL+"/foo_endpoint", nil) + req.Header.Set(acceptEncodingHeader, tc.enc) + + b.ReportAllocs() + b.ResetTimer() + + // Reusing the array to read the body and avoid allocation on the test + encRespBody := make([]byte, len(respBody)) + + for i := 0; i < b.N; i++ { + resp, err := client.Do(req) + + require.NoError(b, err) + + require.NoError(b, err, "client get failed with unexpected error") + responseBodySize := 0 + for { + n, err := resp.Body.Read(encRespBody) + responseBodySize += n + if err == io.EOF { + break + } + } + + b.ReportMetric(float64(responseBodySize), "ContentLength") + resp.Body.Close() + } + + client.CloseIdleConnections() + }) + } } From ca6580828aeef078370116b86ff3fbe9759b75ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Wed, 31 May 2023 15:17:44 +0200 Subject: [PATCH 34/93] feat: support histogram and summary metric types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- util/fmtutil/format.go | 171 +++++++++++++++++++++++++----------- util/fmtutil/format_test.go | 140 ++++++++++++++++++++++++++++- 2 files changed, 256 insertions(+), 55 deletions(-) diff --git a/util/fmtutil/format.go b/util/fmtutil/format.go index b5bb9469ce..291308dc23 100644 --- a/util/fmtutil/format.go +++ b/util/fmtutil/format.go @@ -14,6 +14,7 @@ package fmtutil import ( + "errors" "fmt" "io" "sort" @@ -26,6 +27,12 @@ import ( "github.com/prometheus/prometheus/prompb" ) +const ( + sumStr = "_sum" + countStr = "_count" + bucketStr = "_bucket" +) + var MetricMetadataTypeValue = map[string]int32{ "UNKNOWN": 0, "COUNTER": 1, @@ -60,71 +67,129 @@ func FormatMetrics(mf map[string]*dto.MetricFamily, extraLabels map[string]strin wr.Metadata = 
append(wr.Metadata, metadata) for _, metric := range mf[metricName].Metric { - var timeserie prompb.TimeSeries - - // build labels map - labels := make(map[string]string, len(metric.Label)+len(extraLabels)) - labels[model.MetricNameLabel] = metricName - - // add extra labels - for key, value := range extraLabels { - labels[key] = value - } - - // add metric labels - for _, label := range metric.Label { - labelname := label.GetName() - if labelname == model.JobLabel { - labelname = fmt.Sprintf("%s%s", model.ExportedLabelPrefix, labelname) - } - labels[labelname] = label.GetValue() - } - - // build labels name list - sortedLabelNames := make([]string, 0, len(labels)) - for label := range labels { - sortedLabelNames = append(sortedLabelNames, label) + labels := makeLabelsMap(metric, metricName, extraLabels) + if err := makeTimeseries(wr, labels, metric); err != nil { + return wr, err } - // sort labels name in lexicographical order - sort.Strings(sortedLabelNames) - - for _, label := range sortedLabelNames { - timeserie.Labels = append(timeserie.Labels, prompb.Label{ - Name: label, - Value: labels[label], - }) - } - - timestamp := metric.GetTimestampMs() - if timestamp == 0 { - timestamp = time.Now().UnixNano() / int64(time.Millisecond) - } - - timeserie.Samples = []prompb.Sample{ - { - Timestamp: timestamp, - Value: getMetricsValue(metric), - }, - } - - wr.Timeseries = append(wr.Timeseries, timeserie) } } return wr, nil } -// getMetricsValue return the value of a timeserie without the need to give value type -func getMetricsValue(m *dto.Metric) float64 { +func makeTimeserie(wr *prompb.WriteRequest, labels map[string]string, timestamp int64, value float64) { + var timeserie prompb.TimeSeries + timeserie.Labels = makeLabels(labels) + timeserie.Samples = []prompb.Sample{ + { + Timestamp: timestamp, + Value: value, + }, + } + wr.Timeseries = append(wr.Timeseries, timeserie) +} + +func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Metric) error { + var err error + + timestamp := m.GetTimestampMs() + if timestamp == 0 { + timestamp = time.Now().UnixNano() / int64(time.Millisecond) + } + switch { case m.Gauge != nil: - return m.GetGauge().GetValue() + makeTimeserie(wr, labels, timestamp, m.GetGauge().GetValue()) case m.Counter != nil: - return m.GetCounter().GetValue() + makeTimeserie(wr, labels, timestamp, m.GetCounter().GetValue()) + case m.Summary != nil: + metricName := labels[model.MetricNameLabel] + // Preserve metric name order with first quantile labels timeseries then sum suffix timeserie and finally count suffix timeserie + // Add Summary quantile timeseries + quantileLabels := make(map[string]string, len(labels)+1) + for key, value := range labels { + quantileLabels[key] = value + } + + for _, q := range m.GetSummary().Quantile { + quantileLabels[model.QuantileLabel] = fmt.Sprint(q.GetQuantile()) + makeTimeserie(wr, quantileLabels, timestamp, q.GetValue()) + } + // Overwrite label model.MetricNameLabel for count and sum metrics + // Add Summary sum timeserie + labels[model.MetricNameLabel] = metricName + sumStr + makeTimeserie(wr, labels, timestamp, m.GetSummary().GetSampleSum()) + // Add Summary count timeserie + labels[model.MetricNameLabel] = metricName + countStr + makeTimeserie(wr, labels, timestamp, float64(m.GetSummary().GetSampleCount())) + + case m.Histogram != nil: + metricName := labels[model.MetricNameLabel] + // Preserve metric name order with first bucket suffix timeseries then sum suffix timeserie and finally count suffix timeserie + // Add 
Histogram bucket timeseries + bucketLabels := make(map[string]string, len(labels)+1) + for key, value := range labels { + bucketLabels[key] = value + } + for _, b := range m.GetHistogram().Bucket { + bucketLabels[model.MetricNameLabel] = metricName + bucketStr + bucketLabels[model.BucketLabel] = fmt.Sprint(b.GetUpperBound()) + makeTimeserie(wr, bucketLabels, timestamp, float64(b.GetCumulativeCount())) + } + // Overwrite label model.MetricNameLabel for count and sum metrics + // Add Histogram sum timeserie + labels[model.MetricNameLabel] = metricName + sumStr + makeTimeserie(wr, labels, timestamp, m.GetHistogram().GetSampleSum()) + // Add Histogram count timeserie + labels[model.MetricNameLabel] = metricName + countStr + makeTimeserie(wr, labels, timestamp, float64(m.GetHistogram().GetSampleCount())) + case m.Untyped != nil: - return m.GetUntyped().GetValue() + makeTimeserie(wr, labels, timestamp, m.GetUntyped().GetValue()) default: - return 0. + err = errors.New("unsupported metric type") + } + return err +} + +func makeLabels(labelsMap map[string]string) []prompb.Label { + // build labels name list + sortedLabelNames := make([]string, 0, len(labelsMap)) + for label := range labelsMap { + sortedLabelNames = append(sortedLabelNames, label) } + // sort labels name in lexicographical order + sort.Strings(sortedLabelNames) + + var labels []prompb.Label + for _, label := range sortedLabelNames { + labels = append(labels, prompb.Label{ + Name: label, + Value: labelsMap[label], + }) + } + return labels +} + +func makeLabelsMap(m *dto.Metric, metricName string, extraLabels map[string]string) map[string]string { + // build labels map + labels := make(map[string]string, len(m.Label)+len(extraLabels)) + labels[model.MetricNameLabel] = metricName + + // add extra labels + for key, value := range extraLabels { + labels[key] = value + } + + // add metric labels + for _, label := range m.Label { + labelname := label.GetName() + if labelname == model.JobLabel { + labelname = fmt.Sprintf("%s%s", model.ExportedLabelPrefix, labelname) + } + labels[labelname] = label.GetValue() + } + + return labels } // ParseMetricsTextReader consumes an io.Reader and returns the MetricFamily. 
diff --git a/util/fmtutil/format_test.go b/util/fmtutil/format_test.go index 9deed2de90..e2ac301353 100644 --- a/util/fmtutil/format_test.go +++ b/util/fmtutil/format_test.go @@ -24,13 +24,130 @@ import ( var writeRequestFixture = &prompb.WriteRequest{ Metadata: []prompb.MetricMetadata{ + { + MetricFamilyName: "http_request_duration_seconds", + Type: 3, + Help: "A histogram of the request duration.", + }, + { + MetricFamilyName: "http_requests_total", + Type: 1, + Help: "The total number of HTTP requests.", + }, + { + MetricFamilyName: "rpc_duration_seconds", + Type: 5, + Help: "A summary of the RPC duration in seconds.", + }, { MetricFamilyName: "test_metric1", Type: 2, - Help: "this is a test metric", + Help: "This is a test metric.", }, }, Timeseries: []prompb.TimeSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_request_duration_seconds_bucket"}, + {Name: "job", Value: "promtool"}, + {Name: "le", Value: "0.1"}, + }, + Samples: []prompb.Sample{{Value: 33444, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_request_duration_seconds_bucket"}, + {Name: "job", Value: "promtool"}, + {Name: "le", Value: "0.5"}, + }, + Samples: []prompb.Sample{{Value: 129389, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_request_duration_seconds_bucket"}, + {Name: "job", Value: "promtool"}, + {Name: "le", Value: "1"}, + }, + Samples: []prompb.Sample{{Value: 133988, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_request_duration_seconds_bucket"}, + {Name: "job", Value: "promtool"}, + {Name: "le", Value: "+Inf"}, + }, + Samples: []prompb.Sample{{Value: 144320, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_request_duration_seconds_sum"}, + {Name: "job", Value: "promtool"}, + }, + Samples: []prompb.Sample{{Value: 53423, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_request_duration_seconds_count"}, + {Name: "job", Value: "promtool"}, + }, + Samples: []prompb.Sample{{Value: 144320, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_requests_total"}, + {Name: "code", Value: "200"}, + {Name: "job", Value: "promtool"}, + {Name: "method", Value: "post"}, + }, + Samples: []prompb.Sample{{Value: 1027, Timestamp: 1395066363000}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_requests_total"}, + {Name: "code", Value: "400"}, + {Name: "job", Value: "promtool"}, + {Name: "method", Value: "post"}, + }, + Samples: []prompb.Sample{{Value: 3, Timestamp: 1395066363000}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "rpc_duration_seconds"}, + {Name: "job", Value: "promtool"}, + {Name: "quantile", Value: "0.01"}, + }, + Samples: []prompb.Sample{{Value: 3102, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "rpc_duration_seconds"}, + {Name: "job", Value: "promtool"}, + {Name: "quantile", Value: "0.5"}, + }, + Samples: []prompb.Sample{{Value: 4773, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "rpc_duration_seconds"}, + {Name: "job", Value: "promtool"}, + {Name: "quantile", Value: "0.99"}, + }, + Samples: []prompb.Sample{{Value: 76656, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "rpc_duration_seconds_sum"}, + {Name: "job", Value: "promtool"}, + }, + Samples: []prompb.Sample{{Value: 1.7560473e+07, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: 
"__name__", Value: "rpc_duration_seconds_count"}, + {Name: "job", Value: "promtool"}, + }, + Samples: []prompb.Sample{{Value: 2693, Timestamp: 1}}, + }, { Labels: []prompb.Label{ {Name: "__name__", Value: "test_metric1"}, @@ -58,7 +175,26 @@ var writeRequestFixture = &prompb.WriteRequest{ func TestParseMetricsTextAndFormat(t *testing.T) { input := bytes.NewReader([]byte(` - # HELP test_metric1 this is a test metric + # HELP http_request_duration_seconds A histogram of the request duration. + # TYPE http_request_duration_seconds histogram + http_request_duration_seconds_bucket{le="0.1"} 33444 1 + http_request_duration_seconds_bucket{le="0.5"} 129389 1 + http_request_duration_seconds_bucket{le="1"} 133988 1 + http_request_duration_seconds_bucket{le="+Inf"} 144320 1 + http_request_duration_seconds_sum 53423 1 + http_request_duration_seconds_count 144320 1 + # HELP http_requests_total The total number of HTTP requests. + # TYPE http_requests_total counter + http_requests_total{method="post",code="200"} 1027 1395066363000 + http_requests_total{method="post",code="400"} 3 1395066363000 + # HELP rpc_duration_seconds A summary of the RPC duration in seconds. + # TYPE rpc_duration_seconds summary + rpc_duration_seconds{quantile="0.01"} 3102 1 + rpc_duration_seconds{quantile="0.5"} 4773 1 + rpc_duration_seconds{quantile="0.99"} 76656 1 + rpc_duration_seconds_sum 1.7560473e+07 1 + rpc_duration_seconds_count 2693 1 + # HELP test_metric1 This is a test metric. # TYPE test_metric1 gauge test_metric1{b="c",baz="qux",d="e",foo="bar"} 1 1 test_metric1{b="c",baz="qux",d="e",foo="bar"} 2 1 From 6ae4a46845295e88c353de0dd42caa2b0fe1b46f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Thu, 1 Jun 2023 10:28:55 +0200 Subject: [PATCH 35/93] feat: enhance stdin check and add tests parsing error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 2 +- cmd/promtool/metrics.go | 88 +++++++++++++++++------------------ docs/command-line/promtool.md | 2 +- util/fmtutil/format_test.go | 27 ++++++++++- 4 files changed, 70 insertions(+), 49 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index c76790e13b..bcfcc24223 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -185,7 +185,7 @@ func main() { pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write url to push metrics.").Required().URLVar(&remoteWriteURL) metricFiles := pushMetricsCmd.Arg( "metric-files", - "The metric files to push, default is read from standard input (STDIN).", + "The metric files to push, default is read from standard input.", ).ExistingFiles() pushMetricsLabels := pushMetricsCmd.Flag("label", "Label to attach to metrics. Can be specified multiple times.").Default("job=promtool").StringMap() pushMetricsTimeout := pushMetricsCmd.Flag("timeout", "The time to wait for pushing metrics.").Default("30s").Duration() diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go index 8abe32cf41..4a6fafd407 100644 --- a/cmd/promtool/metrics.go +++ b/cmd/promtool/metrics.go @@ -33,8 +33,6 @@ import ( // Push metrics to a prometheus remote write (for testing purpose only). 
func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int { - failed := false - addressURL, err := url.Parse(url.String()) if err != nil { fmt.Fprintln(os.Stderr, err) @@ -63,63 +61,37 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin headers: headers, } - // add empty string to avoid matching filename - if len(files) == 0 { - files = append(files, "") - } + var data []byte + var failed bool - for _, file := range files { - var data []byte - var err error - - // if file is an empty string it is a stdin - if file == "" { - data, err = io.ReadAll(os.Stdin) - if err != nil { - fmt.Fprintln(os.Stderr, " FAILED:", err) - failed = true - break - } - - fmt.Printf("Parsing input from stdin\n") - } else { - data, err = os.ReadFile(file) - if err != nil { - fmt.Fprintln(os.Stderr, " FAILED:", err) - failed = true - continue - } - - fmt.Printf("Parsing input from metric file %s\n", file) - } - metricsData, err := fmtutil.ParseMetricsTextAndFormat(bytes.NewReader(data), labels) + if len(files) == 0 { + data, err = io.ReadAll(os.Stdin) if err != nil { fmt.Fprintln(os.Stderr, " FAILED:", err) - failed = true - continue + return failureExitCode } - - raw, err := metricsData.Marshal() - if err != nil { - fmt.Fprintln(os.Stderr, " FAILED:", err) - failed = true - continue + fmt.Printf("Parsing standard input\n") + if parseAndPushMetrics(client, data, labels) { + fmt.Printf(" SUCCESS: metrics pushed to remote write.\n") + return successExitCode } + return failureExitCode + } - // Encode the request body into snappy encoding. - compressed := snappy.Encode(nil, raw) - err = client.Store(context.Background(), compressed) + for _, file := range files { + data, err = os.ReadFile(file) if err != nil { fmt.Fprintln(os.Stderr, " FAILED:", err) failed = true continue } - if file == "" { - fmt.Printf(" SUCCESS: metric pushed to remote write.\n") - } else { - fmt.Printf(" SUCCESS: metric file %s pushed to remote write.\n", file) + fmt.Printf("Parsing metrics file %s\n", file) + if parseAndPushMetrics(client, data, labels) { + fmt.Printf(" SUCCESS: metrics file %s pushed to remote write.\n", file) + continue } + failed = true } if failed { @@ -129,6 +101,30 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin return successExitCode } +func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool { + metricsData, err := fmtutil.ParseMetricsTextAndFormat(bytes.NewReader(data), labels) + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return false + } + + raw, err := metricsData.Marshal() + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return false + } + + // Encode the request body into snappy encoding. + compressed := snappy.Encode(nil, raw) + err = client.Store(context.Background(), compressed) + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return false + } + + return true +} + type setHeadersTransport struct { http.RoundTripper headers map[string]string diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index c78900b991..c36caaf616 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -410,7 +410,7 @@ Push metrics to a prometheus remote write (for testing purpose only). | Argument | Description | Required | | --- | --- | --- | | remote-write-url | Prometheus remote write url to push metrics. 
| Yes | -| metric-files | The metric files to push, default is read from standard input (STDIN). | | +| metric-files | The metric files to push, default is read from standard input. | | diff --git a/util/fmtutil/format_test.go b/util/fmtutil/format_test.go index e2ac301353..5c1ab5bde0 100644 --- a/util/fmtutil/format_test.go +++ b/util/fmtutil/format_test.go @@ -173,7 +173,7 @@ var writeRequestFixture = &prompb.WriteRequest{ }, } -func TestParseMetricsTextAndFormat(t *testing.T) { +func TestParseAndPushMetricsTextAndFormat(t *testing.T) { input := bytes.NewReader([]byte(` # HELP http_request_duration_seconds A histogram of the request duration. # TYPE http_request_duration_seconds histogram @@ -206,3 +206,28 @@ func TestParseMetricsTextAndFormat(t *testing.T) { require.Equal(t, writeRequestFixture, expected) } + +func TestParseMetricsTextAndFormatErrorParsingFloatValue(t *testing.T) { + input := bytes.NewReader([]byte(` + # HELP http_requests_total The total number of HTTP requests. + # TYPE http_requests_total counter + http_requests_total{method="post",code="200"} 1027Error 1395066363000 + http_requests_total{method="post",code="400"} 3 1395066363000 + `)) + labels := map[string]string{"job": "promtool"} + + _, err := ParseMetricsTextAndFormat(input, labels) + require.Equal(t, err.Error(), "text format parsing error in line 4: expected float as value, got \"1027Error\"") +} + +func TestParseMetricsTextAndFormatErrorParsingMetricType(t *testing.T) { + input := bytes.NewReader([]byte(` + # HELP node_info node info summary. + # TYPE node_info info + node_info{test="summary"} 1 1395066363000 + `)) + labels := map[string]string{"job": "promtool"} + + _, err := ParseMetricsTextAndFormat(input, labels) + require.Equal(t, err.Error(), "text format parsing error in line 3: unknown metric type \"info\"") +} From a8772a41782758e8153c494dcd207e770d8421a4 Mon Sep 17 00:00:00 2001 From: Nidhey Nitin Indurkar <46122307+nidhey27@users.noreply.github.com> Date: Thu, 1 Jun 2023 17:13:09 +0530 Subject: [PATCH 36/93] Feat: Get block by id directly on promtool analyze & get latest block if ID not provided (#12031) * feat: analyze latest block or block by ID in CLI (promtool) Signed-off-by: nidhey27 * address remarks Signed-off-by: nidhey60@gmail.com * address latest review comments Signed-off-by: nidhey60@gmail.com --------- Signed-off-by: nidhey27 Signed-off-by: nidhey60@gmail.com --- cmd/promtool/tsdb.go | 28 +++++++++-------------- tsdb/db.go | 54 ++++++++++++++++++++++++++++++++++++++++++++ tsdb/db_test.go | 19 +++++++++++++++- 3 files changed, 83 insertions(+), 18 deletions(-) diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 84aa43a9c4..b7fad5fe09 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -398,26 +398,20 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) if err != nil { return nil, nil, err } - blocks, err := db.Blocks() - if err != nil { - return nil, nil, err - } - var block tsdb.BlockReader - switch { - case blockID != "": - for _, b := range blocks { - if b.Meta().ULID.String() == blockID { - block = b - break - } + + if blockID == "" { + blockID, err = db.LastBlockID() + if err != nil { + return nil, nil, err } - case len(blocks) > 0: - block = blocks[len(blocks)-1] } - if block == nil { - return nil, nil, fmt.Errorf("block %s not found", blockID) + + b, err := db.Block(blockID) + if err != nil { + return nil, nil, err } - return db, block, nil + + return db, b, nil } func analyzeBlock(path, blockID string, limit int, runExtended 
bool) error { diff --git a/tsdb/db.go b/tsdb/db.go index 12974150be..32dae57a52 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -607,6 +607,60 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { return blockReaders, nil } +// LastBlockID returns the BlockID of latest block. +func (db *DBReadOnly) LastBlockID() (string, error) { + entries, err := os.ReadDir(db.dir) + if err != nil { + return "", err + } + + max := uint64(0) + + lastBlockID := "" + + for _, e := range entries { + // Check if dir is a block dir or not. + dirName := e.Name() + ulidObj, err := ulid.ParseStrict(dirName) + if err != nil { + continue // Not a block dir. + } + timestamp := ulidObj.Time() + if timestamp > max { + max = timestamp + lastBlockID = dirName + } + } + + if lastBlockID == "" { + return "", errors.New("no blocks found") + } + + return lastBlockID, nil +} + +// Block returns a block reader by given block id. +func (db *DBReadOnly) Block(blockID string) (BlockReader, error) { + select { + case <-db.closed: + return nil, ErrClosed + default: + } + + _, err := os.Stat(filepath.Join(db.dir, blockID)) + if os.IsNotExist(err) { + return nil, errors.Errorf("invalid block ID %s", blockID) + } + + block, err := OpenBlock(db.logger, filepath.Join(db.dir, blockID), nil) + if err != nil { + return nil, err + } + db.closers = append(db.closers, block) + + return block, nil +} + // Close all block readers. func (db *DBReadOnly) Close() error { select { diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 0b980f6ec6..427a3b7afa 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -2384,6 +2384,7 @@ func TestDBReadOnly(t *testing.T) { dbDir string logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) expBlocks []*Block + expBlock *Block expSeries map[string][]tsdbutil.Sample expChunks map[string][][]tsdbutil.Sample expDBHash []byte @@ -2427,6 +2428,7 @@ func TestDBReadOnly(t *testing.T) { require.NoError(t, app.Commit()) expBlocks = dbWritable.Blocks() + expBlock = expBlocks[0] expDbSize, err := fileutil.DirSize(dbWritable.Dir()) require.NoError(t, err) require.Greater(t, expDbSize, dbSizeBeforeAppend, "db size didn't increase after an append") @@ -2455,7 +2457,22 @@ func TestDBReadOnly(t *testing.T) { require.Equal(t, expBlock.Meta(), blocks[i].Meta(), "block meta mismatch") } }) - + t.Run("block", func(t *testing.T) { + blockID := expBlock.meta.ULID.String() + block, err := dbReadOnly.Block(blockID) + require.NoError(t, err) + require.Equal(t, expBlock.Meta(), block.Meta(), "block meta mismatch") + }) + t.Run("invalid block ID", func(t *testing.T) { + blockID := "01GTDVZZF52NSWB5SXQF0P2PGF" + _, err := dbReadOnly.Block(blockID) + require.Error(t, err) + }) + t.Run("last block ID", func(t *testing.T) { + blockID, err := dbReadOnly.LastBlockID() + require.NoError(t, err) + require.Equal(t, expBlocks[2].Meta().ULID.String(), blockID) + }) t.Run("querier", func(t *testing.T) { // Open a read only db and ensure that the API returns the same result as the normal DB. 
q, err := dbReadOnly.Querier(context.TODO(), math.MinInt64, math.MaxInt64) From f676d4a756f6833ba2694b2c6600259ff3c62533 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Thu, 1 Jun 2023 17:39:04 +0200 Subject: [PATCH 37/93] feat refactoring checkrules func MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 88 ++++++++++++++++++++++++-------------------- 1 file changed, 48 insertions(+), 40 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index c55e5be1ef..e29d6f1602 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -448,20 +448,12 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files } fmt.Println() - for _, rf := range ruleFiles { - if n, errs := checkRules(rf, lintSettings); len(errs) > 0 { - fmt.Fprintln(os.Stderr, " FAILED:") - for _, err := range errs { - fmt.Fprintln(os.Stderr, " ", err) - } - failed = true - for _, err := range errs { - hasErrors = hasErrors || !errors.Is(err, lintError) - } - } else { - fmt.Printf(" SUCCESS: %d rules found\n", n) - } - fmt.Println() + rulesFailed, rulesHasErrors := checkRules(ruleFiles, lintSettings) + if rulesFailed { + failed = rulesFailed + } + if rulesHasErrors { + hasErrors = rulesHasErrors } } if failed && hasErrors { @@ -689,14 +681,19 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error) { func CheckRules(ls lintConfig, files ...string) int { failed := false hasErrors := false - - // Add empty string to avoid matching filename. if len(files) == 0 { - files = append(files, "") - } - - for _, f := range files { - if n, errs := checkRules(f, ls); errs != nil { + fmt.Println("Checking standard input") + data, err := io.ReadAll(os.Stdin) + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return failureExitCode + } + rgs, errs := rulefmt.Parse(data) + for _, e := range errs { + fmt.Fprintln(os.Stderr, e.Error()) + return failureExitCode + } + if n, errs := checkRuleGroups(rgs, ls); errs != nil { fmt.Fprintln(os.Stderr, " FAILED:") for _, e := range errs { fmt.Fprintln(os.Stderr, e.Error()) @@ -709,38 +706,49 @@ func CheckRules(ls lintConfig, files ...string) int { fmt.Printf(" SUCCESS: %d rules found\n", n) } fmt.Println() + } else { + failed, hasErrors = checkRules(files, ls) } + if failed && hasErrors { return failureExitCode } if failed && ls.fatal { return lintErrExitCode } + return successExitCode } -func checkRules(filename string, lintSettings lintConfig) (int, []error) { - var rgs *rulefmt.RuleGroups - var errs []error - - // Empty string is stdin input. - if filename == "" { - data, err := io.ReadAll(os.Stdin) - if err != nil { - errs = append(errs, err) - return failureExitCode, errs +// checkRules validates rule files. 
+func checkRules(files []string, ls lintConfig) (bool, bool) { + failed := false + hasErrors := false + for _, f := range files { + fmt.Println("Checking", f) + rgs, errs := rulefmt.ParseFile(f) + if errs != nil { + failed = true + continue } - fmt.Println("Checking stdin") - rgs, errs = rulefmt.Parse(data) - } else { - fmt.Println("Checking", filename) - rgs, errs = rulefmt.ParseFile(filename) - } - - if errs != nil { - return successExitCode, errs + if n, errs := checkRuleGroups(rgs, ls); errs != nil { + fmt.Fprintln(os.Stderr, " FAILED:") + for _, e := range errs { + fmt.Fprintln(os.Stderr, e.Error()) + } + failed = true + for _, err := range errs { + hasErrors = hasErrors || !errors.Is(err, lintError) + } + } else { + fmt.Printf(" SUCCESS: %d rules found\n", n) + } + fmt.Println() } + return failed, hasErrors +} +func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []error) { numRules := 0 for _, rg := range rgs.Groups { numRules += len(rg.Rules) From 1f3821379c23ab5855df88ad33f5bb5ecfd68a03 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 1 Jun 2023 17:17:04 +0000 Subject: [PATCH 38/93] promql: refactor: extract fn to wait on concurrency limit Signed-off-by: Bryan Boreham --- promql/engine.go | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 8a64fdf394..8730466f75 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -589,18 +589,11 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag execSpanTimer, ctx := q.stats.GetSpanTimer(ctx, stats.ExecTotalTime) defer execSpanTimer.Finish() - queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime) - // Log query in active log. The active log guarantees that we don't run over - // MaxConcurrent queries. - if ng.activeQueryTracker != nil { - queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q) - if err != nil { - queueSpanTimer.Finish() - return nil, nil, contextErr(err, "query queue") - } - defer ng.activeQueryTracker.Delete(queryIndex) + if finish, err := ng.queueActive(ctx, q); err != nil { + return nil, nil, err + } else { + defer finish() } - queueSpanTimer.Finish() // Cancel when execution is done or an error was raised. defer q.cancel() @@ -623,6 +616,18 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag panic(fmt.Errorf("promql.Engine.exec: unhandled statement of type %T", q.Statement())) } +// Log query in active log. The active log guarantees that we don't run over +// MaxConcurrent queries. 
+func (ng *Engine) queueActive(ctx context.Context, q *query) (func(), error) { + if ng.activeQueryTracker == nil { + return func() {}, nil + } + queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime) + queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q) + queueSpanTimer.Finish() + return func() { ng.activeQueryTracker.Delete(queryIndex) }, err +} + func timeMilliseconds(t time.Time) int64 { return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) } From 71fc4f1516b8264606daca7bf7c0e011b7f91f2e Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 1 Jun 2023 17:54:17 +0000 Subject: [PATCH 39/93] promql: refactor: create query object before parsing Signed-off-by: Bryan Boreham --- promql/engine.go | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 8730466f75..81fb3c9168 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -409,15 +409,15 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { // NewInstantQuery returns an evaluation query for the given expression at the given time. func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { + pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0) expr, err := parser.ParseExpr(qs) if err != nil { return nil, err } - qry, err := ng.newQuery(q, opts, expr, ts, ts, 0) - if err != nil { + if err := ng.validateOpts(expr); err != nil { return nil, err } - qry.q = qs + *pExpr = PreprocessExpr(expr, ts, ts) return qry, nil } @@ -425,27 +425,23 @@ func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts * // NewRangeQuery returns an evaluation query for the given time range and with // the resolution set by the interval. func (ng *Engine) NewRangeQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { + pExpr, qry := ng.newQuery(q, qs, opts, start, end, interval) expr, err := parser.ParseExpr(qs) if err != nil { return nil, err } + if err := ng.validateOpts(expr); err != nil { + return nil, err + } if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar { return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type())) } - qry, err := ng.newQuery(q, opts, expr, start, end, interval) - if err != nil { - return nil, err - } - qry.q = qs + *pExpr = PreprocessExpr(expr, start, end) return qry, nil } -func (ng *Engine) newQuery(q storage.Queryable, opts *QueryOpts, expr parser.Expr, start, end time.Time, interval time.Duration) (*query, error) { - if err := ng.validateOpts(expr); err != nil { - return nil, err - } - +func (ng *Engine) newQuery(q storage.Queryable, qs string, opts *QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) { // Default to empty QueryOpts if not provided. 
if opts == nil { opts = &QueryOpts{} @@ -457,20 +453,20 @@ func (ng *Engine) newQuery(q storage.Queryable, opts *QueryOpts, expr parser.Exp } es := &parser.EvalStmt{ - Expr: PreprocessExpr(expr, start, end), Start: start, End: end, Interval: interval, LookbackDelta: lookbackDelta, } qry := &query{ + q: qs, stmt: es, ng: ng, stats: stats.NewQueryTimers(), sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats), queryable: q, } - return qry, nil + return &es.Expr, qry } var ( From bb0d8320ddc3b8f8d4888524a7f631f904a5ec6f Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 1 Jun 2023 18:16:05 +0000 Subject: [PATCH 40/93] promql: include parsing in active-query tracking So that the max-concurrency limit is applied. Signed-off-by: Bryan Boreham --- promql/engine.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 81fb3c9168..085a7d23c4 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -408,8 +408,13 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { } // NewInstantQuery returns an evaluation query for the given expression at the given time. -func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { +func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0) + if finish, err := ng.queueActive(ctx, qry); err != nil { + return nil, err + } else { + defer finish() + } expr, err := parser.ParseExpr(qs) if err != nil { return nil, err @@ -424,8 +429,13 @@ func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts * // NewRangeQuery returns an evaluation query for the given time range and with // the resolution set by the interval. -func (ng *Engine) NewRangeQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { +func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { pExpr, qry := ng.newQuery(q, qs, opts, start, end, interval) + if finish, err := ng.queueActive(ctx, qry); err != nil { + return nil, err + } else { + defer finish() + } expr, err := parser.ParseExpr(qs) if err != nil { return nil, err From 67d2ef004d84427626fadb6e62b420b80712a7f9 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 1 Jun 2023 18:36:34 +0000 Subject: [PATCH 41/93] Placate lint I think the version using scoping was better, but I'm out of energy to fight the linter. Signed-off-by: Bryan Boreham --- promql/engine.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 085a7d23c4..f29db3a647 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -410,11 +410,11 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { // NewInstantQuery returns an evaluation query for the given expression at the given time. 
func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0) - if finish, err := ng.queueActive(ctx, qry); err != nil { + finishQueue, err := ng.queueActive(ctx, qry) + if err != nil { return nil, err - } else { - defer finish() } + defer finishQueue() expr, err := parser.ParseExpr(qs) if err != nil { return nil, err @@ -431,11 +431,11 @@ func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts // the resolution set by the interval. func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { pExpr, qry := ng.newQuery(q, qs, opts, start, end, interval) - if finish, err := ng.queueActive(ctx, qry); err != nil { + finishQueue, err := ng.queueActive(ctx, qry) + if err != nil { return nil, err - } else { - defer finish() } + defer finishQueue() expr, err := parser.ParseExpr(qs) if err != nil { return nil, err @@ -595,11 +595,11 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag execSpanTimer, ctx := q.stats.GetSpanTimer(ctx, stats.ExecTotalTime) defer execSpanTimer.Finish() - if finish, err := ng.queueActive(ctx, q); err != nil { + finishQueue, err := ng.queueActive(ctx, q) + if err != nil { return nil, nil, err - } else { - defer finish() } + defer finishQueue() // Cancel when execution is done or an error was raised. defer q.cancel() From b1675e23af84e6b156da0c17b5b12736775896b1 Mon Sep 17 00:00:00 2001 From: rakshith210 <56937390+rakshith210@users.noreply.github.com> Date: Thu, 1 Jun 2023 14:20:10 -0700 Subject: [PATCH 42/93] Add Azure AD package for remote write (#11944) * Add Azure AD package for remote write * Made AzurePublic default and updated configuration.md * Updated config structure and removed getToken at initialization * Changed passing context from request Signed-off-by: Rakshith Padmanabha Signed-off-by: rakshith210 --- config/config.go | 12 +- config/config_test.go | 2 +- docs/configuration/configuration.md | 14 +- go.mod | 9 +- go.sum | 12 + storage/remote/azuread/README.md | 8 + storage/remote/azuread/azuread.go | 247 +++++++++++++++++ storage/remote/azuread/azuread_test.go | 252 ++++++++++++++++++ .../testdata/azuread_bad_clientidmissing.yaml | 1 + .../testdata/azuread_bad_invalidclientid.yaml | 3 + .../remote/azuread/testdata/azuread_good.yaml | 3 + .../testdata/azuread_good_cloudmissing.yaml | 2 + storage/remote/client.go | 9 + storage/remote/write.go | 1 + 14 files changed, 568 insertions(+), 7 deletions(-) create mode 100644 storage/remote/azuread/README.md create mode 100644 storage/remote/azuread/azuread.go create mode 100644 storage/remote/azuread/azuread_test.go create mode 100644 storage/remote/azuread/testdata/azuread_bad_clientidmissing.yaml create mode 100644 storage/remote/azuread/testdata/azuread_bad_invalidclientid.yaml create mode 100644 storage/remote/azuread/testdata/azuread_good.yaml create mode 100644 storage/remote/azuread/testdata/azuread_good_cloudmissing.yaml diff --git a/config/config.go b/config/config.go index 9f81bbfd57..d32fcc33c9 100644 --- a/config/config.go +++ b/config/config.go @@ -34,6 +34,7 @@ import ( "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/storage/remote/azuread" ) var ( @@ -907,6 +908,7 @@ type RemoteWriteConfig struct 
{ QueueConfig QueueConfig `yaml:"queue_config,omitempty"` MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"` SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"` + AzureADConfig *azuread.AzureADConfig `yaml:"azuread,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -943,8 +945,12 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil || c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil - if httpClientConfigAuthEnabled && c.SigV4Config != nil { - return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured") + if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured") + } + + if c.SigV4Config != nil && c.AzureADConfig != nil { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured") } return nil @@ -965,7 +971,7 @@ func validateHeadersForTracing(headers map[string]string) error { func validateHeaders(headers map[string]string) error { for header := range headers { if strings.ToLower(header) == "authorization" { - return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter") + return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter") } if _, ok := reservedHeaders[strings.ToLower(header)]; ok { return fmt.Errorf("%s is a reserved header. It must not be changed", header) diff --git a/config/config_test.go b/config/config_test.go index d243d687c4..d3288cc90d 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1727,7 +1727,7 @@ var expectedErrors = []struct { }, { filename: "remote_write_authorization_header.bad.yml", - errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`, + errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`, }, { filename: "remote_write_url_missing.bad.yml", diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index ff1449e34a..3a9ace2b6c 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -3466,7 +3466,7 @@ authorization: [ credentials_file: ] # Optionally configures AWS's Signature Verification 4 signing process to -# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2. +# sign requests. Cannot be set at the same time as basic_auth, authorization, oauth2, or azuread. # To use the default credentials from the AWS SDK, use `sigv4: {}`. sigv4: # The AWS region. If blank, the region from the default credentials chain @@ -3485,10 +3485,20 @@ sigv4: [ role_arn: ] # Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth, authorization, or sigv4. +# Cannot be used at the same time as basic_auth, authorization, sigv4, or azuread. oauth2: [ ] +# Optional AzureAD configuration. +# Cannot be used at the same time as basic_auth, authorization, oauth2, or sigv4. +azuread: + # The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'. + [ cloud: | default = AzurePublic ] + + # Azure User-assigned Managed identity. 
+ [ managed_identity: + [ client_id: ] + # Configures the remote write request's TLS settings. tls_config: [ ] diff --git a/go.mod b/go.mod index 9b826ad334..71f6a2b8db 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,8 @@ go 1.19 require ( github.com/Azure/azure-sdk-for-go v65.0.0+incompatible + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 github.com/Azure/go-autorest/autorest v0.11.28 github.com/Azure/go-autorest/autorest/adal v0.9.23 github.com/alecthomas/kingpin/v2 v2.3.2 @@ -83,10 +85,15 @@ require ( require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e // indirect @@ -135,7 +142,7 @@ require ( github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.3.0 github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.7.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect diff --git a/go.sum b/go.sum index 3a472c99a0..859ce81ca2 100644 --- a/go.sum +++ b/go.sum @@ -38,6 +38,12 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IKGaStvGB/S90PUPB/W8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 h1:+5VZ72z0Qan5Bog5C+ZkgSqUbeVUd9wgtHOrIKuc5b8= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= @@ -60,6 +66,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 
h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 h1:oPdPEZFSbl7oSPEAIPMPBMUmiL+mqgzBJwM/9qYcwNg= +github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -515,6 +523,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w= @@ -630,6 +640,8 @@ github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAv github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= diff --git a/storage/remote/azuread/README.md b/storage/remote/azuread/README.md new file mode 100644 index 0000000000..b3b4457a6e --- /dev/null +++ b/storage/remote/azuread/README.md @@ -0,0 +1,8 @@ +azuread package +========================================= + +azuread provides an http.RoundTripper that attaches an Azure AD accessToken +to remote write requests. + +This module is considered internal to Prometheus, without any stability +guarantees for external usage. diff --git a/storage/remote/azuread/azuread.go b/storage/remote/azuread/azuread.go new file mode 100644 index 0000000000..94b6144da1 --- /dev/null +++ b/storage/remote/azuread/azuread.go @@ -0,0 +1,247 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azuread + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/google/uuid" +) + +const ( + // Clouds. + AzureChina = "AzureChina" + AzureGovernment = "AzureGovernment" + AzurePublic = "AzurePublic" + + // Audiences. + IngestionChinaAudience = "https://monitor.azure.cn//.default" + IngestionGovernmentAudience = "https://monitor.azure.us//.default" + IngestionPublicAudience = "https://monitor.azure.com//.default" +) + +// ManagedIdentityConfig is used to store managed identity config values +type ManagedIdentityConfig struct { + // ClientID is the clientId of the managed identity that is being used to authenticate. + ClientID string `yaml:"client_id,omitempty"` +} + +// AzureADConfig is used to store the config values. +type AzureADConfig struct { // nolint:revive + // ManagedIdentity is the managed identity that is being used to authenticate. + ManagedIdentity *ManagedIdentityConfig `yaml:"managed_identity,omitempty"` + + // Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina. + Cloud string `yaml:"cloud,omitempty"` +} + +// azureADRoundTripper is used to store the roundtripper and the tokenprovider. +type azureADRoundTripper struct { + next http.RoundTripper + tokenProvider *tokenProvider +} + +// tokenProvider is used to store and retrieve Azure AD accessToken. +type tokenProvider struct { + // token is member used to store the current valid accessToken. + token string + // mu guards access to token. + mu sync.Mutex + // refreshTime is used to store the refresh time of the current valid accessToken. + refreshTime time.Time + // credentialClient is the Azure AD credential client that is being used to retrieve accessToken. + credentialClient azcore.TokenCredential + options *policy.TokenRequestOptions +} + +// Validate validates config values provided. +func (c *AzureADConfig) Validate() error { + if c.Cloud == "" { + c.Cloud = AzurePublic + } + + if c.Cloud != AzureChina && c.Cloud != AzureGovernment && c.Cloud != AzurePublic { + return fmt.Errorf("must provide a cloud in the Azure AD config") + } + + if c.ManagedIdentity == nil { + return fmt.Errorf("must provide an Azure Managed Identity in the Azure AD config") + } + + if c.ManagedIdentity.ClientID == "" { + return fmt.Errorf("must provide an Azure Managed Identity client_id in the Azure AD config") + } + + _, err := uuid.Parse(c.ManagedIdentity.ClientID) + if err != nil { + return fmt.Errorf("the provided Azure Managed Identity client_id provided is invalid") + } + return nil +} + +// UnmarshalYAML unmarshal the Azure AD config yaml. +func (c *AzureADConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain AzureADConfig + *c = AzureADConfig{} + if err := unmarshal((*plain)(c)); err != nil { + return err + } + return c.Validate() +} + +// NewAzureADRoundTripper creates round tripper adding Azure AD authorization to calls. 
+func NewAzureADRoundTripper(cfg *AzureADConfig, next http.RoundTripper) (http.RoundTripper, error) { + if next == nil { + next = http.DefaultTransport + } + + cred, err := newTokenCredential(cfg) + if err != nil { + return nil, err + } + + tokenProvider, err := newTokenProvider(cfg, cred) + if err != nil { + return nil, err + } + + rt := &azureADRoundTripper{ + next: next, + tokenProvider: tokenProvider, + } + return rt, nil +} + +// RoundTrip sets Authorization header for requests. +func (rt *azureADRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + accessToken, err := rt.tokenProvider.getAccessToken(req.Context()) + if err != nil { + return nil, err + } + bearerAccessToken := "Bearer " + accessToken + req.Header.Set("Authorization", bearerAccessToken) + + return rt.next.RoundTrip(req) +} + +// newTokenCredential returns a TokenCredential of different kinds like Azure Managed Identity and Azure AD application. +func newTokenCredential(cfg *AzureADConfig) (azcore.TokenCredential, error) { + cred, err := newManagedIdentityTokenCredential(cfg.ManagedIdentity.ClientID) + if err != nil { + return nil, err + } + + return cred, nil +} + +// newManagedIdentityTokenCredential returns new Managed Identity token credential. +func newManagedIdentityTokenCredential(managedIdentityClientID string) (azcore.TokenCredential, error) { + clientID := azidentity.ClientID(managedIdentityClientID) + opts := &azidentity.ManagedIdentityCredentialOptions{ID: clientID} + return azidentity.NewManagedIdentityCredential(opts) +} + +// newTokenProvider helps to fetch accessToken for different types of credential. This also takes care of +// refreshing the accessToken before expiry. This accessToken is attached to the Authorization header while making requests. +func newTokenProvider(cfg *AzureADConfig, cred azcore.TokenCredential) (*tokenProvider, error) { + audience, err := getAudience(cfg.Cloud) + if err != nil { + return nil, err + } + + tokenProvider := &tokenProvider{ + credentialClient: cred, + options: &policy.TokenRequestOptions{Scopes: []string{audience}}, + } + + return tokenProvider, nil +} + +// getAccessToken returns the current valid accessToken. +func (tokenProvider *tokenProvider) getAccessToken(ctx context.Context) (string, error) { + tokenProvider.mu.Lock() + defer tokenProvider.mu.Unlock() + if tokenProvider.valid() { + return tokenProvider.token, nil + } + err := tokenProvider.getToken(ctx) + if err != nil { + return "", errors.New("Failed to get access token: " + err.Error()) + } + return tokenProvider.token, nil +} + +// valid checks if the token in the token provider is valid and not expired. +func (tokenProvider *tokenProvider) valid() bool { + if len(tokenProvider.token) == 0 { + return false + } + if tokenProvider.refreshTime.After(time.Now().UTC()) { + return true + } + return false +} + +// getToken retrieves a new accessToken and stores the newly retrieved token in the tokenProvider. +func (tokenProvider *tokenProvider) getToken(ctx context.Context) error { + accessToken, err := tokenProvider.credentialClient.GetToken(ctx, *tokenProvider.options) + if err != nil { + return err + } + if len(accessToken.Token) == 0 { + return errors.New("access token is empty") + } + + tokenProvider.token = accessToken.Token + err = tokenProvider.updateRefreshTime(accessToken) + if err != nil { + return err + } + return nil +} + +// updateRefreshTime handles logic to set refreshTime. The refreshTime is set at half the duration of the actual token expiry. 
+func (tokenProvider *tokenProvider) updateRefreshTime(accessToken azcore.AccessToken) error { + tokenExpiryTimestamp := accessToken.ExpiresOn.UTC() + deltaExpirytime := time.Now().Add(time.Until(tokenExpiryTimestamp) / 2) + if deltaExpirytime.After(time.Now().UTC()) { + tokenProvider.refreshTime = deltaExpirytime + } else { + return errors.New("access token expiry is less than the current time") + } + return nil +} + +// getAudience returns audiences for different clouds. +func getAudience(cloud string) (string, error) { + switch strings.ToLower(cloud) { + case strings.ToLower(AzureChina): + return IngestionChinaAudience, nil + case strings.ToLower(AzureGovernment): + return IngestionGovernmentAudience, nil + case strings.ToLower(AzurePublic): + return IngestionPublicAudience, nil + default: + return "", errors.New("Cloud is not specified or is incorrect: " + cloud) + } +} diff --git a/storage/remote/azuread/azuread_test.go b/storage/remote/azuread/azuread_test.go new file mode 100644 index 0000000000..ebbd98958c --- /dev/null +++ b/storage/remote/azuread/azuread_test.go @@ -0,0 +1,252 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azuread + +import ( + "context" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/google/uuid" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + "gopkg.in/yaml.v2" +) + +const ( + dummyAudience = "dummyAudience" + dummyClientID = "00000000-0000-0000-0000-000000000000" + testTokenString = "testTokenString" +) + +var testTokenExpiry = time.Now().Add(10 * time.Second) + +type AzureAdTestSuite struct { + suite.Suite + mockCredential *mockCredential +} + +type TokenProviderTestSuite struct { + suite.Suite + mockCredential *mockCredential +} + +// mockCredential mocks azidentity TokenCredential interface. 
+type mockCredential struct { + mock.Mock +} + +func (ad *AzureAdTestSuite) BeforeTest(_, _ string) { + ad.mockCredential = new(mockCredential) +} + +func TestAzureAd(t *testing.T) { + suite.Run(t, new(AzureAdTestSuite)) +} + +func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() { + var gotReq *http.Request + + testToken := &azcore.AccessToken{ + Token: testTokenString, + ExpiresOn: testTokenExpiry, + } + + managedIdentityConfig := &ManagedIdentityConfig{ + ClientID: dummyClientID, + } + + azureAdConfig := &AzureADConfig{ + Cloud: "AzurePublic", + ManagedIdentity: managedIdentityConfig, + } + + ad.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil) + + tokenProvider, err := newTokenProvider(azureAdConfig, ad.mockCredential) + ad.Assert().NoError(err) + + rt := &azureADRoundTripper{ + next: promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { + gotReq = req + return &http.Response{StatusCode: http.StatusOK}, nil + }), + tokenProvider: tokenProvider, + } + + cli := &http.Client{Transport: rt} + + req, err := http.NewRequest(http.MethodPost, "https://example.com", strings.NewReader("Hello, world!")) + ad.Assert().NoError(err) + + _, err = cli.Do(req) + ad.Assert().NoError(err) + ad.Assert().NotNil(gotReq) + + origReq := gotReq + ad.Assert().NotEmpty(origReq.Header.Get("Authorization")) + ad.Assert().Equal("Bearer "+testTokenString, origReq.Header.Get("Authorization")) +} + +func loadAzureAdConfig(filename string) (*AzureADConfig, error) { + content, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + cfg := AzureADConfig{} + if err = yaml.UnmarshalStrict(content, &cfg); err != nil { + return nil, err + } + return &cfg, nil +} + +func testGoodConfig(t *testing.T, filename string) { + _, err := loadAzureAdConfig(filename) + if err != nil { + t.Fatalf("Unexpected error parsing %s: %s", filename, err) + } +} + +func TestGoodAzureAdConfig(t *testing.T) { + filename := "testdata/azuread_good.yaml" + testGoodConfig(t, filename) +} + +func TestGoodCloudMissingAzureAdConfig(t *testing.T) { + filename := "testdata/azuread_good_cloudmissing.yaml" + testGoodConfig(t, filename) +} + +func TestBadClientIdMissingAzureAdConfig(t *testing.T) { + filename := "testdata/azuread_bad_clientidmissing.yaml" + _, err := loadAzureAdConfig(filename) + if err == nil { + t.Fatalf("Did not receive expected error unmarshaling bad azuread config") + } + if !strings.Contains(err.Error(), "must provide an Azure Managed Identity in the Azure AD config") { + t.Errorf("Received unexpected error from unmarshal of %s: %s", filename, err.Error()) + } +} + +func TestBadInvalidClientIdAzureAdConfig(t *testing.T) { + filename := "testdata/azuread_bad_invalidclientid.yaml" + _, err := loadAzureAdConfig(filename) + if err == nil { + t.Fatalf("Did not receive expected error unmarshaling bad azuread config") + } + if !strings.Contains(err.Error(), "the provided Azure Managed Identity client_id provided is invalid") { + t.Errorf("Received unexpected error from unmarshal of %s: %s", filename, err.Error()) + } +} + +func (m *mockCredential) GetToken(ctx context.Context, options policy.TokenRequestOptions) (azcore.AccessToken, error) { + args := m.MethodCalled("GetToken", ctx, options) + if args.Get(0) == nil { + return azcore.AccessToken{}, args.Error(1) + } + + return args.Get(0).(azcore.AccessToken), nil +} + +func (s *TokenProviderTestSuite) BeforeTest(_, _ string) { + s.mockCredential = new(mockCredential) +} + +func TestTokenProvider(t *testing.T) { + 
suite.Run(t, new(TokenProviderTestSuite)) +} + +func (s *TokenProviderTestSuite) TestNewTokenProvider_NilAudience_Fail() { + managedIdentityConfig := &ManagedIdentityConfig{ + ClientID: dummyClientID, + } + + azureAdConfig := &AzureADConfig{ + Cloud: "PublicAzure", + ManagedIdentity: managedIdentityConfig, + } + + actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential) + + s.Assert().Nil(actualTokenProvider) + s.Assert().NotNil(actualErr) + s.Assert().Equal("Cloud is not specified or is incorrect: "+azureAdConfig.Cloud, actualErr.Error()) +} + +func (s *TokenProviderTestSuite) TestNewTokenProvider_Success() { + managedIdentityConfig := &ManagedIdentityConfig{ + ClientID: dummyClientID, + } + + azureAdConfig := &AzureADConfig{ + Cloud: "AzurePublic", + ManagedIdentity: managedIdentityConfig, + } + s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil) + + actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential) + + s.Assert().NotNil(actualTokenProvider) + s.Assert().Nil(actualErr) + s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background())) +} + +func (s *TokenProviderTestSuite) TestPeriodicTokenRefresh_Success() { + // setup + managedIdentityConfig := &ManagedIdentityConfig{ + ClientID: dummyClientID, + } + + azureAdConfig := &AzureADConfig{ + Cloud: "AzurePublic", + ManagedIdentity: managedIdentityConfig, + } + testToken := &azcore.AccessToken{ + Token: testTokenString, + ExpiresOn: testTokenExpiry, + } + + s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil).Once(). + On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil) + + actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential) + + s.Assert().NotNil(actualTokenProvider) + s.Assert().Nil(actualErr) + s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background())) + + // Token set to refresh at half of the expiry time. The test tokens are set to expiry in 10s. + // Hence, the 6 seconds wait to check if the token is refreshed. 
+ time.Sleep(6 * time.Second) + + s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background())) + + s.mockCredential.AssertNumberOfCalls(s.T(), "GetToken", 2) + accessToken, err := actualTokenProvider.getAccessToken(context.Background()) + s.Assert().Nil(err) + s.Assert().NotEqual(accessToken, testTokenString) +} + +func getToken() azcore.AccessToken { + return azcore.AccessToken{ + Token: uuid.New().String(), + ExpiresOn: time.Now().Add(10 * time.Second), + } +} diff --git a/storage/remote/azuread/testdata/azuread_bad_clientidmissing.yaml b/storage/remote/azuread/testdata/azuread_bad_clientidmissing.yaml new file mode 100644 index 0000000000..68b119cd42 --- /dev/null +++ b/storage/remote/azuread/testdata/azuread_bad_clientidmissing.yaml @@ -0,0 +1 @@ +cloud: AzurePublic diff --git a/storage/remote/azuread/testdata/azuread_bad_invalidclientid.yaml b/storage/remote/azuread/testdata/azuread_bad_invalidclientid.yaml new file mode 100644 index 0000000000..1f72fbb71f --- /dev/null +++ b/storage/remote/azuread/testdata/azuread_bad_invalidclientid.yaml @@ -0,0 +1,3 @@ +cloud: AzurePublic +managed_identity: + client_id: foo-foobar-bar-foo-00000000 diff --git a/storage/remote/azuread/testdata/azuread_good.yaml b/storage/remote/azuread/testdata/azuread_good.yaml new file mode 100644 index 0000000000..de39f0a060 --- /dev/null +++ b/storage/remote/azuread/testdata/azuread_good.yaml @@ -0,0 +1,3 @@ +cloud: AzurePublic +managed_identity: + client_id: 00000000-0000-0000-0000-000000000000 diff --git a/storage/remote/azuread/testdata/azuread_good_cloudmissing.yaml b/storage/remote/azuread/testdata/azuread_good_cloudmissing.yaml new file mode 100644 index 0000000000..bef6318743 --- /dev/null +++ b/storage/remote/azuread/testdata/azuread_good_cloudmissing.yaml @@ -0,0 +1,2 @@ +managed_identity: + client_id: 00000000-0000-0000-0000-000000000000 diff --git a/storage/remote/client.go b/storage/remote/client.go index 1625c9918d..33774203c5 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -36,6 +36,7 @@ import ( "go.opentelemetry.io/otel/trace" "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage/remote/azuread" ) const maxErrMsgLen = 1024 @@ -97,6 +98,7 @@ type ClientConfig struct { Timeout model.Duration HTTPClientConfig config_util.HTTPClientConfig SigV4Config *sigv4.SigV4Config + AzureADConfig *azuread.AzureADConfig Headers map[string]string RetryOnRateLimit bool } @@ -146,6 +148,13 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) { } } + if conf.AzureADConfig != nil { + t, err = azuread.NewAzureADRoundTripper(conf.AzureADConfig, httpClient.Transport) + if err != nil { + return nil, err + } + } + if len(conf.Headers) > 0 { t = newInjectHeadersRoundTripper(conf.Headers, t) } diff --git a/storage/remote/write.go b/storage/remote/write.go index 6a33c0adf9..4b0a249014 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -158,6 +158,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { Timeout: rwConf.RemoteTimeout, HTTPClientConfig: rwConf.HTTPClientConfig, SigV4Config: rwConf.SigV4Config, + AzureADConfig: rwConf.AzureADConfig, Headers: rwConf.Headers, RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit, }) From 084f38df12fe7c5693f7b0e7164cdb1570a525b7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Jun 2023 23:57:03 +0000 Subject: [PATCH 43/93] build(deps): bump bufbuild/buf-setup-action from 1.17.0 to 1.20.0 Bumps 
[bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.17.0 to 1.20.0. - [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/v1.17.0...v1.20.0) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 79430ee56a..a72837b79e 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.17.0 + - uses: bufbuild/buf-setup-action@v1.20.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@v1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 06e53172e8..b63f073fa5 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.17.0 + - uses: bufbuild/buf-setup-action@v1.20.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@v1 From 90816aa39ecc7e9017a4de653615c461f1516f08 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Jun 2023 00:01:29 +0000 Subject: [PATCH 44/93] build(deps): bump github.com/prometheus/prometheus Bumps [github.com/prometheus/prometheus](https://github.com/prometheus/prometheus) from 0.43.0 to 0.44.0. - [Release notes](https://github.com/prometheus/prometheus/releases) - [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/prometheus/compare/v0.43.0...v0.44.0) --- updated-dependencies: - dependency-name: github.com/prometheus/prometheus dependency-type: direct:production update-type: version-update:semver-minor ...
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 14 ++--- documentation/examples/remote_storage/go.sum | 60 ++++++++++---------- 2 files changed, 37 insertions(+), 37 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 1db3e9a9e4..c0d4331968 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -10,13 +10,13 @@ require ( github.com/influxdata/influxdb v1.11.0 github.com/prometheus/client_golang v1.15.0 github.com/prometheus/common v0.42.0 - github.com/prometheus/prometheus v0.43.0 + github.com/prometheus/prometheus v0.44.0 github.com/stretchr/testify v1.8.2 ) require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/aws/aws-sdk-go v1.44.217 // indirect + github.com/aws/aws-sdk-go v1.44.245 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -44,11 +44,11 @@ require ( go.opentelemetry.io/otel/trace v1.14.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/goleak v1.2.1 // indirect - golang.org/x/exp v0.0.0-20230307190834-24139beb5833 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/oauth2 v0.6.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/oauth2 v0.7.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.30.0 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index c4350e78b0..e0eac05c10 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,7 +1,7 @@ github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= @@ -19,8 +19,8 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAu github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.217 h1:FcWC56MRl+k756aH3qeMQTylSdeJ58WN0iFz3fkyRz0= -github.com/aws/aws-sdk-go v1.44.217/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M= +github.com/aws/aws-sdk-go v1.44.245/go.mod 
h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -35,15 +35,15 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/digitalocean/godo v1.97.0 h1:p9w1yCcWMZcxFSLPToNGXA96WfUVLXqoHti6GzVomL4= +github.com/digitalocean/godo v1.98.0 h1:potyC1eD0N9n5/P4/WmJuKgg+OGYZOBWEW+/aKTX6QQ= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY= +github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= -github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -100,7 +100,7 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/gophercloud/gophercloud v1.2.0 h1:1oXyj4g54KBg/kFtCdMM6jtxSzeIyg8wv4z1HoGPp1E= +github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= @@ -114,13 +114,13 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= -github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b h1:EkuSTU8c/63q4LMayj8ilgg/4I5PXDFVcnqKfs9qcwI= +github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A= github.com/hashicorp/serf v0.10.1 
h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= -github.com/hetznercloud/hcloud-go v1.41.0 h1:KJGFRRc68QiVu4PrEP5BmCQVveCP2CM26UGQUKGpIUs= +github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/influxdata/influxdb v1.11.0 h1:0X+ZsbcOWc6AEi5MHee9BYqXCKmz8IZsljrRYjmV8Qg= github.com/influxdata/influxdb v1.11.0/go.mod h1:V93tJcidY0Zh0LtSONZWnXXGDyt20dtVf+Ddp4EnhaA= -github.com/ionos-cloud/sdk-go/v6 v6.1.4 h1:BJHhFA8Q1SZC7VOXqKKr2BV2ysQ2/4hlk1e4hZte7GY= +github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -146,14 +146,14 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linode/linodego v1.14.1 h1:uGxQyy0BidoEpLGdvfi4cPgEW+0YUFsEGrLEhcTfjNc= +github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.1.51 h1:0+Xg7vObnhrz/4ZCZcZh7zPXlmU0aveS2HDBd0m0qSo= +github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -169,7 +169,7 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ= +github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -201,10 +201,10 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= 
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prometheus/prometheus v0.43.0 h1:18iCSfrbAHbXvYFvR38U1Pt4uZmU9SmDcCpCrBKUiGg= -github.com/prometheus/prometheus v0.43.0/go.mod h1:2BA14LgBeqlPuzObSEbh+Y+JwLH2GcqDlJKbF2sA6FM= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY= +github.com/prometheus/prometheus v0.44.0 h1:sgn8Fdx+uE5tHQn0/622swlk2XnIj6udoZCnbVjHIgc= +github.com/prometheus/prometheus v0.44.0/go.mod h1:aPsmIK3py5XammeTguyqTmuqzX/jeCdyOWWobLHNKQg= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -245,12 +245,12 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s= -golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -267,12 +267,12 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -299,20 +299,20 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -320,7 +320,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From ac8abdaacda605bdcd358ec4b6d30e634a0a569a Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 5 Jun 2023 17:36:11 +0200 Subject: [PATCH 45/93] Rename remaining jitterSeed -> offsetSeed variables (#12414) I had changed the naming from "jitter" to "offset" in: https://github.com/prometheus/prometheus/commit/cb045c0e4b94bbf3eee174d91b5ef2b8553948d5 ...but I forgot to add this file to the commit to complete the renaming, doing that now. Signed-off-by: Julius Volz --- scrape/target.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scrape/target.go b/scrape/target.go index a655e85413..8b745a9c49 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -154,14 +154,14 @@ func (t *Target) hash() uint64 { } // offset returns the time until the next scrape cycle for the target. -// It includes the global server jitterSeed for scrapes from multiple Prometheus to try to be at different times. -func (t *Target) offset(interval time.Duration, jitterSeed uint64) time.Duration { +// It includes the global server offsetSeed for scrapes from multiple Prometheus to try to be at different times. +func (t *Target) offset(interval time.Duration, offsetSeed uint64) time.Duration { now := time.Now().UnixNano() // Base is a pinned to absolute time, no matter how often offset is called. var ( base = int64(interval) - now%int64(interval) - offset = (t.hash() ^ jitterSeed) % uint64(interval) + offset = (t.hash() ^ offsetSeed) % uint64(interval) next = base + int64(offset) ) From bfa466d00f152aa8dd58494c171872d7f95e3d5e Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Wed, 7 Jun 2023 12:29:04 +0200 Subject: [PATCH 46/93] Create release candidate 2.45.0-rc.0 (#12435) Signed-off-by: Jesus Vazquez --- CHANGELOG.md | 24 + VERSION | 2 +- go.mod | 59 +- go.sum | 118 ++-- web/api/v1/api.go | 2 +- web/ui/module/codemirror-promql/package.json | 18 +- web/ui/module/lezer-promql/package.json | 6 +- web/ui/package-lock.json | 592 ++++++++++--------- web/ui/package.json | 12 +- web/ui/react-app/package.json | 44 +- web/web.go | 2 +- 11 files changed, 464 insertions(+), 415 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ff7022156..3b4ee3fc17 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## 2.45.0-rc.0 / 2023-06-05 + +This release is a LTS (Long-Term Support) release of Prometheus and will +receive security, documentation and bugfix patches for at least 12 months. +Please read more about our LTS release cycle at +. + +* [FEATURE] API: New limit parameter to limit the number of items returned by `/api/v1/status/tsdb` endpoint. #12336 +* [FEATURE] Config: Add limits to global config. #12126 +* [FEATURE] Consul SD: Added support for `path_prefix`. #12372 +* [FEATURE] Native histograms: Add option to scrape both classic and native histograms. #12350 +* [FEATURE] Native histograms: Added support for two more arithmetic operators `avg_over_time` and `sum_over_time`. #12262 +* [FEATURE] Promtool: When providing the block id, only one block will be loaded and analyzed. #12031 +* [FEATURE] Remote-write: New Azure ad configuration to support remote writing directly to Azure Monitor workspace. #11944 +* [FEATURE] TSDB: Samples per chunk are now configurable with flag `storage.tsdb.samples-per-chunk`. By default set to its former value 120. 
#12055 +* [ENHANCEMENT] API: Improving Performance on the API Gzip Handler. #12363 +* [ENHANCEMENT] Native histograms: bucket size can now be limited to avoid scrape fails. #12254 +* [ENHANCEMENT] TSDB: Dropped series are now deleted from the WAL sooner. #12297 +* [BUGFIX] Native histograms: ChunkSeries iterator now checks if a new sample can be appended to the open chunk. #12185 +* [BUGFIX] Native histograms: Fix Histogram Appender `Appendable()` segfault. #12357 +* [BUGFIX] Native histograms: Fix setting reset header to gauge histograms in seriesToChunkEncoder. #12329 +* [BUGFIX] TSDB: Tombstone intervals are not modified after Get() call. #12245 +* [BUGFIX] TSDB: Use path/filepath to set the WAL directory. #12349 + ## 2.44.0 / 2023-05-13 This version is built with Go tag `stringlabels`, to use the smaller data diff --git a/VERSION b/VERSION index 3e197472e2..ae2ba732af 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.44.0 +2.45.0-rc.0 diff --git a/go.mod b/go.mod index 71f6a2b8db..dd75cc333d 100644 --- a/go.mod +++ b/go.mod @@ -6,18 +6,18 @@ require ( github.com/Azure/azure-sdk-for-go v65.0.0+incompatible github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 - github.com/Azure/go-autorest/autorest v0.11.28 + github.com/Azure/go-autorest/autorest v0.11.29 github.com/Azure/go-autorest/autorest/adal v0.9.23 github.com/alecthomas/kingpin/v2 v2.3.2 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aws/aws-sdk-go v1.44.245 + github.com/aws/aws-sdk-go v1.44.276 github.com/cespare/xxhash/v2 v2.2.0 github.com/dennwc/varint v1.0.0 github.com/digitalocean/godo v1.99.0 - github.com/docker/docker v23.0.4+incompatible + github.com/docker/docker v24.0.2+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.11.0 - github.com/envoyproxy/protoc-gen-validate v0.10.1 + github.com/envoyproxy/protoc-gen-validate v1.0.1 github.com/fsnotify/fsnotify v1.6.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 @@ -25,19 +25,19 @@ require ( github.com/go-zookeeper/zk v1.0.3 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/google/pprof v0.0.0-20230406165453-00490a63f317 - github.com/gophercloud/gophercloud v1.3.0 + github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 + github.com/gophercloud/gophercloud v1.4.0 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/consul/api v1.20.0 - github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 + github.com/hashicorp/consul/api v1.21.0 + github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f github.com/hetznercloud/hcloud-go v1.45.1 - github.com/ionos-cloud/sdk-go/v6 v6.1.6 + github.com/ionos-cloud/sdk-go/v6 v6.1.7 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.13.6 + github.com/klauspost/compress v1.16.5 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.16.1 - github.com/miekg/dns v1.1.53 + github.com/linode/linodego v1.17.0 + github.com/miekg/dns v1.1.54 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 @@ -45,14 +45,14 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.25.0 github.com/prometheus/client_golang v1.15.1 - github.com/prometheus/client_model v0.3.0 - github.com/prometheus/common v0.42.0 + github.com/prometheus/client_model v0.4.0 + 
github.com/prometheus/common v0.44.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.10.0 - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 - github.com/stretchr/testify v1.8.3 + github.com/stretchr/testify v1.8.4 github.com/vultr/govultr/v2 v2.17.2 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 go.opentelemetry.io/otel v1.16.0 @@ -61,17 +61,17 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 go.opentelemetry.io/otel/sdk v1.16.0 go.opentelemetry.io/otel/trace v1.16.0 - go.uber.org/atomic v1.10.0 + go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.2 go.uber.org/goleak v1.2.1 golang.org/x/net v0.10.0 - golang.org/x/oauth2 v0.7.0 - golang.org/x/sync v0.1.0 + golang.org/x/oauth2 v0.8.0 + golang.org/x/sync v0.2.0 golang.org/x/sys v0.8.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.8.0 + golang.org/x/tools v0.9.3 google.golang.org/api v0.114.0 - google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e + google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 google.golang.org/grpc v1.55.0 google.golang.org/protobuf v1.30.0 gopkg.in/yaml.v2 v2.4.0 @@ -80,23 +80,20 @@ require ( k8s.io/apimachinery v0.26.2 k8s.io/client-go v0.26.2 k8s.io/klog v1.0.0 - k8s.io/klog/v2 v2.90.1 + k8s.io/klog/v2 v2.100.1 ) require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e // indirect ) require ( @@ -112,7 +109,7 @@ require ( github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 // indirect + github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect @@ -146,7 +143,7 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.7.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 // indirect github.com/hashicorp/cronexpr v1.1.1 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.4.0 // indirect @@ -182,7 +179,7 @@ require ( go.opentelemetry.io/otel/metric v1.16.0 // indirect 
go.opentelemetry.io/proto/otlp v0.19.0 // indirect golang.org/x/crypto v0.8.0 // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 + golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 golang.org/x/mod v0.10.0 // indirect golang.org/x/term v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect @@ -190,7 +187,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gotest.tools/v3 v3.0.3 // indirect - k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d // indirect + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect k8s.io/utils v0.0.0-20230308161112-d77c459e9343 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect diff --git a/go.sum b/go.sum index 859ce81ca2..774deb85be 100644 --- a/go.sum +++ b/go.sum @@ -42,15 +42,15 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IK github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 h1:+5VZ72z0Qan5Bog5C+ZkgSqUbeVUd9wgtHOrIKuc5b8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= -github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= @@ -106,8 +106,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M= -github.com/aws/aws-sdk-go v1.44.245/go.mod 
h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.276 h1:ywPlx9C5Yc482dUgAZ9bHpQ6onVvJvYE9FJWsNDCEy0= +github.com/aws/aws-sdk-go v1.44.276/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -138,8 +138,8 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 h1:zlUubfBUxApscKFsF4VSvvfhsBNTBu0eF/ddvpo96yk= +github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -162,8 +162,8 @@ github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek= -github.com/docker/docker v23.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg= +github.com/docker/docker v24.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -188,8 +188,8 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. 
github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= -github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v1.0.1 h1:kt9FtLiooDc0vbwTLhdg3dyNX1K9Qwa1EK9LcD4jVUQ= +github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -301,7 +301,6 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -376,8 +375,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20230406165453-00490a63f317 h1:hFhpt7CTmR3DX+b4R19ydQFtofxT0Sv3QsKNMVQYTMQ= -github.com/google/pprof v0.0.0-20230406165453-00490a63f317/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs= +github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -390,8 +389,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= -github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8= -github.com/gophercloud/gophercloud v1.3.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.4.0 h1:RqEu43vaX0lb0LanZr5BylK5ICVxjpFFoc0sxivyuHU= 
+github.com/gophercloud/gophercloud v1.4.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -407,11 +406,11 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 h1:1JYBfzqrWPcCclBwxFCPAou9n+q86mfnu7NAeHfte7A= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0/go.mod h1:YDZoGHuwE+ov0c8smSH49WLF3F2LaWnYYuDVd+EWrc0= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= -github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo= +github.com/hashicorp/consul/api v1.21.0 h1:WMR2JiyuaQWRAMFaOGiYfY4Q4HRpyYRe/oYQofjyduM= +github.com/hashicorp/consul/api v1.21.0/go.mod h1:f8zVJwBcLdr1IQnfdfszjUM0xzp31Zl3bpws3pL9uFM= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= @@ -460,8 +459,8 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A= -github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197/go.mod h1:2TCrNvonL09r7EiQ6M2rNt+Cmjbn1QbzchFoTWJFpj4= +github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f h1:yxjcAZRuYymIDC0W4IQHgTe9EQdu2BsjPlVmKwyVZT4= +github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f/go.mod h1:Xjd3OXUTfsWbCCBsQd3EdfPTz5evDi+fxqdvpN+WqQg= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= @@ -474,8 +473,8 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A= 
-github.com/ionos-cloud/sdk-go/v6 v6.1.6/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/ionos-cloud/sdk-go/v6 v6.1.7 h1:uVG1Q/ZDJ7YmCI9Oevpue9xJEH5UrUMyXv8gm7NTxIw= +github.com/ionos-cloud/sdk-go/v6 v6.1.7/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -507,8 +506,9 @@ github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0Lh github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -527,8 +527,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w= -github.com/linode/linodego v1.16.1/go.mod h1:aESRAbpLY9R6IA1WGAWHikRI9DU9Lhesapv1MhKmPHM= +github.com/linode/linodego v1.17.0 h1:aWS98f0jUoY2lhsEuBxRdVkqyGM0nazPd68AEDF0EvU= +github.com/linode/linodego v1.17.0/go.mod h1:/omzPxie0/YI6S0sTw1q47qDt5IYSlbO/infRR4UG+A= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -561,8 +561,8 @@ github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= -github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= +github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli 
v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -640,8 +640,8 @@ github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAv github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -669,8 +669,8 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1: github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= @@ -678,8 +678,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= @@ -701,15 +701,14 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 h1:1WuWJu7/e8SqK+uQl7lfk/N/oMZTL2NE/TJsNKRNMc4= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shoenig/test v0.6.3 h1:GVXWJFk9PiOjN0KoJ7VrJGH6uLPnqxR7/fe3HUPfE0c= +github.com/shoenig/test v0.6.6 h1:Oe8TPH9wAbv++YPNDKJWUnI8Q4PPWCx3UbOfH+FxiMU= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -752,8 +751,8 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -818,8 +817,8 @@ go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJP go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME= go.uber.org/automaxprocs v1.5.2/go.mod 
h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= @@ -857,8 +856,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -938,8 +937,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -953,8 +952,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1010,6 +1009,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1100,8 +1100,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= +golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1163,12 +1163,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e h1:Ao9GzfUMPH3zjVfzXG5rlWlk+Q8MXWKwWpwVQE1MXfw= -google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e h1:AZX1ra8YbFMSb7+1pI8S9v4rrgRR7jU1FmuFSSjTVcQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e h1:NumxXLPfHSndr3wBBdeKiVHjGVFzi9RX2HwwQke94iY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 h1:khxVcsk/FhnzxMKOyD+TDGwjbEOpcPuIpmafPGFmhMA= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1259,8 +1255,8 @@ k8s.io/apimachinery v0.26.2 
h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= -k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU= -k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= k8s.io/utils v0.0.0-20230308161112-d77c459e9343/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/web/api/v1/api.go b/web/api/v1/api.go index f7249efb04..e9b0081b36 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1491,7 +1491,7 @@ func (api *API) serveTSDBStatus(r *http.Request) apiFuncResult { chunkCount := int64(math.NaN()) for _, mF := range metrics { if *mF.Name == "prometheus_tsdb_head_chunks" { - m := *mF.Metric[0] + m := mF.Metric[0] if m.Gauge != nil { chunkCount = int64(m.Gauge.GetValue()) break diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index e819319ee6..e9c952c9cf 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -33,17 +33,17 @@ "lru-cache": "^6.0.0" }, "devDependencies": { - "@codemirror/autocomplete": "^6.4.0", - "@codemirror/language": "^6.4.0", - "@codemirror/lint": "^6.1.0", - "@codemirror/state": "^6.2.0", - "@codemirror/view": "^6.7.3", - "@lezer/common": "^1.0.2", - "@lezer/lr": "^1.3.1", - "@lezer/highlight": "^1.1.3", + "@codemirror/autocomplete": "^6.7.1", + "@codemirror/language": "^6.7.0", + "@codemirror/lint": "^6.2.2", + "@codemirror/state": "^6.2.1", + "@codemirror/view": "^6.13.0", + "@lezer/common": "^1.0.3", + "@lezer/lr": "^1.3.6", + "@lezer/highlight": "^1.1.6", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", - "nock": "^13.3.0" + "nock": "^13.3.1" }, "peerDependencies": { "@codemirror/autocomplete": "^6.4.0", diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 7daa567388..08249b418d 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -30,9 +30,9 @@ "test": "NODE_OPTIONS=--experimental-vm-modules jest" }, "devDependencies": { - "@lezer/generator": "^1.2.2", - "@lezer/lr": "^1.3.1", - "@lezer/highlight": "^1.1.3" + "@lezer/generator": "^1.2.3", + "@lezer/lr": "^1.3.6", + "@lezer/highlight": "^1.1.6" }, "peerDependencies": { "@lezer/lr": "^1.2.3", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 1d3d1d3e34..a672a17673 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -10,17 +10,17 @@ "module/*" ], "devDependencies": { - "@types/jest": "^29.4.0", + "@types/jest": "^29.5.2", "@types/node": "^17.0.45", - "eslint-config-prettier": "^8.6.0", + "eslint-config-prettier": "^8.8.0", "eslint-config-react-app": "^7.0.1", "eslint-plugin-prettier": "^4.2.1", - "jest-canvas-mock": "^2.4.0", + "jest-canvas-mock": "^2.5.1", "jest-fetch-mock": "^3.0.3", - "prettier": "^2.8.3", + "prettier": 
"^2.8.8", "react-scripts": "^5.0.1", - "ts-jest": "^29.0.5", - "typescript": "^4.9.4" + "ts-jest": "^29.1.0", + "typescript": "^4.9.5" }, "engines": { "npm": ">=7.0.0" @@ -35,17 +35,17 @@ "lru-cache": "^6.0.0" }, "devDependencies": { - "@codemirror/autocomplete": "^6.4.0", - "@codemirror/language": "^6.4.0", - "@codemirror/lint": "^6.1.0", - "@codemirror/state": "^6.2.0", - "@codemirror/view": "^6.7.3", - "@lezer/common": "^1.0.2", - "@lezer/highlight": "^1.1.3", - "@lezer/lr": "^1.3.1", + "@codemirror/autocomplete": "^6.7.1", + "@codemirror/language": "^6.7.0", + "@codemirror/lint": "^6.2.2", + "@codemirror/state": "^6.2.1", + "@codemirror/view": "^6.13.0", + "@lezer/common": "^1.0.3", + "@lezer/highlight": "^1.1.6", + "@lezer/lr": "^1.3.6", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", - "nock": "^13.3.0" + "nock": "^13.3.1" }, "engines": { "node": ">=12.0.0" @@ -64,9 +64,9 @@ "version": "0.44.0", "license": "Apache-2.0", "devDependencies": { - "@lezer/generator": "^1.2.2", - "@lezer/highlight": "^1.1.3", - "@lezer/lr": "^1.3.1" + "@lezer/generator": "^1.2.3", + "@lezer/highlight": "^1.1.6", + "@lezer/lr": "^1.3.6" }, "peerDependencies": { "@lezer/highlight": "^1.1.2", @@ -2080,9 +2080,9 @@ "dev": true }, "node_modules/@codemirror/autocomplete": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.4.0.tgz", - "integrity": "sha512-HLF2PnZAm1s4kGs30EiqKMgD7XsYaQ0XJnMR0rofEWQ5t5D60SfqpDIkIh1ze5tiEbyUWm8+VJ6W1/erVvBMIA==", + "version": "6.7.1", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.7.1.tgz", + "integrity": "sha512-hSxf9S0uB+GV+gBsjY1FZNo53e1FFdzPceRfCfD1gWOnV6o21GfB5J5Wg9G/4h76XZMPrF0A6OCK/Rz5+V1egg==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", @@ -2097,9 +2097,9 @@ } }, "node_modules/@codemirror/commands": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.2.0.tgz", - "integrity": "sha512-+00smmZBradoGFEkRjliN7BjqPh/Hx0KCHWOEibUmflUqZz2RwBTU0MrVovEEHozhx3AUSGcO/rl3/5f9e9Biw==", + "version": "6.2.4", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.2.4.tgz", + "integrity": "sha512-42lmDqVH0ttfilLShReLXsDfASKLXzfyC36bzwcqzox9PlHulMcsUOfHXNo2X2aFMVNUoQ7j+d4q5bnfseYoOA==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.2.0", @@ -2108,9 +2108,9 @@ } }, "node_modules/@codemirror/language": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.4.0.tgz", - "integrity": "sha512-Wzb7GnNj8vnEtbPWiOy9H0m1fBtE28kepQNGLXekU2EEZv43BF865VKITUn+NoV8OpW6gRtvm29YEhqm46927Q==", + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.7.0.tgz", + "integrity": "sha512-4SMwe6Fwn57klCUsVN0y4/h/iWT+XIXFEmop2lIHHuWO0ubjCrF3suqSZLyOQlznxkNnNbOOfKe5HQbQGCAmTg==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -2121,9 +2121,9 @@ } }, "node_modules/@codemirror/lint": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.1.0.tgz", - "integrity": "sha512-mdvDQrjRmYPvQ3WrzF6Ewaao+NWERYtpthJvoQ3tK3t/44Ynhk8ZGjTSL9jMEv8CgSMogmt75X8ceOZRDSXHtQ==", + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.2.2.tgz", + "integrity": "sha512-kHGuynBHjqinp1Bx25D2hgH8a6Fh1m9rSmZFzBVTqPIXDIcZ6j3VI67DY8USGYpGrjrJys9R52eLxtfERGNozg==", "dependencies": { "@codemirror/state": "^6.0.0", 
"@codemirror/view": "^6.0.0", @@ -2131,9 +2131,9 @@ } }, "node_modules/@codemirror/search": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.2.3.tgz", - "integrity": "sha512-V9n9233lopQhB1dyjsBK2Wc1i+8hcCqxl1wQ46c5HWWLePoe4FluV3TGHoZ04rBRlGjNyz9DTmpJErig8UE4jw==", + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.0.tgz", + "integrity": "sha512-64/M40YeJPToKvGO6p3fijo2vwUEj4nACEAXElCaYQ50HrXSvRaK+NHEhSh73WFBGdvIdhrV+lL9PdJy2RfCYA==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -2141,14 +2141,14 @@ } }, "node_modules/@codemirror/state": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.0.tgz", - "integrity": "sha512-69QXtcrsc3RYtOtd+GsvczJ319udtBf1PTrr2KbLWM/e2CXUPnh0Nz9AUo8WfhSQ7GeL8dPVNUmhQVgpmuaNGA==" + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.1.tgz", + "integrity": "sha512-RupHSZ8+OjNT38zU9fKH2sv+Dnlr8Eb8sl4NOnnqz95mCFTZUaiRP8Xv5MeeaG0px2b8Bnfe7YGwCV3nsBhbuw==" }, "node_modules/@codemirror/view": { - "version": "6.7.3", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.7.3.tgz", - "integrity": "sha512-Lt+4POnhXrZFfHOdPzXEHxrzwdy7cjqYlMkOWvoFGi6/bAsjzlFfr0NY3B15B/PGx+cDFgM1hlc12wvYeZbGLw==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.13.0.tgz", + "integrity": "sha512-oXTfJzHJ5Tl7f6T8ZO0HKf981zubxgKohjddLobbntbNZHlOZGMRL+pPZGtclDWFaFJWtGBYRGyNdjQ6Xsx5yA==", "dependencies": { "@codemirror/state": "^6.1.4", "style-mod": "^4.0.0", @@ -2476,42 +2476,33 @@ } }, "node_modules/@fortawesome/fontawesome-common-types": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.2.1.tgz", - "integrity": "sha512-Sz07mnQrTekFWLz5BMjOzHl/+NooTdW8F8kDQxjWwbpOJcnoSg4vUDng8d/WR1wOxM0O+CY9Zw0nR054riNYtQ==", + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.4.0.tgz", + "integrity": "sha512-HNii132xfomg5QVZw0HwXXpN22s7VBHQBv9CeOu9tfJnhsWQNd2lmTNi8CSrnw5B+5YOmzu1UoPAyxaXsJ6RgQ==", "hasInstallScript": true, "engines": { "node": ">=6" } }, "node_modules/@fortawesome/fontawesome-svg-core": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.2.1.tgz", - "integrity": "sha512-HELwwbCz6C1XEcjzyT1Jugmz2NNklMrSPjZOWMlc+ZsHIVk+XOvOXLGGQtFBwSyqfJDNgRq4xBCwWOaZ/d9DEA==", + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.4.0.tgz", + "integrity": "sha512-Bertv8xOiVELz5raB2FlXDPKt+m94MQ3JgDfsVbrqNpLU9+UE2E18GKjLKw+d3XbeYPqg1pzyQKGsrzbw+pPaw==", "hasInstallScript": true, "dependencies": { - "@fortawesome/fontawesome-common-types": "6.2.1" + "@fortawesome/fontawesome-common-types": "6.4.0" }, "engines": { "node": ">=6" } }, - "node_modules/@fortawesome/fontawesome-svg-core/node_modules/@fortawesome/fontawesome-common-types": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.2.1.tgz", - "integrity": "sha512-Sz07mnQrTekFWLz5BMjOzHl/+NooTdW8F8kDQxjWwbpOJcnoSg4vUDng8d/WR1wOxM0O+CY9Zw0nR054riNYtQ==", - "hasInstallScript": true, - "engines": { - "node": ">=6" - } - }, "node_modules/@fortawesome/free-solid-svg-icons": { - "version": "6.2.1", - "resolved": 
"https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.2.1.tgz", - "integrity": "sha512-oKuqrP5jbfEPJWTij4sM+/RvgX+RMFwx3QZCZcK9PrBDgxC35zuc7AOFsyMjMd/PIFPeB2JxyqDr5zs/DZFPPw==", + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.4.0.tgz", + "integrity": "sha512-kutPeRGWm8V5dltFP1zGjQOEAzaLZj4StdQhWVZnfGFCvAPVvHh8qk5bRrU4KXnRRRNni5tKQI9PBAdI6MP8nQ==", "hasInstallScript": true, "dependencies": { - "@fortawesome/fontawesome-common-types": "6.2.1" + "@fortawesome/fontawesome-common-types": "6.4.0" }, "engines": { "node": ">=6" @@ -3510,14 +3501,14 @@ "dev": true }, "node_modules/@lezer/common": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.2.tgz", - "integrity": "sha512-SVgiGtMnMnW3ActR8SXgsDhw7a0w0ChHSYAyAUxxrOiJ1OqYWEKk/xJd84tTSPo1mo6DXLObAJALNnd0Hrv7Ng==" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.3.tgz", + "integrity": "sha512-JH4wAXCgUOcCGNekQPLhVeUtIqjH0yPBs7vvUdSjyQama9618IOKFJwkv2kcqdhF0my8hQEgCTEJU0GIgnahvA==" }, "node_modules/@lezer/generator": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.2.2.tgz", - "integrity": "sha512-O//eH9jTPM1GnbZruuD23xU68Pkuragonn1DEIom4Kt/eJN/QFt7Vzvp1YjV/XBmoUKC+2ySPgrA5fMF9FMM2g==", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.2.3.tgz", + "integrity": "sha512-xRmNryYbJpWs7novjWtQLCGHOj71B4X1QHQ4SgJqwm11tl6COEVAGhuFTXKX16JMJUhumdXaX8We6hEMd4clDg==", "dev": true, "dependencies": { "@lezer/common": "^1.0.2", @@ -3528,17 +3519,17 @@ } }, "node_modules/@lezer/highlight": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.3.tgz", - "integrity": "sha512-3vLKLPThO4td43lYRBygmMY18JN3CPh9w+XS2j8WC30vR4yZeFG4z1iFe4jXE43NtGqe//zHW5q8ENLlHvz9gw==", + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.6.tgz", + "integrity": "sha512-cmSJYa2us+r3SePpRCjN5ymCqCPv+zyXmDl0ciWtVaNiORT/MxM7ZgOMQZADD0o51qOaOg24qc/zBViOIwAjJg==", "dependencies": { "@lezer/common": "^1.0.0" } }, "node_modules/@lezer/lr": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.1.tgz", - "integrity": "sha512-+GymJB/+3gThkk2zHwseaJTI5oa4AuOuj1I2LCslAVq1dFZLSX8SAe4ZlJq1TjezteDXtF/+d4qeWz9JvnrG9Q==", + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.6.tgz", + "integrity": "sha512-IDhcWjfxwWACnatUi0GzWBCbochfqxo3LZZlS27LbJh8RVYYXXyR5Ck9659IhkWkhSW/kZlaaiJpUO+YZTUK+Q==", "dependencies": { "@lezer/common": "^1.0.0" } @@ -4209,13 +4200,24 @@ } }, "node_modules/@types/enzyme": { - "version": "3.10.12", - "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.12.tgz", - "integrity": "sha512-xryQlOEIe1TduDWAOphR0ihfebKFSWOXpIsk+70JskCfRfW+xALdnJ0r1ZOTo85F9Qsjk6vtlU7edTYHbls9tA==", + "version": "3.10.13", + "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.13.tgz", + "integrity": "sha512-FCtoUhmFsud0Yx9fmZk179GkdZ4U9B0GFte64/Md+W/agx0L5SxsIIbhLBOxIb9y2UfBA4WQnaG1Od/UsUQs9Q==", "dev": true, "dependencies": { "@types/cheerio": "*", - "@types/react": "*" + "@types/react": "^16" + } + }, + "node_modules/@types/enzyme/node_modules/@types/react": { + "version": "16.14.42", + "resolved": "https://registry.npmjs.org/@types/react/-/react-16.14.42.tgz", + "integrity": 
"sha512-r6lbqQBJsQ5JJ0fp5I1+F3weosNhk7jOEcKeusIlCDYUK6kCpvIkYCamBNqGyS6WEztYlT8wmAVgblV0HxOFoA==", + "dev": true, + "dependencies": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" } }, "node_modules/@types/eslint": { @@ -4330,9 +4332,9 @@ } }, "node_modules/@types/jest": { - "version": "29.4.0", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.4.0.tgz", - "integrity": "sha512-VaywcGQ9tPorCX/Jkkni7RWGFfI11whqzs8dvxF41P17Z+z872thvEvlIbznjPJ02kl1HMX3LmLOonsj2n7HeQ==", + "version": "29.5.2", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.2.tgz", + "integrity": "sha512-mSoZVJF5YzGVCk+FsDxzDuH7s+SCkzrgKZzf0Z0T2WudhBUPoF6ktoTPC4R0ZoCPCV5xUvuU6ias5NvxcBcMMg==", "dev": true, "dependencies": { "expect": "^29.0.0", @@ -4466,9 +4468,9 @@ } }, "node_modules/@types/react-dom": { - "version": "17.0.18", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.18.tgz", - "integrity": "sha512-rLVtIfbwyur2iFKykP2w0pl/1unw26b5td16d5xMgp7/yjTHomkyxPYChFoCr/FtEX1lN9wY6lFj1qvKdS5kDw==", + "version": "17.0.20", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.20.tgz", + "integrity": "sha512-4pzIjSxDueZZ90F52mU3aPoogkHIoSIDG+oQ+wQK7Cy2B9S+MvOqY0uEA/qawKz381qrEDkvpwyt8Bm31I8sbA==", "dev": true, "dependencies": { "@types/react": "^17" @@ -4520,9 +4522,9 @@ "dev": true }, "node_modules/@types/sanitize-html": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.8.0.tgz", - "integrity": "sha512-Uih6caOm3DsBYnVGOYn0A9NoTNe1c4aPStmHC/YA2JrpP9kx//jzaRcIklFvSpvVQEcpl/ZCr4DgISSf/YxTvg==", + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.9.0.tgz", + "integrity": "sha512-4fP/kEcKNj2u39IzrxWYuf/FnCCwwQCpif6wwY6ROUS1EPRIfWJjGkY3HIowY1EX/VbX5e86yq8AAE7UPMgATg==", "dev": true, "dependencies": { "htmlparser2": "^8.0.0" @@ -4573,9 +4575,9 @@ } }, "node_modules/@types/sinon": { - "version": "10.0.13", - "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.13.tgz", - "integrity": "sha512-UVjDqJblVNQYvVNUsj0PuYYw0ELRmgt1Nt5Vk0pT5f16ROGfcKJY8o1HVuMOJOpD727RrGB9EGvoaTQE5tgxZQ==", + "version": "10.0.15", + "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.15.tgz", + "integrity": "sha512-3lrFNQG0Kr2LDzvjyjB6AMJk4ge+8iYhQfdnSwIwlG88FUOV43kPcQqDZkDa/h3WSZy6i8Fr0BSjfQtB1B3xuQ==", "dev": true, "dependencies": { "@types/sinonjs__fake-timers": "*" @@ -7557,9 +7559,9 @@ "dev": true }, "node_modules/downshift": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.2.0.tgz", - "integrity": "sha512-dEn1Sshe7iTelUhmdbmiJhtIiwIBxBV8p15PuvEBh0qZcHXZnEt0geuCIIkCL4+ooaKRuLE0Wc+Fz9SwWuBIyg==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.0.tgz", + "integrity": "sha512-VSoTVynTAsabou/hbZ6HJHUVhtBiVOjQoBsCPcQq5eAROIGP+9XKMp9asAKQ3cEcUP4oe0fFdD2pziUjhFY33Q==", "dependencies": { "@babel/runtime": "^7.14.8", "compute-scroll-into-view": "^2.0.4", @@ -7984,9 +7986,9 @@ } }, "node_modules/eslint-config-prettier": { - "version": "8.6.0", - "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.6.0.tgz", - "integrity": "sha512-bAF0eLpLVqP5oEVUFKpMA+NnRFICwn9X8B5jrR9FcqnYBuPbqWEjTEspPWMj5ye6czoSLDweCzSo3Ko7gGrZaA==", + "version": "8.8.0", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.8.0.tgz", + "integrity": 
"sha512-wLbQiFre3tdGgpDv67NQKnJuTlcUVYHas3k+DZCc2U2BadthoEY4B7hLPvAxaqdyOGCzuLfii2fqGph10va7oA==", "dev": true, "bin": { "eslint-config-prettier": "bin/cli.js" @@ -10550,9 +10552,9 @@ } }, "node_modules/jest-canvas-mock": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.4.0.tgz", - "integrity": "sha512-mmMpZzpmLzn5vepIaHk5HoH3Ka4WykbSoLuG/EKoJd0x0ID/t+INo1l8ByfcUJuDM+RIsL4QDg/gDnBbrj2/IQ==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.5.1.tgz", + "integrity": "sha512-IVnRiz+v4EYn3ydM/pBo8GW/J+nU/Hg5gHBQQOUQhdRyNfvHnabB8ReqARLO0p+kvQghqr4V0tA92CF3JcUSRg==", "dev": true, "dependencies": { "cssfontparser": "^1.2.1", @@ -12836,9 +12838,9 @@ } }, "node_modules/jquery": { - "version": "3.6.3", - "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.6.3.tgz", - "integrity": "sha512-bZ5Sy3YzKo9Fyc8wH2iIQK4JImJ6R0GWI9kL1/k7Z91ZBNgkRXE6U0JfHIizZbort8ZunhSI3jw9I6253ahKfg==" + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.0.tgz", + "integrity": "sha512-umpJ0/k8X0MvD1ds0P9SfowREz2LenHsQaxSohMZ5OMNEU2r0tf8pdeEFTHMFxWVxKNyU9rTtK3CWzUCTKJUeQ==" }, "node_modules/jquery.flot.tooltip": { "version": "0.9.0", @@ -13488,11 +13490,11 @@ } }, "node_modules/moment-timezone": { - "version": "0.5.40", - "resolved": "https://registry.npmjs.org/moment-timezone/-/moment-timezone-0.5.40.tgz", - "integrity": "sha512-tWfmNkRYmBkPJz5mr9GVDn9vRlVZOTe6yqY92rFxiOdWXbjaR0+9LwQnZGGuNR63X456NqmEkbskte8tWL5ePg==", + "version": "0.5.43", + "resolved": "https://registry.npmjs.org/moment-timezone/-/moment-timezone-0.5.43.tgz", + "integrity": "sha512-72j3aNyuIsDxdF1i7CEgV2FfxM1r6aaqJyLB2vwb33mXYyoyLly+F1zbWqhA3/bVIoJ4szlUoMbUnVdid32NUQ==", "dependencies": { - "moment": ">= 2.9.0" + "moment": "^2.29.4" }, "engines": { "node": "*" @@ -13640,9 +13642,9 @@ } }, "node_modules/nock": { - "version": "13.3.0", - "resolved": "https://registry.npmjs.org/nock/-/nock-13.3.0.tgz", - "integrity": "sha512-HHqYQ6mBeiMc+N038w8LkMpDCRquCHWeNmN3v6645P3NhN2+qXOBqvPqo7Rt1VyCMzKhJ733wZqw5B7cQVFNPg==", + "version": "13.3.1", + "resolved": "https://registry.npmjs.org/nock/-/nock-13.3.1.tgz", + "integrity": "sha512-vHnopocZuI93p2ccivFyGuUfzjq2fxNyNurp7816mlT5V5HF4SzXu8lvLrVzBbNqzs+ODooZ6OksuSUNM7Njkw==", "dev": true, "dependencies": { "debug": "^4.1.0", @@ -15708,9 +15710,9 @@ } }, "node_modules/prettier": { - "version": "2.8.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.3.tgz", - "integrity": "sha512-tJ/oJ4amDihPoufT5sM0Z1SKEuKay8LfVAMlbbhnnkvt6BUserZylqo2PN+p9KeljLr0OHa2rXHU1T8reeoTrw==", + "version": "2.8.8", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.8.tgz", + "integrity": "sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==", "dev": true, "bin": { "prettier": "bin-prettier.js" @@ -18022,9 +18024,9 @@ "dev": true }, "node_modules/sanitize-html": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.8.1.tgz", - "integrity": "sha512-qK5neD0SaMxGwVv5txOYv05huC3o6ZAA4h5+7nJJgWMNFUNRjcjLO6FpwAtKzfKCZ0jrG6xTk6eVFskbvOGblg==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.10.0.tgz", + "integrity": "sha512-JqdovUd81dG4k87vZt6uA6YhDfWkUGruUu/aPmXLxXi45gZExnt9Bnw/qeQU8oGf82vPyaE0vO4aH0PbobB9JQ==", "dependencies": { "deepmerge": "^4.2.2", "escape-string-regexp": "^4.0.0", @@ -18059,9 +18061,9 @@ "dev": true }, "node_modules/sass": { 
- "version": "1.57.1", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.57.1.tgz", - "integrity": "sha512-O2+LwLS79op7GI0xZ8fqzF7X2m/m8WFfI02dHOdsK5R2ECeS5F62zrwg/relM1rjSLy7Vd/DiMNIvPrQGsA0jw==", + "version": "1.62.1", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.62.1.tgz", + "integrity": "sha512-NHpxIzN29MXvWiuswfc1W3I0N8SXBd8UR26WntmDlRYf0bSADnwnOjsyMZ3lMezSlArD33Vs3YFhp7dWvL770A==", "dependencies": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -18071,7 +18073,7 @@ "sass": "sass.js" }, "engines": { - "node": ">=12.0.0" + "node": ">=14.0.0" } }, "node_modules/sass-loader": { @@ -19378,9 +19380,9 @@ "dev": true }, "node_modules/ts-jest": { - "version": "29.0.5", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.0.5.tgz", - "integrity": "sha512-PL3UciSgIpQ7f6XjVOmbi96vmDHUqAyqDr8YxzopDqX3kfgYtX1cuNeBjP+L9sFXi6nzsGGA6R3fP3DDDJyrxA==", + "version": "29.1.0", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.0.tgz", + "integrity": "sha512-ZhNr7Z4PcYa+JjMl62ir+zPiNJfXJN6E8hSLnaUKhOgqcn8vb3e537cpkd0FuAfRK3sR1LSqM1MOhliXNgOFPA==", "dev": true, "dependencies": { "bs-logger": "0.x", @@ -19403,7 +19405,7 @@ "@jest/types": "^29.0.0", "babel-jest": "^29.0.0", "jest": "^29.0.0", - "typescript": ">=4.3" + "typescript": ">=4.3 <6" }, "peerDependenciesMeta": { "@babel/core": { @@ -19572,9 +19574,9 @@ } }, "node_modules/typescript": { - "version": "4.9.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.4.tgz", - "integrity": "sha512-Uz+dTXYzxXXbsFpM86Wh3dKCxrQqUcVMxwU54orwlJjOpO3ao8L7j5lH+dWfTwgCwIuM9GQ2kvVotzYJMXTBZg==", + "version": "4.9.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz", + "integrity": "sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -20765,31 +20767,31 @@ "name": "@prometheus-io/app", "version": "0.44.0", "dependencies": { - "@codemirror/autocomplete": "^6.4.0", - "@codemirror/commands": "^6.2.0", - "@codemirror/language": "^6.4.0", - "@codemirror/lint": "^6.1.0", - "@codemirror/search": "^6.2.3", - "@codemirror/state": "^6.2.0", - "@codemirror/view": "^6.7.3", + "@codemirror/autocomplete": "^6.7.1", + "@codemirror/commands": "^6.2.4", + "@codemirror/language": "^6.7.0", + "@codemirror/lint": "^6.2.2", + "@codemirror/search": "^6.5.0", + "@codemirror/state": "^6.2.1", + "@codemirror/view": "^6.13.0", "@forevolve/bootstrap-dark": "^2.1.1", - "@fortawesome/fontawesome-svg-core": "6.2.1", - "@fortawesome/free-solid-svg-icons": "6.2.1", + "@fortawesome/fontawesome-svg-core": "6.4.0", + "@fortawesome/free-solid-svg-icons": "6.4.0", "@fortawesome/react-fontawesome": "0.2.0", - "@lezer/common": "^1.0.2", - "@lezer/highlight": "^1.1.3", - "@lezer/lr": "^1.3.1", + "@lezer/common": "^1.0.3", + "@lezer/highlight": "^1.1.6", + "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", "@prometheus-io/codemirror-promql": "0.44.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", - "downshift": "^7.2.0", + "downshift": "^7.6.0", "http-proxy-middleware": "^2.0.6", - "jquery": "^3.6.3", + "jquery": "^3.7.0", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.29.4", - "moment-timezone": "^0.5.40", + "moment-timezone": "^0.5.43", "popper.js": "^1.14.3", "react": "^17.0.2", "react-copy-to-clipboard": "^5.1.0", @@ -20799,22 +20801,22 @@ "react-router-dom": "^5.3.4", "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", - "sanitize-html": "^2.8.1", - "sass": "1.57.1", + 
"sanitize-html": "^2.10.0", + "sass": "1.62.1", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" }, "devDependencies": { "@testing-library/react-hooks": "^7.0.2", - "@types/enzyme": "^3.10.12", + "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", "@types/jquery": "^3.5.16", - "@types/react": "^17.0.53", + "@types/react": "^17.0.60", "@types/react-copy-to-clipboard": "^5.0.4", - "@types/react-dom": "^17.0.18", + "@types/react-dom": "^17.0.20", "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.8.0", - "@types/sinon": "^10.0.13", + "@types/sanitize-html": "^2.9.0", + "@types/sinon": "^10.0.15", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "enzyme": "^3.11.0", "enzyme-to-json": "^3.6.2", @@ -20824,6 +20826,17 @@ "optionalDependencies": { "fsevents": "^2.3.2" } + }, + "react-app/node_modules/@types/react": { + "version": "17.0.60", + "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.60.tgz", + "integrity": "sha512-pCH7bqWIfzHs3D+PDs3O/COCQJka+Kcw3RnO9rFA2zalqoXg7cNjJDh6mZ7oRtY1wmY4LVwDdAbA1F7Z8tv3BQ==", + "dev": true, + "dependencies": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } } }, "dependencies": { @@ -22224,9 +22237,9 @@ "dev": true }, "@codemirror/autocomplete": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.4.0.tgz", - "integrity": "sha512-HLF2PnZAm1s4kGs30EiqKMgD7XsYaQ0XJnMR0rofEWQ5t5D60SfqpDIkIh1ze5tiEbyUWm8+VJ6W1/erVvBMIA==", + "version": "6.7.1", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.7.1.tgz", + "integrity": "sha512-hSxf9S0uB+GV+gBsjY1FZNo53e1FFdzPceRfCfD1gWOnV6o21GfB5J5Wg9G/4h76XZMPrF0A6OCK/Rz5+V1egg==", "requires": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", @@ -22235,9 +22248,9 @@ } }, "@codemirror/commands": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.2.0.tgz", - "integrity": "sha512-+00smmZBradoGFEkRjliN7BjqPh/Hx0KCHWOEibUmflUqZz2RwBTU0MrVovEEHozhx3AUSGcO/rl3/5f9e9Biw==", + "version": "6.2.4", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.2.4.tgz", + "integrity": "sha512-42lmDqVH0ttfilLShReLXsDfASKLXzfyC36bzwcqzox9PlHulMcsUOfHXNo2X2aFMVNUoQ7j+d4q5bnfseYoOA==", "requires": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.2.0", @@ -22246,9 +22259,9 @@ } }, "@codemirror/language": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.4.0.tgz", - "integrity": "sha512-Wzb7GnNj8vnEtbPWiOy9H0m1fBtE28kepQNGLXekU2EEZv43BF865VKITUn+NoV8OpW6gRtvm29YEhqm46927Q==", + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.7.0.tgz", + "integrity": "sha512-4SMwe6Fwn57klCUsVN0y4/h/iWT+XIXFEmop2lIHHuWO0ubjCrF3suqSZLyOQlznxkNnNbOOfKe5HQbQGCAmTg==", "requires": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -22259,9 +22272,9 @@ } }, "@codemirror/lint": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.1.0.tgz", - "integrity": "sha512-mdvDQrjRmYPvQ3WrzF6Ewaao+NWERYtpthJvoQ3tK3t/44Ynhk8ZGjTSL9jMEv8CgSMogmt75X8ceOZRDSXHtQ==", + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.2.2.tgz", + "integrity": "sha512-kHGuynBHjqinp1Bx25D2hgH8a6Fh1m9rSmZFzBVTqPIXDIcZ6j3VI67DY8USGYpGrjrJys9R52eLxtfERGNozg==", "requires": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -22269,9 +22282,9 @@ } 
}, "@codemirror/search": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.2.3.tgz", - "integrity": "sha512-V9n9233lopQhB1dyjsBK2Wc1i+8hcCqxl1wQ46c5HWWLePoe4FluV3TGHoZ04rBRlGjNyz9DTmpJErig8UE4jw==", + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.0.tgz", + "integrity": "sha512-64/M40YeJPToKvGO6p3fijo2vwUEj4nACEAXElCaYQ50HrXSvRaK+NHEhSh73WFBGdvIdhrV+lL9PdJy2RfCYA==", "requires": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -22279,14 +22292,14 @@ } }, "@codemirror/state": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.0.tgz", - "integrity": "sha512-69QXtcrsc3RYtOtd+GsvczJ319udtBf1PTrr2KbLWM/e2CXUPnh0Nz9AUo8WfhSQ7GeL8dPVNUmhQVgpmuaNGA==" + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.1.tgz", + "integrity": "sha512-RupHSZ8+OjNT38zU9fKH2sv+Dnlr8Eb8sl4NOnnqz95mCFTZUaiRP8Xv5MeeaG0px2b8Bnfe7YGwCV3nsBhbuw==" }, "@codemirror/view": { - "version": "6.7.3", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.7.3.tgz", - "integrity": "sha512-Lt+4POnhXrZFfHOdPzXEHxrzwdy7cjqYlMkOWvoFGi6/bAsjzlFfr0NY3B15B/PGx+cDFgM1hlc12wvYeZbGLw==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.13.0.tgz", + "integrity": "sha512-oXTfJzHJ5Tl7f6T8ZO0HKf981zubxgKohjddLobbntbNZHlOZGMRL+pPZGtclDWFaFJWtGBYRGyNdjQ6Xsx5yA==", "requires": { "@codemirror/state": "^6.1.4", "style-mod": "^4.0.0", @@ -22463,31 +22476,24 @@ } }, "@fortawesome/fontawesome-common-types": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.2.1.tgz", - "integrity": "sha512-Sz07mnQrTekFWLz5BMjOzHl/+NooTdW8F8kDQxjWwbpOJcnoSg4vUDng8d/WR1wOxM0O+CY9Zw0nR054riNYtQ==" + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.4.0.tgz", + "integrity": "sha512-HNii132xfomg5QVZw0HwXXpN22s7VBHQBv9CeOu9tfJnhsWQNd2lmTNi8CSrnw5B+5YOmzu1UoPAyxaXsJ6RgQ==" }, "@fortawesome/fontawesome-svg-core": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.2.1.tgz", - "integrity": "sha512-HELwwbCz6C1XEcjzyT1Jugmz2NNklMrSPjZOWMlc+ZsHIVk+XOvOXLGGQtFBwSyqfJDNgRq4xBCwWOaZ/d9DEA==", + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.4.0.tgz", + "integrity": "sha512-Bertv8xOiVELz5raB2FlXDPKt+m94MQ3JgDfsVbrqNpLU9+UE2E18GKjLKw+d3XbeYPqg1pzyQKGsrzbw+pPaw==", "requires": { - "@fortawesome/fontawesome-common-types": "6.2.1" - }, - "dependencies": { - "@fortawesome/fontawesome-common-types": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.2.1.tgz", - "integrity": "sha512-Sz07mnQrTekFWLz5BMjOzHl/+NooTdW8F8kDQxjWwbpOJcnoSg4vUDng8d/WR1wOxM0O+CY9Zw0nR054riNYtQ==" - } + "@fortawesome/fontawesome-common-types": "6.4.0" } }, "@fortawesome/free-solid-svg-icons": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.2.1.tgz", - "integrity": "sha512-oKuqrP5jbfEPJWTij4sM+/RvgX+RMFwx3QZCZcK9PrBDgxC35zuc7AOFsyMjMd/PIFPeB2JxyqDr5zs/DZFPPw==", + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.4.0.tgz", + "integrity": 
"sha512-kutPeRGWm8V5dltFP1zGjQOEAzaLZj4StdQhWVZnfGFCvAPVvHh8qk5bRrU4KXnRRRNni5tKQI9PBAdI6MP8nQ==", "requires": { - "@fortawesome/fontawesome-common-types": "6.2.1" + "@fortawesome/fontawesome-common-types": "6.4.0" } }, "@fortawesome/react-fontawesome": { @@ -23277,14 +23283,14 @@ "dev": true }, "@lezer/common": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.2.tgz", - "integrity": "sha512-SVgiGtMnMnW3ActR8SXgsDhw7a0w0ChHSYAyAUxxrOiJ1OqYWEKk/xJd84tTSPo1mo6DXLObAJALNnd0Hrv7Ng==" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.3.tgz", + "integrity": "sha512-JH4wAXCgUOcCGNekQPLhVeUtIqjH0yPBs7vvUdSjyQama9618IOKFJwkv2kcqdhF0my8hQEgCTEJU0GIgnahvA==" }, "@lezer/generator": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.2.2.tgz", - "integrity": "sha512-O//eH9jTPM1GnbZruuD23xU68Pkuragonn1DEIom4Kt/eJN/QFt7Vzvp1YjV/XBmoUKC+2ySPgrA5fMF9FMM2g==", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.2.3.tgz", + "integrity": "sha512-xRmNryYbJpWs7novjWtQLCGHOj71B4X1QHQ4SgJqwm11tl6COEVAGhuFTXKX16JMJUhumdXaX8We6hEMd4clDg==", "dev": true, "requires": { "@lezer/common": "^1.0.2", @@ -23292,17 +23298,17 @@ } }, "@lezer/highlight": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.3.tgz", - "integrity": "sha512-3vLKLPThO4td43lYRBygmMY18JN3CPh9w+XS2j8WC30vR4yZeFG4z1iFe4jXE43NtGqe//zHW5q8ENLlHvz9gw==", + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.6.tgz", + "integrity": "sha512-cmSJYa2us+r3SePpRCjN5ymCqCPv+zyXmDl0ciWtVaNiORT/MxM7ZgOMQZADD0o51qOaOg24qc/zBViOIwAjJg==", "requires": { "@lezer/common": "^1.0.0" } }, "@lezer/lr": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.1.tgz", - "integrity": "sha512-+GymJB/+3gThkk2zHwseaJTI5oa4AuOuj1I2LCslAVq1dFZLSX8SAe4ZlJq1TjezteDXtF/+d4qeWz9JvnrG9Q==", + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.6.tgz", + "integrity": "sha512-IDhcWjfxwWACnatUi0GzWBCbochfqxo3LZZlS27LbJh8RVYYXXyR5Ck9659IhkWkhSW/kZlaaiJpUO+YZTUK+Q==", "requires": { "@lezer/common": "^1.0.0" } @@ -23401,45 +23407,45 @@ "@prometheus-io/app": { "version": "file:react-app", "requires": { - "@codemirror/autocomplete": "^6.4.0", - "@codemirror/commands": "^6.2.0", - "@codemirror/language": "^6.4.0", - "@codemirror/lint": "^6.1.0", - "@codemirror/search": "^6.2.3", - "@codemirror/state": "^6.2.0", - "@codemirror/view": "^6.7.3", + "@codemirror/autocomplete": "^6.7.1", + "@codemirror/commands": "^6.2.4", + "@codemirror/language": "^6.7.0", + "@codemirror/lint": "^6.2.2", + "@codemirror/search": "^6.5.0", + "@codemirror/state": "^6.2.1", + "@codemirror/view": "^6.13.0", "@forevolve/bootstrap-dark": "^2.1.1", - "@fortawesome/fontawesome-svg-core": "6.2.1", - "@fortawesome/free-solid-svg-icons": "6.2.1", + "@fortawesome/fontawesome-svg-core": "6.4.0", + "@fortawesome/free-solid-svg-icons": "6.4.0", "@fortawesome/react-fontawesome": "0.2.0", - "@lezer/common": "^1.0.2", - "@lezer/highlight": "^1.1.3", - "@lezer/lr": "^1.3.1", + "@lezer/common": "^1.0.3", + "@lezer/highlight": "^1.1.6", + "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", "@prometheus-io/codemirror-promql": "0.44.0", "@testing-library/react-hooks": "^7.0.2", - "@types/enzyme": "^3.10.12", + "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", "@types/jquery": "^3.5.16", - 
"@types/react": "^17.0.53", + "@types/react": "^17.0.60", "@types/react-copy-to-clipboard": "^5.0.4", - "@types/react-dom": "^17.0.18", + "@types/react-dom": "^17.0.20", "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.8.0", - "@types/sinon": "^10.0.13", + "@types/sanitize-html": "^2.9.0", + "@types/sinon": "^10.0.15", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", - "downshift": "^7.2.0", + "downshift": "^7.6.0", "enzyme": "^3.11.0", "enzyme-to-json": "^3.6.2", "fsevents": "^2.3.2", "http-proxy-middleware": "^2.0.6", - "jquery": "^3.6.3", + "jquery": "^3.7.0", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.29.4", - "moment-timezone": "^0.5.40", + "moment-timezone": "^0.5.43", "mutationobserver-shim": "^0.3.7", "popper.js": "^1.14.3", "react": "^17.0.2", @@ -23450,37 +23456,50 @@ "react-router-dom": "^5.3.4", "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", - "sanitize-html": "^2.8.1", - "sass": "1.57.1", + "sanitize-html": "^2.10.0", + "sass": "1.62.1", "sinon": "^14.0.2", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" + }, + "dependencies": { + "@types/react": { + "version": "17.0.60", + "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.60.tgz", + "integrity": "sha512-pCH7bqWIfzHs3D+PDs3O/COCQJka+Kcw3RnO9rFA2zalqoXg7cNjJDh6mZ7oRtY1wmY4LVwDdAbA1F7Z8tv3BQ==", + "dev": true, + "requires": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + } } }, "@prometheus-io/codemirror-promql": { "version": "file:module/codemirror-promql", "requires": { - "@codemirror/autocomplete": "^6.4.0", - "@codemirror/language": "^6.4.0", - "@codemirror/lint": "^6.1.0", - "@codemirror/state": "^6.2.0", - "@codemirror/view": "^6.7.3", - "@lezer/common": "^1.0.2", - "@lezer/highlight": "^1.1.3", - "@lezer/lr": "^1.3.1", + "@codemirror/autocomplete": "^6.7.1", + "@codemirror/language": "^6.7.0", + "@codemirror/lint": "^6.2.2", + "@codemirror/state": "^6.2.1", + "@codemirror/view": "^6.13.0", + "@lezer/common": "^1.0.3", + "@lezer/highlight": "^1.1.6", + "@lezer/lr": "^1.3.6", "@prometheus-io/lezer-promql": "0.44.0", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", "lru-cache": "^6.0.0", - "nock": "^13.3.0" + "nock": "^13.3.1" } }, "@prometheus-io/lezer-promql": { "version": "file:module/lezer-promql", "requires": { - "@lezer/generator": "^1.2.2", - "@lezer/highlight": "^1.1.3", - "@lezer/lr": "^1.3.1" + "@lezer/generator": "^1.2.3", + "@lezer/highlight": "^1.1.6", + "@lezer/lr": "^1.3.6" } }, "@rollup/plugin-babel": { @@ -23851,13 +23870,26 @@ } }, "@types/enzyme": { - "version": "3.10.12", - "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.12.tgz", - "integrity": "sha512-xryQlOEIe1TduDWAOphR0ihfebKFSWOXpIsk+70JskCfRfW+xALdnJ0r1ZOTo85F9Qsjk6vtlU7edTYHbls9tA==", + "version": "3.10.13", + "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.13.tgz", + "integrity": "sha512-FCtoUhmFsud0Yx9fmZk179GkdZ4U9B0GFte64/Md+W/agx0L5SxsIIbhLBOxIb9y2UfBA4WQnaG1Od/UsUQs9Q==", "dev": true, "requires": { "@types/cheerio": "*", - "@types/react": "*" + "@types/react": "^16" + }, + "dependencies": { + "@types/react": { + "version": "16.14.42", + "resolved": "https://registry.npmjs.org/@types/react/-/react-16.14.42.tgz", + "integrity": "sha512-r6lbqQBJsQ5JJ0fp5I1+F3weosNhk7jOEcKeusIlCDYUK6kCpvIkYCamBNqGyS6WEztYlT8wmAVgblV0HxOFoA==", + "dev": true, + "requires": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + } } 
}, "@types/eslint": { @@ -23972,9 +24004,9 @@ } }, "@types/jest": { - "version": "29.4.0", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.4.0.tgz", - "integrity": "sha512-VaywcGQ9tPorCX/Jkkni7RWGFfI11whqzs8dvxF41P17Z+z872thvEvlIbznjPJ02kl1HMX3LmLOonsj2n7HeQ==", + "version": "29.5.2", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.2.tgz", + "integrity": "sha512-mSoZVJF5YzGVCk+FsDxzDuH7s+SCkzrgKZzf0Z0T2WudhBUPoF6ktoTPC4R0ZoCPCV5xUvuU6ias5NvxcBcMMg==", "dev": true, "requires": { "expect": "^29.0.0", @@ -24101,9 +24133,9 @@ } }, "@types/react-dom": { - "version": "17.0.18", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.18.tgz", - "integrity": "sha512-rLVtIfbwyur2iFKykP2w0pl/1unw26b5td16d5xMgp7/yjTHomkyxPYChFoCr/FtEX1lN9wY6lFj1qvKdS5kDw==", + "version": "17.0.20", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.20.tgz", + "integrity": "sha512-4pzIjSxDueZZ90F52mU3aPoogkHIoSIDG+oQ+wQK7Cy2B9S+MvOqY0uEA/qawKz381qrEDkvpwyt8Bm31I8sbA==", "dev": true, "requires": { "@types/react": "^17" @@ -24155,9 +24187,9 @@ "dev": true }, "@types/sanitize-html": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.8.0.tgz", - "integrity": "sha512-Uih6caOm3DsBYnVGOYn0A9NoTNe1c4aPStmHC/YA2JrpP9kx//jzaRcIklFvSpvVQEcpl/ZCr4DgISSf/YxTvg==", + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.9.0.tgz", + "integrity": "sha512-4fP/kEcKNj2u39IzrxWYuf/FnCCwwQCpif6wwY6ROUS1EPRIfWJjGkY3HIowY1EX/VbX5e86yq8AAE7UPMgATg==", "dev": true, "requires": { "htmlparser2": "^8.0.0" @@ -24203,9 +24235,9 @@ } }, "@types/sinon": { - "version": "10.0.13", - "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.13.tgz", - "integrity": "sha512-UVjDqJblVNQYvVNUsj0PuYYw0ELRmgt1Nt5Vk0pT5f16ROGfcKJY8o1HVuMOJOpD727RrGB9EGvoaTQE5tgxZQ==", + "version": "10.0.15", + "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.15.tgz", + "integrity": "sha512-3lrFNQG0Kr2LDzvjyjB6AMJk4ge+8iYhQfdnSwIwlG88FUOV43kPcQqDZkDa/h3WSZy6i8Fr0BSjfQtB1B3xuQ==", "dev": true, "requires": { "@types/sinonjs__fake-timers": "*" @@ -26441,9 +26473,9 @@ "dev": true }, "downshift": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.2.0.tgz", - "integrity": "sha512-dEn1Sshe7iTelUhmdbmiJhtIiwIBxBV8p15PuvEBh0qZcHXZnEt0geuCIIkCL4+ooaKRuLE0Wc+Fz9SwWuBIyg==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.0.tgz", + "integrity": "sha512-VSoTVynTAsabou/hbZ6HJHUVhtBiVOjQoBsCPcQq5eAROIGP+9XKMp9asAKQ3cEcUP4oe0fFdD2pziUjhFY33Q==", "requires": { "@babel/runtime": "^7.14.8", "compute-scroll-into-view": "^2.0.4", @@ -26779,9 +26811,9 @@ } }, "eslint-config-prettier": { - "version": "8.6.0", - "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.6.0.tgz", - "integrity": "sha512-bAF0eLpLVqP5oEVUFKpMA+NnRFICwn9X8B5jrR9FcqnYBuPbqWEjTEspPWMj5ye6czoSLDweCzSo3Ko7gGrZaA==", + "version": "8.8.0", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.8.0.tgz", + "integrity": "sha512-wLbQiFre3tdGgpDv67NQKnJuTlcUVYHas3k+DZCc2U2BadthoEY4B7hLPvAxaqdyOGCzuLfii2fqGph10va7oA==", "dev": true, "requires": {} }, @@ -28650,9 +28682,9 @@ } }, "jest-canvas-mock": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.4.0.tgz", - "integrity": 
"sha512-mmMpZzpmLzn5vepIaHk5HoH3Ka4WykbSoLuG/EKoJd0x0ID/t+INo1l8ByfcUJuDM+RIsL4QDg/gDnBbrj2/IQ==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.5.1.tgz", + "integrity": "sha512-IVnRiz+v4EYn3ydM/pBo8GW/J+nU/Hg5gHBQQOUQhdRyNfvHnabB8ReqARLO0p+kvQghqr4V0tA92CF3JcUSRg==", "dev": true, "requires": { "cssfontparser": "^1.2.1", @@ -30498,9 +30530,9 @@ } }, "jquery": { - "version": "3.6.3", - "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.6.3.tgz", - "integrity": "sha512-bZ5Sy3YzKo9Fyc8wH2iIQK4JImJ6R0GWI9kL1/k7Z91ZBNgkRXE6U0JfHIizZbort8ZunhSI3jw9I6253ahKfg==" + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.0.tgz", + "integrity": "sha512-umpJ0/k8X0MvD1ds0P9SfowREz2LenHsQaxSohMZ5OMNEU2r0tf8pdeEFTHMFxWVxKNyU9rTtK3CWzUCTKJUeQ==" }, "jquery.flot.tooltip": { "version": "0.9.0", @@ -31015,11 +31047,11 @@ "integrity": "sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w==" }, "moment-timezone": { - "version": "0.5.40", - "resolved": "https://registry.npmjs.org/moment-timezone/-/moment-timezone-0.5.40.tgz", - "integrity": "sha512-tWfmNkRYmBkPJz5mr9GVDn9vRlVZOTe6yqY92rFxiOdWXbjaR0+9LwQnZGGuNR63X456NqmEkbskte8tWL5ePg==", + "version": "0.5.43", + "resolved": "https://registry.npmjs.org/moment-timezone/-/moment-timezone-0.5.43.tgz", + "integrity": "sha512-72j3aNyuIsDxdF1i7CEgV2FfxM1r6aaqJyLB2vwb33mXYyoyLly+F1zbWqhA3/bVIoJ4szlUoMbUnVdid32NUQ==", "requires": { - "moment": ">= 2.9.0" + "moment": "^2.29.4" } }, "moo": { @@ -31146,9 +31178,9 @@ } }, "nock": { - "version": "13.3.0", - "resolved": "https://registry.npmjs.org/nock/-/nock-13.3.0.tgz", - "integrity": "sha512-HHqYQ6mBeiMc+N038w8LkMpDCRquCHWeNmN3v6645P3NhN2+qXOBqvPqo7Rt1VyCMzKhJ733wZqw5B7cQVFNPg==", + "version": "13.3.1", + "resolved": "https://registry.npmjs.org/nock/-/nock-13.3.1.tgz", + "integrity": "sha512-vHnopocZuI93p2ccivFyGuUfzjq2fxNyNurp7816mlT5V5HF4SzXu8lvLrVzBbNqzs+ODooZ6OksuSUNM7Njkw==", "dev": true, "requires": { "debug": "^4.1.0", @@ -32479,9 +32511,9 @@ "dev": true }, "prettier": { - "version": "2.8.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.3.tgz", - "integrity": "sha512-tJ/oJ4amDihPoufT5sM0Z1SKEuKay8LfVAMlbbhnnkvt6BUserZylqo2PN+p9KeljLr0OHa2rXHU1T8reeoTrw==", + "version": "2.8.8", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.8.tgz", + "integrity": "sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==", "dev": true }, "prettier-linter-helpers": { @@ -34265,9 +34297,9 @@ "dev": true }, "sanitize-html": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.8.1.tgz", - "integrity": "sha512-qK5neD0SaMxGwVv5txOYv05huC3o6ZAA4h5+7nJJgWMNFUNRjcjLO6FpwAtKzfKCZ0jrG6xTk6eVFskbvOGblg==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.10.0.tgz", + "integrity": "sha512-JqdovUd81dG4k87vZt6uA6YhDfWkUGruUu/aPmXLxXi45gZExnt9Bnw/qeQU8oGf82vPyaE0vO4aH0PbobB9JQ==", "requires": { "deepmerge": "^4.2.2", "escape-string-regexp": "^4.0.0", @@ -34297,9 +34329,9 @@ "dev": true }, "sass": { - "version": "1.57.1", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.57.1.tgz", - "integrity": "sha512-O2+LwLS79op7GI0xZ8fqzF7X2m/m8WFfI02dHOdsK5R2ECeS5F62zrwg/relM1rjSLy7Vd/DiMNIvPrQGsA0jw==", + "version": "1.62.1", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.62.1.tgz", + "integrity": 
"sha512-NHpxIzN29MXvWiuswfc1W3I0N8SXBd8UR26WntmDlRYf0bSADnwnOjsyMZ3lMezSlArD33Vs3YFhp7dWvL770A==", "requires": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -35312,9 +35344,9 @@ "dev": true }, "ts-jest": { - "version": "29.0.5", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.0.5.tgz", - "integrity": "sha512-PL3UciSgIpQ7f6XjVOmbi96vmDHUqAyqDr8YxzopDqX3kfgYtX1cuNeBjP+L9sFXi6nzsGGA6R3fP3DDDJyrxA==", + "version": "29.1.0", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.0.tgz", + "integrity": "sha512-ZhNr7Z4PcYa+JjMl62ir+zPiNJfXJN6E8hSLnaUKhOgqcn8vb3e537cpkd0FuAfRK3sR1LSqM1MOhliXNgOFPA==", "dev": true, "requires": { "bs-logger": "0.x", @@ -35449,9 +35481,9 @@ } }, "typescript": { - "version": "4.9.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.4.tgz", - "integrity": "sha512-Uz+dTXYzxXXbsFpM86Wh3dKCxrQqUcVMxwU54orwlJjOpO3ao8L7j5lH+dWfTwgCwIuM9GQ2kvVotzYJMXTBZg==", + "version": "4.9.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz", + "integrity": "sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==", "dev": true }, "unbox-primitive": { diff --git a/web/ui/package.json b/web/ui/package.json index 22f2615ff8..d5663ee75e 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -16,16 +16,16 @@ "npm": ">=7.0.0" }, "devDependencies": { - "@types/jest": "^29.4.0", + "@types/jest": "^29.5.2", "@types/node": "^17.0.45", - "eslint-config-prettier": "^8.6.0", + "eslint-config-prettier": "^8.8.0", "eslint-config-react-app": "^7.0.1", "eslint-plugin-prettier": "^4.2.1", - "jest-canvas-mock": "^2.4.0", + "jest-canvas-mock": "^2.5.1", "jest-fetch-mock": "^3.0.3", "react-scripts": "^5.0.1", - "prettier": "^2.8.3", - "ts-jest": "^29.0.5", - "typescript": "^4.9.4" + "prettier": "^2.8.8", + "ts-jest": "^29.1.0", + "typescript": "^4.9.5" } } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index fc0d1e5fc3..d67209d8ef 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -3,31 +3,31 @@ "version": "0.44.0", "private": true, "dependencies": { - "@codemirror/autocomplete": "^6.4.0", - "@codemirror/commands": "^6.2.0", - "@codemirror/language": "^6.4.0", - "@codemirror/lint": "^6.1.0", - "@codemirror/search": "^6.2.3", - "@codemirror/state": "^6.2.0", - "@codemirror/view": "^6.7.3", + "@codemirror/autocomplete": "^6.7.1", + "@codemirror/commands": "^6.2.4", + "@codemirror/language": "^6.7.0", + "@codemirror/lint": "^6.2.2", + "@codemirror/search": "^6.5.0", + "@codemirror/state": "^6.2.1", + "@codemirror/view": "^6.13.0", "@forevolve/bootstrap-dark": "^2.1.1", - "@fortawesome/fontawesome-svg-core": "6.2.1", - "@fortawesome/free-solid-svg-icons": "6.2.1", + "@fortawesome/fontawesome-svg-core": "6.4.0", + "@fortawesome/free-solid-svg-icons": "6.4.0", "@fortawesome/react-fontawesome": "0.2.0", - "@lezer/lr": "^1.3.1", - "@lezer/highlight": "^1.1.3", - "@lezer/common": "^1.0.2", + "@lezer/lr": "^1.3.6", + "@lezer/highlight": "^1.1.6", + "@lezer/common": "^1.0.3", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", "@prometheus-io/codemirror-promql": "0.44.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", - "downshift": "^7.2.0", + "downshift": "^7.6.0", "http-proxy-middleware": "^2.0.6", - "jquery": "^3.6.3", + "jquery": "^3.7.0", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.29.4", - "moment-timezone": "^0.5.40", + "moment-timezone": "^0.5.43", "popper.js": "^1.14.3", "react": "^17.0.2", 
"react-copy-to-clipboard": "^5.1.0", @@ -37,8 +37,8 @@ "react-router-dom": "^5.3.4", "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", - "sanitize-html": "^2.8.1", - "sass": "1.57.1", + "sanitize-html": "^2.10.0", + "sass": "1.62.1", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" }, @@ -66,15 +66,15 @@ ], "devDependencies": { "@testing-library/react-hooks": "^7.0.2", - "@types/enzyme": "^3.10.12", + "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", "@types/jquery": "^3.5.16", - "@types/react": "^17.0.53", + "@types/react": "^17.0.60", "@types/react-copy-to-clipboard": "^5.0.4", - "@types/react-dom": "^17.0.18", + "@types/react-dom": "^17.0.20", "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.8.0", - "@types/sinon": "^10.0.13", + "@types/sanitize-html": "^2.9.0", + "@types/sinon": "^10.0.15", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "enzyme": "^3.11.0", "enzyme-to-json": "^3.6.2", diff --git a/web/web.go b/web/web.go index 27378b3b81..b9af2819b7 100644 --- a/web/web.go +++ b/web/web.go @@ -744,7 +744,7 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) { } func toFloat64(f *io_prometheus_client.MetricFamily) float64 { - m := *f.Metric[0] + m := f.Metric[0] if m.Gauge != nil { return m.Gauge.GetValue() } From 4268feb9d7b6be75359e9fe0dcf765cf73fedd17 Mon Sep 17 00:00:00 2001 From: Leo Q Date: Wed, 7 Jun 2023 20:28:13 +0800 Subject: [PATCH 47/93] add alert for sd refresh failure (#12410) * add alert for sd refresh failure Due to config error or sd service down, prometheus may fail to refresh sd resource, which may lead to scrape fail or irrelavant metrics. Signed-off-by: Leo Q * apply suggestions Signed-off-by: Leo Q --------- Signed-off-by: Leo Q --- documentation/prometheus-mixin/alerts.libsonnet | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/documentation/prometheus-mixin/alerts.libsonnet b/documentation/prometheus-mixin/alerts.libsonnet index 0ee5d83c7a..3efb0f27d1 100644 --- a/documentation/prometheus-mixin/alerts.libsonnet +++ b/documentation/prometheus-mixin/alerts.libsonnet @@ -20,6 +20,20 @@ description: 'Prometheus %(prometheusName)s has failed to reload its configuration.' % $._config, }, }, + { + alert: 'PrometheusSDRefreshFailure', + expr: ||| + increase(prometheus_sd_refresh_failures_total{%(prometheusSelector)s}[10m]) > 0 + ||| % $._config, + 'for': '20m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Failed Prometheus SD refresh.', + description: 'Prometheus %(prometheusName)s has failed to refresh SD with mechanism {{$labels.mechanism}}.' 
% $._config, + }, + }, { alert: 'PrometheusNotificationQueueRunningFull', expr: ||| From edfc97a77e74d0f3996fed16c8a1b29c2b10ed35 Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Wed, 7 Jun 2023 16:00:15 +0200 Subject: [PATCH 48/93] Bump UI version (#12440) Signed-off-by: Jesus Vazquez --- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/react-app/package.json | 4 ++-- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index e9c952c9cf..6c404a6c46 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.44.0", + "version": "0.45.0-rc.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.44.0", + "@prometheus-io/lezer-promql": "0.45.0-rc.0", "lru-cache": "^6.0.0" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 08249b418d..389eb88f4f 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.44.0", + "version": "0.45.0-rc.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index a672a17673..f80d2fd6e4 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -28,10 +28,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.44.0", + "version": "0.45.0-rc.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.44.0", + "@prometheus-io/lezer-promql": "0.45.0-rc.0", "lru-cache": "^6.0.0" }, "devDependencies": { @@ -61,7 +61,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.44.0", + "version": "0.45.0-rc.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.2.3", @@ -20765,7 +20765,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.44.0", + "version": "0.45.0-rc.0", "dependencies": { "@codemirror/autocomplete": "^6.7.1", "@codemirror/commands": "^6.2.4", @@ -20783,7 +20783,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0", + "@prometheus-io/codemirror-promql": "0.45.0-rc.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", @@ -23423,7 +23423,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0", + "@prometheus-io/codemirror-promql": "0.45.0-rc.0", "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", @@ -23487,7 +23487,7 @@ "@lezer/common": "^1.0.3", "@lezer/highlight": "^1.1.6", "@lezer/lr": "^1.3.6", - "@prometheus-io/lezer-promql": "0.44.0", + "@prometheus-io/lezer-promql": "0.45.0-rc.0", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", "lru-cache": "^6.0.0", diff --git a/web/ui/react-app/package.json 
b/web/ui/react-app/package.json index d67209d8ef..d8efd56810 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.44.0", + "version": "0.45.0-rc.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.7.1", @@ -19,7 +19,7 @@ "@lezer/common": "^1.0.3", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0", + "@prometheus-io/codemirror-promql": "0.45.0-rc.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", From 344c8ff97ce261dbaaf2720f1e5164a8fee19184 Mon Sep 17 00:00:00 2001 From: Michael Hoffmann Date: Wed, 7 Jun 2023 22:54:30 +0200 Subject: [PATCH 49/93] feat: dont compile regex matcher if we know its a literal (#12434) labels: dont compile regex matcher if we know its a literal Signed-off-by: Michael Hoffmann Co-authored-by: Sharad --- model/labels/matcher_test.go | 18 ++++++++++++++---- model/labels/regexp.go | 17 +++++++++++++++++ 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/model/labels/matcher_test.go b/model/labels/matcher_test.go index 14615a50d1..d26e9329f2 100644 --- a/model/labels/matcher_test.go +++ b/model/labels/matcher_test.go @@ -102,12 +102,12 @@ func TestInverse(t *testing.T) { expected: &Matcher{Type: MatchEqual, Name: "name2", Value: "value2"}, }, { - matcher: &Matcher{Type: MatchRegexp, Name: "name3", Value: "value3"}, - expected: &Matcher{Type: MatchNotRegexp, Name: "name3", Value: "value3"}, + matcher: &Matcher{Type: MatchRegexp, Name: "name3", Value: "value3.*"}, + expected: &Matcher{Type: MatchNotRegexp, Name: "name3", Value: "value3.*"}, }, { - matcher: &Matcher{Type: MatchNotRegexp, Name: "name4", Value: "value4"}, - expected: &Matcher{Type: MatchRegexp, Name: "name4", Value: "value4"}, + matcher: &Matcher{Type: MatchNotRegexp, Name: "name4", Value: "value4.*"}, + expected: &Matcher{Type: MatchRegexp, Name: "name4", Value: "value4.*"}, }, } @@ -123,3 +123,13 @@ func BenchmarkMatchType_String(b *testing.B) { _ = MatchType(i % int(MatchNotRegexp+1)).String() } } + +func BenchmarkNewMatcher(b *testing.B) { + b.Run("regex matcher with literal", func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + for i := 0; i <= b.N; i++ { + NewMatcher(MatchRegexp, "foo", "bar") + } + }) +} diff --git a/model/labels/regexp.go b/model/labels/regexp.go index e09a63772f..14319c7f7a 100644 --- a/model/labels/regexp.go +++ b/model/labels/regexp.go @@ -25,9 +25,16 @@ type FastRegexMatcher struct { prefix string suffix string contains string + + // shortcut for literals + literal bool + value string } func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { + if isLiteral(v) { + return &FastRegexMatcher{literal: true, value: v}, nil + } re, err := regexp.Compile("^(?:" + v + ")$") if err != nil { return nil, err @@ -50,6 +57,9 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { } func (m *FastRegexMatcher) MatchString(s string) bool { + if m.literal { + return s == m.value + } if m.prefix != "" && !strings.HasPrefix(s, m.prefix) { return false } @@ -63,9 +73,16 @@ func (m *FastRegexMatcher) MatchString(s string) bool { } func (m *FastRegexMatcher) GetRegexString() string { + if m.literal { + return m.value + } return m.re.String() } +func isLiteral(re string) bool { + return regexp.QuoteMeta(re) == re +} + // optimizeConcatRegex returns literal prefix/suffix text that can be safely // checked against the label value before running the regexp matcher. 
func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) { From f7abe27fef92adc3239c7c3e4006f570b56e71b8 Mon Sep 17 00:00:00 2001 From: sinkingpoint Date: Fri, 9 Jun 2023 14:06:09 +1000 Subject: [PATCH 50/93] Remove trailing commas from Exemplar API docs The trailing commas here make this example invalid JSON. Here we remove them. Signed-off-by: sinkingpoint --- docs/querying/api.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index edce366ee6..b2b72d9cde 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -407,7 +407,7 @@ $ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metr "traceID": "EpTxMJ40fUus7aGY" }, "value": "6", - "timestamp": 1600096945.479, + "timestamp": 1600096945.479 } ] }, @@ -424,15 +424,15 @@ $ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metr "traceID": "Olp9XHlq763ccsfa" }, "value": "19", - "timestamp": 1600096955.479, + "timestamp": 1600096955.479 }, { "labels": { "traceID": "hCtjygkIHwAN9vs4" }, "value": "20", - "timestamp": 1600096965.489, - }, + "timestamp": 1600096965.489 + } ] } ] From 1ea477f4bcd04c580e51711dca346a06421b311c Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Mon, 12 Jun 2023 12:17:20 -0300 Subject: [PATCH 51/93] Add feature flag to squash metadata from /api/v1/metadata (#12391) Signed-off-by: ArthurSens --- docs/querying/api.md | 27 +++++++ web/api/v1/api.go | 16 ++++- web/api/v1/api_test.go | 160 ++++++++++++++++++++++++++++++++++++++--- 3 files changed, 193 insertions(+), 10 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index b2b72d9cde..ca7f64f622 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -863,6 +863,7 @@ GET /api/v1/metadata URL query parameters: - `limit=`: Maximum number of metrics to return. +- `limit_per_metric=`: Maximum number of metadata to return per metric. - `metric=`: A metric name to filter metadata for. All metric metadata is retrieved if left empty. The `data` section of the query result consists of an object where each key is a metric name and each value is a list of unique metadata objects, as exposed for that metric name across all targets. @@ -898,6 +899,32 @@ curl -G http://localhost:9090/api/v1/metadata?limit=2 } ``` +The following example returns only one metadata entry for each metric. + +```json +curl -G http://localhost:9090/api/v1/metadata?limit_per_metric=1 + +{ + "status": "success", + "data": { + "cortex_ring_tokens": [ + { + "type": "gauge", + "help": "Number of tokens in the ring", + "unit": "" + } + ], + "http_requests_total": [ + { + "type": "counter", + "help": "Number of HTTP requests", + "unit": "" + } + ] + } +} +``` + The following example returns metadata only for the metric `http_requests_total`. 
```json diff --git a/web/api/v1/api.go b/web/api/v1/api.go index f7249efb04..c74f6ee7ae 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1194,16 +1194,26 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a number")}, nil, nil} } } + limitPerMetric := -1 + if s := r.FormValue("limit_per_metric"); s != "" { + var err error + if limitPerMetric, err = strconv.Atoi(s); err != nil { + return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit_per_metric must be a number")}, nil, nil} + } + } metric := r.FormValue("metric") for _, tt := range api.targetRetriever(r.Context()).TargetsActive() { for _, t := range tt { - if metric == "" { for _, mm := range t.MetadataList() { m := metadata{Type: mm.Type, Help: mm.Help, Unit: mm.Unit} ms, ok := metrics[mm.Metric] + if limitPerMetric > 0 && len(ms) >= limitPerMetric { + continue + } + if !ok { ms = map[metadata]struct{}{} metrics[mm.Metric] = ms @@ -1217,6 +1227,10 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { m := metadata{Type: md.Type, Help: md.Help, Unit: md.Unit} ms, ok := metrics[md.Metric] + if limitPerMetric > 0 && len(ms) >= limitPerMetric { + continue + } + if !ok { ms = map[metadata]struct{}{} metrics[md.Metric] = ms diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index baee2189ef..16e74071c1 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -1023,15 +1023,16 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E } type test struct { - endpoint apiFunc - params map[string]string - query url.Values - response interface{} - responseLen int - errType errorType - sorter func(interface{}) - metadata []targetMetadata - exemplars []exemplar.QueryResult + endpoint apiFunc + params map[string]string + query url.Values + response interface{} + responseLen int + responseMetadataTotal int + errType errorType + sorter func(interface{}) + metadata []targetMetadata + exemplars []exemplar.QueryResult } tests := []test{ @@ -1776,6 +1777,126 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, responseLen: 2, }, + // With a limit for the number of metadata per metric. + { + endpoint: api.metricMetadata, + query: url.Values{"limit_per_metric": []string{"1"}}, + metadata: []targetMetadata{ + { + identifier: "test", + metadata: []scrape.MetricMetadata{ + { + Metric: "go_threads", + Type: textparse.MetricTypeGauge, + Help: "Number of OS threads created", + Unit: "", + }, + { + Metric: "go_threads", + Type: textparse.MetricTypeGauge, + Help: "Repeated metadata", + Unit: "", + }, + { + Metric: "go_gc_duration_seconds", + Type: textparse.MetricTypeSummary, + Help: "A summary of the GC invocation durations.", + Unit: "", + }, + }, + }, + }, + response: map[string][]metadata{ + "go_threads": { + {textparse.MetricTypeGauge, "Number of OS threads created", ""}, + }, + "go_gc_duration_seconds": { + {textparse.MetricTypeSummary, "A summary of the GC invocation durations.", ""}, + }, + }, + }, + // With a limit for the number of metadata per metric and per metric. 
+ { + endpoint: api.metricMetadata, + query: url.Values{"limit_per_metric": []string{"1"}, "limit": []string{"1"}}, + metadata: []targetMetadata{ + { + identifier: "test", + metadata: []scrape.MetricMetadata{ + { + Metric: "go_threads", + Type: textparse.MetricTypeGauge, + Help: "Number of OS threads created", + Unit: "", + }, + { + Metric: "go_threads", + Type: textparse.MetricTypeGauge, + Help: "Repeated metadata", + Unit: "", + }, + { + Metric: "go_gc_duration_seconds", + Type: textparse.MetricTypeSummary, + Help: "A summary of the GC invocation durations.", + Unit: "", + }, + }, + }, + }, + responseLen: 1, + responseMetadataTotal: 1, + }, + + // With a limit for the number of metadata per metric and per metric, while having multiple targets. + { + endpoint: api.metricMetadata, + query: url.Values{"limit_per_metric": []string{"1"}, "limit": []string{"1"}}, + metadata: []targetMetadata{ + { + identifier: "test", + metadata: []scrape.MetricMetadata{ + { + Metric: "go_threads", + Type: textparse.MetricTypeGauge, + Help: "Number of OS threads created", + Unit: "", + }, + { + Metric: "go_threads", + Type: textparse.MetricTypeGauge, + Help: "Repeated metadata", + Unit: "", + }, + { + Metric: "go_gc_duration_seconds", + Type: textparse.MetricTypeSummary, + Help: "A summary of the GC invocation durations.", + Unit: "", + }, + }, + }, + { + identifier: "secondTarget", + metadata: []scrape.MetricMetadata{ + { + Metric: "go_threads", + Type: textparse.MetricTypeGauge, + Help: "Number of OS threads created, but from a different target", + Unit: "", + }, + { + Metric: "go_gc_duration_seconds", + Type: textparse.MetricTypeSummary, + Help: "A summary of the GC invocation durations, but from a different target.", + Unit: "", + }, + }, + }, + }, + responseLen: 1, + responseMetadataTotal: 1, + }, // When requesting a specific metric that is present. { endpoint: api.metricMetadata, @@ -2565,6 +2686,9 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E if test.responseLen != 0 { assertAPIResponseLength(t, res.data, test.responseLen) + if test.responseMetadataTotal != 0 { + assertAPIResponseMetadataLen(t, res.data, test.responseMetadataTotal) + } } else { assertAPIResponse(t, res.data, test.response) } @@ -2615,6 +2739,24 @@ func assertAPIResponseLength(t *testing.T, got interface{}, expLen int) { } } +func assertAPIResponseMetadataLen(t *testing.T, got interface{}, expLen int) { + t.Helper() + + var gotLen int + response := got.(map[string][]metadata) + for _, m := range response { + gotLen += len(m) + } + + if gotLen != expLen { + t.Fatalf( + "Amount of metadata in the response does not match, expected:\n%d\ngot:\n%d", + expLen, + gotLen, + ) + } +} + type fakeDB struct { err error } From 81bf3e63a4663e27a935bab0167f73b9da3afeb5 Mon Sep 17 00:00:00 2001 From: Jayapriya Pai Date: Mon, 12 Jun 2023 21:39:19 +0530 Subject: [PATCH 52/93] docs: update prometheus-operator link Signed-off-by: Jayapriya Pai --- docs/configuration/configuration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 3a9ace2b6c..4287272f65 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -2160,7 +2160,7 @@ attach_metadata: See [this example Prometheus configuration file](/documentation/examples/prometheus-kubernetes.yml) for a detailed example of configuring Prometheus for Kubernetes. 
-You may wish to check out the 3rd party [Prometheus Operator](https://github.com/coreos/prometheus-operator), +You may wish to check out the 3rd party [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), which automates the Prometheus setup on top of Kubernetes. ### `` From 06843db80a58bc679cf3c1863891abdd1c7cdeba Mon Sep 17 00:00:00 2001 From: Bartol Deak Date: Mon, 12 Jun 2023 22:56:44 +0200 Subject: [PATCH 53/93] Hide `which` stderr output Signed-off-by: Bartol Deak --- Makefile | 2 +- Makefile.common | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 3877ee719e..2816c98014 100644 --- a/Makefile +++ b/Makefile @@ -82,7 +82,7 @@ assets-tarball: assets .PHONY: parser parser: @echo ">> running goyacc to generate the .go file." -ifeq (, $(shell which goyacc)) +ifeq (, $(shell which goyacc 2>/dev/null)) @echo "goyacc not installed so skipping" @echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0" else diff --git a/Makefile.common b/Makefile.common index b111d25620..2bae0efab5 100644 --- a/Makefile.common +++ b/Makefile.common @@ -178,7 +178,7 @@ endif .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell which yamllint)) +ifeq (, $(shell which yamllint 2>/dev/null)) @echo "yamllint not installed so skipping" else yamllint . From 6a18962cfac3445693c78b157976e31c2bb81f12 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Tue, 13 Jun 2023 10:38:00 +0200 Subject: [PATCH 54/93] mv labels_string.go labels_stringlabels.go (#12328) This is a minor cosmetical change, but my IDE (and I guess many of them) nests `labels_string.go` under `labels.go` because it assumes it's the file generated by the `stringer` tool, which follows that naming pattern. Signed-off-by: Oleg Zaytsev --- model/labels/{labels_string.go => labels_stringlabels.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename model/labels/{labels_string.go => labels_stringlabels.go} (100%) diff --git a/model/labels/labels_string.go b/model/labels/labels_stringlabels.go similarity index 100% rename from model/labels/labels_string.go rename to model/labels/labels_stringlabels.go From 95b7d592bae8542c58efa92bb67777a047add231 Mon Sep 17 00:00:00 2001 From: Bartol Deak Date: Tue, 13 Jun 2023 11:57:52 +0200 Subject: [PATCH 55/93] rewrite `which` with `command -v` Signed-off-by: Bartol Deak --- Makefile | 2 +- Makefile.common | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 2816c98014..0dd8673af3 100644 --- a/Makefile +++ b/Makefile @@ -82,7 +82,7 @@ assets-tarball: assets .PHONY: parser parser: @echo ">> running goyacc to generate the .go file." 
-ifeq (, $(shell which goyacc 2>/dev/null)) +ifeq (, $(shell command -v goyacc > /dev/null)) @echo "goyacc not installed so skipping" @echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0" else diff --git a/Makefile.common b/Makefile.common index 2bae0efab5..e372d34738 100644 --- a/Makefile.common +++ b/Makefile.common @@ -49,7 +49,7 @@ endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) -ifneq ($(shell which gotestsum),) +ifneq ($(shell command -v gotestsum > /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif @@ -178,7 +178,7 @@ endif .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell which yamllint 2>/dev/null)) +ifeq (, $(shell command -v yamllint > /dev/null)) @echo "yamllint not installed so skipping" else yamllint . From 0941ea4afcc6df7d70fa9578bc9403c5fec8ea1c Mon Sep 17 00:00:00 2001 From: tyltr Date: Fri, 16 Jun 2023 11:09:19 +0800 Subject: [PATCH 56/93] typo Signed-off-by: tyltr --- cmd/prometheus/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index e05ac79570..3d723f1529 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -429,7 +429,7 @@ func main() { _, err := a.Parse(os.Args[1:]) if err != nil { - fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err)) + fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing command line arguments: %w", err)) a.Usage(os.Args[1:]) os.Exit(2) } From 0c6cf868287b47b01272caa1a73e21a07befc0a0 Mon Sep 17 00:00:00 2001 From: timmartin-stripe <131782471+timmartin-stripe@users.noreply.github.com> Date: Fri, 16 Jun 2023 06:55:41 -0400 Subject: [PATCH 57/93] Add sentence explaining what happens when the `for` clause is omitted (#12457) Just adding a statement here explaining that the default is an immediate move to "active" without a pending state. Signed-off-by: Tim Martin --- docs/configuration/alerting_rules.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/configuration/alerting_rules.md b/docs/configuration/alerting_rules.md index 74f6c02b12..3c1ec84f0f 100644 --- a/docs/configuration/alerting_rules.md +++ b/docs/configuration/alerting_rules.md @@ -32,7 +32,11 @@ groups: ``` The optional `for` clause causes Prometheus to wait for a certain duration -between first encountering a new expression output vector element and counting an alert as firing for this element. In this case, Prometheus will check that the alert continues to be active during each evaluation for 10 minutes before firing the alert. Elements that are active, but not firing yet, are in the pending state. +between first encountering a new expression output vector element and counting +an alert as firing for this element. In this case, Prometheus will check that +the alert continues to be active during each evaluation for 10 minutes before +firing the alert. Elements that are active, but not firing yet, are in the pending state. +Alerting rules without the `for` clause will become active on the first evaluation. The `labels` clause allows specifying a set of additional labels to be attached to the alert. Any existing conflicting labels will be overwritten. 
The label From 0de855508c314ddfd50fb48851be384c61e65501 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 30 May 2023 15:22:24 +0200 Subject: [PATCH 58/93] Add support for inline TLS certificates Signed-off-by: Julien Pivotto --- docs/configuration/configuration.md | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 3a9ace2b6c..db41c3247d 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -427,11 +427,16 @@ Where `` must be unique across all scrape configurations. A `tls_config` allows configuring TLS connections. ```yaml -# CA certificate to validate API server certificate with. +# CA certificate to validate API server certificate with. At most one of ca and ca_file is allowed. +[ ca: ] [ ca_file: ] -# Certificate and key files for client cert authentication to the server. +# Certificate and key for client cert authentication to the server. +# At most one of cert and cert_file is allowed. +# At most one of key and key_file is allowed. +[ cert: ] [ cert_file: ] +[ key: ] [ key_file: ] # ServerName extension to indicate the name of the server. From 9b6355f02eee0df38a7c797a02fa5fa47242fd65 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 17 Jun 2023 08:18:31 +0000 Subject: [PATCH 59/93] build(deps): bump golang.org/x/oauth2 from 0.8.0 to 0.9.0 Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.8.0 to 0.9.0. - [Commits](https://github.com/golang/oauth2/compare/v0.8.0...v0.9.0) --- updated-dependencies: - dependency-name: golang.org/x/oauth2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index dd75cc333d..7d3ed3ab89 100644 --- a/go.mod +++ b/go.mod @@ -64,10 +64,10 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.2 go.uber.org/goleak v1.2.1 - golang.org/x/net v0.10.0 - golang.org/x/oauth2 v0.8.0 + golang.org/x/net v0.11.0 + golang.org/x/oauth2 v0.9.0 golang.org/x/sync v0.2.0 - golang.org/x/sys v0.8.0 + golang.org/x/sys v0.9.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.9.3 google.golang.org/api v0.114.0 @@ -178,11 +178,11 @@ require ( go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - golang.org/x/crypto v0.8.0 // indirect + golang.org/x/crypto v0.10.0 // indirect golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 golang.org/x/mod v0.10.0 // indirect - golang.org/x/term v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/term v0.9.0 // indirect + golang.org/x/text v0.10.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 774deb85be..f3583ccb19 100644 --- a/go.sum +++ b/go.sum @@ -844,8 +844,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= -golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -928,8 +928,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -937,8 +937,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 
v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs= +golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1021,14 +1021,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1039,8 +1039,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From f28d7d23cdd279f4530fcc66c8ff248aa5e57a56 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 17 Jun 2023 08:18:50 +0000 Subject: [PATCH 60/93] build(deps): bump github.com/stretchr/testify Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.8.2 to 1.8.4. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.8.2...v1.8.4) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 2 +- documentation/examples/remote_storage/go.sum | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index c0d4331968..2f28619474 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -11,7 +11,7 @@ require ( github.com/prometheus/client_golang v1.15.0 github.com/prometheus/common v0.42.0 github.com/prometheus/prometheus v0.44.0 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.4 ) require ( diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index e0eac05c10..c540d8984f 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -211,16 +211,12 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= From 6adb64a9669e50819ac0b344aea591df3e6fbc15 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 17 Jun 2023 08:19:02 +0000 Subject: [PATCH 61/93] build(deps): bump github.com/aws/aws-sdk-go 
from 1.44.276 to 1.44.284 Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.276 to 1.44.284. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.276...v1.44.284) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index dd75cc333d..ea61b52151 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/Azure/go-autorest/autorest/adal v0.9.23 github.com/alecthomas/kingpin/v2 v2.3.2 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aws/aws-sdk-go v1.44.276 + github.com/aws/aws-sdk-go v1.44.284 github.com/cespare/xxhash/v2 v2.2.0 github.com/dennwc/varint v1.0.0 github.com/digitalocean/godo v1.99.0 diff --git a/go.sum b/go.sum index 774deb85be..bf091742a9 100644 --- a/go.sum +++ b/go.sum @@ -106,8 +106,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.276 h1:ywPlx9C5Yc482dUgAZ9bHpQ6onVvJvYE9FJWsNDCEy0= -github.com/aws/aws-sdk-go v1.44.276/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.284 h1:Oc5Kubi43/VCkerlt3ZU3KpBju6BpNkoG3s7E8vj/O8= +github.com/aws/aws-sdk-go v1.44.284/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From 3eaa7eb538a14bfbfbb1f6e11c4f0d5a4878b10e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Mon, 19 Jun 2023 10:44:24 +0200 Subject: [PATCH 62/93] fix: apply suggested changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- util/fmtutil/format.go | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/util/fmtutil/format.go b/util/fmtutil/format.go index 291308dc23..5cff0516b9 100644 --- a/util/fmtutil/format.go +++ b/util/fmtutil/format.go @@ -44,8 +44,8 @@ var MetricMetadataTypeValue = map[string]int32{ "STATESET": 7, } -// FormatMetrics convert metric family to a writerequest. -func FormatMetrics(mf map[string]*dto.MetricFamily, extraLabels map[string]string) (*prompb.WriteRequest, error) { +// CreateWriteRequest convert metric family to a writerequest. 
+func CreateWriteRequest(mf map[string]*dto.MetricFamily, extraLabels map[string]string) (*prompb.WriteRequest, error) { wr := &prompb.WriteRequest{} // build metric list @@ -76,16 +76,16 @@ func FormatMetrics(mf map[string]*dto.MetricFamily, extraLabels map[string]strin return wr, nil } -func makeTimeserie(wr *prompb.WriteRequest, labels map[string]string, timestamp int64, value float64) { - var timeserie prompb.TimeSeries - timeserie.Labels = makeLabels(labels) - timeserie.Samples = []prompb.Sample{ +func toTimeseries(wr *prompb.WriteRequest, labels map[string]string, timestamp int64, value float64) { + var ts prompb.TimeSeries + ts.Labels = makeLabels(labels) + ts.Samples = []prompb.Sample{ { Timestamp: timestamp, Value: value, }, } - wr.Timeseries = append(wr.Timeseries, timeserie) + wr.Timeseries = append(wr.Timeseries, ts) } func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Metric) error { @@ -98,9 +98,9 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me switch { case m.Gauge != nil: - makeTimeserie(wr, labels, timestamp, m.GetGauge().GetValue()) + toTimeseries(wr, labels, timestamp, m.GetGauge().GetValue()) case m.Counter != nil: - makeTimeserie(wr, labels, timestamp, m.GetCounter().GetValue()) + toTimeseries(wr, labels, timestamp, m.GetCounter().GetValue()) case m.Summary != nil: metricName := labels[model.MetricNameLabel] // Preserve metric name order with first quantile labels timeseries then sum suffix timeserie and finally count suffix timeserie @@ -112,15 +112,15 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me for _, q := range m.GetSummary().Quantile { quantileLabels[model.QuantileLabel] = fmt.Sprint(q.GetQuantile()) - makeTimeserie(wr, quantileLabels, timestamp, q.GetValue()) + toTimeseries(wr, quantileLabels, timestamp, q.GetValue()) } // Overwrite label model.MetricNameLabel for count and sum metrics // Add Summary sum timeserie labels[model.MetricNameLabel] = metricName + sumStr - makeTimeserie(wr, labels, timestamp, m.GetSummary().GetSampleSum()) + toTimeseries(wr, labels, timestamp, m.GetSummary().GetSampleSum()) // Add Summary count timeserie labels[model.MetricNameLabel] = metricName + countStr - makeTimeserie(wr, labels, timestamp, float64(m.GetSummary().GetSampleCount())) + toTimeseries(wr, labels, timestamp, float64(m.GetSummary().GetSampleCount())) case m.Histogram != nil: metricName := labels[model.MetricNameLabel] @@ -133,18 +133,18 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me for _, b := range m.GetHistogram().Bucket { bucketLabels[model.MetricNameLabel] = metricName + bucketStr bucketLabels[model.BucketLabel] = fmt.Sprint(b.GetUpperBound()) - makeTimeserie(wr, bucketLabels, timestamp, float64(b.GetCumulativeCount())) + toTimeseries(wr, bucketLabels, timestamp, float64(b.GetCumulativeCount())) } // Overwrite label model.MetricNameLabel for count and sum metrics // Add Histogram sum timeserie labels[model.MetricNameLabel] = metricName + sumStr - makeTimeserie(wr, labels, timestamp, m.GetHistogram().GetSampleSum()) + toTimeseries(wr, labels, timestamp, m.GetHistogram().GetSampleSum()) // Add Histogram count timeserie labels[model.MetricNameLabel] = metricName + countStr - makeTimeserie(wr, labels, timestamp, float64(m.GetHistogram().GetSampleCount())) + toTimeseries(wr, labels, timestamp, float64(m.GetHistogram().GetSampleCount())) case m.Untyped != nil: - makeTimeserie(wr, labels, timestamp, m.GetUntyped().GetValue()) + 
toTimeseries(wr, labels, timestamp, m.GetUntyped().GetValue()) default: err = errors.New("unsupported metric type") } @@ -208,5 +208,5 @@ func ParseMetricsTextAndFormat(input io.Reader, labels map[string]string) (*prom if err != nil { return nil, err } - return FormatMetrics(mf, labels) + return CreateWriteRequest(mf, labels) } From 8bc2a19469119cccf51dd067efdb413e1150ca46 Mon Sep 17 00:00:00 2001 From: Matthias Loibl Date: Mon, 19 Jun 2023 17:40:15 +0200 Subject: [PATCH 63/93] web: Initialize requestCounter metrics to 0 with handler and 200k labels. Signed-off-by: Matthias Loibl --- web/web.go | 1 + 1 file changed, 1 insertion(+) diff --git a/web/web.go b/web/web.go index b9af2819b7..ddb5430eda 100644 --- a/web/web.go +++ b/web/web.go @@ -158,6 +158,7 @@ func (m *metrics) instrumentHandlerWithPrefix(prefix string) func(handlerName st } func (m *metrics) instrumentHandler(handlerName string, handler http.HandlerFunc) http.HandlerFunc { + m.requestCounter.WithLabelValues(handlerName, "200").Add(0) return promhttp.InstrumentHandlerCounter( m.requestCounter.MustCurryWith(prometheus.Labels{"handler": handlerName}), promhttp.InstrumentHandlerDuration( From d19fa62476ada4ec6eb4399c93d0e11cca2aece9 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 20 Jun 2023 13:39:32 +0200 Subject: [PATCH 64/93] Revert "Improving Performance on the API Gzip Handler (#12363)" This reverts commit dfae954dc1137568f33564e8cffda321f2867925. Signed-off-by: Julien Pivotto --- go.mod | 1 - go.sum | 2 - util/httputil/compression.go | 74 +++++++++++-------- util/httputil/compression_test.go | 115 +++--------------------------- 4 files changed, 52 insertions(+), 140 deletions(-) diff --git a/go.mod b/go.mod index dd75cc333d..9e4ab7ce95 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,6 @@ require ( github.com/hetznercloud/hcloud-go v1.45.1 github.com/ionos-cloud/sdk-go/v6 v6.1.7 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.16.5 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/linode/linodego v1.17.0 github.com/miekg/dns v1.1.54 diff --git a/go.sum b/go.sum index 774deb85be..897e6acff6 100644 --- a/go.sum +++ b/go.sum @@ -507,8 +507,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= -github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= diff --git a/util/httputil/compression.go b/util/httputil/compression.go index 5e9276958d..b96c088cbb 100644 --- a/util/httputil/compression.go +++ b/util/httputil/compression.go @@ -14,11 +14,11 @@ package httputil import ( + "compress/gzip" + "compress/zlib" + "io" "net/http" "strings" - - "github.com/klauspost/compress/gzhttp" - "github.com/klauspost/compress/zlib" ) const ( @@ -28,27 +28,53 @@ const ( deflateEncoding = "deflate" ) -// Wrapper around http.ResponseWriter which adds deflate 
compression -type deflatedResponseWriter struct { +// Wrapper around http.Handler which adds suitable response compression based +// on the client's Accept-Encoding headers. +type compressedResponseWriter struct { http.ResponseWriter - writer *zlib.Writer + writer io.Writer } // Writes HTTP response content data. -func (c *deflatedResponseWriter) Write(p []byte) (int, error) { +func (c *compressedResponseWriter) Write(p []byte) (int, error) { return c.writer.Write(p) } -// Close Closes the deflatedResponseWriter and ensures to flush all data before. -func (c *deflatedResponseWriter) Close() { - c.writer.Close() +// Closes the compressedResponseWriter and ensures to flush all data before. +func (c *compressedResponseWriter) Close() { + if zlibWriter, ok := c.writer.(*zlib.Writer); ok { + zlibWriter.Flush() + } + if gzipWriter, ok := c.writer.(*gzip.Writer); ok { + gzipWriter.Flush() + } + if closer, ok := c.writer.(io.Closer); ok { + defer closer.Close() + } } -// Constructs a new deflatedResponseWriter to compress the original writer using 'deflate' compression. -func newDeflateResponseWriter(writer http.ResponseWriter) *deflatedResponseWriter { - return &deflatedResponseWriter{ +// Constructs a new compressedResponseWriter based on client request headers. +func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter { + encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",") + for _, encoding := range encodings { + switch strings.TrimSpace(encoding) { + case gzipEncoding: + writer.Header().Set(contentEncodingHeader, gzipEncoding) + return &compressedResponseWriter{ + ResponseWriter: writer, + writer: gzip.NewWriter(writer), + } + case deflateEncoding: + writer.Header().Set(contentEncodingHeader, deflateEncoding) + return &compressedResponseWriter{ + ResponseWriter: writer, + writer: zlib.NewWriter(writer), + } + } + } + return &compressedResponseWriter{ ResponseWriter: writer, - writer: zlib.NewWriter(writer), + writer: writer, } } @@ -60,21 +86,7 @@ type CompressionHandler struct { // ServeHTTP adds compression to the original http.Handler's ServeHTTP() method. 
func (c CompressionHandler) ServeHTTP(writer http.ResponseWriter, req *http.Request) { - encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",") - for _, encoding := range encodings { - switch strings.TrimSpace(encoding) { - case gzipEncoding: - gzhttp.GzipHandler(c.Handler).ServeHTTP(writer, req) - return - case deflateEncoding: - compWriter := newDeflateResponseWriter(writer) - writer.Header().Set(contentEncodingHeader, deflateEncoding) - c.Handler.ServeHTTP(compWriter, req) - compWriter.Close() - return - default: - c.Handler.ServeHTTP(writer, req) - return - } - } + compWriter := newCompressedResponseWriter(writer, req) + c.Handler.ServeHTTP(compWriter, req) + compWriter.Close() } diff --git a/util/httputil/compression_test.go b/util/httputil/compression_test.go index b7148fc1cc..8512797613 100644 --- a/util/httputil/compression_test.go +++ b/util/httputil/compression_test.go @@ -17,30 +17,23 @@ import ( "bytes" "compress/gzip" "compress/zlib" - "encoding/json" - "fmt" "io" "net/http" "net/http/httptest" - "strings" "testing" "github.com/stretchr/testify/require" - - "github.com/prometheus/prometheus/model/labels" ) var ( - mux *http.ServeMux - server *httptest.Server - respBody = strings.Repeat("Hello World!", 500) + mux *http.ServeMux + server *httptest.Server ) func setup() func() { mux = http.NewServeMux() server = httptest.NewServer(mux) return func() { - server.CloseClientConnections() server.Close() } } @@ -48,7 +41,7 @@ func setup() func() { func getCompressionHandlerFunc() CompressionHandler { hf := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(respBody)) + w.Write([]byte("Hello World!")) } return CompressionHandler{ Handler: http.HandlerFunc(hf), @@ -74,8 +67,9 @@ func TestCompressionHandler_PlainText(t *testing.T) { contents, err := io.ReadAll(resp.Body) require.NoError(t, err, "unexpected error while creating the response body reader") + expected := "Hello World!" actual := string(contents) - require.Equal(t, respBody, actual, "expected response with content") + require.Equal(t, expected, actual, "expected response with content") } func TestCompressionHandler_Gzip(t *testing.T) { @@ -109,7 +103,8 @@ func TestCompressionHandler_Gzip(t *testing.T) { require.NoError(t, err, "unexpected error while reading the response body") actual := buf.String() - require.Equal(t, respBody, actual, "unexpected response content") + expected := "Hello World!" 
+ require.Equal(t, expected, actual, "unexpected response content") } func TestCompressionHandler_Deflate(t *testing.T) { @@ -143,98 +138,6 @@ func TestCompressionHandler_Deflate(t *testing.T) { require.NoError(t, err, "unexpected error while reading the response body") actual := buf.String() - require.Equal(t, respBody, actual, "expected response with content") -} - -func Benchmark_compression(b *testing.B) { - client := &http.Client{ - Transport: &http.Transport{ - DisableCompression: true, - }, - } - - cases := map[string]struct { - enc string - numberOfLabels int - }{ - "gzip-10-labels": { - enc: gzipEncoding, - numberOfLabels: 10, - }, - "gzip-100-labels": { - enc: gzipEncoding, - numberOfLabels: 100, - }, - "gzip-1K-labels": { - enc: gzipEncoding, - numberOfLabels: 1000, - }, - "gzip-10K-labels": { - enc: gzipEncoding, - numberOfLabels: 10000, - }, - "gzip-100K-labels": { - enc: gzipEncoding, - numberOfLabels: 100000, - }, - "gzip-1M-labels": { - enc: gzipEncoding, - numberOfLabels: 1000000, - }, - } - - for name, tc := range cases { - b.Run(name, func(b *testing.B) { - tearDown := setup() - defer tearDown() - labels := labels.ScratchBuilder{} - - for i := 0; i < tc.numberOfLabels; i++ { - labels.Add(fmt.Sprintf("Name%v", i), fmt.Sprintf("Value%v", i)) - } - - respBody, err := json.Marshal(labels.Labels()) - require.NoError(b, err) - - hf := func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - w.Write(respBody) - } - h := CompressionHandler{ - Handler: http.HandlerFunc(hf), - } - - mux.Handle("/foo_endpoint", h) - - req, _ := http.NewRequest("GET", server.URL+"/foo_endpoint", nil) - req.Header.Set(acceptEncodingHeader, tc.enc) - - b.ReportAllocs() - b.ResetTimer() - - // Reusing the array to read the body and avoid allocation on the test - encRespBody := make([]byte, len(respBody)) - - for i := 0; i < b.N; i++ { - resp, err := client.Do(req) - - require.NoError(b, err) - - require.NoError(b, err, "client get failed with unexpected error") - responseBodySize := 0 - for { - n, err := resp.Body.Read(encRespBody) - responseBodySize += n - if err == io.EOF { - break - } - } - - b.ReportMetric(float64(responseBodySize), "ContentLength") - resp.Body.Close() - } - - client.CloseIdleConnections() - }) - } + expected := "Hello World!" + require.Equal(t, expected, actual, "expected response with content") } From c858049744a8cdf684a54cf67b8d16a41d69d81d Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Tue, 20 Jun 2023 17:13:02 +0200 Subject: [PATCH 65/93] Create 2.45.0-rc.1 (#12478) Signed-off-by: Jesus Vazquez --- CHANGELOG.md | 5 +++++ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/react-app/package.json | 4 ++-- 6 files changed, 18 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3b4ee3fc17..e4b61b13f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ # Changelog + +## 2.45.0-rc.1 / 2023-06-20 + +* [ENHANCEMENT] Reverts previous enhancement to the API Gzip Handler due to higher cpu and memory usage. 
#12363 + ## 2.45.0-rc.0 / 2023-06-05 This release is a LTS (Long-Term Support) release of Prometheus and will diff --git a/VERSION b/VERSION index ae2ba732af..362f077e4f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.45.0-rc.0 +2.45.0-rc.1 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 6c404a6c46..f39187f9eb 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.45.0-rc.0", + "version": "0.45.0-rc.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.45.0-rc.0", + "@prometheus-io/lezer-promql": "0.45.0-rc.1", "lru-cache": "^6.0.0" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 389eb88f4f..1b6a1cebda 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.45.0-rc.0", + "version": "0.45.0-rc.1", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index f80d2fd6e4..a773df1478 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -28,10 +28,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.45.0-rc.0", + "version": "0.45.0-rc.1", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.45.0-rc.0", + "@prometheus-io/lezer-promql": "0.45.0-rc.1", "lru-cache": "^6.0.0" }, "devDependencies": { @@ -61,7 +61,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.45.0-rc.0", + "version": "0.45.0-rc.1", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.2.3", @@ -20765,7 +20765,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.45.0-rc.0", + "version": "0.45.0-rc.1", "dependencies": { "@codemirror/autocomplete": "^6.7.1", "@codemirror/commands": "^6.2.4", @@ -20783,7 +20783,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.45.0-rc.0", + "@prometheus-io/codemirror-promql": "0.45.0-rc.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", @@ -23423,7 +23423,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.45.0-rc.0", + "@prometheus-io/codemirror-promql": "0.45.0-rc.1", "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", @@ -23487,7 +23487,7 @@ "@lezer/common": "^1.0.3", "@lezer/highlight": "^1.1.6", "@lezer/lr": "^1.3.6", - "@prometheus-io/lezer-promql": "0.45.0-rc.0", + "@prometheus-io/lezer-promql": "0.45.0-rc.1", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", "lru-cache": "^6.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index d8efd56810..6f75d6503f 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.45.0-rc.0", + "version": "0.45.0-rc.1", "private": 
true, "dependencies": { "@codemirror/autocomplete": "^6.7.1", @@ -19,7 +19,7 @@ "@lezer/common": "^1.0.3", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.45.0-rc.0", + "@prometheus-io/codemirror-promql": "0.45.0-rc.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", From 87d08abe110406d27870d50ed4e1cfa5f8f12206 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 20 Jun 2023 20:58:47 +0100 Subject: [PATCH 66/93] labels: faster Compare function when using -tags stringlabels (#12451) Instead of unpacking every individual string, we skip to the point where there is a difference, going 8 bytes at a time where possible. Add benchmark for Compare; extend tests too. --------- Signed-off-by: Bryan Boreham Co-authored-by: Oleg Zaytsev --- model/labels/labels_stringlabels.go | 68 +++++++++++++++----------- model/labels/labels_test.go | 76 +++++++++++++++++++++-------- 2 files changed, 96 insertions(+), 48 deletions(-) diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index e232ca7a4d..eb98fd6fb7 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -422,37 +422,49 @@ func FromStrings(ss ...string) Labels { // Compare compares the two label sets. // The result will be 0 if a==b, <0 if a < b, and >0 if a > b. -// TODO: replace with Less function - Compare is never needed. -// TODO: just compare the underlying strings when we don't need alphanumeric sorting. func Compare(a, b Labels) int { - l := len(a.data) - if len(b.data) < l { - l = len(b.data) - } - - ia, ib := 0, 0 - for ia < l { - var aName, bName string - aName, ia = decodeString(a.data, ia) - bName, ib = decodeString(b.data, ib) - if aName != bName { - if aName < bName { - return -1 - } - return 1 - } - var aValue, bValue string - aValue, ia = decodeString(a.data, ia) - bValue, ib = decodeString(b.data, ib) - if aValue != bValue { - if aValue < bValue { - return -1 - } - return 1 + // Find the first byte in the string where a and b differ. + shorter, longer := a.data, b.data + if len(b.data) < len(a.data) { + shorter, longer = b.data, a.data + } + i := 0 + // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. + sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) + lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) + for ; i < len(shorter)-8; i += 8 { + if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { + break + } + } + // Now go 1 byte at a time. + for ; i < len(shorter); i++ { + if shorter[i] != longer[i] { + break } } - // If all labels so far were in common, the set with fewer labels comes first. - return len(a.data) - len(b.data) + if i == len(shorter) { + // One Labels was a prefix of the other; the set with fewer labels compares lower. + return len(a.data) - len(b.data) + } + + // Now we know that there is some difference before the end of a and b. + // Go back through the fields and find which field that difference is in. + firstCharDifferent := i + for i = 0; ; { + size, nextI := decodeSize(a.data, i) + if nextI+size > firstCharDifferent { + break + } + i = nextI + size + } + // Difference is inside this entry. + aStr, _ := decodeString(a.data, i) + bStr, _ := decodeString(b.data, i) + if aStr < bStr { + return -1 + } + return +1 } // Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. 
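The commit message for this Compare change describes the optimisation only in prose ("going 8 bytes at a time where possible"), so a minimal illustration may be useful here. The standalone Go sketch below shows the same word-at-a-time prefix scan on ordinary strings; it is not the patched code, which works on the packed stringlabels encoding through unsafe pointers. The `firstDiff` helper, the `encoding/binary` loads, and the example inputs are assumptions made purely for illustration.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// firstDiff returns the index of the first byte at which a and b differ,
// or min(len(a), len(b)) when one string is a prefix of the other.
func firstDiff(a, b string) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	i := 0
	// Compare eight bytes at a time while a full word is available in both
	// strings. For an equality test the byte order of the load is irrelevant;
	// LittleEndian is used only as a convenient way to read 8 bytes as one uint64.
	for ; i+8 <= n; i += 8 {
		if binary.LittleEndian.Uint64([]byte(a[i:i+8])) != binary.LittleEndian.Uint64([]byte(b[i:i+8])) {
			break
		}
	}
	// Finish byte by byte, either inside the differing word or in the tail.
	for ; i < n; i++ {
		if a[i] != b[i] {
			break
		}
	}
	return i
}

func main() {
	fmt.Println(firstDiff("aaa=111,bbb=222", "aaa=111,bbb=223")) // 14: differs in the last byte
	fmt.Println(firstDiff("aaa=111", "aaa=111,bbb=222"))         // 7: the shorter string is a prefix
}
```

Comparing two 8-byte loads for equality does not depend on byte order, so the sketch can use a plain `Uint64` load and only fall back to byte-wise comparison inside the word that actually differs, which is the same shape as the patched `Compare` above.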
diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index 108d8b0de0..f0f05734be 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -361,6 +361,18 @@ func TestLabels_Compare(t *testing.T) { "bbc", "222"), expected: -1, }, + { + compared: FromStrings( + "aaa", "111", + "bb", "222"), + expected: 1, + }, + { + compared: FromStrings( + "aaa", "111", + "bbbb", "222"), + expected: -1, + }, { compared: FromStrings( "aaa", "111"), @@ -380,6 +392,10 @@ func TestLabels_Compare(t *testing.T) { "bbb", "222"), expected: 0, }, + { + compared: EmptyLabels(), + expected: 1, + }, } sign := func(a int) int { @@ -395,6 +411,8 @@ func TestLabels_Compare(t *testing.T) { for i, test := range tests { got := Compare(labels, test.compared) require.Equal(t, sign(test.expected), sign(got), "unexpected comparison result for test case %d", i) + got = Compare(test.compared, labels) + require.Equal(t, -sign(test.expected), sign(got), "unexpected comparison result for reverse test case %d", i) } } @@ -468,27 +486,34 @@ func BenchmarkLabels_Get(b *testing.B) { } } +var comparisonBenchmarkScenarios = []struct { + desc string + base, other Labels +}{ + { + "equal", + FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + }, + { + "not equal", + FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStrings("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"), + }, + { + "different sizes", + FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStrings("a_label_name", "a_label_value"), + }, + { + "lots", + FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"), + FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"), + }, +} + func BenchmarkLabels_Equals(b *testing.B) { - for _, scenario := range []struct { - desc string - base, other Labels - }{ - { - "equal", - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - }, - { - "not equal", - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - FromStrings("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"), - }, - { - "different sizes", - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - FromStrings("a_label_name", "a_label_value"), - }, - } { + for _, scenario := range comparisonBenchmarkScenarios { b.Run(scenario.desc, func(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { @@ -498,6 +523,17 @@ func BenchmarkLabels_Equals(b *testing.B) { } } +func BenchmarkLabels_Compare(b *testing.B) { + for _, scenario := range comparisonBenchmarkScenarios { + b.Run(scenario.desc, func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = Compare(scenario.base, scenario.other) + } + }) + } +} + func TestLabels_Copy(t *testing.T) { require.Equal(t, FromStrings("aaa", "111", "bbb", "222"), FromStrings("aaa", "111", "bbb", "222").Copy()) } From be0b82cf2ffbe5558eaff537b23c0eaed152c76d Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Tue, 20 Jun 2023 22:20:01 +0200 Subject: [PATCH 
67/93] Fix PR typo in 2.45.0-rc.1 CHANGELOG (#12479) Signed-off-by: Jesus Vazquez --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e4b61b13f1..946073e61e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,7 @@ ## 2.45.0-rc.1 / 2023-06-20 -* [ENHANCEMENT] Reverts previous enhancement to the API Gzip Handler due to higher cpu and memory usage. #12363 +* [ENHANCEMENT] Reverts previous enhancement to the API Gzip Handler due to higher cpu and memory usage. #12476 ## 2.45.0-rc.0 / 2023-06-05 From 8ef767e396bf8445f009f945b0162fd71827f445 Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Fri, 23 Jun 2023 17:01:52 +0200 Subject: [PATCH 68/93] Release 2.45.0 (#12486) Signed-off-by: Jesus Vazquez --- CHANGELOG.md | 7 +------ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/react-app/package.json | 4 ++-- 6 files changed, 14 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 946073e61e..76b6e51b5f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,11 +1,7 @@ # Changelog -## 2.45.0-rc.1 / 2023-06-20 - -* [ENHANCEMENT] Reverts previous enhancement to the API Gzip Handler due to higher cpu and memory usage. #12476 - -## 2.45.0-rc.0 / 2023-06-05 +## 2.45.0 / 2023-06-23 This release is a LTS (Long-Term Support) release of Prometheus and will receive security, documentation and bugfix patches for at least 12 months. @@ -20,7 +16,6 @@ Please read more about our LTS release cycle at * [FEATURE] Promtool: When providing the block id, only one block will be loaded and analyzed. #12031 * [FEATURE] Remote-write: New Azure ad configuration to support remote writing directly to Azure Monitor workspace. #11944 * [FEATURE] TSDB: Samples per chunk are now configurable with flag `storage.tsdb.samples-per-chunk`. By default set to its former value 120. #12055 -* [ENHANCEMENT] API: Improving Performance on the API Gzip Handler. #12363 * [ENHANCEMENT] Native histograms: bucket size can now be limited to avoid scrape fails. #12254 * [ENHANCEMENT] TSDB: Dropped series are now deleted from the WAL sooner. #12297 * [BUGFIX] Native histograms: ChunkSeries iterator now checks if a new sample can be appended to the open chunk. 
#12185 diff --git a/VERSION b/VERSION index 362f077e4f..e599014eab 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.45.0-rc.1 +2.45.0 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index f39187f9eb..c7fafb1337 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.45.0-rc.1", + "version": "0.45.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.45.0-rc.1", + "@prometheus-io/lezer-promql": "0.45.0", "lru-cache": "^6.0.0" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 1b6a1cebda..847a9610e3 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.45.0-rc.1", + "version": "0.45.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index a773df1478..ac365d0d09 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -28,10 +28,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.45.0-rc.1", + "version": "0.45.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.45.0-rc.1", + "@prometheus-io/lezer-promql": "0.45.0", "lru-cache": "^6.0.0" }, "devDependencies": { @@ -61,7 +61,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.45.0-rc.1", + "version": "0.45.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.2.3", @@ -20765,7 +20765,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.45.0-rc.1", + "version": "0.45.0", "dependencies": { "@codemirror/autocomplete": "^6.7.1", "@codemirror/commands": "^6.2.4", @@ -20783,7 +20783,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.45.0-rc.1", + "@prometheus-io/codemirror-promql": "0.45.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", @@ -23423,7 +23423,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.45.0-rc.1", + "@prometheus-io/codemirror-promql": "0.45.0", "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", @@ -23487,7 +23487,7 @@ "@lezer/common": "^1.0.3", "@lezer/highlight": "^1.1.6", "@lezer/lr": "^1.3.6", - "@prometheus-io/lezer-promql": "0.45.0-rc.1", + "@prometheus-io/lezer-promql": "0.45.0", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", "lru-cache": "^6.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 6f75d6503f..81eba150a0 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.45.0-rc.1", + "version": "0.45.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.7.1", @@ -19,7 +19,7 @@ "@lezer/common": "^1.0.3", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - 
"@prometheus-io/codemirror-promql": "0.45.0-rc.1", + "@prometheus-io/codemirror-promql": "0.45.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", From 02277bbe0dd2d89fd3fe5f508399d54da9c61827 Mon Sep 17 00:00:00 2001 From: Matt Harbison Date: Fri, 23 Jun 2023 16:37:04 -0400 Subject: [PATCH 69/93] Fix path handling in File-SD watcher to allow directory monitoring on Windows Previously, `d.paths` were normalized to backslashes on Windows, even when if the config file used Unix style. The end result meant always watching `./`, so changes for this config were always ignored: scrape_configs: - job_name: 'envmsc1' file_sd_configs: - files: - 'targets/envmsc1.d/*.yml' - 'targets/envmsc1.d/*.yaml' Additionally, unlike the other platforms, no warning was emitted on startup about not being able to install the watch if the directory didn't exist. Now it is logged. Signed-off-by: Matt Harbison --- discovery/file/file.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/discovery/file/file.go b/discovery/file/file.go index c45595c6dd..60b63350f5 100644 --- a/discovery/file/file.go +++ b/discovery/file/file.go @@ -226,8 +226,8 @@ func (d *Discovery) watchFiles() { panic("no watcher configured") } for _, p := range d.paths { - if idx := strings.LastIndex(p, "/"); idx > -1 { - p = p[:idx] + if dir, _ := filepath.Split(p); dir != "" { + p = dir } else { p = "./" } From e1115ae58d069b3f7fd19ffc6b635a6c98882148 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 26 Jun 2023 18:35:22 +0100 Subject: [PATCH 70/93] labels: improve Get method for stringlabels build (#12485) Inline one call to `decodeString`, and skip decoding the value string until we find a match for the name. Do a quick check on the first character in each string, and exit early if we've gone past - labels are sorted in order. Also improve tests and benchmark: * labels: test Get with varying lengths - it's not typical for Prometheus labels to all be the same length. * extend benchmark with label not found --------- Signed-off-by: Bryan Boreham --- model/labels/labels_stringlabels.go | 24 +++++++++++++++++++----- model/labels/labels_test.go | 6 ++++-- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index eb98fd6fb7..223aa6ebf7 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -273,13 +273,27 @@ func (ls Labels) Copy() Labels { // Get returns the value for the label with the given name. // Returns an empty string if the label doesn't exist. func (ls Labels) Get(name string) string { + if name == "" { // Avoid crash in loop if someone asks for "". + return "" // Prometheus does not store blank label names. + } for i := 0; i < len(ls.data); { - var lName, lValue string - lName, i = decodeString(ls.data, i) - lValue, i = decodeString(ls.data, i) - if lName == name { - return lValue + var size int + size, i = decodeSize(ls.data, i) + if ls.data[i] == name[0] { + lName := ls.data[i : i+size] + i += size + if lName == name { + lValue, _ := decodeString(ls.data, i) + return lValue + } + } else { + if ls.data[i] > name[0] { // Stop looking if we've gone past. 
+ break + } + i += size } + size, i = decodeSize(ls.data, i) + i += size } return "" } diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index f0f05734be..d91be27cbc 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -443,7 +443,8 @@ func TestLabels_Has(t *testing.T) { func TestLabels_Get(t *testing.T) { require.Equal(t, "", FromStrings("aaa", "111", "bbb", "222").Get("foo")) - require.Equal(t, "111", FromStrings("aaa", "111", "bbb", "222").Get("aaa")) + require.Equal(t, "111", FromStrings("aaaa", "111", "bbb", "222").Get("aaaa")) + require.Equal(t, "222", FromStrings("aaaa", "111", "bbb", "222").Get("bbb")) } // BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation @@ -463,7 +464,7 @@ func BenchmarkLabels_Get(b *testing.B) { maxLabels := 30 allLabels := make([]Label, maxLabels) for i := 0; i < maxLabels; i++ { - allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5)} + allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5+(i%5))} } for _, size := range []int{5, 10, maxLabels} { b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) { @@ -474,6 +475,7 @@ func BenchmarkLabels_Get(b *testing.B) { {"get first label", allLabels[0].Name}, {"get middle label", allLabels[size/2].Name}, {"get last label", allLabels[size-1].Name}, + {"get not-found label", "benchmark"}, } { b.Run(scenario.desc, func(b *testing.B) { b.ResetTimer() From d78661ba108f7771e196d69dea00711d51a57893 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 27 Jun 2023 08:58:52 +0200 Subject: [PATCH 71/93] Stepping up as 2.46 release shepherd (#12494) Signed-off-by: Julien Pivotto --- RELEASE.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index f5c907fe99..0d0918191b 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -50,7 +50,9 @@ Release cadence of first pre-releases being cut is 6 weeks. | v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) | | v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) | | v2.45 LTS | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) | -| v2.46 | 2023-07-12 | **searching for volunteer** | +| v2.46 | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) | +| v2.47 | 2023-08-23 | **searching for volunteer** | +| v2.48 | 2023-10-04 | **searching for volunteer** | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. 
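The `labels.Get` optimisation in PATCH 70 above is written against the internal `decodeSize`/`decodeString` helpers of the packed encoding, which can obscure the early-exit idea. The sketch below is a simplified stand-in, assuming a plain slice of name/value pairs kept sorted by name rather than the packed string layout; the `label` type, the `get` function, and the sample label set are illustrative assumptions, not code from the patch.

```go
package main

import "fmt"

// label is a simplified stand-in for the packed label storage: a plain slice
// of name/value pairs that is kept sorted by name (names assumed non-empty).
type label struct{ name, value string }

// get returns the value for name, or "" if it is not present. Like the patched
// Labels.Get, it compares only the first byte of each stored name before doing
// a full string comparison, and it stops scanning as soon as that first byte is
// already greater than the first byte of the wanted name, since names are sorted.
func get(ls []label, name string) string {
	if name == "" { // blank label names are never stored
		return ""
	}
	for _, l := range ls {
		switch {
		case l.name[0] == name[0]:
			if l.name == name {
				return l.value
			}
		case l.name[0] > name[0]:
			return "" // sorted order: the wanted name cannot appear any more
		}
	}
	return ""
}

func main() {
	ls := []label{{"instance", "localhost:9090"}, {"job", "prometheus"}}
	fmt.Println(get(ls, "job"))   // "prometheus"
	fmt.Println(get(ls, "alert")) // "" after inspecting a single leading byte ('i' > 'a')
}
```

Because the names are sorted, a lookup for a missing name usually stops after inspecting a single leading byte, which is the effect the patch is after.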
From 58d38c4c5630fb73afb5403d03ede884ad7fd91c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Tue, 27 Jun 2023 09:30:39 +0200 Subject: [PATCH 72/93] fix: apply suggested changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/metrics.go | 2 +- util/fmtutil/format.go | 33 ++++++++++++--------------------- util/fmtutil/format_test.go | 10 +++++----- 3 files changed, 18 insertions(+), 27 deletions(-) diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go index 4a6fafd407..2bc2237e2f 100644 --- a/cmd/promtool/metrics.go +++ b/cmd/promtool/metrics.go @@ -102,7 +102,7 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin } func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool { - metricsData, err := fmtutil.ParseMetricsTextAndFormat(bytes.NewReader(data), labels) + metricsData, err := fmtutil.MetricTextToWriteRequest(bytes.NewReader(data), labels) if err != nil { fmt.Fprintln(os.Stderr, " FAILED:", err) return false diff --git a/util/fmtutil/format.go b/util/fmtutil/format.go index 5cff0516b9..9034a90fa7 100644 --- a/util/fmtutil/format.go +++ b/util/fmtutil/format.go @@ -44,8 +44,18 @@ var MetricMetadataTypeValue = map[string]int32{ "STATESET": 7, } -// CreateWriteRequest convert metric family to a writerequest. -func CreateWriteRequest(mf map[string]*dto.MetricFamily, extraLabels map[string]string) (*prompb.WriteRequest, error) { +// MetricTextToWriteRequest consumes an io.Reader and return the data in write request format. +func MetricTextToWriteRequest(input io.Reader, labels map[string]string) (*prompb.WriteRequest, error) { + var parser expfmt.TextParser + mf, err := parser.TextToMetricFamilies(input) + if err != nil { + return nil, err + } + return MetricFamiliesToWriteRequest(mf, labels) +} + +// MetricFamiliesToWriteRequest convert metric family to a writerequest. +func MetricFamiliesToWriteRequest(mf map[string]*dto.MetricFamily, extraLabels map[string]string) (*prompb.WriteRequest, error) { wr := &prompb.WriteRequest{} // build metric list @@ -191,22 +201,3 @@ func makeLabelsMap(m *dto.Metric, metricName string, extraLabels map[string]stri return labels } - -// ParseMetricsTextReader consumes an io.Reader and returns the MetricFamily. -func ParseMetricsTextReader(input io.Reader) (map[string]*dto.MetricFamily, error) { - var parser expfmt.TextParser - mf, err := parser.TextToMetricFamilies(input) - if err != nil { - return nil, err - } - return mf, nil -} - -// ParseMetricsTextAndFormat return the data in the expected prometheus metrics write request format. 
-func ParseMetricsTextAndFormat(input io.Reader, labels map[string]string) (*prompb.WriteRequest, error) { - mf, err := ParseMetricsTextReader(input) - if err != nil { - return nil, err - } - return CreateWriteRequest(mf, labels) -} diff --git a/util/fmtutil/format_test.go b/util/fmtutil/format_test.go index 5c1ab5bde0..0f052f5e75 100644 --- a/util/fmtutil/format_test.go +++ b/util/fmtutil/format_test.go @@ -201,13 +201,13 @@ func TestParseAndPushMetricsTextAndFormat(t *testing.T) { `)) labels := map[string]string{"job": "promtool"} - expected, err := ParseMetricsTextAndFormat(input, labels) + expected, err := MetricTextToWriteRequest(input, labels) require.NoError(t, err) require.Equal(t, writeRequestFixture, expected) } -func TestParseMetricsTextAndFormatErrorParsingFloatValue(t *testing.T) { +func TestMetricTextToWriteRequestErrorParsingFloatValue(t *testing.T) { input := bytes.NewReader([]byte(` # HELP http_requests_total The total number of HTTP requests. # TYPE http_requests_total counter @@ -216,11 +216,11 @@ func TestParseMetricsTextAndFormatErrorParsingFloatValue(t *testing.T) { `)) labels := map[string]string{"job": "promtool"} - _, err := ParseMetricsTextAndFormat(input, labels) + _, err := MetricTextToWriteRequest(input, labels) require.Equal(t, err.Error(), "text format parsing error in line 4: expected float as value, got \"1027Error\"") } -func TestParseMetricsTextAndFormatErrorParsingMetricType(t *testing.T) { +func TestMetricTextToWriteRequestErrorParsingMetricType(t *testing.T) { input := bytes.NewReader([]byte(` # HELP node_info node info summary. # TYPE node_info info @@ -228,6 +228,6 @@ func TestParseMetricsTextAndFormatErrorParsingMetricType(t *testing.T) { `)) labels := map[string]string{"job": "promtool"} - _, err := ParseMetricsTextAndFormat(input, labels) + _, err := MetricTextToWriteRequest(input, labels) require.Equal(t, err.Error(), "text format parsing error in line 3: unknown metric type \"info\"") } From 2c6b9c4cd4b7ce8232eaefaf6b1dbdccd6aeb130 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Jun 2023 11:14:27 +0000 Subject: [PATCH 73/93] build(deps): bump github.com/influxdata/influxdb Bumps [github.com/influxdata/influxdb](https://github.com/influxdata/influxdb) from 1.11.0 to 1.11.2. - [Release notes](https://github.com/influxdata/influxdb/releases) - [Changelog](https://github.com/influxdata/influxdb/blob/master/CHANGELOG_OLD.md) - [Commits](https://github.com/influxdata/influxdb/commits) --- updated-dependencies: - dependency-name: github.com/influxdata/influxdb dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 2 +- documentation/examples/remote_storage/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index c0d4331968..2972854f13 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -7,7 +7,7 @@ require ( github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/influxdata/influxdb v1.11.0 + github.com/influxdata/influxdb v1.11.2 github.com/prometheus/client_golang v1.15.0 github.com/prometheus/common v0.42.0 github.com/prometheus/prometheus v0.44.0 diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index e0eac05c10..678ddfe2c1 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -118,8 +118,8 @@ github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpX github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/influxdata/influxdb v1.11.0 h1:0X+ZsbcOWc6AEi5MHee9BYqXCKmz8IZsljrRYjmV8Qg= -github.com/influxdata/influxdb v1.11.0/go.mod h1:V93tJcidY0Zh0LtSONZWnXXGDyt20dtVf+Ddp4EnhaA= +github.com/influxdata/influxdb v1.11.2 h1:qOF3uQN1mDfJNEKwbAgJsqehf8IXgKok2vlGm736oGo= +github.com/influxdata/influxdb v1.11.2/go.mod h1:eUMkLTE2vQwvSk6KGMrTBLKPaqSuczuelGbggigMPFw= github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= From c18f5b64872429384ef0170bf8a39f1d809b7337 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Thu, 30 Mar 2023 11:04:37 +0200 Subject: [PATCH 74/93] Update golangci-lint * Update golangci-lint to v1.53.3. * Update the sync script handler for the old golanci-lint action. Signed-off-by: SuperQ --- .github/workflows/ci.yml | 2 +- Makefile.common | 2 +- scripts/golangci-lint.yml | 32 ++++++++++++++++++++++++++++++++ scripts/sync_repo_files.sh | 23 +++++++++++++++-------- 4 files changed, 49 insertions(+), 10 deletions(-) create mode 100644 scripts/golangci-lint.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bfa1ac00d0..c0bccd0efd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -150,7 +150,7 @@ jobs: uses: golangci/golangci-lint-action@v3.4.0 with: args: --verbose - version: v1.51.2 + version: v1.53.3 fuzzing: uses: ./.github/workflows/fuzzing.yml if: github.event_name == 'pull_request' diff --git a/Makefile.common b/Makefile.common index e372d34738..787feff089 100644 --- a/Makefile.common +++ b/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.51.2 +GOLANGCI_LINT_VERSION ?= v1.53.3 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. 
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml new file mode 100644 index 0000000000..433f71b885 --- /dev/null +++ b/scripts/golangci-lint.yml @@ -0,0 +1,32 @@ +--- +# This action is synced from https://github.com/prometheus/prometheus +name: golangci-lint +on: + push: + paths: + - "go.sum" + - "go.mod" + - "**.go" + - "scripts/errcheck_excludes.txt" + - ".github/workflows/golangci-lint.yml" + - ".golangci.yml" + pull_request: + +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: install Go + uses: actions/setup-go@v3 + with: + go-version: 1.20.x + - name: Install snmp_exporter/generator dependencies + run: sudo apt-get update && sudo apt-get -y install libsnmp-dev + if: github.repository == 'prometheus/snmp_exporter' + - name: Lint + uses: golangci/golangci-lint-action@v3.4.0 + with: + version: v1.53.3 diff --git a/scripts/sync_repo_files.sh b/scripts/sync_repo_files.sh index 3c987f07ee..5e32eb8832 100755 --- a/scripts/sync_repo_files.sh +++ b/scripts/sync_repo_files.sh @@ -37,7 +37,7 @@ if [ -z "${GITHUB_TOKEN}" ]; then fi # List of files that should be synced. -SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint .github/workflows/golangci-lint.yml" +SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml" # Go to the root of the repo cd "$(git rev-parse --show-cdup)" || exit 1 @@ -115,20 +115,23 @@ process_repo() { local needs_update=() for source_file in ${SYNC_FILES}; do source_checksum="$(sha256sum "${source_dir}/${source_file}" | cut -d' ' -f1)" - - target_file="$(curl -sL --fail "https://raw.githubusercontent.com/${org_repo}/${default_branch}/${source_file}")" + if [[ "${source_file}" == 'scripts/golangci-lint.yml' ]] && ! check_go "${org_repo}" "${default_branch}" ; then + echo "${org_repo} is not Go, skipping golangci-lint.yml." + continue + fi if [[ "${source_file}" == 'LICENSE' ]] && ! check_license "${target_file}" ; then echo "LICENSE in ${org_repo} is not apache, skipping." continue fi - if [[ "${source_file}" == '.github/workflows/golangci-lint.yml' ]] && ! check_go "${org_repo}" "${default_branch}" ; then - echo "${org_repo} is not Go, skipping .github/workflows/golangci-lint.yml." - continue + target_filename="${source_file}" + if [[ "${source_file}" == 'scripts/golangci-lint.yml' ]] ; then + target_filename=".github/workflows/${source_file}" fi + target_file="$(curl -sL --fail "https://raw.githubusercontent.com/${org_repo}/${default_branch}/${target_filename}")" if [[ -z "${target_file}" ]]; then echo "${source_file} doesn't exist in ${org_repo}" case "${source_file}" in - CODE_OF_CONDUCT.md | SECURITY.md | .github/workflows/golangci-lint.yml) + CODE_OF_CONDUCT.md | SECURITY.md) echo "${source_file} missing in ${org_repo}, force updating." needs_update+=("${source_file}") ;; @@ -159,8 +162,12 @@ process_repo() { # Update the files in target repo by one from prometheus/prometheus. 
for source_file in "${needs_update[@]}"; do + target_filename="${source_file}" + if [[ "${source_file}" == 'scripts/golangci-lint.yml' ]] ; then + target_filename=".github/workflows/${source_file}" + fi case "${source_file}" in - *) cp -f "${source_dir}/${source_file}" "./${source_file}" ;; + *) cp -f "${source_dir}/${source_file}" "./${target_filename}" ;; esac done From 16c3cd35b1dfeafd4812d76db4cfa38f7c09ae67 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Tue, 4 Apr 2023 10:28:19 +0200 Subject: [PATCH 75/93] Disable revive unused-parameter Signed-off-by: SuperQ --- .golangci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.golangci.yml b/.golangci.yml index fc2721455c..128ad4f04a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -56,3 +56,9 @@ linters-settings: local-prefixes: github.com/prometheus/prometheus gofumpt: extra-rules: true + revive: + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter + - name: unused-parameter + severity: warning + disabled: true From 4e9b89f88716d04c25ded01cc653052595073c73 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Tue, 27 Jun 2023 13:51:29 +0200 Subject: [PATCH 76/93] Update depguard config syntax. Signed-off-by: SuperQ --- .golangci.yml | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 128ad4f04a..4a6daae594 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -31,14 +31,19 @@ issues: linters-settings: depguard: - list-type: blacklist - include-go-root: true - packages-with-error-message: - - sync/atomic: "Use go.uber.org/atomic instead of sync/atomic" - - github.com/stretchr/testify/assert: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert" - - github.com/go-kit/kit/log: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log" - - io/ioutil: "Use corresponding 'os' or 'io' functions instead." - - regexp: "Use github.com/grafana/regexp instead of regexp" + rules: + main: + deny: + - pkg: "sync/atomic" + desc: "Use go.uber.org/atomic instead of sync/atomic" + - pkg: "github.com/stretchr/testify/assert" + desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert" + - pkg: "github.com/go-kit/kit/log" + desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log" + - pkg: "io/ioutil" + desc: "Use corresponding 'os' or 'io' functions instead." + - pkg: "regexp" + desc: "Use github.com/grafana/regexp instead of regexp" errcheck: exclude-functions: # Don't flag lines such as "io.Copy(io.Discard, resp.Body)". 
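The .golangci.yml change above moves depguard from the old list-type/blacklist keys to the newer rules syntax, but the intent is unchanged: the packages under deny remain banned in favor of the listed replacements. A small, hypothetical snippet written to those conventions (package and identifiers are illustrative only, not code from this repository):

```go
package example

import (
	"github.com/go-kit/log" // allowed; "github.com/go-kit/kit/log" would be denied
	"go.uber.org/atomic"    // allowed; "sync/atomic" would be denied
)

var requests atomic.Int64

// recordRequest bumps the counter and logs the running total.
func recordRequest(logger log.Logger) {
	n := requests.Inc()
	_ = logger.Log("msg", "request handled", "total", n)
}
```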
From 686482ab34cf8cbbfdc88caa0edfd7cbdb8439b2 Mon Sep 17 00:00:00 2001 From: Matthias Loibl Date: Tue, 27 Jun 2023 18:10:38 +0200 Subject: [PATCH 77/93] Remove Add(0) Signed-off-by: Matthias Loibl --- web/web.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/web.go b/web/web.go index ddb5430eda..6a3eab3270 100644 --- a/web/web.go +++ b/web/web.go @@ -158,7 +158,7 @@ func (m *metrics) instrumentHandlerWithPrefix(prefix string) func(handlerName st } func (m *metrics) instrumentHandler(handlerName string, handler http.HandlerFunc) http.HandlerFunc { - m.requestCounter.WithLabelValues(handlerName, "200").Add(0) + m.requestCounter.WithLabelValues(handlerName, "200") return promhttp.InstrumentHandlerCounter( m.requestCounter.MustCurryWith(prometheus.Labels{"handler": handlerName}), promhttp.InstrumentHandlerDuration( From 484a9e4071dbcd83c9bc7a878816723ce7114398 Mon Sep 17 00:00:00 2001 From: cui fliter Date: Thu, 29 Jun 2023 18:28:13 +0800 Subject: [PATCH 78/93] fix some typos (#12498) Signed-off-by: cui fliter --- CHANGELOG.md | 2 +- web/ui/react-app/public/index.html | 2 +- web/ui/react-app/src/contexts/ThemeContext.tsx | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 76b6e51b5f..d316e84d37 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,7 +63,7 @@ improvements for testing. #10991 * [FEATURE] Promtool: Add HTTP client configuration to query commands. #11487 * [FEATURE] Scrape: Add `scrape_config_files` to include scrape configs from different files. #12019 * [FEATURE] HTTP client: Add `no_proxy` to exclude URLs from proxied requests. #12098 -* [FEATURE] HTTP client: Add `proxy_from_enviroment` to read proxies from env variables. #12098 +* [FEATURE] HTTP client: Add `proxy_from_environment` to read proxies from env variables. #12098 * [ENHANCEMENT] API: Add support for setting lookback delta per query via the API. #12088 * [ENHANCEMENT] API: Change HTTP status code from 503/422 to 499 if a request is canceled. #11897 * [ENHANCEMENT] Scrape: Allow exemplars for all metric types. 
#11984 diff --git a/web/ui/react-app/public/index.html b/web/ui/react-app/public/index.html index 57131a26b6..9aeb3555e1 100755 --- a/web/ui/react-app/public/index.html +++ b/web/ui/react-app/public/index.html @@ -42,7 +42,7 @@ --> TITLE_PLACEHOLDER diff --git a/web/ui/react-app/src/contexts/ThemeContext.tsx b/web/ui/react-app/src/contexts/ThemeContext.tsx index 9ee84cf5ec..2f2ee70730 100644 --- a/web/ui/react-app/src/contexts/ThemeContext.tsx +++ b/web/ui/react-app/src/contexts/ThemeContext.tsx @@ -9,7 +9,7 @@ export interface ThemeCtx { setTheme: (t: themeSetting) => void; } -// defaults, will be overriden in App.tsx +// defaults, will be overridden in App.tsx export const ThemeContext = React.createContext({ theme: 'light', userPreference: 'auto', From 031d22df9e43f1c3014b8344337e133099902ae8 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Fri, 30 Jun 2023 14:59:59 +0200 Subject: [PATCH 79/93] Fix race condition in ChunkDiskMapper.Truncate() (#12500) * Fix race condition in ChunkDiskMapper.Truncate() Signed-off-by: Marco Pracucci * Added unit test Signed-off-by: Marco Pracucci * Update tsdb/chunks/head_chunks.go Co-authored-by: Ganesh Vernekar Signed-off-by: Marco Pracucci --------- Signed-off-by: Marco Pracucci Co-authored-by: Ganesh Vernekar --- tsdb/chunks/head_chunks.go | 20 +++++++++---- tsdb/chunks/head_chunks_test.go | 52 +++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 5 deletions(-) diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go index bcdab21254..d73eb36f87 100644 --- a/tsdb/chunks/head_chunks.go +++ b/tsdb/chunks/head_chunks.go @@ -948,12 +948,22 @@ func (cdm *ChunkDiskMapper) Truncate(fileNo uint32) error { if len(chkFileIndices) == len(removedFiles) { // All files were deleted. Reset the current sequence. cdm.evtlPosMtx.Lock() - if err == nil { - cdm.evtlPos.setSeq(0) - } else { - // In case of error, set it to the last file number on the disk that was not deleted. - cdm.evtlPos.setSeq(uint64(pendingDeletes[len(pendingDeletes)-1])) + + // We can safely reset the sequence only if the write queue is empty. If it's not empty, + // then there may be a job in the queue that will create a new segment file with an ID + // generated before the sequence reset. + // + // The queueIsEmpty() function must be called while holding the cdm.evtlPosMtx to avoid + // a race condition with WriteChunk(). + if cdm.writeQueue == nil || cdm.writeQueue.queueIsEmpty() { + if err == nil { + cdm.evtlPos.setSeq(0) + } else { + // In case of error, set it to the last file number on the disk that was not deleted. + cdm.evtlPos.setSeq(uint64(pendingDeletes[len(pendingDeletes)-1])) + } } + cdm.evtlPosMtx.Unlock() } diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index 20a4c2064b..68c1330881 100644 --- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -19,7 +19,9 @@ import ( "math/rand" "os" "strconv" + "sync" "testing" + "time" "github.com/stretchr/testify/require" @@ -356,6 +358,56 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) { verifyFiles([]int{5, 6, 7}) } +func TestChunkDiskMapper_Truncate_WriteQueueRaceCondition(t *testing.T) { + hrw := createChunkDiskMapper(t, "") + t.Cleanup(func() { + require.NoError(t, hrw.Close()) + }) + + // This test should only run when the queue is enabled. + if hrw.writeQueue == nil { + t.Skip("This test should only run when the queue is enabled") + } + + // Add an artificial delay in the writeChunk function to easily trigger the race condition. 
+ origWriteChunk := hrw.writeQueue.writeChunk + hrw.writeQueue.writeChunk = func(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) error { + time.Sleep(100 * time.Millisecond) + return origWriteChunk(seriesRef, mint, maxt, chk, ref, isOOO, cutFile) + } + + wg := sync.WaitGroup{} + wg.Add(2) + + // Write a chunk. Since the queue is enabled, the chunk will be written asynchronously (with the artificial delay). + ref := hrw.WriteChunk(1, 0, 10, randomChunk(t), false, func(err error) { + defer wg.Done() + require.NoError(t, err) + }) + + seq, _ := ref.Unpack() + require.Equal(t, 1, seq) + + // Truncate, simulating that all chunks from segment files before 1 can be dropped. + require.NoError(t, hrw.Truncate(1)) + + // Request to cut a new file when writing the next chunk. If there's a race condition, cutting a new file will + // allow us to detect there's actually an issue with the sequence number (because it's checked when a new segment + // file is created). + hrw.CutNewFile() + + // Write another chunk. This will cut a new file. + ref = hrw.WriteChunk(1, 0, 10, randomChunk(t), false, func(err error) { + defer wg.Done() + require.NoError(t, err) + }) + + seq, _ = ref.Unpack() + require.Equal(t, 2, seq) + + wg.Wait() +} + // TestHeadReadWriter_TruncateAfterIterateChunksError tests for // https://github.com/prometheus/prometheus/issues/7753 func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) { From 1a8f06bb7a89bf2aec1505f459b8e9b15224fd21 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Sat, 1 Jul 2023 11:31:51 +0200 Subject: [PATCH 80/93] Update sync script. * Fix the path to the golanci-lint target filename. * Use the target filename when printing errors. * Put the yamllint filename back to previous name. Signed-off-by: SuperQ --- .yamllint.yml => .yamllint | 0 scripts/sync_repo_files.sh | 6 +++--- 2 files changed, 3 insertions(+), 3 deletions(-) rename .yamllint.yml => .yamllint (100%) diff --git a/.yamllint.yml b/.yamllint similarity index 100% rename from .yamllint.yml rename to .yamllint diff --git a/scripts/sync_repo_files.sh b/scripts/sync_repo_files.sh index 5e32eb8832..d53ae8be7b 100755 --- a/scripts/sync_repo_files.sh +++ b/scripts/sync_repo_files.sh @@ -125,11 +125,11 @@ process_repo() { fi target_filename="${source_file}" if [[ "${source_file}" == 'scripts/golangci-lint.yml' ]] ; then - target_filename=".github/workflows/${source_file}" + target_filename=".github/workflows/golangci-lint.yml" fi target_file="$(curl -sL --fail "https://raw.githubusercontent.com/${org_repo}/${default_branch}/${target_filename}")" if [[ -z "${target_file}" ]]; then - echo "${source_file} doesn't exist in ${org_repo}" + echo "${target_filename} doesn't exist in ${org_repo}" case "${source_file}" in CODE_OF_CONDUCT.md | SECURITY.md) echo "${source_file} missing in ${org_repo}, force updating." 
@@ -164,7 +164,7 @@ process_repo() { for source_file in "${needs_update[@]}"; do target_filename="${source_file}" if [[ "${source_file}" == 'scripts/golangci-lint.yml' ]] ; then - target_filename=".github/workflows/${source_file}" + target_filename=".github/workflows/golangci-lint.yml" fi case "${source_file}" in *) cp -f "${source_dir}/${source_file}" "./${target_filename}" ;; From 35069910f532b9078236177052425ae9605880c8 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Sat, 1 Jul 2023 14:29:59 +0200 Subject: [PATCH 81/93] Fix infinite loop in index Writer when a series contains duplicated label names Signed-off-by: Marco Pracucci --- tsdb/index/index.go | 5 ++++- tsdb/index/index_test.go | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 50a701d3a6..ef2d167dc8 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -864,7 +864,10 @@ func (w *Writer) writePostingsToTmpFiles() error { // using more memory than a single label name can. for len(names) > 0 { if w.labelNames[names[0]]+c > maxPostings { - break + if c > 0 { + break + } + return fmt.Errorf("corruption detected when writing postings to index: label %q has %d uses, but maxPostings is %d", names[0], w.labelNames[names[0]], maxPostings) } batchNames = append(batchNames, names[0]) c += w.labelNames[names[0]] diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index e4cee4a55d..a978ba186a 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -471,6 +471,21 @@ func TestPersistence_index_e2e(t *testing.T) { require.NoError(t, ir.Close()) } +func TestWriter_ShouldReturnErrorOnSeriesWithDuplicatedLabelNames(t *testing.T) { + w, err := NewWriter(context.Background(), filepath.Join(t.TempDir(), "index")) + require.NoError(t, err) + + require.NoError(t, w.AddSymbol("__name__")) + require.NoError(t, w.AddSymbol("metric_1")) + require.NoError(t, w.AddSymbol("metric_2")) + + require.NoError(t, w.AddSeries(0, labels.FromStrings("__name__", "metric_1", "__name__", "metric_2"))) + + err = w.Close() + require.Error(t, err) + require.ErrorContains(t, err, "corruption detected when writing postings to index") +} + func TestDecbufUvarintWithInvalidBuffer(t *testing.T) { b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81}) From 81606762d2b62a1eb2a4419927108f948e8fdd8d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Jul 2023 23:05:56 +0000 Subject: [PATCH 82/93] build(deps): bump github.com/linode/linodego from 1.17.0 to 1.17.2 Bumps [github.com/linode/linodego](https://github.com/linode/linodego) from 1.17.0 to 1.17.2. - [Release notes](https://github.com/linode/linodego/releases) - [Commits](https://github.com/linode/linodego/compare/v1.17.0...v1.17.2) --- updated-dependencies: - dependency-name: github.com/linode/linodego dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f93d4afc96..50fea81691 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/ionos-cloud/sdk-go/v6 v6.1.7 github.com/json-iterator/go v1.1.12 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.17.0 + github.com/linode/linodego v1.17.2 github.com/miekg/dns v1.1.54 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.1.0 diff --git a/go.sum b/go.sum index fe998a0006..e529c33f91 100644 --- a/go.sum +++ b/go.sum @@ -525,8 +525,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.17.0 h1:aWS98f0jUoY2lhsEuBxRdVkqyGM0nazPd68AEDF0EvU= -github.com/linode/linodego v1.17.0/go.mod h1:/omzPxie0/YI6S0sTw1q47qDt5IYSlbO/infRR4UG+A= +github.com/linode/linodego v1.17.2 h1:b32dj4662PGG5P9qVa6nBezccWdqgukndlMIuPGq1CQ= +github.com/linode/linodego v1.17.2/go.mod h1:C2iyT3Vg2O2sPxkWka4XAQ5WSUtm5LmTZ3Adw43Ra7Q= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= From aa75c60f9a52a8b8102b49b3edafa07d15856f5d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Jul 2023 23:06:08 +0000 Subject: [PATCH 83/93] build(deps): bump github.com/hetznercloud/hcloud-go Bumps [github.com/hetznercloud/hcloud-go](https://github.com/hetznercloud/hcloud-go) from 1.45.1 to 1.47.0. - [Release notes](https://github.com/hetznercloud/hcloud-go/releases) - [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/hetznercloud/hcloud-go/compare/v1.45.1...v1.47.0) --- updated-dependencies: - dependency-name: github.com/hetznercloud/hcloud-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index f93d4afc96..641c99185e 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.21.0 github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f - github.com/hetznercloud/hcloud-go v1.45.1 + github.com/hetznercloud/hcloud-go v1.47.0 github.com/ionos-cloud/sdk-go/v6 v6.1.7 github.com/json-iterator/go v1.1.12 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b @@ -43,7 +43,7 @@ require ( github.com/ovh/go-ovh v1.4.1 github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.25.0 - github.com/prometheus/client_golang v1.15.1 + github.com/prometheus/client_golang v1.16.0 github.com/prometheus/client_model v0.4.0 github.com/prometheus/common v0.44.0 github.com/prometheus/common/assets v0.2.0 @@ -170,7 +170,7 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect github.com/spf13/pflag v1.0.5 // indirect go.mongodb.org/mongo-driver v1.11.3 // indirect go.opencensus.io v0.24.0 // indirect diff --git a/go.sum b/go.sum index fe998a0006..b2aa9f2e33 100644 --- a/go.sum +++ b/go.sum @@ -464,8 +464,8 @@ github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f/go.mod h1:Xjd3 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go v1.45.1 h1:nl0OOklFfQT5J6AaNIOhl5Ruh3fhmGmhvZEqHbibVuk= -github.com/hetznercloud/hcloud-go v1.45.1/go.mod h1:aAUGxSfSnB8/lVXHNEDxtCT1jykaul8kqjD7f5KQXF8= +github.com/hetznercloud/hcloud-go v1.47.0 h1:WMZDwLPtMZwOLWIgERHrrrTzRFdHx0hTygYVQ4VWHW4= +github.com/hetznercloud/hcloud-go v1.47.0/go.mod h1:zSpmBnxIdb5oMdbpVg1Q977Cq2qiwERkjj3jqRbHH5U= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -659,8 +659,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= -github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -690,8 +690,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= From 4027ba3d67f230180c3758b02148e4970cbdf10c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Jul 2023 23:06:16 +0000 Subject: [PATCH 84/93] build(deps): bump google.golang.org/grpc from 1.55.0 to 1.56.1 Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.55.0 to 1.56.1. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.55.0...v1.56.1) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index f93d4afc96..2dcb5ec841 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/digitalocean/godo v1.99.0 github.com/docker/docker v24.0.2+incompatible github.com/edsrzf/mmap-go v1.1.0 - github.com/envoyproxy/go-control-plane v0.11.0 + github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f github.com/envoyproxy/protoc-gen-validate v1.0.1 github.com/fsnotify/fsnotify v1.6.0 github.com/go-kit/log v0.2.1 @@ -70,8 +70,8 @@ require ( golang.org/x/time v0.3.0 golang.org/x/tools v0.9.3 google.golang.org/api v0.114.0 - google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 - google.golang.org/grpc v1.55.0 + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 + google.golang.org/grpc v1.56.1 google.golang.org/protobuf v1.30.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -96,7 +96,7 @@ require ( ) require ( - cloud.google.com/go/compute v1.19.0 // indirect + cloud.google.com/go/compute v1.19.1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect @@ -108,7 +108,7 @@ require ( github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 // indirect + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect diff --git a/go.sum b/go.sum index fe998a0006..d4edc92c56 100644 --- a/go.sum +++ b/go.sum @@ -19,8 +19,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= @@ -138,8 +138,8 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 h1:zlUubfBUxApscKFsF4VSvvfhsBNTBu0eF/ddvpo96yk= -github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -185,8 +185,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= -github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.1 h1:kt9FtLiooDc0vbwTLhdg3dyNX1K9Qwa1EK9LcD4jVUQ= github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs= @@ -1161,8 +1161,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 h1:khxVcsk/FhnzxMKOyD+TDGwjbEOpcPuIpmafPGFmhMA= -google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1184,8 +1184,8 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= -google.golang.org/grpc v1.55.0/go.mod 
h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 1a355ded01af32fc6d278a17678c5ed4ba6d2c5e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Jul 2023 23:10:44 +0000 Subject: [PATCH 85/93] build(deps): bump golangci/golangci-lint-action from 3.4.0 to 3.6.0 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3.4.0 to 3.6.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v3.4.0...v3.6.0) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c0bccd0efd..e8b61bcadb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -147,7 +147,7 @@ jobs: run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint - uses: golangci/golangci-lint-action@v3.4.0 + uses: golangci/golangci-lint-action@v3.6.0 with: args: --verbose version: v1.53.3 From 5735373c9b97aa5aa47b9bab7eeb77a26bfeb7fe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Jul 2023 23:55:45 +0000 Subject: [PATCH 86/93] build(deps): bump github.com/prometheus/prometheus Bumps [github.com/prometheus/prometheus](https://github.com/prometheus/prometheus) from 0.44.0 to 0.45.0. - [Release notes](https://github.com/prometheus/prometheus/releases) - [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/prometheus/compare/v0.44.0...v0.45.0) --- updated-dependencies: - dependency-name: github.com/prometheus/prometheus dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 39 +++--- documentation/examples/remote_storage/go.sum | 123 +++++++++++-------- 2 files changed, 94 insertions(+), 68 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 196cad017d..0dfb897ec0 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -8,46 +8,55 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.2 - github.com/prometheus/client_golang v1.15.0 - github.com/prometheus/common v0.42.0 - github.com/prometheus/prometheus v0.44.0 + github.com/prometheus/client_golang v1.15.1 + github.com/prometheus/common v0.44.0 + github.com/prometheus/prometheus v0.45.0 github.com/stretchr/testify v1.8.4 ) require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/aws/aws-sdk-go v1.44.245 // indirect + github.com/aws/aws-sdk-go v1.44.276 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/google/uuid v1.3.0 // indirect github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/kr/text v0.2.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 // indirect - go.opentelemetry.io/otel v1.14.0 // indirect - go.opentelemetry.io/otel/metric v0.37.0 // indirect - go.opentelemetry.io/otel/trace v1.14.0 // indirect - go.uber.org/atomic v1.10.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect + go.opentelemetry.io/otel v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.opentelemetry.io/otel/trace v1.16.0 // indirect + go.uber.org/atomic v1.11.0 // indirect go.uber.org/goleak v1.2.1 // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect - golang.org/x/net v0.9.0 // indirect - golang.org/x/oauth2 v0.7.0 // indirect - golang.org/x/sys v0.7.0 // indirect + golang.org/x/crypto v0.8.0 // 
indirect + golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/oauth2 v0.8.0 // indirect + golang.org/x/sys v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 657b542a76..79e1595c33 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,12 +1,20 @@ github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IKGaStvGB/S90PUPB/W8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 h1:oPdPEZFSbl7oSPEAIPMPBMUmiL+mqgzBJwM/9qYcwNg= +github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0= github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= @@ -19,8 +27,8 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAu github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M= -github.com/aws/aws-sdk-go v1.44.245/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.276 h1:ywPlx9C5Yc482dUgAZ9bHpQ6onVvJvYE9FJWsNDCEy0= +github.com/aws/aws-sdk-go v1.44.276/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -28,22 +36,23 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43 h1:XP+uhjN0yBCN/tPkr8Z0BNDc5rZam9RG6UWyf2FrSQ0= +github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 h1:zlUubfBUxApscKFsF4VSvvfhsBNTBu0eF/ddvpo96yk= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/digitalocean/godo v1.98.0 h1:potyC1eD0N9n5/P4/WmJuKgg+OGYZOBWEW+/aKTX6QQ= +github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E= +github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek= +github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= -github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v1.0.1 h1:kt9FtLiooDc0vbwTLhdg3dyNX1K9Qwa1EK9LcD4jVUQ= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -59,8 +68,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= @@ -72,12 +81,11 @@ github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -100,11 +108,13 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gophercloud/gophercloud v1.4.0 h1:RqEu43vaX0lb0LanZr5BylK5ICVxjpFFoc0sxivyuHU= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= +github.com/hashicorp/consul/api v1.21.0 h1:WMR2JiyuaQWRAMFaOGiYfY4Q4HRpyYRe/oYQofjyduM= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -114,13 +124,13 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= -github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A= +github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f h1:yxjcAZRuYymIDC0W4IQHgTe9EQdu2BsjPlVmKwyVZT4= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= -github.com/hetznercloud/hcloud-go 
v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A= +github.com/hetznercloud/hcloud-go v1.45.1 h1:nl0OOklFfQT5J6AaNIOhl5Ruh3fhmGmhvZEqHbibVuk= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/influxdata/influxdb v1.11.2 h1:qOF3uQN1mDfJNEKwbAgJsqehf8IXgKok2vlGm736oGo= github.com/influxdata/influxdb v1.11.2/go.mod h1:eUMkLTE2vQwvSk6KGMrTBLKPaqSuczuelGbggigMPFw= -github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A= +github.com/ionos-cloud/sdk-go/v6 v6.1.7 h1:uVG1Q/ZDJ7YmCI9Oevpue9xJEH5UrUMyXv8gm7NTxIw= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -146,14 +156,16 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/linode/linodego v1.17.0 h1:aWS98f0jUoY2lhsEuBxRdVkqyGM0nazPd68AEDF0EvU= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= +github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -170,6 +182,8 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -180,19 +194,19 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang 
v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM= -github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -201,16 +215,17 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prometheus/prometheus v0.44.0 h1:sgn8Fdx+uE5tHQn0/622swlk2XnIj6udoZCnbVjHIgc= -github.com/prometheus/prometheus v0.44.0/go.mod h1:aPsmIK3py5XammeTguyqTmuqzX/jeCdyOWWobLHNKQg= +github.com/prometheus/prometheus v0.45.0 h1:O/uG+Nw4kNxx/jDPxmjsSDd+9Ohql6E7ZSY1x5x/0KI= +github.com/prometheus/prometheus v0.45.0/go.mod h1:jC5hyO8ItJBnDWGecbEucMyXjzxGv9cxsxsjS9u5s1w= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 h1:1WuWJu7/e8SqK+uQl7lfk/N/oMZTL2NE/TJsNKRNMc4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus 
v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -223,16 +238,16 @@ github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtX github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 h1:lE9EJyw3/JhrjWH/hEy9FptnalDQgj7vpbgC2KCCCxE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0/go.mod h1:pcQ3MM3SWvrA71U4GDqv9UFDJ3HQsW7y5ZO3tDTlUdI= -go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= -go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= -go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs= -go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s= -go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= -go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -240,9 +255,10 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -263,12 +279,12 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -276,7 +292,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -292,15 
+308,16 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -316,7 +333,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -325,8 +342,8 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 h1:khxVcsk/FhnzxMKOyD+TDGwjbEOpcPuIpmafPGFmhMA= +google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -358,8 +375,8 @@ k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= From 3f230fc9f894d76cccc2dc75ea11909004c28c94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Mon, 3 Jul 2023 15:56:06 +0300 Subject: [PATCH 87/93] promql: convert QueryOpts to interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert QueryOpts to an interface so that downstream projects like https://github.com/thanos-community/promql-engine could extend the query options with engine specific options that are not in the original engine. Will be used to enable query analysis per-query. Signed-off-by: Giedrius Statkevičius --- promql/engine.go | 43 ++++++++++++++++++++++++++++++++---------- promql/engine_test.go | 6 ++---- web/api/v1/api.go | 23 +++++++++++++--------- web/api/v1/api_test.go | 20 +++++++------------- 4 files changed, 56 insertions(+), 36 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index f29db3a647..83bbdeff89 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -130,11 +130,35 @@ type Query interface { String() string } -type QueryOpts struct { +type PrometheusQueryOpts struct { // Enables recording per-step statistics if the engine has it enabled as well. Disabled by default. - EnablePerStepStats bool + enablePerStepStats bool // Lookback delta duration for this query. - LookbackDelta time.Duration + lookbackDelta time.Duration +} + +var _ QueryOpts = &PrometheusQueryOpts{} + +func NewPrometheusQueryOpts(enablePerStepStats bool, lookbackDelta time.Duration) QueryOpts { + return &PrometheusQueryOpts{ + enablePerStepStats: enablePerStepStats, + lookbackDelta: lookbackDelta, + } +} + +func (p *PrometheusQueryOpts) EnablePerStepStats() bool { + return p.enablePerStepStats +} + +func (p *PrometheusQueryOpts) LookbackDelta() time.Duration { + return p.lookbackDelta +} + +type QueryOpts interface { + // Enables recording per-step statistics if the engine has it enabled as well. Disabled by default. + EnablePerStepStats() bool + // Lookback delta duration for this query. + LookbackDelta() time.Duration } // query implements the Query interface. @@ -408,7 +432,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { } // NewInstantQuery returns an evaluation query for the given expression at the given time. 
-func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { +func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error) { pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0) finishQueue, err := ng.queueActive(ctx, qry) if err != nil { @@ -429,7 +453,7 @@ func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts // NewRangeQuery returns an evaluation query for the given time range and with // the resolution set by the interval. -func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { +func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { pExpr, qry := ng.newQuery(q, qs, opts, start, end, interval) finishQueue, err := ng.queueActive(ctx, qry) if err != nil { @@ -451,13 +475,12 @@ func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts * return qry, nil } -func (ng *Engine) newQuery(q storage.Queryable, qs string, opts *QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) { - // Default to empty QueryOpts if not provided. +func (ng *Engine) newQuery(q storage.Queryable, qs string, opts QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) { if opts == nil { - opts = &QueryOpts{} + opts = NewPrometheusQueryOpts(false, 0) } - lookbackDelta := opts.LookbackDelta + lookbackDelta := opts.LookbackDelta() if lookbackDelta <= 0 { lookbackDelta = ng.lookbackDelta } @@ -473,7 +496,7 @@ func (ng *Engine) newQuery(q storage.Queryable, qs string, opts *QueryOpts, star stmt: es, ng: ng, stats: stats.NewQueryTimers(), - sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats), + sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats()), queryable: q, } return &es.Expr, qry diff --git a/promql/engine_test.go b/promql/engine_test.go index 72cbf91533..5ffebc202d 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -1198,7 +1198,7 @@ load 10s origMaxSamples := engine.maxSamplesPerQuery for _, c := range cases { t.Run(c.Query, func(t *testing.T) { - opts := &QueryOpts{EnablePerStepStats: true} + opts := NewPrometheusQueryOpts(true, 0) engine.maxSamplesPerQuery = origMaxSamples runQuery := func(expErr error) *stats.Statistics { @@ -4626,9 +4626,7 @@ metric 0 1 2 if c.engineLookback != 0 { eng.lookbackDelta = c.engineLookback } - opts := &QueryOpts{ - LookbackDelta: c.queryLookback, - } + opts := NewPrometheusQueryOpts(false, c.queryLookback) qry, err := eng.NewInstantQuery(test.context, test.Queryable(), opts, query, c.ts) require.NoError(t, err) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index ff339b09a7..43ceaa6bed 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -178,8 +178,13 @@ type TSDBAdminStats interface { // QueryEngine defines the interface for the *promql.Engine, so it can be replaced, wrapped or mocked. 
type QueryEngine interface { SetQueryLogger(l promql.QueryLogger) - NewInstantQuery(ctx context.Context, q storage.Queryable, opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) - NewRangeQuery(ctx context.Context, q storage.Queryable, opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) + NewInstantQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) + NewRangeQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) +} + +type QueryOpts interface { + EnablePerStepStats() bool + LookbackDelta() time.Duration } // API can register a set of endpoints in a router and handle @@ -462,18 +467,18 @@ func (api *API) formatQuery(r *http.Request) (result apiFuncResult) { return apiFuncResult{expr.Pretty(0), nil, nil, nil} } -func extractQueryOpts(r *http.Request) (*promql.QueryOpts, error) { - opts := &promql.QueryOpts{ - EnablePerStepStats: r.FormValue("stats") == "all", - } +func extractQueryOpts(r *http.Request) (promql.QueryOpts, error) { + var duration time.Duration + if strDuration := r.FormValue("lookback_delta"); strDuration != "" { - duration, err := parseDuration(strDuration) + parsedDuration, err := parseDuration(strDuration) if err != nil { return nil, fmt.Errorf("error parsing lookback delta duration: %w", err) } - opts.LookbackDelta = duration + duration = parsedDuration } - return opts, nil + + return promql.NewPrometheusQueryOpts(r.FormValue("stats") == "all", duration), nil } func (api *API) queryRange(r *http.Request) (result apiFuncResult) { diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 16e74071c1..3aa10ee449 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -3627,7 +3627,7 @@ func TestExtractQueryOpts(t *testing.T) { tests := []struct { name string form url.Values - expect *promql.QueryOpts + expect promql.QueryOpts err error }{ { @@ -3635,9 +3635,8 @@ func TestExtractQueryOpts(t *testing.T) { form: url.Values{ "stats": []string{"all"}, }, - expect: &promql.QueryOpts{ - EnablePerStepStats: true, - }, + expect: promql.NewPrometheusQueryOpts(true, 0), + err: nil, }, { @@ -3645,10 +3644,8 @@ func TestExtractQueryOpts(t *testing.T) { form: url.Values{ "stats": []string{"none"}, }, - expect: &promql.QueryOpts{ - EnablePerStepStats: false, - }, - err: nil, + expect: promql.NewPrometheusQueryOpts(false, 0), + err: nil, }, { name: "with lookback delta", @@ -3656,11 +3653,8 @@ func TestExtractQueryOpts(t *testing.T) { "stats": []string{"all"}, "lookback_delta": []string{"30s"}, }, - expect: &promql.QueryOpts{ - EnablePerStepStats: true, - LookbackDelta: 30 * time.Second, - }, - err: nil, + expect: promql.NewPrometheusQueryOpts(true, 30*time.Second), + err: nil, }, { name: "with invalid lookback delta", From 68e59374741f104eab20a0a9eb0aaba8ea437cbc Mon Sep 17 00:00:00 2001 From: Patrick Oyarzun Date: Tue, 4 Jul 2023 04:37:58 -0500 Subject: [PATCH 88/93] Apply relevant label matchers in LabelValues before fetching extra postings (#12274) * Apply matchers when fetching label values Signed-off-by: Patrick Oyarzun * Avoid extra copying of label values Signed-off-by: Patrick Oyarzun --------- Signed-off-by: Patrick Oyarzun --- tsdb/querier.go | 20 ++++++++++++++++++++ tsdb/querier_bench_test.go | 1 + 2 files changed, 21 insertions(+) diff --git a/tsdb/querier.go b/tsdb/querier.go index 72b6b51414..98ee34e509 100644 --- 
a/tsdb/querier.go +++ b/tsdb/querier.go @@ -414,6 +414,26 @@ func labelValuesWithMatchers(r IndexReader, name string, matchers ...*labels.Mat if err != nil { return nil, errors.Wrapf(err, "fetching values of label %s", name) } + + // If we have a matcher for the label name, we can filter out values that don't match + // before we fetch postings. This is especially useful for labels with many values. + // e.g. __name__ with a selector like {__name__="xyz"} + for _, m := range matchers { + if m.Name != name { + continue + } + + // re-use the allValues slice to avoid allocations + // this is safe because the iteration is always ahead of the append + filteredValues := allValues[:0] + for _, v := range allValues { + if m.Matches(v) { + filteredValues = append(filteredValues, v) + } + } + allValues = filteredValues + } + valuesPostings := make([]index.Postings, len(allValues)) for i, value := range allValues { valuesPostings[i], err = r.Postings(name, value) diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index c6deaeb44c..1657061fd9 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -188,6 +188,7 @@ func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) { labelName string matchers []*labels.Matcher }{ + {`i with i="1"`, "i", []*labels.Matcher{i1}}, // i has 100k values. {`i with n="1"`, "i", []*labels.Matcher{n1}}, {`i with n="^.+$"`, "i", []*labels.Matcher{nPlus}}, From e8d4466ef46d353b46b344724bedd5492cef8f76 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 11:58:23 +0000 Subject: [PATCH 89/93] build(deps): bump bufbuild/buf-setup-action from 1.20.0 to 1.23.1 Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.20.0 to 1.23.1. - [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/v1.20.0...v1.23.1) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index a72837b79e..1b05a28693 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.20.0 + - uses: bufbuild/buf-setup-action@v1.23.1 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@v1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index edb7936d78..3a8d1d0402 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -10,7 +10,7 @@ jobs: if: github.repository_owner == 'prometheus' steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.20.0 + - uses: bufbuild/buf-setup-action@v1.23.1 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@v1 From 0f4c27e2bf4ba28707f96df18486165688341e79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Taavi=20V=C3=A4=C3=A4n=C3=A4nen?= Date: Fri, 30 Jun 2023 23:12:24 +0300 Subject: [PATCH 90/93] discovery/openstack: Include instance image ID in labels MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds a new label to include the ID of the image that an instance is using. This can be used for example to filter a job to only include instances using a certain image as that image includes some exporter. Sometimes the image information isn't available, such as when the image is private and the user doesn't have the roles required to see it. In those cases we just don't set the label, as the rest of the information from the discovery provider can still be used. 
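For illustration only (an editor's sketch, not part of this patch): under a scrape config that uses openstack_sd_configs with the "instance" role, a relabel rule along these lines would keep only targets whose instance was built from one specific image. The image ID below is the value used in the test fixture further down and simply stands in for whatever image ships the exporter.

    relabel_configs:
      - source_labels: [__meta_openstack_instance_image]
        regex: f90f6034-2570-4974-8351-6b49732ef2eb
        action: keep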
Signed-off-by: Taavi Väänänen --- discovery/openstack/instance.go | 11 +++++++++-- discovery/openstack/instance_test.go | 2 ++ docs/configuration/configuration.md | 1 + 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index 2f7e99a071..b2fe1e7870 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -36,6 +36,7 @@ const ( openstackLabelAddressPool = openstackLabelPrefix + "address_pool" openstackLabelInstanceFlavor = openstackLabelPrefix + "instance_flavor" openstackLabelInstanceID = openstackLabelPrefix + "instance_id" + openstackLabelInstanceImage = openstackLabelPrefix + "instance_image" openstackLabelInstanceName = openstackLabelPrefix + "instance_name" openstackLabelInstanceStatus = openstackLabelPrefix + "instance_status" openstackLabelPrivateIP = openstackLabelPrefix + "private_ip" @@ -144,12 +145,18 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, openstackLabelUserID: model.LabelValue(s.UserID), } - id, ok := s.Flavor["id"].(string) + flavorId, ok := s.Flavor["id"].(string) if !ok { level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string") continue } - labels[openstackLabelInstanceFlavor] = model.LabelValue(id) + labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorId) + + imageId, ok := s.Image["id"].(string) + if ok { + labels[openstackLabelInstanceImage] = model.LabelValue(imageId) + } + for k, v := range s.Metadata { name := strutil.SanitizeLabelName(k) labels[openstackLabelTagPrefix+model.LabelName(name)] = model.LabelValue(v) diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go index d47cb0020e..d2da5d9681 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -73,6 +73,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { "__address__": model.LabelValue("10.0.0.32:0"), "__meta_openstack_instance_flavor": model.LabelValue("1"), "__meta_openstack_instance_id": model.LabelValue("ef079b0c-e610-4dfb-b1aa-b49f07ac48e5"), + "__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("herp"), "__meta_openstack_private_ip": model.LabelValue("10.0.0.32"), @@ -85,6 +86,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { "__address__": model.LabelValue("10.0.0.31:0"), "__meta_openstack_instance_flavor": model.LabelValue("1"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682ba"), + "__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("derp"), "__meta_openstack_private_ip": model.LabelValue("10.0.0.31"), diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index b094bb4ecd..30bb07a8c1 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1276,6 +1276,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_openstack_address_pool`: the pool of the private IP. * `__meta_openstack_instance_flavor`: the flavor of the OpenStack instance. * `__meta_openstack_instance_id`: the OpenStack instance ID. +* `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using. 
* `__meta_openstack_instance_name`: the OpenStack instance name. * `__meta_openstack_instance_status`: the status of the OpenStack instance. * `__meta_openstack_private_ip`: the private IP of the OpenStack instance. From 323ac8344490413eab79d5e85ea05245998c2a32 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 21:43:15 +0000 Subject: [PATCH 91/93] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity Bumps [github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://github.com/Azure/azure-sdk-for-go) from 1.2.1 to 1.3.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azidentity/v1.2.1...sdk/azcore/v1.3.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azidentity dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index b3ad4d41bd..48db762913 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,8 @@ go 1.19 require ( github.com/Azure/azure-sdk-for-go v65.0.0+incompatible - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 github.com/Azure/go-autorest/autorest v0.11.29 github.com/Azure/go-autorest/autorest/adal v0.9.23 github.com/alecthomas/kingpin/v2 v2.3.2 @@ -85,7 +85,7 @@ require ( require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect diff --git a/go.sum b/go.sum index 9c0e3266a6..af10deb8b7 100644 --- a/go.sum +++ b/go.sum @@ -38,10 +38,10 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IKGaStvGB/S90PUPB/W8= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod 
h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= @@ -66,8 +66,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 h1:oPdPEZFSbl7oSPEAIPMPBMUmiL+mqgzBJwM/9qYcwNg= -github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= From ac32e19bccba790554bc4ee008cf54e0a53cfa3e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 21:43:53 +0000 Subject: [PATCH 92/93] build(deps): bump github.com/prometheus/client_golang Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.15.0 to 1.16.0. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.15.0...v1.16.0) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 4 ++-- documentation/examples/remote_storage/go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 0dfb897ec0..b4c8e077d6 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -8,7 +8,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.2 - github.com/prometheus/client_golang v1.15.1 + github.com/prometheus/client_golang v1.16.0 github.com/prometheus/common v0.44.0 github.com/prometheus/prometheus v0.45.0 github.com/stretchr/testify v1.8.4 @@ -44,7 +44,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect go.opentelemetry.io/otel v1.16.0 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 79e1595c33..2ade3a1b7c 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -194,8 +194,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= -github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -213,8 +213,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/prometheus/prometheus v0.45.0 h1:O/uG+Nw4kNxx/jDPxmjsSDd+9Ohql6E7ZSY1x5x/0KI= github.com/prometheus/prometheus v0.45.0/go.mod h1:jC5hyO8ItJBnDWGecbEucMyXjzxGv9cxsxsjS9u5s1w= 
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= From 4851ced266886b7aa5c9667bd163f6e517b0f784 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20Tudur=C3=AD?= Date: Wed, 5 Jul 2023 11:44:13 +0200 Subject: [PATCH 93/93] tsdb: Support native histograms in snapshot on shutdown (#12258) Signed-off-by: Marc Tuduri --- tsdb/head_test.go | 63 +++++++++- tsdb/head_wal.go | 52 +++++--- tsdb/record/record.go | 286 ++++++++++++++++++++++-------------------- 3 files changed, 251 insertions(+), 150 deletions(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 8eb218b5ac..2828106204 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -3230,8 +3230,12 @@ func TestChunkSnapshot(t *testing.T) { numSeries := 10 expSeries := make(map[string][]tsdbutil.Sample) + expHist := make(map[string][]tsdbutil.Sample) + expFloatHist := make(map[string][]tsdbutil.Sample) expTombstones := make(map[storage.SeriesRef]tombstones.Intervals) expExemplars := make([]ex, 0) + histograms := tsdbutil.GenerateTestGaugeHistograms(481) + floatHistogram := tsdbutil.GenerateTestGaugeFloatHistograms(481) addExemplar := func(app storage.Appender, ref storage.SeriesRef, lbls labels.Labels, ts int64) { e := ex{ @@ -3250,9 +3254,21 @@ func TestChunkSnapshot(t *testing.T) { checkSamples := func() { q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64) require.NoError(t, err) - series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", ".*")) + series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*")) require.Equal(t, expSeries, series) } + checkHistograms := func() { + q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64) + require.NoError(t, err) + series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "hist", "baz.*")) + require.Equal(t, expHist, series) + } + checkFloatHistograms := func() { + q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64) + require.NoError(t, err) + series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "floathist", "bat.*")) + require.Equal(t, expFloatHist, series) + } checkTombstones := func() { tr, err := head.Tombstones() require.NoError(t, err) @@ -3301,6 +3317,8 @@ func TestChunkSnapshot(t *testing.T) { require.NoError(t, head.Init(math.MinInt64)) checkSamples() + checkHistograms() + checkFloatHistograms() checkTombstones() checkExemplars() } @@ -3311,6 +3329,11 @@ func TestChunkSnapshot(t *testing.T) { for i := 1; i <= numSeries; i++ { lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", i)) lblStr := lbls.String() + lblsHist := labels.FromStrings("hist", fmt.Sprintf("baz%d", i)) + lblsHistStr := lblsHist.String() + lblsFloatHist := labels.FromStrings("floathist", fmt.Sprintf("bat%d", i)) + lblsFloatHistStr := lblsFloatHist.String() + // 240 samples should m-map at least 1 chunk. for ts := int64(1); ts <= 240; ts++ { val := rand.Float64() @@ -3318,6 +3341,16 @@ func TestChunkSnapshot(t *testing.T) { ref, err := app.Append(0, lbls, ts, val) require.NoError(t, err) + hist := histograms[int(ts)] + expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{ts, 0, hist, nil}) + _, err = app.AppendHistogram(0, lblsHist, ts, hist, nil) + require.NoError(t, err) + + floatHist := floatHistogram[int(ts)] + expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{ts, 0, nil, floatHist}) + _, err = app.AppendHistogram(0, lblsFloatHist, ts, nil, floatHist) + require.NoError(t, err) + // Add an exemplar and to create multiple WAL records. 
if ts%10 == 0 { addExemplar(app, ref, lbls, ts) @@ -3371,6 +3404,11 @@ func TestChunkSnapshot(t *testing.T) { for i := 1; i <= numSeries; i++ { lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", i)) lblStr := lbls.String() + lblsHist := labels.FromStrings("hist", fmt.Sprintf("baz%d", i)) + lblsHistStr := lblsHist.String() + lblsFloatHist := labels.FromStrings("floathist", fmt.Sprintf("bat%d", i)) + lblsFloatHistStr := lblsFloatHist.String() + // 240 samples should m-map at least 1 chunk. for ts := int64(241); ts <= 480; ts++ { val := rand.Float64() @@ -3378,6 +3416,16 @@ func TestChunkSnapshot(t *testing.T) { ref, err := app.Append(0, lbls, ts, val) require.NoError(t, err) + hist := histograms[int(ts)] + expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{ts, 0, hist, nil}) + _, err = app.AppendHistogram(0, lblsHist, ts, hist, nil) + require.NoError(t, err) + + floatHist := floatHistogram[int(ts)] + expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{ts, 0, nil, floatHist}) + _, err = app.AppendHistogram(0, lblsFloatHist, ts, nil, floatHist) + require.NoError(t, err) + // Add an exemplar and to create multiple WAL records. if ts%10 == 0 { addExemplar(app, ref, lbls, ts) @@ -3468,6 +3516,19 @@ func TestSnapshotError(t *testing.T) { lbls := labels.FromStrings("foo", "bar") _, err := app.Append(0, lbls, 99, 99) require.NoError(t, err) + + // Add histograms + hist := tsdbutil.GenerateTestGaugeHistograms(1)[0] + floatHist := tsdbutil.GenerateTestGaugeFloatHistograms(1)[0] + lblsHist := labels.FromStrings("hist", "bar") + lblsFloatHist := labels.FromStrings("floathist", "bar") + + _, err = app.AppendHistogram(0, lblsHist, 99, hist, nil) + require.NoError(t, err) + + _, err = app.AppendHistogram(0, lblsFloatHist, 99, nil, floatHist) + require.NoError(t, err) + require.NoError(t, app.Commit()) // Add some tombstones. diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 71120c55e1..2fe33befba 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -943,10 +943,12 @@ const ( ) type chunkSnapshotRecord struct { - ref chunks.HeadSeriesRef - lset labels.Labels - mc *memChunk - lastValue float64 + ref chunks.HeadSeriesRef + lset labels.Labels + mc *memChunk + lastValue float64 + lastHistogramValue *histogram.Histogram + lastFloatHistogramValue *histogram.FloatHistogram } func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte { @@ -961,18 +963,27 @@ func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte { if s.headChunk == nil { buf.PutUvarint(0) } else { + enc := s.headChunk.chunk.Encoding() buf.PutUvarint(1) buf.PutBE64int64(s.headChunk.minTime) buf.PutBE64int64(s.headChunk.maxTime) - buf.PutByte(byte(s.headChunk.chunk.Encoding())) + buf.PutByte(byte(enc)) buf.PutUvarintBytes(s.headChunk.chunk.Bytes()) - // Backwards compatibility for old sampleBuf which had last 4 samples. - for i := 0; i < 3; i++ { + + switch enc { + case chunkenc.EncXOR: + // Backwards compatibility for old sampleBuf which had last 4 samples. + for i := 0; i < 3; i++ { + buf.PutBE64int64(0) + buf.PutBEFloat64(0) + } buf.PutBE64int64(0) - buf.PutBEFloat64(0) + buf.PutBEFloat64(s.lastValue) + case chunkenc.EncHistogram: + record.EncodeHistogram(&buf, s.lastHistogramValue) + default: // chunkenc.FloatHistogram. 
+ record.EncodeFloatHistogram(&buf, s.lastFloatHistogramValue) } - buf.PutBE64int64(0) - buf.PutBEFloat64(s.lastValue) } s.Unlock() @@ -1012,13 +1023,22 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh } csr.mc.chunk = chk - // Backwards-compatibility for old sampleBuf which had last 4 samples. - for i := 0; i < 3; i++ { + switch enc { + case chunkenc.EncXOR: + // Backwards-compatibility for old sampleBuf which had last 4 samples. + for i := 0; i < 3; i++ { + _ = dec.Be64int64() + _ = dec.Be64Float64() + } _ = dec.Be64int64() - _ = dec.Be64Float64() + csr.lastValue = dec.Be64Float64() + case chunkenc.EncHistogram: + csr.lastHistogramValue = &histogram.Histogram{} + record.DecodeHistogram(&dec, csr.lastHistogramValue) + default: // chunkenc.FloatHistogram. + csr.lastFloatHistogramValue = &histogram.FloatHistogram{} + record.DecodeFloatHistogram(&dec, csr.lastFloatHistogramValue) } - _ = dec.Be64int64() - csr.lastValue = dec.Be64Float64() err = dec.Err() if err != nil && len(dec.B) > 0 { @@ -1396,6 +1416,8 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie series.nextAt = csr.mc.maxTime // This will create a new chunk on append. series.headChunk = csr.mc series.lastValue = csr.lastValue + series.lastHistogramValue = csr.lastHistogramValue + series.lastFloatHistogramValue = csr.lastFloatHistogramValue app, err := series.headChunk.chunk.Appender() if err != nil { diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 231b8b3c13..4cd51d46c0 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -441,59 +441,63 @@ func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) H: &histogram.Histogram{}, } - rh.H.CounterResetHint = histogram.CounterResetHint(dec.Byte()) + DecodeHistogram(&dec, rh.H) + histograms = append(histograms, rh) + } - rh.H.Schema = int32(dec.Varint64()) - rh.H.ZeroThreshold = math.Float64frombits(dec.Be64()) + if dec.Err() != nil { + return nil, errors.Wrapf(dec.Err(), "decode error after %d histograms", len(histograms)) + } + if len(dec.B) > 0 { + return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + } + return histograms, nil +} - rh.H.ZeroCount = dec.Uvarint64() - rh.H.Count = dec.Uvarint64() - rh.H.Sum = math.Float64frombits(dec.Be64()) +// DecodeHistogram decodes a Histogram from a byte slice. 
+func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) { + h.CounterResetHint = histogram.CounterResetHint(buf.Byte()) - l := dec.Uvarint() - if l > 0 { - rh.H.PositiveSpans = make([]histogram.Span, l) - } - for i := range rh.H.PositiveSpans { - rh.H.PositiveSpans[i].Offset = int32(dec.Varint64()) - rh.H.PositiveSpans[i].Length = dec.Uvarint32() - } + h.Schema = int32(buf.Varint64()) + h.ZeroThreshold = math.Float64frombits(buf.Be64()) - l = dec.Uvarint() - if l > 0 { - rh.H.NegativeSpans = make([]histogram.Span, l) - } - for i := range rh.H.NegativeSpans { - rh.H.NegativeSpans[i].Offset = int32(dec.Varint64()) - rh.H.NegativeSpans[i].Length = dec.Uvarint32() - } + h.ZeroCount = buf.Uvarint64() + h.Count = buf.Uvarint64() + h.Sum = math.Float64frombits(buf.Be64()) - l = dec.Uvarint() - if l > 0 { - rh.H.PositiveBuckets = make([]int64, l) - } - for i := range rh.H.PositiveBuckets { - rh.H.PositiveBuckets[i] = dec.Varint64() - } + l := buf.Uvarint() + if l > 0 { + h.PositiveSpans = make([]histogram.Span, l) + } + for i := range h.PositiveSpans { + h.PositiveSpans[i].Offset = int32(buf.Varint64()) + h.PositiveSpans[i].Length = buf.Uvarint32() + } - l = dec.Uvarint() - if l > 0 { - rh.H.NegativeBuckets = make([]int64, l) - } - for i := range rh.H.NegativeBuckets { - rh.H.NegativeBuckets[i] = dec.Varint64() - } + l = buf.Uvarint() + if l > 0 { + h.NegativeSpans = make([]histogram.Span, l) + } + for i := range h.NegativeSpans { + h.NegativeSpans[i].Offset = int32(buf.Varint64()) + h.NegativeSpans[i].Length = buf.Uvarint32() + } - histograms = append(histograms, rh) + l = buf.Uvarint() + if l > 0 { + h.PositiveBuckets = make([]int64, l) + } + for i := range h.PositiveBuckets { + h.PositiveBuckets[i] = buf.Varint64() } - if dec.Err() != nil { - return nil, errors.Wrapf(dec.Err(), "decode error after %d histograms", len(histograms)) + l = buf.Uvarint() + if l > 0 { + h.NegativeBuckets = make([]int64, l) } - if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + for i := range h.NegativeBuckets { + h.NegativeBuckets[i] = buf.Varint64() } - return histograms, nil } func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) { @@ -519,59 +523,63 @@ func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogr FH: &histogram.FloatHistogram{}, } - rh.FH.CounterResetHint = histogram.CounterResetHint(dec.Byte()) + DecodeFloatHistogram(&dec, rh.FH) + histograms = append(histograms, rh) + } - rh.FH.Schema = int32(dec.Varint64()) - rh.FH.ZeroThreshold = dec.Be64Float64() + if dec.Err() != nil { + return nil, errors.Wrapf(dec.Err(), "decode error after %d histograms", len(histograms)) + } + if len(dec.B) > 0 { + return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + } + return histograms, nil +} - rh.FH.ZeroCount = dec.Be64Float64() - rh.FH.Count = dec.Be64Float64() - rh.FH.Sum = dec.Be64Float64() +// Decode decodes a Histogram from a byte slice. 
+func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) { + fh.CounterResetHint = histogram.CounterResetHint(buf.Byte()) - l := dec.Uvarint() - if l > 0 { - rh.FH.PositiveSpans = make([]histogram.Span, l) - } - for i := range rh.FH.PositiveSpans { - rh.FH.PositiveSpans[i].Offset = int32(dec.Varint64()) - rh.FH.PositiveSpans[i].Length = dec.Uvarint32() - } + fh.Schema = int32(buf.Varint64()) + fh.ZeroThreshold = buf.Be64Float64() - l = dec.Uvarint() - if l > 0 { - rh.FH.NegativeSpans = make([]histogram.Span, l) - } - for i := range rh.FH.NegativeSpans { - rh.FH.NegativeSpans[i].Offset = int32(dec.Varint64()) - rh.FH.NegativeSpans[i].Length = dec.Uvarint32() - } + fh.ZeroCount = buf.Be64Float64() + fh.Count = buf.Be64Float64() + fh.Sum = buf.Be64Float64() - l = dec.Uvarint() - if l > 0 { - rh.FH.PositiveBuckets = make([]float64, l) - } - for i := range rh.FH.PositiveBuckets { - rh.FH.PositiveBuckets[i] = dec.Be64Float64() - } + l := buf.Uvarint() + if l > 0 { + fh.PositiveSpans = make([]histogram.Span, l) + } + for i := range fh.PositiveSpans { + fh.PositiveSpans[i].Offset = int32(buf.Varint64()) + fh.PositiveSpans[i].Length = buf.Uvarint32() + } - l = dec.Uvarint() - if l > 0 { - rh.FH.NegativeBuckets = make([]float64, l) - } - for i := range rh.FH.NegativeBuckets { - rh.FH.NegativeBuckets[i] = dec.Be64Float64() - } + l = buf.Uvarint() + if l > 0 { + fh.NegativeSpans = make([]histogram.Span, l) + } + for i := range fh.NegativeSpans { + fh.NegativeSpans[i].Offset = int32(buf.Varint64()) + fh.NegativeSpans[i].Length = buf.Uvarint32() + } - histograms = append(histograms, rh) + l = buf.Uvarint() + if l > 0 { + fh.PositiveBuckets = make([]float64, l) + } + for i := range fh.PositiveBuckets { + fh.PositiveBuckets[i] = buf.Be64Float64() } - if dec.Err() != nil { - return nil, errors.Wrapf(dec.Err(), "decode error after %d histograms", len(histograms)) + l = buf.Uvarint() + if l > 0 { + fh.NegativeBuckets = make([]float64, l) } - if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + for i := range fh.NegativeBuckets { + fh.NegativeBuckets[i] = buf.Be64Float64() } - return histograms, nil } // Encoder encodes series, sample, and tombstones records. @@ -719,39 +727,44 @@ func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) [] buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) buf.PutVarint64(h.T - first.T) - buf.PutByte(byte(h.H.CounterResetHint)) + EncodeHistogram(&buf, h.H) + } - buf.PutVarint64(int64(h.H.Schema)) - buf.PutBE64(math.Float64bits(h.H.ZeroThreshold)) + return buf.Get() +} - buf.PutUvarint64(h.H.ZeroCount) - buf.PutUvarint64(h.H.Count) - buf.PutBE64(math.Float64bits(h.H.Sum)) +// EncodeHistogram encodes a Histogram into a byte slice. 
+func EncodeHistogram(buf *encoding.Encbuf, h *histogram.Histogram) { + buf.PutByte(byte(h.CounterResetHint)) - buf.PutUvarint(len(h.H.PositiveSpans)) - for _, s := range h.H.PositiveSpans { - buf.PutVarint64(int64(s.Offset)) - buf.PutUvarint32(s.Length) - } + buf.PutVarint64(int64(h.Schema)) + buf.PutBE64(math.Float64bits(h.ZeroThreshold)) - buf.PutUvarint(len(h.H.NegativeSpans)) - for _, s := range h.H.NegativeSpans { - buf.PutVarint64(int64(s.Offset)) - buf.PutUvarint32(s.Length) - } + buf.PutUvarint64(h.ZeroCount) + buf.PutUvarint64(h.Count) + buf.PutBE64(math.Float64bits(h.Sum)) - buf.PutUvarint(len(h.H.PositiveBuckets)) - for _, b := range h.H.PositiveBuckets { - buf.PutVarint64(b) - } + buf.PutUvarint(len(h.PositiveSpans)) + for _, s := range h.PositiveSpans { + buf.PutVarint64(int64(s.Offset)) + buf.PutUvarint32(s.Length) + } - buf.PutUvarint(len(h.H.NegativeBuckets)) - for _, b := range h.H.NegativeBuckets { - buf.PutVarint64(b) - } + buf.PutUvarint(len(h.NegativeSpans)) + for _, s := range h.NegativeSpans { + buf.PutVarint64(int64(s.Offset)) + buf.PutUvarint32(s.Length) } - return buf.Get() + buf.PutUvarint(len(h.PositiveBuckets)) + for _, b := range h.PositiveBuckets { + buf.PutVarint64(b) + } + + buf.PutUvarint(len(h.NegativeBuckets)) + for _, b := range h.NegativeBuckets { + buf.PutVarint64(b) + } } func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { @@ -772,37 +785,42 @@ func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) buf.PutVarint64(h.T - first.T) - buf.PutByte(byte(h.FH.CounterResetHint)) + EncodeFloatHistogram(&buf, h.FH) + } + + return buf.Get() +} - buf.PutVarint64(int64(h.FH.Schema)) - buf.PutBEFloat64(h.FH.ZeroThreshold) +// Encode encodes the Float Histogram into a byte slice. +func EncodeFloatHistogram(buf *encoding.Encbuf, h *histogram.FloatHistogram) { + buf.PutByte(byte(h.CounterResetHint)) - buf.PutBEFloat64(h.FH.ZeroCount) - buf.PutBEFloat64(h.FH.Count) - buf.PutBEFloat64(h.FH.Sum) + buf.PutVarint64(int64(h.Schema)) + buf.PutBEFloat64(h.ZeroThreshold) - buf.PutUvarint(len(h.FH.PositiveSpans)) - for _, s := range h.FH.PositiveSpans { - buf.PutVarint64(int64(s.Offset)) - buf.PutUvarint32(s.Length) - } + buf.PutBEFloat64(h.ZeroCount) + buf.PutBEFloat64(h.Count) + buf.PutBEFloat64(h.Sum) - buf.PutUvarint(len(h.FH.NegativeSpans)) - for _, s := range h.FH.NegativeSpans { - buf.PutVarint64(int64(s.Offset)) - buf.PutUvarint32(s.Length) - } + buf.PutUvarint(len(h.PositiveSpans)) + for _, s := range h.PositiveSpans { + buf.PutVarint64(int64(s.Offset)) + buf.PutUvarint32(s.Length) + } - buf.PutUvarint(len(h.FH.PositiveBuckets)) - for _, b := range h.FH.PositiveBuckets { - buf.PutBEFloat64(b) - } + buf.PutUvarint(len(h.NegativeSpans)) + for _, s := range h.NegativeSpans { + buf.PutVarint64(int64(s.Offset)) + buf.PutUvarint32(s.Length) + } - buf.PutUvarint(len(h.FH.NegativeBuckets)) - for _, b := range h.FH.NegativeBuckets { - buf.PutBEFloat64(b) - } + buf.PutUvarint(len(h.PositiveBuckets)) + for _, b := range h.PositiveBuckets { + buf.PutBEFloat64(b) } - return buf.Get() + buf.PutUvarint(len(h.NegativeBuckets)) + for _, b := range h.NegativeBuckets { + buf.PutBEFloat64(b) + } }
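For illustration only (an editor's sketch, not part of the patch series): the helpers exported above round-trip cleanly, which is essentially what the chunk-snapshot path in head_wal.go now relies on for the head chunk's last (float) histogram value. The snippet assumes the tsdb/encoding, tsdb/record and model/histogram packages already used throughout this diff.

    // roundTripHistogram is a hypothetical helper, not part of the patch; it
    // only illustrates that EncodeHistogram and DecodeHistogram are inverses.
    func roundTripHistogram(h *histogram.Histogram) *histogram.Histogram {
        var enc encoding.Encbuf
        record.EncodeHistogram(&enc, h)      // write h into the buffer
        dec := encoding.Decbuf{B: enc.Get()} // wrap the encoded bytes
        out := &histogram.Histogram{}
        record.DecodeHistogram(&dec, out)    // rebuild the histogram
        return out
    }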