diff --git a/CHANGELOG.md b/CHANGELOG.md index fba72a0e3548..6989204d9816 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,8 @@ Main (unreleased) - New Grafana Agent Flow components: - `otelcol.receiver.kafka` receives telemetry data from Kafka. (@rfratto) + - `phlare.scrape` collects application performance profiles. (@cyriltovena) + - `phlare.write` sends application performance profiles to Grafana Phlare. (@cyriltovena) ### Enhancements @@ -440,7 +442,6 @@ v0.25.1 (2022-06-16) - Unwrap replayWAL error before attempting corruption repair. (@rlankfo) - v0.25.0 (2022-06-06) -------------------- @@ -479,7 +480,7 @@ v0.25.0 (2022-06-06) - Enable `proxy_url` support on `oauth2` for metrics and logs (update **prometheus/common** dependency to `v0.33.0`). (@martin-jaeger-maersk) -- `extra-scrape-metrics` can now be enabled with the `--enable-features=extra-scrape-metrics` feature flag. See https://prometheus.io/docs/prometheus/2.31/feature_flags/#extra-scrape-metrics for details. (@rlankfo) +- `extra-scrape-metrics` can now be enabled with the `--enable-features=extra-scrape-metrics` feature flag. See <https://prometheus.io/docs/prometheus/2.31/feature_flags/#extra-scrape-metrics> for details. (@rlankfo) - Resolved issue in v2 integrations where if an instance name was a prefix of another the route handler would fail to match requests on the longer name (@mattdurham) @@ -895,7 +896,7 @@ v0.21.0 (2021-11-17) v0.20.1 (2021-12-08) -------------------- -> *NOTE*: The fixes in this patch are only present in v0.20.1 and >=v0.21.2. +> _NOTE_: The fixes in this patch are only present in v0.20.1 and >=v0.21.2. ### Security fixes @@ -994,8 +995,8 @@ v0.19.0 (2021-09-29) - Reduced verbosity of tracing autologging by not logging `STATUS_CODE_UNSET` status codes. (@mapno) -- Operator: rename Prometheus* CRDs to Metrics* and Prometheus* fields to - Metrics*. (@rfratto) +- Operator: rename `Prometheus*` CRDs to `Metrics*` and `Prometheus*` fields to + `Metrics*`. (@rfratto) - Operator: CRDs are no longer referenced using a hyphen in the name to be consistent with how Kubernetes refers to resources. (@rfratto) @@ -1254,7 +1255,7 @@ v0.14.0 (2021-05-24) ### Security fixes -* The Scraping service API will now reject configs that read credentials from +- The Scraping service API will now reject configs that read credentials from disk by default. This prevents malicious users from reading arbitrary files and sending their contents over the network. The old behavior can be re-enabled by setting `dangerous_allow_reading_files: true` in the scraping @@ -1262,7 +1263,7 @@ v0.14.0 (2021-05-24) ### Breaking changes -* Configuration for SigV4 has changed. (@rfratto) +- Configuration for SigV4 has changed. (@rfratto) ### Deprecations @@ -1415,9 +1416,9 @@ v0.12.0 (2021-02-05) ### Breaking Changes -* The configuration format for the `loki` block has changed. (@rfratto) +- The configuration format for the `loki` block has changed. (@rfratto) -* The configuration format for the `tempo` block has changed. (@rfratto) +- The configuration format for the `tempo` block has changed. (@rfratto) ### Features @@ -1601,7 +1602,7 @@ v0.8.0 (2020-11-06) ### Enhancements - Add `_build_info` metric to all integrations. The build - info displayed will match the build information of the Agent and *not* the + info displayed will match the build information of the Agent and _not_ the embedded exporter. This metric is used by community dashboards, so adding it to the Agent increases compatibility with existing dashboards that depend on it existing.
(@rfratto) @@ -1980,7 +1981,7 @@ v0.1.0 (2020-03-16) ### Features -* Support for scraping Prometheus metrics and sharding the agent through the +- Support for scraping Prometheus metrics and sharding the agent through the presence of a `host_filter` flag within the Agent configuration file. [upgrade guide]: https://grafana.com/docs/agent/latest/upgrade-guide/ diff --git a/component/all/all.go b/component/all/all.go index 6a39c43c2573..38fffc47f0b3 100644 --- a/component/all/all.go +++ b/component/all/all.go @@ -23,6 +23,8 @@ import ( _ "github.com/grafana/agent/component/otelcol/receiver/kafka" // Import otelcol.receiver.kafka _ "github.com/grafana/agent/component/otelcol/receiver/otlp" // Import otelcol.receiver.otlp _ "github.com/grafana/agent/component/otelcol/receiver/prometheus" // Import otelcol.receiver.prometheus + _ "github.com/grafana/agent/component/phlare/scrape" // Import phlare.scrape + _ "github.com/grafana/agent/component/phlare/write" // Import phlare.write _ "github.com/grafana/agent/component/prometheus/integration/node_exporter" // Import prometheus.integration.node_exporter _ "github.com/grafana/agent/component/prometheus/relabel" // Import prometheus.relabel _ "github.com/grafana/agent/component/prometheus/remotewrite" // Import prometheus.remote_write diff --git a/component/phlare/appender.go b/component/phlare/appender.go new file mode 100644 index 000000000000..9bce62b8c010 --- /dev/null +++ b/component/phlare/appender.go @@ -0,0 +1,119 @@ +package phlare + +import ( + "context" + "sync" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/model/labels" +) + +var NoopAppendable = AppendableFunc(func(_ context.Context, _ labels.Labels, _ []*RawSample) error { return nil }) + +type Appendable interface { + Appender() Appender +} + +type Appender interface { + Append(ctx context.Context, labels labels.Labels, samples []*RawSample) error +} + +type RawSample struct { + // raw_profile is the set of bytes of the pprof profile + RawProfile []byte +} + +var _ Appendable = (*Fanout)(nil) + +// Fanout supports the default Flow style of appendables since it can go to multiple outputs. It also allows the intercepting of appends. +type Fanout struct { + mut sync.RWMutex + // children is where to fan out. + children []Appendable + // ComponentID is what component this belongs to. + componentID string + writeLatency prometheus.Histogram +} + +// NewFanout creates a fanout appendable. +func NewFanout(children []Appendable, componentID string, register prometheus.Registerer) *Fanout { + wl := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "phlare_fanout_latency", + Help: "Write latency for sending to phlare profiles", + }) + _ = register.Register(wl) + return &Fanout{ + children: children, + componentID: componentID, + writeLatency: wl, + } +} + +// UpdateChildren allows changing of the children of the fanout. +func (f *Fanout) UpdateChildren(children []Appendable) { + f.mut.Lock() + defer f.mut.Unlock() + f.children = children +} + +// Children returns the children of the fanout. +func (f *Fanout) Children() []Appendable { + f.mut.Lock() + defer f.mut.Unlock() + return f.children +} + +// Appender satisfies the Appendable interface. 
+func (f *Fanout) Appender() Appender { + f.mut.RLock() + defer f.mut.RUnlock() + + app := &appender{ + children: make([]Appender, 0), + componentID: f.componentID, + writeLatency: f.writeLatency, + } + for _, x := range f.children { + if x == nil { + continue + } + app.children = append(app.children, x.Appender()) + } + return app +} + +var _ Appender = (*appender)(nil) + +type appender struct { + children []Appender + componentID string + writeLatency prometheus.Histogram +} + +// Append satisfies the Appender interface. +func (a *appender) Append(ctx context.Context, labels labels.Labels, samples []*RawSample) error { + now := time.Now() + defer func() { + a.writeLatency.Observe(time.Since(now).Seconds()) + }() + var multiErr error + for _, x := range a.children { + err := x.Append(ctx, labels, samples) + if err != nil { + multiErr = multierror.Append(multiErr, err) + } + } + return multiErr +} + +type AppendableFunc func(ctx context.Context, labels labels.Labels, samples []*RawSample) error + +func (f AppendableFunc) Append(ctx context.Context, labels labels.Labels, samples []*RawSample) error { + return f(ctx, labels, samples) +} + +func (f AppendableFunc) Appender() Appender { + return f +} diff --git a/component/phlare/appender_test.go b/component/phlare/appender_test.go new file mode 100644 index 000000000000..65b251bf2fb1 --- /dev/null +++ b/component/phlare/appender_test.go @@ -0,0 +1,53 @@ +package phlare + +import ( + "context" + "errors" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" +) + +func Test_FanOut(t *testing.T) { + totalAppend := atomic.NewInt32(0) + lbls := labels.Labels{ + {Name: "foo", Value: "bar"}, + } + f := NewFanout([]Appendable{ + AppendableFunc(func(_ context.Context, labels labels.Labels, _ []*RawSample) error { + require.Equal(t, lbls, labels) + totalAppend.Inc() + return nil + }), + AppendableFunc(func(_ context.Context, labels labels.Labels, _ []*RawSample) error { + require.Equal(t, lbls, labels) + totalAppend.Inc() + return nil + }), + AppendableFunc(func(_ context.Context, labels labels.Labels, _ []*RawSample) error { + require.Equal(t, lbls, labels) + totalAppend.Inc() + return nil + }), + }, "foo", prometheus.NewRegistry()) + require.NoError(t, f.Appender().Append(context.Background(), lbls, []*RawSample{})) + require.Equal(t, int32(3), totalAppend.Load()) + f.UpdateChildren([]Appendable{ + AppendableFunc(func(_ context.Context, labels labels.Labels, _ []*RawSample) error { + require.Equal(t, lbls, labels) + totalAppend.Inc() + return errors.New("foo") + }), + AppendableFunc(func(_ context.Context, labels labels.Labels, _ []*RawSample) error { + require.Equal(t, lbls, labels) + totalAppend.Inc() + return nil + }), + }) + totalAppend.Store(0) + require.Error(t, f.Appender().Append(context.Background(), lbls, []*RawSample{})) + require.Equal(t, int32(2), totalAppend.Load()) +} diff --git a/component/phlare/scrape/manager.go b/component/phlare/scrape/manager.go new file mode 100644 index 000000000000..1d99780dc760 --- /dev/null +++ b/component/phlare/scrape/manager.go @@ -0,0 +1,195 @@ +package scrape + +import ( + "errors" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/prometheus/discovery/targetgroup" + + "github.com/grafana/agent/component/phlare" +) + +var reloadInterval = 5 * time.Second + +type Manager struct { + logger log.Logger + + graceShut chan struct{} + 
appendable phlare.Appendable + + mtxScrape sync.Mutex // Guards the fields below. + config Arguments + targetsGroups map[string]*scrapePool + targetSets map[string][]*targetgroup.Group + + triggerReload chan struct{} +} + +func NewManager(appendable phlare.Appendable, logger log.Logger) *Manager { + if logger == nil { + logger = log.NewNopLogger() + } + return &Manager{ + logger: logger, + appendable: appendable, + graceShut: make(chan struct{}), + triggerReload: make(chan struct{}, 1), + targetsGroups: make(map[string]*scrapePool), + targetSets: make(map[string][]*targetgroup.Group), + } +} + +// Run receives and saves target set updates and triggers the scraping loops reloading. +// Reloading happens in the background so that it doesn't block receiving targets updates. +func (m *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { + go m.reloader() + for { + select { + case ts := <-tsets: + m.updateTsets(ts) + + select { + case m.triggerReload <- struct{}{}: + default: + } + + case <-m.graceShut: + return + } + } +} + +func (m *Manager) reloader() { + ticker := time.NewTicker(reloadInterval) + + defer ticker.Stop() + + for { + select { + case <-m.graceShut: + return + case <-ticker.C: + select { + case <-m.triggerReload: + m.reload() + case <-m.graceShut: + return + } + } + } +} + +func (m *Manager) reload() { + m.mtxScrape.Lock() + var wg sync.WaitGroup + for setName, groups := range m.targetSets { + if _, ok := m.targetsGroups[setName]; !ok { + sp, err := newScrapePool(m.config, m.appendable, log.With(m.logger, "scrape_pool", setName)) + if err != nil { + level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName) + continue + } + m.targetsGroups[setName] = sp + } + + wg.Add(1) + // Run the sync in parallel as these take a while and at high load can't catch up. + go func(sp *scrapePool, groups []*targetgroup.Group) { + sp.sync(groups) + wg.Done() + }(m.targetsGroups[setName], groups) + } + m.mtxScrape.Unlock() + wg.Wait() +} + +// ApplyConfig resets the manager's target providers and job configurations as defined by the new cfg. +func (m *Manager) ApplyConfig(cfg Arguments) error { + m.mtxScrape.Lock() + defer m.mtxScrape.Unlock() + // Cleanup and reload pool if the configuration has changed. + var failed bool + m.config = cfg + + for name, sp := range m.targetsGroups { + err := sp.reload(cfg) + if err != nil { + level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name) + failed = true + } + } + + if failed { + return errors.New("failed to apply the new configuration") + } + return nil +} + +func (m *Manager) updateTsets(tsets map[string][]*targetgroup.Group) { + m.mtxScrape.Lock() + m.targetSets = tsets + m.mtxScrape.Unlock() +} + +// TargetsAll returns active and dropped targets grouped by job_name. +func (m *Manager) TargetsAll() map[string][]*Target { + m.mtxScrape.Lock() + defer m.mtxScrape.Unlock() + + targets := make(map[string][]*Target, len(m.targetsGroups)) + for tset, sp := range m.targetsGroups { + targets[tset] = append(sp.ActiveTargets(), sp.DroppedTargets()...) + } + return targets +} + +// TargetsActive returns the active targets currently being scraped. 
+func (m *Manager) TargetsActive() map[string][]*Target { + m.mtxScrape.Lock() + defer m.mtxScrape.Unlock() + + var ( + wg sync.WaitGroup + mtx sync.Mutex + ) + + targets := make(map[string][]*Target, len(m.targetsGroups)) + wg.Add(len(m.targetsGroups)) + for tset, sp := range m.targetsGroups { + // Running in parallel limits the blocking time of scrapePool to scrape + // interval when there's an update from SD. + go func(tset string, sp *scrapePool) { + mtx.Lock() + targets[tset] = sp.ActiveTargets() + mtx.Unlock() + wg.Done() + }(tset, sp) + } + wg.Wait() + return targets +} + +// TargetsDropped returns the dropped targets during relabelling. +func (m *Manager) TargetsDropped() map[string][]*Target { + m.mtxScrape.Lock() + defer m.mtxScrape.Unlock() + + targets := make(map[string][]*Target, len(m.targetsGroups)) + for tset, sp := range m.targetsGroups { + targets[tset] = sp.DroppedTargets() + } + return targets +} + +func (m *Manager) Stop() { + m.mtxScrape.Lock() + defer m.mtxScrape.Unlock() + + for _, sp := range m.targetsGroups { + sp.stop() + } + close(m.graceShut) +} diff --git a/component/phlare/scrape/manager_test.go b/component/phlare/scrape/manager_test.go new file mode 100644 index 000000000000..bdb4dafadbbe --- /dev/null +++ b/component/phlare/scrape/manager_test.go @@ -0,0 +1,74 @@ +package scrape + +import ( + "context" + "testing" + "time" + + "github.com/grafana/agent/component/phlare" + "github.com/grafana/agent/pkg/util" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" +) + +func TestManager(t *testing.T) { + reloadInterval = time.Millisecond + + m := NewManager(phlare.AppendableFunc(func(ctx context.Context, labels labels.Labels, samples []*phlare.RawSample) error { + return nil + }), util.TestLogger(t)) + + defer m.Stop() + targetSetsChan := make(chan map[string][]*targetgroup.Group) + require.NoError(t, m.ApplyConfig(NewDefaultArguments())) + go m.Run(targetSetsChan) + + targetSetsChan <- map[string][]*targetgroup.Group{ + "group1": { + { + Targets: []model.LabelSet{ + {model.AddressLabel: "localhost:9090"}, + {model.AddressLabel: "localhost:8080"}, + }, + Labels: model.LabelSet{"foo": "bar"}, + }, + }, + } + require.Eventually(t, func() bool { + return len(m.TargetsActive()["group1"]) == 10 + }, time.Second, 10*time.Millisecond) + + new := NewDefaultArguments() + new.ScrapeInterval = 1 * time.Second + + // Trigger a config reload + require.NoError(t, m.ApplyConfig(new)) + + targetSetsChan <- map[string][]*targetgroup.Group{ + "group2": { + { + Targets: []model.LabelSet{ + {model.AddressLabel: "localhost:9090"}, + {model.AddressLabel: "localhost:8080"}, + }, + Labels: model.LabelSet{"foo": "bar"}, + }, + }, + } + + require.Eventually(t, func() bool { + return len(m.TargetsActive()["group2"]) == 10 + }, time.Second, 10*time.Millisecond) + + for _, ts := range m.targetsGroups { + require.Equal(t, 1*time.Second, ts.config.ScrapeInterval) + } + + targetSetsChan <- map[string][]*targetgroup.Group{"group1": {}, "group2": {}} + + require.Eventually(t, func() bool { + return len(m.TargetsAll()["group2"]) == 0 && len(m.TargetsAll()["group1"]) == 0 + }, time.Second, 10*time.Millisecond) +} diff --git a/component/phlare/scrape/scrape.go b/component/phlare/scrape/scrape.go new file mode 100644 index 000000000000..b1c5b8e9bf09 --- /dev/null +++ b/component/phlare/scrape/scrape.go @@ -0,0 +1,345 @@ +package scrape + +import ( + "context" + "fmt" 
+ "net/url" + "sync" + "time" + + "github.com/go-kit/log/level" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + + "github.com/grafana/agent/component" + component_config "github.com/grafana/agent/component/common/config" + "github.com/grafana/agent/component/discovery" + "github.com/grafana/agent/component/phlare" + "github.com/grafana/agent/component/prometheus/scrape" +) + +const ( + pprofMemory string = "memory" + pprofBlock string = "block" + pprofGoroutine string = "goroutine" + pprofMutex string = "mutex" + pprofProcessCPU string = "process_cpu" + pprofFgprof string = "fgprof" +) + +func init() { + component.Register(component.Registration{ + Name: "phlare.scrape", + Args: Arguments{}, + + Build: func(opts component.Options, args component.Arguments) (component.Component, error) { + return New(opts, args.(Arguments)) + }, + }) +} + +// Arguments holds values which are used to configure the pprof.scrape +// component. +type Arguments struct { + Targets []discovery.Target `river:"targets,attr"` + ForwardTo []phlare.Appendable `river:"forward_to,attr"` + + // The job name to override the job label with. + JobName string `river:"job_name,attr,optional"` + // A set of query parameters with which the target is scraped. + Params url.Values `river:"params,attr,optional"` + // How frequently to scrape the targets of this scrape config. + ScrapeInterval time.Duration `river:"scrape_interval,attr,optional"` + // The timeout for scraping targets of this config. + ScrapeTimeout time.Duration `river:"scrape_timeout,attr,optional"` + // The URL scheme with which to fetch metrics from targets. + Scheme string `river:"scheme,attr,optional"` + + // todo(ctovena): add support for limits. + // // An uncompressed response body larger than this many bytes will cause the + // // scrape to fail. 0 means no limit. + // BodySizeLimit units.Base2Bytes `river:"body_size_limit,attr,optional"` + // // More than this many targets after the target relabeling will cause the + // // scrapes to fail. + // TargetLimit uint `river:"target_limit,attr,optional"` + // // More than this many labels post metric-relabeling will cause the scrape + // // to fail. + // LabelLimit uint `river:"label_limit,attr,optional"` + // // More than this label name length post metric-relabeling will cause the + // // scrape to fail. + // LabelNameLengthLimit uint `river:"label_name_length_limit,attr,optional"` + // // More than this label value length post metric-relabeling will cause the + // // scrape to fail. + // LabelValueLengthLimit uint `river:"label_value_length_limit,attr,optional"` + + HTTPClientConfig component_config.HTTPClientConfig `river:"http_client_config,block,optional"` + + ProfilingConfig ProfilingConfig `river:"profiling_config,block,optional"` +} + +type ProfilingConfig struct { + Memory ProfilingTarget `river:"profile.memory,block,optional"` + Block ProfilingTarget `river:"profile.block,block,optional"` + Goroutine ProfilingTarget `river:"profile.goroutine,block,optional"` + Mutex ProfilingTarget `river:"profile.mutex,block,optional"` + ProcessCPU ProfilingTarget `river:"profile.process_cpu,block,optional"` + FGProf ProfilingTarget `river:"profile.fgprof,block,optional"` + Custom []CustomProfilingTarget `river:"profile.custom,block,optional"` + + PprofPrefix string `river:"path_prefix,attr,optional"` +} + +// AllTargets returns the set of all standard and custom profiling targets, +// regardless of whether they're enabled. 
The key in the map indicates the name +// of the target. +func (cfg ProfilingConfig) AllTargets() map[string]ProfilingTarget { + targets := map[string]ProfilingTarget{ + pprofMemory: cfg.Memory, + pprofBlock: cfg.Block, + pprofGoroutine: cfg.Goroutine, + pprofMutex: cfg.Mutex, + pprofProcessCPU: cfg.ProcessCPU, + pprofFgprof: cfg.FGProf, + } + + for _, custom := range cfg.Custom { + targets[custom.Name] = ProfilingTarget{ + Enabled: custom.Enabled, + Path: custom.Path, + Delta: custom.Delta, + } + } + + return targets +} + +var DefaultProfilingConfig = ProfilingConfig{ + Memory: ProfilingTarget{ + Enabled: true, + Path: "/debug/pprof/allocs", + }, + Block: ProfilingTarget{ + Enabled: true, + Path: "/debug/pprof/block", + }, + Goroutine: ProfilingTarget{ + Enabled: true, + Path: "/debug/pprof/goroutine", + }, + Mutex: ProfilingTarget{ + Enabled: true, + Path: "/debug/pprof/mutex", + }, + ProcessCPU: ProfilingTarget{ + Enabled: true, + Path: "/debug/pprof/profile", + Delta: true, + }, + FGProf: ProfilingTarget{ + Enabled: false, + Path: "/debug/fgprof", + Delta: true, + }, +} + +// UnmarshalRiver implements river.Unmarshaler and applies defaults before +// unmarshaling. +func (cfg *ProfilingConfig) UnmarshalRiver(f func(interface{}) error) error { + *cfg = DefaultProfilingConfig + + type args ProfilingConfig + if err := f((*args)(cfg)); err != nil { + return err + } + + return nil +} + +type ProfilingTarget struct { + Enabled bool `river:"enabled,attr,optional"` + Path string `river:"path,attr,optional"` + Delta bool `river:"delta,attr,optional"` +} + +type CustomProfilingTarget struct { + Enabled bool `river:"enabled,attr"` + Path string `river:"path,attr"` + Delta bool `river:"delta,attr,optional"` + Name string `river:",label"` +} + +var DefaultArguments = NewDefaultArguments() + +// NewDefaultArguments create the default settings for a scrape job. +func NewDefaultArguments() Arguments { + return Arguments{ + Scheme: "http", + HTTPClientConfig: component_config.DefaultHTTPClientConfig, + ScrapeInterval: 15 * time.Second, + ScrapeTimeout: 15*time.Second + (3 * time.Second), + ProfilingConfig: DefaultProfilingConfig, + } +} + +// UnmarshalRiver implements river.Unmarshaler. +func (arg *Arguments) UnmarshalRiver(f func(interface{}) error) error { + *arg = NewDefaultArguments() + + type args Arguments + if err := f((*args)(arg)); err != nil { + return err + } + + if arg.ScrapeTimeout <= 0 { + return fmt.Errorf("scrape_timeout must be greater than 0") + } + if arg.ScrapeTimeout <= arg.ScrapeInterval { + return fmt.Errorf("scrape_timeout must be greater than scrape_interval") + } + + if cfg, ok := arg.ProfilingConfig.ProcessCPU, true; ok { + if cfg.Enabled && arg.ScrapeTimeout < time.Second*2 { + return fmt.Errorf("%v scrape_timeout must be at least 2 seconds", pprofProcessCPU) + } + } + + return nil +} + +// Component implements the pprof.scrape component. +type Component struct { + opts component.Options + + reloadTargets chan struct{} + + mut sync.RWMutex + args Arguments + scraper *Manager + appendable *phlare.Fanout +} + +var _ component.Component = (*Component)(nil) + +// New creates a new pprof.scrape component. 
+func New(o component.Options, args Arguments) (*Component, error) { + flowAppendable := phlare.NewFanout(args.ForwardTo, o.ID, o.Registerer) + scraper := NewManager(flowAppendable, o.Logger) + c := &Component{ + opts: o, + reloadTargets: make(chan struct{}, 1), + scraper: scraper, + appendable: flowAppendable, + } + + // Call to Update() to set the receivers and targets once at the start. + if err := c.Update(args); err != nil { + return nil, err + } + + return c, nil +} + +// Run implements component.Component. +func (c *Component) Run(ctx context.Context) error { + defer c.scraper.Stop() + + targetSetsChan := make(chan map[string][]*targetgroup.Group) + + go func() { + c.scraper.Run(targetSetsChan) + level.Info(c.opts.Logger).Log("msg", "scrape manager stopped") + }() + + for { + select { + case <-ctx.Done(): + return nil + case <-c.reloadTargets: + c.mut.RLock() + var ( + tgs = c.args.Targets + jobName = c.opts.ID + ) + if c.args.JobName != "" { + jobName = c.args.JobName + } + c.mut.RUnlock() + promTargets := c.componentTargetsToProm(jobName, tgs) + + select { + case targetSetsChan <- promTargets: + level.Debug(c.opts.Logger).Log("msg", "passed new targets to scrape manager") + case <-ctx.Done(): + return nil + } + } + } +} + +// Update implements component.Component. +func (c *Component) Update(args component.Arguments) error { + newArgs := args.(Arguments) + + c.mut.Lock() + defer c.mut.Unlock() + c.args = newArgs + + c.appendable.UpdateChildren(newArgs.ForwardTo) + + err := c.scraper.ApplyConfig(newArgs) + if err != nil { + return fmt.Errorf("error applying scrape configs: %w", err) + } + level.Debug(c.opts.Logger).Log("msg", "scrape config was updated") + + select { + case c.reloadTargets <- struct{}{}: + default: + } + + return nil +} + +func (c *Component) componentTargetsToProm(jobName string, tgs []discovery.Target) map[string][]*targetgroup.Group { + promGroup := &targetgroup.Group{Source: jobName} + for _, tg := range tgs { + promGroup.Targets = append(promGroup.Targets, convertLabelSet(tg)) + } + + return map[string][]*targetgroup.Group{jobName: {promGroup}} +} + +func convertLabelSet(tg discovery.Target) model.LabelSet { + lset := make(model.LabelSet, len(tg)) + for k, v := range tg { + lset[model.LabelName(k)] = model.LabelValue(v) + } + return lset +} + +// DebugInfo implements component.DebugComponent +func (c *Component) DebugInfo() interface{} { + var res []scrape.TargetStatus + + for job, stt := range c.scraper.TargetsActive() { + for _, st := range stt { + var lastError string + if st.LastError() != nil { + lastError = st.LastError().Error() + } + if st != nil { + res = append(res, scrape.TargetStatus{ + JobName: job, + URL: st.URL().String(), + Health: string(st.Health()), + Labels: st.discoveredLabels.Map(), + LastError: lastError, + LastScrape: st.LastScrape(), + LastScrapeDuration: st.LastScrapeDuration(), + }) + } + } + } + + return scrape.ScraperStatus{TargetStatus: res} +} diff --git a/component/phlare/scrape/scrape_loop.go b/component/phlare/scrape/scrape_loop.go new file mode 100644 index 000000000000..67edf06f0367 --- /dev/null +++ b/component/phlare/scrape/scrape_loop.go @@ -0,0 +1,289 @@ +package scrape + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + commonconfig "github.com/prometheus/common/config" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/pool" + "golang.org/x/net/context/ctxhttp" + + 
"github.com/grafana/agent/component/phlare" + "github.com/grafana/agent/pkg/build" +) + +var ( + payloadBuffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) + userAgentHeader = fmt.Sprintf("GrafanaAgent/%s", build.Version) +) + +type scrapePool struct { + config Arguments + + logger log.Logger + scrapeClient *http.Client + appendable phlare.Appendable + + mtx sync.RWMutex + activeTargets map[uint64]*scrapeLoop + droppedTargets []*Target +} + +func newScrapePool(cfg Arguments, appendable phlare.Appendable, logger log.Logger) (*scrapePool, error) { + scrapeClient, err := commonconfig.NewClientFromConfig(*cfg.HTTPClientConfig.Convert(), cfg.JobName) + if err != nil { + return nil, err + } + + return &scrapePool{ + config: cfg, + logger: logger, + scrapeClient: scrapeClient, + appendable: appendable, + activeTargets: map[uint64]*scrapeLoop{}, + }, nil +} + +func (tg *scrapePool) sync(groups []*targetgroup.Group) { + tg.mtx.Lock() + defer tg.mtx.Unlock() + + level.Info(tg.logger).Log("msg", "syncing target groups", "job", tg.config.JobName) + var actives []*Target + tg.droppedTargets = []*Target{} + for _, group := range groups { + targets, dropped, err := targetsFromGroup(group, tg.config) + if err != nil { + level.Error(tg.logger).Log("msg", "creating targets failed", "err", err) + continue + } + for _, t := range targets { + if t.Labels().Len() > 0 { + actives = append(actives, t) + } + } + tg.droppedTargets = append(tg.droppedTargets, dropped...) + } + + for _, t := range actives { + if _, ok := tg.activeTargets[t.hash()]; !ok { + loop := newScrapeLoop(t, tg.scrapeClient, tg.appendable, tg.config.ScrapeInterval, tg.config.ScrapeTimeout, tg.logger) + tg.activeTargets[t.hash()] = loop + loop.start() + } else { + tg.activeTargets[t.hash()].SetDiscoveredLabels(t.DiscoveredLabels()) + } + } + + // Removes inactive targets. +Outer: + for h, t := range tg.activeTargets { + for _, at := range actives { + if h == at.hash() { + continue Outer + } + } + t.stop() + delete(tg.activeTargets, h) + } +} + +func (tg *scrapePool) reload(cfg Arguments) error { + tg.mtx.Lock() + defer tg.mtx.Unlock() + tg.config = cfg + scrapeClient, err := commonconfig.NewClientFromConfig(*cfg.HTTPClientConfig.Convert(), cfg.JobName) + if err != nil { + return err + } + tg.scrapeClient = scrapeClient + for _, t := range tg.activeTargets { + t.reload(scrapeClient, cfg.ScrapeInterval, cfg.ScrapeTimeout) + } + return nil +} + +func (tg *scrapePool) stop() { + tg.mtx.Lock() + defer tg.mtx.Unlock() + + for _, t := range tg.activeTargets { + t.stop() + } +} + +func (tg *scrapePool) ActiveTargets() []*Target { + tg.mtx.RLock() + defer tg.mtx.RUnlock() + result := make([]*Target, 0, len(tg.activeTargets)) + for _, target := range tg.activeTargets { + result = append(result, target.Target) + } + return result +} + +func (tg *scrapePool) DroppedTargets() []*Target { + tg.mtx.RLock() + defer tg.mtx.RUnlock() + result := make([]*Target, 0, len(tg.droppedTargets)) + result = append(result, tg.droppedTargets...) 
+ return result +} + +type scrapeLoop struct { + *Target + + lastScrapeSize int + + scrapeClient *http.Client + appendable phlare.Appendable + + req *http.Request + logger log.Logger + interval, timeout time.Duration + cancel context.CancelFunc + wg sync.WaitGroup +} + +func newScrapeLoop(t *Target, scrapeClient *http.Client, appendable phlare.Appendable, interval, timeout time.Duration, logger log.Logger) *scrapeLoop { + return &scrapeLoop{ + Target: t, + logger: logger, + scrapeClient: scrapeClient, + appendable: appendable, + interval: interval, + timeout: timeout, + } +} + +func (t *scrapeLoop) start() { + ctx, cancel := context.WithCancel(context.Background()) + t.cancel = cancel + t.wg.Add(1) + + go func() { + defer func() { + cancel() + t.wg.Done() + }() + select { + case <-time.After(t.offset(t.interval)): + case <-ctx.Done(): + return + } + ticker := time.NewTicker(t.interval) + defer ticker.Stop() + + tick := func() { + select { + case <-ctx.Done(): + return + case <-ticker.C: + } + } + for ; true; tick() { + if ctx.Err() != nil { + return + } + t.scrape(ctx) + } + }() +} + +func (t *scrapeLoop) scrape(ctx context.Context) { + var ( + start = time.Now() + b = payloadBuffers.Get(t.lastScrapeSize).([]byte) + buf = bytes.NewBuffer(b) + profileType string + scrapeCtx, cancel = context.WithTimeout(ctx, t.timeout) + ) + defer cancel() + + for _, l := range t.labels { + if l.Name == ProfileName { + profileType = l.Value + break + } + } + err := t.fetchProfile(scrapeCtx, profileType, buf) + t.mtx.Lock() + defer t.mtx.Unlock() + if err != nil { + level.Error(t.logger).Log("msg", "fetch profile failed", "target", t.Labels().String(), "err", err) + t.health = HealthBad + t.lastScrapeDuration = time.Since(start) + t.lastError = err + t.lastScrape = start + return + } + + b = buf.Bytes() + if len(b) > 0 { + t.lastScrapeSize = len(b) + } + t.health = HealthGood + t.lastScrapeDuration = time.Since(start) + t.lastError = nil + t.lastScrape = start + if err := t.appendable.Appender().Append(ctx, t.labels, []*phlare.RawSample{{RawProfile: b}}); err != nil { + level.Error(t.logger).Log("msg", "push failed", "labels", t.Labels().String(), "err", err) + } +} + +func (t *scrapeLoop) reload(scrapeClient *http.Client, interval, timeout time.Duration) { + t.stop() + t.scrapeClient = scrapeClient + t.interval = interval + t.timeout = timeout + t.start() +} + +func (t *scrapeLoop) fetchProfile(ctx context.Context, profileType string, buf io.Writer) error { + if t.req == nil { + req, err := http.NewRequest("GET", t.URL().String(), nil) + if err != nil { + return err + } + req.Header.Set("User-Agent", userAgentHeader) + + t.req = req + } + + level.Debug(t.logger).Log("msg", "scraping profile", "labels", t.Labels().String(), "url", t.req.URL.String()) + resp, err := ctxhttp.Do(ctx, t.scrapeClient, t.req) + if err != nil { + return err + } + defer resp.Body.Close() + + b, err := io.ReadAll(io.TeeReader(resp.Body, buf)) + if err != nil { + return fmt.Errorf("failed to read body: %w", err) + } + + if resp.StatusCode/100 != 2 { + if len(b) > 0 { + return fmt.Errorf("server returned HTTP status (%d) %v", resp.StatusCode, string(bytes.TrimSpace(b))) + } + return fmt.Errorf("server returned HTTP status (%d) %v", resp.StatusCode, resp.Status) + } + + if len(b) == 0 { + return fmt.Errorf("empty %s profile from %s", profileType, t.req.URL.String()) + } + return nil +} + +func (t *scrapeLoop) stop() { + t.cancel() + t.wg.Wait() +} diff --git a/component/phlare/scrape/scrape_loop_test.go 
b/component/phlare/scrape/scrape_loop_test.go new file mode 100644 index 000000000000..e9ae33d8d8e0 --- /dev/null +++ b/component/phlare/scrape/scrape_loop_test.go @@ -0,0 +1,197 @@ +package scrape + +import ( + "context" + "net/http" + "net/http/httptest" + "net/url" + "sort" + "strings" + "testing" + "time" + + "github.com/grafana/agent/component/discovery" + "github.com/grafana/agent/component/phlare" + "github.com/grafana/agent/pkg/util" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + "go.uber.org/goleak" +) + +func TestScrapePool(t *testing.T) { + defer goleak.VerifyNone(t) + + args := NewDefaultArguments() + args.Targets = []discovery.Target{ + {"instance": "foo"}, + } + args.ProfilingConfig.Block.Enabled = false + args.ProfilingConfig.Goroutine.Enabled = false + args.ProfilingConfig.Memory.Enabled = false + + p, err := newScrapePool(args, phlare.AppendableFunc( + func(ctx context.Context, labels labels.Labels, samples []*phlare.RawSample) error { + return nil + }), + util.TestLogger(t)) + require.NoError(t, err) + + defer p.stop() + + for _, tt := range []struct { + name string + groups []*targetgroup.Group + expected []*Target + }{ + { + name: "no targets", + groups: []*targetgroup.Group{}, + expected: []*Target{}, + }, + { + name: "targets", + groups: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + {model.AddressLabel: "localhost:9090"}, + {model.AddressLabel: "localhost:8080"}, + }, + Labels: model.LabelSet{"foo": "bar"}, + }, + }, + expected: []*Target{ + NewTarget( + labels.FromStrings("instance", "localhost:8080", "foo", "bar", model.AddressLabel, "localhost:8080", model.MetricNameLabel, pprofMutex, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/mutex"), + labels.FromStrings("foo", "bar", model.AddressLabel, "localhost:8080", model.MetricNameLabel, pprofMutex, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/mutex"), + url.Values{}, + ), + NewTarget( + labels.FromStrings("instance", "localhost:8080", "foo", "bar", model.AddressLabel, "localhost:8080", model.MetricNameLabel, pprofProcessCPU, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/profile"), + labels.FromStrings("foo", "bar", model.AddressLabel, "localhost:8080", model.MetricNameLabel, pprofProcessCPU, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/profile"), + url.Values{"seconds": []string{"15"}}, + ), + NewTarget( + labels.FromStrings("instance", "localhost:9090", "foo", "bar", model.AddressLabel, "localhost:9090", model.MetricNameLabel, pprofMutex, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/mutex"), + labels.FromStrings("foo", "bar", model.AddressLabel, "localhost:9090", model.MetricNameLabel, pprofMutex, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/mutex"), + url.Values{}, + ), + NewTarget( + labels.FromStrings("instance", "localhost:9090", "foo", "bar", model.AddressLabel, "localhost:9090", model.MetricNameLabel, pprofProcessCPU, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/profile"), + labels.FromStrings("foo", "bar", model.AddressLabel, "localhost:9090", model.MetricNameLabel, pprofProcessCPU, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/profile"), + url.Values{"seconds": []string{"15"}}, + ), + }, + }, + { + name: "Remove targets", + groups: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + {model.AddressLabel: "localhost:9090"}, + }, + }, + }, + expected: []*Target{ + 
NewTarget( + labels.FromStrings("instance", "localhost:9090", model.AddressLabel, "localhost:9090", model.MetricNameLabel, pprofMutex, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/mutex"), + labels.FromStrings(model.AddressLabel, "localhost:9090", model.MetricNameLabel, pprofMutex, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/mutex"), + url.Values{}, + ), + NewTarget( + labels.FromStrings("instance", "localhost:9090", model.AddressLabel, "localhost:9090", model.MetricNameLabel, pprofProcessCPU, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/profile"), + labels.FromStrings(model.AddressLabel, "localhost:9090", model.MetricNameLabel, pprofProcessCPU, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/profile"), + url.Values{"seconds": []string{"15"}}, + ), + }, + }, + { + name: "Sync targets", + groups: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + {model.AddressLabel: "localhost:9090", "__type__": "foo"}, + }, + }, + }, + expected: []*Target{ + NewTarget( + labels.FromStrings("instance", "localhost:9090", model.AddressLabel, "localhost:9090", model.MetricNameLabel, pprofMutex, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/mutex"), + labels.FromStrings("__type__", "foo", model.AddressLabel, "localhost:9090", model.MetricNameLabel, pprofMutex, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/mutex"), + url.Values{}, + ), + NewTarget( + labels.FromStrings("instance", "localhost:9090", model.AddressLabel, "localhost:9090", model.MetricNameLabel, pprofProcessCPU, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/profile"), + labels.FromStrings("__type__", "foo", model.AddressLabel, "localhost:9090", model.MetricNameLabel, pprofProcessCPU, model.SchemeLabel, "http", ProfilePath, "/debug/pprof/profile"), + url.Values{"seconds": []string{"15"}}, + ), + }, + }, + } { + tt := tt + t.Run(tt.name, func(t *testing.T) { + p.sync(tt.groups) + actual := p.ActiveTargets() + sort.Sort(Targets(actual)) + sort.Sort(Targets(tt.expected)) + require.Equal(t, tt.expected, actual) + require.Empty(t, p.DroppedTargets()) + }) + } + + // reload the cfg + args.ScrapeTimeout = 1 * time.Second + args.ScrapeInterval = 2 * time.Second + p.reload(args) + for _, ta := range p.activeTargets { + require.Equal(t, 1*time.Second, ta.timeout) + require.Equal(t, 2*time.Second, ta.interval) + } +} + +func TestScrapeLoop(t *testing.T) { + defer goleak.VerifyNone(t) + down := atomic.NewBool(false) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if down.Load() { + w.WriteHeader(http.StatusInternalServerError) + } + w.Write([]byte("ok")) + })) + defer server.Close() + appendTotal := atomic.NewInt64(0) + + loop := newScrapeLoop( + NewTarget( + labels.FromStrings( + model.SchemeLabel, "http", + model.AddressLabel, strings.TrimPrefix(server.URL, "http://"), + ProfilePath, "/debug/pprof/profile", + ), labels.FromStrings(), url.Values{ + "seconds": []string{"1"}, + }), + server.Client(), + phlare.AppendableFunc(func(_ context.Context, labels labels.Labels, samples []*phlare.RawSample) error { + appendTotal.Inc() + require.Equal(t, []byte("ok"), samples[0].RawProfile) + return nil + }), + 200*time.Millisecond, 30*time.Second, util.TestLogger(t)) + defer loop.stop() + + require.Equal(t, HealthUnknown, loop.Health()) + loop.start() + require.Eventually(t, func() bool { return appendTotal.Load() > 3 }, time.Second, 100*time.Millisecond) + require.Equal(t, HealthGood, loop.Health()) + + down.Store(true) + require.Eventually(t, func() bool { + 
return HealthBad == loop.Health() + }, time.Second, 100*time.Millisecond) + + require.Error(t, loop.LastError()) + require.WithinDuration(t, time.Now(), loop.LastScrape(), 1*time.Second) + require.NotEmpty(t, loop.LastScrapeDuration()) +} diff --git a/component/phlare/scrape/scrape_test.go b/component/phlare/scrape/scrape_test.go new file mode 100644 index 000000000000..502a3216491c --- /dev/null +++ b/component/phlare/scrape/scrape_test.go @@ -0,0 +1,160 @@ +package scrape + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/discovery" + "github.com/grafana/agent/component/phlare" + "github.com/grafana/agent/component/prometheus/scrape" + "github.com/grafana/agent/pkg/river" + "github.com/grafana/agent/pkg/util" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + "go.uber.org/goleak" +) + +func TestComponent(t *testing.T) { + defer goleak.VerifyNone(t) + reloadInterval = 100 * time.Millisecond + arg := NewDefaultArguments() + arg.JobName = "test" + c, err := New(component.Options{ + Logger: util.TestLogger(t), + Registerer: prometheus.NewRegistry(), + }, arg) + require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + err := c.Run(ctx) + require.NoError(t, err) + }() + + // triger an update + require.Empty(t, c.appendable.Children()) + require.Empty(t, c.DebugInfo().(scrape.ScraperStatus).TargetStatus) + + arg.ForwardTo = []phlare.Appendable{phlare.NoopAppendable} + arg.Targets = []discovery.Target{ + { + model.AddressLabel: "foo", + }, + { + model.AddressLabel: "bar", + }, + } + c.Update(arg) + + require.Eventually(t, func() bool { + fmt.Println(c.DebugInfo().(scrape.ScraperStatus).TargetStatus) + return len(c.appendable.Children()) == 1 && len(c.DebugInfo().(scrape.ScraperStatus).TargetStatus) == 10 + }, 5*time.Second, 100*time.Millisecond) +} + +func TestUnmarshalConfig(t *testing.T) { + for name, tt := range map[string]struct { + in string + expected func() Arguments + expectedErr string + }{ + "default": { + in: ` + targets = [ + {"__address__" = "localhost:9090", "foo" = "bar"}, + ] + forward_to = null + `, + expected: func() Arguments { + r := NewDefaultArguments() + r.Targets = []discovery.Target{ + { + "__address__": "localhost:9090", + "foo": "bar", + }, + } + return r + }, + }, + "custom": { + in: ` + targets = [ + {"__address__" = "localhost:9090", "foo" = "bar"}, + {"__address__" = "localhost:8080", "foo" = "buzz"}, + ] + forward_to = null + profiling_config { + path_prefix = "v1/" + + profile.block { + enabled = false + } + + profile.custom "something" { + enabled = true + path = "/debug/fgprof" + delta = true + } + } + `, + expected: func() Arguments { + r := NewDefaultArguments() + r.Targets = []discovery.Target{ + { + "__address__": "localhost:9090", + "foo": "bar", + }, + { + "__address__": "localhost:8080", + "foo": "buzz", + }, + } + r.ProfilingConfig.Block.Enabled = false + r.ProfilingConfig.Custom = append(r.ProfilingConfig.Custom, CustomProfilingTarget{ + Enabled: true, + Path: "/debug/fgprof", + Delta: true, + Name: "something", + }) + r.ProfilingConfig.PprofPrefix = "v1/" + return r + }, + }, + "invalid cpu timeout": { + in: ` + targets = [] + forward_to = null + scrape_timeout = "1s" + scrape_interval = "0.5s" + `, + expectedErr: "process_cpu scrape_timeout must be at least 2 seconds", + }, + "invalid timeout/interval": { + in: ` + targets = [] + 
forward_to = null + scrape_timeout = "4s" + scrape_interval = "5s" + `, + expectedErr: "scrape_timeout must be greater than scrape_interval", + }, + } { + tt := tt + name := name + t.Run(name, func(t *testing.T) { + arg := Arguments{} + if tt.expectedErr != "" { + err := river.Unmarshal([]byte(tt.in), &arg) + require.Error(t, err) + require.Equal(t, tt.expectedErr, err.Error()) + return + } + require.NoError(t, river.Unmarshal([]byte(tt.in), &arg)) + require.Equal(t, tt.expected(), arg) + }) + } +} diff --git a/component/phlare/scrape/target.go b/component/phlare/scrape/target.go new file mode 100644 index 000000000000..18e0e2d4abe8 --- /dev/null +++ b/component/phlare/scrape/target.go @@ -0,0 +1,402 @@ +// Copyright 2022 The Parca Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scrape + +import ( + "errors" + "fmt" + "hash/fnv" + "net" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" +) + +// TargetHealth describes the health state of a target. +type TargetHealth string + +// The possible health states of a target based on the last performed scrape. +const ( + HealthUnknown TargetHealth = "unknown" + HealthGood TargetHealth = "up" + HealthBad TargetHealth = "down" +) + +// Target refers to a singular HTTP or HTTPS endpoint. +type Target struct { + // Labels before any processing. + discoveredLabels labels.Labels + // Any labels that are added to this target and its metrics. + labels labels.Labels + // Additional URL parameters that are part of the target URL. + params url.Values + + mtx sync.RWMutex + lastError error + lastScrape time.Time + lastScrapeDuration time.Duration + health TargetHealth +} + +// NewTarget creates a reasonably configured target for querying. +func NewTarget(labels, discoveredLabels labels.Labels, params url.Values) *Target { + return &Target{ + labels: labels, + discoveredLabels: discoveredLabels, + params: params, + health: HealthUnknown, + } +} + +func (t *Target) String() string { + return t.URL().String() } + +// hash returns an identifying hash for the target. +func (t *Target) hash() uint64 { + h := fnv.New64a() + _, _ = h.Write([]byte(fmt.Sprintf("%016d", t.Labels().Hash()))) + _, _ = h.Write([]byte(t.URL().String())) + return h.Sum64() +} + +// offset returns the time until the next scrape cycle for the target. +func (t *Target) offset(interval time.Duration) time.Duration { + now := time.Now().UnixNano() + + var ( + base = now % int64(interval) + offset = t.hash() % uint64(interval) + next = base + int64(offset) + ) + + if next > int64(interval) { + next -= int64(interval) + } + return time.Duration(next) +} + +// Params returns a copy of the set of all public params of the target.
+func (t *Target) Params() url.Values { + q := make(url.Values, len(t.params)) + for k, values := range t.params { + q[k] = make([]string, len(values)) + copy(q[k], values) + } + return q +} + +// Labels returns a copy of the set of all public labels of the target. +func (t *Target) Labels() labels.Labels { + lset := make(labels.Labels, 0, len(t.labels)) + for _, l := range t.labels { + if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) { + lset = append(lset, l) + } + } + return lset +} + +// DiscoveredLabels returns a copy of the target's labels before any processing. +func (t *Target) DiscoveredLabels() labels.Labels { + t.mtx.Lock() + defer t.mtx.Unlock() + lset := make(labels.Labels, len(t.discoveredLabels)) + copy(lset, t.discoveredLabels) + return lset +} + +// Clone returns a clone of the target. +func (t *Target) Clone() *Target { + return NewTarget( + t.Labels(), + t.DiscoveredLabels(), + t.Params(), + ) +} + +// SetDiscoveredLabels sets new DiscoveredLabels. +func (t *Target) SetDiscoveredLabels(l labels.Labels) { + t.mtx.Lock() + defer t.mtx.Unlock() + t.discoveredLabels = l +} + +// URL returns a copy of the target's URL. +func (t *Target) URL() *url.URL { + params := url.Values{} + + for k, v := range t.params { + params[k] = make([]string, len(v)) + copy(params[k], v) + } + for _, l := range t.labels { + if !strings.HasPrefix(l.Name, model.ParamLabelPrefix) { + continue + } + ks := l.Name[len(model.ParamLabelPrefix):] + + if len(params[ks]) > 0 { + params[ks][0] = l.Value + } else { + params[ks] = []string{l.Value} + } + } + + return &url.URL{ + Scheme: t.labels.Get(model.SchemeLabel), + Host: t.labels.Get(model.AddressLabel), + Path: t.labels.Get(ProfilePath), + RawQuery: params.Encode(), + } +} + +// LastError returns the error encountered during the last scrape. +func (t *Target) LastError() error { + t.mtx.RLock() + defer t.mtx.RUnlock() + + return t.lastError +} + +// LastScrape returns the time of the last scrape. +func (t *Target) LastScrape() time.Time { + t.mtx.RLock() + defer t.mtx.RUnlock() + + return t.lastScrape +} + +// LastScrapeDuration returns how long the last scrape of the target took. +func (t *Target) LastScrapeDuration() time.Duration { + t.mtx.RLock() + defer t.mtx.RUnlock() + + return t.lastScrapeDuration +} + +// Health returns the last known health state of the target. +func (t *Target) Health() TargetHealth { + t.mtx.RLock() + defer t.mtx.RUnlock() + + return t.health +} + +// LabelsByProfiles returns the labels for a given ProfilingConfig. +func LabelsByProfiles(lset labels.Labels, c *ProfilingConfig) []labels.Labels { + res := []labels.Labels{} + add := func(profileType string, cfgs ...ProfilingTarget) { + for _, p := range cfgs { + if p.Enabled { + l := lset.Copy() + l = append(l, labels.Label{Name: ProfilePath, Value: p.Path}, labels.Label{Name: ProfileName, Value: profileType}) + res = append(res, l) + } + } + } + + for profilingType, profilingConfig := range c.AllTargets() { + add(profilingType, profilingConfig) + } + + return res +} + +// Targets is a sortable list of targets. +type Targets []*Target + +func (ts Targets) Len() int { return len(ts) } +func (ts Targets) Less(i, j int) bool { return ts[i].URL().String() < ts[j].URL().String() } +func (ts Targets) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } + +const ( + ProfilePath = "__profile_path__" + ProfileName = "__name__" + ProfileTraceType = "trace" +) + +// populateLabels builds a label set from the given label set and scrape configuration. 
+// It returns a label set before relabeling was applied as the second return value. +// Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. +func populateLabels(lset labels.Labels, cfg Arguments) (res, orig labels.Labels, err error) { + // Copy labels into the labelset for the target if they are not set already. + scrapeLabels := []labels.Label{ + {Name: model.JobLabel, Value: cfg.JobName}, + {Name: model.SchemeLabel, Value: cfg.Scheme}, + } + lb := labels.NewBuilder(lset) + + for _, l := range scrapeLabels { + if lv := lset.Get(l.Name); lv == "" { + lb.Set(l.Name, l.Value) + } + } + // Encode scrape query parameters as labels. + for k, v := range cfg.Params { + if len(v) > 0 { + lb.Set(model.ParamLabelPrefix+k, v[0]) + } + } + + preRelabelLabels := lb.Labels(nil) + // todo(ctovena): add relabeling after pprof discovery. + // lset = relabel.Process(preRelabelLabels, cfg.RelabelConfigs...) + lset = relabel.Process(preRelabelLabels) + + // Check if the target was dropped. + if lset == nil { + return nil, preRelabelLabels, nil + } + if v := lset.Get(model.AddressLabel); v == "" { + return nil, nil, errors.New("no address") + } + + if v := lset.Get(model.AddressLabel); v == "" { + return nil, nil, fmt.Errorf("no address") + } + + lb = labels.NewBuilder(lset) + + // addPort checks whether we should add a default port to the address. + // If the address is not valid, we don't append a port either. + addPort := func(s string) bool { + // If we can split, a port exists and we don't have to add one. + if _, _, err := net.SplitHostPort(s); err == nil { + return false + } + // If adding a port makes it valid, the previous error + // was not due to an invalid address and we can append a port. + _, _, err := net.SplitHostPort(s + ":1234") + return err == nil + } + addr := lset.Get(model.AddressLabel) + // If it's an address with no trailing port, infer it based on the used scheme. + if addPort(addr) { + // Addresses reaching this point are already wrapped in [] if necessary. + switch lset.Get(model.SchemeLabel) { + case "http", "": + addr = addr + ":80" + case "https": + addr = addr + ":443" + default: + return nil, nil, fmt.Errorf("invalid scheme: %q", cfg.Scheme) + } + lb.Set(model.AddressLabel, addr) + } + + if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { + return nil, nil, err + } + + // Meta labels are deleted after relabelling. Other internal labels propagate to + // the target which decides whether they will be part of their label set. + for _, l := range lset { + if strings.HasPrefix(l.Name, model.MetaLabelPrefix) { + lb.Del(l.Name) + } + } + + // Default the instance label to the target address. + if v := lset.Get(model.InstanceLabel); v == "" { + lb.Set(model.InstanceLabel, addr) + } + + res = lb.Labels(nil) + for _, l := range res { + // Check label values are valid, drop the target if not. + if !model.LabelValue(l.Value).IsValid() { + return nil, nil, fmt.Errorf("invalid label value for %q: %q", l.Name, l.Value) + } + } + + return res, lset, nil +} + +// targetsFromGroup builds targets based on the given TargetGroup and config. 
+func targetsFromGroup(group *targetgroup.Group, cfg Arguments) ([]*Target, []*Target, error) { + var ( + targets = make([]*Target, 0, len(group.Targets)) + droppedTargets = make([]*Target, 0, len(group.Targets)) + ) + + for i, tlset := range group.Targets { + lbls := make([]labels.Label, 0, len(tlset)+len(group.Labels)) + + for ln, lv := range tlset { + lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)}) + } + for ln, lv := range group.Labels { + if _, ok := tlset[ln]; !ok { + lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)}) + } + } + + lset := labels.New(lbls...) + lsets := LabelsByProfiles(lset, &cfg.ProfilingConfig) + + for _, lset := range lsets { + var profType string + for _, label := range lset { + if label.Name == ProfileName { + profType = label.Value + } + } + lbls, origLabels, err := populateLabels(lset, cfg) + if err != nil { + return nil, nil, fmt.Errorf("instance %d in group %s: %s", i, group, err) + } + // This is a dropped target, according to the current return behaviour of populateLabels + if lbls == nil && origLabels != nil { + // ensure we get the full url path for dropped targets + params := cfg.Params + if params == nil { + params = url.Values{} + } + lbls = append(lbls, labels.Label{Name: model.AddressLabel, Value: lset.Get(model.AddressLabel)}) + lbls = append(lbls, labels.Label{Name: model.SchemeLabel, Value: cfg.Scheme}) + lbls = append(lbls, labels.Label{Name: ProfilePath, Value: lset.Get(ProfilePath)}) + // Encode scrape query parameters as labels. + for k, v := range cfg.Params { + if len(v) > 0 { + lbls = append(lbls, labels.Label{Name: model.ParamLabelPrefix + k, Value: v[0]}) + } + } + droppedTargets = append(droppedTargets, NewTarget(lbls, origLabels, params)) + continue + } + if lbls != nil || origLabels != nil { + params := cfg.Params + if params == nil { + params = url.Values{} + } + + if pcfg, found := cfg.ProfilingConfig.AllTargets()[profType]; found && pcfg.Delta { + params.Add("seconds", strconv.Itoa(int((cfg.ScrapeInterval)/time.Second))) + } + targets = append(targets, NewTarget(lbls, origLabels, params)) + } + } + } + + return targets, droppedTargets, nil +} diff --git a/component/phlare/scrape/target_test.go b/component/phlare/scrape/target_test.go new file mode 100644 index 000000000000..3c780158a7d0 --- /dev/null +++ b/component/phlare/scrape/target_test.go @@ -0,0 +1,67 @@ +package scrape + +import ( + "net/url" + "sort" + "testing" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" +) + +func Test_targetsFromGroup(t *testing.T) { + args := NewDefaultArguments() + args.ProfilingConfig.Block.Enabled = false + args.ProfilingConfig.Goroutine.Enabled = false + args.ProfilingConfig.Mutex.Enabled = false + + active, dropped, err := targetsFromGroup(&targetgroup.Group{ + Targets: []model.LabelSet{ + {model.AddressLabel: "localhost:9090"}, + }, + Labels: model.LabelSet{"foo": "bar"}, + }, args) + expected := []*Target{ + NewTarget( + labels.FromMap(map[string]string{ + model.AddressLabel: "localhost:9090", + model.MetricNameLabel: pprofMemory, + ProfilePath: "/debug/pprof/allocs", + model.SchemeLabel: "http", + "foo": "bar", + "instance": "localhost:9090", + }), + labels.FromMap(map[string]string{ + model.AddressLabel: "localhost:9090", + model.MetricNameLabel: pprofMemory, + ProfilePath: "/debug/pprof/allocs", + model.SchemeLabel: "http", + "foo": "bar", + }), + 
url.Values{}), + NewTarget( + labels.FromMap(map[string]string{ + model.AddressLabel: "localhost:9090", + model.MetricNameLabel: pprofProcessCPU, + ProfilePath: "/debug/pprof/profile", + model.SchemeLabel: "http", + "foo": "bar", + "instance": "localhost:9090", + }), + labels.FromMap(map[string]string{ + model.AddressLabel: "localhost:9090", + model.MetricNameLabel: pprofProcessCPU, + ProfilePath: "/debug/pprof/profile", + model.SchemeLabel: "http", + "foo": "bar", + }), + url.Values{"seconds": []string{"15"}}), + } + require.NoError(t, err) + sort.Sort(Targets(active)) + sort.Sort(Targets(expected)) + require.Equal(t, expected, active) + require.Empty(t, dropped) +} diff --git a/component/phlare/write/write.go b/component/phlare/write/write.go new file mode 100644 index 000000000000..7e6034ba96d2 --- /dev/null +++ b/component/phlare/write/write.go @@ -0,0 +1,261 @@ +package write + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/bufbuild/connect-go" + "github.com/go-kit/log/level" + "github.com/oklog/run" + commonconfig "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "go.uber.org/multierr" + + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/common/config" + "github.com/grafana/agent/component/phlare" + "github.com/grafana/agent/pkg/build" + pushv1 "github.com/grafana/phlare/api/gen/proto/go/push/v1" + pushv1connect "github.com/grafana/phlare/api/gen/proto/go/push/v1/pushv1connect" + typesv1 "github.com/grafana/phlare/api/gen/proto/go/types/v1" +) + +var ( + userAgent = fmt.Sprintf("GrafanaAgent/%s", build.Version) + DefaultArguments = func() Arguments { + return Arguments{} + } + DefaultEndpointOptions = func() EndpointOptions { + return EndpointOptions{ + RemoteTimeout: 30 * time.Second, + } + } + _ component.Component = (*Component)(nil) +) + +func init() { + component.Register(component.Registration{ + Name: "phlare.write", + Args: Arguments{}, + Exports: Exports{}, + Build: func(o component.Options, c component.Arguments) (component.Component, error) { + return NewComponent(o, c.(Arguments)) + }, + }) +} + +// Arguments represents the input state of the phlare.write +// component. +type Arguments struct { + ExternalLabels map[string]string `river:"external_labels,attr,optional"` + Endpoints []*EndpointOptions `river:"endpoint,block,optional"` +} + +// UnmarshalRiver implements river.Unmarshaler. +func (rc *Arguments) UnmarshalRiver(f func(interface{}) error) error { + *rc = DefaultArguments() + + type config Arguments + return f((*config)(rc)) +} + +// EndpointOptions describes an individual location for where profiles +// should be delivered to using the Phlare push API. +type EndpointOptions struct { + Name string `river:"name,attr,optional"` + URL string `river:"url,attr"` + RemoteTimeout time.Duration `river:"remote_timeout,attr,optional"` + Headers map[string]string `river:"headers,attr,optional"` + HTTPClientConfig *config.HTTPClientConfig `river:"http_client_config,block,optional"` +} + +// UnmarshalRiver implements river.Unmarshaler. +func (r *EndpointOptions) UnmarshalRiver(f func(v interface{}) error) error { + *r = DefaultEndpointOptions() + + type arguments EndpointOptions + return f((*arguments)(r)) +} + +// Component is the phlare.write component. +type Component struct { + opts component.Options + cfg Arguments +} + +// Exports are the set of fields exposed by the phlare.write component. 
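+// The Receiver is exported as soon as the component is created and can be
+// referenced by other components (for example, in the forward_to list of a
+// phlare.scrape component) to send profiles to this component.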
+type Exports struct { + Receiver phlare.Appendable `river:"receiver,attr"` +} + +// NewComponent creates a new phlare.write component. +func NewComponent(o component.Options, c Arguments) (*Component, error) { + receiver, err := NewFanOut(o, c) + if err != nil { + return nil, err + } + // Immediately export the receiver + o.OnStateChange(Exports{Receiver: receiver}) + + return &Component{ + cfg: c, + opts: o, + }, nil +} + +var _ component.Component = (*Component)(nil) + +// Run implements Component. +func (c *Component) Run(ctx context.Context) error { + <-ctx.Done() + return ctx.Err() +} + +// Update implements Component. +func (c *Component) Update(newConfig component.Arguments) error { + c.cfg = newConfig.(Arguments) + level.Debug(c.opts.Logger).Log("msg", "updating phlare.write config", "old", c.cfg, "new", newConfig) + receiver, err := NewFanOut(c.opts, newConfig.(Arguments)) + if err != nil { + return err + } + c.opts.OnStateChange(Exports{Receiver: receiver}) + return nil +} + +type fanOutClient struct { + // The list of push clients to fan out to. + clients []pushv1connect.PusherServiceClient + + config Arguments + opts component.Options +} + +// NewFanOut creates a new fan out client that will fan out to all endpoints. +func NewFanOut(opts component.Options, config Arguments) (*fanOutClient, error) { + clients := make([]pushv1connect.PusherServiceClient, 0, len(config.Endpoints)) + for _, endpoint := range config.Endpoints { + httpClient, err := commonconfig.NewClientFromConfig(*endpoint.HTTPClientConfig.Convert(), endpoint.Name) + if err != nil { + return nil, err + } + clients = append(clients, pushv1connect.NewPusherServiceClient(httpClient, endpoint.URL, WithUserAgent(userAgent))) + } + return &fanOutClient{ + clients: clients, + config: config, + opts: opts, + }, nil +} + +// Push implements the PusherServiceClient interface. +func (f *fanOutClient) Push(ctx context.Context, req *connect.Request[pushv1.PushRequest]) (*connect.Response[pushv1.PushResponse], error) { + // Don't flow the context down to the `run.Group`. + // We want to fan out to all even in case of failures to one. + var ( + g run.Group + errs error + ) + + for i, client := range f.clients { + client := client + i := i + g.Add(func() error { + ctx, cancel := context.WithTimeout(ctx, f.config.Endpoints[i].RemoteTimeout) + defer cancel() + + req := connect.NewRequest(req.Msg) + for k, v := range f.config.Endpoints[i].Headers { + req.Header().Set(k, v) + } + _, err := client.Push(ctx, req) + if err != nil { + f.opts.Logger.Log("msg", "failed to push to endpoint", "endpoint", f.config.Endpoints[i].Name, "err", err) + errs = multierr.Append(errs, err) + } + return err + }, func(err error) {}) + } + if err := g.Run(); err != nil { + return nil, err + } + if errs != nil { + return nil, errs + } + return connect.NewResponse(&pushv1.PushResponse{}), nil +} + +// Append implements the phlare.Appendable interface. +func (f *fanOutClient) Appender() phlare.Appender { + return f +} + +// Append implements the Appender interface. +func (f *fanOutClient) Append(ctx context.Context, lbs labels.Labels, samples []*phlare.RawSample) error { + // todo(ctovena): we should probably pool the label pair arrays and label builder to avoid allocs. + var ( + protoLabels = make([]*typesv1.LabelPair, 0, len(lbs)+len(f.config.ExternalLabels)) + protoSamples = make([]*pushv1.RawSample, 0, len(samples)) + lbsBuilder = labels.NewBuilder(nil) + ) + + for _, label := range lbs { + // only __name__ is required as a private label. 
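+		// Every other label carrying the reserved "__" prefix is dropped before the
+		// profile is pushed. Configured external_labels are applied afterwards and
+		// override any target label with the same name.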
+ if strings.HasPrefix(label.Name, model.ReservedLabelPrefix) && label.Name != labels.MetricName { + continue + } + lbsBuilder.Set(label.Name, label.Value) + } + for name, value := range f.config.ExternalLabels { + lbsBuilder.Set(name, value) + } + for _, l := range lbsBuilder.Labels(lbs) { + protoLabels = append(protoLabels, &typesv1.LabelPair{ + Name: l.Name, + Value: l.Value, + }) + } + for _, sample := range samples { + protoSamples = append(protoSamples, &pushv1.RawSample{ + RawProfile: sample.RawProfile, + }) + } + // push to all clients + _, err := f.Push(ctx, connect.NewRequest(&pushv1.PushRequest{ + Series: []*pushv1.RawProfileSeries{ + {Labels: protoLabels, Samples: protoSamples}, + }, + })) + return err +} + +// WithUserAgent returns a `connect.ClientOption` that sets the User-Agent header on. +func WithUserAgent(agent string) connect.ClientOption { + return connect.WithInterceptors(&agentInterceptor{agent}) +} + +type agentInterceptor struct { + agent string +} + +func (i *agentInterceptor) WrapUnary(next connect.UnaryFunc) connect.UnaryFunc { + return func(ctx context.Context, req connect.AnyRequest) (connect.AnyResponse, error) { + req.Header().Set("User-Agent", i.agent) + return next(ctx, req) + } +} + +func (i *agentInterceptor) WrapStreamingClient(next connect.StreamingClientFunc) connect.StreamingClientFunc { + return func(ctx context.Context, spec connect.Spec) connect.StreamingClientConn { + conn := next(ctx, spec) + conn.RequestHeader().Set("User-Agent", i.agent) + return conn + } +} + +func (i *agentInterceptor) WrapStreamingHandler(next connect.StreamingHandlerFunc) connect.StreamingHandlerFunc { + return next +} diff --git a/component/phlare/write/write_test.go b/component/phlare/write/write_test.go new file mode 100644 index 000000000000..5ac84706fe01 --- /dev/null +++ b/component/phlare/write/write_test.go @@ -0,0 +1,207 @@ +package write + +import ( + "context" + "errors" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "github.com/bufbuild/connect-go" + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/phlare" + "github.com/grafana/agent/pkg/river" + "github.com/grafana/agent/pkg/util" + pushv1 "github.com/grafana/phlare/api/gen/proto/go/push/v1" + pushv1connect "github.com/grafana/phlare/api/gen/proto/go/push/v1/pushv1connect" + typesv1 "github.com/grafana/phlare/api/gen/proto/go/types/v1" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" +) + +type PushFunc func(context.Context, *connect.Request[pushv1.PushRequest]) (*connect.Response[pushv1.PushResponse], error) + +func (p PushFunc) Push(ctx context.Context, r *connect.Request[pushv1.PushRequest]) (*connect.Response[pushv1.PushResponse], error) { + return p(ctx, r) +} + +func Test_Write_FanOut(t *testing.T) { + var ( + export Exports + argument = DefaultArguments() + pushTotal = atomic.NewInt32(0) + serverCount = int32(10) + servers []*httptest.Server = make([]*httptest.Server, serverCount) + ) + argument.ExternalLabels = map[string]string{"foo": "buzz"} + handlerFn := func(err error) http.Handler { + _, handler := pushv1connect.NewPusherServiceHandler(PushFunc( + func(_ context.Context, req *connect.Request[pushv1.PushRequest]) (*connect.Response[pushv1.PushResponse], error) { + pushTotal.Inc() + require.Equal(t, "test", req.Header()["X-Test-Header"][0]) + require.Contains(t, req.Header()["User-Agent"][0], "GrafanaAgent/") + require.Equal(t, 
[]*typesv1.LabelPair{ + {Name: "__name__", Value: "test"}, + {Name: "foo", Value: "buzz"}, + {Name: "job", Value: "foo"}, + }, req.Msg.Series[0].Labels) + require.Equal(t, []byte("pprofraw"), req.Msg.Series[0].Samples[0].RawProfile) + return &connect.Response[pushv1.PushResponse]{}, err + }, + )) + return handler + } + + for i := int32(0); i < serverCount; i++ { + if i == 0 { + servers[i] = httptest.NewServer(handlerFn(errors.New("test"))) + } else { + servers[i] = httptest.NewServer(handlerFn(nil)) + } + argument.Endpoints = append(argument.Endpoints, &EndpointOptions{ + URL: servers[i].URL, + RemoteTimeout: DefaultEndpointOptions().RemoteTimeout, + Headers: map[string]string{ + "X-Test-Header": "test", + }, + }) + } + defer func() { + for _, s := range servers { + s.Close() + } + }() + createReceiver := func(t *testing.T, arg Arguments) phlare.Appendable { + var wg sync.WaitGroup + wg.Add(1) + c, err := NewComponent(component.Options{ + ID: "1", + Logger: util.TestLogger(t), + OnStateChange: func(e component.Exports) { + defer wg.Done() + export = e.(Exports) + }, + Registerer: prometheus.NewRegistry(), + }, arg) + require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go c.Run(ctx) + wg.Wait() // wait for the state change to happen + require.NotNil(t, export.Receiver) + return export.Receiver + } + + t.Run("with_failure", func(t *testing.T) { + r := createReceiver(t, argument) + pushTotal.Store(0) + err := r.Appender().Append(context.Background(), labels.FromMap(map[string]string{ + "__name__": "test", + "__type__": "type", + "job": "foo", + "foo": "bar", + }), []*phlare.RawSample{ + {RawProfile: []byte("pprofraw")}, + }) + require.EqualErrorf(t, err, "unknown: test", "expected error to be test") + require.Equal(t, serverCount, pushTotal.Load()) + }) + + t.Run("all_success", func(t *testing.T) { + argument.Endpoints = argument.Endpoints[1:] + r := createReceiver(t, argument) + pushTotal.Store(0) + err := r.Appender().Append(context.Background(), labels.FromMap(map[string]string{ + "__name__": "test", + "__type__": "type", + "job": "foo", + "foo": "bar", + }), []*phlare.RawSample{ + {RawProfile: []byte("pprofraw")}, + }) + require.NoError(t, err) + require.Equal(t, serverCount-1, pushTotal.Load()) + }) +} + +func Test_Write_Update(t *testing.T) { + var ( + export Exports + argument = DefaultArguments() + pushTotal = atomic.NewInt32(0) + ) + var wg sync.WaitGroup + wg.Add(1) + c, err := NewComponent(component.Options{ + ID: "1", + Logger: util.TestLogger(t), + OnStateChange: func(e component.Exports) { + defer wg.Done() + export = e.(Exports) + }, + Registerer: prometheus.NewRegistry(), + }, argument) + require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go c.Run(ctx) + wg.Wait() // wait for the state change to happen + require.NotNil(t, export.Receiver) + // First one is a noop + err = export.Receiver.Appender().Append(context.Background(), labels.FromMap(map[string]string{ + "__name__": "test", + }), []*phlare.RawSample{ + {RawProfile: []byte("pprofraw")}, + }) + require.NoError(t, err) + + _, handler := pushv1connect.NewPusherServiceHandler(PushFunc( + func(_ context.Context, req *connect.Request[pushv1.PushRequest]) (*connect.Response[pushv1.PushResponse], error) { + pushTotal.Inc() + return &connect.Response[pushv1.PushResponse]{}, err + }, + )) + server := httptest.NewServer(handler) + defer server.Close() + argument.Endpoints = []*EndpointOptions{ + { + URL: server.URL, + RemoteTimeout: 
DefaultEndpointOptions().RemoteTimeout, + }, + } + wg.Add(1) + require.NoError(t, c.Update(argument)) + wg.Wait() + err = export.Receiver.Appender().Append(context.Background(), labels.FromMap(map[string]string{ + "__name__": "test", + }), []*phlare.RawSample{ + {RawProfile: []byte("pprofraw")}, + }) + require.NoError(t, err) + require.Equal(t, int32(1), pushTotal.Load()) +} + +func Test_Unmarshal_Config(t *testing.T) { + var arg Arguments + river.Unmarshal([]byte(` + endpoint { + url = "http://localhost:4100" + remote_timeout = "10s" + } + endpoint { + url = "http://localhost:4200" + remote_timeout = "5s" + } + external_labels = { + "foo" = "bar", + }`), &arg) + require.Equal(t, "http://localhost:4100", arg.Endpoints[0].URL) + require.Equal(t, "http://localhost:4200", arg.Endpoints[1].URL) + require.Equal(t, time.Second*10, arg.Endpoints[0].RemoteTimeout) + require.Equal(t, time.Second*5, arg.Endpoints[1].RemoteTimeout) + require.Equal(t, "bar", arg.ExternalLabels["foo"]) +} diff --git a/docs/sources/flow/reference/components/phlare.scrape.md b/docs/sources/flow/reference/components/phlare.scrape.md new file mode 100644 index 000000000000..7dea4cda7528 --- /dev/null +++ b/docs/sources/flow/reference/components/phlare.scrape.md @@ -0,0 +1,341 @@ +--- +aliases: +- /docs/agent/latest/flow/reference/components/phlare.scrape +title: phlare.scrape +--- + +# phlare.scrape + +`phlare.scrape` configures a [pprof] scraping job for a given set of +`targets`. The scraped performance profiles are forwarded to the list of receivers passed in +`forward_to`. + +Multiple `phlare.scrape` components can be specified by giving them different labels. + +## Usage + +``` +phlare.scrape "LABEL" { + targets = TARGET_LIST + forward_to = RECEIVER_LIST +} +``` + +## Arguments + +The component configures and starts a new scrape job to scrape all of the +input targets. Multiple scrape jobs can be spawned for a single input target +when scraping multiple profile types. + +The list of arguments that can be used to configure the block is +presented below. + +The scrape job name defaults to the component's unique identifier. + +Any omitted fields take on their default values. If conflicting +attributes are being passed (e.g., defining both a BearerToken and +BearerTokenFile or configuring both Basic Authorization and OAuth2 at the same +time), the component reports an error. + +The following arguments are supported: + +Name | Type | Description | Default | Required +---- | ---- | ----------- | ------- | -------- +`targets` | `list(map(string))` | List of targets to scrape. | | yes +`forward_to` | `list(ProfilesReceiver)` | List of receivers to send scraped profiles to. | | yes +`job_name` | `string` | The job name to override the job label with. | component name | no +`params` | `map(list(string))` | A set of query parameters with which the target is scraped. | | no +`scrape_interval` | `duration` | How frequently to scrape the targets of this scrape config. | `"15s"` | no +`scrape_timeout` | `duration` | The timeout for scraping targets of this config. | `"15s"` | no +`scheme` | `string` | The URL scheme with which to fetch metrics from targets. | | no + +## Blocks + +The following blocks are supported inside the definition of `phlare.scrape`: + +Hierarchy | Block | Description | Required +--------- | ----- | ----------- | -------- +http_client_config | [http_client_config][] | HTTP client settings when connecting to targets. 
| no +http_client_config > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to targets. | no +http_client_config > authorization | [authorization][] | Configure generic authorization to targets. | no +http_client_config > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to targets. | no +http_client_config > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to targets via OAuth2. | no +http_client_config > tls_config | [tls_config][] | Configure TLS settings for connecting to targets. | no +profiling_config | [profiling_config][] | Configure profiling settings for the scrape job. | no +profiling_config > profile.memory | [profile.memory][] | Collect memory profiles. | no +profiling_config > profile.block | [profile.block][] | Collect profiles on blocks. | no +profiling_config > profile.goroutine | [profile.goroutine][] | Collect goroutine profiles. | no +profiling_config > profile.mutex | [profile.mutex][] | Collect mutex profiles. | no +profiling_config > profile.process_cpu | [profile.process_cpu][] | Collect CPU profiles. | no +profiling_config > profile.fgprof | [profile.fgprof][] | Collect [fgprof][] profiles. | no +profiling_config > profile.custom | [profile.custom][] | Collect custom profiles. | no + +The `>` symbol indicates deeper levels of nesting. For example, +`http_client_config > basic_auth` refers to a `basic_auth` block defined inside +an `http_client_config` block. + +[http_client_config]: #http_client_config-block +[basic_auth]: #basic_auth-block +[authorization]: #authorization-block +[oauth2]: #oauth2-block +[tls_config]: #tls_config-block +[profiling_config]: #profiling_config-block +[profile.memory]: #profile.memory-block +[profile.block]: #profile.block-block +[profile.goroutine]: #profile.goroutine-block +[profile.mutex]: #profile.mutex-block +[profile.process_cpu]: #profile.process_cpu-block +[profile.fgprof]: #profile.fgprof-block +[profile.custom]: #profile.custom-block +[pprof]: https://github.com/google/pprof/blob/main/doc/README.md + +[fgprof]: https://github.com/felixge/fgprof + +### http_client_config block + +The `http_client_config` block configures settings used to connect to +endpoints. + +{{< docs/shared lookup="flow/reference/components/http-client-config-block.md" source="agent" >}} + +### basic_auth block + +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" >}} + +### authorization block + +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" >}} + +### oauth2 block + +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" >}} + +### tls_config block + +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" >}} + +### profiling_config block + +The `profiling_config` block configures the profiling settings when scraping +targets. + +The block contains the following attributes: + +Name | Type | Description | Default | Required +---- | ---- | ----------- | ------- | -------- +`path_prefix` | `string` | The path prefix to use when scraping targets. | | no + +### profile.memory block + +The `profile.memory` block collects profiles on memory consumption. + +It accepts the following arguments: + +Name | Type | Description | Default | Required +---- | ---- | ----------- | ------- | -------- +`enabled` | `boolean` | Enable this profile type to be scraped. | `false` | no +`path` | `string` | The path to the profile type on the target. 
| `"/debug/pprof/allocs"` | no
+`delta` | `boolean` | Whether to scrape the profile as a delta. | `false` | no
+
+When the `delta` argument is `true`, a `seconds` query parameter is
+automatically added to requests.
+
+### profile.block block
+
+The `profile.block` block collects profiles on process blocking.
+
+It accepts the following arguments:
+
+Name | Type | Description | Default | Required
+---- | ---- | ----------- | ------- | --------
+`enabled` | `boolean` | Enable this profile type to be scraped. | `false` | no
+`path` | `string` | The path to the profile type on the target. | `"/debug/pprof/block"` | no
+`delta` | `boolean` | Whether to scrape the profile as a delta. | `false` | no
+
+When the `delta` argument is `true`, a `seconds` query parameter is
+automatically added to requests.
+
+### profile.goroutine block
+
+The `profile.goroutine` block collects profiles on the number of goroutines.
+
+It accepts the following arguments:
+
+Name | Type | Description | Default | Required
+---- | ---- | ----------- | ------- | --------
+`enabled` | `boolean` | Enable this profile type to be scraped. | `false` | no
+`path` | `string` | The path to the profile type on the target. | `"/debug/pprof/goroutine"` | no
+`delta` | `boolean` | Whether to scrape the profile as a delta. | `false` | no
+
+When the `delta` argument is `true`, a `seconds` query parameter is
+automatically added to requests.
+
+### profile.mutex block
+
+The `profile.mutex` block collects profiles on mutexes.
+
+It accepts the following arguments:
+
+Name | Type | Description | Default | Required
+---- | ---- | ----------- | ------- | --------
+`enabled` | `boolean` | Enable this profile type to be scraped. | `false` | no
+`path` | `string` | The path to the profile type on the target. | `"/debug/pprof/mutex"` | no
+`delta` | `boolean` | Whether to scrape the profile as a delta. | `false` | no
+
+When the `delta` argument is `true`, a `seconds` query parameter is
+automatically added to requests.
+
+### profile.process_cpu block
+
+The `profile.process_cpu` block collects profiles on CPU consumption for the
+process.
+
+It accepts the following arguments:
+
+Name | Type | Description | Default | Required
+---- | ---- | ----------- | ------- | --------
+`enabled` | `boolean` | Enable this profile type to be scraped. | `false` | no
+`path` | `string` | The path to the profile type on the target. | `"/debug/pprof/profile"` | no
+`delta` | `boolean` | Whether to scrape the profile as a delta. | `true` | no
+
+When the `delta` argument is `true`, a `seconds` query parameter is
+automatically added to requests.
+
+### profile.fgprof block
+
+The `profile.fgprof` block collects profiles from an [fgprof][] endpoint.
+
+It accepts the following arguments:
+
+Name | Type | Description | Default | Required
+---- | ---- | ----------- | ------- | --------
+`enabled` | `boolean` | Enable this profile type to be scraped. | `false` | no
+`path` | `string` | The path to the profile type on the target. | `"/debug/fgprof"` | no
+`delta` | `boolean` | Whether to scrape the profile as a delta. | `true` | no
+
+When the `delta` argument is `true`, a `seconds` query parameter is
+automatically added to requests.
+
+### profile.custom block
+
+The `profile.custom` block allows for collecting profiles from custom
+endpoints. Blocks must be specified with a label:
+
+```river
+profile.custom "PROFILE_TYPE" {
+  enabled = true
+  path    = "PROFILE_PATH"
+}
+```
+
+Multiple `profile.custom` blocks can be specified. Labels assigned to
+`profile.custom` blocks must be unique across the component.
+
+The `profile.custom` block accepts the following arguments:
+
+Name | Type | Description | Default | Required
+---- | ---- | ----------- | ------- | --------
+`enabled` | `boolean` | Enable this profile type to be scraped. | | yes
+`path` | `string` | The path to the profile type on the target. | | yes
+`delta` | `boolean` | Whether to scrape the profile as a delta. | `false` | no
+
+When the `delta` argument is `true`, a `seconds` query parameter is
+automatically added to requests.
+
+## Exported fields
+
+`phlare.scrape` does not export any fields that can be referenced by other
+components.
+
+## Component health
+
+`phlare.scrape` is only reported as unhealthy if given an invalid
+configuration.
+
+## Debug information
+
+`phlare.scrape` reports the status of the last scrape for each configured
+scrape job on the component's debug endpoint.
+
+## Debug metrics
+
+* `phlare_fanout_latency` (histogram): Write latency for sending to direct and indirect components.
+
+## Scraping behavior
+
+The `phlare.scrape` component borrows the scraping behavior of Prometheus.
+Prometheus, and by extension this component, uses a pull model for scraping
+profiles from a given set of _targets_.
+Each scrape target is defined as a set of key-value pairs called _labels_.
+
+The set of targets can either be _static_ or dynamically provided periodically
+by a service discovery component such as `discovery.kubernetes`. The special
+label `__address__` _must always_ be present and corresponds to the
+`<host>:<port>` that is used for the scrape request.
+
+By default, the scrape job tries to scrape all available targets' `/debug/pprof`
+endpoints using HTTP, with a scrape interval of 15 seconds and a scrape timeout
+of 15 seconds. The profile paths, protocol scheme, scrape interval and timeout,
+query parameters, and any other settings can be configured using the
+component's arguments.
+
+The scrape job expects profiles exposed by the endpoint to follow the
+[pprof] protobuf format. All profiles are then propagated
+to each receiver listed in the component's `forward_to` argument.
+
+Labels coming from targets that start with a double underscore (`__`) are
+treated as _internal_ and are removed prior to scraping.
+
+The `phlare.scrape` component regards a scrape as successful if it
+responded with an HTTP `200 OK` status code and returned the body of a valid [pprof] profile.
+
+If the scrape request fails, the component's debug UI section contains more
+detailed information about the failure, the last successful scrape, and
+the labels last used for scraping.
+
+The following labels are automatically injected into the scraped profiles and
+can help pin down a scrape target.
+
+Label                 | Description
+--------------------- | ----------
+job                   | The configured job name that the target belongs to. Defaults to the fully formed component name.
+instance              | The `__address__` or `<host>:<port>` of the scrape target's URL.
+
+## Example
+
+The following example sets up a scrape job that scrapes two local applications (the Agent itself and Phlare) with the given profiling configuration.
+The scraped profiles are sent to the receivers passed in `forward_to`, which are defined by other components.
+
+```river
+phlare.scrape "local" {
+  targets    = [
+    {"__address__" = "localhost:4100", "app"="phlare"},
+    {"__address__" = "localhost:12345", "app"="agent"},
+  ]
+  forward_to = [phlare.write.local.receiver]
+  profiling_config {
+    profile.fgprof {
+      enabled = true
+    }
+    profile.block {
+      enabled = false
+    }
+    profile.mutex {
+      enabled = false
+    }
+  }
+}
+```
+
+These are the endpoints that are scraped every 15 seconds:
+
+```
+http://localhost:4100/debug/pprof/allocs
+http://localhost:4100/debug/pprof/goroutine
+http://localhost:4100/debug/pprof/profile?seconds=14
+http://localhost:4100/debug/fgprof?seconds=14
+http://localhost:12345/debug/pprof/allocs
+http://localhost:12345/debug/pprof/goroutine
+http://localhost:12345/debug/pprof/profile?seconds=14
+http://localhost:12345/debug/fgprof?seconds=14
+```
diff --git a/docs/sources/flow/reference/components/phlare.write.md b/docs/sources/flow/reference/components/phlare.write.md
new file mode 100644
index 000000000000..97b533e6d350
--- /dev/null
+++ b/docs/sources/flow/reference/components/phlare.write.md
@@ -0,0 +1,139 @@
+---
+aliases:
+- /docs/agent/latest/flow/reference/components/phlare.write
+title: phlare.write
+---
+
+# phlare.write
+
+`phlare.write` receives performance profiles from other components and forwards them
+to a series of user-supplied endpoints using [Phlare's Push API](https://grafana.com/oss/phlare/).
+
+Multiple `phlare.write` components can be specified by giving them
+different labels.
+
+## Usage
+
+```river
+phlare.write "LABEL" {
+  endpoint {
+    url = PHLARE_URL
+
+    ...
+  }
+
+  ...
+}
+```
+
+## Arguments
+
+The following arguments are supported:
+
+Name | Type | Description | Default | Required
+---- | ---- | ----------- | ------- | --------
+`external_labels` | `map(string)` | Labels to add to profiles sent over the network. | | no
+
+## Blocks
+
+The following blocks are supported inside the definition of
+`phlare.write`:
+
+Hierarchy | Block | Description | Required
+--------- | ----- | ----------- | --------
+endpoint | [endpoint][] | Location to send profiles to. | no
+endpoint > http_client_config | [http_client_config][] | HTTP client settings when connecting to the endpoint. | no
+endpoint > http_client_config > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
+endpoint > http_client_config > authorization | [authorization][] | Configure generic authorization to the endpoint. | no
+endpoint > http_client_config > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
+endpoint > http_client_config > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+endpoint > http_client_config > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+
+The `>` symbol indicates deeper levels of nesting. For example, `endpoint >
+http_client_config` refers to an `http_client_config` block defined inside an
+`endpoint` block.
+
+### endpoint block
+
+The `endpoint` block describes a single location to send profiles to. Multiple
+`endpoint` blocks can be provided to send profiles to multiple locations.
+
+The following arguments are supported:
+
+Name | Type | Description | Default | Required
+---- | ---- | ----------- | ------- | --------
+`url` | `string` | Full URL to send profiles to. | | yes
+`name` | `string` | Optional name to identify the endpoint in metrics. | | no
+`remote_timeout` | `duration` | Timeout for requests made to the URL.
| `"30s"` | no +`headers` | `map(string)` | Extra headers to deliver with the request. | | no + +When multiple `endpoint` blocks are provided, profiles are concurrently forwarded to all +configured locations. + +### http_client_config block + +The `http_client_config` configures settings used to connect to the +remote_write server. + +{{< docs/shared lookup="flow/reference/components/http-client-config-block.md" source="agent" >}} + +### basic_auth block + +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" >}} + +### authorization block + +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" >}} + +### oauth2 block + +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" >}} + +### tls_config block + +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" >}} + +## Exported fields + +The following fields are exported and can be referenced by other components: + +Name | Type | Description +---- | ---- | ----------- +`receiver` | `receiver` | A value that other components can use to send profiles to. + +## Component health + +`phlare.write` is only reported as unhealthy if given an invalid +configuration. In those cases, exported fields are kept at their last healthy +values. + +## Debug information + +`phlare.write` does not expose any component-specific debug +information. + +## Example + +```river +phlare.write "staging" { + // Send metrics to a locally running Phlare instance. + endpoint { + url = "http://phlare:4100" + headers = { + "X-Scope-Org-ID" = "squad-1", + } + } + external_labels = { + "env" = "staging", + } +} + + +phlare.scrape "default" { + targets = [ + {"__address__" = "phlare:4100", "app"="phlare"}, + {"__address__" = "agent:12345", "app"="agent"}, + ] + forward_to = [phlare.write.staging.receiver] +} +``` diff --git a/go.mod b/go.mod index f4edb55e8ab4..8e84c46a43c3 100644 --- a/go.mod +++ b/go.mod @@ -70,7 +70,7 @@ require ( github.com/prometheus-operator/prometheus-operator v0.61.1 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.61.1 github.com/prometheus/client_golang v1.14.0 - github.com/prometheus/common v0.37.0 + github.com/prometheus/common v0.39.0 github.com/prometheus/consul_exporter v0.8.0 github.com/prometheus/memcached_exporter v0.10.0 github.com/prometheus/mysqld_exporter v0.14.0 @@ -89,15 +89,15 @@ require ( github.com/vincent-petithory/dataurl v1.0.0 github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d github.com/wk8/go-ordered-map v0.2.0 - go.opencensus.io v0.23.0 + go.opencensus.io v0.24.0 go.opentelemetry.io/collector v0.63.1 go.opentelemetry.io/otel/metric v0.33.0 go.opentelemetry.io/otel/trace v1.11.1 go.uber.org/atomic v1.10.0 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.23.0 - golang.org/x/net v0.2.0 - golang.org/x/sys v0.2.0 + golang.org/x/net v0.4.0 + golang.org/x/sys v0.3.0 golang.org/x/time v0.2.0 google.golang.org/grpc v1.51.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 @@ -121,9 +121,11 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.17.8 github.com/aws/aws-sdk-go-v2/service/s3 v1.27.11 github.com/bmatcuk/doublestar v1.2.2 + github.com/bufbuild/connect-go v1.4.1 github.com/fatih/color v1.13.0 github.com/google/go-cmp v0.5.9 github.com/google/renameio/v2 v2.0.0 + github.com/grafana/phlare/api v0.1.2 github.com/grafana/snowflake-prometheus-exporter v0.0.0-20221213150626-862cad8e9538 github.com/grafana/vmware_exporter v0.0.4-beta github.com/hashicorp/golang-lru v0.5.4 @@ -152,17 
+154,18 @@ require ( go.opentelemetry.io/otel/sdk v1.11.1 go.opentelemetry.io/otel/sdk/metric v0.33.0 go.opentelemetry.io/proto/otlp v0.19.0 + go.uber.org/goleak v1.2.0 golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 - golang.org/x/text v0.4.0 + golang.org/x/text v0.5.0 google.golang.org/protobuf v1.28.1 ) require ( cloud.google.com/go v0.105.0 // indirect - cloud.google.com/go/compute v1.12.1 // indirect + cloud.google.com/go/compute v1.13.0 // indirect cloud.google.com/go/compute/metadata v0.2.1 // indirect - cloud.google.com/go/iam v0.7.0 // indirect - cloud.google.com/go/pubsub v1.25.1 // indirect + cloud.google.com/go/iam v0.8.0 // indirect + cloud.google.com/go/pubsub v1.27.1 // indirect cloud.google.com/go/storage v1.27.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.1 // indirect @@ -228,7 +231,7 @@ require ( github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect github.com/cilium/ebpf v0.7.0 // indirect github.com/cloudflare/cloudflare-go v0.27.0 // indirect @@ -313,7 +316,7 @@ require ( github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/wire v0.5.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect - github.com/googleapis/gax-go/v2 v2.6.0 // indirect + github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/gophercloud/gophercloud v1.0.0 // indirect github.com/gorilla/handlers v1.5.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect @@ -322,7 +325,7 @@ require ( github.com/gosnmp/gosnmp v1.34.0 // indirect github.com/grafana/go-gelf/v2 v2.0.1 // indirect github.com/grobie/gomemcache v0.0.0-20201204163352-08d7c80fcac6 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hairyhenderson/toml v0.4.2-0.20210923231440-40456b8e66cf // indirect github.com/hashicorp/cronexpr v1.1.1 // indirect @@ -511,21 +514,20 @@ require ( go.mongodb.org/mongo-driver v1.10.2 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 // indirect - go.uber.org/goleak v1.2.0 // indirect go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect gocloud.dev v0.24.0 // indirect golang.org/x/crypto v0.1.0 // indirect golang.org/x/mod v0.6.0 // indirect - golang.org/x/oauth2 v0.2.0 // indirect + golang.org/x/oauth2 v0.3.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/term v0.2.0 // indirect + golang.org/x/term v0.3.0 // indirect golang.org/x/tools v0.2.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect - google.golang.org/api v0.102.0 // indirect + google.golang.org/api v0.103.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect + google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect gopkg.in/fsnotify.v1 
v1.4.7 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 23b856a7bd74..419945e59342 100644 --- a/go.sum +++ b/go.sum @@ -101,8 +101,9 @@ cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLq cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -165,8 +166,8 @@ cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2r cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam v0.7.0 h1:k4MuwOsS7zGJJ+QfZ5vBK8SgHBAvYN/23BWsiihJ1vs= -cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= @@ -219,8 +220,8 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+ cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/pubsub v1.16.0/go.mod h1:6A8EfoWZ/lUvCWStKGwAWauJZSiuV0Mkmu6WilK/TxQ= -cloud.google.com/go/pubsub v1.25.1 h1:l0wCNZKuEp2Q54wAy8283EV9O57+7biWOXnnU2/Tq/A= -cloud.google.com/go/pubsub v1.25.1/go.mod h1:bY6l7rF8kCcwz6V3RaQ6kK4p5g7qc7PqjRoE9wDOqOU= +cloud.google.com/go/pubsub v1.27.1 h1:q+J/Nfr6Qx4RQeu3rJcnN48SNC0qzlYzSeqkPq93VHs= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= @@ -693,6 +694,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod 
h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bufbuild/connect-go v1.4.1 h1:6usL3JGjKhxQpvDlizP7u8VfjAr1JkckcAUbrdcbgNY= +github.com/bufbuild/connect-go v1.4.1/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= @@ -729,8 +732,9 @@ github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8= @@ -1562,8 +1566,9 @@ github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/Oth github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= @@ -1629,6 +1634,8 @@ github.com/grafana/opentelemetry-collector/pdata v0.0.0-20221101113646-2be4b341c github.com/grafana/opentelemetry-collector/pdata v0.0.0-20221101113646-2be4b341c150/go.mod h1:IzvXUGQml2mrnvdb8zIlEW3qQs9oFLdD2hLwJdZ+pek= github.com/grafana/perflib_exporter v0.1.1-0.20211013152516-e37e14fb8b0a h1:H+syQKTZuBnoVp9qOuHoktNeuCDrgySAbMT9jyTp564= github.com/grafana/perflib_exporter v0.1.1-0.20211013152516-e37e14fb8b0a/go.mod h1:MinSWm88jguXFFrGsP56PtleUb4Qtm4tNRH/wXNXRTI= +github.com/grafana/phlare/api v0.1.2 h1:1jrwd3KnsXMzj/tJih9likx5EvbY3pbvLbDqAAYem30= +github.com/grafana/phlare/api v0.1.2/go.mod h1:29vcLwFDmZBDce2jwFIMtzvof7fzPadT8VMKw9ks7FU= github.com/grafana/postgres_exporter v0.8.1-0.20210722175051-db35d7c2f520 h1:HnFWqxhoSF3WC7sKAdMZ+SRXvHLVZlZ3sbQjuUlTqkw= github.com/grafana/postgres_exporter v0.8.1-0.20210722175051-db35d7c2f520/go.mod h1:+HPXgiOV0InDHcZ2jNijL1SOKvo0eEPege5fQA0+ICI= github.com/grafana/process-exporter 
v0.7.3-0.20210106202358-831154072e2a h1:JUnP/laSl2GylHT0+fqAqOZY+7XkLh1mLefLN0n8Mmk= @@ -1671,8 +1678,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1/go.mod h1:G+WkljZi4mflcqVxYSgvt8MNctRQHjEH8ubKtt1Ka3w= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 h1:kr3j8iIMR4ywO/O0rvksXaJvauGGCMg2zAZIiNZ9uIQ= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0/go.mod h1:ummNFgdgLhhX7aIiy35vVmQNS0rWXknfPE0qe6fmFXg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 h1:1JYBfzqrWPcCclBwxFCPAou9n+q86mfnu7NAeHfte7A= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0/go.mod h1:YDZoGHuwE+ov0c8smSH49WLF3F2LaWnYYuDVd+EWrc0= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= @@ -2568,8 +2575,9 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= +github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= @@ -3026,8 +3034,9 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/collector/semconv v0.63.1 h1:o9Zz/vwqT85XXYf9XTIXa0qkmfEY8b/JMm4lXf+dwpc= go.opentelemetry.io/collector/semconv v0.63.1/go.mod h1:5o9yhOa+ABt7g2E5JABDxGZ1PQPbtfxrKNbYn+LOTXU= go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= @@ -3340,8 +3349,8 @@ golang.org/x/net v0.0.0-20220921155015-db77216a4ee9/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= 
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -3375,8 +3384,8 @@ golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= -golang.org/x/oauth2 v0.2.0 h1:GtQkldQ9m7yvzCL1V+LrYow3Khe0eJH0w7RbX/VbaIU= -golang.org/x/oauth2 v0.2.0/go.mod h1:Cwn6afJ8jrQwYMxQDTpISoXmXW9I6qF6vDeuuoX3Ibs= +golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= +golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -3579,14 +3588,14 @@ golang.org/x/sys v0.0.0-20220908150016-7ac13a9a928d/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -3599,8 +3608,9 @@ golang.org/x/text v0.3.5/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -3788,8 +3798,9 @@ google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I= google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3932,8 +3943,8 @@ google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=