From 1e533bec7bcfdac414d34e2d83910ff0aa21fb52 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Wed, 8 May 2024 14:41:02 +0200 Subject: [PATCH 1/6] Reduce the number of Kafka clients used by producers Signed-off-by: Marco Pracucci --- pkg/storage/ingest/config.go | 6 ++++ pkg/storage/ingest/config_test.go | 9 ++++++ pkg/storage/ingest/writer.go | 51 ++++++++++++++----------------- pkg/storage/ingest/writer_test.go | 44 +++++++++++++++++++++++++- 4 files changed, 81 insertions(+), 29 deletions(-) diff --git a/pkg/storage/ingest/config.go b/pkg/storage/ingest/config.go index 9f321c30911..4de4e2dd73e 100644 --- a/pkg/storage/ingest/config.go +++ b/pkg/storage/ingest/config.go @@ -22,6 +22,7 @@ const ( var ( ErrMissingKafkaAddress = errors.New("the Kafka address has not been configured") ErrMissingKafkaTopic = errors.New("the Kafka topic has not been configured") + ErrInvalidWriteClients = errors.New("the configured number of write clients is invalid (must be greater than 0)") ErrInvalidConsumePosition = errors.New("the configured consume position is invalid") consumeFromPositionOptions = []string{consumeFromLastOffset, consumeFromStart, consumeFromEnd, consumeFromTimestamp} @@ -61,6 +62,7 @@ type KafkaConfig struct { ClientID string `yaml:"client_id"` DialTimeout time.Duration `yaml:"dial_timeout"` WriteTimeout time.Duration `yaml:"write_timeout"` + WriteClients int `yaml:"write_clients"` ConsumerGroup string `yaml:"consumer_group"` @@ -85,6 +87,7 @@ func (cfg *KafkaConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) f.StringVar(&cfg.ClientID, prefix+".client-id", "", "The Kafka client ID.") f.DurationVar(&cfg.DialTimeout, prefix+".dial-timeout", 2*time.Second, "The maximum time allowed to open a connection to a Kafka broker.") f.DurationVar(&cfg.WriteTimeout, prefix+".write-timeout", 10*time.Second, "How long to wait for an incoming write request to be successfully committed to the Kafka backend.") + f.IntVar(&cfg.WriteClients, prefix+".write-clients", 1, "The number of Kafka clients used by producers. When the configured number of clients is greater than 1, partitions are sharded among Kafka clients. A higher number of clients may provide higher write throughput at the cost of additional Metadata request pressure on Kafka.") f.StringVar(&cfg.ConsumerGroup, prefix+".consumer-group", "", "The consumer group used by the consumer to track the last consumed offset. The consumer group must be different for each ingester. If the configured consumer group contains the '<partition>' placeholder, it will be replaced with the actual partition ID owned by the ingester.
When empty (recommended), Mimir will use the ingester instance ID to guarantee uniqueness.") @@ -105,6 +108,9 @@ func (cfg *KafkaConfig) Validate() error { if cfg.Topic == "" { return ErrMissingKafkaTopic } + if cfg.WriteClients < 1 { + return ErrInvalidWriteClients + } if !slices.Contains(consumeFromPositionOptions, cfg.ConsumeFromPositionAtStartup) { return ErrInvalidConsumePosition } diff --git a/pkg/storage/ingest/config_test.go b/pkg/storage/ingest/config_test.go index ef5ff0ba876..544f28b2f11 100644 --- a/pkg/storage/ingest/config_test.go +++ b/pkg/storage/ingest/config_test.go @@ -68,6 +68,15 @@ func TestConfig_Validate(t *testing.T) { }, expectedErr: ErrInvalidConsumePosition, }, + "should fail if ingest storage is enabled and the configured number of Kafka write clients is 0": { + setup: func(cfg *Config) { + cfg.Enabled = true + cfg.KafkaConfig.Address = "localhost" + cfg.KafkaConfig.Topic = "test" + cfg.KafkaConfig.WriteClients = 0 + }, + expectedErr: ErrInvalidWriteClients, + }, } for testName, testData := range tests { diff --git a/pkg/storage/ingest/writer.go b/pkg/storage/ingest/writer.go index a607809b00f..4a546679e2d 100644 --- a/pkg/storage/ingest/writer.go +++ b/pkg/storage/ingest/writer.go @@ -36,9 +36,10 @@ type Writer struct { logger log.Logger registerer prometheus.Registerer - // We create 1 writer per partition to better parallelize the workload. + // We support multiple Kafka clients to better parallelize the workload. The number of + // clients is fixed during the Writer lifecycle, but they're initialised lazily. writersMx sync.RWMutex - writers map[int32]*kgo.Client + writers []*kgo.Client // Metrics. writeLatency prometheus.Histogram @@ -53,7 +54,7 @@ func NewWriter(kafkaCfg KafkaConfig, logger log.Logger, reg prometheus.Registere kafkaCfg: kafkaCfg, logger: logger, registerer: reg, - writers: map[int32]*kgo.Client{}, + writers: make([]*kgo.Client, kafkaCfg.WriteClients), maxInflightProduceRequests: 20, // Metrics. @@ -84,12 +85,8 @@ func (w *Writer) starting(_ context.Context) error { } func (w *Writer) stopping(_ error) error { - w.writersMx.Lock() - defer w.writersMx.Unlock() - - for partitionID, client := range w.writers { + for _, client := range w.writers { client.Close() - delete(w.writers, partitionID) } return nil @@ -112,8 +109,9 @@ func (w *Writer) WriteSync(ctx context.Context, partitionID int32, userID string // Prepare the record to write. record := &kgo.Record{ - Key: []byte(userID), // We don't partition based on the key, so the value here doesn't make any difference. - Value: data, + Key: []byte(userID), // We don't partition based on the key, so the value here doesn't make any difference. + Value: data, + Partition: partitionID, } // Write to backend. @@ -157,7 +155,8 @@ func (w *Writer) produceSync(ctx context.Context, client *kgo.Client, record *kg func (w *Writer) getKafkaWriterForPartition(partitionID int32) (*kgo.Client, error) { // Check if the writer has already been created. w.writersMx.RLock() - writer := w.writers[partitionID] + clientID := int(partitionID) % len(w.writers) + writer := w.writers[clientID] w.writersMx.RUnlock() if writer != nil { @@ -168,25 +167,25 @@ func (w *Writer) getKafkaWriterForPartition(partitionID int32) (*kgo.Client, err defer w.writersMx.Unlock() // Ensure a new writer wasn't created in the meanwhile. If so, use it. 
- writer = w.writers[partitionID] + writer = w.writers[clientID] if writer != nil { return writer, nil } - newWriter, err := w.newKafkaWriter(partitionID) + newWriter, err := w.newKafkaWriter(clientID) if err != nil { return nil, err } - w.writers[partitionID] = newWriter + w.writers[clientID] = newWriter return newWriter, nil } -// newKafkaWriter creates a new Kafka client used to write to a specific partition. -func (w *Writer) newKafkaWriter(partitionID int32) (*kgo.Client, error) { - logger := log.With(w.logger, "partition", partitionID) +// newKafkaWriter creates a new Kafka client. +func (w *Writer) newKafkaWriter(clientID int) (*kgo.Client, error) { + logger := log.With(w.logger, "client_id", clientID) // Do not export the client ID, because we use it to specify options to the backend. metrics := kprom.NewMetrics("cortex_ingest_storage_writer", - kprom.Registerer(prometheus.WrapRegistererWith(prometheus.Labels{"partition": strconv.Itoa(int(partitionID))}, w.registerer)), + kprom.Registerer(prometheus.WrapRegistererWith(prometheus.Labels{"client_id": strconv.Itoa(clientID)}, w.registerer)), kprom.FetchAndProduceDetail(kprom.Batches, kprom.Records, kprom.CompressedBytes, kprom.UncompressedBytes)) opts := append( @@ -195,7 +194,7 @@ func (w *Writer) newKafkaWriter(partitionID int32) (*kgo.Client, error) { kgo.DefaultProduceTopic(w.kafkaCfg.Topic), // Use a static partitioner because we want to be in control of the partition. - kgo.RecordPartitioner(newKafkaStaticPartitioner(int(partitionID))), + kgo.RecordPartitioner(newKafkaStaticPartitioner()), // Set the upper bounds the size of a record batch. kgo.ProducerBatchMaxBytes(16_000_000), @@ -234,14 +233,10 @@ func (w *Writer) newKafkaWriter(partitionID int32) (*kgo.Client, error) { return kgo.NewClient(opts...) } -type kafkaStaticPartitioner struct { - partitionID int -} +type kafkaStaticPartitioner struct{} -func newKafkaStaticPartitioner(partitionID int) *kafkaStaticPartitioner { - return &kafkaStaticPartitioner{ - partitionID: partitionID, - } +func newKafkaStaticPartitioner() *kafkaStaticPartitioner { + return &kafkaStaticPartitioner{} } // ForTopic implements kgo.Partitioner. @@ -257,6 +252,6 @@ func (p *kafkaStaticPartitioner) RequiresConsistency(_ *kgo.Record) bool { } // Partition implements kgo.TopicPartitioner. -func (p *kafkaStaticPartitioner) Partition(_ *kgo.Record, _ int) int { - return p.partitionID +func (p *kafkaStaticPartitioner) Partition(r *kgo.Record, _ int) int { + return int(r.Partition) } diff --git a/pkg/storage/ingest/writer_test.go b/pkg/storage/ingest/writer_test.go index 521718cadf8..9d4e27df569 100644 --- a/pkg/storage/ingest/writer_test.go +++ b/pkg/storage/ingest/writer_test.go @@ -34,7 +34,7 @@ func TestMain(m *testing.M) { func TestWriter_WriteSync(t *testing.T) { const ( topicName = "test" - numPartitions = 1 + numPartitions = 2 partitionID = 0 tenantID = "user-1" ) @@ -99,6 +99,48 @@ func TestWriter_WriteSync(t *testing.T) { `, len(fetches.Records()[0].Value))), "cortex_ingest_storage_writer_sent_bytes_total")) }) + t.Run("should write to the requested partition", func(t *testing.T) { + t.Parallel() + + seriesPerPartition := map[int32][]mimirpb.PreallocTimeseries{ + 0: series1, + 1: series2, + } + + _, clusterAddr := testkafka.CreateCluster(t, numPartitions, topicName) + writer, _ := createTestWriter(t, createTestKafkaConfig(clusterAddr, topicName)) + + // Write to partitions. 
+ for partitionID, series := range seriesPerPartition { + err := writer.WriteSync(ctx, partitionID, tenantID, &mimirpb.WriteRequest{Timeseries: series, Metadata: nil, Source: mimirpb.API}) + require.NoError(t, err) + } + + // Read back from Kafka. + for partitionID, expectedSeries := range seriesPerPartition { + consumer, err := kgo.NewClient(kgo.SeedBrokers(clusterAddr), kgo.ConsumePartitions(map[string]map[int32]kgo.Offset{topicName: {partitionID: kgo.NewOffset().AtStart()}})) + require.NoError(t, err) + t.Cleanup(consumer.Close) + + fetchCtx, cancel := context.WithTimeout(ctx, time.Second) + t.Cleanup(cancel) + + fetches := consumer.PollFetches(fetchCtx) + require.NoError(t, fetches.Err()) + require.Len(t, fetches.Records(), 1) + assert.Equal(t, []byte(tenantID), fetches.Records()[0].Key) + + received := mimirpb.WriteRequest{} + require.NoError(t, received.Unmarshal(fetches.Records()[0].Value)) + require.Len(t, received.Timeseries, len(expectedSeries)) + + for idx, expected := range expectedSeries { + assert.Equal(t, expected.Labels, received.Timeseries[idx].Labels) + assert.Equal(t, expected.Samples, received.Timeseries[idx].Samples) + } + } + }) + t.Run("should interrupt the WriteSync() on context cancelled but other concurrent requests should not fail", func(t *testing.T) { t.Parallel() From c3647e292a1e9aeb97bfb64143889fd35a46384f Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Wed, 8 May 2024 14:45:57 +0200 Subject: [PATCH 2/6] Fix Writer.stopping() Signed-off-by: Marco Pracucci --- pkg/storage/ingest/writer.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/storage/ingest/writer.go b/pkg/storage/ingest/writer.go index 4a546679e2d..20e1cae44cc 100644 --- a/pkg/storage/ingest/writer.go +++ b/pkg/storage/ingest/writer.go @@ -85,8 +85,12 @@ func (w *Writer) starting(_ context.Context) error { } func (w *Writer) stopping(_ error) error { - for _, client := range w.writers { + w.writersMx.Lock() + defer w.writersMx.Unlock() + + for idx, client := range w.writers { client.Close() + w.writers[idx] = nil } return nil From 16f46875d14c26d6721739780b89e31d7b4cf1b4 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Wed, 8 May 2024 14:49:23 +0200 Subject: [PATCH 3/6] Improved TestWriter_WriteSync and fixed Writer.stopping() Signed-off-by: Marco Pracucci --- pkg/storage/ingest/writer.go | 4 ++ pkg/storage/ingest/writer_test.go | 80 +++++++++++++++++-------------- 2 files changed, 48 insertions(+), 36 deletions(-) diff --git a/pkg/storage/ingest/writer.go b/pkg/storage/ingest/writer.go index 20e1cae44cc..78d1648cbaf 100644 --- a/pkg/storage/ingest/writer.go +++ b/pkg/storage/ingest/writer.go @@ -89,6 +89,10 @@ func (w *Writer) stopping(_ error) error { defer w.writersMx.Unlock() for idx, client := range w.writers { + if client == nil { + continue + } + client.Close() w.writers[idx] = nil } diff --git a/pkg/storage/ingest/writer_test.go b/pkg/storage/ingest/writer_test.go index 9d4e27df569..dc0dfe2ed51 100644 --- a/pkg/storage/ingest/writer_test.go +++ b/pkg/storage/ingest/writer_test.go @@ -102,42 +102,50 @@ func TestWriter_WriteSync(t *testing.T) { t.Run("should write to the requested partition", func(t *testing.T) { t.Parallel() - seriesPerPartition := map[int32][]mimirpb.PreallocTimeseries{ - 0: series1, - 1: series2, - } - - _, clusterAddr := testkafka.CreateCluster(t, numPartitions, topicName) - writer, _ := createTestWriter(t, createTestKafkaConfig(clusterAddr, topicName)) - - // Write to partitions. 
- for partitionID, series := range seriesPerPartition { - err := writer.WriteSync(ctx, partitionID, tenantID, &mimirpb.WriteRequest{Timeseries: series, Metadata: nil, Source: mimirpb.API}) - require.NoError(t, err) - } - - // Read back from Kafka. - for partitionID, expectedSeries := range seriesPerPartition { - consumer, err := kgo.NewClient(kgo.SeedBrokers(clusterAddr), kgo.ConsumePartitions(map[string]map[int32]kgo.Offset{topicName: {partitionID: kgo.NewOffset().AtStart()}})) - require.NoError(t, err) - t.Cleanup(consumer.Close) - - fetchCtx, cancel := context.WithTimeout(ctx, time.Second) - t.Cleanup(cancel) - - fetches := consumer.PollFetches(fetchCtx) - require.NoError(t, fetches.Err()) - require.Len(t, fetches.Records(), 1) - assert.Equal(t, []byte(tenantID), fetches.Records()[0].Key) - - received := mimirpb.WriteRequest{} - require.NoError(t, received.Unmarshal(fetches.Records()[0].Value)) - require.Len(t, received.Timeseries, len(expectedSeries)) - - for idx, expected := range expectedSeries { - assert.Equal(t, expected.Labels, received.Timeseries[idx].Labels) - assert.Equal(t, expected.Samples, received.Timeseries[idx].Samples) - } + for _, writeClients := range []int{1, 2, 10} { + t.Run(fmt.Sprintf("Write clients = %d", writeClients), func(t *testing.T) { + t.Parallel() + + seriesPerPartition := map[int32][]mimirpb.PreallocTimeseries{ + 0: series1, + 1: series2, + } + + _, clusterAddr := testkafka.CreateCluster(t, numPartitions, topicName) + config := createTestKafkaConfig(clusterAddr, topicName) + config.WriteClients = writeClients + writer, _ := createTestWriter(t, config) + + // Write to partitions. + for partitionID, series := range seriesPerPartition { + err := writer.WriteSync(ctx, partitionID, tenantID, &mimirpb.WriteRequest{Timeseries: series, Metadata: nil, Source: mimirpb.API}) + require.NoError(t, err) + } + + // Read back from Kafka. 
+ for partitionID, expectedSeries := range seriesPerPartition { + consumer, err := kgo.NewClient(kgo.SeedBrokers(clusterAddr), kgo.ConsumePartitions(map[string]map[int32]kgo.Offset{topicName: {partitionID: kgo.NewOffset().AtStart()}})) + require.NoError(t, err) + t.Cleanup(consumer.Close) + + fetchCtx, cancel := context.WithTimeout(ctx, time.Second) + t.Cleanup(cancel) + + fetches := consumer.PollFetches(fetchCtx) + require.NoError(t, fetches.Err()) + require.Len(t, fetches.Records(), 1) + assert.Equal(t, []byte(tenantID), fetches.Records()[0].Key) + + received := mimirpb.WriteRequest{} + require.NoError(t, received.Unmarshal(fetches.Records()[0].Value)) + require.Len(t, received.Timeseries, len(expectedSeries)) + + for idx, expected := range expectedSeries { + assert.Equal(t, expected.Labels, received.Timeseries[idx].Labels) + assert.Equal(t, expected.Samples, received.Timeseries[idx].Samples) + } + } + }) } }) From f2efb1596d68bc927d5ddcd584fc364846c5a85c Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Wed, 8 May 2024 14:55:46 +0200 Subject: [PATCH 4/6] Fix unit test Signed-off-by: Marco Pracucci --- pkg/storage/ingest/writer_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/storage/ingest/writer_test.go b/pkg/storage/ingest/writer_test.go index dc0dfe2ed51..e305af0a6c6 100644 --- a/pkg/storage/ingest/writer_test.go +++ b/pkg/storage/ingest/writer_test.go @@ -103,6 +103,8 @@ func TestWriter_WriteSync(t *testing.T) { t.Parallel() for _, writeClients := range []int{1, 2, 10} { + writeClients := writeClients + t.Run(fmt.Sprintf("Write clients = %d", writeClients), func(t *testing.T) { t.Parallel() From 32c5ba5ab12f931e2d171d9afee4a070398739e6 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Wed, 8 May 2024 15:03:10 +0200 Subject: [PATCH 5/6] Use kgo.ManualPartitioner Signed-off-by: Marco Pracucci --- pkg/storage/ingest/writer.go | 27 ++------------------------- 1 file changed, 2 insertions(+), 25 deletions(-) diff --git a/pkg/storage/ingest/writer.go b/pkg/storage/ingest/writer.go index 78d1648cbaf..f3b6b292d14 100644 --- a/pkg/storage/ingest/writer.go +++ b/pkg/storage/ingest/writer.go @@ -201,8 +201,8 @@ func (w *Writer) newKafkaWriter(clientID int) (*kgo.Client, error) { kgo.RequiredAcks(kgo.AllISRAcks()), kgo.DefaultProduceTopic(w.kafkaCfg.Topic), - // Use a static partitioner because we want to be in control of the partition. - kgo.RecordPartitioner(newKafkaStaticPartitioner()), + // We set the partition field in each record. + kgo.RecordPartitioner(kgo.ManualPartitioner()), // Set the upper bounds the size of a record batch. kgo.ProducerBatchMaxBytes(16_000_000), @@ -240,26 +240,3 @@ func (w *Writer) newKafkaWriter(clientID int) (*kgo.Client, error) { ) return kgo.NewClient(opts...) } - -type kafkaStaticPartitioner struct{} - -func newKafkaStaticPartitioner() *kafkaStaticPartitioner { - return &kafkaStaticPartitioner{} -} - -// ForTopic implements kgo.Partitioner. -func (p *kafkaStaticPartitioner) ForTopic(string) kgo.TopicPartitioner { - return p -} - -// RequiresConsistency implements kgo.TopicPartitioner. -func (p *kafkaStaticPartitioner) RequiresConsistency(_ *kgo.Record) bool { - // Never let Kafka client to write the record to another partition - // if the partition is down. - return true -} - -// Partition implements kgo.TopicPartitioner. 
-func (p *kafkaStaticPartitioner) Partition(r *kgo.Record, _ int) int { - return int(r.Partition) -} From 4b20d91e0cebd316ed8f4764bbfdf9136a50fe3d Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Wed, 8 May 2024 15:17:57 +0200 Subject: [PATCH 6/6] Fix TestDistributor_Push_ShouldSupportIngestStorage Signed-off-by: Marco Pracucci --- pkg/distributor/distributor_ingest_storage_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/distributor/distributor_ingest_storage_test.go b/pkg/distributor/distributor_ingest_storage_test.go index 5781a17ad20..a251d057d08 100644 --- a/pkg/distributor/distributor_ingest_storage_test.go +++ b/pkg/distributor/distributor_ingest_storage_test.go @@ -136,6 +136,11 @@ func TestDistributor_Push_ShouldSupportIngestStorage(t *testing.T) { ingestStorageEnabled: true, ingestStoragePartitions: 3, limits: limits, + configure: func(cfg *Config) { + // Run a number of clients equal to the number of partitions, so that each partition + // has its own client, as requested by some test cases. + cfg.IngestStorageConfig.KafkaConfig.WriteClients = 3 + }, } distributors, _, regs, kafkaCluster := prepare(t, testConfig)
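
A minimal, standalone sketch of the producer-side model this series converges on: a fixed pool of franz-go clients with each partition mapped to a client by modulo (PATCH 1), and kgo.ManualPartitioner() so the Partition field set on each record is honoured (PATCH 5). The clientPool type, the newClientPool/clientFor/produceSync helpers, the broker address, topic name, and pool size below are illustrative assumptions, not Mimir code; unlike the Writer, which initialises clients lazily, this sketch builds the pool eagerly and omits the service lifecycle, metrics, and producer tuning options.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/twmb/franz-go/pkg/kgo"
)

// clientPool is a hypothetical, stripped-down version of the sharding the
// series introduces in Writer: a fixed-size pool of Kafka clients, with each
// partition deterministically mapped to one client.
type clientPool struct {
	clients []*kgo.Client
}

func newClientPool(seedBroker, topic string, writeClients int) (*clientPool, error) {
	pool := &clientPool{clients: make([]*kgo.Client, 0, writeClients)}
	for i := 0; i < writeClients; i++ {
		client, err := kgo.NewClient(
			kgo.SeedBrokers(seedBroker),
			kgo.DefaultProduceTopic(topic),
			// As in PATCH 5: the partition is carried by each record's
			// Partition field, so the client must not pick one itself.
			kgo.RecordPartitioner(kgo.ManualPartitioner()),
		)
		if err != nil {
			return nil, err
		}
		pool.clients = append(pool.clients, client)
	}
	return pool, nil
}

// clientFor mirrors the modulo sharding in getKafkaWriterForPartition():
// with 3 clients, partitions 0,3,6,... share client 0, partitions
// 1,4,7,... share client 1, and so on.
func (p *clientPool) clientFor(partitionID int32) *kgo.Client {
	return p.clients[int(partitionID)%len(p.clients)]
}

// produceSync writes one record to the given partition and waits for the ack.
func (p *clientPool) produceSync(ctx context.Context, partitionID int32, key, value []byte) error {
	record := &kgo.Record{Key: key, Value: value, Partition: partitionID}
	return p.clientFor(partitionID).ProduceSync(ctx, record).FirstErr()
}

func main() {
	// "localhost:9092" and "test" are placeholders; a reachable broker with
	// a multi-partition topic is assumed.
	pool, err := newClientPool("localhost:9092", "test", 3)
	if err != nil {
		panic(err)
	}
	defer func() {
		for _, client := range pool.clients {
			client.Close()
		}
	}()

	// Partitions 0 and 3 map to the same underlying client: 0 % 3 == 3 % 3.
	fmt.Println(pool.clientFor(0) == pool.clientFor(3)) // true

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := pool.produceSync(ctx, 0, []byte("user-1"), []byte("payload")); err != nil {
		fmt.Println("produce failed:", err)
	}
}

This also makes the trade-off behind the -ingest-storage.kafka.write-clients default visible: each kgo.Client issues its own Metadata requests, so a small fixed pool (default 1) bounds broker pressure, while the modulo sharding still spreads produce traffic across clients when higher throughput is needed.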