kafka replay speed: upstream local development environment #9455

Open · wants to merge 4 commits into base: main

Changes from 1 commit
25 changes: 16 additions & 9 deletions development/mimir-ingest-storage/config/datasource-mimir.yaml
@@ -1,11 +1,18 @@
 apiVersion: 1
 datasources:
-  - name: Mimir
-    type: prometheus
-    access: proxy
-    uid: mimir
-    orgID: 1
-    url: http://nginx:8080/prometheus
-    isDefault: true
-    jsonData:
-      prometheusType: Mimir
+  - name: Mimir
+    type: prometheus
+    access: proxy
+    uid: mimir
+    orgID: 1
+    url: http://nginx:8080/prometheus
+    isDefault: true
+    jsonData:
+      prometheusType: Mimir
+      timeInterval: 5s
+  - name: Jaeger
+    type: jaeger
+    access: proxy
+    uid: jaeger
+    orgID: 1
+    url: http://jaeger:16686/
@@ -36,6 +36,13 @@ prometheus.scrape "metrics_local_mimir_read_write_mode_mimir_write" {
       container = "mimir-write",
       namespace = "mimir-read-write-mode",
     }],
+    [{
+      __address__ = "mimir-write-zone-c-61:8080",
+      cluster = "docker-compose",
+      container = "mimir-write",
+      namespace = "mimir-read-write-mode",
+      job = "mimir-write-zone-c",
+    }],
Contributor:

(nit) Seems that tabs and spaces are mixed in this config here, right? ;)

Contributor Author:

I think it's only using spaces. I asked GoLand to convert to spaces and it said the file is already formatted. I've fixed the indentation of this line in the meantime.

Contributor:

@dimitarvdimitrov have you forgotten to push the changes, maybe? The indentation is still off here ;)

Contributor Author:

This is what I pushed: f5b6bcb
[Screenshot 2024-10-10 at 11:19:41]

I think GitHub doesn't update the diff embedded in the comment.

   )
   forward_to = [prometheus.remote_write.metrics_local.receiver]
   job_name = "mimir-read-write-mode/mimir-write"
@@ -97,7 +104,7 @@ prometheus.scrape "metrics_local_mimir_read_write_mode_mimir_backend" {
 prometheus.remote_write "metrics_local" {
   endpoint {
     name = "local"
-    url = "http://mimir-write-zone-a-1:8080/api/v1/push"
+    url = "http://mimir-write-zone-a-2:8080/api/v1/push"
     send_native_histograms = true

     queue_config { }
11 changes: 9 additions & 2 deletions development/mimir-ingest-storage/config/mimir.yaml
@@ -12,15 +12,19 @@ common:
 ingest_storage:
   enabled: true
   kafka:
-    address: kafka:9092
+    address: kafka_1:9092
     topic: mimir-ingest
     last_produced_offset_poll_interval: 500ms
+    startup_fetch_concurrency: 15
+    startup_records_per_fetch: 2400
+    ongoing_fetch_concurrency: 2
+    ongoing_records_per_fetch: 30

 ingester:
   track_ingester_owned_series: true

   partition_ring:
-    min_partition_owners_count: 2
+    min_partition_owners_count: 1
     min_partition_owners_duration: 10s
     delete_inactive_partition_after: 1m
@@ -99,3 +103,6 @@ limits:

 runtime_config:
   file: ./config/runtime.yaml
+
+server:
+  log_level: debug
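For reference, the four new Kafka fetch settings added to mimir.yaml above also exist as -ingest-storage.kafka.* CLI flags, and this PR uses the flag form on the dedicated zone-c ingester in docker-compose.jsonnet below. A minimal Jsonnet sketch of that per-service override pattern, reusing the mimirService helper from the compose file; the service name and published port here are hypothetical and not part of this PR:

// Hypothetical ingester overriding the Kafka fetch settings via CLI flags
// instead of mimir.yaml. Flag names are copied verbatim from this PR; the
// service name and port are assumptions for illustration only.
'mimir-write-zone-c-62': mimirService({
  name: 'mimir-write-zone-c-62',
  target: 'ingester',
  publishedHttpPort: 8065,
  extraArguments: [
    '-ingest-storage.kafka.startup-fetch-concurrency=15',
    '-ingest-storage.kafka.startup-records-per-fetch=2400',
    '-ingest-storage.kafka.ongoing-fetch-concurrency=2',
    '-ingest-storage.kafka.ongoing-records-per-fetch=30',
  ],
}),

Since CLI flags take precedence over the YAML file, this is a convenient way to experiment with different replay-speed settings on a single ingester without touching the shared config.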
167 changes: 118 additions & 49 deletions development/mimir-ingest-storage/docker-compose.jsonnet
@@ -9,7 +9,10 @@ std.manifestYamlDoc({
     self.grafana +
     self.grafana_agent +
     self.memcached +
-    self.kafka +
+    self.kafka_1 +
+    self.kafka_2 +
+    self.kafka_3 +
+    self.jaeger +
     {},

   write:: {
@@ -35,28 +38,25 @@ std.manifestYamlDoc({
       extraArguments: ['-ingester.ring.instance-availability-zone=zone-a'],
       extraVolumes: ['.data-mimir-write-zone-a-3:/data:delegated'],
     }),
-
-    // Zone-b.
-    'mimir-write-zone-b-1': mimirService({
-      name: 'mimir-write-zone-b-1',
-      target: 'write',
-      publishedHttpPort: 8011,
-      extraArguments: ['-ingester.ring.instance-availability-zone=zone-b'],
-      extraVolumes: ['.data-mimir-write-zone-b-1:/data:delegated'],
-    }),
-    'mimir-write-zone-b-2': mimirService({
-      name: 'mimir-write-zone-b-2',
-      target: 'write',
-      publishedHttpPort: 8012,
-      extraArguments: ['-ingester.ring.instance-availability-zone=zone-b'],
-      extraVolumes: ['.data-mimir-write-zone-b-2:/data:delegated'],
-    }),
-    'mimir-write-zone-b-3': mimirService({
-      name: 'mimir-write-zone-b-3',
-      target: 'write',
-      publishedHttpPort: 8013,
-      extraArguments: ['-ingester.ring.instance-availability-zone=zone-b'],
-      extraVolumes: ['.data-mimir-write-zone-b-3:/data:delegated'],
+    'mimir-write-zone-c-61': mimirService({
+      name: 'mimir-write-zone-c-61',
+      target: 'ingester',
+      publishedHttpPort: 8064,
+      extraArguments: [
+        '-ingester.ring.instance-availability-zone=zone-c',
+        '-ingester.ring.instance-id=ingester-zone-c-61',
+        '-ingester.partition-ring.prefix=exclusive-prefix',
+        '-ingester.ring.prefix=exclusive-prefix',
+        '-ingest-storage.kafka.consume-from-position-at-startup=end',
+        '-ingest-storage.kafka.consume-from-timestamp-at-startup=0',
+        '-ingest-storage.kafka.ingestion-concurrency=2',
+        '-ingest-storage.kafka.ingestion-concurrency-batch-size=150',
+        '-ingest-storage.kafka.startup-fetch-concurrency=15',
+        '-ingest-storage.kafka.startup-records-per-fetch=2400',
+        '-ingest-storage.kafka.ongoing-fetch-concurrency=2',
+        '-ingest-storage.kafka.ongoing-records-per-fetch=30',
+      ],
+      extraVolumes: ['.data-mimir-write-zone-c-61:/data:delegated'],
     }),
   },

@@ -116,27 +116,31 @@ std.manifestYamlDoc({
     },
   },

-  kafka:: {
-    kafka: {
+  local commonKafkaEnvVars = [
+    'CLUSTER_ID=zH1GDqcNTzGMDCXm5VZQdg', // Cluster ID is required in KRaft mode; the value is random UUID.
+    'KAFKA_NUM_PARTITIONS=100', // Default number of partitions for auto-created topics.
+    'KAFKA_PROCESS_ROLES=broker,controller',
+    'KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT',
+    'KAFKA_INTER_BROKER_LISTENER_NAME=PLAINTEXT',
+    'KAFKA_CONTROLLER_LISTENER_NAMES=CONTROLLER',
+    'KAFKA_CONTROLLER_QUORUM_VOTERS=1@kafka_1:9093,2@kafka_2:9093,3@kafka_3:9093',
+    'KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=2',
+    'KAFKA_DEFAULT_REPLICATION_FACTOR=2',
+    'KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS=10000',
+
+    // Decomment the following config to keep a short retention of records in Kafka.
+    // This is useful to test the behaviour when Kafka records are deleted.
+    // 'KAFKA_LOG_RETENTION_MINUTES=1',
+    // 'KAFKA_LOG_SEGMENT_BYTES=1000000',
+  ],
+
+  kafka_1:: {
+    kafka_1: {
       image: 'confluentinc/cp-kafka:latest',
-      environment: [
-        'CLUSTER_ID=zH1GDqcNTzGMDCXm5VZQdg', // Cluster ID is required in KRaft mode; the value is random UUID.
+      environment: commonKafkaEnvVars + [
         'KAFKA_BROKER_ID=1',
-        'KAFKA_NUM_PARTITIONS=100', // Default number of partitions for auto-created topics.
-        'KAFKA_PROCESS_ROLES=broker,controller',
         'KAFKA_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,PLAINTEXT_HOST://:29092',
-        'KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092',
-        'KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT',
-        'KAFKA_INTER_BROKER_LISTENER_NAME=PLAINTEXT',
-        'KAFKA_CONTROLLER_LISTENER_NAMES=CONTROLLER',
-        'KAFKA_CONTROLLER_QUORUM_VOTERS=1@kafka:9093',
-        'KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1',
-        'KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS=10000',
-
-        // Decomment the following config to keep a short retention of records in Kafka.
-        // This is useful to test the behaviour when Kafka records are deleted.
-        // 'KAFKA_LOG_RETENTION_MINUTES=1',
-        // 'KAFKA_LOG_SEGMENT_BYTES=1000000',
+        'KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka_1:9092,PLAINTEXT_HOST://localhost:29092',
       ],
       ports: [
         '29092:29092',
@@ -151,6 +155,48 @@ std.manifestYamlDoc({
     },
   },

+
+  kafka_2:: {
+    kafka_2: {
+      image: 'confluentinc/cp-kafka:latest',
+      environment: commonKafkaEnvVars + [
+        'KAFKA_BROKER_ID=2',
+        'KAFKA_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,PLAINTEXT_HOST://:29093',
+        'KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka_2:9092,PLAINTEXT_HOST://localhost:29093',
+      ],
+      ports: [
+        '29093:29093',
+      ],
+      healthcheck: {
+        test: 'nc -z localhost 9092 || exit -1',
+        start_period: '1s',
+        interval: '1s',
+        timeout: '1s',
+        retries: '30',
+      },
+    },
+  },
+  kafka_3:: {
+    kafka_3: {
+      image: 'confluentinc/cp-kafka:latest',
+      environment: commonKafkaEnvVars + [
+        'KAFKA_BROKER_ID=3',
+        'KAFKA_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,PLAINTEXT_HOST://:29094',
+        'KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka_3:9092,PLAINTEXT_HOST://localhost:29094',
+      ],
+      ports: [
+        '29094:29094',
+      ],
+      healthcheck: {
+        test: 'nc -z localhost 9092 || exit -1',
+        start_period: '1s',
+        interval: '1s',
+        timeout: '1s',
+        retries: '30',
+      },
+    },
+  },
+
   memcached:: {
     memcached: {
       image: 'memcached:1.6.19-alpine',
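The shared commonKafkaEnvVars list is what keeps the three broker definitions above small: each broker only sets its own broker ID, listeners, and published host port. As a usage sketch (not part of this PR), a hypothetical fourth broker would look like the block below; the KAFKA_CONTROLLER_QUORUM_VOTERS entry in the shared list would presumably also need a fourth voter, and the services concatenation at the top of the file would need a self.kafka_4 term.

// Hypothetical kafka_4 broker reusing commonKafkaEnvVars, mirroring kafka_2 and kafka_3.
// The broker ID and the 29095 host port are assumptions for illustration only.
kafka_4:: {
  kafka_4: {
    image: 'confluentinc/cp-kafka:latest',
    environment: commonKafkaEnvVars + [
      'KAFKA_BROKER_ID=4',
      'KAFKA_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,PLAINTEXT_HOST://:29095',
      'KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka_4:9092,PLAINTEXT_HOST://localhost:29095',
    ],
    ports: ['29095:29095'],
    healthcheck: {
      test: 'nc -z localhost 9092 || exit -1',
      start_period: '1s',
      interval: '1s',
      timeout: '1s',
      retries: '30',
    },
  },
},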
@@ -187,6 +233,22 @@ std.manifestYamlDoc({
     },
   },

+  jaeger:: {
+    jaeger: {
+      image: 'jaegertracing/all-in-one',
+      ports: ['16686:16686', '14268'],
+    },
+  },
+
+  local jaegerEnv(appName) = {
+    JAEGER_AGENT_HOST: 'jaeger',
+    JAEGER_AGENT_PORT: 6831,
+    JAEGER_SAMPLER_TYPE: 'const',
+    JAEGER_SAMPLER_PARAM: 1,
+    JAEGER_TAGS: 'app=%s' % appName,
+    JAEGER_REPORTER_MAX_QUEUE_SIZE: 1000,
+  },
+
   // This function builds docker-compose declaration for Mimir service.
   local mimirService(serviceOptions) = {
     local defaultOptions = {
@@ -196,10 +258,13 @@ std.manifestYamlDoc({
       publishedHttpPort: error 'missing publishedHttpPort',
       dependsOn: {
         minio: { condition: 'service_started' },
-        kafka: { condition: 'service_healthy' },
+        kafka_1: { condition: 'service_healthy' },
+        kafka_2: { condition: 'service_healthy' },
       },
-      env: {},
+      env: jaegerEnv(self.target),
       extraArguments: [],
+      debug: false,
+      debugPort: self.publishedHttpPort + 3000,
       extraVolumes: [],
       memberlistBindPort: self.publishedHttpPort + 2000,
     },
@@ -212,19 +277,23 @@ std.manifestYamlDoc({
     },
     image: 'mimir',
     command: [
-      './mimir',
-      '-config.file=./config/mimir.yaml' % options,
-      '-target=%(target)s' % options,
-      '-activity-tracker.filepath=/activity/%(name)s' % options,
-    ] + options.extraArguments,
+      'sh',
+      '-c',
+      std.join(' ', [
+        (if options.debug then 'exec ./dlv exec ./mimir --listen=:%(debugPort)d --headless=true --api-version=2 --accept-multiclient --continue -- ' % options else 'exec ./mimir'),
+        '-config.file=./config/mimir.yaml' % options,
+        '-target=%(target)s' % options,
+        '-activity-tracker.filepath=/activity/%(name)s' % options,
+      ] + options.extraArguments),
+    ],
     environment: [
       '%s=%s' % [key, options.env[key]]
       for key in std.objectFields(options.env)
       if options.env[key] != null
     ],
     hostname: options.name,
     // Only publish HTTP port, but not gRPC one.
-    ports: ['%d:8080' % options.publishedHttpPort],
+    ports: ['%d:8080' % options.publishedHttpPort, '%(debugPort)d:%(debugPort)d' % options],
     depends_on: options.dependsOn,
     volumes: ['./config:/mimir/config', './activity:/activity'] + options.extraVolumes,
   },
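The sh -c wrapper above, combined with the new debug and debugPort options, allows attaching a Go debugger to any Mimir container. A usage sketch (the debug: true line is added here for illustration and is not part of this PR): with publishedHttpPort 8064, Delve listens on 11064 (publishedHttpPort + 3000), and the extended ports mapping publishes that port on the host, so a debugger can connect to localhost:11064.

// Example: run the zone-c ingester under Delve by flipping the new debug option.
'mimir-write-zone-c-61': mimirService({
  name: 'mimir-write-zone-c-61',
  target: 'ingester',
  publishedHttpPort: 8064,
  debug: true,  // wraps the command in: dlv exec ./mimir --listen=:11064 ... --continue
  // remaining options (extraArguments, extraVolumes) as in the diff above
}),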