Refacto/podman resources (#898)
l0r1s authored Apr 5, 2023
1 parent c3f8e99 commit 3b134b4
Showing 12 changed files with 885 additions and 400 deletions.

Large diffs are not rendered by default.

@@ -0,0 +1,36 @@
import { Node } from "../../../types";
import { Client } from "../../client";
import { NodeResource } from "./nodeResource";
import { Container, PodSpec, Volume } from "./types";

export class BootNodeResource extends NodeResource {
  constructor(client: Client, namespace: string, nodeSetupConfig: Node) {
    super(client, namespace, nodeSetupConfig);
  }

  protected generatePodSpec(
    containers: Container[],
    volumes: Volume[],
  ): PodSpec {
    return {
      apiVersion: "v1",
      kind: "Pod",
      metadata: {
        name: "bootnode",
        namespace: this.namespace,
        labels: {
          "zombie-role": "bootnode",
          app: "zombienet",
          "zombie-ns": this.namespace,
        },
      },
      spec: {
        hostname: "bootnode",
        initContainers: [],
        restartPolicy: "OnFailure",
        volumes,
        containers,
      },
    };
  }
}
@@ -0,0 +1,17 @@
# config file version
apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    orgId: 1
    url: http://{{PROMETHEUS_IP}}:9090
    version: 1
    editable: true
  - name: Tempo
    type: tempo
    access: proxy
    orgId: 1
    url: http://{{TEMPO_IP}}:3200
    version: 1
    editable: true
@@ -0,0 +1,17 @@
# config
global:
  scrape_interval: 5s
  external_labels:
    monitor: "zombienet-monitor"

# Scraping Prometheus itself
scrape_configs:
  - job_name: "prometheus"
    static_configs:
      - targets: ["localhost:9090"]
  - job_name: "dynamic"
    file_sd_configs:
      - files:
          - /data/sd_config*.yaml
          - /data/sd_config*.json
        refresh_interval: 5s
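
The "dynamic" job above relies on Prometheus file-based service discovery: any file matching /data/sd_config*.yaml or /data/sd_config*.json is re-read every 5 seconds and its targets are scraped. As a rough illustration only (not part of this commit), such a target file could be produced as below; the helper name, data directory argument, and the default port 9615 are assumptions:

// Illustrative sketch: the exact writer used by zombienet is not in the hunks
// shown on this page. Prometheus' file_sd format is a list of objects with
// "targets" and optional "labels"; JSON files match the sd_config*.json glob.
import fs from "fs/promises";

interface FileSdEntry {
  targets: string[];
  labels?: Record<string, string>;
}

// Hypothetical helper: register a node so the "dynamic" job starts scraping it.
async function writeScrapeTarget(
  dataDir: string, // assumed to be the directory mounted at /data in the pod
  name: string,
  ip: string,
  port = 9615, // example value, not taken from this commit
): Promise<void> {
  const entries: FileSdEntry[] = [
    { targets: [`${ip}:${port}`], labels: { node: name } },
  ];
  await fs.writeFile(
    `${dataDir}/sd_config_${name}.json`,
    JSON.stringify(entries, null, 2),
  );
}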
@@ -0,0 +1,45 @@
server:
  http_listen_port: 3100

distributor:
  receivers: # this configuration will listen on all ports and protocols that tempo is capable of.
    jaeger: # the receivers all come from the OpenTelemetry collector. more configuration information can
      protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver
        thrift_http: #
        grpc: # for a production deployment you should only enable the receivers you need!
        thrift_binary:
        thrift_compact:
    zipkin:
    otlp:
      protocols:
        http:
        grpc:
    opencensus:

ingester:
  trace_idle_period: 10s # the length of time after a trace has not received spans to consider it complete and flush it
  max_block_bytes: 1_000_000 # cut the head block when it hits this size or ...
  max_block_duration: 5m # this much time passes

compactor:
  compaction:
    compaction_window: 1h # blocks in this time window will be compacted together
    max_block_bytes: 100_000_000 # maximum size of compacted blocks
    block_retention: 1h
    compacted_block_retention: 10m

storage:
  trace:
    backend: local # backend configuration to use
    block:
      bloom_filter_false_positive: .05 # bloom filter false positive rate. lower values create larger filters but fewer false positives
      index_downsample_bytes: 1000 # number of bytes per index record
      encoding: zstd # block encoding/compression. options: none, gzip, lz4-64k, lz4-256k, lz4-1M, lz4, snappy, zstd, s2
    wal:
      path: /tmp/tempo/wal # where to store the wal locally
      encoding: snappy # wal encoding/compression. options: none, gzip, lz4-64k, lz4-256k, lz4-1M, lz4, snappy, zstd, s2
    local:
      path: /tmp/tempo/blocks
    pool:
      max_workers: 100 # worker pool determines the number of parallel requests to the object store backend
      queue_depth: 10000
@@ -0,0 +1,132 @@
import { getRandomPort, makeDir } from "@zombienet/utils";
import fs from "fs/promises";
import path from "path";
import { Client } from "../../client";
import {
  Container,
  ContainerPort,
  PodSpec,
  Volume,
  VolumeMount,
} from "./types";

export class GrafanaResource {
  private readonly dataSourcesPath: string;

  constructor(
    client: Client,
    private readonly namespace: string,
    private readonly prometheusIp: string,
    private readonly tempoIp: string,
  ) {
    this.dataSourcesPath = `${client.tmpDir}/grafana/datasources`;
  }

  public async generateSpec() {
    const volumes = await this.generateVolumes();
    const volumeMounts = this.generateVolumesMounts();
    const containersPorts = await this.generateContainersPorts();
    const containers = this.generateContainers(volumeMounts, containersPorts);

    return this.generatePodSpec(containers, volumes);
  }

  private async createVolumeDirectories() {
    try {
      await makeDir(this.dataSourcesPath, true);
    } catch {
      throw new Error("Error creating directory for grafana resource");
    }
  }

  private async generateGrafanaConfig() {
    try {
      const templateConfigPath = path.resolve(
        __dirname,
        "./configs/grafana.yml",
      );
      const grafanaConfigBuffer = await fs.readFile(templateConfigPath);

      let grafanaConfig = grafanaConfigBuffer.toString("utf8");
      grafanaConfig = grafanaConfig
        .replace("{{PROMETHEUS_IP}}", this.prometheusIp)
        .replace("{{TEMPO_IP}}", this.tempoIp);

      await fs.writeFile(
        `${this.dataSourcesPath}/prometheus.yml`,
        grafanaConfig,
      );
    } catch {
      throw new Error("Error generating config for grafana resource");
    }
  }

  private async generateVolumes(): Promise<Volume[]> {
    await this.createVolumeDirectories();
    await this.generateGrafanaConfig();

    return [
      {
        name: "datasources-cfg",
        hostPath: { type: "Directory", path: this.dataSourcesPath },
      },
    ];
  }

  private generateVolumesMounts() {
    return [
      {
        name: "datasources-cfg",
        mountPath: "/etc/grafana/provisioning/datasources",
        readOnly: false,
      },
    ];
  }

  private async generateContainersPorts(): Promise<ContainerPort[]> {
    return [
      {
        containerPort: 3000,
        name: "grafana_web",
        hostPort: await getRandomPort(),
      },
    ];
  }

  private generateContainers(
    volumeMounts: VolumeMount[],
    ports: ContainerPort[],
  ): Container[] {
    return [
      {
        image: "docker.io/grafana/grafana",
        name: "grafana",
        imagePullPolicy: "Always",
        ports,
        volumeMounts,
      },
    ];
  }

  private generatePodSpec(containers: Container[], volumes: Volume[]): PodSpec {
    return {
      apiVersion: "v1",
      kind: "Pod",
      metadata: {
        name: "grafana",
        namespace: this.namespace,
        labels: {
          "zombie-role": "grafana",
          app: "zombienet",
          "zombie-ns": this.namespace,
        },
      },
      spec: {
        hostname: "grafana",
        restartPolicy: "OnFailure",
        volumes,
        containers,
      },
    };
  }
}
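
For context, a minimal usage sketch of the class above (not part of this commit). It assumes a Client instance with a populated tmpDir and the IPs of already-running Prometheus and Tempo pods; the relative import paths are illustrative:

// Usage sketch only. How the returned spec is applied (e.g. handed to
// `podman play kube`) is the provider's responsibility and is outside the
// hunks shown on this page.
import { GrafanaResource } from "./grafanaResource";
import { Client } from "../../client";

async function createGrafanaSpec(
  client: Client,
  namespace: string,
  prometheusIp: string,
  tempoIp: string,
) {
  const grafana = new GrafanaResource(client, namespace, prometheusIp, tempoIp);

  // Writes the datasource config under `${client.tmpDir}/grafana/datasources`
  // and returns a pod manifest wired to that host path.
  const podSpec = await grafana.generateSpec();
  return podSpec;
}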
@@ -0,0 +1,6 @@
export { BootNodeResource } from "./bootnodeResource";
export { GrafanaResource } from "./grafanaResource";
export { IntrospectorResource } from "./introspectorResource";
export { NodeResource } from "./nodeResource";
export { PrometheusResource } from "./prometheusResource";
export { TempoResource } from "./tempoResource";
@@ -0,0 +1,61 @@
import { getRandomPort } from "@zombienet/utils";
import { INTROSPECTOR_POD_NAME } from "../../../constants";
import { Container, ContainerPort, PodSpec } from "./types";

export class IntrospectorResource {
  constructor(
    private readonly namespace: string,
    private readonly wsUri: string,
  ) {}

  public async generateSpec() {
    const containerPorts = await this.generateContainersPorts();
    const containers = this.generateContainers(containerPorts);

    return this.generatePodSpec(containers);
  }

  private async generateContainersPorts(): Promise<ContainerPort[]> {
    return [
      {
        containerPort: 65432,
        name: "prometheus",
        hostPort: await getRandomPort(),
      },
    ];
  }

  private generateContainers(ports: ContainerPort[]): Container[] {
    return [
      {
        image: "docker.io/paritytech/polkadot-introspector:latest",
        name: INTROSPECTOR_POD_NAME,
        args: ["block-time-monitor", `--ws=${this.wsUri}`, "prometheus"],
        imagePullPolicy: "Always",
        ports,
        volumeMounts: [],
      },
    ];
  }

  private generatePodSpec(containers: Container[]): PodSpec {
    return {
      apiVersion: "v1",
      kind: "Pod",
      metadata: {
        name: INTROSPECTOR_POD_NAME,
        namespace: this.namespace,
        labels: {
          "zombie-role": INTROSPECTOR_POD_NAME,
          app: "zombienet",
          "zombie-ns": this.namespace,
        },
      },
      spec: {
        hostname: INTROSPECTOR_POD_NAME,
        containers: containers,
        restartPolicy: "OnFailure",
      },
    };
  }
}
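
A similar hedged sketch for the introspector resource (not part of this commit); the import path and the example WebSocket URI are assumptions:

// Usage sketch only.
import { IntrospectorResource } from "./introspectorResource";

async function createIntrospectorSpec(namespace: string, wsUri: string) {
  // wsUri points at a running node, e.g. "ws://127.0.0.1:9944" (example value).
  const introspector = new IntrospectorResource(namespace, wsUri);

  // The returned manifest contains a single container whose port 65432 is
  // published on a random free host port chosen via getRandomPort().
  return introspector.generateSpec();
}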