
Merge pull request #280 from SumoLogic/rmiller-generate-non-helm-terraform

Generate setup job yaml from helm template
Ryan Miller authored Nov 7, 2019
2 parents e15bed6 + 4ea0f83 commit b0aaea2
Showing 2 changed files with 282 additions and 3 deletions.
22 changes: 19 additions & 3 deletions ci/build.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash

VERSION="${TRAVIS_TAG:-0.0.0}"
VERSION="${VERSION#v}"
@@ -71,14 +71,16 @@ ruby deploy/test/test_docker.rb

# Check for changes that require re-generating overrides yaml files
if [ -n "$GITHUB_TOKEN" ] && [ "$TRAVIS_EVENT_TYPE" == "pull_request" ]; then
-  echo "Generating yaml from helm chart..."
+  echo "Generating deployment yaml from helm chart..."
  echo "# This file is auto-generated." > deploy/kubernetes/fluentd-sumologic.yaml.tmpl
  sudo helm init --client-only
  cd deploy/helm/sumologic
  sudo helm dependency update
  cd ../../../

-  with_files=`ls deploy/helm/sumologic/templates/*.yaml | grep -v "setup-*.yaml" | sed 's#deploy/helm/sumologic/templates#-x templates#g' | sed 's/yaml/yaml \\\/g'`
+  # NOTE(ryan, 2019-11-06): helm template --execute is going away in Helm 3 so we will need to revisit this
+  # https://github.com/helm/helm/issues/5887
+  with_files=`ls deploy/helm/sumologic/templates/*.yaml | sed 's#deploy/helm/sumologic/templates#-x templates#g' | sed 's/yaml/yaml \\\/g'`
  eval 'sudo helm template deploy/helm/sumologic $with_files --namespace "\$NAMESPACE" --name collection --set dryRun=true >> deploy/kubernetes/fluentd-sumologic.yaml.tmpl --set sumologic.endpoint="bogus" --set sumologic.accessId="bogus" --set sumologic.accessKey="bogus"'

  if [[ $(git diff deploy/kubernetes/fluentd-sumologic.yaml.tmpl) ]]; then
@@ -90,6 +92,20 @@ if [ -n "$GITHUB_TOKEN" ] && [ "$TRAVIS_EVENT_TYPE" == "pull_request" ]; then
    echo "No changes in 'fluentd-sumologic.yaml.tmpl'."
  fi

echo "Generating setup job yaml from helm chart..."
echo "# This file is auto-generated." > deploy/kubernetes/setup-sumologic.yaml.tmpl

with_files=`ls deploy/helm/sumologic/templates/setup/*.yaml | sed 's#deploy/helm/sumologic/templates#-x templates#g' | sed 's/yaml/yaml \\\/g'`
eval 'sudo helm template deploy/helm/sumologic $with_files --namespace "\$NAMESPACE" --name collection --set dryRun=true >> deploy/kubernetes/setup-sumologic.yaml.tmpl --set sumologic.endpoint="\$SUMOLOGIC_BASE_URL" --set sumologic.accessId="\$SUMOLOGIC_ACCESSID" --set sumologic.accessKey="\$SUMOLOGIC_ACCESSKEY" --set sumologic.collectorName="\$COLLECTOR_NAME" --set sumologic.clusterName="\$CLUSTER_NAME"'
if [[ $(git diff deploy/kubernetes/setup-sumologic.yaml.tmpl) ]]; then
echo "Detected changes in 'setup-sumologic.yaml.tmpl', committing the updated version to $TRAVIS_PULL_REQUEST_BRANCH..."
git add deploy/kubernetes/setup-sumologic.yaml.tmpl
git commit -m "Generate new 'setup-sumologic.yaml.tmpl'"
git push --quiet origin-repo "$TRAVIS_PULL_REQUEST_BRANCH"
else
echo "No changes in 'setup-sumologic.yaml.tmpl'."
fi

  # Generate override yaml files for chart dependencies to determine if changes are made to overrides yaml files
  echo "Generating overrides files..."

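Since `--execute` is slated for removal in Helm 3 (per the NOTE above), this generation step will eventually need rewriting. As a rough sketch only, not part of this commit: Helm 3 drops `helm init` and replaces `-x/--execute` with `--show-only`, with the release name passed positionally, so the equivalent invocation might look like:

  helm template collection deploy/helm/sumologic \
    --show-only templates/setup/setup-configmap.yaml \
    --show-only templates/setup/setup-job.yaml \
    --namespace "$NAMESPACE" \
    --set dryRun=true >> deploy/kubernetes/setup-sumologic.yaml.tmpl

The `--show-only` file list here is illustrative; the script above derives the list dynamically from `ls deploy/helm/sumologic/templates/setup/*.yaml`.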
263 changes: 263 additions & 0 deletions deploy/kubernetes/setup-sumologic.yaml.tmpl
@@ -1 +1,264 @@
# This file is auto-generated.
---
# Source: sumologic/templates/setup/setup-configmap.yaml

apiVersion: v1
kind: ConfigMap
metadata:
  name: collection-sumologic-setup
  annotations:
    "helm.sh/hook": pre-install
    "helm.sh/hook-weight": "2"
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    app: collection-sumologic

data:
  setup.sh: |-
    #!/bin/sh
    cp /etc/terraform/sumo-k8s.tf /terraform
    cd /terraform

    # Fix URL to remove "v1" or "v1/"
    export SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL%v1*}
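    # e.g. "https://api.sumologic.com/api/v1/" becomes "https://api.sumologic.com/api/"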

    COLLECTOR_NAME=$COLLECTOR_NAME

    terraform init

    # Sumo Collector and HTTP sources
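    # Importing lets Terraform adopt resources that already exist; an import that
    # fails because the resource is missing is harmless, and `terraform apply`
    # below creates it instead.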
    terraform import sumologic_collector.collector "$COLLECTOR_NAME"
    terraform import sumologic_http_source.default_metrics_source "$COLLECTOR_NAME/(default-metrics)"
    terraform import sumologic_http_source.apiserver_metrics_source "$COLLECTOR_NAME/apiserver-metrics"
    terraform import sumologic_http_source.events_source "$COLLECTOR_NAME/events"
    terraform import sumologic_http_source.kube_controller_manager_metrics_source "$COLLECTOR_NAME/kube-controller-manager-metrics"
    terraform import sumologic_http_source.kube_scheduler_metrics_source "$COLLECTOR_NAME/kube-scheduler-metrics"
    terraform import sumologic_http_source.kube_state_metrics_source "$COLLECTOR_NAME/kube-state-metrics"
    terraform import sumologic_http_source.kubelet_metrics_source "$COLLECTOR_NAME/kubelet-metrics"
    terraform import sumologic_http_source.logs_source "$COLLECTOR_NAME/logs"
    terraform import sumologic_http_source.node_exporter_metrics_source "$COLLECTOR_NAME/node-exporter-metrics"

    # Kubernetes Namespace and Secret
    terraform import kubernetes_namespace.sumologic_collection_namespace $NAMESPACE
    terraform import kubernetes_secret.sumologic_collection_secret $NAMESPACE/sumologic

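    # Create anything that was not imported above and write the HTTP source URLs
    # into the "sumologic" secret consumed by the collection pods.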
    terraform apply -auto-approve
  sumo-k8s.tf: |-
    variable "cluster_name" {
      type = string
      default = "$CLUSTER_NAME"
    }
    variable "collector_name" {
      type = string
      default = "$COLLECTOR_NAME"
    }

    variable "namespace_name" {
      type = string
      default = "$NAMESPACE"
    }

    locals {
      default-metrics-source-name = "(default-metrics)"
      apiserver-metrics-source-name = "apiserver-metrics"
      events-source-name = "events"
      kube-controller-manager-metrics-source-name = "kube-controller-manager-metrics"
      kube-scheduler-metrics-source-name = "kube-scheduler-metrics"
      kube-state-metrics-source-name = "kube-state-metrics"
      kubelet-metrics-source-name = "kubelet-metrics"
      logs-source-name = "logs"
      node-exporter-metrics-source-name = "node-exporter-metrics"
    }

    provider "sumologic" {}

    resource "sumologic_collector" "collector" {
      name = var.collector_name
      fields = {
        cluster = var.cluster_name
      }
    }

    resource "sumologic_http_source" "default_metrics_source" {
      name = local.default-metrics-source-name
      category = "${var.cluster_name}/${local.default-metrics-source-name}"
      collector_id = "${sumologic_collector.collector.id}"
    }

    resource "sumologic_http_source" "apiserver_metrics_source" {
      name = local.apiserver-metrics-source-name
      category = "${var.cluster_name}/${local.apiserver-metrics-source-name}"
      collector_id = "${sumologic_collector.collector.id}"
    }

    resource "sumologic_http_source" "events_source" {
      name = local.events-source-name
      category = "${var.cluster_name}/${local.events-source-name}"
      collector_id = "${sumologic_collector.collector.id}"
    }

    resource "sumologic_http_source" "kube_controller_manager_metrics_source" {
      name = local.kube-controller-manager-metrics-source-name
      category = "${var.cluster_name}/${local.kube-controller-manager-metrics-source-name}"
      collector_id = "${sumologic_collector.collector.id}"
    }

    resource "sumologic_http_source" "kube_scheduler_metrics_source" {
      name = local.kube-scheduler-metrics-source-name
      category = "${var.cluster_name}/${local.kube-scheduler-metrics-source-name}"
      collector_id = "${sumologic_collector.collector.id}"
    }

    resource "sumologic_http_source" "kube_state_metrics_source" {
      name = local.kube-state-metrics-source-name
      category = "${var.cluster_name}/${local.kube-state-metrics-source-name}"
      collector_id = "${sumologic_collector.collector.id}"
    }

    resource "sumologic_http_source" "kubelet_metrics_source" {
      name = local.kubelet-metrics-source-name
      category = "${var.cluster_name}/${local.kubelet-metrics-source-name}"
      collector_id = "${sumologic_collector.collector.id}"
    }

    resource "sumologic_http_source" "logs_source" {
      name = local.logs-source-name
      category = "${var.cluster_name}/${local.logs-source-name}"
      collector_id = "${sumologic_collector.collector.id}"
    }

    resource "sumologic_http_source" "node_exporter_metrics_source" {
      name = local.node-exporter-metrics-source-name
      category = "${var.cluster_name}/${local.node-exporter-metrics-source-name}"
      collector_id = "${sumologic_collector.collector.id}"
    }

    provider "kubernetes" {
      host = "https://kubernetes.default.svc"
      token = "${file("/var/run/secrets/kubernetes.io/serviceaccount/token")}"
      cluster_ca_certificate = "${file("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")}"
    }

    resource "kubernetes_namespace" "sumologic_collection_namespace" {
      metadata {
        name = var.namespace_name
      }
    }

    resource "kubernetes_secret" "sumologic_collection_secret" {
      metadata {
        name = "sumologic"
        namespace = var.namespace_name
      }

      data = {
        endpoint-events = "${sumologic_http_source.events_source.url}"
        endpoint-logs = "${sumologic_http_source.logs_source.url}"
        endpoint-metrics = "${sumologic_http_source.default_metrics_source.url}"
        endpoint-metrics-apiserver = "${sumologic_http_source.apiserver_metrics_source.url}"
        endpoint-metrics-kube-controller-manager = "${sumologic_http_source.kube_controller_manager_metrics_source.url}"
        endpoint-metrics-kube-scheduler = "${sumologic_http_source.kube_scheduler_metrics_source.url}"
        endpoint-metrics-kube-state = "${sumologic_http_source.kube_state_metrics_source.url}"
        endpoint-metrics-kubelet = "${sumologic_http_source.kubelet_metrics_source.url}"
        endpoint-metrics-node-exporter = "${sumologic_http_source.node_exporter_metrics_source.url}"
      }

      type = "Opaque"
    }
---
# Source: sumologic/templates/setup/setup-serviceaccount.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: collection-sumologic-setup
  annotations:
    "helm.sh/hook": pre-install
    "helm.sh/hook-weight": "0"
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    app: collection-sumologic

---
# Source: sumologic/templates/setup/setup-clusterrole.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: collection-sumologic-setup
  annotations:
    "helm.sh/hook": pre-install
    "helm.sh/hook-weight": "1"
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    app: collection-sumologic

rules:
- apiGroups:
  - ""
  resources:
  - secrets
  - namespaces
  verbs: ["get", "create", "describe", "patch"]
---
# Source: sumologic/templates/setup/setup-clusterrolebinding.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: collection-sumologic-setup
  annotations:
    "helm.sh/hook": pre-install
    "helm.sh/hook-weight": "2"
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    app: collection-sumologic

roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: collection-sumologic-setup
subjects:
- kind: ServiceAccount
  name: collection-sumologic-setup
  namespace: $NAMESPACE
---
# Source: sumologic/templates/setup/setup-job.yaml

apiVersion: batch/v1
kind: Job
metadata:
  name: collection-sumologic-setup
  namespace: $NAMESPACE
  annotations:
    "helm.sh/hook": pre-install
    "helm.sh/hook-weight": "3"
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    app: collection-sumologic

spec:
  template:
    spec:
      restartPolicy: OnFailure
      serviceAccountName: collection-sumologic-setup
      volumes:
      - name: setup
        configMap:
          name: collection-sumologic-setup
          defaultMode: 0777
      containers:
      - name: setup
        image: sumologic/kubernetes-fluentd:0.10.0
        imagePullPolicy: IfNotPresent
        command: ["/etc/terraform/setup.sh"]
        volumeMounts:
        - name: setup
          mountPath: /etc/terraform
        env:
        - name: SUMOLOGIC_ACCESSID
          value: $SUMOLOGIC_ACCESSID
        - name: SUMOLOGIC_ACCESSKEY
          value: $SUMOLOGIC_ACCESSKEY
        - name: SUMOLOGIC_BASE_URL
          value: $SUMOLOGIC_BASE_URL

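The $NAMESPACE, $SUMOLOGIC_ACCESSID and similar placeholders in this template are filled in at install time. A minimal usage sketch, assuming GNU gettext's envsubst is available (the project's documented install commands may differ, and the example values below are hypothetical):

  export NAMESPACE="sumologic"
  export CLUSTER_NAME="kubernetes"
  export COLLECTOR_NAME="kubernetes"
  export SUMOLOGIC_BASE_URL="https://api.sumologic.com/api/v1/"
  export SUMOLOGIC_ACCESSID="..."    # your Sumo Logic access ID
  export SUMOLOGIC_ACCESSKEY="..."   # your Sumo Logic access key
  envsubst < deploy/kubernetes/setup-sumologic.yaml.tmpl | kubectl apply -f -

Note that envsubst also substitutes the matching runtime references inside setup.sh, which is harmless here because they resolve to the same values; the `${SUMOLOGIC_BASE_URL%v1*}` expansion is left untouched because it is not a plain variable reference.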