diff --git a/README.md b/README.md
index 42cf90e..cc2f978 100644
--- a/README.md
+++ b/README.md
@@ -91,6 +91,24 @@ default values are setup via `system_default_ram_mb` and `system_default_cpus`
 which can also be overridden if you wish different default values. (Current
 defaults are 2048MB and 4 vCPU.)
 
+For an HA control plane (stacked control plane nodes), edit `playbooks/ka-init/group_vars/all.yml` and add `lb` and `master_slave` entries to `virtual_machines`, as follows:
+
+```
+virtual_machines:
+  - name: kube-lb
+    node_type: lb
+  - name: kube-master1
+    node_type: master
+  - name: kube-master2
+    node_type: master_slave
+  - name: kube-master3
+    node_type: master_slave
+  - name: kube-node-1
+    node_type: nodes
+  - name: kube-node-2
+    node_type: nodes
+```
+
 > **WARNING**
 >
 > If you're not going to be connecting to the virtual machines from the same
@@ -136,7 +154,6 @@ ansible-playbook -i inventory/virthost/ -e ssh_proxy_enabled=true playbooks/virt
 > * `ssh_proxy_port: 2222` _port of the virthost (optional, default 22)_
 > * `vm_ssh_key_path: /home/lmadsen/.ssh/id_vm_rsa` _path to local SSH key_
 
-
 ### Step 3. Install Kubernetes
 
 During the execution of _Step 1_ a local inventory should have been
diff --git a/playbooks/ka-init/group_vars/all.yml b/playbooks/ka-init/group_vars/all.yml
index 61f43db..65ae455 100644
--- a/playbooks/ka-init/group_vars/all.yml
+++ b/playbooks/ka-init/group_vars/all.yml
@@ -61,12 +61,20 @@ images_directory: /home/images
 system_default_ram_mb: 4096
 system_default_cpus: 4
 virtual_machines:
-  - name: kube-master
+  - name: kube-master1
     node_type: master
   - name: kube-node-1
     node_type: nodes
   - name: kube-node-2
     node_type: nodes
+# Uncomment the following (lb/master_slave) for an HA control plane
+#  - name: kube-lb
+#    node_type: lb
+#  - name: kube-master2
+#    node_type: master_slave
+#  - name: kube-master3
+#    node_type: master_slave
+
   # - name: builder
   #   node_type: builder
   #   system_ram_mb: 24576
diff --git a/playbooks/kube-install-ovn.yml b/playbooks/kube-install-ovn.yml
index 19273b6..c590525 100644
--- a/playbooks/kube-install-ovn.yml
+++ b/playbooks/kube-install-ovn.yml
@@ -1,7 +1,14 @@
 ---
 - import_playbook: ka-init/init.yml
 
-- hosts: master,nodes
+- hosts: lb
+  become: true
+  become_user: root
+  tasks: []
+  roles:
+    - { role: lb-setup }
+
+- hosts: master,nodes,master_slave
   become: true
   become_user: root
   tasks: []
@@ -15,10 +22,17 @@
     - { role: install-docker, when: container_runtime == 'docker' }
     - { role: kube-install }
 
-- hosts: master,nodes
+- hosts: master,nodes,master_slave
   become: true
   become_user: root
   tasks:
+    - name: Set net.ipv4.ip_forward to 1
+      sysctl:
+        name: net.ipv4.ip_forward
+        value: 1
+        sysctl_set: yes
+        state: present
+        reload: yes
     - name: Load module if parameter is not in node  # noqa 301
       shell: >
         if [ ! -f /proc/sys/net/bridge/bridge-nf-call-iptables ]; then \
@@ -39,6 +53,20 @@
   roles:
     - { role: kube-init }
 
+- hosts: master_slave
+  become: true
+  become_user: root
+  pre_tasks:
+    - name: Get cert related args from master
+      set_fact:
+        kubeadm_cert_key: "{{ hostvars[groups['master'][0]]['kubeadm_cert_key'] }}"
+    - name: Get kubeadm_join_command from master
+      set_fact:
+        kubeadm_join_command: "{{ hostvars[groups['master'][0]]['kubeadm_join_command'] }}"
+  tasks: []
+  roles:
+    - { role: kube-master-join-cluster }
+
 # ---- placeholder: kube-cni
 
 # without become.
diff --git a/playbooks/kube-install.yml b/playbooks/kube-install.yml
index ababaaa..9dc8f04 100644
--- a/playbooks/kube-install.yml
+++ b/playbooks/kube-install.yml
@@ -1,7 +1,14 @@
 ---
 - import_playbook: ka-init/init.yml
 
-- hosts: master,nodes
+- hosts: lb
+  become: true
+  become_user: root
+  tasks: []
+  roles:
+    - { role: lb-setup }
+
+- hosts: master,nodes,master_slave
   become: true
   become_user: root
   tasks: []
@@ -14,10 +21,17 @@
     - { role: install-docker, when: container_runtime == 'docker' }
     - { role: kube-install }
 
-- hosts: master,nodes
+- hosts: master,nodes,master_slave
   become: true
   become_user: root
   tasks:
+    - name: Set net.ipv4.ip_forward to 1
+      sysctl:
+        name: net.ipv4.ip_forward
+        value: 1
+        sysctl_set: yes
+        state: present
+        reload: yes
     - name: Load module if parameter is not in node  # noqa 301
       shell: >
         if [ ! -f /proc/sys/net/bridge/bridge-nf-call-iptables ]; then \
@@ -38,6 +52,20 @@
   roles:
     - { role: kube-init }
 
+- hosts: master_slave
+  become: true
+  become_user: root
+  pre_tasks:
+    - name: Get cert related args from master
+      set_fact:
+        kubeadm_cert_key: "{{ hostvars[groups['master'][0]]['kubeadm_cert_key'] }}"
+    - name: Get kubeadm_join_command from master
+      set_fact:
+        kubeadm_join_command: "{{ hostvars[groups['master'][0]]['kubeadm_join_command'] }}"
+  tasks: []
+  roles:
+    - { role: kube-master-join-cluster }
+
 # without become.
 - hosts: master
   tasks: []
diff --git a/roles/kube-init/tasks/main.yml b/roles/kube-init/tasks/main.yml
index 729d270..6269e98 100644
--- a/roles/kube-init/tasks/main.yml
+++ b/roles/kube-init/tasks/main.yml
@@ -70,12 +70,21 @@
   set_fact:
     k8s_version: ""
 
+- name: Default the upload-certs kubeadm option to empty
+  set_fact:
+    k8s_upload_certs: ""
+
+- name: Set the upload-certs kubeadm option for an HA control plane
+  set_fact:
+    k8s_upload_certs: "--upload-certs"
+  when: groups.master_slave is defined and groups.master_slave|length > 0
+
 # Was trying to use flannel and running with:
 # kubeadm init > /etc/kubeadm.init.txt
 # abandonded for now...
 - name: Run kubeadm init
   shell: >
-    kubeadm init {{ k8s_version }} {{ arg_crio }} --config=/root/kubeadm.cfg > /var/log/kubeadm.init.log
+    kubeadm init {{ k8s_version }} {{ arg_crio }} {{ k8s_upload_certs }} --config=/root/kubeadm.cfg > /var/log/kubeadm.init.log
   args:
     creates: /etc/.kubeadm-complete
 
@@ -93,6 +102,16 @@
   set_fact:
     kubeadm_join_command: "{{ kubeadm_join_output.stdout }}"
 
+- name: Get certificate-key
+  shell: >
+    kubeadm init phase upload-certs --upload-certs -v 0 2> /dev/null | tail -n 1
+  register: kubeadm_cert_key_output
+  when: groups.master_slave is defined and groups.master_slave|length > 0
+
+- name: Set fact with certificate-key
+  set_fact:
+    kubeadm_cert_key: "{{ kubeadm_cert_key_output.stdout }}"
+  when: groups.master_slave is defined and groups.master_slave|length > 0
 
 # -------- Copy in admin.conf
 # ---- Kube 1.6, apparently you can't use kubectl as root? weird/awesome.
diff --git a/roles/kube-init/templates/kubeadm.cfg.v1beta2.j2 b/roles/kube-init/templates/kubeadm.cfg.v1beta2.j2
index 3c7d7ff..e52d997 100644
--- a/roles/kube-init/templates/kubeadm.cfg.v1beta2.j2
+++ b/roles/kube-init/templates/kubeadm.cfg.v1beta2.j2
@@ -38,6 +38,11 @@ scheduler:
   extraArgs:
     address: 0.0.0.0
 {% endif %}
+{% if groups.lb is defined and groups.lb|length > 0 %}
+{% for node in groups["lb"] %}
+controlPlaneEndpoint: {{ hostvars[node]['ansible_host'] }}:6443
+{% endfor %}
+{% endif %}
 {% if enable_endpointslice|default(false) %}
 controllerManager:
   extraArgs:
diff --git a/roles/kube-master-join-cluster/tasks/main.yml b/roles/kube-master-join-cluster/tasks/main.yml
new file mode 100644
index 0000000..d514277
--- /dev/null
+++ b/roles/kube-master-join-cluster/tasks/main.yml
@@ -0,0 +1,27 @@
+- name: Default cri-o flags to empty
+  set_fact:
+    arg_crio: ""
+
+- name: Set cri-o flags
+  set_fact:
+    arg_crio: "--ignore-preflight-errors=all"
+  when: container_runtime == "crio"
+
+- name: Default control-plane join args to empty
+  set_fact:
+    kubeadm_master_cert: ""
+
+- name: Set control-plane join args
+  set_fact:
+    kubeadm_master_cert: "--control-plane --certificate-key {{ kubeadm_cert_key }}"
+
+- name: Join the node to the cluster as an additional control-plane member
+  shell: >
+    {{ kubeadm_join_command }} {{ kubeadm_master_cert }}
+  args:
+    creates: /etc/.kubeadm-joined
+
+- name: Mark the node as joined
+  file:
+    path: /etc/.kubeadm-joined
+    state: directory
diff --git a/roles/lb-setup/tasks/main.yml b/roles/lb-setup/tasks/main.yml
new file mode 100644
index 0000000..b97eb91
--- /dev/null
+++ b/roles/lb-setup/tasks/main.yml
@@ -0,0 +1,25 @@
+- name: "Disable SELinux :("
+  selinux:
+    state: disabled
+
+- name: Install haproxy
+  package:
+    name: haproxy
+    state: present
+
+- name: Back up the original haproxy.cfg
+  copy:
+    src: /etc/haproxy/haproxy.cfg
+    dest: /etc/haproxy/haproxy.cfg.orig
+    remote_src: yes
+
+- name: Create haproxy.cfg from template
+  template:
+    src: haproxy.cfg.j2
+    dest: /etc/haproxy/haproxy.cfg
+
+- name: Start and enable haproxy
+  systemd:
+    name: haproxy.service
+    state: started
+    enabled: yes
diff --git a/roles/lb-setup/templates/haproxy.cfg.j2 b/roles/lb-setup/templates/haproxy.cfg.j2
new file mode 100644
index 0000000..6ca57f4
--- /dev/null
+++ b/roles/lb-setup/templates/haproxy.cfg.j2
@@ -0,0 +1,86 @@
+#---------------------------------------------------------------------
+# Example configuration for a possible web application.  See the
+# full configuration options online.
+#
+#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
+#
+#---------------------------------------------------------------------
+
+#---------------------------------------------------------------------
+# Global settings
+#---------------------------------------------------------------------
+global
+    # to have these messages end up in /var/log/haproxy.log you will
+    # need to:
+    #
+    # 1) configure syslog to accept network log events.  This is done
+    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
+    #    /etc/sysconfig/syslog
+    #
+    # 2) configure local2 events to go to the /var/log/haproxy.log
+    #    file. A line like the following can be added to
+    #    /etc/sysconfig/syslog
+    #
+    #    local2.*                       /var/log/haproxy.log
+    #
+    log         127.0.0.1 local2
+
+    chroot      /var/lib/haproxy
+    pidfile     /var/run/haproxy.pid
+    maxconn     4000
+    user        haproxy
+    group       haproxy
+    daemon
+
+    # turn on stats unix socket
+    stats socket /var/lib/haproxy/stats
+
+#---------------------------------------------------------------------
+# common defaults that all the 'listen' and 'backend' sections will
+# use if not designated in their block
+#---------------------------------------------------------------------
+defaults
+    mode                    http
+    log                     global
+    option                  httplog
+    option                  dontlognull
+    option http-server-close
+    option forwardfor       except 127.0.0.0/8
+    option                  redispatch
+    retries                 3
+    timeout http-request    10s
+    timeout queue           1m
+    timeout connect         10s
+    timeout client          1m
+    timeout server          1m
+    timeout http-keep-alive 10s
+    timeout check           10s
+    maxconn                 3000
+
+#---------------------------------------------------------------------
+# main frontend which proxies to the backends
+#---------------------------------------------------------------------
+frontend kubernetes
+    bind {{ ansible_default_ipv4.address }}:6443
+    option tcplog
+    mode tcp
+    default_backend kubernetes-master-nodes
+#---------------------------------------------------------------------
+# static backend for serving up images, stylesheets and such
+#---------------------------------------------------------------------
+backend static
+    balance     roundrobin
+    server      static 127.0.0.1:4331 check
+#---------------------------------------------------------------------
+# round robin balancing between the various backends
+#---------------------------------------------------------------------
+backend kubernetes-master-nodes
+    mode tcp
+    balance     roundrobin
+    option tcp-check
+{% for node in groups["master"] %}
+    server {{ node }} {{ hostvars[node]['ansible_host'] }}:6443 check
+{% endfor %}
+{% for node in groups["master_slave"] %}
+    server {{ node }} {{ hostvars[node]['ansible_host'] }}:6443 check
+{% endfor %}
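
With this change, the new `kube-master-join-cluster` role runs the join command gathered from the first master on each `master_slave` host, appending `--control-plane --certificate-key` so the node joins as an additional stacked control-plane member behind the load balancer. As a rough sketch (the endpoint, token, hash, and certificate key below are placeholders, not values produced by this patch), the command it ends up executing looks like:

```
# Placeholder values for illustration only: the real join command is the one
# registered on the first master, and the certificate key comes from
# `kubeadm init phase upload-certs --upload-certs`.
kubeadm join <lb-address>:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --control-plane --certificate-key <certificate-key>
```

Once the playbook finishes, `kubectl get nodes` against the load-balancer endpoint should list every control-plane node (kube-master1/2/3 in the example above), with haproxy on the `lb` host round-robin balancing API traffic across them.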