Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add rotation certificate check, remove function to restart agents #7097

Merged
merged 7 commits into from
May 12, 2023
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
130 changes: 130 additions & 0 deletions tests/e2e/certrotation/certrotation_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,130 @@
package secretsencryption

import (
"flag"
"fmt"
"os"
"regexp"
"strings"
"testing"

"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2204", "VM operating system")

// Cluster topology and CI-mode flags, overridable on the go test command line.
var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
var ci = flag.Bool("ci", false, "running on CI")

// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master

// Test_E2ECustomCARotation is the standard Go test entry point: it parses the
// suite flags, wires Ginkgo's fail handler into the testing framework, and
// runs the certificate-rotation spec suite.
func Test_E2ECustomCARotation(t *testing.T) {
	flag.Parse()
	RegisterFailHandler(Fail)
	suiteCfg, reporterCfg := GinkgoConfiguration()
	RunSpecs(t, "Custom Certificate Rotation Test Suite", suiteCfg, reporterCfg)
}

// Shared suite state: populated by the cluster-creation spec and consumed by
// the rotation specs and the AfterSuite cleanup below.
var (
	kubeConfigFile  string   // path to the generated kubeconfig for server-0
	agentNodeNames  []string // names of the agent VMs created by e2e.CreateCluster
	serverNodeNames []string // names of the server VMs created by e2e.CreateCluster
)

var _ = ReportAfterEach(e2e.GenReport)
fmoral2 marked this conversation as resolved.
Show resolved Hide resolved

// RotateCertificate rotate the Certificate on each node given
func RotateCertificate(nodeNames []string) error {
fmoral2 marked this conversation as resolved.
Show resolved Hide resolved
for _, nodeName := range nodeNames {
cmd := "sudo k3s --debug certificate rotate"
if _, err := e2e.RunCmdOnNode(cmd, nodeName); err != nil {
return err
}
}
return nil
}

var _ = Describe("Verify Custom CA Rotation", Ordered, func() {
Context("Custom CA is rotated:", func() {
It("Starts up with no issues", func() {
var err error
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmoral2 marked this conversation as resolved.
Show resolved Hide resolved
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})

It("Verifies Certificate Rotation", func() {
const grepCert = "sudo ls -lt /var/lib/rancher/k3s/server/ | grep tls"
expectedResult := []string{
"client-ca.crt", "client-ca.key",
"client-ca.nochain.crt", "client-ca.pem",
"dynamic-cert.json", "peer-ca.crt",
"peer-ca.key", "peer-ca.pem",
"server-ca.crt", "server-ca.key",
"server-ca.pem", "intermediate-ca.crt",
"intermediate-ca.key", "intermediate-ca.pem",
"request-header-ca.crt", "request-header-ca.key",
"request-header-ca.pem", "root-ca.crt",
"root-ca.key", "root-ca.pem",
"server-ca.crt", "server-ca.key",
"server-ca.nochain.crt", "server-ca.pem",
"service.current.key", "service.key",
"apiserver-loopback-client__.crt", "apiserver-loopback-client__.key",
"",
}

var finalResult string
var finalErr error
errStop := e2e.StopCluster(serverNodeNames)
fmoral2 marked this conversation as resolved.
Show resolved Hide resolved
Expect(errStop).NotTo(HaveOccurred(), "Server not stop correctly")
errRotate := RotateCertificate(serverNodeNames)
Expect(errRotate).NotTo(HaveOccurred(), "Certificate not rotate correctly")
errStart := e2e.StartCluster(serverNodeNames)
Expect(errStart).NotTo(HaveOccurred(), "Server not start correctly")

for _, nodeName := range serverNodeNames {
grCert, errGrep := e2e.RunCmdOnNode(grepCert, nodeName)
Expect(errGrep).NotTo(HaveOccurred(), "Certificate not created correctly")
re := regexp.MustCompile("tls-[0-9]+")
tls := re.FindAllString(grCert, -1)[0]
final := fmt.Sprintf("sudo diff -sr /var/lib/rancher/k3s/server/tls/ /var/lib/rancher/k3s/server/%s/ | grep -i identical | cut -f4 -d ' ' | xargs basename -a \n", tls)
finalResult, finalErr = e2e.RunCmdOnNode(final, nodeName)
Expect(finalErr).NotTo(HaveOccurred(), "Final Certification does not created correctly")
}
if len(agentNodeNames) > 0 {
errRestartAgent := e2e.RestartCluster(agentNodeNames)
Expect(errRestartAgent).NotTo(HaveOccurred(), "Restart Agent not happened correctly")
}
Eventually(func(g Gomega) {
fmoral2 marked this conversation as resolved.
Show resolved Hide resolved
finalCert := strings.Replace(finalResult, "\n", ",", -1)
finalCertArray := strings.Split(finalCert, ",")
Expect((finalCertArray)).Should((Equal(expectedResult)), "Final certification does not match the expected results")
}, "620s", "5s").Should(Succeed())

})

})
})

// failed latches to true once any spec fails, so AfterSuite can decide
// whether to keep the cluster alive for debugging.
var failed bool

var _ = AfterEach(func() {
	if CurrentSpecReport().Failed() {
		failed = true
	}
})

var _ = AfterSuite(func() {
	// On failure outside CI, leave the cluster running for inspection;
	// otherwise tear everything down and remove the generated kubeconfig.
	if failed && !*ci {
		fmt.Println("FAILED!")
		return
	}
	Expect(e2e.DestroyCluster()).To(Succeed())
	Expect(os.Remove(kubeConfigFile)).To(Succeed())
})
88 changes: 88 additions & 0 deletions tests/e2e/certrotation/vagrantfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
# NOTE(review): 'no' here leaves parallel provisioning ENABLED — confirm intent.
ENV['VAGRANT_NO_PARALLEL'] = 'no'
# Node layout and install source; every value is overridable via E2E_* env vars.
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
  ["server-0", "server-1", "server-2", "agent-0"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
  ['generic/ubuntu2204', 'generic/ubuntu2204', 'generic/ubuntu2204', 'generic/ubuntu2204'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
NETWORK_PREFIX = "10.10.10"
# Populated per-VM by provision(); holds the INSTALL_K3S_* selector string.
install_type = ""

# Provisions one VM: assigns box/hostname/network, then installs k3s with a
# role-specific configuration. The first server additionally generates the
# custom CA before k3s starts, so the cluster bootstraps against it.
def provision(vm, role, role_num, node_num)
  vm.box = NODE_BOXES[node_num]
  vm.hostname = role
  # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
  vm.network "private_network", ip: "#{NETWORK_PREFIX}.#{100+node_num}", netmask: "255.255.255.0"

  vagrant_defaults = '../vagrantdefaults.rb'
  # BUG FIX: File.exists? is a deprecated alias removed in Ruby 3.2; use File.exist?.
  load vagrant_defaults if File.exist?(vagrant_defaults)

  defaultOSConfigure(vm)
  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)

  # Sanity-check outbound networking/DNS before attempting the install.
  vm.provision "shell", inline: "ping -c 2 k3s.io"

  if role.include?("server") && role_num == 0
    # First server: copy in the custom-CA generation script and run it once
    # before the k3s install, so all cluster certs chain to the custom root.
    vm.provision 'file' do |scp|
      scp.source = '../../../contrib/util/generate-custom-ca-certs.sh'
      scp.destination = '/tmp/generate-custom-ca-certs.sh'
    end
    vm.provision 'custom-ca', type: 'shell', run: 'once' do |script|
      script.inline = 'bash /tmp/generate-custom-ca-certs.sh'
      script.env = {'PRODUCT' => 'vagrant-e2e-test', 'DATA_DIR' => '/var/lib/rancher/k3s'}
    end
    vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
      k3s.args = %W[server --cluster-init --node-external-ip=#{NETWORK_PREFIX}.100 --flannel-iface=eth1]
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  elsif role.include?("server") && role_num != 0
    # Additional servers join the first server's etcd cluster.
    vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
      k3s.args = %W[server --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1]
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  elsif role.include?("agent")
    vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
      k3s.args = %W[agent --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1]
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  end
  # MicroOS boxes need a reboot after install for the transactional update.
  if vm.box.to_s.include?("microos")
    vm.provision 'k3s-reload', type: 'reload', run: 'once'
  end
end

# Top-level Vagrant definition: declares required plugins, provider resources,
# and one VM per entry in NODE_ROLES, provisioned by provision() above.
Vagrant.configure("2") do |config|
  config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
  # Default provider is libvirt, virtualbox is only provided as a backup
  config.vm.provider "libvirt" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end
  config.vm.provider "virtualbox" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end

  # When the role/box lists come from env vars they arrive as space-separated
  # strings; normalize them to arrays.
  if NODE_ROLES.kind_of?(String)
    NODE_ROLES = NODE_ROLES.split(" ", -1)
  end
  if NODE_BOXES.kind_of?(String)
    NODE_BOXES = NODE_BOXES.split(" ", -1)
  end

  # Must iterate on the index, vagrant does not understand iterating
  # over the node roles themselves
  NODE_ROLES.length.times do |i|
    name = NODE_ROLES[i]
    # Role number is the numeric suffix, e.g. "server-2" -> 2.
    role_num = name.split("-", -1).pop.to_i
    config.vm.define name do |node|
      provision(node.vm, name, role_num, i)
    end
  end
end
4 changes: 2 additions & 2 deletions tests/e2e/multiclustercidr/multiclustercidr_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ var _ = Describe("Verify MultiClusterCIDR Configuration", Ordered, func() {

It("Restart agent-0", func() {
agents := []string{"agent-0"}
err := e2e.RestartClusterAgent(agents)
err := e2e.RestartCluster(agents)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
})

Expand Down Expand Up @@ -223,7 +223,7 @@ var _ = Describe("Verify MultiClusterCIDR Configuration", Ordered, func() {

It("Delete and restart agent-0", func() {
agents := []string{"agent-0"}
err := e2e.RestartClusterAgent(agents)
err := e2e.RestartCluster(agents)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
})

Expand Down
17 changes: 14 additions & 3 deletions tests/e2e/testutils.go
Original file line number Diff line number Diff line change
Expand Up @@ -424,10 +424,21 @@ func RestartCluster(nodeNames []string) error {
return nil
}

// RestartCluster restarts the k3s service on each node given
func RestartClusterAgent(nodeNames []string) error {
// StartCluster starts the k3s systemd unit (server or agent, matched by the
// k3s* glob) on each of the given nodes.
func StartCluster(nodeNames []string) error {
	const cmd = "sudo systemctl start k3s*"
	for _, node := range nodeNames {
		if _, err := RunCmdOnNode(cmd, node); err != nil {
			return err
		}
	}
	return nil
}

// StopCluster stops the k3s service on each node given
func StopCluster(nodeNames []string) error {
for _, nodeName := range nodeNames {
cmd := "sudo systemctl restart k3s-agent"
cmd := "sudo systemctl stop k3s*"
if _, err := RunCmdOnNode(cmd, nodeName); err != nil {
return err
}
Expand Down