forked from k3s-io/k3s
Commit 4d85545 (1 parent: 0d23cfe)
Signed-off-by: Ian Cardoso <osodracnai@gmail.com>
Showing 4 changed files with 510 additions and 0 deletions.
@@ -0,0 +1,73 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] || ["server-0"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] || ['generic/ubuntu2204'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# Virtualbox >= 6.1.28 requires `/etc/vbox/networks.conf` for expanded private networks
NETWORK_PREFIX = "10.10.10"
install_type = ""

def provision(vm, role, role_num, node_num)
  vm.box = NODE_BOXES[node_num]
  vm.hostname = role
  # An expanded netmask is required to allow VM<-->VM communication; virtualbox defaults to /32
  vm.network "private_network", ip: "#{NETWORK_PREFIX}.#{100+node_num}", netmask: "255.255.255.0"

  vagrant_defaults = '../vagrantdefaults.rb'
  scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
  load vagrant_defaults if File.exists?(vagrant_defaults)

  defaultOSConfigure(vm)
  dockerInstall(vm)
  addCoverageDir(vm, role, GOCOVER)

  vm.provision "shell", inline: "ping -c 2 k3s.io"
  vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
    k3s.args = "server "
    k3s.env = %W[K3S_KUBECONFIG_MODE=0644 INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_ENABLE=true]
    k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    k3s.config = <<~YAML
      token: vagrant
      node-external-ip: #{NETWORK_PREFIX}.100
      flannel-iface: eth1
    YAML
  end

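  # Rootless mode needs cgroup v2 (the Go suite's BeforeSuite checks for it); the reload
  # provisioner restarts the VM so the change made by enable_cgroupv2.sh takes effect.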
vm.provision "Enable cgroup v2", type: "shell", path: scripts_location + "/enable_cgroupv2.sh" | ||
vm.provision 'k3s-reload', type: 'reload', run: 'once' | ||
end | ||
|
||
Vagrant.configure("2") do |config| | ||
config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"] | ||
# Default provider is libvirt, virtualbox is only provided as a backup | ||
config.vm.provider "libvirt" do |v| | ||
v.cpus = NODE_CPUS | ||
v.memory = NODE_MEMORY | ||
end | ||
config.vm.provider "virtualbox" do |v| | ||
v.cpus = NODE_CPUS | ||
v.memory = NODE_MEMORY | ||
end | ||
|
||
if NODE_ROLES.kind_of?(String) | ||
NODE_ROLES = NODE_ROLES.split(" ", -1) | ||
end | ||
if NODE_BOXES.kind_of?(String) | ||
NODE_BOXES = NODE_BOXES.split(" ", -1) | ||
end | ||
|
||
# Must iterate on the index, vagrant does not understand iterating | ||
# over the node roles themselves | ||
NODE_ROLES.length.times do |i| | ||
name = NODE_ROLES[i] | ||
role_num = name.split("-", -1).pop.to_i | ||
config.vm.define name do |node| | ||
provision(node.vm, name, role_num, i) | ||
end | ||
end | ||
end |
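Everything configurable in this Vagrantfile is driven by the E2E_* environment variables read at its top. The Go suite below brings the box up through the shared tests/e2e helpers, but purely as an illustration (hypothetical code, not part of this commit), driving the box directly from Go would just mean exporting those variables around a `vagrant up`:

// Hypothetical illustration, not part of this commit: the E2E_* knobs are plain
// environment variables, so any wrapper that sets them before running
// `vagrant up` controls how the single rootless box is provisioned.
package main

import (
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("vagrant", "up", "server-0")
	cmd.Env = append(os.Environ(),
		"E2E_NODE_ROLES=server-0",           // single rootless node, as in the defaults above
		"E2E_NODE_BOXES=generic/ubuntu2204", // matches the Vagrantfile default
		"E2E_NODE_CPUS=2",
		"E2E_NODE_MEMORY=2048",
	)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}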
@@ -0,0 +1,281 @@
package rootless

import (
	"flag"
	"fmt"
	"strings"
	"testing"

	"github.com/k3s-io/k3s/tests/e2e"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2204", "VM operating system")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")

// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.27.1+k3s2 or nil for latest commit from master

// Rootless is only valid on a single node, but requires node/kernel configuration, so it needs a dedicated E2E test environment.

func Test_E2ERootlessStartupValidation(t *testing.T) {
	RegisterFailHandler(Fail)
	flag.Parse()
	suiteConfig, reporterConfig := GinkgoConfiguration()
	RunSpecs(t, "Startup Test Suite", suiteConfig, reporterConfig)
}

var (
	serverNodeNames []string
	agentNodeNames  []string
)

func StartK3sCluster(nodes []string, serverYAML string) error {
	for _, node := range nodes {
		var yamlCmd string
		var resetCmd string
		var startCmd string

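		// resetCmd trims /etc/rancher/k3s/config.yaml back to the three base lines the
		// Vagrant provisioner wrote (token, node-external-ip, flannel-iface), yamlCmd
		// appends the per-test options, and startCmd restarts the user-scoped
		// k3s-rootless unit so the new config is picked up.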
resetCmd = "head -n 3 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml" | ||
yamlCmd = fmt.Sprintf("echo '%s' >> /etc/rancher/k3s/config.yaml", serverYAML) | ||
startCmd = "systemctl --user restart k3s-rootless" | ||
|
||
if _, err := e2e.RunCmdOnNode(resetCmd, node); err != nil { | ||
return err | ||
} | ||
if _, err := e2e.RunCmdOnNode(yamlCmd, node); err != nil { | ||
return err | ||
} | ||
if _, err := RunCmdOnRootlesNode("systemctl --user daemon-reload", node); err != nil { | ||
return err | ||
} | ||
|
||
if _, err := RunCmdOnRootlesNode(startCmd, node); err != nil { | ||
return err | ||
} | ||
|
||
} | ||
return nil | ||
} | ||
|
||
func KillK3sCluster(nodes []string) error { | ||
for _, node := range nodes { | ||
if _, err := RunCmdOnRootlesNode(`systemctl --user stop k3s-rootless`, node); err != nil { | ||
return err | ||
} | ||
|
||
if _, err := RunCmdOnRootlesNode("k3s-killall.sh", node); err != nil { | ||
return err | ||
} | ||
if _, err := RunCmdOnRootlesNode("rm -rf /home/vagrant/.rancher/k3s/server/db", node); err != nil { | ||
return err | ||
} | ||
} | ||
return nil | ||
} | ||
|
||
var _ = ReportAfterEach(e2e.GenReport)

var _ = BeforeSuite(func() {
	var err error
	if *local {
		serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, 1, 0)
	} else {
		serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, 1, 0)
	}
	Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
	// Checks if system is using cgroup v2
	_, err = e2e.RunCmdOnNode("cat /sys/fs/cgroup/cgroup.controllers", serverNodeNames[0])
	Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))

})

var _ = Describe("Various Startup Configurations", Ordered, func() {
	Context("Verify CRI-Dockerd :", func() {
		It("Starts K3s with no issues", func() {
			dockerYAML := "docker: true"
			err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), dockerYAML)
			Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))

			fmt.Println("CLUSTER CONFIG")
			fmt.Println("OS:", *nodeOS)
			fmt.Println("Server Nodes:", serverNodeNames)
			fmt.Println("Agent Nodes:", agentNodeNames)
		})

		It("Checks node and pod status", func() {
			fmt.Printf("\nFetching node status\n")
			Eventually(func(g Gomega) {
				nodes, err := ParseNodes(false, serverNodeNames[0])
				g.Expect(err).NotTo(HaveOccurred())
				for _, node := range nodes {
					g.Expect(node.Status).Should(Equal("Ready"))
				}
			}, "360s", "5s").Should(Succeed())
			_, _ = ParseNodes(true, serverNodeNames[0])

			fmt.Printf("\nFetching pods status\n")
			Eventually(func(g Gomega) {
				pods, err := ParsePods(false, serverNodeNames[0])
				g.Expect(err).NotTo(HaveOccurred())
				for _, pod := range pods {
					if strings.Contains(pod.Name, "helm-install") {
						g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
					} else {
						g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
					}
				}
			}, "360s", "5s").Should(Succeed())
			_, _ = ParsePods(true, serverNodeNames[0])
		})
		It("Kills the cluster", func() {
			err := KillK3sCluster(append(serverNodeNames, agentNodeNames...))
			Expect(err).NotTo(HaveOccurred())
		})
	})
Context("Verify prefer-bundled-bin flag", func() { | ||
It("Starts K3s with no issues", func() { | ||
preferBundledYAML := "prefer-bundled-bin: true" | ||
err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), preferBundledYAML) | ||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) | ||
|
||
fmt.Println("CLUSTER CONFIG") | ||
fmt.Println("OS:", *nodeOS) | ||
fmt.Println("Server Nodes:", serverNodeNames) | ||
fmt.Println("Agent Nodes:", agentNodeNames) | ||
}) | ||
|
||
It("Checks node and pod status", func() { | ||
fmt.Printf("\nFetching node status\n") | ||
Eventually(func(g Gomega) { | ||
nodes, err := ParseNodes(false, serverNodeNames[0]) | ||
g.Expect(err).NotTo(HaveOccurred()) | ||
for _, node := range nodes { | ||
g.Expect(node.Status).Should(Equal("Ready")) | ||
} | ||
}, "360s", "5s").Should(Succeed()) | ||
_, _ = ParseNodes(true, serverNodeNames[0]) | ||
|
||
fmt.Printf("\nFetching pods status\n") | ||
Eventually(func(g Gomega) { | ||
pods, err := ParsePods(false, serverNodeNames[0]) | ||
g.Expect(err).NotTo(HaveOccurred()) | ||
for _, pod := range pods { | ||
if strings.Contains(pod.Name, "helm-install") { | ||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) | ||
} else { | ||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name) | ||
} | ||
} | ||
}, "360s", "5s").Should(Succeed()) | ||
_, _ = ParsePods(true, serverNodeNames[0]) | ||
}) | ||
It("Kills the cluster", func() { | ||
err := KillK3sCluster(append(serverNodeNames, agentNodeNames...)) | ||
Expect(err).NotTo(HaveOccurred()) | ||
}) | ||
}) | ||
Context("Verify disable-agent and egress-selector-mode flags", func() { | ||
It("Starts K3s with no issues", func() { | ||
disableAgentYAML := "disable-agent: true\negress-selector-mode: cluster" | ||
err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), disableAgentYAML) | ||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) | ||
|
||
fmt.Println("CLUSTER CONFIG") | ||
fmt.Println("OS:", *nodeOS) | ||
fmt.Println("Server Nodes:", serverNodeNames) | ||
fmt.Println("Agent Nodes:", agentNodeNames) | ||
}) | ||
|
||
It("Checks node and pod status", func() { | ||
fmt.Printf("\nFetching node status\n") | ||
Eventually(func(g Gomega) { | ||
nodes, err := ParseNodes(false, serverNodeNames[0]) | ||
g.Expect(err).NotTo(HaveOccurred()) | ||
for _, node := range nodes { | ||
g.Expect(node.Status).Should(Equal("Ready")) | ||
} | ||
}, "360s", "5s").Should(Succeed()) | ||
_, _ = ParseNodes(true, serverNodeNames[0]) | ||
|
||
fmt.Printf("\nFetching pods status\n") | ||
Eventually(func(g Gomega) { | ||
pods, err := ParsePods(false, serverNodeNames[0]) | ||
g.Expect(err).NotTo(HaveOccurred()) | ||
for _, pod := range pods { | ||
if strings.Contains(pod.Name, "helm-install") { | ||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) | ||
} else { | ||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name) | ||
} | ||
} | ||
}, "360s", "5s").Should(Succeed()) | ||
_, _ = ParsePods(true, serverNodeNames[0]) | ||
}) | ||
|
||
It("Returns pod metrics", func() { | ||
cmd := "kubectl top pod -A" | ||
Eventually(func() error { | ||
_, err := RunCmdOnRootlesNode(cmd, serverNodeNames[0]) | ||
return err | ||
}, "600s", "5s").Should(Succeed()) | ||
}) | ||
|
||
It("Returns node metrics", func() { | ||
cmd := "kubectl top node" | ||
_, err := RunCmdOnRootlesNode(cmd, serverNodeNames[0]) | ||
Expect(err).NotTo(HaveOccurred()) | ||
}) | ||
|
||
It("Runs an interactive command a pod", func() { | ||
cmd := "kubectl run busybox --rm -it --restart=Never --image=rancher/mirrored-library-busybox:1.34.1 -- uname -a" | ||
_, err := RunCmdOnRootlesNode(cmd, serverNodeNames[0]) | ||
Expect(err).NotTo(HaveOccurred()) | ||
}) | ||
|
||
It("Collects logs from a pod", func() { | ||
cmd := "kubectl logs -n kube-system -l app.kubernetes.io/name=traefik -c traefik" | ||
_, err := RunCmdOnRootlesNode(cmd, serverNodeNames[0]) | ||
Expect(err).NotTo(HaveOccurred()) | ||
}) | ||
|
||
It("Kills the cluster", func() { | ||
err := KillK3sCluster(append(serverNodeNames, agentNodeNames...)) | ||
Expect(err).NotTo(HaveOccurred()) | ||
}) | ||
}) | ||
//Context("Verify server fails to start with bootstrap token", func() { | ||
// It("Fails to start with a meaningful error", func() { | ||
// tokenYAML := "token: aaaaaa.bbbbbbbbbbbbbbbb" | ||
// err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), tokenYAML) | ||
// Expect(err).ToNot(HaveOccurred()) | ||
// Eventually(func(g Gomega) { | ||
// logs, err := e2e.GetJournalLogs(serverNodeNames[0]) | ||
// g.Expect(err).NotTo(HaveOccurred()) | ||
// g.Expect(logs).To(ContainSubstring("failed to normalize server token")) | ||
// }, "120s", "5s").Should(Succeed()) | ||
// | ||
// }) | ||
// It("Kills the cluster", func() { | ||
// err := KillK3sCluster(append(serverNodeNames, agentNodeNames...)) | ||
// Expect(err).NotTo(HaveOccurred()) | ||
// }) | ||
//}) | ||
}) | ||
|
||
var failed bool
var _ = AfterEach(func() {
	failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
	if failed && !*ci {
		fmt.Println("FAILED!")
	} else {
		Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
		Expect(e2e.DestroyCluster()).To(Succeed())
	}
})
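The suite above leans on a RunCmdOnRootlesNode helper that this commit adds in one of the two changed files not shown here. As a rough, hypothetical sketch only (the real helper may differ), it would need to run the given command over `vagrant ssh` as the unprivileged vagrant user, with the session environment that `systemctl --user` expects:

// Hypothetical sketch, not the helper added by this commit: run a command on a node
// over `vagrant ssh` as the unprivileged vagrant user. Exporting XDG_RUNTIME_DIR is
// what lets `systemctl --user` reach the per-user systemd instance from a
// non-login shell.
package rootless

import (
	"fmt"
	"os/exec"
)

func RunCmdOnRootlesNode(cmd string, nodename string) (string, error) {
	injectEnv := fmt.Sprintf("XDG_RUNTIME_DIR=/run/user/$(id -u) %s", cmd)
	out, err := exec.Command("vagrant", "ssh", nodename, "-c", injectEnv).CombinedOutput()
	if err != nil {
		return string(out), fmt.Errorf("command %q failed on node %s: %v", cmd, nodename, err)
	}
	return string(out), nil
}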