add private registry e2e test #7653

Merged · 5 commits · Jun 9, 2023
8 changes: 8 additions & 0 deletions tests/e2e/README.md
@@ -49,6 +49,14 @@ Install the necessary vagrant plugins with the following command:
 ```bash
 vagrant plugin install vagrant-libvirt vagrant-scp vagrant-k3s vagrant-reload
 ```
+### Kubectl
+
+For Linux:
+```bash
+curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
+sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
+```
+If this does not work, or you are on a different system, see the [official tutorial](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/).
 
 ## Running
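A quick way to verify the kubectl install above (no cluster required):

```bash
kubectl version --client
```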
4 changes: 2 additions & 2 deletions tests/e2e/multiclustercidr/multiclustercidr_test.go
@@ -143,7 +143,7 @@ var _ = Describe("Verify MultiClusterCIDR Configuration", Ordered, func() {
 			Expect(err).NotTo(HaveOccurred())
 			for _, pod := range pods {
 				if pod.Node == "agent-0" {
-					Expect(pod.NodeIP).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.248.")), pod.Name)
+					Expect(pod.IP).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.248.")), pod.Name)
 				}
 			}
 		})
@@ -260,7 +260,7 @@ var _ = Describe("Verify MultiClusterCIDR Configuration", Ordered, func() {
 			Expect(err).NotTo(HaveOccurred())
 			for _, pod := range pods {
 				if pod.Node == "agent-0" {
-					Expect(pod.NodeIP).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:248")), pod.Name)
+					Expect(pod.IP).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:248")), pod.Name)
 				}
 			}
 		})
131 changes: 131 additions & 0 deletions tests/e2e/privateregistry/Vagrantfile
@@ -0,0 +1,131 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
  ["server-0", "server-1", "server-2", "agent-0", "agent-1"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
  ['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "etcd")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# VirtualBox >= 6.1.28 requires `/etc/vbox/networks.conf` for expanded private networks
NETWORK_PREFIX = "10.10.10"
install_type = ""

def provision(vm, role, role_num, node_num)
  vm.box = NODE_BOXES[node_num]
  vm.hostname = role
  # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
  node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
  vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"

  scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
  vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
  load vagrant_defaults

  defaultOSConfigure(vm)
  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)

  vm.provision "shell", inline: "ping -c 2 k3s.io"

  # The formatting on this is a little weird, but it allows inserting variables
  # and still using the heredoc formatting with escaped quotes
  writePrivateRegistry = <<~'SCRIPT'.chomp % {net: NETWORK_PREFIX}
    mkdir -p /etc/rancher/k3s/
    echo "mirrors:
      my-registry.local:
        endpoint:
          - \"http://%{net}.100:5000\"" > /etc/rancher/k3s/registries.yaml
  SCRIPT

  setInsecureRegistryPolicy = <<~'SCRIPT'.chomp % {net: NETWORK_PREFIX}
    mkdir -p /etc/docker/
    echo "{ \"insecure-registries\" : [\"%{net}.100:5000\"] }" > /etc/docker/daemon.json
  SCRIPT

  if role.include?("server") && role_num == 0
    vm.provision "insecure-registry", type: "shell", inline: setInsecureRegistryPolicy
    vm.provision "private-registry", type: "shell", inline: writePrivateRegistry
    dockerInstall(vm)

    vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "server "
      k3s.config = <<~YAML
        token: vagrant
        node-external-ip: #{NETWORK_PREFIX}.100
        flannel-iface: eth1
        cluster-init: true
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end

  elsif role.include?("server") && role_num != 0
    vm.provision "shell", inline: writePrivateRegistry

    vm.provision 'k3s-secondary-server', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "server"
      k3s.config = <<~YAML
        server: "https://#{NETWORK_PREFIX}.100:6443"
        token: vagrant
        node-external-ip: #{node_ip}
        flannel-iface: eth1
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  end

  if role.include?("agent")
    vm.provision "shell", inline: writePrivateRegistry

    vm.provision 'k3s-agent', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "agent"
      k3s.config = <<~YAML
        server: "https://#{NETWORK_PREFIX}.100:6443"
        token: vagrant
        node-external-ip: #{node_ip}
        flannel-iface: eth1
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  end
  if vm.box.to_s.include?("microos")
    vm.provision 'k3s-reload', type: 'reload', run: 'once'
    if !EXTERNAL_DB.empty?
      vm.provision "shell", inline: "docker start #{EXTERNAL_DB}"
    end
  end
end


Vagrant.configure("2") do |config|
config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
# Default provider is libvirt, virtualbox is only provided as a backup
config.vm.provider "libvirt" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end
config.vm.provider "virtualbox" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end

if NODE_ROLES.kind_of?(String)
NODE_ROLES = NODE_ROLES.split(" ", -1)
end
if NODE_BOXES.kind_of?(String)
NODE_BOXES = NODE_BOXES.split(" ", -1)
end

# Must iterate on the index, vagrant does not understand iterating
# over the node roles themselves
NODE_ROLES.length.times do |i|
name = NODE_ROLES[i]
role_num = name.split("-", -1).pop.to_i
config.vm.define name do |node|
provision(node.vm, name, role_num, i)
end
end
end
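
For reference, with the default NETWORK_PREFIX of 10.10.10, the writePrivateRegistry script above renders the following /etc/rancher/k3s/registries.yaml on every node, redirecting pulls of my-registry.local images to the plain-HTTP registry on server-0:

```yaml
mirrors:
  my-registry.local:
    endpoint:
      - "http://10.10.10.100:5000"
```

Likewise, setInsecureRegistryPolicy writes the following /etc/docker/daemon.json on server-0, so the local Docker daemon will push to that registry over HTTP:

```json
{ "insecure-registries" : ["10.10.10.100:5000"] }
```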
162 changes: 162 additions & 0 deletions tests/e2e/privateregistry/privateregistry_test.go
@@ -0,0 +1,162 @@
package validatecluster

import (
	"flag"
	"fmt"
	"os"
	"strings"
	"testing"

	"github.com/k3s-io/k3s/tests/e2e"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Valid nodeOS:
// generic/ubuntu2004, generic/centos7, generic/rocky8,
// opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 1, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")

// Environment Variables Info:
// E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd)
// E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master)
// E2E_REGISTRY: true/false (default: false)

func Test_E2EPrivateRegistry(t *testing.T) {
	RegisterFailHandler(Fail)
	flag.Parse()
	suiteConfig, reporterConfig := GinkgoConfiguration()
	RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig)
}

var (
	kubeConfigFile  string
	serverNodeNames []string
	agentNodeNames  []string
)

var _ = ReportAfterEach(e2e.GenReport)

var _ = Describe("Verify Create", Ordered, func() {
Context("Cluster :", func() {
It("Starts up with no issues", func() {
var err error
if *local {
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks Node and Pod Status", func() {
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)

fmt.Printf("\nFetching Pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
})

It("Create new private registry", func() {
registry, err := e2e.RunCmdOnNode("sudo docker run -d -p 5000:5000 --restart=always --name registry registry:2 ", serverNodeNames[0])
fmt.Println(registry)
Expect(err).NotTo(HaveOccurred())

})
It("ensures registry is working", func() {
a, err := e2e.RunCmdOnNode("sudo docker ps -a | grep registry\n", serverNodeNames[0])
fmt.Println(a)
Expect(err).NotTo(HaveOccurred())

})
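
		// A manual spot-check of the registry contents from any node (hypothetical,
		// assuming the default NETWORK_PREFIX; registry:2 serves the Docker Registry v2 API):
		//   curl http://10.10.10.100:5000/v2/_catalog
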
It("Should pull and image from dockerhub and send it to private registry", func() {
cmd := "sudo docker pull nginx"
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)

nodeIP, err := e2e.FetchNodeExternalIP(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())

cmd = "sudo docker tag nginx " + nodeIP + ":5000/my-webpage"
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)

cmd = "sudo docker push " + nodeIP + ":5000/my-webpage"
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)

cmd = "sudo docker image remove nginx " + nodeIP + ":5000/my-webpage"
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
})
It("Should create and validate deployment with private registry on", func() {
res, err := e2e.RunCmdOnNode("sudo kubectl create deployment my-webpage --image=my-registry.local/my-webpage", serverNodeNames[0])
fmt.Println(res)
Expect(err).NotTo(HaveOccurred())

var pod e2e.Pod
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
for _, p := range pods {
if strings.Contains(p.Name, "my-webpage") {
pod = p
}
}
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pod.Status).Should(Equal("Running"))
g.Expect(pod.Node).Should(Equal(agentNodeNames[0]))
}, "60s", "5s").Should(Succeed())

cmd := "curl " + pod.IP
Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).To(ContainSubstring("Welcome to nginx!"))
})

})
})

var failed bool
var _ = AfterEach(func() {
	failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
	if failed && !*ci {
		fmt.Println("FAILED!")
	} else {
		r1, err := e2e.RunCmdOnNode("sudo docker rm -f registry", serverNodeNames[0])
		Expect(err).NotTo(HaveOccurred(), r1)
		r2, err := e2e.RunCmdOnNode("sudo kubectl delete deployment my-webpage", serverNodeNames[0])
		Expect(err).NotTo(HaveOccurred(), r2)
		Expect(e2e.DestroyCluster()).To(Succeed())
		Expect(os.Remove(kubeConfigFile)).To(Succeed())
	}
})
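
A sketch of how this suite is typically invoked, assuming the standard k3s e2e workflow from the repository root (the flag values shown are the defaults declared above):

```bash
go test -v -timeout=30m ./tests/e2e/privateregistry/... \
  -nodeOS=generic/ubuntu2004 -serverCount=1 -agentCount=1
```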
30 changes: 19 additions & 11 deletions tests/e2e/testutils.go
@@ -35,7 +35,7 @@ type Pod struct {
 	Ready     string
 	Status    string
 	Restarts  string
-	NodeIP    string
+	IP        string
 	Node      string
 }

@@ -384,16 +384,10 @@ func ParseNodes(kubeConfig string, print bool) ([]Node, error) {
 	return nodes, nil
 }
 
-func ParsePods(kubeConfig string, print bool) ([]Pod, error) {
+func formatPods(input string) ([]Pod, error) {
 	pods := make([]Pod, 0, 10)
-	podList := ""
-
-	cmd := "kubectl get pods -o wide --no-headers -A --kubeconfig=" + kubeConfig
-	res, _ := RunCommand(cmd)
-	res = strings.TrimSpace(res)
-	podList = res
-
-	split := strings.Split(res, "\n")
+	input = strings.TrimSpace(input)
+	split := strings.Split(input, "\n")
 	for _, rec := range split {
 		fields := strings.Fields(string(rec))
 		if len(fields) < 8 {
@@ -405,11 +399,25 @@ func ParsePods(kubeConfig string, print bool) ([]Pod, error) {
 			Ready:     fields[2],
 			Status:    fields[3],
 			Restarts:  fields[4],
-			NodeIP:    fields[6],
+			IP:        fields[6],
 			Node:      fields[7],
 		}
 		pods = append(pods, pod)
 	}
 	return pods, nil
 }

+func ParsePods(kubeConfig string, print bool) ([]Pod, error) {
+	podList := ""
+
+	cmd := "kubectl get pods -o wide --no-headers -A --kubeconfig=" + kubeConfig
+	res, _ := RunCommand(cmd)
+	podList = strings.TrimSpace(res)
+
+	pods, err := formatPods(res)
+	if err != nil {
+		return nil, err
+	}
+	if print {
+		fmt.Println(podList)
+	}
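
Splitting formatPods out of ParsePods separates parsing from command execution, so the parser can be exercised against canned `kubectl get pods -o wide --no-headers -A` output without a cluster. A hypothetical in-package sketch (not part of this PR; formatPods is unexported, so such a test would have to live in package e2e):

```go
package e2e

import "testing"

// TestFormatPods feeds formatPods one canned line of wide kubectl output and
// checks the positional fields it extracts (IP is column 7, Node is column 8).
func TestFormatPods(t *testing.T) {
	input := "kube-system  coredns-59b4f5bbd5-abcde  1/1  Running  0  5m  10.42.0.3  server-0  <none>  <none>\n"
	pods, err := formatPods(input)
	if err != nil {
		t.Fatal(err)
	}
	if len(pods) != 1 {
		t.Fatalf("expected 1 pod, got %d", len(pods))
	}
	if pods[0].IP != "10.42.0.3" || pods[0].Node != "server-0" || pods[0].Status != "Running" {
		t.Errorf("unexpected pod fields: %+v", pods[0])
	}
}
```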