Add create-namespace support
This also fixes the tests to ensure that charts are deleted at the end
of each context; previously the test charts were left behind, leaving
the test cluster dirty and preventing the test namespace from being
deleted properly.

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
brandond committed May 18, 2023
1 parent 57fde46 commit d6349af
Showing 9 changed files with 227 additions and 136 deletions.
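The test-cleanup behavior described in the commit message rides on ginkgo v2's ordered containers, which the test/framework diff below migrates to. A minimal sketch of the pattern, assuming each Describe context deletes its charts in AfterAll — the clientset accessor, namespace, and chart name here are illustrative, not taken from this commit:

package e2e

import (
	"context"

	helmcln "github.com/k3s-io/helm-controller/pkg/generated/clientset/versioned"
	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var helmClient helmcln.Interface // assumed: initialized during suite setup

var _ = ginkgo.Describe("helm chart install", ginkgo.Ordered, func() {
	ginkgo.AfterAll(func() {
		// Delete the test chart at the end of the context so the test
		// namespace can be deleted instead of hanging on leftovers.
		// The "HelmV1" accessor, namespace, and name are illustrative.
		_ = helmClient.HelmV1().HelmCharts("helm-test").
			Delete(context.Background(), "traefik-example", metav1.DeleteOptions{})
	})
	// ... specs that create and assert on the chart ...
})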
31 changes: 15 additions & 16 deletions go.mod
@@ -3,12 +3,12 @@ module github.com/k3s-io/helm-controller
 go 1.19
 
 require (
-	github.com/onsi/ginkgo v1.14.0
-	github.com/onsi/gomega v1.20.1
+	github.com/onsi/ginkgo/v2 v2.9.4
+	github.com/onsi/gomega v1.27.6
 	github.com/rancher/lasso v0.0.0-20221227210133-6ea88ca2fbcc
-	github.com/rancher/wrangler v1.1.1
+	github.com/rancher/wrangler v1.1.1-0.20230425173236-39a4707f0689
 	github.com/rancher/wrangler-cli v0.0.0-20220624114648-479c5692ba22
-	github.com/sirupsen/logrus v1.8.1
+	github.com/sirupsen/logrus v1.9.0
 	github.com/spf13/cobra v1.4.0
 	github.com/stretchr/testify v1.8.1
 	k8s.io/api v0.25.4
@@ -26,18 +26,19 @@ require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/emicklei/go-restful/v3 v3.8.0 // indirect
 	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
-	github.com/fsnotify/fsnotify v1.4.9 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
-	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-logr/logr v1.2.4 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
 	github.com/go-openapi/jsonreference v0.19.5 // indirect
 	github.com/go-openapi/swag v0.19.14 // indirect
+	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/gnostic v0.5.7-v3refs // indirect
-	github.com/google/go-cmp v0.5.8 // indirect
+	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/gofuzz v1.1.0 // indirect
+	github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect
 	github.com/imdario/mergo v0.3.12 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
@@ -47,27 +48,25 @@
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-	github.com/nxadm/tail v1.4.4 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/prometheus/client_golang v1.12.1 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.32.1 // indirect
 	github.com/prometheus/procfs v0.7.3 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	golang.org/x/mod v0.8.0 // indirect
-	golang.org/x/net v0.7.0 // indirect
+	golang.org/x/mod v0.10.0 // indirect
+	golang.org/x/net v0.9.0 // indirect
 	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
 	golang.org/x/sync v0.1.0 // indirect
-	golang.org/x/sys v0.5.0 // indirect
-	golang.org/x/term v0.5.0 // indirect
-	golang.org/x/text v0.7.0 // indirect
+	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/term v0.7.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
 	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
-	golang.org/x/tools v0.6.0 // indirect
+	golang.org/x/tools v0.8.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/protobuf v1.28.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
-	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/apiextensions-apiserver v0.25.4 // indirect
80 changes: 33 additions & 47 deletions go.sum

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions pkg/apis/helm.cattle.io/v1/types.go
@@ -20,6 +20,7 @@ type HelmChart struct {
 
 type HelmChartSpec struct {
 	TargetNamespace string `json:"targetNamespace,omitempty"`
+	CreateNamespace bool   `json:"createNamespace,omitempty"`
 	Chart           string `json:"chart,omitempty"`
 	Version         string `json:"version,omitempty"`
 	Repo            string `json:"repo,omitempty"`
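For context on the new field, a minimal sketch of a HelmChart object that opts in — the chart name and namespaces are illustrative, not from this commit:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1 "github.com/k3s-io/helm-controller/pkg/apis/helm.cattle.io/v1"
)

func main() {
	// The release should land in a namespace that may not exist yet;
	// CreateNamespace asks the install job to create it on the fly.
	chart := v1.HelmChart{
		ObjectMeta: metav1.ObjectMeta{Name: "traefik", Namespace: "kube-system"},
		Spec: v1.HelmChartSpec{
			Chart:           "traefik",        // illustrative
			TargetNamespace: "traefik-system", // where the release goes
			CreateNamespace: true,             // the new field
		},
	}
	_ = chart
}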
19 changes: 14 additions & 5 deletions pkg/controllers/chart/chart.go
@@ -22,11 +22,11 @@ import (
 	batch "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
 	rbac "k8s.io/api/rbac/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/kubernetes"
@@ -52,6 +52,7 @@ const (
 )
 
 var (
+	jobGVK          = schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"}
 	commaRE         = regexp.MustCompile(`\\*,`)
 	deletePolicy    = metav1.DeletePropagationForeground
 	DefaultJobImage = "rancher/klipper-helm:v0.8.0-build20230510"
@@ -102,7 +103,7 @@ func Register(ctx context.Context,
 	c.apply = apply.
 		WithCacheTypes(helms, confs, jobs, crbs, sas, cm, s).
 		WithStrictCaching().
-		WithPatcher(batch.SchemeGroupVersion.WithKind("Job"), c.jobPatcher)
+		WithPatcher(jobs.GroupVersionKind(), c.jobPatcher)
 
 	relatedresource.Watch(ctx, "resolve-helm-chart-from-config", c.resolveHelmChartFromConfig, helms, confs)
 
@@ -156,7 +157,7 @@ func (c *Controller) resolveHelmChartFromConfig(namespace, name string, obj runt
 	if conf, ok := obj.(*v1.HelmChartConfig); ok {
 		chart, err := c.helmCache.Get(conf.Namespace, conf.Name)
 		if err != nil {
-			if !errors.IsNotFound(err) {
+			if !apierrors.IsNotFound(err) {
 				return nil, err
 			}
 		}
@@ -216,6 +217,7 @@ func (c *Controller) OnRemove(key string, chart *v1.HelmChart) (*v1.HelmChart, e
 			AllowClusterScoped: true,
 		}).
 		WithOwner(chart).
+		WithCacheTypes(c.jobs).
 		WithSetID("helm-chart-registration").
 		ApplyObjects(append(objs, expectedJob)...)
 	if err != nil {
@@ -228,7 +230,7 @@ func (c *Controller) OnRemove(key string, chart *v1.HelmChart) (*v1.HelmChart, e
 
 	// once we have run the above logic, we can now check if the job is complete
 	job, err := c.jobCache.Get(chart.Namespace, expectedJob.Name)
-	if errors.IsNotFound(err) {
+	if apierrors.IsNotFound(err) {
 		// the above apply should have created it, something is wrong.
 		// if you are here, there must be a bug in the code.
 		return chart, fmt.Errorf("could not perform uninstall: expected job %s/%s to exist after apply, but not found", chart.Namespace, expectedJob.Name)
@@ -258,6 +260,8 @@ func (c *Controller) OnRemove(key string, chart *v1.HelmChart) (*v1.HelmChart, e
 			AllowClusterScoped: true,
 		}).
 		WithOwner(chart).
+		WithCacheTypes(c.jobs).
+		WithGVK(job.GroupVersionKind()).
 		WithSetID("helm-chart-registration").
 		ApplyObjects()
 	if err != nil {
@@ -315,7 +319,7 @@ func (c *Controller) getJobAndRelatedResources(chart *v1.HelmChart) (*batch.Job,
 	// check if a HelmChartConfig is registered for this Helm chart
 	config, err := c.confCache.Get(chart.Namespace, chart.Name)
 	if err != nil {
-		if !errors.IsNotFound(err) {
+		if !apierrors.IsNotFound(err) {
 			return nil, nil, err
 		}
 	}
@@ -580,6 +584,11 @@ func args(chart *v1.HelmChart) []string {
 	if spec.TargetNamespace != "" {
 		args = append(args, "--namespace", spec.TargetNamespace)
 	}
+
+	if spec.CreateNamespace {
+		args = append(args, "--create-namespace")
+	}
+
 	if spec.Version != "" {
 		args = append(args, "--version", spec.Version)
 	}
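To make the flag wiring concrete, here is a reduced reimplementation of just the namespace handling from args() above — a sketch for illustration, not the real function, which handles many more fields:

package main

import "fmt"

// namespaceArgs mirrors only the namespace handling of args() in
// pkg/controllers/chart; everything else is omitted.
func namespaceArgs(targetNamespace string, createNamespace bool) []string {
	var args []string
	if targetNamespace != "" {
		args = append(args, "--namespace", targetNamespace)
	}
	if createNamespace {
		args = append(args, "--create-namespace")
	}
	return args
}

func main() {
	// Prints: [--namespace traefik-system --create-namespace]
	// klipper-helm passes these through to helm install/upgrade.
	fmt.Println(namespaceArgs("traefik-system", true))
}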
6 changes: 3 additions & 3 deletions scripts/e2e
@@ -3,14 +3,14 @@ set -ex
 
 K3S_WAIT_TIME=15
 K3S_NODE_NAME=helmtest-node1
-K3S_VERSION=v1.23.3-k3s1
+K3S_VERSION=v1.25.9-k3s1
 
 cd $(dirname $0)/..
 
 setup_k8s(){
     # Using k3s with embedded helm controller disabled
     docker pull rancher/k3s:$K3S_VERSION
-    docker run --detach --privileged --rm --publish 6443 --name $K3S_NODE_NAME --hostname $K3S_NODE_NAME rancher/k3s:$K3S_VERSION server --disable-helm-controller
+    docker run --detach --privileged --rm --publish 6443 --name $K3S_NODE_NAME --hostname $K3S_NODE_NAME rancher/k3s:$K3S_VERSION server --disable-helm-controller --disable=metrics-server,traefik
     K3S_NODE_IP=$(docker inspect $K3S_NODE_NAME --format='{{index .NetworkSettings.Networks "bridge" "IPAddress"}}')
     sleep $K3S_WAIT_TIME
     docker exec $K3S_NODE_NAME sed "s/127.0.0.1/$K3S_NODE_IP/" /etc/rancher/k3s/k3s.yaml > $PWD/kube_config_cluster.yml
@@ -21,7 +21,7 @@ teardown_k8s(){
 }
 
 load_helm_image(){
-    cat bin/helm-controller-amd64.tar | docker exec -i $K3S_NODE_NAME ctr --namespace k8s.io image import -
+    docker image save $HELM_CONTROLLER_IMAGE | docker exec -i $K3S_NODE_NAME ctr --namespace k8s.io image import -
 }
 
 trap teardown_k8s EXIT
1 change: 0 additions & 1 deletion scripts/package
@@ -46,7 +46,6 @@ if [ -e ${DOCKERFILE}.${ARCH} ]; then
 fi
 
 docker build -f ${DOCKERFILE} -t ${IMAGE} .
-docker image save ${IMAGE} -o bin/helm-controller-${ARCH}.tar
 echo ${IMAGE} > bin/helm-controller-image.txt
 
 echo Built ${IMAGE}
87 changes: 35 additions & 52 deletions test/framework/framework.go
@@ -2,18 +2,20 @@ package framework
 
 import (
 	"context"
+	"fmt"
+	"io"
 	"os"
 	"time"
 
 	"k8s.io/client-go/util/retry"
 
 	v1 "github.com/k3s-io/helm-controller/pkg/apis/helm.cattle.io/v1"
 	"github.com/k3s-io/helm-controller/pkg/controllers/common"
+	helmcrd "github.com/k3s-io/helm-controller/pkg/crd"
 	helmcln "github.com/k3s-io/helm-controller/pkg/generated/clientset/versioned"
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/rancher/wrangler/pkg/condition"
 	"github.com/rancher/wrangler/pkg/crd"
-	"github.com/rancher/wrangler/pkg/schemas/openapi"
 	"github.com/sirupsen/logrus"
 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
@@ -43,20 +45,41 @@ type Framework struct {
 
 func New() (*Framework, error) {
 	framework := &Framework{}
-	ginkgo.BeforeSuite(framework.BeforeSuite)
-	ginkgo.AfterSuite(framework.AfterSuite)
+	ginkgo.BeforeAll(framework.BeforeAll)
+	ginkgo.AfterAll(framework.AfterAll)
 	return framework, nil
 }
 
-func (f *Framework) BeforeSuite() {
+func (f *Framework) BeforeAll() {
 	f.beforeFramework()
 	err := f.setupController(context.TODO())
 	if err != nil {
 		errExit("Failed to set up helm controller", err)
 	}
 }
 
-func (f *Framework) AfterSuite() {
+func (f *Framework) AfterAll() {
+	if ginkgo.CurrentSpecReport().Failed() {
+		podList, _ := f.ClientSet.CoreV1().Pods(f.Namespace).List(context.Background(), metav1.ListOptions{})
+		for _, pod := range podList.Items {
+			containerNames := []string{}
+			for _, container := range pod.Spec.InitContainers {
+				containerNames = append(containerNames, container.Name)
+			}
+			for _, container := range pod.Spec.Containers {
+				containerNames = append(containerNames, container.Name)
+			}
+			for _, container := range containerNames {
+				reportName := fmt.Sprintf("podlogs-%s-%s", pod.Name, container)
+				logs := f.ClientSet.CoreV1().Pods(f.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{Container: container})
+				if logStreamer, err := logs.Stream(context.Background()); err == nil {
+					if podLogs, err := io.ReadAll(logStreamer); err == nil {
+						ginkgo.AddReportEntry(reportName, string(podLogs))
+					}
+				}
+			}
+		}
+	}
 	if err := f.teardownController(context.TODO()); err != nil {
 		errExit("Failed to teardown helm controller", err)
 	}
@@ -73,7 +96,7 @@ func (f *Framework) beforeFramework() {
 	errExit("Failed to initiate a client set", err)
 	crdFactory, err := crd.NewFactoryFromClient(config)
 	errExit("Failed initiate factory client", err)
-	f.crds, err = getCRDs()
+	f.crds = helmcrd.List()
 	errExit("Failed to construct helm crd", err)
 
 	f.HelmClientSet = helmcln
@@ -91,50 +114,6 @@ func errExit(msg string, err error) {
 	logrus.Panicf("%s: %v", msg, err)
 }
 
-func getCRDs() ([]crd.CRD, error) {
-	var crds []crd.CRD
-	for _, crdFn := range []func() (*crd.CRD, error){
-		ChartCRD,
-		ConfigCRD,
-	} {
-		crdef, err := crdFn()
-		if err != nil {
-			return nil, err
-		}
-		crds = append(crds, *crdef)
-	}
-
-	return crds, nil
-}
-
-func ChartCRD() (*crd.CRD, error) {
-	prototype := v1.NewHelmChart("", "", v1.HelmChart{})
-	schema, err := openapi.ToOpenAPIFromStruct(*prototype)
-	if err != nil {
-		return nil, err
-	}
-	return &crd.CRD{
-		GVK:        prototype.GroupVersionKind(),
-		PluralName: v1.HelmChartResourceName,
-		Status:     true,
-		Schema:     schema,
-	}, nil
-}
-
-func ConfigCRD() (*crd.CRD, error) {
-	prototype := v1.NewHelmChartConfig("", "", v1.HelmChartConfig{})
-	schema, err := openapi.ToOpenAPIFromStruct(*prototype)
-	if err != nil {
-		return nil, err
-	}
-	return &crd.CRD{
-		GVK:        prototype.GroupVersionKind(),
-		PluralName: v1.HelmChartConfigResourceName,
-		Status:     true,
-		Schema:     schema,
-	}, nil
-}
-
 func (f *Framework) NewHelmChart(name, chart, version, helmVersion string, set map[string]intstr.IntOrString) *v1.HelmChart {
 	return &v1.HelmChart{
 		ObjectMeta: metav1.ObjectMeta{
@@ -155,9 +134,13 @@ func (f *Framework) NewHelmChart(name, chart, version, helmVersion string, set m
 }
 
 func (f *Framework) WaitForRelease(chart *v1.HelmChart, labelSelector labels.Selector, timeout time.Duration, count int) (secrets []corev1.Secret, err error) {
+	namespace := chart.Namespace
+	if chart.Spec.TargetNamespace != "" {
+		namespace = chart.Spec.TargetNamespace
+	}
 
 	return secrets, wait.Poll(5*time.Second, timeout, func() (bool, error) {
-		list, err := f.ClientSet.CoreV1().Secrets(chart.Namespace).List(context.TODO(), metav1.ListOptions{
+		list, err := f.ClientSet.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{
 			LabelSelector: labelSelector.String(),
 		})
 		if err != nil {
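Tying the framework changes to the feature, a hedged sketch of an end-to-end flow — helper names come from this file, chart creation via the generated clientset is elided, and the label selector assumes helm v3's standard release-secret labels:

package e2e

import (
	"time"

	"github.com/k3s-io/helm-controller/test/framework"
	"k8s.io/apimachinery/pkg/labels"
)

// waitForCreateNamespaceRelease sketches the flow; creating the HelmChart
// through f.HelmClientSet is elided.
func waitForCreateNamespaceRelease(f *framework.Framework) error {
	chart := f.NewHelmChart("traefik-example", "stable/traefik", "1.86.1", "v3", nil)
	chart.Spec.TargetNamespace = "traefik-ns" // does not exist yet
	chart.Spec.CreateNamespace = true         // install job creates it

	// ... create the HelmChart resource via f.HelmClientSet here ...

	// helm v3 labels release secrets with owner=helm and name=<release>.
	selector := labels.SelectorFromSet(labels.Set{"owner": "helm", "name": "traefik-example"})

	// With the fix above, WaitForRelease polls the *target* namespace
	// (traefik-ns) for release secrets, not the chart's own namespace.
	_, err := f.WaitForRelease(chart, selector, 120*time.Second, 1)
	return err
}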
