Merge pull request #185 from adrienjt/upgrade-dependencies
upgrade dependencies, support k8s 1.24 through at least 1.27
adrienjt committed Jul 24, 2023
2 parents f7314d9 + 6c710f3 commit 3081591
Showing 26 changed files with 723 additions and 812 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/go.yml
@@ -52,7 +52,7 @@ jobs:
strategy:
fail-fast: false
matrix:
k8s_version: [ "1.21", "1.22", "1.23", "1.24", "1.25", "1.26" ]
k8s_version: [ "1.24", "1.25", "1.26", "1.27" ]
experimental: [ false ]
# workflow succeeds even if experimental job fails,
# but commit/PR check/status still appears as failure overall,
4 changes: 3 additions & 1 deletion charts/multicluster-scheduler/templates/deploy.yaml
@@ -110,7 +110,9 @@ spec:
containers:
- name: proxy-scheduler
image: {{ .Values.scheduler.image.repository }}:{{ default .Chart.AppVersion .Values.scheduler.image.tag }}
args: ["--config", "/etc/admiralty/proxy-scheduler-config"]
args:
- --config=/etc/admiralty/proxy-scheduler-config
- --pod-max-in-unschedulable-pods-duration=60s
env:
- name: CLUSTER_NAME
value: {{ .Values.clusterName }}
4 changes: 2 additions & 2 deletions charts/multicluster-scheduler/templates/webhook.yaml
@@ -9,12 +9,12 @@ webhooks:
- clientConfig:
caBundle: Cg==
{{- if .Values.debug.controllerManager }}
url: "https://172.17.0.1:9443/mutate-v1-pod"
url: "https://172.17.0.1:9443/mutate--v1-pod"
{{- else }}
service:
name: {{ include "fullname" . }}
namespace: {{ .Release.Namespace }}
path: /mutate-v1-pod
path: /mutate--v1-pod
{{- end }}
failurePolicy: Fail
name: {{ include "fullname" . }}.multicluster.admiralty.io
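A note on the path change above: /mutate-v1-pod becomes /mutate--v1-pod because the agent now registers the pod webhook through controller-runtime's webhook builder (see cmd/agent/main.go below), which derives the path from the object's GroupVersionKind as /mutate-<group>-<version>-<kind>. Pods belong to the empty core group, hence the double dash. A minimal sketch of that naming convention, assuming standard controller-runtime behavior rather than anything in this repository:

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// mutatePath mirrors controller-runtime's default mutating-webhook path convention.
func mutatePath(gvk schema.GroupVersionKind) string {
	return "/mutate-" + strings.ReplaceAll(gvk.Group, ".", "-") + "-" +
		gvk.Version + "-" + strings.ToLower(gvk.Kind)
}

func main() {
	// The core API group is empty, so two dashes end up adjacent.
	fmt.Println(mutatePath(schema.GroupVersionKind{Version: "v1", Kind: "Pod"}))
	// Output: /mutate--v1-pod
}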
77 changes: 36 additions & 41 deletions cmd/agent/main.go
Expand Up @@ -23,27 +23,9 @@ import (
"os"
"time"

"admiralty.io/multicluster-scheduler/pkg/controllers/cleanup"
"admiralty.io/multicluster-scheduler/pkg/leaderelection"
"admiralty.io/multicluster-service-account/pkg/config"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
vklog "github.com/virtual-kubelet/virtual-kubelet/log"
logruslogger "github.com/virtual-kubelet/virtual-kubelet/log/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/rest"
"k8s.io/klog"
"k8s.io/sample-controller/pkg/signals"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/webhook"

agentconfig "admiralty.io/multicluster-scheduler/pkg/config/agent"
"admiralty.io/multicluster-scheduler/pkg/controllers/chaperon"
"admiralty.io/multicluster-scheduler/pkg/controllers/cleanup"
"admiralty.io/multicluster-scheduler/pkg/controllers/feedback"
"admiralty.io/multicluster-scheduler/pkg/controllers/follow"
"admiralty.io/multicluster-scheduler/pkg/controllers/follow/ingress"
@@ -54,21 +36,34 @@ import (
clientset "admiralty.io/multicluster-scheduler/pkg/generated/clientset/versioned"
informers "admiralty.io/multicluster-scheduler/pkg/generated/informers/externalversions"
"admiralty.io/multicluster-scheduler/pkg/generated/informers/externalversions/multicluster/v1alpha1"
"admiralty.io/multicluster-scheduler/pkg/leaderelection"
"admiralty.io/multicluster-scheduler/pkg/vk/csr"
"admiralty.io/multicluster-scheduler/pkg/vk/http"
"admiralty.io/multicluster-scheduler/pkg/vk/node"
"admiralty.io/multicluster-scheduler/pkg/webhooks/proxypod"
"admiralty.io/multicluster-service-account/pkg/config"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
vklog "github.com/virtual-kubelet/virtual-kubelet/log"
logruslogger "github.com/virtual-kubelet/virtual-kubelet/log/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"k8s.io/sample-controller/pkg/signals"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/manager"
)

// TODO standardize logging

func main() {
stopCh := signals.SetupSignalHandler()
ctx, cancel := context.WithCancel(context.Background())
go func() {
<-stopCh
cancel()
}()
ctx := signals.SetupSignalHandler()

o := parseFlags()
setupLogging(ctx, o)
@@ -86,20 +81,20 @@

if o.leaderElect {
leaderelection.Run(ctx, ns, "admiralty-controller-manager", k, func(ctx context.Context) {
runControllers(ctx, stopCh, agentCfg, cfg, k)
runControllers(ctx, agentCfg, cfg, k)
})
} else {
runControllers(ctx, stopCh, agentCfg, cfg, k)
runControllers(ctx, agentCfg, cfg, k)
}
}

func runControllers(ctx context.Context, stopCh <-chan struct{}, agentCfg agentconfig.Config, cfg *rest.Config, k *kubernetes.Clientset) {
func runControllers(ctx context.Context, agentCfg agentconfig.Config, cfg *rest.Config, k *kubernetes.Clientset) {
var nodeStatusUpdaters map[string]resources.NodeStatusUpdater
if len(agentCfg.Targets) > 0 {
nodeStatusUpdaters = startVirtualKubeletControllers(ctx, agentCfg, k)
}
startOldStyleControllers(ctx, stopCh, agentCfg, cfg, k, nodeStatusUpdaters)
<-stopCh
startOldStyleControllers(ctx, agentCfg, cfg, k, nodeStatusUpdaters)
<-ctx.Done()
}

type startable interface {
@@ -109,12 +104,11 @@ type startable interface {

type runnable interface {
// Run blocks
Run(threadiness int, stopCh <-chan struct{}) error
Run(ctx context.Context, threadiness int) error
}

func startOldStyleControllers(
ctx context.Context,
stopCh <-chan struct{},
agentCfg agentconfig.Config,
cfg *rest.Config,
k *kubernetes.Clientset,
@@ -219,12 +213,12 @@ func startOldStyleControllers(
factories, controllers = addClusterScopedFactoriesAndControllers(ctx, agentCfg, k, customClient, factories, controllers)

for _, f := range factories {
f.Start(stopCh)
f.Start(ctx.Done())
}

for _, c := range controllers {
c := c
go func() { utilruntime.Must(c.Run(1, stopCh)) }()
go func() { utilruntime.Must(c.Run(ctx, 1)) }()
}
}

@@ -285,21 +279,22 @@ func addClusterScopedFactoriesAndControllers(
}

func startWebhook(ctx context.Context, cfg *rest.Config, agentCfg agentconfig.Config) {
webhookMgr, err := manager.New(cfg, manager.Options{
Port: 9443,
CertDir: "/tmp/k8s-webhook-server/serving-certs",
mgr, err := manager.New(cfg, manager.Options{
MetricsBindAddress: "0",
HealthProbeBindAddress: ":8080",
})
utilruntime.Must(err)

hookServer := webhookMgr.GetWebhookServer()
hookServer.Register("/mutate-v1-pod", &webhook.Admission{Handler: proxypod.NewHandler(agentCfg.GetKnownFinalizersByNamespace())})
err = builder.WebhookManagedBy(mgr).
For(&corev1.Pod{}).
WithDefaulter(proxypod.Mutator{KnownFinalizers: agentCfg.GetKnownFinalizersByNamespace()}).
Complete()
utilruntime.Must(err)

utilruntime.Must(webhookMgr.AddReadyzCheck("webhook-ready", hookServer.StartedChecker()))
utilruntime.Must(mgr.AddReadyzCheck("webhook-ready", mgr.GetWebhookServer().StartedChecker()))

go func() {
utilruntime.Must(webhookMgr.Start(ctx))
utilruntime.Must(mgr.Start(ctx))
}()
}

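For context on the startWebhook change above: builder.WebhookManagedBy(mgr).For(&corev1.Pod{}).WithDefaulter(...) expects a type implementing controller-runtime's admission.CustomDefaulter interface, replacing the webhook.Admission handler that was previously registered by hand. A rough sketch of the shape proxypod.Mutator therefore needs; the field type and mutation body are illustrative assumptions, not this repository's code:

package proxypod

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// Mutator defaults incoming proxy pods. KnownFinalizers matches the field name
// set in cmd/agent/main.go; its type here is only a guess for illustration.
type Mutator struct {
	KnownFinalizers map[string][]string
}

// Default satisfies admission.CustomDefaulter: it mutates the object in place
// and returns a non-nil error to reject the admission request.
func (m Mutator) Default(ctx context.Context, obj runtime.Object) error {
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		return fmt.Errorf("expected a Pod but got %T", obj)
	}
	// ... mutate pod here (e.g. add finalizers/tolerations) as the old handler did ...
	_ = pod
	return nil
}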
17 changes: 6 additions & 11 deletions cmd/restarter/main.go
@@ -1,5 +1,5 @@
/*
* Copyright 2021 The Multicluster-Scheduler Authors.
* Copyright 2023 The Multicluster-Scheduler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -36,12 +36,7 @@ import (
)

func main() {
stopCh := signals.SetupSignalHandler()
ctx, cancel := context.WithCancel(context.Background())
go func() {
<-stopCh
cancel()
}()
ctx := signals.SetupSignalHandler()

cfg, ns, err := config.ConfigAndNamespaceForKubeconfigAndContext("", "")
utilruntime.Must(err)
@@ -62,18 +57,18 @@ func main() {
customInformerFactory.Multicluster().V1alpha1().Targets(),
kubeInformerFactory.Core().V1().Secrets())

kubeInformerFactory.Start(stopCh)
customInformerFactory.Start(stopCh)
kubeInformerFactory.Start(ctx.Done())
customInformerFactory.Start(ctx.Done())

var leaderElect bool
flag.BoolVar(&leaderElect, "leader-elect", false, "Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.")
flag.Parse()

if leaderElect {
leaderelection.Run(ctx, ns, "admiralty-restarter", k, func(ctx context.Context) {
utilruntime.Must(targetCtrl.Run(1, stopCh))
utilruntime.Must(targetCtrl.Run(ctx, 1))
})
} else {
utilruntime.Must(targetCtrl.Run(1, stopCh))
utilruntime.Must(targetCtrl.Run(ctx, 1))
}
}
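The restarter follows the same migration as the agent: signals.SetupSignalHandler now returns a context.Context directly, informer factories are started with ctx.Done(), and controllers expose Run(ctx, threadiness) instead of taking a stop channel. A self-contained sketch of that pattern; the controller below is a stand-in, not one of the repository's controllers:

package main

import (
	"context"
	"fmt"
	"time"
)

type runnable interface {
	// Run blocks until ctx is cancelled.
	Run(ctx context.Context, threadiness int) error
}

type demoController struct{}

func (demoController) Run(ctx context.Context, threadiness int) error {
	// A real controller would start `threadiness` workers here, then wait
	// for cancellation instead of reading from a stop channel.
	<-ctx.Done()
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	var c runnable = demoController{}
	fmt.Println(c.Run(ctx, 1)) // returns nil once the context is cancelled
}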
23 changes: 4 additions & 19 deletions cmd/scheduler/main.go
@@ -1,5 +1,5 @@
/*
* Copyright 2020 The Multicluster-Scheduler Authors.
* Copyright 2023 The Multicluster-Scheduler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,37 +17,22 @@
package main

import (
"math/rand"
"os"
"time"

"admiralty.io/multicluster-scheduler/pkg/scheduler_plugins/candidate"
"admiralty.io/multicluster-scheduler/pkg/scheduler_plugins/proxy"
"github.com/spf13/pflag"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
"k8s.io/component-base/cli"
scheduler "k8s.io/kubernetes/cmd/kube-scheduler/app"
)

func main() {
rand.Seed(time.Now().UnixNano())

// BEWARE candidate and proxy must run in different processes, because a scheduler only processes one pod at a time
// and proxy waits on candidates in filter plugin

command := scheduler.NewSchedulerCommand(
scheduler.WithPlugin(candidate.Name, candidate.New),
scheduler.WithPlugin(proxy.Name, proxy.New))

// TODO: once we switch everything over to Cobra commands, we can go back to calling
// utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
// normalize func and add the go flag set by hand.
pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
// utilflag.InitFlags()
logs.InitLogs()
defer logs.FlushLogs()

if err := command.Execute(); err != nil {
os.Exit(1)
}
code := cli.Run(command)
os.Exit(code)
}
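The scheduler entrypoint now delegates to k8s.io/component-base/cli.Run, which sets up and flushes logging and turns the command's error into a process exit code, so the manual pflag normalization and logs.InitLogs/FlushLogs calls can go (and math/rand no longer needs explicit seeding on recent Go versions). For reference, the generic shape of that pattern with a placeholder command rather than the kube-scheduler command:

package main

import (
	"os"

	"github.com/spf13/cobra"
	"k8s.io/component-base/cli"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, args []string) error {
			// a real program (e.g. the kube-scheduler command) does its work here
			return nil
		},
	}
	// cli.Run initializes and flushes logs, executes the command,
	// and maps any error to a non-zero exit code.
	os.Exit(cli.Run(cmd))
}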