packet: Add cpu_manager_policy to workers
This allows a user to choose the CPU Manager policy on a worker pool.
Possible values are `none` and `static`.

For the `static` policy to work, the kubelet also needs a static allocation
of system-reserved and Kubernetes-reserved CPUs. This commit therefore also
adds defaults of 300m cores for `kube_reserved_cpu` and 1500m cores for
`system_reserved_cpu`, applied when `cpu_manager_policy` is set to `static`.
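
As a usage sketch (the pool name and `count` below are placeholders; only `cpu_manager_policy` is new), a Lokomotive worker pool opts in like this:

worker_pool "pool-1" {
  count = 2

  # New in this commit. Omitting it (or setting "none") keeps the kubelet's
  # default behaviour; "static" also applies the reserved-CPU defaults
  # described above.
  cpu_manager_policy = "static"
}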

closes: #1337

Signed-off-by: knrt10 <kautilya@kinvolk.io>
Co-authored-by: Suraj Deshmukh <suraj@kinvolk.io>
knrt10 and surajssd committed Aug 13, 2021
1 parent 09579eb commit 7d66505
Showing 8 changed files with 122 additions and 6 deletions.
@@ -360,6 +360,13 @@ storage:
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: "$${docker_cgroup_driver}"
%{~ if cpu_manager_policy == "static" ~}
cpuManagerPolicy: ${cpu_manager_policy}
systemReserved:
  cpu: ${system_reserved_cpu}
kubeReserved:
  cpu: ${kube_reserved_cpu}
%{~ endif ~}
EOF
- path: /opt/wait-for-dns
  filesystem: root
@@ -195,3 +195,21 @@ variable "enable_tls_bootstrap" {
  description = "Enable TLS Bootstrap for Kubelet."
  type        = bool
}

variable "cpu_manager_policy" {
description = "CPU Manager policy to use for the worker pool. Possible values: `none`, `static`."
default = "none"
type = string
}

variable "kube_reserved_cpu" {
description = "CPU cores reserved for the Worker Kubernetes components like kubelet, etc."
default = "300m"
type = string
}

variable "system_reserved_cpu" {
description = "CPU cores reserved for the host services like Docker, sshd, kernel, etc."
default = "1500m"
type = string
}
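
For illustration only (the module source path below is hypothetical, and other required module arguments are omitted), a consumer of this Terraform module could override the new variables like so:

module "worker" {
  # Hypothetical source path, for illustration.
  source = "./modules/packet/flatcar-linux/kubernetes/workers"

  cpu_manager_policy  = "static"
  kube_reserved_cpu   = "300m"  # matches the default above
  system_reserved_cpu = "1500m" # matches the default above
}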
@@ -69,6 +69,9 @@ data "ct_config" "ignitions" {
      cluster_name         = var.cluster_name
      dns_zone             = var.dns_zone
      enable_tls_bootstrap = var.enable_tls_bootstrap
      cpu_manager_policy   = var.cpu_manager_policy
      system_reserved_cpu  = var.system_reserved_cpu
      kube_reserved_cpu    = var.kube_reserved_cpu
    }
  )
  platform = "packet"
1 change: 1 addition & 0 deletions docs/configuration-reference/platforms/equinix-metal.md
@@ -241,6 +241,7 @@ node_type = var.custom_default_worker_type
| `worker_pool.count` | Number of workers in the worker pool. Can be changed afterwards to add or delete workers. | 1 | number | true |
| `worker_pool.clc_snippets` | Flatcar Container Linux Config snippets for nodes in the worker pool. | [] | list(string) | false |
| `worker_pool.tags` | List of tags that will be propagated to nodes in the worker pool. | - | map(string) | false |
| `worker_pool.cpu_manager_policy` | CPU Manager policy to use. Possible values: `none`, `static`. | "none" | string | false |
| `worker_pool.disable_bgp` | Disable BGP on nodes. Nodes won't be able to connect to Equinix Metal BGP peers. | false | bool | false |
| `worker_pool.ipxe_script_url` | Boot via iPXE. Required for arm64. | - | string | false |
| `worker_pool.os_arch` | Flatcar Container Linux architecture to install (amd64, arm64). | "amd64" | string | false |
12 changes: 6 additions & 6 deletions pkg/assets/generated_assets.go

Large diffs are not rendered by default.

21 changes: 21 additions & 0 deletions pkg/platform/packet/packet.go
@@ -46,6 +46,7 @@ const (
type workerPool struct {
	Name             string `hcl:"pool_name,label"`
	Count            int    `hcl:"count"`
	CPUManagerPolicy string `hcl:"cpu_manager_policy,optional"`
	Facility         string `hcl:"facility,optional"`
	DisableBGP       bool   `hcl:"disable_bgp,optional"`
	IPXEScriptURL    string `hcl:"ipxe_script_url,optional"`
@@ -513,6 +514,7 @@ func (c *config) checkValidConfig() hcl.Diagnostics {
	diagnostics = append(diagnostics, c.checkWorkerPoolNamesUnique()...)
	diagnostics = append(diagnostics, c.checkReservationIDs()...)
	diagnostics = append(diagnostics, c.validateOSVersion()...)
	diagnostics = append(diagnostics, c.checkCPUManagerPolicy()...)

	if c.ConntrackMaxPerCore < 0 {
		diagnostics = append(diagnostics, &hcl.Diagnostic{
@@ -614,6 +616,25 @@ func (c *config) validateOSVersion() hcl.Diagnostics {
	return diagnostics
}

func (c *config) checkCPUManagerPolicy() hcl.Diagnostics {
	var diagnostics hcl.Diagnostics

	for _, w := range c.WorkerPools {
		switch w.CPUManagerPolicy {
		case "", "none", "static":
			continue
		default:
			diagnostics = append(diagnostics, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "invalid cpu_manager_policy",
				Detail:   fmt.Sprintf("Worker pool '%s' has invalid cpu_manager_policy '%s'", w.Name, w.CPUManagerPolicy),
			})
		}
	}

	return diagnostics
}
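
As a sketch of what this check rejects (a hypothetical pool, with other required fields omitted), the following configuration would fail validation with the diagnostic `Worker pool 'pool-1' has invalid cpu_manager_policy 'dynamic'`:

worker_pool "pool-1" {
  count = 1

  # Anything other than "", "none" or "static" produces a diagnostic.
  cpu_manager_policy = "dynamic"
}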

// checkEachReservation checks that hardware reservations are in the correct
// format and, when it will cause problems, that reservation IDs values in this
// pool are not mixed between using "next-available" and specific UUIDs, as this
62 changes: 62 additions & 0 deletions pkg/platform/packet/packet_test.go
@@ -19,6 +19,7 @@ import (
"testing"

"github.com/google/go-cmp/cmp"
"github.com/hashicorp/hcl/v2"
)

func TestCheckNotEmptyWorkersEmpty(t *testing.T) {
@@ -479,3 +480,64 @@ func Test_resolveNodePrivateCIDRs(t *testing.T) { //nolint:funlen
		})
	}
}

func Test_config_checkCPUManagerPolicy(t *testing.T) {
	tests := []struct {
		name        string
		WorkerPools []workerPool
		want        hcl.Diagnostics
	}{
		{
			name: "valid_cpu_manager_policy_values",
			WorkerPools: []workerPool{
				{CPUManagerPolicy: ""},
				{CPUManagerPolicy: "static"},
				{CPUManagerPolicy: "none"},
			},
			want: nil,
		},
		{
			name: "invalid_cpu_manager_policy_value",
			WorkerPools: []workerPool{
				{CPUManagerPolicy: ""},
				{Name: "foobar", CPUManagerPolicy: "foobar"},
			},
			want: hcl.Diagnostics{&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "invalid cpu_manager_policy",
				Detail:   "Worker pool 'foobar' has invalid cpu_manager_policy 'foobar'",
			}},
		},
		{
			name: "all_invalid_values",
			WorkerPools: []workerPool{
				{Name: "foo", CPUManagerPolicy: "foo"},
				{Name: "bar", CPUManagerPolicy: "bar"},
			},
			want: hcl.Diagnostics{
				&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "invalid cpu_manager_policy",
					Detail:   "Worker pool 'foo' has invalid cpu_manager_policy 'foo'",
				},
				&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "invalid cpu_manager_policy",
					Detail:   "Worker pool 'bar' has invalid cpu_manager_policy 'bar'",
				},
			},
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			c := &config{
				WorkerPools: tt.WorkerPools,
			}

			if got := c.checkCPUManagerPolicy(); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("config.checkCPUManagerPolicy() = %v, want %v", got, tt.want)
			}
		})
	}
}
4 changes: 4 additions & 0 deletions pkg/platform/packet/template.go
@@ -159,6 +159,10 @@ EOF
cluster_name = "{{$.Config.ClusterName}}"
{{- if $pool.CPUManagerPolicy }}
cpu_manager_policy = "{{$pool.CPUManagerPolicy}}"
{{- end}}
{{- if $pool.Tags }}
tags = [
{{- range $key, $value := $pool.Tags }}
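
For a pool with the policy set, this template renders roughly the following into the generated Terraform configuration (the cluster name is a placeholder). When `CPUManagerPolicy` is empty, the `{{- if }}` guard omits the line and the Terraform variable default of "none" applies:

cluster_name = "my-cluster"
cpu_manager_policy = "static"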
