
Commit

init
davidspek committed Apr 8, 2022
1 parent 0c8663b commit ecc3976
Showing 5 changed files with 122 additions and 56 deletions.
5 changes: 5 additions & 0 deletions controllers/packetcluster_controller.go
@@ -137,6 +137,11 @@ func (r *PacketClusterReconciler) reconcileNormal(ctx context.Context, clusterSc
}
}

if err := r.PacketClient.EnableProjectBGP(packetCluster.Spec.ProjectID); err != nil {
log.Error(err, "error enabling bgp for project")
return ctrl.Result{}, err
}

clusterScope.PacketCluster.Status.Ready = true
conditions.MarkTrue(packetCluster, infrav1.NetworkInfrastructureReadyCondition)

32 changes: 9 additions & 23 deletions controllers/packetmachine_controller.go
@@ -316,22 +316,13 @@ func (r *PacketMachineReconciler) reconcile(ctx context.Context, machineScope *s
ExtraTags: packet.DefaultCreateTags(machineScope.Namespace(), machineScope.Machine.Name, machineScope.Cluster.Name),
}

// TODO: see if this can be removed with kube-vip in place
// when the node is a control plane we should check if the elastic ip
// for this cluster is not assigned. If it is free we can prepare the
// current node to use it.
// when the node is a control plane we need the elastic IP
// to template out the kube-vip deployment
if machineScope.IsControlPlane() {
controlPlaneEndpoint, _ = r.PacketClient.GetIPByClusterIdentifier(
machineScope.Cluster.Namespace,
machineScope.Cluster.Name,
machineScope.PacketCluster.Spec.ProjectID)
if len(controlPlaneEndpoint.Assignments) == 0 {
a := corev1.NodeAddress{
Type: corev1.NodeExternalIP,
Address: controlPlaneEndpoint.Address,
}
addrs = append(addrs, a)
}
createDeviceReq.ControlPlaneEndpoint = controlPlaneEndpoint.Address
}

@@ -362,6 +353,11 @@ func (r *PacketMachineReconciler) reconcile(ctx context.Context, machineScope *s
machineScope.SetProviderID(dev.ID)
machineScope.SetInstanceStatus(infrav1.PacketResourceStatus(dev.State))

if err := r.PacketClient.EnsureNodeBGPEnabled(dev.ID); err != nil {
// Do not treat an error enabling bgp on the machine as fatal
return ctrl.Result{RequeueAfter: time.Second * 20}, fmt.Errorf("failed to enable bgp on machine %s: %w", machineScope.Name(), err)
}

deviceAddr := r.PacketClient.GetDeviceAddresses(dev)
machineScope.SetAddresses(append(addrs, deviceAddr...))

@@ -376,22 +372,12 @@ func (r *PacketMachineReconciler) reconcile(ctx context.Context, machineScope *s
case infrav1.PacketResourceStatusRunning:
log.Info("Machine instance is active", "instance-id", machineScope.GetInstanceID())

// TODO: see if this can be removed with kube-vip in place
// This logic is here because an elastic ip can be assigned only to an
// active node. It needs to be a control plane and the IP should not be
// assigned to anything at this point.
// This logic is here because an elastic ip is needed to create
// the kube-vip template
controlPlaneEndpoint, _ = r.PacketClient.GetIPByClusterIdentifier(
machineScope.Cluster.Namespace,
machineScope.Cluster.Name,
machineScope.PacketCluster.Spec.ProjectID)
if len(controlPlaneEndpoint.Assignments) == 0 && machineScope.IsControlPlane() {
if _, _, err := r.PacketClient.DeviceIPs.Assign(dev.ID, &packngo.AddressStruct{
Address: controlPlaneEndpoint.Address,
}); err != nil {
log.Error(err, "err assigining elastic ip to control plane. retrying...")
return ctrl.Result{RequeueAfter: time.Second * 20}, nil
}
}
machineScope.SetReady()
conditions.MarkTrue(machineScope.PacketMachine, infrav1.DeviceReadyCondition)

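The hunk above returns both a fixed RequeueAfter and a non-nil error when enabling BGP on the device fails. For context, a minimal controller-runtime sketch of the two requeue styles; the helper is illustrative and not part of this commit, and when a non-nil error is returned the error path normally takes precedence (the object is requeued with rate-limited backoff rather than after the fixed delay).

```go
package controllers

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

// requeuePatterns is an illustrative helper (not part of this commit) showing the
// two ways a reconciler can ask for an object to be processed again.
func requeuePatterns(err error) (ctrl.Result, error) {
	if err != nil {
		// Non-nil error: controller-runtime requeues the object with its
		// rate-limited backoff; a RequeueAfter in the Result is effectively ignored.
		return ctrl.Result{}, err
	}
	// Nil error with RequeueAfter: the object is revisited after a fixed delay,
	// e.g. while waiting for a device to finish provisioning.
	return ctrl.Result{RequeueAfter: 20 * time.Second}, nil
}
```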
66 changes: 66 additions & 0 deletions pkg/cloud/packet/client.go
@@ -23,6 +23,7 @@ import (
"net"
"net/http"
"os"
"strconv"
"strings"
"text/template"

@@ -38,6 +39,9 @@ const (
apiTokenVarName = "PACKET_API_KEY" //nolint:gosec
clientName = "CAPP-v1beta1"
ipxeOS = "custom_ipxe"
envVarLocalASN = "METAL_LOCAL_ASN"
envVarBGPPass = "METAL_BGP_PASS"
DefaultLocalASN = 65000
)

var (
Expand Down Expand Up @@ -230,6 +234,68 @@ func (p *Client) CreateIP(namespace, clusterName, projectID, facility string) (n
return ip, nil
}

// EnableProjectBGP enables BGP on the project
func (p *Client) EnableProjectBGP(projectID string) error {
// first check if it is enabled before trying to create it
bgpConfig, _, err := p.BGPConfig.Get(projectID, &packngo.GetOptions{})
// if we already have a config, just return
// we need some extra handling logic because the API always returns 200, even if
// no BGP config is in place.
// We treat it as "valid config already exists" only if ALL of the following are true:
// - no error
// - bgpConfig struct exists
// - bgpConfig struct has non-blank ID
// - bgpConfig struct does not have Status=="disabled"
if err == nil && bgpConfig != nil && bgpConfig.ID != "" && strings.ToLower(bgpConfig.Status) != "disabled" {
return nil
}

// get the local ASN
localASN := os.Getenv(envVarLocalASN)
var outLocalASN int
switch {
case localASN != "":
localASNNo, err := strconv.Atoi(localASN)
if err != nil {
return fmt.Errorf("env var %s must be a number, was %s: %w", envVarLocalASN, localASN, err)
}
outLocalASN = localASNNo
default:
outLocalASN = DefaultLocalASN
}

var outBGPPass string
bgpPass := os.Getenv(envVarBGPPass)
if bgpPass != "" {
outBGPPass = bgpPass
}

// we did not have a valid one, so create it
req := packngo.CreateBGPConfigRequest{
Asn: outLocalASN,
Md5: outBGPPass,
DeploymentType: "local",
UseCase: "kubernetes-load-balancer",
}
_, err = p.BGPConfig.Create(projectID, req)
return err
}

// EnsureNodeBGPEnabled checks if the node has BGP enabled, and enables it if it does not
func (p *Client) EnsureNodeBGPEnabled(id string) error {
// fortunately, this is idempotent, so just create
req := packngo.CreateBGPSessionRequest{
AddressFamily: "ipv4",
}
_, response, err := p.BGPSessions.Create(id, req)
// if we already had one, then we can ignore the error
// this really should be a 409, but 422 is what is returned
if response.StatusCode == 422 && strings.Contains(fmt.Sprintf("%s", err), "already has session") {
err = nil
}
return err
}

func (p *Client) GetIPByClusterIdentifier(namespace, name, projectID string) (packngo.IPAddressReservation, error) {
var err error
var reservedIP packngo.IPAddressReservation
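For reference, a standalone sketch of the project-level BGP flow that EnableProjectBGP wraps, written directly against packngo. The project ID is a placeholder, NewClientWithAuth is assumed to be the packngo constructor taking a consumer name and API token, and the ASN/password handling is reduced to the provider's defaults (the provider itself reads METAL_LOCAL_ASN and METAL_BGP_PASS as shown above).

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/packethost/packngo"
)

func main() {
	// Assumed constructor: consumer name + API token, default HTTP client.
	c := packngo.NewClientWithAuth("bgp-example", os.Getenv("PACKET_API_KEY"), nil)
	projectID := "00000000-0000-0000-0000-000000000000" // placeholder

	// The API returns 200 even when no BGP config exists, so inspect the payload.
	cfg, _, err := c.BGPConfig.Get(projectID, &packngo.GetOptions{})
	if err == nil && cfg != nil && cfg.ID != "" && strings.ToLower(cfg.Status) != "disabled" {
		fmt.Println("project BGP already enabled")
		return
	}

	// Create a local-deployment BGP config with the provider's default ASN.
	req := packngo.CreateBGPConfigRequest{
		Asn:            65000,
		DeploymentType: "local",
		UseCase:        "kubernetes-load-balancer",
	}
	if _, err := c.BGPConfig.Create(projectID, req); err != nil {
		fmt.Fprintln(os.Stderr, "enable project BGP:", err)
		os.Exit(1)
	}
	fmt.Println("project BGP enabled")

	// Per-device sessions (EnsureNodeBGPEnabled) are created separately via
	// c.BGPSessions.Create(deviceID, packngo.CreateBGPSessionRequest{AddressFamily: "ipv4"}).
}
```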
12 changes: 3 additions & 9 deletions templates/cluster-template-crs-cni.yaml
@@ -139,21 +139,16 @@ spec:
cloud-provider: external
provider-id: equinixmetal://{{ `{{ v1.instance_id }}` }}
postKubeadmCommands:
- |
cat <<EOF >> /etc/network/interfaces
auto lo:0
iface lo:0 inet static
address {{ .controlPlaneEndpoint }}
netmask 255.255.255.255
EOF
- systemctl restart networking
- |
if [ -f "/run/kubeadm/kubeadm.yaml" ]; then
export KUBECONFIG=/etc/kubernetes/admin.conf
export CPEM_YAML=https://github.com/equinix/cloud-provider-equinix-metal/releases/download/v3.4.0/deployment.yaml
export SECRET_DATA='cloud-sa.json=''{"apiKey": "{{ .apiKey }}","projectID": "${PROJECT_ID}", "eipTag": "cluster-api-provider-packet:cluster-id:${CLUSTER_NAME}", "eipHealthCheckUseHostIP": true}'''
kubectl create secret generic -n kube-system metal-cloud-config --from-literal="$${SECRET_DATA}" || (sleep 1 && kubectl create secret generic -n kube-system metal-cloud-config --from-literal="$${SECRET_DATA}") || (sleep 1 && kubectl create secret generic -n kube-system metal-cloud-config --from-literal="$${SECRET_DATA}")
kubectl apply -f $${CPEM_YAML} || (sleep 1 && kubectl apply -f $${CPEM_YAML}) || (sleep 1 && kubectl apply -f $${CPEM_YAML})
curl https://gist.githubusercontent.com/DavidSpek/5fbfc8b66ccbf36b47b2ca292965e7bc/raw/a395f146a08592c4f687dd8d471c61c966c1ab7d/kube-vip-control-plane.yaml --output kube-vip-control-plane.yaml
sed -i 's/$${LB_ADDRESS}/"{{ .controlPlaneEndpoint }}"/g' kube-vip-control-plane.yaml
kubectl apply -f kube-vip-control-plane.yaml || (sleep 1 && kubectl apply -f kube-vip-control-plane.yaml) || (sleep 1 && kubectl apply -f kube-vip-control-plane.yaml)
fi
preKubeadmCommands:
- sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab
Expand Down Expand Up @@ -184,7 +179,6 @@ spec:
- systemctl daemon-reload
- systemctl enable containerd
- systemctl start containerd
- ping -c 3 -q {{ .controlPlaneEndpoint }} && echo OK || ip addr add {{ .controlPlaneEndpoint }} dev lo
machineTemplate:
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
63 changes: 39 additions & 24 deletions templates/cluster-template.yaml
@@ -47,27 +47,41 @@ spec:
net.bridge.bridge-nf-call-ip6tables = 1
EOF
- sysctl --system
- apt-get -y update
- DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl
- curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
- echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list
- apt-get update -y
- TRIMMED_KUBERNETES_VERSION=$(echo {{ .kubernetesVersion }} | sed 's/\./\\./g' | sed 's/^v//')
- RESOLVED_KUBERNETES_VERSION=$(apt-cache policy kubelet | awk -v VERSION=$${TRIMMED_KUBERNETES_VERSION} '$1~ VERSION { print $1 }' | head -n1)
- apt-get install -y ca-certificates socat jq ebtables apt-transport-https cloud-utils prips containerd kubelet=$${RESOLVED_KUBERNETES_VERSION} kubeadm=$${RESOLVED_KUBERNETES_VERSION} kubectl=$${RESOLVED_KUBERNETES_VERSION}
- |
apt-get -y update
DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list
apt-get update -y
TRIMMED_KUBERNETES_VERSION=$(echo {{ .kubernetesVersion }} | sed 's/\./\\./g' | sed 's/^v//')
RESOLVED_KUBERNETES_VERSION=$(apt-cache policy kubelet | awk -v VERSION=$${TRIMMED_KUBERNETES_VERSION} '$1~ VERSION { print $1 }' | head -n1)
apt-get install -y ca-certificates socat jq ebtables apt-transport-https cloud-utils prips containerd kubelet=$${RESOLVED_KUBERNETES_VERSION} kubeadm=$${RESOLVED_KUBERNETES_VERSION} kubectl=$${RESOLVED_KUBERNETES_VERSION}
- systemctl daemon-reload
- systemctl enable containerd
- systemctl start containerd
- ping -c 3 -q {{ .controlPlaneEndpoint }} && echo OK || ip addr add {{ .controlPlaneEndpoint }} dev lo
- |
if [ -f "/run/kubeadm/kubeadm.yaml" ]; then
KVVERSION=$(curl -sL https://api.github.com/repos/kube-vip/kube-vip/releases | jq -r ".[0].name")
ctr image pull ghcr.io/kube-vip/kube-vip:$${KVVERSION}
mkdir -p /etc/kubernetes/manifests/
ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:$${KVVERSION} vip /kube-vip manifest pod --interface "lo" --vip "{{ .controlPlaneEndpoint }}" --controlplane --bgp --metal --metalKey "{{ .apiKey }}" --metalProjectID "b264ba74-f3bd-49f0-ae24-b0953765b7aa" | tee /etc/kubernetes/manifests/kube-vip.yaml
fi
# - ping -c 3 -q {{ .controlPlaneEndpoint }} && echo OK || ip addr add {{ .controlPlaneEndpoint }} dev lo
postKubeadmCommands:
# - |
# cat <<EOF >> /etc/network/interfaces
# auto lo:0
# iface lo:0 inet static
# address {{ .controlPlaneEndpoint }}
# netmask 255.255.255.255
# EOF
# - systemctl restart networking
- |
cat <<EOF >> /etc/network/interfaces
auto lo:0
iface lo:0 inet static
address {{ .controlPlaneEndpoint }}
netmask 255.255.255.255
EOF
- systemctl restart networking
if [ -f "/run/kubeadm/kubeadm-join-config.yaml" ]; then
KVVERSION=$(curl -sL https://api.github.com/repos/kube-vip/kube-vip/releases | jq -r ".[0].name")
ctr image pull ghcr.io/kube-vip/kube-vip:$${KVVERSION}
ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:$${KVVERSION} vip /kube-vip manifest pod --interface "lo" --vip "{{ .controlPlaneEndpoint }}" --controlplane --bgp --metal --metalKey "{{ .apiKey }}" --metalProjectID "b264ba74-f3bd-49f0-ae24-b0953765b7aa" | tee /etc/kubernetes/manifests/kube-vip.yaml
fi
- |
if [ -f "/run/kubeadm/kubeadm.yaml" ]; then
export KUBECONFIG=/etc/kubernetes/admin.conf
@@ -196,14 +210,15 @@ spec:
net.bridge.bridge-nf-call-ip6tables = 1
EOF
- sysctl --system
- apt-get -y update
- DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl
- curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
- echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list
- apt-get update -y
- TRIMMED_KUBERNETES_VERSION=$(echo {{ .kubernetesVersion }} | sed 's/\./\\./g' | sed 's/^v//')
- RESOLVED_KUBERNETES_VERSION=$(apt-cache policy kubelet | awk -v VERSION=$${TRIMMED_KUBERNETES_VERSION} '$1~ VERSION { print $1 }' | head -n1)
- apt-get install -y ca-certificates socat jq ebtables apt-transport-https cloud-utils prips containerd kubelet=$${RESOLVED_KUBERNETES_VERSION} kubeadm=$${RESOLVED_KUBERNETES_VERSION} kubectl=$${RESOLVED_KUBERNETES_VERSION}
- |
apt-get -y update
DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list
apt-get update -y
TRIMMED_KUBERNETES_VERSION=$(echo {{ .kubernetesVersion }} | sed 's/\./\\./g' | sed 's/^v//')
RESOLVED_KUBERNETES_VERSION=$(apt-cache policy kubelet | awk -v VERSION=$${TRIMMED_KUBERNETES_VERSION} '$1~ VERSION { print $1 }' | head -n1)
apt-get install -y ca-certificates socat jq ebtables apt-transport-https cloud-utils prips containerd kubelet=$${RESOLVED_KUBERNETES_VERSION} kubeadm=$${RESOLVED_KUBERNETES_VERSION} kubectl=$${RESOLVED_KUBERNETES_VERSION}
- systemctl daemon-reload
- systemctl enable containerd
- systemctl start containerd
