Skip to content

Commit

Permalink
Merge pull request #155 from projectsyn/fix/restart-holder-suppress-error
Browse files Browse the repository at this point in the history

Ignore `kubectl` error output when extracting current holder pod generation
  • Loading branch information
simu authored Feb 27, 2024
2 parents fe79022 + f4b5147 commit 83ab6f4
Show file tree
Hide file tree
Showing 4 changed files with 37 additions and 11 deletions.
15 changes: 13 additions & 2 deletions component/restart-holder-ds.libsonnet
Original file line number Diff line number Diff line change
Expand Up @@ -39,8 +39,11 @@ local script = |||
while true; do
# assumption: holder plugin daemonset is called
# `csi-cephfsplugin-holder-${cephcluster:name}`
cephfs_holder_wanted_gen=$(kubectl get ds csi-cephfsplugin-holder-%(cephcluster_name)s -ojsonpath='{.metadata.generation}')
rbd_holder_wanted_gen=$(kubectl get ds csi-rbdplugin-holder-%(cephcluster_name)s -ojsonpath='{.metadata.generation}')
# note: we don't care about the value of the variable if the daemonset
# isn't there, since we'll check for pods in a K8s `List` which will
# simply be empty if the plugin isn't enabled.
cephfs_holder_wanted_gen=$(kubectl get ds csi-cephfsplugin-holder-%(cephcluster_name)s -ojsonpath='{.metadata.generation}' 2>/dev/null)
rbd_holder_wanted_gen=$(kubectl get ds csi-rbdplugin-holder-%(cephcluster_name)s -ojsonpath='{.metadata.generation}' 2>/dev/null)
needs_update=$( (\
kubectl get pods -l app=csi-cephfsplugin-holder --field-selector spec.nodeName=${NODE_NAME} -ojson |\
jq --arg wanted_gen ${cephfs_holder_wanted_gen} \
Expand Down Expand Up @@ -86,11 +89,19 @@ local daemonset = kube.DaemonSet('syn-holder-updater') {
'non-daemonset pods are running on the node) and then deletes any ' +
'outdated csi holder pods. Outdated holder pods are identified by ' +
'comparing the DaemonSet generation with the pod generation.',
// set sync wave 10 for the daemonset to ensure that the ConfigMap is
// updated first.
'argocd.argoproj.io/sync-wave': '10',
},
namespace: params.namespace,
},
spec+: {
template+: {
metadata+: {
annotations+: {
'script-checksum': std.md5(script),
},
},
spec+: {
serviceAccountName: serviceaccount.metadata.name,
containers_: {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,11 @@ data:
while true; do
# assumption: holder plugin daemonset is called
# `csi-cephfsplugin-holder-${cephcluster:name}`
cephfs_holder_wanted_gen=$(kubectl get ds csi-cephfsplugin-holder-cluster -ojsonpath='{.metadata.generation}')
rbd_holder_wanted_gen=$(kubectl get ds csi-rbdplugin-holder-cluster -ojsonpath='{.metadata.generation}')
# note: we don't care about the value of the variable if the daemonset
# isn't there, since we'll check for pods in a K8s `List` which will
# simply be empty if the plugin isn't enabled.
cephfs_holder_wanted_gen=$(kubectl get ds csi-cephfsplugin-holder-cluster -ojsonpath='{.metadata.generation}' 2>/dev/null)
rbd_holder_wanted_gen=$(kubectl get ds csi-rbdplugin-holder-cluster -ojsonpath='{.metadata.generation}' 2>/dev/null)
needs_update=$( (\
kubectl get pods -l app=csi-cephfsplugin-holder --field-selector spec.nodeName=${NODE_NAME} -ojson |\
jq --arg wanted_gen ${cephfs_holder_wanted_gen} \
Expand Down Expand Up @@ -100,6 +103,7 @@ apiVersion: apps/v1
kind: DaemonSet
metadata:
annotations:
argocd.argoproj.io/sync-wave: '10'
syn.tools/description: DaemonSet which waits for node to be drained (by waiting
until no non-daemonset pods are running on the node) and then deletes any outdated
csi holder pods. Outdated holder pods are identified by comparing the DaemonSet
Expand All @@ -120,7 +124,8 @@ spec:
name: syn-holder-updater
template:
metadata:
annotations: {}
annotations:
script-checksum: 488da91788ef6e501cece9d3d67ff8b0
labels:
app.kubernetes.io/component: rook-ceph
app.kubernetes.io/managed-by: commodore
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,11 @@ data:
while true; do
# assumption: holder plugin daemonset is called
# `csi-cephfsplugin-holder-${cephcluster:name}`
cephfs_holder_wanted_gen=$(kubectl get ds csi-cephfsplugin-holder-cluster -ojsonpath='{.metadata.generation}')
rbd_holder_wanted_gen=$(kubectl get ds csi-rbdplugin-holder-cluster -ojsonpath='{.metadata.generation}')
# note: we don't care about the value of the variable if the daemonset
# isn't there, since we'll check for pods in a K8s `List` which will
# simply be empty if the plugin isn't enabled.
cephfs_holder_wanted_gen=$(kubectl get ds csi-cephfsplugin-holder-cluster -ojsonpath='{.metadata.generation}' 2>/dev/null)
rbd_holder_wanted_gen=$(kubectl get ds csi-rbdplugin-holder-cluster -ojsonpath='{.metadata.generation}' 2>/dev/null)
needs_update=$( (\
kubectl get pods -l app=csi-cephfsplugin-holder --field-selector spec.nodeName=${NODE_NAME} -ojson |\
jq --arg wanted_gen ${cephfs_holder_wanted_gen} \
Expand Down Expand Up @@ -100,6 +103,7 @@ apiVersion: apps/v1
kind: DaemonSet
metadata:
annotations:
argocd.argoproj.io/sync-wave: '10'
syn.tools/description: DaemonSet which waits for node to be drained (by waiting
until no non-daemonset pods are running on the node) and then deletes any outdated
csi holder pods. Outdated holder pods are identified by comparing the DaemonSet
Expand All @@ -120,7 +124,8 @@ spec:
name: syn-holder-updater
template:
metadata:
annotations: {}
annotations:
script-checksum: 488da91788ef6e501cece9d3d67ff8b0
labels:
app.kubernetes.io/component: rook-ceph
app.kubernetes.io/managed-by: commodore
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,11 @@ data:
while true; do
# assumption: holder plugin daemonset is called
# `csi-cephfsplugin-holder-${cephcluster:name}`
cephfs_holder_wanted_gen=$(kubectl get ds csi-cephfsplugin-holder-cluster -ojsonpath='{.metadata.generation}')
rbd_holder_wanted_gen=$(kubectl get ds csi-rbdplugin-holder-cluster -ojsonpath='{.metadata.generation}')
# note: we don't care about the value of the variable if the daemonset
# isn't there, since we'll check for pods in a K8s `List` which will
# simply be empty if the plugin isn't enabled.
cephfs_holder_wanted_gen=$(kubectl get ds csi-cephfsplugin-holder-cluster -ojsonpath='{.metadata.generation}' 2>/dev/null)
rbd_holder_wanted_gen=$(kubectl get ds csi-rbdplugin-holder-cluster -ojsonpath='{.metadata.generation}' 2>/dev/null)
needs_update=$( (\
kubectl get pods -l app=csi-cephfsplugin-holder --field-selector spec.nodeName=${NODE_NAME} -ojson |\
jq --arg wanted_gen ${cephfs_holder_wanted_gen} \
Expand Down Expand Up @@ -100,6 +103,7 @@ apiVersion: apps/v1
kind: DaemonSet
metadata:
annotations:
argocd.argoproj.io/sync-wave: '10'
syn.tools/description: DaemonSet which waits for node to be drained (by waiting
until no non-daemonset pods are running on the node) and then deletes any outdated
csi holder pods. Outdated holder pods are identified by comparing the DaemonSet
Expand All @@ -120,7 +124,8 @@ spec:
name: syn-holder-updater
template:
metadata:
annotations: {}
annotations:
script-checksum: 488da91788ef6e501cece9d3d67ff8b0
labels:
app.kubernetes.io/component: rook-ceph
app.kubernetes.io/managed-by: commodore
Expand Down

0 comments on commit 83ab6f4

Please sign in to comment.