ci: fix shell check failures
Signed-off-by: riya-singhal31 <rsinghal@redhat.com>
riya-singhal31 authored and mergify[bot] committed Apr 20, 2023
1 parent f12cd9c commit 44612fe
Showing 3 changed files with 17 additions and 17 deletions.
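
For context: the failures fixed here are ShellCheck SC2086 findings ("Double quote to prevent globbing and word splitting"). An unquoted expansion such as ${NAMESPACE} is split on whitespace and glob-expanded before the command runs, so a value containing a space or a wildcard would change the argument list. A minimal sketch of the difference, not taken from the repository (the NAMESPACE value is hypothetical):

    #!/usr/bin/env bash
    NAMESPACE="ceph csi"                 # hypothetical value containing a space
    printf '<%s> ' ${NAMESPACE}; echo    # unquoted: two args -> <ceph> <csi>
    printf '<%s> ' "${NAMESPACE}"; echo  # quoted: one arg -> <ceph csi>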
20 changes: 10 additions & 10 deletions scripts/install-helm.sh
@@ -148,7 +148,7 @@ install_cephcsi_helm_charts() {
         NAMESPACE="default"
     fi
 
-    kubectl_retry create namespace ${NAMESPACE}
+    kubectl_retry create namespace "${NAMESPACE}"
 
     # label the nodes uniformly for domain information
     for node in $(kubectl_retry get node -o jsonpath='{.items[*].metadata.name}'); do
@@ -170,19 +170,19 @@ install_cephcsi_helm_charts() {
     # install ceph-csi-cephfs and ceph-csi-rbd charts
     # shellcheck disable=SC2086
     "${HELM}" install --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-cephfsplugin-provisioner --set nodeplugin.fullnameOverride=csi-cephfsplugin --set configMapName=ceph-csi-config --set provisioner.replicaCount=1 --set-json='commonLabels={"app.kubernetes.io/name": "ceph-csi-cephfs", "app.kubernetes.io/managed-by": "helm"}' ${SET_SC_TEMPLATE_VALUES} ${CEPHFS_SECRET_TEMPLATE_VALUES} ${CEPHFS_CHART_NAME} "${SCRIPT_DIR}"/../charts/ceph-csi-cephfs
-    check_deployment_status app=ceph-csi-cephfs ${NAMESPACE}
-    check_daemonset_status app=ceph-csi-cephfs ${NAMESPACE}
+    check_deployment_status app=ceph-csi-cephfs "${NAMESPACE}"
+    check_daemonset_status app=ceph-csi-cephfs "${NAMESPACE}"
 
     # deleting configmaps as a workaround to avoid configmap already present
     # issue when installing ceph-csi-rbd
-    kubectl_retry delete cm ceph-csi-config --namespace ${NAMESPACE}
-    kubectl_retry delete cm ceph-config --namespace ${NAMESPACE}
+    kubectl_retry delete cm ceph-csi-config --namespace "${NAMESPACE}"
+    kubectl_retry delete cm ceph-config --namespace "${NAMESPACE}"
 
     # shellcheck disable=SC2086
     "${HELM}" install --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-rbdplugin-provisioner --set nodeplugin.fullnameOverride=csi-rbdplugin --set configMapName=ceph-csi-config --set provisioner.replicaCount=1 --set-json='commonLabels={"app.kubernetes.io/name": "ceph-csi-rbd", "app.kubernetes.io/managed-by": "helm"}' ${SET_SC_TEMPLATE_VALUES} ${RBD_SECRET_TEMPLATE_VALUES} ${RBD_CHART_NAME} "${SCRIPT_DIR}"/../charts/ceph-csi-rbd --set topology.enabled=true --set topology.domainLabels="{${NODE_LABEL_REGION},${NODE_LABEL_ZONE}}" --set provisioner.maxSnapshotsOnImage=3 --set provisioner.minSnapshotsOnImage=2
 
-    check_deployment_status app=ceph-csi-rbd ${NAMESPACE}
-    check_daemonset_status app=ceph-csi-rbd ${NAMESPACE}
+    check_deployment_status app=ceph-csi-rbd "${NAMESPACE}"
+    check_daemonset_status app=ceph-csi-rbd "${NAMESPACE}"
 
 }

@@ -197,9 +197,9 @@ cleanup_cephcsi_helm_charts() {
     if [ -z "$NAMESPACE" ]; then
         NAMESPACE="default"
     fi
-    "${HELM}" uninstall ${CEPHFS_CHART_NAME} --namespace ${NAMESPACE}
-    "${HELM}" uninstall ${RBD_CHART_NAME} --namespace ${NAMESPACE}
-    kubectl_retry delete namespace ${NAMESPACE}
+    "${HELM}" uninstall ${CEPHFS_CHART_NAME} --namespace "${NAMESPACE}"
+    "${HELM}" uninstall ${RBD_CHART_NAME} --namespace "${NAMESPACE}"
+    kubectl_retry delete namespace "${NAMESPACE}"
 }
 
 helm_reset() {
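
Note that the two "# shellcheck disable=SC2086" pragmas above the helm install commands are kept: variables such as ${SET_SC_TEMPLATE_VALUES} and ${CEPHFS_SECRET_TEMPLATE_VALUES} expand to zero or more whole flags, so word splitting is intentional on those lines and only the single-valued ${NAMESPACE} gains quotes. A sketch of that pattern (EXTRA_ARGS and the chart path are hypothetical):

    #!/usr/bin/env bash
    EXTRA_ARGS="--set a=1 --set b=2"   # may hold several flags, or be empty
    # splitting EXTRA_ARGS into separate flags is the point here
    # shellcheck disable=SC2086
    helm install demo ./chart ${EXTRA_ARGS}

A bash array expanded as "${ARGS[@]}" would avoid the disable altogether, but that is a larger refactor than this CI fix attempts.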
12 changes: 6 additions & 6 deletions scripts/install-snapshot.sh
@@ -27,21 +27,21 @@ function install_snapshot_controller() {
         namespace="kube-system"
     fi
 
-    create_or_delete_resource "create" ${namespace}
+    create_or_delete_resource "create" "${namespace}"
 
-    pod_ready=$(kubectl get pods -l app=snapshot-controller -n ${namespace} -o jsonpath='{.items[0].status.containerStatuses[0].ready}')
+    pod_ready=$(kubectl get pods -l app=snapshot-controller -n "${namespace}" -o jsonpath='{.items[0].status.containerStatuses[0].ready}')
     INC=0
     until [[ "${pod_ready}" == "true" || $INC -gt 20 ]]; do
         sleep 10
         ((++INC))
-        pod_ready=$(kubectl get pods -l app=snapshot-controller -n ${namespace} -o jsonpath='{.items[0].status.containerStatuses[0].ready}')
+        pod_ready=$(kubectl get pods -l app=snapshot-controller -n "${namespace}" -o jsonpath='{.items[0].status.containerStatuses[0].ready}')
         echo "snapshotter pod status: ${pod_ready}"
     done
 
     if [ "${pod_ready}" != "true" ]; then
         echo "snapshotter controller creation failed"
-        kubectl get pods -l app=snapshot-controller -n ${namespace}
-        kubectl describe po -l app=snapshot-controller -n ${namespace}
+        kubectl get pods -l app=snapshot-controller -n "${namespace}"
+        kubectl describe po -l app=snapshot-controller -n "${namespace}"
         exit 1
     fi
 
@@ -53,7 +53,7 @@ function cleanup_snapshot_controller() {
     if [ -z "${namespace}" ]; then
         namespace="kube-system"
     fi
-    create_or_delete_resource "delete" ${namespace}
+    create_or_delete_resource "delete" "${namespace}"
 }
 
 function create_or_delete_resource() {
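
With the namespace variables quoted, both scripts should lint clean. A quick local check (assuming shellcheck is installed):

    shellcheck scripts/install-helm.sh scripts/install-snapshot.sh
    # exit status 0 means no findings remain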
2 changes: 1 addition & 1 deletion troubleshooting/tools/tracevol.py
@@ -363,7 +363,7 @@ def get_tool_box_pod_name(arg):
     print("failed to pod %s", err)
     return ""
 
-#pylint: disable=too-many-branches
+#pylint: disable=too-many-branches, E0012, W0719
 def get_pool_name(arg, vol_id, is_rbd):
     """
     get pool name from ceph backend
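
The widened pylint pragma serves the same CI cleanup: W0719 (broad-exception-raised) is a check added in newer pylint releases, and listing E0012 (bad-option-value) alongside it keeps older pylint versions from flagging a disable name they do not recognize. To look the code up locally (assuming pylint is installed):

    pylint --help-msg=W0719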
