From cbac8d49ababd242086924b6bb5db9291355a7b0 Mon Sep 17 00:00:00 2001
From: Fabio Rauber
Date: Fri, 22 Apr 2022 10:18:17 -0300
Subject: [PATCH] First working version for already patched pvcs

---
 check_full_pvs.sh | 74 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 51 insertions(+), 23 deletions(-)

diff --git a/check_full_pvs.sh b/check_full_pvs.sh
index 41d5107..2dbda10 100755
--- a/check_full_pvs.sh
+++ b/check_full_pvs.sh
@@ -24,32 +24,59 @@ checkReqs () {
 }
 
 
-appstop () {
-    $NAMESPACE = $1
-    #Stop deployments
-    DEPLOYS=$(kubectl get deployments --namespace ${NAMESPACE} | grep -v NAME | awk '{print $1}' )
+scaleworkload () {
+    NAMESPACE=$1
+    TYPE=$2
+    WORKLOADNAME=$3
+    SCALE=$4
+
+    echo -n " |-- Scaling $TYPE $WORKLOADNAME in $NAMESPACE to $SCALE... " && \
+    kubectl scale --replicas=$SCALE $TYPE $WORKLOADNAME --namespace ${NAMESPACE}
     if [ $? -ne 0 ]; then
-        echo " |-- Error trying to get deployments for namespace $NAMESPACE."
-    else
-        for DEPLOY in $DEPLOYS; do
-            echo -n " |-- Stopping in ${NAMESPACE}... " && \
-            kubectl scale --replicas=0 deployment ${DEPLOY} --namespace ${NAMESPACE}
-            if [ $? -ne 0 ]; then
-                echo "Error stopping $DEPLOY in namespace $NAMESPACE."
-            fi
-        done
+        echo " |-- Error scaling $TYPE $WORKLOADNAME to $SCALE."
+    fi
+}
+
+
+waitforresize (){
+    ns=$1
+    pvcname=$2
+
+    echo " |-- Waiting for volume resize for $pvcname..."
+    pendingresize=0
+    for count in $(seq 1 60); do
+        pvcstatus=$(kubectl get pvc $pvcname -n $ns -o=jsonpath='{.status.conditions[0].type}')
+        if [[ "$pvcstatus" == "FileSystemResizePending" ]]; then
+            echo " |-- $pvcname resized. Need now to resize inner filesystem. This happens automatically when a pod mounts the volume."
+            pendingresize=1
+            break
+        else
+            sleep 10
+        fi
+    done
+    if [ $pendingresize -eq 0 ]; then
+        echo " |-- Timeout trying to resize $pvcname."
     fi
 
-    #Stop statefulsets
-    SSETS=$(kubectl get statefulsets --namespace ${NAMESPACE} | grep -v NAME | awk '{print $1}' )
+}
+
+restartforresize () {
+    NAMESPACE=$1
+    PVC=$2
+
+    DEPLOYS=$(kubectl get deployments -n $NAMESPACE -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.spec.template.spec.volumes[].persistentVolumeClaim.claimName}{" "}{.status.replicas}{"\n"}{end}')
     if [ $? -ne 0 ]; then
-        echo " |-- Error trying to get statefulsets for namespace $NAMESPACE."
-    else
-        for SSET in $SSETS; do
-            echo -n "Stopping in ${NAMESPACE}... " && \
-            kubectl scale --replicas=0 statefulset ${SSET} --namespace ${NAMESPACE}
-            if [ $? -ne 0 ]; then
-                echo "Error stopping $SSET in namespace $NAMESPACE."
+        echo " |-- Error trying to get deployments for namespace $NAMESPACE."
+    else
+        for DEPLOY in $DEPLOYS; do
+            name=$(echo $DEPLOY | awk '{print $1}')
+            pvcname=$(echo $DEPLOY | awk '{print $2}')
+            scale=$(echo $DEPLOY | awk '{print $3}')
+
+            if [[ "$pvcname" == $PVC ]]; then
+                scaleworkload $NAMESPACE deployment $name 0
+                waitforresize $NAMESPACE $pvcname
+                scaleworkload $NAMESPACE deployment $name $scale
             fi
         done
     fi
@@ -103,8 +130,9 @@ for ns in $ALLNS; do
             pvcstatus=$(kubectl get pvc $pvcname -n $ns -o=jsonpath='{.status.conditions[0].type}')
             if [[ "$pvcstatus" == 'Resizing' ]]; then
                 echo " |-- Volume $pvcname already has a Resizing operation going on."
+                restartforresize $ns $pvcname
             else
-                newsize=$(echo ${size}*1.5| bc | grep -v "$\.0")
+                newsize=$(echo ${size}*1.${INCREASEPERC}| bc | grep -v "$\.0")
                 echo " |-- Resizing $pvcname: ${size}Gi --> ${newsize}Gi..."
             fi
         fi
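
Note on restartforresize, not part of the patch above: the unquoted "for DEPLOY in $DEPLOYS" splits the jsonpath output on every space as well as every newline, so each $DEPLOY is a single word and the awk '{print $2}' / '{print $3}' extractions come back empty. Below is a minimal line-wise sketch, not the author's implementation: it reuses the same kubectl jsonpath query from the patch, and the echo is only a placeholder for the scaleworkload/waitforresize/scaleworkload sequence.

    #!/usr/bin/env bash
    # Sketch: read each "name claimName replicas" triple as one line.
    NAMESPACE=$1
    PVC=$2

    kubectl get deployments -n "$NAMESPACE" \
        -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.spec.template.spec.volumes[].persistentVolumeClaim.claimName}{" "}{.status.replicas}{"\n"}{end}' |
    while read -r name pvcname scale; do
        # read -r keeps the three whitespace-separated fields of a line together
        if [[ "$pvcname" == "$PVC" ]]; then
            # placeholder for: scale to 0, wait for resize, scale back to $scale
            echo "deployment $name (replicas=$scale) mounts $PVC"
        fi
    done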