fix: restart unhealthy tailscale proxies before health scan
@@ -1052,6 +1052,18 @@ jobs:
 kubectl annotate storageclass flash-nfs storageclass.kubernetes.io/is-default-class=true --overwrite
 kubectl get storageclass | grep -E "^flash-nfs.*\\(default\\)"
 ! kubectl get storageclass | grep -E "^local-path.*\\(default\\)"
+tailscale_unhealthy_pods=$(mktemp)
+kubectl -n tailscale-system get pods -l tailscale.com/managed=true --no-headers \
+  | grep -Ev "[[:space:]](Running|Completed)[[:space:]]" \
+  | awk '{print $1}' >"${tailscale_unhealthy_pods}" || true
+if [ -s "${tailscale_unhealthy_pods}" ]; then
+  echo "Restarting unhealthy Tailscale-managed proxy pods before final health scan"
+  while read -r pod; do
+    kubectl -n tailscale-system delete pod "${pod}" --wait=false
+  done <"${tailscale_unhealthy_pods}"
+  sleep 30
+  kubectl -n tailscale-system wait --for=condition=Ready pod -l tailscale.com/managed=true --timeout=600s
+fi
 unhealthy_pods=$(mktemp)
 kubectl get pods -A --no-headers \
   | grep -Ev "[[:space:]](Running|Completed)[[:space:]]" \
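The added step keys off the STATUS-column filter above. A minimal standalone sketch of that filter, fed with hypothetical pod names and statuses instead of a live cluster (the 'ts-proxy-*' and 'ts-cleanup-*' names below are made up for illustration):

# Lines whose STATUS column is neither Running nor Completed survive the
# inverted grep; awk then prints the pod name from column 1.
printf '%s\n' \
  'ts-proxy-0    1/1  Running            0  5d' \
  'ts-proxy-1    0/1  CrashLoopBackOff   7  5d' \
  'ts-cleanup-1  0/1  Completed          0  1h' \
  | grep -Ev "[[:space:]](Running|Completed)[[:space:]]" \
  | awk '{print $1}'
# Prints: ts-proxy-1 (that pod would then be deleted and recreated before the final wait).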