fix: restart unhealthy tailscale proxies before health scan
Deploy Cluster / Terraform (push) Successful in 36s
Deploy Cluster / Ansible (push) Successful in 20m55s

2026-05-03 04:47:59 +00:00
parent 79f4b95aef
commit 5262c59665
@@ -1052,6 +1052,18 @@ jobs:
kubectl annotate storageclass flash-nfs storageclass.kubernetes.io/is-default-class=true --overwrite
kubectl get storageclass | grep -E "^flash-nfs.*\\(default\\)"
! kubectl get storageclass | grep -E "^local-path.*\\(default\\)"
tailscale_unhealthy_pods=$(mktemp)
kubectl -n tailscale-system get pods -l tailscale.com/managed=true --no-headers \
  | grep -Ev "[[:space:]](Running|Completed)[[:space:]]" \
  | awk '{print $1}' >"${tailscale_unhealthy_pods}" || true
if [ -s "${tailscale_unhealthy_pods}" ]; then
  echo "Restarting unhealthy Tailscale-managed proxy pods before final health scan"
  while read -r pod; do
    kubectl -n tailscale-system delete pod "${pod}" --wait=false
  done <"${tailscale_unhealthy_pods}"
  sleep 30
  kubectl -n tailscale-system wait --for=condition=Ready pod -l tailscale.com/managed=true --timeout=600s
fi
unhealthy_pods=$(mktemp)
kubectl get pods -A --no-headers \
  | grep -Ev "[[:space:]](Running|Completed)[[:space:]]" \
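
For illustration only, and not part of the workflow diff: the snippet below runs the same status filter used above over hand-written sample output of kubectl get pods --no-headers. The pod names, statuses, and restart counts are invented; it only demonstrates that rows whose STATUS column is neither Running nor Completed are the ones collected for restart.

#!/usr/bin/env sh
# Pipe fabricated "kubectl get pods --no-headers" rows through the workflow's filter;
# only the pod whose STATUS is neither Running nor Completed should be printed.
printf '%s\n' \
  'ts-foo-0   1/1   Running            0   3d' \
  'ts-bar-0   0/1   CrashLoopBackOff   7   3d' \
  'ts-baz-0   0/1   Completed          0   3d' \
  | grep -Ev "[[:space:]](Running|Completed)[[:space:]]" \
  | awk '{print $1}'
# Expected output: ts-bar-0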