feat: Add kubeconfig refresh script and fix Ansible Finalize to use public IP
All checks were successful
Deploy Cluster / Terraform (push) Successful in 53s
Deploy Cluster / Ansible (push) Successful in 5m25s

- scripts/refresh-kubeconfig.sh fetches a fresh kubeconfig from CP1
- Ansible site.yml Finalize step now uses public IP instead of Tailscale
  hostname for the kubeconfig server address
- Updated AGENTS.md with kubeconfig refresh instructions
This commit is contained in:
2026-03-29 03:31:36 +00:00
parent 905d069e91
commit 6e5b0518be
3 changed files with 41 additions and 1 deletions

scripts/refresh-kubeconfig.sh — new executable file (33 lines added)
View File

@@ -0,0 +1,33 @@
#!/usr/bin/env bash
#
# refresh-kubeconfig.sh — fetch a fresh kubeconfig from the first control-plane
# node (CP1) and rewrite its server address to CP1's public IP.
#
# Usage: refresh-kubeconfig.sh [cp1-public-ip]
#   If the IP is omitted, it is read from ansible/inventory.ini
#   (first IPv4 address under the [control_plane] section).
# Env:
#   SSH_KEY — private key used for root@CP1 (default: ~/.ssh/infra)
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
KUBECONFIG_PATH="$REPO_ROOT/outputs/kubeconfig"
SSH_KEY="${SSH_KEY:-$HOME/.ssh/infra}"

CP1_PUBLIC_IP="${1:-}"
if [ -z "$CP1_PUBLIC_IP" ] && [ -f "$REPO_ROOT/ansible/inventory.ini" ]; then
  # -E instead of -P (PCRE): -P is GNU-only and breaks on BSD/macOS grep.
  CP1_PUBLIC_IP=$(grep -A2 '\[control_plane\]' "$REPO_ROOT/ansible/inventory.ini" \
    | grep -oE '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' \
    | head -n 1)
fi

if [ -z "$CP1_PUBLIC_IP" ]; then
  # Diagnostics go to stderr so stdout stays clean for scripting.
  echo "Usage: $0 <control-plane-1-public-ip>" >&2
  echo "  Or ensure ansible/inventory.ini exists with control plane IPs." >&2
  exit 1
fi

# Create outputs/ if missing — the redirection below fails on a fresh checkout
# otherwise.
mkdir -p "$(dirname "$KUBECONFIG_PATH")"

echo "Fetching kubeconfig from $CP1_PUBLIC_IP ..."
# k3s writes its kubeconfig pointing at 127.0.0.1; rewrite that to the public
# IP. The dots are escaped so sed matches the literal address, not any char.
ssh -i "$SSH_KEY" \
  -o StrictHostKeyChecking=no \
  -o UserKnownHostsFile=/dev/null \
  "root@$CP1_PUBLIC_IP" "cat /etc/rancher/k3s/k3s.yaml" \
  | sed "s/127\.0\.0\.1/$CP1_PUBLIC_IP/g" \
  > "$KUBECONFIG_PATH"

# kubeconfig holds cluster credentials — restrict to the owner.
chmod 600 "$KUBECONFIG_PATH"
echo "Kubeconfig saved to $KUBECONFIG_PATH"
echo "Run: export KUBECONFIG=$KUBECONFIG_PATH"