feat: run kubeadm reconcile after terraform apply on master
All checks were successful
Terraform Plan / Terraform Plan (push) Successful in 18s

This commit is contained in:
2026-02-28 16:39:04 +00:00
parent c04ef106a3
commit f341816112
3 changed files with 120 additions and 11 deletions

View File

@@ -117,6 +117,11 @@ For a full nuke/recreate lifecycle:
## Optional Gitea workflow automation
Primary flow:
- Push to `master` triggers `.gitea/workflows/terraform-apply.yml`
- That workflow now runs `terraform apply` and then automatically performs the kubeadm rebuild/bootstrap reconciliation
Manual dispatch workflows are available:
- `.gitea/workflows/kubeadm-bootstrap.yml`

View File

@@ -25,6 +25,15 @@ for key in "${required[@]}"; do
fi
done
# Check whether a named node is registered in the cluster, as seen from cp-1.
# Globals:   CP_1 (read) - IP of the first control-plane node
# Arguments: $1 - node name (e.g. "cp-2")
# Returns:   0 if the node exists, non-zero otherwise (kubectl output discarded).
cluster_has_node() {
  local node_name="$1"
  # Quote the node name on the remote side as well, so an empty or unusual
  # value cannot word-split into extra kubectl arguments over SSH.
  remote "$CP_1" "sudo kubectl --kubeconfig /etc/kubernetes/admin.conf get node \"$node_name\" >/dev/null 2>&1"
}
# True (exit 0) when cp-1 already hosts a working cluster: the admin
# kubeconfig exists and kubectl can list nodes through it.
# Globals: CP_1 (read) - IP of the first control-plane node
cluster_ready() {
  local probe
  probe="test -f /etc/kubernetes/admin.conf && sudo kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes >/dev/null 2>&1"
  remote "$CP_1" "$probe"
}
remote() {
local host_ip="$1"
local cmd="$2"
@@ -49,13 +58,17 @@ for node in cp-1 cp-2 cp-3 wk-1 wk-2 wk-3; do
done
# NOTE(review): this chunk is rendered diff output without +/- markers, so
# removed and added lines appear together. The two lines below are the
# pre-change, unconditional init step — confirm against the repository.
echo "==> Initializing control plane on cp-1"
remote "$CP_1" "sudo th-kubeadm-init"
# Added: make bootstrap idempotent — run kubeadm init and the Cilium
# install only when no working cluster is detected on cp-1.
if cluster_ready; then
echo "==> Existing cluster detected on cp-1; skipping kubeadm init"
else
remote "$CP_1" "sudo th-kubeadm-init"
echo "==> Installing Cilium on cp-1"
remote "$CP_1" "helm repo add cilium https://helm.cilium.io >/dev/null 2>&1 || true"
remote "$CP_1" "helm repo update >/dev/null"
remote "$CP_1" "kubectl create namespace kube-system >/dev/null 2>&1 || true"
remote "$CP_1" "helm upgrade --install cilium cilium/cilium --namespace kube-system --set kubeProxyReplacement=true"
# NOTE(review): the five Cilium lines appear twice because the diff shows
# both the old top-level copy and the new copy moved into the else branch;
# the resulting file presumably contains only one copy — verify in repo.
echo "==> Installing Cilium on cp-1"
remote "$CP_1" "helm repo add cilium https://helm.cilium.io >/dev/null 2>&1 || true"
remote "$CP_1" "helm repo update >/dev/null"
remote "$CP_1" "kubectl create namespace kube-system >/dev/null 2>&1 || true"
remote "$CP_1" "helm upgrade --install cilium cilium/cilium --namespace kube-system --set kubeProxyReplacement=true"
fi
echo "==> Building kubeadm join commands"
JOIN_CMD="$(remote "$CP_1" "sudo kubeadm token create --print-join-command")"
@@ -77,13 +90,36 @@ join_worker() {
}
# NOTE(review): rendered diff output — old and new lines are interleaved
# without +/- markers in this chunk.
echo "==> Joining remaining control planes"
# Pre-change version: unconditional joins (shown by the diff; presumably
# removed in the resulting file — confirm against the repository).
join_control_plane "$CP_2"
join_control_plane "$CP_3"
# Added: skip any node that is already a cluster member, so the script is
# safe to re-run after a partial bootstrap.
if cluster_has_node "cp-2"; then
echo "cp-2 already joined; skipping"
else
join_control_plane "$CP_2"
fi
if cluster_has_node "cp-3"; then
echo "cp-3 already joined; skipping"
else
join_control_plane "$CP_3"
fi
echo "==> Joining workers"
# Pre-change version: unconditional worker joins (diff residue, as above).
join_worker "$WK_1"
join_worker "$WK_2"
join_worker "$WK_3"
# Added: idempotent worker joins, mirroring the control-plane guards.
if cluster_has_node "wk-1"; then
echo "wk-1 already joined; skipping"
else
join_worker "$WK_1"
fi
if cluster_has_node "wk-2"; then
echo "wk-2 already joined; skipping"
else
join_worker "$WK_2"
fi
if cluster_has_node "wk-3"; then
echo "wk-3 already joined; skipping"
else
join_worker "$WK_3"
fi
echo "==> Final node list"
remote "$CP_1" "kubectl get nodes -o wide"