Files
TerraHome/nixos/kubeadm/scripts/rebuild-and-bootstrap.sh
MichaelFisher1997 5669305e59
All checks were successful
Terraform Plan / Terraform Plan (push) Successful in 19s
feat: make kubeadm workflows auto-scale with terraform outputs
2026-02-28 16:43:22 +00:00

175 lines
4.7 KiB
Bash
Executable File

#!/usr/bin/env bash
# Rebuild every kubeadm node with nixos-rebuild, then bootstrap or extend the
# cluster: init the primary control plane, install Cilium, and join the rest.
#
# Usage: rebuild-and-bootstrap.sh [inventory.env]
# Env:   FLAKE_DIR, SSH_USER, SSH_OPTS, PRIMARY_CONTROL_PLANE (all optional)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
FLAKE_DIR="${FLAKE_DIR:-$(cd "$SCRIPT_DIR/.." && pwd)}"
INVENTORY_FILE="${1:-$SCRIPT_DIR/inventory.env}"
if [ ! -f "$INVENTORY_FILE" ]; then
  # Diagnostics belong on stderr so they are not captured by callers.
  echo "Missing inventory file: $INVENTORY_FILE" >&2
  echo "Copy $SCRIPT_DIR/inventory.example.env to $SCRIPT_DIR/inventory.env and edit node mappings." >&2
  exit 1
fi
# shellcheck disable=SC1090
source "$INVENTORY_FILE"
SSH_USER="${SSH_USER:-micqdf}"
# Leading/trailing spaces are deliberate: SSH_OPTS is intentionally
# word-split at the ssh call site (see remote()).
SSH_OPTS="${SSH_OPTS:- -o BatchMode=yes -o StrictHostKeyChecking=accept-new }"
declare -A NODE_IPS=()
declare -a CP_NAMES=()
declare -a WK_NAMES=()
# Validate a "name=ip" pair and register it in the global inventory maps.
# Globals:   NODE_IPS (written), CP_NAMES / WK_NAMES (appended)
# Arguments: $1 - role ("cp" for control plane, anything else = worker)
#            $2 - "name=ip" pair
# Exits 1 on a malformed pair.
add_node_pair() {
  local role="$1"
  local pair="$2"
  local name="${pair%%=*}"
  local ip="${pair#*=}"
  # A pair without '=' leaves name == ip == pair; catch that plus empty halves.
  if [ -z "$name" ] || [ -z "$ip" ] || [ "$name" = "$ip" ]; then
    echo "Invalid node pair '$pair' (expected name=ip)." >&2
    exit 1
  fi
  NODE_IPS["$name"]="$ip"
  if [ "$role" = "cp" ]; then
    CP_NAMES+=("$name")
  else
    WK_NAMES+=("$name")
  fi
}
# Populate NODE_IPS / CP_NAMES / WK_NAMES from the sourced inventory.
# Prefers the space-separated CONTROL_PLANES / WORKERS "name=ip" lists;
# otherwise falls back to scanning numbered CP_<n> / WK_<n> variables.
# Exits 1 if either role ends up with no nodes.
populate_nodes() {
  if [ -n "${CONTROL_PLANES:-}" ]; then
    # Intentional word-splitting: CONTROL_PLANES is a space-separated list.
    for pair in $CONTROL_PLANES; do
      add_node_pair "cp" "$pair"
    done
  else
    # Discover CP_1, CP_2, ... variables; sort -V keeps CP_10 after CP_2.
    while IFS= read -r var_name; do
      idx="${var_name#CP_}"
      add_node_pair "cp" "cp-$idx=${!var_name}"
    done < <(compgen -A variable | grep -E '^CP_[0-9]+$' | sort -V)
  fi
  if [ -n "${WORKERS:-}" ]; then
    for pair in $WORKERS; do
      add_node_pair "wk" "$pair"
    done
  else
    while IFS= read -r var_name; do
      idx="${var_name#WK_}"
      add_node_pair "wk" "wk-$idx=${!var_name}"
    done < <(compgen -A variable | grep -E '^WK_[0-9]+$' | sort -V)
  fi
  if [ "${#CP_NAMES[@]}" -eq 0 ]; then
    echo "No control planes found in inventory." >&2
    exit 1
  fi
  if [ "${#WK_NAMES[@]}" -eq 0 ]; then
    echo "No workers found in inventory." >&2
    exit 1
  fi
}
# Run a command on a node over SSH.
# Arguments: $1 - host IP; $2 - command string executed on the remote side.
remote() {
  local host="$1"
  local command_string="$2"
  # shellcheck disable=SC2086 -- SSH_OPTS must undergo word-splitting.
  ssh $SSH_OPTS "$SSH_USER@$host" "$command_string"
}
# Return 0 iff the named node is already registered on the primary control
# plane's apiserver (queried via the admin kubeconfig over SSH).
cluster_has_node() {
  local target="$1"
  remote "$PRIMARY_CP_IP" "sudo kubectl --kubeconfig /etc/kubernetes/admin.conf get node $target >/dev/null 2>&1"
}
# Return 0 iff the primary control plane already has an admin kubeconfig and
# a responsive apiserver (i.e. kubeadm init has been run before).
cluster_ready() {
  local probe="test -f /etc/kubernetes/admin.conf && sudo kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes >/dev/null 2>&1"
  remote "$PRIMARY_CP_IP" "$probe"
}
# Deploy a node's NixOS flake configuration over SSH.
# Arguments: $1 - flake attribute / node name; $2 - node IP.
rebuild_node() {
  local name="$1" ip="$2"
  echo "==> Rebuilding $name on $ip"
  local -a rebuild_args=(
    switch
    --flake "$FLAKE_DIR#$name"
    --target-host "$SSH_USER@$ip"
    --use-remote-sudo
  )
  nixos-rebuild "${rebuild_args[@]}"
}
populate_nodes
# Prefer an explicitly configured primary; fall back to the first control
# plane when the configured name is absent from the inventory.
PRIMARY_CONTROL_PLANE="${PRIMARY_CONTROL_PLANE:-cp-1}"
if [ -z "${NODE_IPS[$PRIMARY_CONTROL_PLANE]:-}" ]; then
  PRIMARY_CONTROL_PLANE="${CP_NAMES[0]}"
fi
PRIMARY_CP_IP="${NODE_IPS[$PRIMARY_CONTROL_PLANE]}"
# Deploy the NixOS configuration to every node before touching kubeadm.
for node in "${CP_NAMES[@]}"; do
  rebuild_node "$node" "${NODE_IPS[$node]}"
done
for node in "${WK_NAMES[@]}"; do
  rebuild_node "$node" "${NODE_IPS[$node]}"
done
if cluster_ready; then
  echo "==> Existing cluster detected on $PRIMARY_CONTROL_PLANE; skipping kubeadm init"
else
  # Fix: only announce init when it actually happens (it was previously
  # printed before the cluster_ready check, which was misleading).
  echo "==> Initializing control plane on $PRIMARY_CONTROL_PLANE"
  remote "$PRIMARY_CP_IP" "sudo th-kubeadm-init"
  echo "==> Installing Cilium on $PRIMARY_CONTROL_PLANE"
  remote "$PRIMARY_CP_IP" "helm repo add cilium https://helm.cilium.io >/dev/null 2>&1 || true"
  remote "$PRIMARY_CP_IP" "helm repo update >/dev/null"
  # kube-system normally pre-exists; kept as best-effort idempotence.
  remote "$PRIMARY_CP_IP" "kubectl create namespace kube-system >/dev/null 2>&1 || true"
  remote "$PRIMARY_CP_IP" "helm upgrade --install cilium cilium/cilium --namespace kube-system --set kubeProxyReplacement=true"
fi
echo "==> Building kubeadm join commands"
# 'token create --print-join-command' emits a full "kubeadm join ..." line;
# 'upload-certs' prints the certificate key on its final output line.
JOIN_CMD="$(remote "$PRIMARY_CP_IP" "sudo kubeadm token create --print-join-command")"
CERT_KEY="$(remote "$PRIMARY_CP_IP" "sudo kubeadm init phase upload-certs --upload-certs | tail -n 1")"
CP_JOIN_CMD="$JOIN_CMD --control-plane --certificate-key $CERT_KEY"
# Join a node as an additional control plane.
# The join command is base64-encoded locally so its quoting survives the SSH
# hop, then decoded on the remote side before execution.
join_control_plane() {
  local node_ip="$1"
  local payload
  payload="$(printf '%s' "$CP_JOIN_CMD" | base64 -w0)"
  remote "$node_ip" "sudo th-kubeadm-join-control-plane \"\$(echo $payload | base64 -d)\""
}
# Join a node as a worker.
# Same base64 round-trip as join_control_plane to keep quoting intact.
join_worker() {
  local node_ip="$1"
  local payload
  payload="$(printf '%s' "$JOIN_CMD" | base64 -w0)"
  remote "$node_ip" "sudo th-kubeadm-join-worker \"\$(echo $payload | base64 -d)\""
}
echo "==> Joining remaining control planes"
# Shared guard: join a node only when it is not yet a cluster member.
# Arguments: $1 - node name; $2 - join function to call with the node's IP.
join_if_absent() {
  if cluster_has_node "$1"; then
    echo "$1 already joined; skipping"
  else
    "$2" "${NODE_IPS[$1]}"
  fi
}
for node in "${CP_NAMES[@]}"; do
  if [ "$node" != "$PRIMARY_CONTROL_PLANE" ]; then
    join_if_absent "$node" join_control_plane
  fi
done
echo "==> Joining workers"
for node in "${WK_NAMES[@]}"; do
  join_if_absent "$node" join_worker
done
echo "==> Final node list"
remote "$PRIMARY_CP_IP" "kubectl get nodes -o wide"