Changed from hardcoded Tailscale IPs to DNS names: k8s-cluster-cp-1.silverside-gopher.ts.net, k8s-cluster-cp-2.silverside-gopher.ts.net, and k8s-cluster-cp-3.silverside-gopher.ts.net. This is more robust because Tailscale IPs change on rebuild, while DNS names remain consistent. After the next rebuild, the cluster is accessible via: kubectl --server=https://k8s-cluster-cp-1.silverside-gopher.ts.net:6443
17 lines · 582 B · YAML
---
# k3s cluster variable defaults (Ansible group_vars style).
# Empty-string values are placeholders expected to be set per-host/group
# in inventory; booleans toggle k3s installer behavior.

# k3s release to install; "latest" tracks the newest stable release.
# Pin a specific version (e.g. "v1.29.4+k3s1") for reproducible builds.
k3s_version: latest

# Shared cluster join token — set in inventory/vault, never commit a real value.
k3s_token: ""

# IP this node advertises to the cluster; empty lets k3s auto-detect.
k3s_node_ip: ""

# Public IP of the primary (first) control plane; empty until provisioned.
k3s_primary_public_ip: ""

# Disable k3s's embedded cloud controller manager
# (presumably an external CCM is deployed instead — confirm against the role).
k3s_disable_embedded_ccm: true

# Disable the built-in service load balancer (klipper-lb).
k3s_disable_servicelb: true

# Run kubelet with an external cloud provider (--cloud-provider=external).
k3s_kubelet_cloud_provider_external: true

# Load Balancer endpoint for HA cluster joins (set in inventory)
kube_api_endpoint: ""

# Tailscale DNS names for control planes (to enable tailnet access)
# Using DNS names instead of IPs since Tailscale IPs change on rebuild
tailscale_control_plane_names:
  - "k8s-cluster-cp-1.silverside-gopher.ts.net"
  - "k8s-cluster-cp-2.silverside-gopher.ts.net"
  - "k8s-cluster-cp-3.silverside-gopher.ts.net"