# Fixes the chicken-and-egg problem where workers started with
# --kubelet-arg=cloud-provider=external could not join the cluster, because the
# CCM was not yet running to remove the
# node.cloudprovider.kubernetes.io/uninitialized taint.
#
# Changes:
#   - Create ansible/roles/ccm-deploy/ to deploy the CCM via Helm during the
#     Ansible phase
#   - Reorder site.yml: the CCM deploys after secrets but before workers join
#   - The CCM runs on control_plane[0] with tolerations for control-plane nodes
#   - Add a 10s pause after the CCM is ready so it can process new nodes
#   - Workers can now join successfully with the external cloud provider enabled
#
# Flux still manages the CCM for updates; only the initial install happens in
# Ansible.
---
# Site playbook: bootstraps a k3s cluster end-to-end.
# Play order matters: the CCM play must run before workers join (see below).

- name: Bootstrap Kubernetes cluster
  hosts: cluster
  become: true
  gather_facts: true

  pre_tasks:
    # Freshly provisioned hosts may still be booting / running cloud-init.
    - name: Wait for SSH
      ansible.builtin.wait_for_connection:
        delay: 10
        timeout: 300

  roles:
    - common
|
- name: Setup primary control plane
  hosts: control_plane[0]
  become: true

  vars:
    k3s_primary: true
    k3s_primary_private_ip: "{{ k3s_private_ip }}"
    k3s_primary_public_ip: "{{ ansible_host }}"
    k3s_primary_ip: "{{ k3s_private_ip }}"
    k3s_node_ip: "{{ k3s_private_ip }}"

  pre_tasks:
    # A lookup() placed in `vars:` is lazily re-templated on EVERY reference,
    # so `lookup('password', '/dev/null ...')` there can yield a different
    # token each time the role reads k3s_token. set_fact evaluates the lookup
    # exactly once and pins the result for the rest of the play.
    - name: Generate initial cluster join token
      ansible.builtin.set_fact:
        k3s_token: "{{ lookup('password', '/dev/null length=32 chars=ascii_letters,digits') }}"
      no_log: true  # the token is a cluster-join credential

  roles:
    - k3s-server
|
|
- name: Get join info from primary
  hosts: control_plane[0]
  become: true

  tasks:
    # slurp reads the file through the connection plugin instead of spawning
    # a shell just to `cat` it; content comes back base64-encoded.
    - name: Fetch node token
      ansible.builtin.slurp:
        src: /var/lib/rancher/k3s/server/node-token
      register: node_token_file
      no_log: true  # the token is a cluster-join credential

    # Published as hostvars of control_plane[0] for the secondary/worker plays.
    - name: Set join token fact
      ansible.builtin.set_fact:
        k3s_token: "{{ node_token_file.content | b64decode | trim }}"
        k3s_primary_private_ip: "{{ k3s_private_ip }}"
        k3s_primary_public_ip: "{{ ansible_host }}"
      no_log: true

    - name: Fetch kubeconfig
      ansible.builtin.fetch:
        src: /etc/rancher/k3s/k3s.yaml
        dest: ../outputs/kubeconfig
        flat: true
|
|
# Seeds the secrets that later addons (CCM, External Secrets, ...) require.
- name: Bootstrap addon prerequisite secrets
  hosts: control_plane[0]
  become: true

  roles:
    - role: addon-secrets-bootstrap
|
|
# Must run BEFORE the worker play: workers join with
# --kubelet-arg=cloud-provider=external and stay tainted
# (node.cloudprovider.kubernetes.io/uninitialized) until a CCM is running.
- name: Deploy Hetzner CCM (required for workers with external cloud provider)
  hosts: control_plane[0]
  become: true

  roles:
    - role: ccm-deploy
|
|
- name: Setup secondary control planes
  hosts: control_plane[1:]
  become: true

  vars:
    # Join parameters published by the "Get join info from primary" play.
    primary_host: "{{ groups['control_plane'][0] }}"
    k3s_primary: false
    k3s_token: "{{ hostvars[primary_host]['k3s_token'] }}"
    k3s_primary_ip: "{{ hostvars[primary_host]['k3s_primary_private_ip'] }}"
    k3s_primary_public_ip: "{{ hostvars[primary_host]['k3s_primary_public_ip'] }}"
    k3s_node_ip: "{{ k3s_private_ip }}"

  roles:
    - k3s-server
|
|
- name: Setup workers
  hosts: workers
  become: true

  vars:
    # Join parameters published by the "Get join info from primary" play.
    primary_host: "{{ groups['control_plane'][0] }}"
    k3s_token: "{{ hostvars[primary_host]['k3s_token'] }}"
    k3s_server_url: "https://{{ hostvars[primary_host]['k3s_primary_private_ip'] }}:6443"
    k3s_node_ip: "{{ k3s_private_ip }}"

  roles:
    - k3s-agent
|
|
|
- name: Deploy observability stack
  hosts: control_plane[0]
  become: true

  roles:
    # Skipped when Flux/GitOps owns the observability stack (the default).
    - role: observability
      when: not (observability_gitops_enabled | default(true) | bool)
|
|
|
|
- name: Provision Grafana content
  hosts: control_plane[0]
  become: true

  roles:
    # Skipped when Flux/GitOps owns the observability stack (the default).
    - role: observability-content
      when: not (observability_gitops_enabled | default(true) | bool)
|
|
|
|
- name: Configure private tailnet access
  hosts: control_plane[0]
  become: true

  vars:
    # NodePorts exposed only over the tailnet.
    private_access_grafana_port: 30080
    private_access_prometheus_port: 30990
    private_access_flux_port: 30901

  roles:
    - role: private-access
|
|
|
- name: Bootstrap Doppler access for External Secrets
  hosts: control_plane[0]
  become: true

  roles:
    - role: doppler-bootstrap
|
|
|
- name: Finalize
  hosts: localhost
  connection: local

  tasks:
    # k3s writes the kubeconfig with the loopback address; point it at the
    # primary's tailnet DNS name instead. `replace` is idempotent and reports
    # `changed` accurately, unlike shelling out to `sed -i` (which also left
    # the regex dots unescaped).
    - name: Update kubeconfig server address
      ansible.builtin.replace:
        path: "{{ playbook_dir }}/../outputs/kubeconfig"
        regexp: '127\.0\.0\.1'
        replace: "{{ groups['control_plane'][0] }}.{{ tailscale_tailnet }}"

    - name: Display success message
      ansible.builtin.debug:
        msg: |
          Cluster setup complete!
          Control planes: {{ groups['control_plane'] | length }}
          Workers: {{ groups['workers'] | length }}
          To access the cluster:
            export KUBECONFIG={{ playbook_dir }}/../outputs/kubeconfig
            kubectl get nodes