feat: refactor infra to cp/wk kubeadm topology
Some checks failed
Terraform Plan / Terraform Plan (push) Failing after 9s

Provision 3 thin control planes and 3 workers with role-specific sizing and role-specific starting VMIDs (701 for control planes, 711 for workers), generate per-node cloud-init snippets with SSH key injection, and add NixOS kubeadm host/module scaffolding for cp-1..3 and wk-1..3.
This commit is contained in:
2026-02-28 14:16:55 +00:00
parent c516c8ba35
commit 21be01346b
19 changed files with 348 additions and 62 deletions

View File

@@ -1,12 +1,33 @@
data "template_file" "cloud_init_global" {
data "template_file" "control_plane_cloud_init" {
count = var.control_plane_count
template = file("${path.module}/files/cloud_init_global.tpl")
vars = {
hostname = "cp-${count.index + 1}"
domain = "home.arpa"
SSH_KEY_PUBLIC = var.SSH_KEY_PUBLIC
}
}
resource "local_file" "cloud_init_global" {
content = data.template_file.cloud_init_global.rendered
filename = "${path.module}/files/rendered/cloud_init_global.yaml"
resource "local_file" "control_plane_cloud_init" {
count = var.control_plane_count
content = data.template_file.control_plane_cloud_init[count.index].rendered
filename = "${path.module}/files/rendered/cp-${count.index + 1}.yaml"
}
data "template_file" "worker_cloud_init" {
count = var.worker_count
template = file("${path.module}/files/cloud_init_global.tpl")
vars = {
hostname = "wk-${count.index + 1}"
domain = "home.arpa"
SSH_KEY_PUBLIC = var.SSH_KEY_PUBLIC
}
}
resource "local_file" "worker_cloud_init" {
count = var.worker_count
content = data.template_file.worker_cloud_init[count.index].rendered
filename = "${path.module}/files/rendered/wk-${count.index + 1}.yaml"
}

View File

@@ -1,4 +1,5 @@
#cloud-config
hostname: ${hostname}
manage_etc_hosts: true
resolv_conf:
nameservers:
@@ -6,6 +7,7 @@ resolv_conf:
- 1.1.1.1
preserve_hostname: false
fqdn: ${hostname}.${domain}
users:
- name: micqdf

View File

@@ -16,10 +16,10 @@ provider "proxmox" {
pm_tls_insecure = true
}
resource "proxmox_vm_qemu" "alpacas" {
count = var.alpaca_vm_count
name = "alpaca-${count.index + 1}"
vmid = 500 + count.index + 1
resource "proxmox_vm_qemu" "control_planes" {
count = var.control_plane_count
name = "cp-${count.index + 1}"
vmid = var.control_plane_vmid_start + count.index
target_node = var.target_node
clone = var.clone_template
full_clone = true
@@ -27,22 +27,22 @@ resource "proxmox_vm_qemu" "alpacas" {
agent = 1
cpu {
sockets = var.sockets
cores = var.cores
sockets = 1
cores = var.control_plane_cores
}
memory = var.memory
memory = var.control_plane_memory_mb
scsihw = "virtio-scsi-pci"
boot = "order=scsi0"
bootdisk = "scsi0"
ipconfig0 = "ip=dhcp"
cicustom = "user=local:snippets/cloud_init_global.yaml"
cicustom = "user=local:snippets/cp-${count.index + 1}.yaml"
disks {
scsi {
scsi0 {
disk {
size = var.disk_size
size = var.control_plane_disk_size
storage = var.storage
}
}
@@ -65,10 +65,10 @@ resource "proxmox_vm_qemu" "alpacas" {
}
resource "proxmox_vm_qemu" "llamas" {
count = var.llama_vm_count
name = "llama-${count.index + 1}"
vmid = 600 + count.index + 1
resource "proxmox_vm_qemu" "workers" {
count = var.worker_count
name = "wk-${count.index + 1}"
vmid = var.worker_vmid_start + count.index
target_node = var.target_node
clone = var.clone_template
full_clone = true
@@ -76,21 +76,21 @@ resource "proxmox_vm_qemu" "llamas" {
agent = 1
cpu {
sockets = var.sockets
cores = var.cores
sockets = 1
cores = var.worker_cores[count.index]
}
memory = var.memory
memory = var.worker_memory_mb[count.index]
scsihw = "virtio-scsi-pci"
boot = "order=scsi0"
bootdisk = "scsi0"
ipconfig0 = "ip=dhcp"
cicustom = "user=local:snippets/cloud_init_global.yaml"
cicustom = "user=local:snippets/wk-${count.index + 1}.yaml"
disks {
scsi {
scsi0 {
disk {
size = var.disk_size
size = var.worker_disk_size
storage = var.storage
}
}

View File

@@ -1,21 +1,21 @@
output "alpaca_vm_ids" {
output "control_plane_vm_ids" {
value = {
for i in range(var.alpaca_vm_count) :
"alpaca-${i + 1}" => proxmox_vm_qemu.alpacas[i].vmid
for i in range(var.control_plane_count) :
"cp-${i + 1}" => proxmox_vm_qemu.control_planes[i].vmid
}
}
output "alpaca_vm_names" {
value = [for vm in proxmox_vm_qemu.alpacas : vm.name]
output "control_plane_vm_names" {
value = [for vm in proxmox_vm_qemu.control_planes : vm.name]
}
output "llama_vm_ids" {
output "worker_vm_ids" {
value = {
for i in range(var.llama_vm_count) :
"llama-${i + 1}" => proxmox_vm_qemu.llamas[i].vmid
for i in range(var.worker_count) :
"wk-${i + 1}" => proxmox_vm_qemu.workers[i].vmid
}
}
output "llama_vm_names" {
value = [for vm in proxmox_vm_qemu.llamas : vm.name]
output "worker_vm_names" {
value = [for vm in proxmox_vm_qemu.workers : vm.name]
}

View File

@@ -1,10 +1,19 @@
target_node = "flex"
clone_template = "nixos-template"
cores = 1
memory = 1024
disk_size = "15G"
sockets = 1
bridge = "vmbr0"
storage = "Flash"
pm_api_url = "https://100.105.0.115:8006/api2/json"
pm_api_token_id = "terraform-prov@pve!mytoken"
control_plane_count = 3
worker_count = 3
control_plane_vmid_start = 701
worker_vmid_start = 711
control_plane_cores = 1
control_plane_memory_mb = 4096
control_plane_disk_size = "40G"
worker_cores = [4, 4, 3]
worker_memory_mb = [12288, 12288, 12288]
worker_disk_size = "60G"

View File

@@ -27,20 +27,74 @@ variable "clone_template" {
type = string
}
variable "cores" {
type = number
variable "control_plane_count" {
type = number
default = 3
description = "Number of control plane VMs"
}
variable "memory" {
type = number
variable "worker_count" {
type = number
default = 3
description = "Number of worker VMs"
}
variable "disk_size" {
type = string
variable "control_plane_vmid_start" {
type = number
default = 701
description = "Starting VMID for control plane VMs"
}
variable "sockets" {
type = number
variable "worker_vmid_start" {
type = number
default = 711
description = "Starting VMID for worker VMs"
}
variable "control_plane_cores" {
type = number
default = 1
description = "vCPU cores per control plane VM"
}
variable "control_plane_memory_mb" {
type = number
default = 4096
description = "Memory in MB per control plane VM"
}
variable "worker_cores" {
type = list(number)
default = [4, 4, 3]
description = "vCPU cores for each worker VM"
validation {
condition = length(var.worker_cores) == var.worker_count
error_message = "worker_cores list length must equal worker_count."
}
}
variable "worker_memory_mb" {
type = list(number)
default = [12288, 12288, 12288]
description = "Memory in MB for each worker VM"
validation {
condition = length(var.worker_memory_mb) == var.worker_count
error_message = "worker_memory_mb list length must equal worker_count."
}
}
variable "control_plane_disk_size" {
type = string
default = "40G"
description = "Disk size for control plane VMs"
}
variable "worker_disk_size" {
type = string
default = "60G"
description = "Disk size for worker VMs"
}
variable "bridge" {
@@ -55,18 +109,6 @@ variable "pm_api_url" {
type = string
}
variable "alpaca_vm_count" {
type = number
default = 1
description = "How many Alpaca VMs to create"
}
variable "llama_vm_count" {
type = number
default = 1
description = "How many Llama VMs to create"
}
variable "SSH_KEY_PUBLIC" {
type = string
description = "Public SSH key injected via cloud-init"