Compare commits

1 Commit

Author SHA1 Message Date
MichaelFisher1997
f90075f098 terraform fmt
All checks were successful
Gitea Destroy Terraform / Terraform Destroy (push) Successful in 20s
2025-04-17 17:20:22 +01:00
28 changed files with 330 additions and 701 deletions

View File

@@ -1,72 +1,47 @@
name: Terraform Apply
name: Gitea Actions Demo
run-name: ${{ gitea.actor }} is deploying with Terraform 🚀
on:
push:
branches:
- master
concurrency:
group: terraform-global
cancel-in-progress: false
jobs:
terraform:
name: "Terraform Apply"
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
env:
TF_VAR_TS_AUTHKEY: ${{ secrets.TAILSCALE_KEY }}
TF_VAR_ssh_key: ${{ secrets.SSH_PUBLIC_KEY }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Create secrets.tfvars
working-directory: terraform
run: |
cat > secrets.auto.tfvars << EOF
pm_api_token_secret = "${{ secrets.PM_API_TOKEN_SECRET }}"
SSH_KEY_PUBLIC = "$(printf '%s' "${{ secrets.SSH_KEY_PUBLIC }}" | tr -d '\r\n')"
EOF
cat > backend.hcl << EOF
bucket = "${{ secrets.B2_TF_BUCKET }}"
key = "terraform.tfstate"
region = "us-east-005"
endpoints = {
s3 = "${{ secrets.B2_TF_ENDPOINT }}"
}
access_key = "$(printf '%s' "${{ secrets.B2_KEY_ID }}" | tr -d '\r\n')"
secret_key = "$(printf '%s' "${{ secrets.B2_APPLICATION_KEY }}" | tr -d '\r\n')"
skip_credentials_validation = true
skip_metadata_api_check = true
skip_region_validation = true
skip_requesting_account_id = true
use_path_style = true
EOF
- name: Set up Terraform
uses: hashicorp/setup-terraform@v2
with:
terraform_version: 1.6.6
- name: Inject sensitive secrets
working-directory: terraform
run: |
echo 'proxmox_password = "${{ secrets.PROXMOX_PASSWORD }}"' >> terraform.tfvars
- name: Terraform Init
working-directory: terraform
run: terraform init -reconfigure -backend-config=backend.hcl
run: terraform init
- name: Terraform Plan
working-directory: terraform
run: terraform plan -out=tfplan
- name: Block accidental destroy
env:
ALLOW_TF_DESTROY: ${{ secrets.ALLOW_TF_DESTROY }}
working-directory: terraform
run: |
terraform show -json -no-color tfplan > tfplan.json
DESTROY_COUNT=$(python3 -c 'import json; raw=open("tfplan.json","rb").read().decode("utf-8","ignore"); start=raw.find("{"); data=json.JSONDecoder().raw_decode(raw[start:])[0]; print(sum(1 for rc in data.get("resource_changes", []) if "delete" in rc.get("change", {}).get("actions", [])))')
echo "Planned deletes: $DESTROY_COUNT"
if [ "$DESTROY_COUNT" -gt 0 ] && [ "${ALLOW_TF_DESTROY}" != "true" ]; then
echo "Destroy actions detected. Set ALLOW_TF_DESTROY=true to allow."
exit 1
fi
run: terraform plan
- name: Terraform Apply
working-directory: terraform
run: terraform apply -auto-approve tfplan
run: terraform apply -auto-approve

View File

@@ -1,94 +1,43 @@
name: Terraform Destroy
run-name: ${{ gitea.actor }} requested Terraform destroy
name: Gitea Destroy Terraform
run-name: ${{ gitea.actor }} triggered a Terraform Destroy 🧨
on:
workflow_dispatch:
inputs:
confirm:
description: "Type NUKE to confirm destroy"
required: true
type: string
target:
description: "Destroy scope"
required: true
default: all
type: choice
options:
- all
- control-planes
- workers
concurrency:
group: terraform-global
cancel-in-progress: false
push:
branches:
- destroy
jobs:
destroy:
name: "Terraform Destroy"
runs-on: ubuntu-latest
steps:
- name: Validate confirmation phrase
run: |
if [ "${{ inputs.confirm }}" != "NUKE" ]; then
echo "Confirmation failed. You must type NUKE."
exit 1
fi
permissions:
contents: read
pull-requests: write
env:
TF_VAR_TS_AUTHKEY: ${{ secrets.TAILSCALE_KEY }}
TF_VAR_ssh_key: ${{ secrets.SSH_PUBLIC_KEY }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Create Terraform secret files
working-directory: terraform
run: |
cat > secrets.auto.tfvars << EOF
pm_api_token_secret = "${{ secrets.PM_API_TOKEN_SECRET }}"
SSH_KEY_PUBLIC = "$(printf '%s' "${{ secrets.SSH_KEY_PUBLIC }}" | tr -d '\r\n')"
EOF
cat > backend.hcl << EOF
bucket = "${{ secrets.B2_TF_BUCKET }}"
key = "terraform.tfstate"
region = "us-east-005"
endpoints = {
s3 = "${{ secrets.B2_TF_ENDPOINT }}"
}
access_key = "$(printf '%s' "${{ secrets.B2_KEY_ID }}" | tr -d '\r\n')"
secret_key = "$(printf '%s' "${{ secrets.B2_APPLICATION_KEY }}" | tr -d '\r\n')"
skip_credentials_validation = true
skip_metadata_api_check = true
skip_region_validation = true
skip_requesting_account_id = true
use_path_style = true
EOF
- name: Set up Terraform
uses: hashicorp/setup-terraform@v2
with:
terraform_version: 1.6.6
- name: Terraform Init
working-directory: terraform
run: terraform init -reconfigure -backend-config=backend.hcl
- name: Terraform Destroy Plan
- name: Inject sensitive secrets
working-directory: terraform
run: |
case "${{ inputs.target }}" in
all)
terraform plan -destroy -out=tfdestroy
;;
control-planes)
terraform plan -destroy -target=proxmox_vm_qemu.control_planes -out=tfdestroy
;;
workers)
terraform plan -destroy -target=proxmox_vm_qemu.workers -out=tfdestroy
;;
*)
echo "Invalid destroy target: ${{ inputs.target }}"
exit 1
;;
esac
echo 'proxmox_password = "${{ secrets.PROXMOX_PASSWORD }}"' >> terraform.tfvars
- name: Terraform Destroy Apply
- name: Terraform Init
working-directory: terraform
run: terraform apply -auto-approve tfdestroy
run: terraform init
- name: Terraform Destroy
working-directory: terraform
run: terraform destroy -auto-approve

View File

@@ -1,4 +1,5 @@
name: Terraform Plan
name: Gitea Actions Demo
run-name: ${{ gitea.actor }} is testing out Gitea Actions 🚀
on:
push:
@@ -6,56 +7,38 @@ on:
- stage
- test
concurrency:
group: terraform-global
cancel-in-progress: false
jobs:
terraform:
name: "Terraform Plan"
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
env:
TF_VAR_TAILSCALE_KEY: ${{ secrets.TAILSCALE_KEY }}
TF_VAR_TS_AUTHKEY: ${{ secrets.TAILSCALE_KEY }}
TF_VAR_ssh_key: ${{ secrets.SSH_PUBLIC_KEY }}
steps:
- name: Checkout repository
uses: https://gitea.com/actions/checkout@v4
- name: Create secrets.tfvars
working-directory: terraform
run: |
echo "PM_API_TOKEN_SECRET length: $(echo -n '${{ secrets.PM_API_TOKEN_SECRET }}' | wc -c)"
cat > secrets.auto.tfvars << EOF
pm_api_token_secret = "${{ secrets.PM_API_TOKEN_SECRET }}"
SSH_KEY_PUBLIC = "$(printf '%s' "${{ secrets.SSH_KEY_PUBLIC }}" | tr -d '\r\n')"
EOF
cat > backend.hcl << EOF
bucket = "${{ secrets.B2_TF_BUCKET }}"
key = "terraform.tfstate"
region = "us-east-005"
endpoints = {
s3 = "${{ secrets.B2_TF_ENDPOINT }}"
}
access_key = "$(printf '%s' "${{ secrets.B2_KEY_ID }}" | tr -d '\r\n')"
secret_key = "$(printf '%s' "${{ secrets.B2_APPLICATION_KEY }}" | tr -d '\r\n')"
skip_credentials_validation = true
skip_metadata_api_check = true
skip_region_validation = true
skip_requesting_account_id = true
use_path_style = true
EOF
echo "Created secrets.auto.tfvars:"
cat secrets.auto.tfvars | sed 's/=.*/=***/'
echo "Using token ID from terraform.tfvars:"
grep '^pm_api_token_id' terraform.tfvars
uses: actions/checkout@v4
- name: Set up Terraform
uses: hashicorp/setup-terraform@v2
with:
terraform_version: 1.6.6
terraform_wrapper: false
- name: Inject sensitive secrets
working-directory: terraform
run: |
echo 'proxmox_password = "${{ secrets.PROXMOX_PASSWORD }}"' >> terraform.tfvars
- name: Terraform Init
working-directory: terraform
run: terraform init -reconfigure -backend-config=backend.hcl
run: terraform init
- name: Terraform Format Check
working-directory: terraform
@@ -69,20 +52,9 @@ jobs:
working-directory: terraform
run: terraform plan -out=tfplan
- name: Block accidental destroy
env:
ALLOW_TF_DESTROY: ${{ secrets.ALLOW_TF_DESTROY }}
working-directory: terraform
run: |
terraform show -json -no-color tfplan > tfplan.json
DESTROY_COUNT=$(python3 -c 'import json; raw=open("tfplan.json","rb").read().decode("utf-8","ignore"); start=raw.find("{"); data=json.JSONDecoder().raw_decode(raw[start:])[0]; print(sum(1 for rc in data.get("resource_changes", []) if "delete" in rc.get("change", {}).get("actions", [])))')
echo "Planned deletes: $DESTROY_COUNT"
if [ "$DESTROY_COUNT" -gt 0 ] && [ "${ALLOW_TF_DESTROY}" != "true" ]; then
echo "Destroy actions detected. Set ALLOW_TF_DESTROY=true to allow."
exit 1
fi
- name: Upload Terraform Plan
uses: actions/upload-artifact@v3
with:
name: terraform-plan
path: terraform/tfplan
# NOTE: Disabled artifact upload for now.
# On this Gitea/act runner, post-job hooks from artifact actions can
# fail during "Complete job" even when all Terraform steps succeeded.
# Re-enable once runner/action compatibility is confirmed.

4
.gitignore vendored
View File

@@ -1,6 +1,2 @@
./terraform/.terraform
terraform/.terraform/
terraform/test-apply.sh
terraform/test-plan.sh
terraform/test-destroy.sh
terraform/tfplan

View File

@@ -1,42 +0,0 @@
# Kubeadm Cluster Layout (NixOS)
This folder defines role-based NixOS configs for a kubeadm cluster.
## Topology
- Control planes: `cp-1`, `cp-2`, `cp-3`
- Workers: `wk-1`, `wk-2`, `wk-3`
## What this provides
- Shared Kubernetes/node prerequisites in `modules/k8s-common.nix`
- Role-specific settings for control planes and workers
- Host configs for each node in `hosts/`
## Hardware config files
Each host file optionally imports `hosts/hardware/<host>.nix` if present.
Copy each node's generated hardware config into this folder:
```bash
sudo nixos-generate-config
sudo cp /etc/nixos/hardware-configuration.nix ./hosts/hardware/cp-1.nix
```
Repeat for each node (`cp-2`, `cp-3`, `wk-1`, `wk-2`, `wk-3`).
## Deploy approach
Start from one node at a time while experimenting:
```bash
sudo nixos-rebuild switch --flake .#cp-1
```
For remote target-host workflows, use your preferred deploy wrapper later
(`nixos-rebuild --target-host ...` or deploy-rs/colmena).
## Notes
- This does not run `kubeadm init/join` automatically.
- It prepares OS/runtime/kernel prerequisites so kubeadm bootstrapping is clean.

View File

@@ -1,26 +0,0 @@
{
description = "NixOS kubeadm cluster configs";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
};
outputs = { nixpkgs, ... }:
let
system = "x86_64-linux";
mkHost = hostModules:
nixpkgs.lib.nixosSystem {
inherit system;
modules = hostModules;
};
in {
nixosConfigurations = {
cp-1 = mkHost [ ./hosts/cp-1.nix ];
cp-2 = mkHost [ ./hosts/cp-2.nix ];
cp-3 = mkHost [ ./hosts/cp-3.nix ];
wk-1 = mkHost [ ./hosts/wk-1.nix ];
wk-2 = mkHost [ ./hosts/wk-2.nix ];
wk-3 = mkHost [ ./hosts/wk-3.nix ];
};
};
}

View File

@@ -1,14 +0,0 @@
{ lib, ... }:
{
imports =
[
../modules/k8s-common.nix
../modules/k8s-control-plane.nix
]
++ lib.optional (builtins.pathExists ./hardware/cp-1.nix) ./hardware/cp-1.nix;
networking.hostName = "cp-1";
system.stateVersion = "25.05";
}

View File

@@ -1,14 +0,0 @@
{ lib, ... }:
{
imports =
[
../modules/k8s-common.nix
../modules/k8s-control-plane.nix
]
++ lib.optional (builtins.pathExists ./hardware/cp-2.nix) ./hardware/cp-2.nix;
networking.hostName = "cp-2";
system.stateVersion = "25.05";
}

View File

@@ -1,14 +0,0 @@
{ lib, ... }:
{
imports =
[
../modules/k8s-common.nix
../modules/k8s-control-plane.nix
]
++ lib.optional (builtins.pathExists ./hardware/cp-3.nix) ./hardware/cp-3.nix;
networking.hostName = "cp-3";
system.stateVersion = "25.05";
}

View File

@@ -1,14 +0,0 @@
{ lib, ... }:
{
imports =
[
../modules/k8s-common.nix
../modules/k8s-worker.nix
]
++ lib.optional (builtins.pathExists ./hardware/wk-1.nix) ./hardware/wk-1.nix;
networking.hostName = "wk-1";
system.stateVersion = "25.05";
}

View File

@@ -1,14 +0,0 @@
{ lib, ... }:
{
imports =
[
../modules/k8s-common.nix
../modules/k8s-worker.nix
]
++ lib.optional (builtins.pathExists ./hardware/wk-2.nix) ./hardware/wk-2.nix;
networking.hostName = "wk-2";
system.stateVersion = "25.05";
}

View File

@@ -1,14 +0,0 @@
{ lib, ... }:
{
imports =
[
../modules/k8s-common.nix
../modules/k8s-worker.nix
]
++ lib.optional (builtins.pathExists ./hardware/wk-3.nix) ./hardware/wk-3.nix;
networking.hostName = "wk-3";
system.stateVersion = "25.05";
}

View File

@@ -1,35 +0,0 @@
{ pkgs, ... }:
{
boot.kernelModules = [ "overlay" "br_netfilter" ];
boot.kernel.sysctl = {
"net.ipv4.ip_forward" = 1;
"net.bridge.bridge-nf-call-iptables" = 1;
"net.bridge.bridge-nf-call-ip6tables" = 1;
};
virtualisation.containerd.enable = true;
services.openssh.enable = true;
services.openssh.settings = {
PasswordAuthentication = false;
KbdInteractiveAuthentication = false;
};
environment.systemPackages = with pkgs; [
containerd
cri-tools
cni-plugins
kubernetes
kubectl
kubernetes-helm
conntrack-tools
socat
ethtool
ipvsadm
jq
curl
vim
];
}

View File

@@ -1,14 +0,0 @@
{
networking.firewall.allowedTCPPorts = [
6443
2379
2380
10250
10257
10259
];
networking.firewall.allowedUDPPorts = [
8472
];
}

View File

@@ -1,11 +0,0 @@
{
networking.firewall.allowedTCPPorts = [
10250
30000
32767
];
networking.firewall.allowedUDPPorts = [
8472
];
}

View File

@@ -1,27 +0,0 @@
# NixOS Proxmox Template Base
This folder contains a minimal NixOS base config you can copy into a new
template VM build.
## Files
- `flake.nix`: pins `nixos-24.11` and exposes one host config.
- `configuration.nix`: base settings for Proxmox guest use.
## Before first apply
1. Replace `REPLACE_WITH_YOUR_SSH_PUBLIC_KEY` in `configuration.nix`.
2. Add `hardware-configuration.nix` from the VM install:
- `nixos-generate-config --root /`
- copy `/etc/nixos/hardware-configuration.nix` next to `configuration.nix`
## Build/apply example inside the VM
```bash
sudo nixos-rebuild switch --flake .#template
```
## Notes
- This is intentionally minimal and avoids cloud-init assumptions.
- If you want host-specific settings, create additional modules and import them.

View File

@@ -1,57 +0,0 @@
{ lib, pkgs, ... }:
{
imports =
lib.optional (builtins.pathExists ./hardware-configuration.nix)
./hardware-configuration.nix;
networking.hostName = "nixos-template";
networking.useDHCP = lib.mkDefault true;
networking.nameservers = [ "1.1.1.1" "8.8.8.8" ];
boot.loader.systemd-boot.enable = lib.mkForce false;
boot.loader.grub = {
enable = true;
device = "/dev/sda";
};
services.qemuGuest.enable = true;
services.openssh.enable = true;
services.tailscale.enable = true;
services.openssh.settings = {
PasswordAuthentication = false;
KbdInteractiveAuthentication = false;
PermitRootLogin = "prohibit-password";
};
programs.fish.enable = true;
users.users.micqdf = {
isNormalUser = true;
extraGroups = [ "wheel" ];
shell = pkgs.fish;
};
security.sudo.wheelNeedsPassword = false;
environment.systemPackages = with pkgs; [
btop
curl
dig
eza
fd
fzf
git
htop
jq
ripgrep
tailscale
tree
unzip
vim
neovim
wget
];
system.stateVersion = "25.05";
}

View File

@@ -1,14 +0,0 @@
{
description = "Base NixOS config for Proxmox template";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
};
outputs = { nixpkgs, ... }: {
nixosConfigurations.template = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [ ./configuration.nix ];
};
};
}

View File

@@ -1,24 +1,79 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/telmate/proxmox" {
version = "3.0.2-rc07"
constraints = "3.0.2-rc07"
provider "registry.terraform.io/hashicorp/local" {
version = "2.5.2"
hashes = [
"h1:zp5hpQJQ4t4zROSLqdltVpBO+Riy9VugtfFbpyTw1aM=",
"zh:2ee860cd0a368b3eaa53f4a9ea46f16dab8a97929e813ea6ef55183f8112c2ca",
"zh:415965fd915bae2040d7f79e45f64d6e3ae61149c10114efeac1b34687d7296c",
"zh:6584b2055df0e32062561c615e3b6b2c291ca8c959440adda09ef3ec1e1436bd",
"zh:65dcfad71928e0a8dd9befc22524ed686be5020b0024dc5cca5184c7420eeb6b",
"zh:7253dc29bd265d33f2791ac4f779c5413f16720bb717de8e6c5fcb2c858648ea",
"zh:7ec8993da10a47606670f9f67cfd10719a7580641d11c7aa761121c4a2bd66fb",
"zh:999a3f7a9dcf517967fc537e6ec930a8172203642fb01b8e1f78f908373db210",
"zh:a50e6df7280eb6584a5fd2456e3f5b6df13b2ec8a7fa4605511e438e1863be42",
"zh:b25b329a1e42681c509d027fee0365414f0cc5062b65690cfc3386aab16132ae",
"zh:c028877fdb438ece48f7bc02b65bbae9ca7b7befbd260e519ccab6c0cbb39f26",
"zh:cf0eaa3ea9fcc6d62793637947f1b8d7c885b6ad74695ab47e134e4ff132190f",
"zh:d5ade3fae031cc629b7c512a7b60e46570f4c41665e88a595d7efd943dde5ab2",
"zh:f388c15ad1ecfc09e7361e3b98bae9b627a3a85f7b908c9f40650969c949901c",
"zh:f415cc6f735a3971faae6ac24034afdb9ee83373ef8de19a9631c187d5adc7db",
"h1:JlMZD6nYqJ8sSrFfEAH0Vk/SL8WLZRmFaMUF9PJK5wM=",
"zh:136299545178ce281c56f36965bf91c35407c11897f7082b3b983d86cb79b511",
"zh:3b4486858aa9cb8163378722b642c57c529b6c64bfbfc9461d940a84cd66ebea",
"zh:4855ee628ead847741aa4f4fc9bed50cfdbf197f2912775dd9fe7bc43fa077c0",
"zh:4b8cd2583d1edcac4011caafe8afb7a95e8110a607a1d5fb87d921178074a69b",
"zh:52084ddaff8c8cd3f9e7bcb7ce4dc1eab00602912c96da43c29b4762dc376038",
"zh:71562d330d3f92d79b2952ffdda0dad167e952e46200c767dd30c6af8d7c0ed3",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:805f81ade06ff68fa8b908d31892eaed5c180ae031c77ad35f82cb7a74b97cf4",
"zh:8b6b3ebeaaa8e38dd04e56996abe80db9be6f4c1df75ac3cccc77642899bd464",
"zh:ad07750576b99248037b897de71113cc19b1a8d0bc235eb99173cc83d0de3b1b",
"zh:b9f1c3bfadb74068f5c205292badb0661e17ac05eb23bfe8bd809691e4583d0e",
"zh:cc4cbcd67414fefb111c1bf7ab0bc4beb8c0b553d01719ad17de9a047adff4d1",
]
}
provider "registry.terraform.io/hashicorp/null" {
version = "3.2.3"
hashes = [
"h1:+AnORRgFbRO6qqcfaQyeX80W0eX3VmjadjnUFUJTiXo=",
"zh:22d062e5278d872fe7aed834f5577ba0a5afe34a3bdac2b81f828d8d3e6706d2",
"zh:23dead00493ad863729495dc212fd6c29b8293e707b055ce5ba21ee453ce552d",
"zh:28299accf21763ca1ca144d8f660688d7c2ad0b105b7202554ca60b02a3856d3",
"zh:55c9e8a9ac25a7652df8c51a8a9a422bd67d784061b1de2dc9fe6c3cb4e77f2f",
"zh:756586535d11698a216291c06b9ed8a5cc6a4ec43eee1ee09ecd5c6a9e297ac1",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:9d5eea62fdb587eeb96a8c4d782459f4e6b73baeece4d04b4a40e44faaee9301",
"zh:a6355f596a3fb8fc85c2fb054ab14e722991533f87f928e7169a486462c74670",
"zh:b5a65a789cff4ada58a5baffc76cb9767dc26ec6b45c00d2ec8b1b027f6db4ed",
"zh:db5ab669cf11d0e9f81dc380a6fdfcac437aea3d69109c7aef1a5426639d2d65",
"zh:de655d251c470197bcbb5ac45d289595295acb8f829f6c781d4a75c8c8b7c7dd",
"zh:f5c68199f2e6076bce92a12230434782bf768103a427e9bb9abee99b116af7b5",
]
}
provider "registry.terraform.io/hashicorp/template" {
version = "2.2.0"
hashes = [
"h1:94qn780bi1qjrbC3uQtjJh3Wkfwd5+tTtJHOb7KTg9w=",
"zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386",
"zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53",
"zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603",
"zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16",
"zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776",
"zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451",
"zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae",
"zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde",
"zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d",
"zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2",
]
}
provider "registry.terraform.io/telmate/proxmox" {
version = "3.0.1-rc8"
constraints = "3.0.1-rc8"
hashes = [
"h1:W5X4T5AZUaqO++aAequNECUKJaXLC5upcws6Vp7mkBk=",
"zh:0272f1600251abf9b139c2683f83cde0a907ac762f5ead058b84de18ddc1d78e",
"zh:328e708a8063a133516612b17c8983a9372fa42766530925d1d37aeb1daa30ec",
"zh:3449150e4d57f79af6f9583e93e3a5ab84fb475bc594de75b968534f57af2871",
"zh:58d803a0203241214f673c80350d43ce1a5ce57b21b83ba08d0d08e8c389dcc4",
"zh:59e3e99afc1ea404e530100725403c1610d682cfd27eeeaf35190c119b76a4db",
"zh:666cb7d299824152714202e8fda000c2e37346f2ae6d0a0e3c6f6bd68ef5d9ca",
"zh:6a1290b85e7bf953664b21b2a1ea554923a060f2a8347d8d5bb3d2b5157f85d2",
"zh:72230960c49fe7050a5e80ee10fa24cdac94dbab82744bccb6aa251741eb5aa9",
"zh:91f655c41f5af9a9fdcf6104c3d0a553eaa0fb3390af81051e744f30accd5b52",
"zh:aa08a22bf737d5840573bb6030617ab6bba2a292f4b9c88b20477cdcfb9676a9",
"zh:b72012cc284cad488207532b6668c58999c972d837b5f486db1d7466d686d5fd",
"zh:e24f934249a6ab4d3705c1398226d4d9df1e81ef8a36592389be02bc35cc661f",
"zh:e9e6bcef8b6a6b5ff2317168c2c23e4c55ae23f883ba158d2c4fd6324a0413e5",
"zh:ffa1e742a8c50babd8dbfcd6884740f9bea8453ec4d832717ff006a4fbfffa91",
]
}

70
terraform/cloud-init.tf Normal file
View File

@@ -0,0 +1,70 @@
### Alpaca cloud-init template
data "template_file" "cloud_init_alpaca" {
count = var.alpaca_vm_count
template = file("${path.module}/files/cloud_init.yaml")
vars = {
ssh_key = var.ssh_key
hostname = "alpaca-${count.index + 1}"
domain = "home.arpa"
TS_AUTHKEY = var.TS_AUTHKEY
}
}
resource "local_file" "cloud_init_alpaca" {
count = var.alpaca_vm_count
content = data.template_file.cloud_init_alpaca[count.index].rendered
filename = "${path.module}/files/cloud_init_alpaca_${count.index + 1}.yaml"
}
resource "null_resource" "upload_cloud_init_alpaca" {
count = var.alpaca_vm_count
connection {
type = "ssh"
user = "root"
host = var.target_node
}
provisioner "file" {
source = local_file.cloud_init_alpaca[count.index].filename
destination = "/var/lib/vz/snippets/cloud_init_alpaca_${count.index + 1}.yaml"
}
}
### Llama cloud-init template
data "template_file" "cloud_init_llama" {
count = var.llama_vm_count
template = file("${path.module}/files/cloud_init.yaml")
vars = {
ssh_key = var.ssh_key
hostname = "llama-${count.index + 1}"
domain = "home.arpa"
TS_AUTHKEY = var.TS_AUTHKEY
}
}
resource "local_file" "cloud_init_llama" {
count = var.llama_vm_count
content = data.template_file.cloud_init_llama[count.index].rendered
filename = "${path.module}/files/cloud_init_llama_${count.index + 1}.yaml"
}
resource "null_resource" "upload_cloud_init_llama" {
count = var.llama_vm_count
connection {
type = "ssh"
user = "root"
host = var.target_node
}
provisioner "file" {
source = local_file.cloud_init_llama[count.index].filename
destination = "/var/lib/vz/snippets/cloud_init_llama_${count.index + 1}.yaml"
}
}

View File

@@ -1,9 +1,10 @@
#cloud-config
hostname: ${hostname}
fqdn: ${hostname}.${domain}
ssh_authorized_keys:
- ${ssh_key}
runcmd:
- curl -fsSL https://tailscale.com/install.sh | sh
- tailscale up --auth-key=${TS_AUTHKEY}
- tailscale set --ssh

View File

@@ -1,6 +0,0 @@
#cloud-config
runcmd:
- curl -fsSL https://tailscale.com/install.sh | sh
- tailscale up --auth-key=${TS_AUTHKEY}
- tailscale set --ssh

View File

@@ -1,15 +0,0 @@
#cloud-config
hostname: ${hostname}
manage_etc_hosts: true
resolv_conf:
nameservers:
- 8.8.8.8
- 1.1.1.1
preserve_hostname: false
fqdn: ${hostname}.${domain}
users:
- name: micqdf
ssh_authorized_keys:
- ${SSH_KEY_PUBLIC}

View File

@@ -1,61 +1,42 @@
terraform {
backend "s3" {}
required_providers {
proxmox = {
source = "Telmate/proxmox"
version = "3.0.2-rc07"
version = "3.0.1-rc8"
}
}
}
provider "proxmox" {
pm_api_url = var.pm_api_url
pm_api_token_id = var.pm_api_token_id
pm_api_token_secret = var.pm_api_token_secret
pm_user = var.pm_user
pm_password = var.proxmox_password
pm_tls_insecure = true
}
resource "proxmox_vm_qemu" "control_planes" {
count = var.control_plane_count
name = "cp-${count.index + 1}"
vmid = var.control_plane_vmid_start + count.index
resource "proxmox_vm_qemu" "alpacas" {
count = var.alpaca_vm_count
name = "alpaca-${count.index + 1}"
vmid = 500 + count.index + 1
target_node = var.target_node
clone = var.clone_template
full_clone = true
os_type = "cloud-init"
full_clone = false
agent = 1
cpu {
sockets = 1
cores = var.control_plane_cores
}
memory = var.control_plane_memory_mb
sockets = var.sockets
cores = var.cores
memory = var.memory
scsihw = "virtio-scsi-pci"
boot = "order=scsi0"
bootdisk = "scsi0"
ipconfig0 = "ip=dhcp"
ciuser = "micqdf"
sshkeys = var.SSH_KEY_PUBLIC
cicustom = "user=local:snippets/cloud_init_alpaca_${count.index + 1}.yaml"
depends_on = [null_resource.upload_cloud_init_alpaca]
disks {
scsi {
scsi0 {
disk {
size = var.control_plane_disk_size
slot = "scsi0"
type = "disk"
storage = var.storage
}
}
}
ide {
ide2 {
cloudinit {
storage = var.storage
}
}
}
size = var.disk_size
}
network {
@@ -66,47 +47,30 @@ resource "proxmox_vm_qemu" "control_planes" {
}
resource "proxmox_vm_qemu" "workers" {
count = var.worker_count
name = "wk-${count.index + 1}"
vmid = var.worker_vmid_start + count.index
resource "proxmox_vm_qemu" "llamas" {
count = var.llama_vm_count
name = "llama-${count.index + 1}"
vmid = 600 + count.index + 1
target_node = var.target_node
clone = var.clone_template
full_clone = true
os_type = "cloud-init"
full_clone = false
agent = 1
cpu {
sockets = 1
cores = var.worker_cores[count.index]
}
memory = var.worker_memory_mb[count.index]
sockets = var.sockets
cores = var.cores
memory = var.memory
scsihw = "virtio-scsi-pci"
boot = "order=scsi0"
bootdisk = "scsi0"
ipconfig0 = "ip=dhcp"
ciuser = "micqdf"
sshkeys = var.SSH_KEY_PUBLIC
cicustom = "user=local:snippets/cloud_init_llama_${count.index + 1}.yaml"
depends_on = [null_resource.upload_cloud_init_llama]
disks {
scsi {
scsi0 {
disk {
size = var.worker_disk_size
slot = "scsi0"
type = "disk"
storage = var.storage
size = var.disk_size
}
}
}
ide {
ide2 {
cloudinit {
storage = var.storage
}
}
}
}
network {
id = 0
@@ -114,3 +78,4 @@ resource "proxmox_vm_qemu" "workers" {
bridge = var.bridge
}
}

View File

@@ -1,21 +1,22 @@
output "control_plane_vm_ids" {
output "alpaca_vm_ids" {
value = {
for i in range(var.control_plane_count) :
"cp-${i + 1}" => proxmox_vm_qemu.control_planes[i].vmid
for i in range(var.alpaca_count) :
"alpaca-${i + 1}" => proxmox_vm_qemu.alpacas[i].vmid
}
}
output "control_plane_vm_names" {
value = [for vm in proxmox_vm_qemu.control_planes : vm.name]
output "alpaca_vm_names" {
value = [for vm in proxmox_vm_qemu.alpacas : vm.name]
}
output "worker_vm_ids" {
output "llama_vm_ids" {
value = {
for i in range(var.worker_count) :
"wk-${i + 1}" => proxmox_vm_qemu.workers[i].vmid
for i in range(var.llama_count) :
"llama-${i + 1}" => proxmox_vm_qemu.llamas[i].vmid
}
}
output "worker_vm_names" {
value = [for vm in proxmox_vm_qemu.workers : vm.name]
output "llama_vm_names" {
value = [for vm in proxmox_vm_qemu.llamas : vm.name]
}

View File

@@ -1,19 +1,13 @@
target_node = "flex"
clone_template = "nixos-template"
clone_template = "Alpine-TemplateV2"
vm_name = "alpine-vm"
cores = 2
memory = 2048
disk_size = "15G"
sockets = 1
bridge = "vmbr0"
disk_type = "scsi"
storage = "Flash"
pm_api_url = "https://100.105.0.115:8006/api2/json"
pm_api_token_id = "terraform-prov@pve!mytoken"
pm_user = "terraform-prov@pve"
control_plane_count = 3
worker_count = 3
control_plane_vmid_start = 701
worker_vmid_start = 711
control_plane_cores = 1
control_plane_memory_mb = 4096
control_plane_disk_size = "40G"
worker_cores = [4, 4, 3]
worker_memory_mb = [12288, 12288, 12288]
worker_disk_size = "60G"

View File

@@ -1,22 +1,5 @@
variable "pm_api_token_id" {
variable "proxmox_password" {
type = string
description = "Proxmox API token ID (format: user@realm!tokenid)"
validation {
condition = can(regex(".+!.+", trimspace(var.pm_api_token_id)))
error_message = "pm_api_token_id must be in format user@realm!tokenid."
}
}
variable "pm_api_token_secret" {
type = string
sensitive = true
description = "Proxmox API token secret"
validation {
condition = length(trimspace(var.pm_api_token_secret)) > 0
error_message = "pm_api_token_secret cannot be empty. Check your Gitea secret PM_API_TOKEN_SECRET."
}
}
variable "target_node" {
@@ -27,70 +10,34 @@ variable "clone_template" {
type = string
}
variable "control_plane_count" {
type = number
default = 3
description = "Number of control plane VMs"
}
variable "worker_count" {
type = number
default = 3
description = "Number of worker VMs"
}
variable "control_plane_vmid_start" {
type = number
default = 701
description = "Starting VMID for control plane VMs"
}
variable "worker_vmid_start" {
type = number
default = 711
description = "Starting VMID for worker VMs"
}
variable "control_plane_cores" {
type = number
default = 1
description = "vCPU cores per control plane VM"
}
variable "control_plane_memory_mb" {
type = number
default = 4096
description = "Memory in MB per control plane VM"
}
variable "worker_cores" {
type = list(number)
default = [4, 4, 3]
description = "vCPU cores for each worker VM"
}
variable "worker_memory_mb" {
type = list(number)
default = [12288, 12288, 12288]
description = "Memory in MB for each worker VM"
}
variable "control_plane_disk_size" {
variable "vm_name" {
type = string
default = "40G"
description = "Disk size for control plane VMs"
}
variable "worker_disk_size" {
variable "cores" {
type = number
}
variable "memory" {
type = number
}
variable "disk_size" {
type = string
default = "60G"
description = "Disk size for worker VMs"
}
variable "sockets" {
type = number
}
variable "bridge" {
type = string
}
variable "disk_type" {
type = string
}
variable "storage" {
type = string
}
@@ -99,7 +46,42 @@ variable "pm_api_url" {
type = string
}
variable "SSH_KEY_PUBLIC" {
variable "pm_user" {
type = string
description = "Public SSH key injected via cloud-init"
}
variable "alpaca_count" {
type = number
default = 1
description = "How many Alpaca VMs to create"
}
variable "llama_count" {
type = number
default = 1
description = "How many Llama VMs to create"
}
variable "alpaca_vm_count" {
type = number
default = 1
description = "How many Alpaca VMs to create"
}
variable "llama_vm_count" {
type = number
default = 1
description = "How many Llama VMs to create"
}
variable "TS_AUTHKEY" {
type = string
description = "Tailscale auth key used in cloud-init"
}
variable "ssh_key" {
type = string
description = "Public SSH key used by cloud-init"
}