Compare commits: destroy/no...3539ae9b50 (131 commits)
| SHA1 | Author | Date | |
|---|---|---|---|
| 3539ae9b50 | |||
| 5669305e59 | |||
| f341816112 | |||
| c04ef106a3 | |||
| c154ff4d15 | |||
| 8bcc162956 | |||
| b0779c51c0 | |||
| 9fe845b53d | |||
| 885a92f494 | |||
| 91dd20e60e | |||
| abac6300ca | |||
| 7206d8cd41 | |||
| a42d44bb27 | |||
| a99516a2a3 | |||
| 5c69abf9ff | |||
| 5fc8bcc406 | |||
| 16d5a87586 | |||
| 9a02c05983 | |||
| 1304afd793 | |||
| d1dcbe0feb | |||
| df4740071a | |||
| 54c0b684c8 | |||
| 2577669e12 | |||
| dd3a37dfd1 | |||
| 35f0a0dccb | |||
| 583d5c3591 | |||
| 77626ed93c | |||
| a5d5ddb618 | |||
| a5f8d72bff | |||
| 335254b7b2 | |||
| 21be01346b | |||
| ba1884bbc5 | |||
| c516c8ba35 | |||
| 8b8bab77b0 | |||
| 93bba9fbfc | |||
| 6ef807e59c | |||
| 8887a8bb87 | |||
| 32b1fcec58 | |||
| c87bb16f10 | |||
| a891109ee9 | |||
| 0ea9888854 | |||
| 3261b18f37 | |||
| 2d455929bd | |||
| 9740e9c6fb | |||
| f12e15e566 | |||
| b3521d6c02 | |||
| 17834b3aa7 | |||
| 017d5ce00d | |||
| 6fada2f32a | |||
| 510ba707ad | |||
| a2d61d6972 | |||
| 6fbc4dd80f | |||
| 5acb8370cc | |||
| f207f774de | |||
| 1a309cbe4f | |||
| 83d277d144 | |||
| 5e1fd2e9f3 | |||
| 3335020db5 | |||
| 9ce06671c9 | |||
| a7f68c0c4b | |||
| d1a7ccc98c | |||
| afe19041d9 | |||
| c9be2a2fc8 | |||
| 5fc58dfc98 | |||
| 1c4a27bca3 | |||
| 47f950d667 | |||
| b0768db7a7 | |||
| c0dd091b51 | |||
| 595df12b3e | |||
| 735e9df9f1 | |||
| e714a56980 | |||
| 4247d16c24 | |||
| 59fbbb07df | |||
| c3a0ef251c | |||
| 841abb8fe3 | |||
| 364dc6b35b | |||
| 9c1476b6bf | |||
| 4a123e0fb6 | |||
| 5633d18276 | |||
| c6fc9edcc4 | |||
| c8b86c7443 | |||
| 79b535bb59 | |||
| 84e45b4c61 | |||
| 080752e8a0 | |||
| f063baa349 | |||
| bada1b69da | |||
| 7d04a2c475 | |||
| e04f10c5a3 | |||
| 0e7860bfe7 | |||
| 0c0cbc5def | |||
| fcdde6cf1f | |||
| 524bd92da4 | |||
| ba3fe8e7ff | |||
| 724a433d5e | |||
| bfbf0680e2 | |||
| 8f1ee24440 | |||
| 73dd2e18ff | |||
| 8d9eea6728 | |||
| 96f6d94c3a | |||
| 8d49e447e6 | |||
| 99f3610a84 | |||
| d634e124a3 | |||
| 70b9b5e5b7 | |||
| 93d3f94100 | |||
| 70139b2693 | |||
| 8773f5026c | |||
| 1b6eca0f69 | |||
| 9551e0ad53 | |||
| ffc1c1e785 | |||
| 3e55a72767 | |||
| fcbd6a0b1d | |||
| 7227782d4f | |||
| 6dec58856e | |||
| 437d7ab8d1 | |||
| ac2db5a1cf | |||
| 74b2fb8175 | |||
| 1acd33cb87 | |||
| f9edeb8be5 | |||
| 661fb95830 | |||
| 50ae59602c | |||
| 507c102dad | |||
| b26ff582a4 | |||
| ec07db08db | |||
| 114bfb9772 | |||
| 5509e14066 | |||
| df088a7903 | |||
| dcec6c3648 | |||
| a0ee1b8a4b | |||
| 39d4e2ac65 | |||
| 6d06cfac02 | |||
| e669353638 | |||
.gitea/workflows/kubeadm-bootstrap.yml (new file, 129 lines)
@@ -0,0 +1,129 @@
```yaml
name: Kubeadm Bootstrap
run-name: ${{ gitea.actor }} requested kubeadm bootstrap

on:
  workflow_dispatch:
    inputs:
      confirm:
        description: "Type BOOTSTRAP to run rebuild + kubeadm bootstrap"
        required: true
        type: string

concurrency:
  group: kubeadm-bootstrap
  cancel-in-progress: false

jobs:
  bootstrap:
    name: "Rebuild and Bootstrap Cluster"
    runs-on: ubuntu-latest

    steps:
      - name: Validate confirmation phrase
        run: |
          if [ "${{ inputs.confirm }}" != "BOOTSTRAP" ]; then
            echo "Confirmation failed. You must type BOOTSTRAP."
            exit 1
          fi

      - name: Checkout repository
        uses: https://gitea.com/actions/checkout@v4

      - name: Create SSH key
        run: |
          install -m 0700 -d ~/.ssh
          KEY_CONTENT="$(printf '%s' "${{ secrets.KUBEADM_SSH_PRIVATE_KEY }}")"
          if [ -z "$KEY_CONTENT" ]; then
            KEY_CONTENT="$(printf '%s' "${{ secrets.SSH_KEY_PRIVATE }}")"
          fi

          if [ -z "$KEY_CONTENT" ]; then
            echo "Missing SSH private key secret. Set KUBEADM_SSH_PRIVATE_KEY or SSH_KEY_PRIVATE."
            exit 1
          fi

          printf '%s\n' "$KEY_CONTENT" > ~/.ssh/id_ed25519
          chmod 0600 ~/.ssh/id_ed25519

      - name: Set up Terraform
        uses: hashicorp/setup-terraform@v2
        with:
          terraform_version: 1.6.6
          terraform_wrapper: false

      - name: Build Terraform backend files
        working-directory: terraform
        run: |
          cat > secrets.auto.tfvars << EOF
          pm_api_token_secret = "${{ secrets.PM_API_TOKEN_SECRET }}"
          SSH_KEY_PUBLIC = "$(printf '%s' "${{ secrets.SSH_KEY_PUBLIC }}" | tr -d '\r\n')"
          EOF

          cat > backend.hcl << EOF
          bucket = "${{ secrets.B2_TF_BUCKET }}"
          key = "terraform.tfstate"
          region = "us-east-005"
          endpoints = {
            s3 = "${{ secrets.B2_TF_ENDPOINT }}"
          }
          access_key = "$(printf '%s' "${{ secrets.B2_KEY_ID }}" | tr -d '\r\n')"
          secret_key = "$(printf '%s' "${{ secrets.B2_APPLICATION_KEY }}" | tr -d '\r\n')"
          skip_credentials_validation = true
          skip_metadata_api_check = true
          skip_region_validation = true
          skip_requesting_account_id = true
          use_path_style = true
          EOF

      - name: Terraform init for state read
        working-directory: terraform
        run: terraform init -reconfigure -backend-config=backend.hcl

      - name: Create kubeadm inventory
        env:
          KUBEADM_SSH_USER: ${{ secrets.KUBEADM_SSH_USER }}
        run: |
          TF_OUTPUT_JSON="$(terraform -chdir=terraform output -json)"
          printf '%s' "$TF_OUTPUT_JSON" | ./nixos/kubeadm/scripts/render-inventory-from-tf-output.py > nixos/kubeadm/scripts/inventory.env

      - name: Validate nix installation
        run: |
          if [ -x /nix/var/nix/profiles/default/bin/nix ]; then
            /nix/var/nix/profiles/default/bin/nix --version
            exit 0
          fi

          if command -v nix >/dev/null 2>&1; then
            nix --version
            exit 0
          fi

          echo "Nix missing; installing no-daemon Nix for this runner job"
          sh <(curl -L https://nixos.org/nix/install) --no-daemon

          if [ -f "$HOME/.nix-profile/etc/profile.d/nix.sh" ]; then
            . "$HOME/.nix-profile/etc/profile.d/nix.sh"
          fi

          nix --version

      - name: Install nixos-rebuild tool
        env:
          NIX_CONFIG: experimental-features = nix-command flakes
        run: |
          if [ -f "$HOME/.nix-profile/etc/profile.d/nix.sh" ]; then
            . "$HOME/.nix-profile/etc/profile.d/nix.sh"
          fi

          nix profile install nixpkgs#nixos-rebuild

      - name: Run cluster rebuild and bootstrap
        env:
          NIX_CONFIG: experimental-features = nix-command flakes
          PATH: $HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:${{ env.PATH }}
        run: |
          if [ -f "$HOME/.nix-profile/etc/profile.d/nix.sh" ]; then
            . "$HOME/.nix-profile/etc/profile.d/nix.sh"
          fi

          ./nixos/kubeadm/scripts/rebuild-and-bootstrap.sh
```
.gitea/workflows/kubeadm-reset.yml (new file, 91 lines)
@@ -0,0 +1,91 @@
```yaml
name: Kubeadm Reset
run-name: ${{ gitea.actor }} requested kubeadm reset

on:
  workflow_dispatch:
    inputs:
      confirm:
        description: "Type RESET to run kubeadm reset on all nodes"
        required: true
        type: string

concurrency:
  group: kubeadm-bootstrap
  cancel-in-progress: false

jobs:
  reset:
    name: "Reset Cluster Nodes"
    runs-on: ubuntu-latest

    steps:
      - name: Validate confirmation phrase
        run: |
          if [ "${{ inputs.confirm }}" != "RESET" ]; then
            echo "Confirmation failed. You must type RESET."
            exit 1
          fi

      - name: Checkout repository
        uses: https://gitea.com/actions/checkout@v4

      - name: Create SSH key
        run: |
          install -m 0700 -d ~/.ssh
          KEY_CONTENT="$(printf '%s' "${{ secrets.KUBEADM_SSH_PRIVATE_KEY }}")"
          if [ -z "$KEY_CONTENT" ]; then
            KEY_CONTENT="$(printf '%s' "${{ secrets.SSH_KEY_PRIVATE }}")"
          fi

          if [ -z "$KEY_CONTENT" ]; then
            echo "Missing SSH private key secret. Set KUBEADM_SSH_PRIVATE_KEY or SSH_KEY_PRIVATE."
            exit 1
          fi

          printf '%s\n' "$KEY_CONTENT" > ~/.ssh/id_ed25519
          chmod 0600 ~/.ssh/id_ed25519

      - name: Set up Terraform
        uses: hashicorp/setup-terraform@v2
        with:
          terraform_version: 1.6.6
          terraform_wrapper: false

      - name: Build Terraform backend files
        working-directory: terraform
        run: |
          cat > secrets.auto.tfvars << EOF
          pm_api_token_secret = "${{ secrets.PM_API_TOKEN_SECRET }}"
          SSH_KEY_PUBLIC = "$(printf '%s' "${{ secrets.SSH_KEY_PUBLIC }}" | tr -d '\r\n')"
          EOF

          cat > backend.hcl << EOF
          bucket = "${{ secrets.B2_TF_BUCKET }}"
          key = "terraform.tfstate"
          region = "us-east-005"
          endpoints = {
            s3 = "${{ secrets.B2_TF_ENDPOINT }}"
          }
          access_key = "$(printf '%s' "${{ secrets.B2_KEY_ID }}" | tr -d '\r\n')"
          secret_key = "$(printf '%s' "${{ secrets.B2_APPLICATION_KEY }}" | tr -d '\r\n')"
          skip_credentials_validation = true
          skip_metadata_api_check = true
          skip_region_validation = true
          skip_requesting_account_id = true
          use_path_style = true
          EOF

      - name: Terraform init for state read
        working-directory: terraform
        run: terraform init -reconfigure -backend-config=backend.hcl

      - name: Create kubeadm inventory
        env:
          KUBEADM_SSH_USER: ${{ secrets.KUBEADM_SSH_USER }}
        run: |
          TF_OUTPUT_JSON="$(terraform -chdir=terraform output -json)"
          printf '%s' "$TF_OUTPUT_JSON" | ./nixos/kubeadm/scripts/render-inventory-from-tf-output.py > nixos/kubeadm/scripts/inventory.env

      - name: Run cluster reset
        run: |
          ./nixos/kubeadm/scripts/reset-cluster-nodes.sh
```
@@ -1,47 +1,122 @@
|
||||
name: Gitea Actions Demo
|
||||
run-name: ${{ gitea.actor }} is deploying with Terraform 🚀
|
||||
name: Terraform Apply
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
|
||||
concurrency:
|
||||
group: terraform-global
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
terraform:
|
||||
name: "Terraform Apply"
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
|
||||
env:
|
||||
TF_VAR_TS_AUTHKEY: ${{ secrets.TAILSCALE_KEY }}
|
||||
TF_VAR_ssh_key: ${{ secrets.SSH_PUBLIC_KEY }}
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: https://gitea.com/actions/checkout@v4
|
||||
|
||||
- name: Create secrets.tfvars
|
||||
working-directory: terraform
|
||||
run: |
|
||||
cat > secrets.auto.tfvars << EOF
|
||||
pm_api_token_secret = "${{ secrets.PM_API_TOKEN_SECRET }}"
|
||||
SSH_KEY_PUBLIC = "$(printf '%s' "${{ secrets.SSH_KEY_PUBLIC }}" | tr -d '\r\n')"
|
||||
EOF
|
||||
cat > backend.hcl << EOF
|
||||
bucket = "${{ secrets.B2_TF_BUCKET }}"
|
||||
key = "terraform.tfstate"
|
||||
region = "us-east-005"
|
||||
endpoints = {
|
||||
s3 = "${{ secrets.B2_TF_ENDPOINT }}"
|
||||
}
|
||||
access_key = "$(printf '%s' "${{ secrets.B2_KEY_ID }}" | tr -d '\r\n')"
|
||||
secret_key = "$(printf '%s' "${{ secrets.B2_APPLICATION_KEY }}" | tr -d '\r\n')"
|
||||
skip_credentials_validation = true
|
||||
skip_metadata_api_check = true
|
||||
skip_region_validation = true
|
||||
skip_requesting_account_id = true
|
||||
use_path_style = true
|
||||
EOF
|
||||
|
||||
- name: Set up Terraform
|
||||
uses: hashicorp/setup-terraform@v2
|
||||
with:
|
||||
terraform_version: 1.6.6
|
||||
|
||||
- name: Inject sensitive secrets
|
||||
working-directory: terraform
|
||||
run: |
|
||||
echo 'proxmox_password = "${{ secrets.PROXMOX_PASSWORD }}"' >> terraform.tfvars
|
||||
terraform_wrapper: false
|
||||
|
||||
- name: Terraform Init
|
||||
working-directory: terraform
|
||||
run: terraform init
|
||||
run: terraform init -reconfigure -backend-config=backend.hcl
|
||||
|
||||
- name: Terraform Plan
|
||||
working-directory: terraform
|
||||
run: terraform plan
|
||||
run: terraform plan -out=tfplan
|
||||
|
||||
- name: Block accidental destroy
|
||||
env:
|
||||
ALLOW_TF_DESTROY: ${{ secrets.ALLOW_TF_DESTROY }}
|
||||
working-directory: terraform
|
||||
run: |
|
||||
terraform show -json -no-color tfplan > tfplan.json
|
||||
DESTROY_COUNT=$(python3 -c 'import json; raw=open("tfplan.json","rb").read().decode("utf-8","ignore"); start=raw.find("{"); data=json.JSONDecoder().raw_decode(raw[start:])[0]; print(sum(1 for rc in data.get("resource_changes", []) if "delete" in rc.get("change", {}).get("actions", [])))')
|
||||
echo "Planned deletes: $DESTROY_COUNT"
|
||||
if [ "$DESTROY_COUNT" -gt 0 ] && [ "${ALLOW_TF_DESTROY}" != "true" ]; then
|
||||
echo "Destroy actions detected. Set ALLOW_TF_DESTROY=true to allow."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Terraform Apply
|
||||
working-directory: terraform
|
||||
run: terraform apply -auto-approve
|
||||
run: terraform apply -auto-approve tfplan
|
||||
|
||||
- name: Create SSH key
|
||||
run: |
|
||||
install -m 0700 -d ~/.ssh
|
||||
KEY_CONTENT="$(printf '%s' "${{ secrets.KUBEADM_SSH_PRIVATE_KEY }}")"
|
||||
if [ -z "$KEY_CONTENT" ]; then
|
||||
KEY_CONTENT="$(printf '%s' "${{ secrets.SSH_KEY_PRIVATE }}")"
|
||||
fi
|
||||
|
||||
if [ -z "$KEY_CONTENT" ]; then
|
||||
echo "Missing SSH private key secret. Set KUBEADM_SSH_PRIVATE_KEY or SSH_KEY_PRIVATE."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
printf '%s\n' "$KEY_CONTENT" > ~/.ssh/id_ed25519
|
||||
chmod 0600 ~/.ssh/id_ed25519
|
||||
|
||||
- name: Create kubeadm inventory from Terraform outputs
|
||||
env:
|
||||
KUBEADM_SSH_USER: ${{ secrets.KUBEADM_SSH_USER }}
|
||||
run: |
|
||||
TF_OUTPUT_JSON="$(terraform -chdir=terraform output -json)"
|
||||
printf '%s' "$TF_OUTPUT_JSON" | ./nixos/kubeadm/scripts/render-inventory-from-tf-output.py > nixos/kubeadm/scripts/inventory.env
|
||||
|
||||
- name: Ensure nix and nixos-rebuild
|
||||
env:
|
||||
NIX_CONFIG: experimental-features = nix-command flakes
|
||||
run: |
|
||||
if [ ! -x /nix/var/nix/profiles/default/bin/nix ] && ! command -v nix >/dev/null 2>&1; then
|
||||
sh <(curl -L https://nixos.org/nix/install) --no-daemon
|
||||
fi
|
||||
|
||||
if [ -f "$HOME/.nix-profile/etc/profile.d/nix.sh" ]; then
|
||||
. "$HOME/.nix-profile/etc/profile.d/nix.sh"
|
||||
fi
|
||||
|
||||
nix --version
|
||||
nix profile install nixpkgs#nixos-rebuild
|
||||
|
||||
- name: Rebuild and bootstrap/reconcile kubeadm cluster
|
||||
env:
|
||||
NIX_CONFIG: experimental-features = nix-command flakes
|
||||
PATH: $HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:${{ env.PATH }}
|
||||
run: |
|
||||
if [ -f "$HOME/.nix-profile/etc/profile.d/nix.sh" ]; then
|
||||
. "$HOME/.nix-profile/etc/profile.d/nix.sh"
|
||||
fi
|
||||
|
||||
./nixos/kubeadm/scripts/rebuild-and-bootstrap.sh
|
||||
|
||||
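The "Block accidental destroy" step above counts planned deletions with an inline Python one-liner. An equivalent sketch using `jq`, assuming `jq` is available on the runner (this is not what the workflow currently runs):

```bash
# Count resources the saved plan would delete; refuse unless explicitly allowed.
terraform show -json tfplan > tfplan.json
DESTROY_COUNT=$(jq '[.resource_changes[]? | select(.change.actions | index("delete"))] | length' tfplan.json)
echo "Planned deletes: $DESTROY_COUNT"
if [ "$DESTROY_COUNT" -gt 0 ] && [ "${ALLOW_TF_DESTROY}" != "true" ]; then
  echo "Destroy actions detected. Set ALLOW_TF_DESTROY=true to allow."
  exit 1
fi
```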
@@ -1,41 +1,111 @@
|
||||
name: Gitea Destroy Terraform
|
||||
run-name: ${{ gitea.actor }} triggered a Terraform Destroy 🧨
|
||||
name: Terraform Destroy
|
||||
run-name: ${{ gitea.actor }} requested Terraform destroy
|
||||
|
||||
on:
|
||||
workflow_dispatch: # Manual trigger
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
confirm:
|
||||
description: "Type NUKE to confirm destroy"
|
||||
required: true
|
||||
type: string
|
||||
target:
|
||||
description: "Destroy scope"
|
||||
required: true
|
||||
default: all
|
||||
type: choice
|
||||
options:
|
||||
- all
|
||||
- control-planes
|
||||
- workers
|
||||
|
||||
concurrency:
|
||||
group: terraform-global
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
destroy:
|
||||
name: "Terraform Destroy"
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
|
||||
env:
|
||||
TF_VAR_TS_AUTHKEY: ${{ secrets.TAILSCALE_KEY }}
|
||||
TF_VAR_ssh_key: ${{ secrets.SSH_PUBLIC_KEY }}
|
||||
|
||||
steps:
|
||||
- name: Validate confirmation phrase
|
||||
run: |
|
||||
if [ "${{ inputs.confirm }}" != "NUKE" ]; then
|
||||
echo "Confirmation failed. You must type NUKE."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: https://gitea.com/actions/checkout@v4
|
||||
|
||||
- name: Create Terraform secret files
|
||||
working-directory: terraform
|
||||
run: |
|
||||
cat > secrets.auto.tfvars << EOF
|
||||
pm_api_token_secret = "${{ secrets.PM_API_TOKEN_SECRET }}"
|
||||
SSH_KEY_PUBLIC = "$(printf '%s' "${{ secrets.SSH_KEY_PUBLIC }}" | tr -d '\r\n')"
|
||||
EOF
|
||||
cat > backend.hcl << EOF
|
||||
bucket = "${{ secrets.B2_TF_BUCKET }}"
|
||||
key = "terraform.tfstate"
|
||||
region = "us-east-005"
|
||||
endpoints = {
|
||||
s3 = "${{ secrets.B2_TF_ENDPOINT }}"
|
||||
}
|
||||
access_key = "$(printf '%s' "${{ secrets.B2_KEY_ID }}" | tr -d '\r\n')"
|
||||
secret_key = "$(printf '%s' "${{ secrets.B2_APPLICATION_KEY }}" | tr -d '\r\n')"
|
||||
skip_credentials_validation = true
|
||||
skip_metadata_api_check = true
|
||||
skip_region_validation = true
|
||||
skip_requesting_account_id = true
|
||||
use_path_style = true
|
||||
EOF
|
||||
|
||||
- name: Set up Terraform
|
||||
uses: hashicorp/setup-terraform@v2
|
||||
with:
|
||||
terraform_version: 1.6.6
|
||||
|
||||
- name: Inject sensitive secrets
|
||||
working-directory: terraform
|
||||
run: |
|
||||
echo 'proxmox_password = "${{ secrets.PROXMOX_PASSWORD }}"' >> terraform.tfvars
|
||||
terraform_wrapper: false
|
||||
|
||||
- name: Terraform Init
|
||||
working-directory: terraform
|
||||
run: terraform init
|
||||
run: terraform init -reconfigure -backend-config=backend.hcl
|
||||
|
||||
- name: Terraform Destroy
|
||||
- name: Terraform Destroy Plan
|
||||
working-directory: terraform
|
||||
run: terraform destroy -auto-approve
|
||||
run: |
|
||||
case "${{ inputs.target }}" in
|
||||
all)
|
||||
terraform plan -destroy -out=tfdestroy
|
||||
;;
|
||||
control-planes)
|
||||
terraform plan -destroy -target=proxmox_vm_qemu.control_planes -out=tfdestroy
|
||||
;;
|
||||
workers)
|
||||
terraform plan -destroy -target=proxmox_vm_qemu.workers -out=tfdestroy
|
||||
;;
|
||||
*)
|
||||
echo "Invalid destroy target: ${{ inputs.target }}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
- name: Terraform Destroy Apply
|
||||
working-directory: terraform
|
||||
run: |
|
||||
set +e
|
||||
terraform apply -auto-approve tfdestroy 2>&1 | tee destroy-apply.log
|
||||
APPLY_EXIT=${PIPESTATUS[0]}
|
||||
|
||||
if [ "$APPLY_EXIT" -ne 0 ] && [ -f errored.tfstate ] && grep -q "Failed to persist state to backend" destroy-apply.log; then
|
||||
echo "Detected backend state write failure after destroy; attempting recovery push..."
|
||||
terraform state push errored.tfstate
|
||||
PUSH_EXIT=$?
|
||||
|
||||
if [ "$PUSH_EXIT" -eq 0 ]; then
|
||||
echo "Recovered by pushing errored.tfstate to backend."
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
exit "$APPLY_EXIT"
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
name: Gitea Actions Demo
|
||||
run-name: ${{ gitea.actor }} is testing out Gitea Actions 🚀
|
||||
name: Terraform Plan
|
||||
|
||||
on:
|
||||
push:
|
||||
@@ -7,38 +6,56 @@ on:
|
||||
- stage
|
||||
- test
|
||||
|
||||
concurrency:
|
||||
group: terraform-global
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
terraform:
|
||||
name: "Terraform Plan"
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
|
||||
env:
|
||||
TF_VAR_TAILSCALE_KEY: ${{ secrets.TAILSCALE_KEY }}
|
||||
TF_VAR_TS_AUTHKEY: ${{ secrets.TAILSCALE_KEY }}
|
||||
TF_VAR_ssh_key: ${{ secrets.SSH_PUBLIC_KEY }}
|
||||
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: https://gitea.com/actions/checkout@v4
|
||||
|
||||
- name: Create secrets.tfvars
|
||||
working-directory: terraform
|
||||
run: |
|
||||
echo "PM_API_TOKEN_SECRET length: $(echo -n '${{ secrets.PM_API_TOKEN_SECRET }}' | wc -c)"
|
||||
cat > secrets.auto.tfvars << EOF
|
||||
pm_api_token_secret = "${{ secrets.PM_API_TOKEN_SECRET }}"
|
||||
SSH_KEY_PUBLIC = "$(printf '%s' "${{ secrets.SSH_KEY_PUBLIC }}" | tr -d '\r\n')"
|
||||
EOF
|
||||
cat > backend.hcl << EOF
|
||||
bucket = "${{ secrets.B2_TF_BUCKET }}"
|
||||
key = "terraform.tfstate"
|
||||
region = "us-east-005"
|
||||
endpoints = {
|
||||
s3 = "${{ secrets.B2_TF_ENDPOINT }}"
|
||||
}
|
||||
access_key = "$(printf '%s' "${{ secrets.B2_KEY_ID }}" | tr -d '\r\n')"
|
||||
secret_key = "$(printf '%s' "${{ secrets.B2_APPLICATION_KEY }}" | tr -d '\r\n')"
|
||||
skip_credentials_validation = true
|
||||
skip_metadata_api_check = true
|
||||
skip_region_validation = true
|
||||
skip_requesting_account_id = true
|
||||
use_path_style = true
|
||||
EOF
|
||||
echo "Created secrets.auto.tfvars:"
|
||||
cat secrets.auto.tfvars | sed 's/=.*/=***/'
|
||||
echo "Using token ID from terraform.tfvars:"
|
||||
grep '^pm_api_token_id' terraform.tfvars
|
||||
|
||||
- name: Set up Terraform
|
||||
uses: hashicorp/setup-terraform@v2
|
||||
with:
|
||||
terraform_version: 1.6.6
|
||||
|
||||
- name: Inject sensitive secrets
|
||||
working-directory: terraform
|
||||
run: |
|
||||
echo 'proxmox_password = "${{ secrets.PROXMOX_PASSWORD }}"' >> terraform.tfvars
|
||||
terraform_wrapper: false
|
||||
|
||||
- name: Terraform Init
|
||||
working-directory: terraform
|
||||
run: terraform init
|
||||
run: terraform init -reconfigure -backend-config=backend.hcl
|
||||
|
||||
- name: Terraform Format Check
|
||||
working-directory: terraform
|
||||
@@ -52,9 +69,20 @@ jobs:
|
||||
working-directory: terraform
|
||||
run: terraform plan -out=tfplan
|
||||
|
||||
- name: Upload Terraform Plan
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: terraform-plan
|
||||
path: terraform/tfplan
|
||||
- name: Block accidental destroy
|
||||
env:
|
||||
ALLOW_TF_DESTROY: ${{ secrets.ALLOW_TF_DESTROY }}
|
||||
working-directory: terraform
|
||||
run: |
|
||||
terraform show -json -no-color tfplan > tfplan.json
|
||||
DESTROY_COUNT=$(python3 -c 'import json; raw=open("tfplan.json","rb").read().decode("utf-8","ignore"); start=raw.find("{"); data=json.JSONDecoder().raw_decode(raw[start:])[0]; print(sum(1 for rc in data.get("resource_changes", []) if "delete" in rc.get("change", {}).get("actions", [])))')
|
||||
echo "Planned deletes: $DESTROY_COUNT"
|
||||
if [ "$DESTROY_COUNT" -gt 0 ] && [ "${ALLOW_TF_DESTROY}" != "true" ]; then
|
||||
echo "Destroy actions detected. Set ALLOW_TF_DESTROY=true to allow."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# NOTE: Disabled artifact upload for now.
|
||||
# On this Gitea/act runner, post-job hooks from artifact actions can
|
||||
# fail during "Complete job" even when all Terraform steps succeeded.
|
||||
# Re-enable once runner/action compatibility is confirmed.
|
||||
|
||||
.gitignore (vendored)
@@ -1,2 +1,6 @@

```
./terraform/.terraform
terraform/.terraform/
terraform/test-apply.sh
terraform/test-plan.sh
terraform/test-destroy.sh
terraform/tfplan
```
nixos/kubeadm/README.md (new file, 148 lines)
@@ -0,0 +1,148 @@
# Kubeadm Cluster Layout (NixOS)

This folder defines role-based NixOS configs for a kubeadm cluster.

## Topology

- Control planes: `cp-1`, `cp-2`, `cp-3`
- Workers: `wk-1`, `wk-2`, `wk-3`

## What this provides

- Shared Kubernetes/node prerequisites in `modules/k8s-common.nix`
- Shared cluster defaults in `modules/k8s-cluster-settings.nix`
- Role-specific settings for control planes and workers
- Generated per-node host configs from `flake.nix` (no duplicated host files)
- Bootstrap helper commands:
  - `th-kubeadm-init`
  - `th-kubeadm-join-control-plane`
  - `th-kubeadm-join-worker`
  - `th-kubeadm-status`

## Hardware config files

The flake automatically imports `hosts/hardware/<host>.nix` if present.
Copy each node's generated hardware config into this folder:

```bash
sudo nixos-generate-config
sudo cp /etc/nixos/hardware-configuration.nix ./hosts/hardware/cp-1.nix
```

Repeat for each node (`cp-2`, `cp-3`, `wk-1`, `wk-2`, `wk-3`).

## Deploy approach

Start from one node at a time while experimenting:

```bash
sudo nixos-rebuild switch --flake .#cp-1
```

For remote target-host workflows, use your preferred deploy wrapper later
(`nixos-rebuild --target-host ...` or deploy-rs/colmena).
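A concrete remote invocation, using the same flags `scripts/rebuild-and-bootstrap.sh` uses per node (the user and IP are the example-inventory values, not anything auto-detected):

```bash
# Rebuild one node over SSH from a machine with nix and this repo checked out.
nixos-rebuild switch \
  --flake ./nixos/kubeadm#cp-1 \
  --target-host micqdf@192.168.1.101 \
  --use-remote-sudo
```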

## Bootstrap runbook (kubeadm + kube-vip + Cilium)

1. Apply Nix config on all nodes (`cp-*`, then `wk-*`).
2. On `cp-1`, run:

```bash
sudo th-kubeadm-init
```

This infers the control-plane VIP as `<node-subnet>.250` on `eth0`, creates the
kube-vip static pod manifest, and runs `kubeadm init`.
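The VIP inference itself is only a few lines of shell; this sketch mirrors what `th-kubeadm-init` computes from the node's own address on `eth0` (it reuses the first three octets and appends `controlPlaneVipSuffix`):

```bash
# Derive the kube-vip address from the node's primary IPv4 on eth0.
iface=eth0
suffix=250
local_ip_cidr=$(ip -4 -o addr show dev "$iface" | awk 'NR==1 {print $4}')
subnet_prefix=$(echo "$local_ip_cidr" | cut -d/ -f1 | awk -F. '{print $1"."$2"."$3}')
vip="$subnet_prefix.$suffix"
echo "control-plane endpoint: $vip:6443"
```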

3. Install Cilium from `cp-1`:

```bash
helm repo add cilium https://helm.cilium.io
helm repo update
helm upgrade --install cilium cilium/cilium \
  --namespace kube-system \
  --set kubeProxyReplacement=true
```

4. Generate join commands on `cp-1`:

```bash
sudo kubeadm token create --print-join-command
sudo kubeadm init phase upload-certs --upload-certs
```

5. Join `cp-2` and `cp-3`:

```bash
sudo th-kubeadm-join-control-plane '<kubeadm join ... --control-plane --certificate-key ...>'
```

6. Join workers:

```bash
sudo th-kubeadm-join-worker '<kubeadm join ...>'
```

7. Validate from a control plane:

```bash
kubectl get nodes -o wide
kubectl -n kube-system get pods -o wide
```

## Repeatable rebuild flow (recommended)

1. Copy and edit inventory:

```bash
cp ./scripts/inventory.example.env ./scripts/inventory.env
$EDITOR ./scripts/inventory.env
```

2. Rebuild all nodes and bootstrap/reconcile cluster:

```bash
./scripts/rebuild-and-bootstrap.sh
```

3. If you only want to reset Kubernetes state on existing VMs:

```bash
./scripts/reset-cluster-nodes.sh
```

For a full nuke/recreate lifecycle:
- run Terraform destroy/apply for VMs first,
- then run `./scripts/rebuild-and-bootstrap.sh` again.
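A sketch of that lifecycle end to end from a machine with Terraform and Nix available (it assumes `terraform init` has already been run against the backend, and that the inventory is regenerated for the new VM addresses):

```bash
# Recreate the VMs, regenerate the inventory, then rebuild + bootstrap on top.
terraform -chdir=terraform destroy -auto-approve
terraform -chdir=terraform apply -auto-approve
terraform -chdir=terraform output -json \
  | ./nixos/kubeadm/scripts/render-inventory-from-tf-output.py \
  > ./nixos/kubeadm/scripts/inventory.env
./nixos/kubeadm/scripts/rebuild-and-bootstrap.sh
```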

Node lists are discovered from Terraform outputs, so adding new workers/control
planes in Terraform is picked up automatically by the bootstrap/reconcile flow.

## Optional Gitea workflow automation

Primary flow:

- Push to `master` triggers `.gitea/workflows/terraform-apply.yml`
- That workflow now does Terraform apply and then runs kubeadm rebuild/bootstrap reconciliation automatically

Manual dispatch workflows are available:

- `.gitea/workflows/kubeadm-bootstrap.yml`
- `.gitea/workflows/kubeadm-reset.yml`

Required repository secrets:

- Existing Terraform/backend secrets used by current workflows (`B2_*`, `PM_API_TOKEN_SECRET`, `SSH_KEY_PUBLIC`)
- SSH private key: prefer `KUBEADM_SSH_PRIVATE_KEY`, fallback to existing `SSH_KEY_PRIVATE`

Optional secrets:

- `KUBEADM_SSH_USER` (defaults to `micqdf`)

Node IPs are auto-discovered from Terraform state outputs (`control_plane_vm_ipv4`, `worker_vm_ipv4`), so you do not need per-node IP secrets.
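To preview exactly what the workflows will feed the bootstrap scripts, the render helper can be run by hand; the commented output below uses the example-inventory addresses, not real state:

```bash
# Preview the inventory the CI jobs generate from Terraform outputs.
terraform -chdir=terraform output -json \
  | ./nixos/kubeadm/scripts/render-inventory-from-tf-output.py
# Expected shape of the output:
#   SSH_USER=micqdf
#   PRIMARY_CONTROL_PLANE=cp-1
#   CONTROL_PLANES="cp-1=192.168.1.101 cp-2=192.168.1.102 cp-3=192.168.1.103"
#   WORKERS="wk-1=192.168.1.111 wk-2=192.168.1.112 wk-3=192.168.1.113"
```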

## Notes

- Scripts are intentionally manual-triggered (predictable for homelab bring-up).
- If `.250` on the node subnet is already in use, change `controlPlaneVipSuffix`
  in `modules/k8s-cluster-settings.nix` before bootstrap.
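A quick pre-flight check that the default VIP is actually free (the address shown assumes the example `192.168.1.0/24` subnet):

```bash
# If anything answers, pick a different controlPlaneVipSuffix.
ping -c 1 -W 1 192.168.1.250 && echo "VIP already in use" || echo "VIP looks free"
```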
nixos/kubeadm/flake.lock (generated, new file, 27 lines)
@@ -0,0 +1,27 @@
```json
{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1767313136,
        "narHash": "sha256-16KkgfdYqjaeRGBaYsNrhPRRENs0qzkQVUooNHtoy2w=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "ac62194c3917d5f474c1a844b6fd6da2db95077d",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-25.05",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}
```
nixos/kubeadm/flake.nix (new file, 77 lines)
@@ -0,0 +1,77 @@
```nix
{
  description = "NixOS kubeadm cluster configs";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
  };

  outputs = { nixpkgs, ... }:
    let
      system = "x86_64-linux";
      lib = nixpkgs.lib;
      pkgs = nixpkgs.legacyPackages.${system};
      nodeNames = [ "cp-1" "cp-2" "cp-3" "wk-1" "wk-2" "wk-3" ];

      mkNode = {
        name,
        role,
        extraModules ? [ ],
      }:
        let
          roleModule = if role == "control-plane" then ./modules/k8s-control-plane.nix else ./modules/k8s-worker.nix;
          hardwarePath = ./hosts/hardware + "/${name}.nix";
        in
        nixpkgs.lib.nixosSystem {
          inherit system;
          modules = [
            ./modules/k8s-cluster-settings.nix
            ./modules/k8s-common.nix
            roleModule
            ({ lib, ... }: {
              imports = lib.optional (builtins.pathExists hardwarePath) hardwarePath;
              networking.hostName = name;
              system.stateVersion = "25.05";
              boot.loader.grub.devices = lib.mkDefault [ "/dev/sda" ];
              fileSystems."/" = lib.mkDefault {
                device = "/dev/disk/by-label/nixos";
                fsType = "ext4";
              };
            })
          ] ++ extraModules;
        };

      mkNodeByName = name:
        mkNode {
          inherit name;
          role = if lib.hasPrefix "cp-" name then "control-plane" else "worker";
        };

      mkEvalCheck = name:
        let
          cfg = mkNode {
            inherit name;
            role = if lib.hasPrefix "cp-" name then "control-plane" else "worker";
            extraModules = [
              ({ lib, ... }: {
                boot.loader.grub.devices = lib.mkDefault [ "/dev/sda" ];
                fileSystems."/" = lib.mkDefault {
                  device = "/dev/disk/by-label/nixos";
                  fsType = "ext4";
                };
              })
            ];
          };
        in
        pkgs.runCommand "eval-${name}" { } ''
          cat > "$out" <<'EOF'
          host=${cfg.config.networking.hostName}
          role=${if lib.hasPrefix "cp-" name then "control-plane" else "worker"}
          stateVersion=${cfg.config.system.stateVersion}
          EOF
        '';
    in {
      nixosConfigurations = lib.genAttrs nodeNames mkNodeByName;

      checks.${system} = lib.genAttrs nodeNames mkEvalCheck;
    };
}
```
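Because the flake also publishes per-node eval checks, all six host configs can be evaluated without touching any VM (a sketch; assumes flakes are enabled on the machine running it):

```bash
nix flake check ./nixos/kubeadm
```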
nixos/kubeadm/hosts/hardware/.gitkeep (new empty file)
nixos/kubeadm/modules/k8s-cluster-settings.nix (new file, 12 lines)
@@ -0,0 +1,12 @@
```nix
{ ... }:

{
  terrahome.kubeadm = {
    k8sMinor = "1.31";
    controlPlaneInterface = "eth0";
    controlPlaneVipSuffix = 250;
    podSubnet = "10.244.0.0/16";
    serviceSubnet = "10.96.0.0/12";
    clusterDomain = "cluster.local";
  };
}
```
nixos/kubeadm/modules/k8s-common.nix (new file, 202 lines)
@@ -0,0 +1,202 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
pinnedK8s = lib.attrByPath [ "kubernetes_1_31" ] pkgs.kubernetes pkgs;
|
||||
kubeVipImage = "ghcr.io/kube-vip/kube-vip:v0.8.9";
|
||||
in
|
||||
{
|
||||
options.terrahome.kubeadm = {
|
||||
k8sMinor = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "1.31";
|
||||
};
|
||||
|
||||
controlPlaneInterface = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "eth0";
|
||||
};
|
||||
|
||||
controlPlaneVipSuffix = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 250;
|
||||
};
|
||||
|
||||
podSubnet = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "10.244.0.0/16";
|
||||
};
|
||||
|
||||
serviceSubnet = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "10.96.0.0/12";
|
||||
};
|
||||
|
||||
clusterDomain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "cluster.local";
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
boot.kernelModules = [ "overlay" "br_netfilter" ];
|
||||
|
||||
boot.kernel.sysctl = {
|
||||
"net.ipv4.ip_forward" = 1;
|
||||
"net.bridge.bridge-nf-call-iptables" = 1;
|
||||
"net.bridge.bridge-nf-call-ip6tables" = 1;
|
||||
};
|
||||
|
||||
virtualisation.containerd.enable = true;
|
||||
virtualisation.containerd.settings = {
|
||||
plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options.SystemdCgroup = true;
|
||||
};
|
||||
|
||||
swapDevices = lib.mkForce [ ];
|
||||
|
||||
services.openssh.enable = true;
|
||||
services.openssh.settings = {
|
||||
PasswordAuthentication = false;
|
||||
KbdInteractiveAuthentication = false;
|
||||
};
|
||||
|
||||
environment.variables = {
|
||||
KUBECONFIG = "/etc/kubernetes/admin.conf";
|
||||
KUBE_VIP_IMAGE = kubeVipImage;
|
||||
};
|
||||
|
||||
environment.systemPackages = (with pkgs; [
|
||||
containerd
|
||||
cri-tools
|
||||
cni-plugins
|
||||
pinnedK8s
|
||||
kubernetes-helm
|
||||
conntrack-tools
|
||||
socat
|
||||
ethtool
|
||||
ipvsadm
|
||||
iproute2
|
||||
iptables
|
||||
ebtables
|
||||
jq
|
||||
curl
|
||||
vim
|
||||
gawk
|
||||
]) ++ [
|
||||
(pkgs.writeShellScriptBin "th-kubeadm-init" ''
|
||||
set -euo pipefail
|
||||
|
||||
iface="${config.terrahome.kubeadm.controlPlaneInterface}"
|
||||
suffix="${toString config.terrahome.kubeadm.controlPlaneVipSuffix}"
|
||||
pod_subnet="${config.terrahome.kubeadm.podSubnet}"
|
||||
service_subnet="${config.terrahome.kubeadm.serviceSubnet}"
|
||||
domain="${config.terrahome.kubeadm.clusterDomain}"
|
||||
|
||||
local_ip_cidr=$(ip -4 -o addr show dev "$iface" | awk 'NR==1 {print $4}')
|
||||
if [ -z "''${local_ip_cidr:-}" ]; then
|
||||
echo "Could not determine IPv4 CIDR on interface $iface"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
subnet_prefix=$(echo "$local_ip_cidr" | cut -d/ -f1 | awk -F. '{print $1"."$2"."$3}')
|
||||
vip="$subnet_prefix.$suffix"
|
||||
|
||||
echo "Using control-plane endpoint: $vip:6443"
|
||||
echo "Using kube-vip interface: $iface"
|
||||
|
||||
mkdir -p /etc/kubernetes/manifests
|
||||
ctr image pull "$KUBE_VIP_IMAGE"
|
||||
|
||||
ctr run --rm --net-host "$KUBE_VIP_IMAGE" kube-vip /kube-vip manifest pod \
|
||||
--interface "$iface" \
|
||||
--address "$vip" \
|
||||
--controlplane \
|
||||
--services \
|
||||
--arp \
|
||||
--leaderElection \
|
||||
> /etc/kubernetes/manifests/kube-vip.yaml
|
||||
|
||||
kubeadm init \
|
||||
--control-plane-endpoint "$vip:6443" \
|
||||
--upload-certs \
|
||||
--pod-network-cidr "$pod_subnet" \
|
||||
--service-cidr "$service_subnet" \
|
||||
--service-dns-domain "$domain"
|
||||
|
||||
mkdir -p /root/.kube
|
||||
cp /etc/kubernetes/admin.conf /root/.kube/config
|
||||
chmod 600 /root/.kube/config
|
||||
|
||||
echo
|
||||
echo "Next: install Cilium, then generate join commands:"
|
||||
echo " kubeadm token create --print-join-command"
|
||||
echo " kubeadm token create --print-join-command --certificate-key <key>"
|
||||
'')
|
||||
|
||||
(pkgs.writeShellScriptBin "th-kubeadm-join-control-plane" ''
|
||||
set -euo pipefail
|
||||
if [ "$#" -lt 1 ]; then
|
||||
echo "Usage: th-kubeadm-join-control-plane '<kubeadm join ... --control-plane --certificate-key ...>'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
iface="${config.terrahome.kubeadm.controlPlaneInterface}"
|
||||
suffix="${toString config.terrahome.kubeadm.controlPlaneVipSuffix}"
|
||||
local_ip_cidr=$(ip -4 -o addr show dev "$iface" | awk 'NR==1 {print $4}')
|
||||
if [ -z "''${local_ip_cidr:-}" ]; then
|
||||
echo "Could not determine IPv4 CIDR on interface $iface"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
subnet_prefix=$(echo "$local_ip_cidr" | cut -d/ -f1 | awk -F. '{print $1"."$2"."$3}')
|
||||
vip="$subnet_prefix.$suffix"
|
||||
|
||||
mkdir -p /etc/kubernetes/manifests
|
||||
ctr image pull "$KUBE_VIP_IMAGE"
|
||||
ctr run --rm --net-host "$KUBE_VIP_IMAGE" kube-vip /kube-vip manifest pod \
|
||||
--interface "$iface" \
|
||||
--address "$vip" \
|
||||
--controlplane \
|
||||
--services \
|
||||
--arp \
|
||||
--leaderElection \
|
||||
> /etc/kubernetes/manifests/kube-vip.yaml
|
||||
|
||||
eval "$1"
|
||||
'')
|
||||
|
||||
(pkgs.writeShellScriptBin "th-kubeadm-join-worker" ''
|
||||
set -euo pipefail
|
||||
if [ "$#" -lt 1 ]; then
|
||||
echo "Usage: th-kubeadm-join-worker '<kubeadm join ...>'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
eval "$1"
|
||||
'')
|
||||
|
||||
(pkgs.writeShellScriptBin "th-kubeadm-status" ''
|
||||
set -euo pipefail
|
||||
systemctl is-active containerd || true
|
||||
systemctl is-active kubelet || true
|
||||
crictl info >/dev/null && echo "crictl: ok" || echo "crictl: not-ready"
|
||||
'')
|
||||
];
|
||||
|
||||
systemd.services.kubelet = {
|
||||
description = "Kubernetes Kubelet";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
wants = [ "network-online.target" ];
|
||||
after = [ "containerd.service" "network-online.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pinnedK8s}/bin/kubelet";
|
||||
Restart = "always";
|
||||
RestartSec = "10";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d /etc/kubernetes 0755 root root -"
|
||||
"d /etc/kubernetes/manifests 0755 root root -"
|
||||
];
|
||||
};
|
||||
}
|
||||
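The `th-kubeadm-*` helpers defined above land on every node's PATH, so a quick health probe from the operator machine looks like this (user and address are the example-inventory values):

```bash
ssh micqdf@192.168.1.101 sudo th-kubeadm-status
```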
nixos/kubeadm/modules/k8s-control-plane.nix (new file, 14 lines)
@@ -0,0 +1,14 @@
```nix
{
  networking.firewall.allowedTCPPorts = [
    6443
    2379
    2380
    10250
    10257
    10259
  ];

  networking.firewall.allowedUDPPorts = [
    8472
  ];
}
```
nixos/kubeadm/modules/k8s-worker.nix (new file, 11 lines)
@@ -0,0 +1,11 @@
```nix
{
  networking.firewall.allowedTCPPorts = [
    10250
    30000
    32767
  ];

  networking.firewall.allowedUDPPorts = [
    8472
  ];
}
```
nixos/kubeadm/scripts/inventory.example.env (new file, 7 lines)
@@ -0,0 +1,7 @@
```bash
SSH_USER=micqdf
PRIMARY_CONTROL_PLANE=cp-1

# Name=IP pairs (space-separated)
CONTROL_PLANES="cp-1=192.168.1.101 cp-2=192.168.1.102 cp-3=192.168.1.103"

WORKERS="wk-1=192.168.1.111 wk-2=192.168.1.112 wk-3=192.168.1.113"
```
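The bootstrap and reset scripts also accept a per-node fallback form, reading `CP_<n>`/`WK_<n>` variables when the list variables above are unset; an equivalent inventory using the same example addresses:

```bash
SSH_USER=micqdf
PRIMARY_CONTROL_PLANE=cp-1
CP_1=192.168.1.101
CP_2=192.168.1.102
CP_3=192.168.1.103
WK_1=192.168.1.111
WK_2=192.168.1.112
WK_3=192.168.1.113
```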
nixos/kubeadm/scripts/rebuild-and-bootstrap.sh (new executable file, 174 lines)
@@ -0,0 +1,174 @@
```bash
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
FLAKE_DIR="${FLAKE_DIR:-$(cd "$SCRIPT_DIR/.." && pwd)}"
INVENTORY_FILE="${1:-$SCRIPT_DIR/inventory.env}"

if [ ! -f "$INVENTORY_FILE" ]; then
  echo "Missing inventory file: $INVENTORY_FILE"
  echo "Copy $SCRIPT_DIR/inventory.example.env to $SCRIPT_DIR/inventory.env and edit node mappings."
  exit 1
fi

# shellcheck disable=SC1090
source "$INVENTORY_FILE"

SSH_USER="${SSH_USER:-micqdf}"
SSH_OPTS="${SSH_OPTS:- -o BatchMode=yes -o StrictHostKeyChecking=accept-new }"

declare -A NODE_IPS=()
declare -a CP_NAMES=()
declare -a WK_NAMES=()

add_node_pair() {
  local role="$1"
  local pair="$2"
  local name="${pair%%=*}"
  local ip="${pair#*=}"

  if [ -z "$name" ] || [ -z "$ip" ] || [ "$name" = "$ip" ]; then
    echo "Invalid node pair '$pair' (expected name=ip)."
    exit 1
  fi

  NODE_IPS["$name"]="$ip"
  if [ "$role" = "cp" ]; then
    CP_NAMES+=("$name")
  else
    WK_NAMES+=("$name")
  fi
}

populate_nodes() {
  if [ -n "${CONTROL_PLANES:-}" ]; then
    for pair in $CONTROL_PLANES; do
      add_node_pair "cp" "$pair"
    done
  else
    while IFS= read -r var_name; do
      idx="${var_name#CP_}"
      add_node_pair "cp" "cp-$idx=${!var_name}"
    done < <(compgen -A variable | grep -E '^CP_[0-9]+$' | sort -V)
  fi

  if [ -n "${WORKERS:-}" ]; then
    for pair in $WORKERS; do
      add_node_pair "wk" "$pair"
    done
  else
    while IFS= read -r var_name; do
      idx="${var_name#WK_}"
      add_node_pair "wk" "wk-$idx=${!var_name}"
    done < <(compgen -A variable | grep -E '^WK_[0-9]+$' | sort -V)
  fi

  if [ "${#CP_NAMES[@]}" -eq 0 ]; then
    echo "No control planes found in inventory."
    exit 1
  fi

  if [ "${#WK_NAMES[@]}" -eq 0 ]; then
    echo "No workers found in inventory."
    exit 1
  fi
}

remote() {
  local host_ip="$1"
  local cmd="$2"
  ssh $SSH_OPTS "$SSH_USER@$host_ip" "$cmd"
}

cluster_has_node() {
  local node_name="$1"
  remote "$PRIMARY_CP_IP" "sudo kubectl --kubeconfig /etc/kubernetes/admin.conf get node $node_name >/dev/null 2>&1"
}

cluster_ready() {
  remote "$PRIMARY_CP_IP" "test -f /etc/kubernetes/admin.conf && sudo kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes >/dev/null 2>&1"
}

rebuild_node() {
  local node_name="$1"
  local node_ip="$2"

  echo "==> Rebuilding $node_name on $node_ip"
  nixos-rebuild switch \
    --flake "$FLAKE_DIR#$node_name" \
    --target-host "$SSH_USER@$node_ip" \
    --use-remote-sudo
}

populate_nodes

PRIMARY_CONTROL_PLANE="${PRIMARY_CONTROL_PLANE:-cp-1}"
if [ -z "${NODE_IPS[$PRIMARY_CONTROL_PLANE]:-}" ]; then
  PRIMARY_CONTROL_PLANE="${CP_NAMES[0]}"
fi
PRIMARY_CP_IP="${NODE_IPS[$PRIMARY_CONTROL_PLANE]}"

for node in "${CP_NAMES[@]}"; do
  rebuild_node "$node" "${NODE_IPS[$node]}"
done

for node in "${WK_NAMES[@]}"; do
  rebuild_node "$node" "${NODE_IPS[$node]}"
done

echo "==> Initializing control plane on $PRIMARY_CONTROL_PLANE"
if cluster_ready; then
  echo "==> Existing cluster detected on $PRIMARY_CONTROL_PLANE; skipping kubeadm init"
else
  remote "$PRIMARY_CP_IP" "sudo th-kubeadm-init"

  echo "==> Installing Cilium on $PRIMARY_CONTROL_PLANE"
  remote "$PRIMARY_CP_IP" "helm repo add cilium https://helm.cilium.io >/dev/null 2>&1 || true"
  remote "$PRIMARY_CP_IP" "helm repo update >/dev/null"
  remote "$PRIMARY_CP_IP" "kubectl create namespace kube-system >/dev/null 2>&1 || true"
  remote "$PRIMARY_CP_IP" "helm upgrade --install cilium cilium/cilium --namespace kube-system --set kubeProxyReplacement=true"
fi

echo "==> Building kubeadm join commands"
JOIN_CMD="$(remote "$PRIMARY_CP_IP" "sudo kubeadm token create --print-join-command")"
CERT_KEY="$(remote "$PRIMARY_CP_IP" "sudo kubeadm init phase upload-certs --upload-certs | tail -n 1")"
CP_JOIN_CMD="$JOIN_CMD --control-plane --certificate-key $CERT_KEY"

join_control_plane() {
  local node_ip="$1"
  local encoded
  encoded="$(printf '%s' "$CP_JOIN_CMD" | base64 -w0)"
  remote "$node_ip" "sudo th-kubeadm-join-control-plane \"\$(echo $encoded | base64 -d)\""
}

join_worker() {
  local node_ip="$1"
  local encoded
  encoded="$(printf '%s' "$JOIN_CMD" | base64 -w0)"
  remote "$node_ip" "sudo th-kubeadm-join-worker \"\$(echo $encoded | base64 -d)\""
}

echo "==> Joining remaining control planes"
for node in "${CP_NAMES[@]}"; do
  if [ "$node" = "$PRIMARY_CONTROL_PLANE" ]; then
    continue
  fi

  if cluster_has_node "$node"; then
    echo "$node already joined; skipping"
  else
    join_control_plane "${NODE_IPS[$node]}"
  fi
done

echo "==> Joining workers"
for node in "${WK_NAMES[@]}"; do
  if cluster_has_node "$node"; then
    echo "$node already joined; skipping"
  else
    join_worker "${NODE_IPS[$node]}"
  fi
done

echo "==> Final node list"
remote "$PRIMARY_CP_IP" "kubectl get nodes -o wide"
```
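Usage note: the script defaults to `scripts/inventory.env`, and the first positional argument overrides the inventory path:

```bash
./nixos/kubeadm/scripts/rebuild-and-bootstrap.sh
./nixos/kubeadm/scripts/rebuild-and-bootstrap.sh /path/to/other-inventory.env
```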
nixos/kubeadm/scripts/render-inventory-from-tf-output.py (new executable file, 40 lines)
@@ -0,0 +1,40 @@
```python
#!/usr/bin/env python3

import json
import os
import re
import sys


def natural_key(name: str):
    m = re.match(r"^([a-zA-Z-]+)-(\d+)$", name)
    if m:
        return (m.group(1), int(m.group(2)))
    return (name, 0)


def map_to_pairs(items: dict[str, str]) -> str:
    ordered = sorted(items.items(), key=lambda kv: natural_key(kv[0]))
    return " ".join(f"{k}={v}" for k, v in ordered)


def main() -> int:
    payload = json.load(sys.stdin)

    cp_map = payload.get("control_plane_vm_ipv4", {}).get("value", {})
    wk_map = payload.get("worker_vm_ipv4", {}).get("value", {})

    if not cp_map or not wk_map:
        raise SystemExit("Missing control_plane_vm_ipv4 or worker_vm_ipv4 in terraform output")

    ssh_user = os.environ.get("KUBEADM_SSH_USER", "").strip() or "micqdf"

    print(f"SSH_USER={ssh_user}")
    print("PRIMARY_CONTROL_PLANE=cp-1")
    print(f"CONTROL_PLANES=\"{map_to_pairs(cp_map)}\"")
    print(f"WORKERS=\"{map_to_pairs(wk_map)}\"")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
```
nixos/kubeadm/scripts/reset-cluster-nodes.sh (new executable file, 72 lines)
@@ -0,0 +1,72 @@
```bash
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
INVENTORY_FILE="${1:-$SCRIPT_DIR/inventory.env}"

if [ ! -f "$INVENTORY_FILE" ]; then
  echo "Missing inventory file: $INVENTORY_FILE"
  echo "Copy $SCRIPT_DIR/inventory.example.env to $SCRIPT_DIR/inventory.env and edit node mappings."
  exit 1
fi

# shellcheck disable=SC1090
source "$INVENTORY_FILE"

SSH_USER="${SSH_USER:-micqdf}"
SSH_OPTS="${SSH_OPTS:- -o BatchMode=yes -o StrictHostKeyChecking=accept-new }"

declare -A NODE_IPS=()

add_pair() {
  local pair="$1"
  local name="${pair%%=*}"
  local ip="${pair#*=}"

  if [ -z "$name" ] || [ -z "$ip" ] || [ "$name" = "$ip" ]; then
    echo "Invalid node pair '$pair' (expected name=ip)."
    exit 1
  fi

  NODE_IPS["$name"]="$ip"
}

if [ -n "${CONTROL_PLANES:-}" ]; then
  for pair in $CONTROL_PLANES; do
    add_pair "$pair"
  done
else
  while IFS= read -r var_name; do
    idx="${var_name#CP_}"
    add_pair "cp-$idx=${!var_name}"
  done < <(compgen -A variable | grep -E '^CP_[0-9]+$' | sort -V)
fi

if [ -n "${WORKERS:-}" ]; then
  for pair in $WORKERS; do
    add_pair "$pair"
  done
else
  while IFS= read -r var_name; do
    idx="${var_name#WK_}"
    add_pair "wk-$idx=${!var_name}"
  done < <(compgen -A variable | grep -E '^WK_[0-9]+$' | sort -V)
fi

if [ "${#NODE_IPS[@]}" -eq 0 ]; then
  echo "No nodes found in inventory."
  exit 1
fi

reset_node() {
  local node_name="$1"
  local node_ip="$2"
  echo "==> Resetting $node_name ($node_ip)"
  ssh $SSH_OPTS "$SSH_USER@$node_ip" "sudo kubeadm reset -f && sudo systemctl stop kubelet && sudo rm -rf /etc/kubernetes /var/lib/etcd /var/lib/cni /etc/cni/net.d"
}

while IFS= read -r node_name; do
  reset_node "$node_name" "${NODE_IPS[$node_name]}"
done < <(printf '%s\n' "${!NODE_IPS[@]}" | sort -V)

echo "Cluster components reset on all listed nodes."
```
nixos/template-base/README.md (new file, 27 lines)
@@ -0,0 +1,27 @@
# NixOS Proxmox Template Base

This folder contains a minimal NixOS base config you can copy into a new
template VM build.

## Files

- `flake.nix`: pins `nixos-24.11` and exposes one host config.
- `configuration.nix`: base settings for Proxmox guest use.

## Before first apply

1. Replace `REPLACE_WITH_YOUR_SSH_PUBLIC_KEY` in `configuration.nix`.
2. Add `hardware-configuration.nix` from the VM install:
   - `nixos-generate-config --root /`
   - copy `/etc/nixos/hardware-configuration.nix` next to `configuration.nix`

## Build/apply example inside the VM

```bash
sudo nixos-rebuild switch --flake .#template
```

## Notes

- This is intentionally minimal and avoids cloud-init assumptions.
- If you want host-specific settings, create additional modules and import them.
nixos/template-base/configuration.nix (new file, 57 lines)
@@ -0,0 +1,57 @@
```nix
{ lib, pkgs, ... }:

{
  imports =
    lib.optional (builtins.pathExists ./hardware-configuration.nix)
      ./hardware-configuration.nix;

  networking.hostName = "nixos-template";
  networking.useDHCP = lib.mkDefault true;
  networking.nameservers = [ "1.1.1.1" "8.8.8.8" ];

  boot.loader.systemd-boot.enable = lib.mkForce false;
  boot.loader.grub = {
    enable = true;
    device = "/dev/sda";
  };

  services.qemuGuest.enable = true;
  services.openssh.enable = true;
  services.tailscale.enable = true;
  services.openssh.settings = {
    PasswordAuthentication = false;
    KbdInteractiveAuthentication = false;
    PermitRootLogin = "prohibit-password";
  };

  programs.fish.enable = true;

  users.users.micqdf = {
    isNormalUser = true;
    extraGroups = [ "wheel" ];
    shell = pkgs.fish;
  };

  security.sudo.wheelNeedsPassword = false;

  environment.systemPackages = with pkgs; [
    btop
    curl
    dig
    eza
    fd
    fzf
    git
    htop
    jq
    ripgrep
    tailscale
    tree
    unzip
    vim
    neovim
    wget
  ];

  system.stateVersion = "25.05";
}
```
nixos/template-base/flake.nix (new file, 14 lines)
@@ -0,0 +1,14 @@
```nix
{
  description = "Base NixOS config for Proxmox template";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
  };

  outputs = { nixpkgs, ... }: {
    nixosConfigurations.template = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [ ./configuration.nix ];
    };
  };
}
```
terraform/.terraform.lock.hcl (generated)
@@ -1,79 +1,24 @@
|
||||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/local" {
|
||||
version = "2.5.2"
|
||||
hashes = [
|
||||
"h1:JlMZD6nYqJ8sSrFfEAH0Vk/SL8WLZRmFaMUF9PJK5wM=",
|
||||
"zh:136299545178ce281c56f36965bf91c35407c11897f7082b3b983d86cb79b511",
|
||||
"zh:3b4486858aa9cb8163378722b642c57c529b6c64bfbfc9461d940a84cd66ebea",
|
||||
"zh:4855ee628ead847741aa4f4fc9bed50cfdbf197f2912775dd9fe7bc43fa077c0",
|
||||
"zh:4b8cd2583d1edcac4011caafe8afb7a95e8110a607a1d5fb87d921178074a69b",
|
||||
"zh:52084ddaff8c8cd3f9e7bcb7ce4dc1eab00602912c96da43c29b4762dc376038",
|
||||
"zh:71562d330d3f92d79b2952ffdda0dad167e952e46200c767dd30c6af8d7c0ed3",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:805f81ade06ff68fa8b908d31892eaed5c180ae031c77ad35f82cb7a74b97cf4",
|
||||
"zh:8b6b3ebeaaa8e38dd04e56996abe80db9be6f4c1df75ac3cccc77642899bd464",
|
||||
"zh:ad07750576b99248037b897de71113cc19b1a8d0bc235eb99173cc83d0de3b1b",
|
||||
"zh:b9f1c3bfadb74068f5c205292badb0661e17ac05eb23bfe8bd809691e4583d0e",
|
||||
"zh:cc4cbcd67414fefb111c1bf7ab0bc4beb8c0b553d01719ad17de9a047adff4d1",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/null" {
|
||||
version = "3.2.3"
|
||||
hashes = [
|
||||
"h1:+AnORRgFbRO6qqcfaQyeX80W0eX3VmjadjnUFUJTiXo=",
|
||||
"zh:22d062e5278d872fe7aed834f5577ba0a5afe34a3bdac2b81f828d8d3e6706d2",
|
||||
"zh:23dead00493ad863729495dc212fd6c29b8293e707b055ce5ba21ee453ce552d",
|
||||
"zh:28299accf21763ca1ca144d8f660688d7c2ad0b105b7202554ca60b02a3856d3",
|
||||
"zh:55c9e8a9ac25a7652df8c51a8a9a422bd67d784061b1de2dc9fe6c3cb4e77f2f",
|
||||
"zh:756586535d11698a216291c06b9ed8a5cc6a4ec43eee1ee09ecd5c6a9e297ac1",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:9d5eea62fdb587eeb96a8c4d782459f4e6b73baeece4d04b4a40e44faaee9301",
|
||||
"zh:a6355f596a3fb8fc85c2fb054ab14e722991533f87f928e7169a486462c74670",
|
||||
"zh:b5a65a789cff4ada58a5baffc76cb9767dc26ec6b45c00d2ec8b1b027f6db4ed",
|
||||
"zh:db5ab669cf11d0e9f81dc380a6fdfcac437aea3d69109c7aef1a5426639d2d65",
|
||||
"zh:de655d251c470197bcbb5ac45d289595295acb8f829f6c781d4a75c8c8b7c7dd",
|
||||
"zh:f5c68199f2e6076bce92a12230434782bf768103a427e9bb9abee99b116af7b5",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/template" {
|
||||
version = "2.2.0"
|
||||
hashes = [
|
||||
"h1:94qn780bi1qjrbC3uQtjJh3Wkfwd5+tTtJHOb7KTg9w=",
|
||||
"zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386",
|
||||
"zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53",
|
||||
"zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603",
|
||||
"zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16",
|
||||
"zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776",
|
||||
"zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451",
|
||||
"zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae",
|
||||
"zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde",
|
||||
"zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d",
|
||||
"zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/telmate/proxmox" {
|
||||
version = "3.0.1-rc8"
|
||||
constraints = "3.0.1-rc8"
|
||||
version = "3.0.2-rc07"
|
||||
constraints = "3.0.2-rc07"
|
||||
hashes = [
|
||||
"h1:W5X4T5AZUaqO++aAequNECUKJaXLC5upcws6Vp7mkBk=",
|
||||
"zh:0272f1600251abf9b139c2683f83cde0a907ac762f5ead058b84de18ddc1d78e",
|
||||
"zh:328e708a8063a133516612b17c8983a9372fa42766530925d1d37aeb1daa30ec",
|
||||
"zh:3449150e4d57f79af6f9583e93e3a5ab84fb475bc594de75b968534f57af2871",
|
||||
"zh:58d803a0203241214f673c80350d43ce1a5ce57b21b83ba08d0d08e8c389dcc4",
|
||||
"zh:59e3e99afc1ea404e530100725403c1610d682cfd27eeeaf35190c119b76a4db",
|
||||
"zh:666cb7d299824152714202e8fda000c2e37346f2ae6d0a0e3c6f6bd68ef5d9ca",
|
||||
"zh:6a1290b85e7bf953664b21b2a1ea554923a060f2a8347d8d5bb3d2b5157f85d2",
|
||||
"zh:72230960c49fe7050a5e80ee10fa24cdac94dbab82744bccb6aa251741eb5aa9",
|
||||
"zh:91f655c41f5af9a9fdcf6104c3d0a553eaa0fb3390af81051e744f30accd5b52",
|
||||
"zh:aa08a22bf737d5840573bb6030617ab6bba2a292f4b9c88b20477cdcfb9676a9",
|
||||
"zh:b72012cc284cad488207532b6668c58999c972d837b5f486db1d7466d686d5fd",
|
||||
"zh:e24f934249a6ab4d3705c1398226d4d9df1e81ef8a36592389be02bc35cc661f",
|
||||
"zh:e9e6bcef8b6a6b5ff2317168c2c23e4c55ae23f883ba158d2c4fd6324a0413e5",
|
||||
"zh:ffa1e742a8c50babd8dbfcd6884740f9bea8453ec4d832717ff006a4fbfffa91",
|
||||
"h1:zp5hpQJQ4t4zROSLqdltVpBO+Riy9VugtfFbpyTw1aM=",
|
||||
"zh:2ee860cd0a368b3eaa53f4a9ea46f16dab8a97929e813ea6ef55183f8112c2ca",
|
||||
"zh:415965fd915bae2040d7f79e45f64d6e3ae61149c10114efeac1b34687d7296c",
|
||||
"zh:6584b2055df0e32062561c615e3b6b2c291ca8c959440adda09ef3ec1e1436bd",
|
||||
"zh:65dcfad71928e0a8dd9befc22524ed686be5020b0024dc5cca5184c7420eeb6b",
|
||||
"zh:7253dc29bd265d33f2791ac4f779c5413f16720bb717de8e6c5fcb2c858648ea",
|
||||
"zh:7ec8993da10a47606670f9f67cfd10719a7580641d11c7aa761121c4a2bd66fb",
|
||||
"zh:999a3f7a9dcf517967fc537e6ec930a8172203642fb01b8e1f78f908373db210",
|
||||
"zh:a50e6df7280eb6584a5fd2456e3f5b6df13b2ec8a7fa4605511e438e1863be42",
|
||||
"zh:b25b329a1e42681c509d027fee0365414f0cc5062b65690cfc3386aab16132ae",
|
||||
"zh:c028877fdb438ece48f7bc02b65bbae9ca7b7befbd260e519ccab6c0cbb39f26",
|
||||
"zh:cf0eaa3ea9fcc6d62793637947f1b8d7c885b6ad74695ab47e134e4ff132190f",
|
||||
"zh:d5ade3fae031cc629b7c512a7b60e46570f4c41665e88a595d7efd943dde5ab2",
|
||||
"zh:f388c15ad1ecfc09e7361e3b98bae9b627a3a85f7b908c9f40650969c949901c",
|
||||
"zh:f415cc6f735a3971faae6ac24034afdb9ee83373ef8de19a9631c187d5adc7db",
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,70 +0,0 @@
### Alpaca cloud-init template
data "template_file" "cloud_init_alpaca" {
  count    = var.alpaca_vm_count
  template = file("${path.module}/files/cloud_init.yaml")

  vars = {
    ssh_key    = var.ssh_key
    hostname   = "alpaca-${count.index + 1}"
    domain     = "home.arpa"
    TS_AUTHKEY = var.TS_AUTHKEY
  }
}


resource "local_file" "cloud_init_alpaca" {
  count    = var.alpaca_vm_count
  content  = data.template_file.cloud_init_alpaca[count.index].rendered
  filename = "${path.module}/files/cloud_init_alpaca_${count.index + 1}.yaml"
}

resource "null_resource" "upload_cloud_init_alpaca" {
  count = var.alpaca_vm_count

  connection {
    type = "ssh"
    user = "root"
    host = var.target_node
  }

  provisioner "file" {
    source      = local_file.cloud_init_alpaca[count.index].filename
    destination = "/var/lib/vz/snippets/cloud_init_alpaca_${count.index + 1}.yaml"
  }
}

### Llama cloud-init template
data "template_file" "cloud_init_llama" {
  count    = var.llama_vm_count
  template = file("${path.module}/files/cloud_init.yaml")

  vars = {
    ssh_key    = var.ssh_key
    hostname   = "llama-${count.index + 1}"
    domain     = "home.arpa"
    TS_AUTHKEY = var.TS_AUTHKEY
  }
}


resource "local_file" "cloud_init_llama" {
  count    = var.llama_vm_count
  content  = data.template_file.cloud_init_llama[count.index].rendered
  filename = "${path.module}/files/cloud_init_llama_${count.index + 1}.yaml"
}

resource "null_resource" "upload_cloud_init_llama" {
  count = var.llama_vm_count

  connection {
    type = "ssh"
    user = "root"
    host = var.target_node
  }

  provisioner "file" {
    source      = local_file.cloud_init_llama[count.index].filename
    destination = "/var/lib/vz/snippets/cloud_init_llama_${count.index + 1}.yaml"
  }
}

@@ -1,10 +1,9 @@
#cloud-config
hostname: ${hostname}
fqdn: ${hostname}.${domain}
ssh_authorized_keys:
- ${ssh_key}

runcmd:
- curl -fsSL https://tailscale.com/install.sh | sh
- tailscale up --auth-key=${TS_AUTHKEY}
- tailscale set --ssh

6 terraform/files/cloud_init_base.yaml Normal file
@@ -0,0 +1,6 @@
#cloud-config
runcmd:
- curl -fsSL https://tailscale.com/install.sh | sh
- tailscale up --auth-key=${TS_AUTHKEY}
- tailscale set --ssh

15 terraform/files/cloud_init_global.tpl Normal file
@@ -0,0 +1,15 @@
#cloud-config
hostname: ${hostname}
manage_etc_hosts: true
resolv_conf:
  nameservers:
    - 8.8.8.8
    - 1.1.1.1

preserve_hostname: false
fqdn: ${hostname}.${domain}

users:
  - name: micqdf
    ssh_authorized_keys:
      - ${SSH_KEY_PUBLIC}

@@ -1,42 +1,62 @@
terraform {
  backend "s3" {}

  required_providers {
    proxmox = {
      source  = "Telmate/proxmox"
      version = "3.0.1-rc8"
      version = "3.0.2-rc07"
    }
  }
}

provider "proxmox" {
  pm_api_url          = var.pm_api_url
  pm_user             = var.pm_user
  pm_password         = var.proxmox_password
  pm_api_token_id     = var.pm_api_token_id
  pm_api_token_secret = var.pm_api_token_secret
  pm_tls_insecure     = true
}

resource "proxmox_vm_qemu" "alpacas" {
  count = var.alpaca_vm_count
  name  = "alpaca-${count.index + 1}"
  vmid  = 500 + count.index + 1
resource "proxmox_vm_qemu" "control_planes" {
  count       = var.control_plane_count
  name        = "cp-${count.index + 1}"
  vmid        = var.control_plane_vmid_start + count.index
  target_node = var.target_node
  clone       = var.clone_template
  full_clone  = false
  full_clone  = true
  os_type     = "cloud-init"
  agent       = 1
  automatic_reboot = false

  sockets = var.sockets
  cores   = var.cores
  memory  = var.memory
  cpu {
    sockets = 1
    cores   = var.control_plane_cores
  }
  memory    = var.control_plane_memory_mb
  scsihw    = "virtio-scsi-pci"
  boot      = "order=scsi0"
  bootdisk  = "scsi0"
  ipconfig0 = "ip=dhcp"
  cicustom   = "user=local:snippets/cloud_init_alpaca_${count.index + 1}.yaml"
  depends_on = [null_resource.upload_cloud_init_alpaca]
  ciuser  = "micqdf"
  sshkeys = var.SSH_KEY_PUBLIC


  disks {
    scsi {
      scsi0 {
        disk {
          slot    = "scsi0"
          type    = "disk"
          size    = var.control_plane_disk_size
          storage = var.storage
          size    = var.disk_size
        }
      }
    }

    ide {
      ide2 {
        cloudinit {
          storage = var.storage
        }
      }
    }
  }

  network {
@@ -47,30 +67,48 @@ resource "proxmox_vm_qemu" "alpacas" {
  }


resource "proxmox_vm_qemu" "llamas" {
  count = var.llama_vm_count
  name  = "llama-${count.index + 1}"
  vmid  = 600 + count.index + 1
resource "proxmox_vm_qemu" "workers" {
  count       = var.worker_count
  name        = "wk-${count.index + 1}"
  vmid        = var.worker_vmid_start + count.index
  target_node = var.target_node
  clone       = var.clone_template
  full_clone  = false
  full_clone  = true
  os_type     = "cloud-init"
  agent       = 1
  automatic_reboot = false

  sockets = var.sockets
  cores   = var.cores
  memory  = var.memory
  cpu {
    sockets = 1
    cores   = var.worker_cores[count.index]
  }
  memory    = var.worker_memory_mb[count.index]
  scsihw    = "virtio-scsi-pci"
  boot      = "order=scsi0"
  bootdisk  = "scsi0"
  ipconfig0 = "ip=dhcp"
  cicustom   = "user=local:snippets/cloud_init_llama_${count.index + 1}.yaml"
  depends_on = [null_resource.upload_cloud_init_llama]
  ciuser  = "micqdf"
  sshkeys = var.SSH_KEY_PUBLIC

  disks {
    scsi {
      scsi0 {
        disk {
          slot    = "scsi0"
          type    = "disk"
          size    = var.worker_disk_size
          storage = var.storage
          size    = var.disk_size
        }
      }
    }

    ide {
      ide2 {
        cloudinit {
          storage = var.storage
        }
      }
    }
  }


  network {
    id = 0
@@ -78,4 +116,3 @@ resource "proxmox_vm_qemu" "llamas" {
    bridge = var.bridge
  }
}

@@ -1,22 +1,35 @@
output "alpaca_vm_ids" {
output "control_plane_vm_ids" {
  value = {
    for i in range(var.alpaca_count) :
    "alpaca-${i + 1}" => proxmox_vm_qemu.alpacas[i].vmid
    for i in range(var.control_plane_count) :
    "cp-${i + 1}" => proxmox_vm_qemu.control_planes[i].vmid
  }
}

output "alpaca_vm_names" {
  value = [for vm in proxmox_vm_qemu.alpacas : vm.name]
output "control_plane_vm_names" {
  value = [for vm in proxmox_vm_qemu.control_planes : vm.name]
}

output "llama_vm_ids" {
output "control_plane_vm_ipv4" {
  value = {
    for i in range(var.llama_count) :
    "llama-${i + 1}" => proxmox_vm_qemu.llamas[i].vmid
    for vm in proxmox_vm_qemu.control_planes :
    vm.name => vm.default_ipv4_address
  }
}

output "llama_vm_names" {
  value = [for vm in proxmox_vm_qemu.llamas : vm.name]
output "worker_vm_ids" {
  value = {
    for i in range(var.worker_count) :
    "wk-${i + 1}" => proxmox_vm_qemu.workers[i].vmid
  }
}

output "worker_vm_names" {
  value = [for vm in proxmox_vm_qemu.workers : vm.name]
}

output "worker_vm_ipv4" {
  value = {
    for vm in proxmox_vm_qemu.workers :
    vm.name => vm.default_ipv4_address
  }
}

@@ -1,13 +1,19 @@
target_node    = "flex"
clone_template = "Alpine-TemplateV2"
vm_name        = "alpine-vm"
cores          = 2
memory         = 2048
disk_size      = "15G"
sockets        = 1
clone_template = "nixos-template"
bridge         = "vmbr0"
disk_type      = "scsi"
storage        = "Flash"
pm_api_url     = "https://100.105.0.115:8006/api2/json"
pm_user        = "terraform-prov@pve"
pm_api_token_id = "terraform-prov@pve!mytoken"

control_plane_count      = 3
worker_count             = 3
control_plane_vmid_start = 701
worker_vmid_start        = 711

control_plane_cores     = 1
control_plane_memory_mb = 4096
control_plane_disk_size = "40G"

worker_cores     = [4, 4, 4]
worker_memory_mb = [12288, 12288, 12288]
worker_disk_size = "60G"

@@ -1,5 +1,22 @@
variable "proxmox_password" {
variable "pm_api_token_id" {
  type        = string
  description = "Proxmox API token ID (format: user@realm!tokenid)"

  validation {
    condition     = can(regex(".+!.+", trimspace(var.pm_api_token_id)))
    error_message = "pm_api_token_id must be in format user@realm!tokenid."
  }
}

variable "pm_api_token_secret" {
  type        = string
  sensitive   = true
  description = "Proxmox API token secret"

  validation {
    condition     = length(trimspace(var.pm_api_token_secret)) > 0
    error_message = "pm_api_token_secret cannot be empty. Check your Gitea secret PM_API_TOKEN_SECRET."
  }
}

variable "target_node" {
@@ -10,34 +27,70 @@ variable "clone_template" {
  type = string
}

variable "vm_name" {
variable "control_plane_count" {
  type        = number
  default     = 3
  description = "Number of control plane VMs"
}

variable "worker_count" {
  type        = number
  default     = 3
  description = "Number of worker VMs"
}

variable "control_plane_vmid_start" {
  type        = number
  default     = 701
  description = "Starting VMID for control plane VMs"
}

variable "worker_vmid_start" {
  type        = number
  default     = 711
  description = "Starting VMID for worker VMs"
}

variable "control_plane_cores" {
  type        = number
  default     = 1
  description = "vCPU cores per control plane VM"
}

variable "control_plane_memory_mb" {
  type        = number
  default     = 4096
  description = "Memory in MB per control plane VM"
}

variable "worker_cores" {
  type        = list(number)
  default     = [4, 4, 4]
  description = "vCPU cores for each worker VM"
}

variable "worker_memory_mb" {
  type        = list(number)
  default     = [12288, 12288, 12288]
  description = "Memory in MB for each worker VM"
}

variable "control_plane_disk_size" {
  type        = string
  default     = "40G"
  description = "Disk size for control plane VMs"
}

variable "cores" {
  type = number
}

variable "memory" {
  type = number
}

variable "disk_size" {
variable "worker_disk_size" {
  type = string
}

variable "sockets" {
  type = number
  default     = "60G"
  description = "Disk size for worker VMs"
}

variable "bridge" {
  type = string
}

variable "disk_type" {
  type = string
}

variable "storage" {
  type = string
}
@@ -46,42 +99,7 @@ variable "pm_api_url" {
  type = string
}

variable "pm_user" {
variable "SSH_KEY_PUBLIC" {
  type        = string
  description = "Public SSH key injected via cloud-init"
}

variable "alpaca_count" {
  type        = number
  default     = 1
  description = "How many Alpaca VMs to create"
}

variable "llama_count" {
  type        = number
  default     = 1
  description = "How many Llama VMs to create"
}

variable "alpaca_vm_count" {
  type        = number
  default     = 1
  description = "How many Alpaca VMs to create"
}

variable "llama_vm_count" {
  type        = number
  default     = 1
  description = "How many Llama VMs to create"
}

variable "TS_AUTHKEY" {
  type        = string
  description = "Tailscale auth key used in cloud-init"
}


variable "ssh_key" {
  type        = string
  description = "Public SSH key used by cloud-init"
}