fix: align VM boot disk and add Terraform safety workflows
Some checks failed
Terraform Plan / Terraform Plan (push) Failing after 3m35s
Switch VM boot order/disks to scsi0 to match cloned NixOS template boot layout, add destroy guards to plan/apply workflows, and replace destroy workflow with a confirmed manual dispatch nuke flow that uses remote B2 state.
This commit is contained in:
@@ -47,11 +47,24 @@ jobs:
|
||||
|
||||
- name: Terraform Plan
|
||||
working-directory: terraform
|
||||
run: terraform plan
|
||||
run: terraform plan -out=tfplan
|
||||
|
||||
- name: Block accidental destroy
|
||||
env:
|
||||
ALLOW_TF_DESTROY: ${{ secrets.ALLOW_TF_DESTROY }}
|
||||
working-directory: terraform
|
||||
run: |
|
||||
terraform show -json tfplan > tfplan.json
|
||||
DESTROY_COUNT=$(python3 -c 'import json; p=json.load(open("tfplan.json")); print(sum(1 for rc in p.get("resource_changes", []) if "delete" in rc.get("change", {}).get("actions", [])))')
|
||||
echo "Planned deletes: $DESTROY_COUNT"
|
||||
if [ "$DESTROY_COUNT" -gt 0 ] && [ "${ALLOW_TF_DESTROY}" != "true" ]; then
|
||||
echo "Destroy actions detected. Set ALLOW_TF_DESTROY=true to allow."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Terraform Apply
|
||||
working-directory: terraform
|
||||
run: terraform apply -auto-approve
|
||||
run: terraform apply -auto-approve tfplan
|
||||
|
||||
- name: Enroll VMs in Tailscale
|
||||
env:
|
||||
|
||||
@@ -1,28 +1,61 @@
|
||||
name: Gitea Destroy Terraform
|
||||
run-name: ${{ gitea.actor }} triggered a Terraform Destroy 🧨
|
||||
name: Terraform Destroy
|
||||
run-name: ${{ gitea.actor }} requested Terraform destroy
|
||||
|
||||
on:
|
||||
workflow_dispatch: # Manual trigger
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
confirm:
|
||||
description: "Type NUKE to confirm destroy"
|
||||
required: true
|
||||
type: string
|
||||
target:
|
||||
description: "Destroy scope"
|
||||
required: true
|
||||
default: all
|
||||
type: choice
|
||||
options:
|
||||
- all
|
||||
- alpacas
|
||||
- llamas
|
||||
|
||||
jobs:
|
||||
destroy:
|
||||
name: "Terraform Destroy"
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
|
||||
env:
|
||||
TF_VAR_SSH_KEY: ${{ secrets.TF_VAR_SSH_KEY_PUBLIC }}
|
||||
TF_VAR_TS_AUTHKEY: ${{ secrets.TF_VAR_TS_AUTHKEY }}
|
||||
TF_VAR_PROXMOX_PASSWORD: ${{ secrets.TF_VAR_PROXMOX_PASSWORD }}
|
||||
|
||||
|
||||
steps:
|
||||
- name: Validate confirmation phrase
|
||||
run: |
|
||||
if [ "${{ inputs.confirm }}" != "NUKE" ]; then
|
||||
echo "Confirmation failed. You must type NUKE."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create Terraform secret files
|
||||
working-directory: terraform
|
||||
run: |
|
||||
cat > secrets.auto.tfvars << EOF
|
||||
pm_api_token_secret = "${{ secrets.PM_API_TOKEN_SECRET }}"
|
||||
EOF
|
||||
cat > backend.hcl << EOF
|
||||
bucket = "${{ secrets.B2_TF_BUCKET }}"
|
||||
key = "terraform.tfstate"
|
||||
region = "us-east-005"
|
||||
endpoints = {
|
||||
s3 = "${{ secrets.B2_TF_ENDPOINT }}"
|
||||
}
|
||||
access_key = "$(printf '%s' "${{ secrets.B2_KEY_ID }}" | tr -d '\r\n')"
|
||||
secret_key = "$(printf '%s' "${{ secrets.B2_APPLICATION_KEY }}" | tr -d '\r\n')"
|
||||
skip_credentials_validation = true
|
||||
skip_metadata_api_check = true
|
||||
skip_region_validation = true
|
||||
skip_requesting_account_id = true
|
||||
use_path_style = true
|
||||
EOF
|
||||
|
||||
- name: Set up Terraform
|
||||
uses: hashicorp/setup-terraform@v2
|
||||
with:
|
||||
@@ -30,9 +63,27 @@ jobs:
|
||||
|
||||
- name: Terraform Init
|
||||
working-directory: terraform
|
||||
run: terraform init
|
||||
run: terraform init -reconfigure -backend-config=backend.hcl
|
||||
|
||||
- name: Terraform Destroy
|
||||
- name: Terraform Destroy Plan
|
||||
working-directory: terraform
|
||||
run: terraform destroy -auto-approve
|
||||
run: |
|
||||
case "${{ inputs.target }}" in
|
||||
all)
|
||||
terraform plan -destroy -out=tfdestroy
|
||||
;;
|
||||
alpacas)
|
||||
terraform plan -destroy -target=proxmox_vm_qemu.alpacas -out=tfdestroy
|
||||
;;
|
||||
llamas)
|
||||
terraform plan -destroy -target=proxmox_vm_qemu.llamas -out=tfdestroy
|
||||
;;
|
||||
*)
|
||||
echo "Invalid destroy target: ${{ inputs.target }}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
- name: Terraform Destroy Apply
|
||||
working-directory: terraform
|
||||
run: terraform apply -auto-approve tfdestroy
|
||||
|
||||
@@ -63,6 +63,19 @@ jobs:
|
||||
working-directory: terraform
|
||||
run: terraform plan -out=tfplan
|
||||
|
||||
- name: Block accidental destroy
|
||||
env:
|
||||
ALLOW_TF_DESTROY: ${{ secrets.ALLOW_TF_DESTROY }}
|
||||
working-directory: terraform
|
||||
run: |
|
||||
terraform show -json tfplan > tfplan.json
|
||||
DESTROY_COUNT=$(python3 -c 'import json; p=json.load(open("tfplan.json")); print(sum(1 for rc in p.get("resource_changes", []) if "delete" in rc.get("change", {}).get("actions", [])))')
|
||||
echo "Planned deletes: $DESTROY_COUNT"
|
||||
if [ "$DESTROY_COUNT" -gt 0 ] && [ "${ALLOW_TF_DESTROY}" != "true" ]; then
|
||||
echo "Destroy actions detected. Set ALLOW_TF_DESTROY=true to allow."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Upload Terraform Plan
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
|
||||
@@ -26,19 +26,21 @@ resource "proxmox_vm_qemu" "alpacas" {
|
||||
os_type = "cloud-init"
|
||||
agent = 1
|
||||
|
||||
cpu {
|
||||
sockets = var.sockets
|
||||
cores = var.cores
|
||||
}
|
||||
memory = var.memory
|
||||
scsihw = "virtio-scsi-pci"
|
||||
boot = "order=virtio0"
|
||||
bootdisk = "virtio0"
|
||||
boot = "order=scsi0"
|
||||
bootdisk = "scsi0"
|
||||
ipconfig0 = "ip=dhcp"
|
||||
cicustom = "user=local:snippets/cloud_init_global.yaml"
|
||||
|
||||
|
||||
disks {
|
||||
virtio {
|
||||
virtio0 {
|
||||
scsi {
|
||||
scsi0 {
|
||||
disk {
|
||||
size = var.disk_size
|
||||
storage = var.storage
|
||||
@@ -73,18 +75,20 @@ resource "proxmox_vm_qemu" "llamas" {
|
||||
os_type = "cloud-init"
|
||||
agent = 1
|
||||
|
||||
cpu {
|
||||
sockets = var.sockets
|
||||
cores = var.cores
|
||||
}
|
||||
memory = var.memory
|
||||
scsihw = "virtio-scsi-pci"
|
||||
boot = "order=virtio0"
|
||||
bootdisk = "virtio0"
|
||||
boot = "order=scsi0"
|
||||
bootdisk = "scsi0"
|
||||
ipconfig0 = "ip=dhcp"
|
||||
cicustom = "user=local:snippets/cloud_init_global.yaml"
|
||||
|
||||
disks {
|
||||
virtio {
|
||||
virtio0 {
|
||||
scsi {
|
||||
scsi0 {
|
||||
disk {
|
||||
size = var.disk_size
|
||||
storage = var.storage
|
||||
|
||||
Reference in New Issue
Block a user