Optimized virtual machine provisioning

This commit is contained in:
Max Pfeiffer
2024-11-10 19:01:12 +01:00
parent 0a410366ed
commit d43ddca54a
4 changed files with 18 additions and 64 deletions

View File

@@ -1,8 +0,0 @@
# Installs Argo CD into the cluster from the official Argo Project Helm chart.
resource "helm_release" "argocd" {
name = "argocd"
# Chart name within the Argo Project Helm repository below.
chart = "argo-cd"
repository = "https://argoproj.github.io/argo-helm"
# Pinned chart version for reproducible deployments.
version = "7.7.0"
# Extended timeout (seconds, per the helm provider) — Argo CD can take a
# while to become ready on first install.
timeout = "1500"
# Deploy into the namespace managed by the kubernetes_namespace.argocd resource.
namespace = kubernetes_namespace.argocd.id
}

View File

@@ -1,3 +1,9 @@
# Dedicated namespace for ingress-related workloads.
resource "kubernetes_namespace" "ingress" {
metadata {
name = "ingress"
}
}
 resource "kubernetes_namespace" "argocd" {
 metadata {
 name = "argocd"

View File

@@ -21,7 +21,7 @@ data "talos_client_configuration" "this" {
 }
 resource "talos_machine_configuration_apply" "controlplane" {
-depends_on = [proxmox_vm_qemu.k8s_control_plane]
+depends_on = [proxmox_vm_qemu.kubernetes_control_plane]
 client_configuration = talos_machine_secrets.this.client_configuration
 machine_configuration_input = data.talos_machine_configuration.controlplane.machine_configuration
 for_each = var.node_data.controlplanes
@@ -37,7 +37,7 @@ resource "talos_machine_configuration_apply" "controlplane" {
 }
 resource "talos_machine_configuration_apply" "worker" {
-depends_on = [proxmox_vm_qemu.k8s_worker_1, proxmox_vm_qemu.k8s_worker_2]
+depends_on = [proxmox_vm_qemu.kubernetes_worker]
 client_configuration = talos_machine_secrets.this.client_configuration
 machine_configuration_input = data.talos_machine_configuration.worker.machine_configuration
 for_each = var.node_data.workers

View File

@@ -1,7 +1,8 @@
-resource "proxmox_vm_qemu" "k8s_control_plane" {
+resource "proxmox_vm_qemu" "kubernetes_control_plane" {
 depends_on = [proxmox_storage_iso.talos_linux_iso_image]
-name = "k8s-control-plane"
-desc = "Control Node"
+for_each = var.node_data.controlplanes
+name = format("kubernetes-control-plane-%s", index(keys(var.node_data.controlplanes), each.key))
+desc = "Kubernetes Control Plane"
 target_node = var.proxmox_target_node
 agent = 1
 vm_state = "running"
@@ -40,14 +41,15 @@ resource "proxmox_vm_qemu" "k8s_control_plane" {
 # Cloud init setup
 os_type = "cloud-init"
-ipconfig0 = "ip=${local.k8s_control_plane_ip_address}/24,gw=192.168.1.1"
+ipconfig0 = "ip=${each.key}/24,gw=192.168.1.1"
 }
-resource "proxmox_vm_qemu" "k8s_worker_1" {
+resource "proxmox_vm_qemu" "kubernetes_worker" {
 depends_on = [proxmox_storage_iso.talos_linux_iso_image]
-name = "k8s-worker-1"
-desc = "Worker Node 1"
+for_each = var.node_data.workers
+name = format("kubernetes-worker-%s", index(keys(var.node_data.workers), each.key))
+desc = "Kubernetes Worker Node"
 target_node = var.proxmox_target_node
 agent = 1
 vm_state = "running"
@@ -86,51 +88,5 @@ resource "proxmox_vm_qemu" "k8s_worker_1" {
 # Cloud init setup
 os_type = "cloud-init"
-ipconfig0 = "ip=${local.k8s_worker_1_ip_address}/24,gw=192.168.1.1"
+ipconfig0 = "ip=${each.key}/24,gw=192.168.1.1"
}
# Proxmox VM for the second Kubernetes worker node. Boots the Talos Linux
# installer ISO once it has been uploaded to the Proxmox host.
# NOTE: the original extraction ended this block with a fused "} }"; the
# resource is closed here with the single brace valid HCL requires.
resource "proxmox_vm_qemu" "k8s_worker_2" {
  depends_on  = [proxmox_storage_iso.talos_linux_iso_image]
  name        = "k8s-worker-2"
  desc        = "Worker Node 2"
  target_node = var.proxmox_target_node
  agent       = 1
  vm_state    = "running"
  cores       = 2
  memory      = 8192
  # Boot from the main virtio disk first, falling back to the ISO on ide2.
  boot = "order=virtio0;ide2"

  vga {
    type = "std"
  }

  # Cloud-init configuration drive.
  disk {
    slot    = "ide0"
    type    = "cloudinit"
    storage = "local-lvm"
  }

  # Talos Linux installer ISO.
  disk {
    slot = "ide2"
    type = "cdrom"
    iso  = "local:iso/${local.talos_linux_iso_image_filename}"
  }

  # Primary OS disk.
  disk {
    slot    = "virtio0"
    type    = "disk"
    storage = "local-lvm"
    size    = "10240M"
    discard = true
  }

  network {
    model  = "virtio"
    bridge = "vmbr0"
  }

  # Cloud init setup
  os_type   = "cloud-init"
  ipconfig0 = "ip=${local.k8s_worker_2_ip_address}/24,gw=192.168.1.1"
}