diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..13566b8
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
new file mode 100644
index 0000000..03d9549
--- /dev/null
+++ b/.idea/inspectionProfiles/Project_Default.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..db8786c
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..7d909ab
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/proxmox-talos-opentofu.iml b/.idea/proxmox-talos-opentofu.iml
new file mode 100644
index 0000000..d0876a7
--- /dev/null
+++ b/.idea/proxmox-talos-opentofu.iml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..35eb1dd
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl
new file mode 100644
index 0000000..4b8f729
--- /dev/null
+++ b/.terraform.lock.hcl
@@ -0,0 +1,47 @@
+# This file is maintained automatically by "tofu init".
+# Manual edits may be lost in future updates.
+
+provider "registry.opentofu.org/siderolabs/talos" {
+ version = "0.7.0-alpha.0"
+ constraints = "0.7.0-alpha.0"
+ hashes = [
+ "h1:RMoURrHNTK/4ofsYr803w/GZIk/W4BtkR9tPcHeqkzw=",
+ "zh:090d86eee971ac84a1d6999d1ccdb1323f257ced6aec068ac39f621d9410baad",
+ "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d",
+ "zh:3de44dd80dee28b4e5840886167b2a0abab16dd8aefa1d387f913e57723bf74a",
+ "zh:3eb60ee11290e32cb436aa6c2801fe16f436388ee3578f913656776590634835",
+ "zh:5d31feb8a7782a5f77cfd7e4447f731d9f69c9350a1cf08ec98b66bd014bbb2a",
+ "zh:6b0c1d0965fd256ac38911add83a95a179d69843978956c5a2980c073f209b8d",
+ "zh:75431c28ac8a09243291e95d9ce93ae250bc77e1e40c81e94b84639dfca3e492",
+ "zh:7f26210ddc7af32737756ce214b208218a2c1679475e3eb49504543911e7d9ad",
+ "zh:8e5b685a8db6ddb28db84df076729389a3fb8cbe0576f996ab7e0a0a31220b4e",
+ "zh:b441337a78d2fbcea9cf0261ddc45599dd332459700e87484f1258d656399f6a",
+ "zh:bbb54c313bf26845b012d3d74f4d300cea96dbb2d7410e7210e64cb0be2d1f60",
+ "zh:c7991f7c27762ba17404ee6b666e7b66c6cd8bf24f01103c0d2ed96a40021b5e",
+ "zh:d8901ba224dc14f4d6cab1509a4d2f7bd87958fa3d7840c70f59fa2967f77515",
+ "zh:dc425e059399527f44e493cc8a078244065b4c0f5a77bbd9f00f3b47fb4a27d3",
+ "zh:de3b15a809b49ef9fee77f9f864c2ed1bdbfa62fc258c59473169269f354d8f5",
+ ]
+}
+
+provider "registry.opentofu.org/telmate/proxmox" {
+ version = "3.0.1-rc4"
+ constraints = "3.0.1-rc4"
+ hashes = [
+ "h1:tcfqcTB5TDQKSGrWksACZdFIX6ig72i++OYaC8EncOU=",
+ "zh:1070aff02aebeadf130368b0082e76d976f61464b3bb29c1c5a7866bb14c7380",
+ "zh:3cd96c232a12cf3bbb0e874508e5ff14116ef347d60db20db17ad87bb161ee22",
+ "zh:4f75954f3e68159ed969e3eac27485166103058eff3f99186d805816c6f8eb66",
+ "zh:55572fd22f7c62813a691fe0d017b2a57a34f3b4e1c40af6c2197731878ebf84",
+ "zh:6536402b2eff0a754ff975c39318c3c0b47dfa2dc4461d34a8c55ba493288d9f",
+ "zh:735f4283286cb78fe28b4ad001771c460f1963ee640e027467eb199d80a6c257",
+ "zh:90a675455c5812d90acbf44bfee347c2318b13565c68bcf64b452dbe6c2a629a",
+ "zh:9bbfe89d3f0997a26d7636d5c2d7244beccf92371d17073583299b1b74e1ab9c",
+ "zh:9ed8ecb50c4ed8555ffe1544325de07db678e2877f7c4637fbfaf02d5f004100",
+ "zh:b1e362ebd234aa82a38ffcfa4e74295e8a23811edff8af88f79372ba18ef0918",
+ "zh:c652faede363a91ad3a148cdd1b3d9c3ab8bac1b94d92ce89eb6e1ddadc99cc1",
+ "zh:d803958e5e465095cc0d5741abf0abd80b5fd3d1c076b40880a136e737bb06d0",
+ "zh:fa12bd372e39c8ac6295503f88884f328971834e109fcc015322fc9ab5fe858f",
+ "zh:fb7abe461d36f9868a0a6728320e482ecd54e047c2876dce85d5c2143eba3b8f",
+ ]
+}
diff --git a/README.md b/README.md
index 2785523..7c55ddf 100644
--- a/README.md
+++ b/README.md
@@ -1 +1 @@
-# proxmos-talos-linux-opentofu
\ No newline at end of file
+# proxmox-talos-opentofu
\ No newline at end of file
diff --git a/files/cp-scheduling.yaml b/files/cp-scheduling.yaml
new file mode 100644
index 0000000..627c339
--- /dev/null
+++ b/files/cp-scheduling.yaml
@@ -0,0 +1,2 @@
+cluster:
+ allowSchedulingOnControlPlanes: true
\ No newline at end of file
diff --git a/iso_images.tf b/iso_images.tf
new file mode 100644
index 0000000..67d47ad
--- /dev/null
+++ b/iso_images.tf
@@ -0,0 +1,6 @@
+resource "proxmox_storage_iso" "talos_linux_iso_image" {
+ url = "https://factory.talos.dev/image/ce4c980550dd2ab1b17bbf2b08801c7eb59418eafe8f279833297925d67c7515/v1.8.2/nocloud-amd64.iso"
+ filename = local.talos_linux_iso_image_filename
+ storage = "local"
+ pve_node = var.proxmox_target_node
+}
diff --git a/locals.tf b/locals.tf
new file mode 100644
index 0000000..fd5037f
--- /dev/null
+++ b/locals.tf
@@ -0,0 +1,15 @@
+locals {
+  repo_root = dirname(abspath(path.root))
+
+  # Talos Linux
+  talos_linux_iso_image_filename = "talos-linux-qemu-guest-agent-amd64.iso"
+
+  # K8s control plane
+  k8s_control_plane_ip_address = "192.168.1.150"
+
+  # K8s worker 1
+  k8s_worker_1_ip_address = "192.168.1.151"
+
+  # K8s worker 2
+  k8s_worker_2_ip_address = "192.168.1.152"
+}
diff --git a/outputs.tf b/outputs.tf
new file mode 100644
index 0000000..b13a726
--- /dev/null
+++ b/outputs.tf
@@ -0,0 +1,9 @@
+output "talosconfig" {
+ value = data.talos_client_configuration.this.talos_config
+ sensitive = true
+}
+
+output "kubeconfig" {
+ value = talos_cluster_kubeconfig.this.kubeconfig_raw
+ sensitive = true
+}
diff --git a/providers.tf b/providers.tf
new file mode 100644
index 0000000..670b93e
--- /dev/null
+++ b/providers.tf
@@ -0,0 +1,22 @@
+terraform {
+  required_providers {
+    proxmox = {
+      source  = "telmate/proxmox"
+      version = "3.0.1-rc4"
+    }
+    talos = {
+      source  = "siderolabs/talos"
+      version = "0.7.0-alpha.0"
+    }
+  }
+}
+
+provider "proxmox" {
+  pm_api_url          = var.proxmox_api_url
+  pm_api_token_id     = var.proxmox_api_token_id
+  pm_api_token_secret = var.proxmox_api_token_secret
+  pm_tls_insecure     = true
+  # Switching off parallelism is necessary here, as VM provisioning fails otherwise
+  # see: https://github.com/Telmate/terraform-provider-proxmox/issues/173
+  pm_parallel         = 1
+}
diff --git a/talos_linux.tf b/talos_linux.tf
new file mode 100644
index 0000000..378a016
--- /dev/null
+++ b/talos_linux.tf
@@ -0,0 +1,63 @@
+resource "talos_machine_secrets" "this" {}
+
+data "talos_machine_configuration" "controlplane" {
+  cluster_name     = var.cluster_name
+  cluster_endpoint = var.cluster_endpoint
+  machine_type     = "controlplane"
+  machine_secrets  = talos_machine_secrets.this.machine_secrets
+}
+
+data "talos_machine_configuration" "worker" {
+  cluster_name     = var.cluster_name
+  cluster_endpoint = var.cluster_endpoint
+  machine_type     = "worker"
+  machine_secrets  = talos_machine_secrets.this.machine_secrets
+}
+
+data "talos_client_configuration" "this" {
+  cluster_name         = var.cluster_name
+  client_configuration = talos_machine_secrets.this.client_configuration
+  endpoints            = keys(var.node_data.controlplanes)
+}
+
+resource "talos_machine_configuration_apply" "controlplane" {
+  depends_on                  = [proxmox_vm_qemu.k8s_control_plane]
+  client_configuration        = talos_machine_secrets.this.client_configuration
+  machine_configuration_input = data.talos_machine_configuration.controlplane.machine_configuration
+  for_each                    = var.node_data.controlplanes
+  node                        = each.key
+  config_patches = [
+    templatefile("${path.module}/templates/machine_disk_and_hostname.yaml.tftmpl", {
+      hostname     = each.value.hostname == null ? format("%s-cp-%s", var.cluster_name, index(keys(var.node_data.controlplanes), each.key)) : each.value.hostname
+      install_disk = each.value.install_disk
+    }),
+    file("${path.module}/files/cp-scheduling.yaml"),
+  ]
+}
+
+resource "talos_machine_configuration_apply" "worker" {
+  depends_on                  = [proxmox_vm_qemu.k8s_worker_1, proxmox_vm_qemu.k8s_worker_2]
+  client_configuration        = talos_machine_secrets.this.client_configuration
+  machine_configuration_input = data.talos_machine_configuration.worker.machine_configuration
+  for_each                    = var.node_data.workers
+  node                        = each.key
+  config_patches = [
+    templatefile("${path.module}/templates/machine_disk_and_hostname.yaml.tftmpl", {
+      hostname     = each.value.hostname == null ? format("%s-worker-%s", var.cluster_name, index(keys(var.node_data.workers), each.key)) : each.value.hostname
+      install_disk = each.value.install_disk
+    })
+  ]
+}
+
+resource "talos_machine_bootstrap" "this" {
+  depends_on = [talos_machine_configuration_apply.controlplane]
+
+  client_configuration = talos_machine_secrets.this.client_configuration
+  node                 = keys(var.node_data.controlplanes)[0]
+}
+
+resource "talos_cluster_kubeconfig" "this" {
+  depends_on           = [talos_machine_bootstrap.this]
+  client_configuration = talos_machine_secrets.this.client_configuration
+  node                 = keys(var.node_data.controlplanes)[0]
+}
\ No newline at end of file
diff --git a/templates/machine_disk_and_hostname.yaml.tftmpl b/templates/machine_disk_and_hostname.yaml.tftmpl
new file mode 100644
index 0000000..383faf8
--- /dev/null
+++ b/templates/machine_disk_and_hostname.yaml.tftmpl
@@ -0,0 +1,5 @@
+machine:
+ install:
+ disk: ${install_disk}
+ network:
+ hostname: ${hostname}
\ No newline at end of file
diff --git a/variables.tf b/variables.tf
new file mode 100644
index 0000000..361476b
--- /dev/null
+++ b/variables.tf
@@ -0,0 +1,56 @@
+variable "proxmox_api_url" {
+ type = string
+}
+
+variable "proxmox_api_token_id" {
+ type = string
+ sensitive = true
+}
+
+variable "proxmox_api_token_secret" {
+ type = string
+ sensitive = true
+}
+
+variable "proxmox_target_node" {
+ type = string
+}
+
+variable "cluster_name" {
+ description = "A name to provide for the Talos cluster"
+ type = string
+}
+
+variable "cluster_endpoint" {
+ description = "The endpoint for the Talos cluster"
+ type = string
+}
+
+variable "node_data" {
+ description = "A map of node data"
+ type = object({
+ controlplanes = map(object({
+ install_disk = string
+ hostname = optional(string)
+ }))
+ workers = map(object({
+ install_disk = string
+ hostname = optional(string)
+ }))
+ })
+ default = {
+ controlplanes = {
+ "192.168.1.150" = {
+ install_disk = "/dev/vda"
+ },
+ }
+ workers = {
+ "192.168.1.151" = {
+ install_disk = "/dev/vda"
+ },
+ "192.168.1.152" = {
+ install_disk = "/dev/vda"
+ }
+ }
+ }
+}
diff --git a/virtual_machines.tf b/virtual_machines.tf
new file mode 100644
index 0000000..aeacae7
--- /dev/null
+++ b/virtual_machines.tf
@@ -0,0 +1,138 @@
+# One VM per Kubernetes node. All nodes boot from the Talos ISO uploaded by
+# iso_images.tf; the cloud-init disk (ide0) supplies the static IP config.
+resource "proxmox_vm_qemu" "k8s_control_plane" {
+  depends_on  = [proxmox_storage_iso.talos_linux_iso_image]
+  name        = "k8s-control-plane"
+  desc        = "Control Node"
+  target_node = var.proxmox_target_node
+  agent       = 1
+  vm_state    = "running"
+  cores       = 2
+  memory      = 4096
+  boot        = "order=virtio0;ide2"
+
+  vga {
+    type = "std"
+  }
+
+  disk {
+    slot    = "ide0"
+    type    = "cloudinit"
+    storage = "local-lvm"
+  }
+
+  disk {
+    slot = "ide2"
+    type = "cdrom"
+    iso  = "local:iso/${local.talos_linux_iso_image_filename}"
+  }
+
+  disk {
+    slot    = "virtio0"
+    type    = "disk"
+    storage = "local-lvm"
+    size    = "10240M"
+    discard = true
+  }
+
+  network {
+    model  = "virtio"
+    bridge = "vmbr0"
+  }
+
+  # Cloud init setup
+  os_type   = "cloud-init"
+  ipconfig0 = "ip=${local.k8s_control_plane_ip_address}/24,gw=192.168.1.1"
+}
+
+
+resource "proxmox_vm_qemu" "k8s_worker_1" {
+  depends_on  = [proxmox_storage_iso.talos_linux_iso_image]
+  name        = "k8s-worker-1"
+  desc        = "Worker Node 1"
+  target_node = var.proxmox_target_node
+  agent       = 1
+  vm_state    = "running"
+  cores       = 2
+  memory      = 8192
+  boot        = "order=virtio0;ide2"
+
+  vga {
+    type = "std"
+  }
+
+  disk {
+    slot    = "ide0"
+    type    = "cloudinit"
+    storage = "local-lvm"
+  }
+
+  disk {
+    slot = "ide2"
+    type = "cdrom"
+    iso  = "local:iso/${local.talos_linux_iso_image_filename}"
+  }
+
+  disk {
+    slot    = "virtio0"
+    type    = "disk"
+    storage = "local-lvm"
+    size    = "10240M"
+    discard = true
+  }
+
+  network {
+    model  = "virtio"
+    bridge = "vmbr0"
+  }
+
+  # Cloud init setup
+  os_type   = "cloud-init"
+  ipconfig0 = "ip=${local.k8s_worker_1_ip_address}/24,gw=192.168.1.1"
+}
+
+
+resource "proxmox_vm_qemu" "k8s_worker_2" {
+  depends_on  = [proxmox_storage_iso.talos_linux_iso_image]
+  name        = "k8s-worker-2"
+  desc        = "Worker Node 2"
+  target_node = var.proxmox_target_node
+  agent       = 1
+  vm_state    = "running"
+  cores       = 2
+  memory      = 8192
+  boot        = "order=virtio0;ide2"
+
+  vga {
+    type = "std"
+  }
+
+  disk {
+    slot    = "ide0"
+    type    = "cloudinit"
+    storage = "local-lvm"
+  }
+
+  disk {
+    slot = "ide2"
+    type = "cdrom"
+    iso  = "local:iso/${local.talos_linux_iso_image_filename}"
+  }
+
+  disk {
+    slot    = "virtio0"
+    type    = "disk"
+    storage = "local-lvm"
+    size    = "10240M"
+    discard = true
+  }
+
+  network {
+    model  = "virtio"
+    bridge = "vmbr0"
+  }
+
+  # Cloud init setup
+  os_type   = "cloud-init"
+  ipconfig0 = "ip=${local.k8s_worker_2_ip_address}/24,gw=192.168.1.1"
+}