Added virtual machines and Talos config to create a three-node cluster

commit ee675221d9
parent 9e2b64cc8c
Author: Max Pfeiffer
Date: 2024-11-09 12:32:59 +01:00
18 changed files with 411 additions and 1 deletion
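With these files in place, the cluster can be brought up with the standard OpenTofu workflow. A minimal sketch, assuming the variables declared in variables.tf are supplied via a terraform.tfvars file or TF_VAR_ environment variables (neither is part of this commit):

# Download the providers pinned in .terraform.lock.hcl
tofu init

# Review the planned VMs and Talos resources, then create them
tofu plan
tofu apply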

8  .idea/.gitignore  generated vendored  Normal file

@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

6  .idea/inspectionProfiles/Project_Default.xml  generated  Normal file

@@ -0,0 +1,6 @@
<component name="InspectionProjectProfileManager">
  <profile version="1.0">
    <option name="myName" value="Project Default" />
    <inspection_tool class="Eslint" enabled="true" level="WARNING" enabled_by_default="true" />
  </profile>
</component>

6  .idea/inspectionProfiles/profiles_settings.xml  generated  Normal file

@@ -0,0 +1,6 @@
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>

7  .idea/misc.xml  generated  Normal file

@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="Black">
    <option name="sdkName" value="Python 3.12" />
  </component>
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.12" project-jdk-type="Python SDK" />
</project>

8  .idea/modules.xml  generated  Normal file

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/proxmox-talos-opentofu.iml" filepath="$PROJECT_DIR$/.idea/proxmox-talos-opentofu.iml" />
    </modules>
  </component>
</project>

8  .idea/proxmox-talos-opentofu.iml  generated  Normal file

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>

6  .idea/vcs.xml  generated  Normal file

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="" vcs="Git" />
  </component>
</project>

47  .terraform.lock.hcl  generated  Normal file

@@ -0,0 +1,47 @@
# This file is maintained automatically by "tofu init".
# Manual edits may be lost in future updates.

provider "registry.opentofu.org/siderolabs/talos" {
  version     = "0.7.0-alpha.0"
  constraints = "0.7.0-alpha.0"
  hashes = [
    "h1:RMoURrHNTK/4ofsYr803w/GZIk/W4BtkR9tPcHeqkzw=",
    "zh:090d86eee971ac84a1d6999d1ccdb1323f257ced6aec068ac39f621d9410baad",
    "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d",
    "zh:3de44dd80dee28b4e5840886167b2a0abab16dd8aefa1d387f913e57723bf74a",
    "zh:3eb60ee11290e32cb436aa6c2801fe16f436388ee3578f913656776590634835",
    "zh:5d31feb8a7782a5f77cfd7e4447f731d9f69c9350a1cf08ec98b66bd014bbb2a",
    "zh:6b0c1d0965fd256ac38911add83a95a179d69843978956c5a2980c073f209b8d",
    "zh:75431c28ac8a09243291e95d9ce93ae250bc77e1e40c81e94b84639dfca3e492",
    "zh:7f26210ddc7af32737756ce214b208218a2c1679475e3eb49504543911e7d9ad",
    "zh:8e5b685a8db6ddb28db84df076729389a3fb8cbe0576f996ab7e0a0a31220b4e",
    "zh:b441337a78d2fbcea9cf0261ddc45599dd332459700e87484f1258d656399f6a",
    "zh:bbb54c313bf26845b012d3d74f4d300cea96dbb2d7410e7210e64cb0be2d1f60",
    "zh:c7991f7c27762ba17404ee6b666e7b66c6cd8bf24f01103c0d2ed96a40021b5e",
    "zh:d8901ba224dc14f4d6cab1509a4d2f7bd87958fa3d7840c70f59fa2967f77515",
    "zh:dc425e059399527f44e493cc8a078244065b4c0f5a77bbd9f00f3b47fb4a27d3",
    "zh:de3b15a809b49ef9fee77f9f864c2ed1bdbfa62fc258c59473169269f354d8f5",
  ]
}

provider "registry.opentofu.org/telmate/proxmox" {
  version     = "3.0.1-rc4"
  constraints = "3.0.1-rc4"
  hashes = [
    "h1:tcfqcTB5TDQKSGrWksACZdFIX6ig72i++OYaC8EncOU=",
    "zh:1070aff02aebeadf130368b0082e76d976f61464b3bb29c1c5a7866bb14c7380",
    "zh:3cd96c232a12cf3bbb0e874508e5ff14116ef347d60db20db17ad87bb161ee22",
    "zh:4f75954f3e68159ed969e3eac27485166103058eff3f99186d805816c6f8eb66",
    "zh:55572fd22f7c62813a691fe0d017b2a57a34f3b4e1c40af6c2197731878ebf84",
    "zh:6536402b2eff0a754ff975c39318c3c0b47dfa2dc4461d34a8c55ba493288d9f",
    "zh:735f4283286cb78fe28b4ad001771c460f1963ee640e027467eb199d80a6c257",
    "zh:90a675455c5812d90acbf44bfee347c2318b13565c68bcf64b452dbe6c2a629a",
    "zh:9bbfe89d3f0997a26d7636d5c2d7244beccf92371d17073583299b1b74e1ab9c",
    "zh:9ed8ecb50c4ed8555ffe1544325de07db678e2877f7c4637fbfaf02d5f004100",
    "zh:b1e362ebd234aa82a38ffcfa4e74295e8a23811edff8af88f79372ba18ef0918",
    "zh:c652faede363a91ad3a148cdd1b3d9c3ab8bac1b94d92ce89eb6e1ddadc99cc1",
    "zh:d803958e5e465095cc0d5741abf0abd80b5fd3d1c076b40880a136e737bb06d0",
    "zh:fa12bd372e39c8ac6295503f88884f328971834e109fcc015322fc9ab5fe858f",
    "zh:fb7abe461d36f9868a0a6728320e482ecd54e047c2876dce85d5c2143eba3b8f",
  ]
}

README.md

@@ -1 +1 @@
- # proxmos-talos-linux-opentofu
+ # proxmox-talos-opentofu

2  files/cp-scheduling.yaml  Normal file

@@ -0,0 +1,2 @@
cluster:
  allowSchedulingOnControlPlanes: true

6  iso_images.tf  Normal file

@@ -0,0 +1,6 @@
resource "proxmox_storage_iso" "talos_linux_iso_image" {
url = "https://factory.talos.dev/image/ce4c980550dd2ab1b17bbf2b08801c7eb59418eafe8f279833297925d67c7515/v1.8.2/nocloud-amd64.iso"
filename = local.talos_linux_iso_image_filename
storage = "local"
pve_node = var.proxmox_target_node
}
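The long path segment in the URL is a Talos Image Factory schematic ID; judging by the local filename, it presumably bakes the QEMU guest agent extension into the image. A quick, hypothetical sanity check of the URL before an apply:

# -f fails on HTTP errors, -s is silent, -I fetches headers only
curl -fsI https://factory.talos.dev/image/ce4c980550dd2ab1b17bbf2b08801c7eb59418eafe8f279833297925d67c7515/v1.8.2/nocloud-amd64.iso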

15  locals.tf  Normal file

@@ -0,0 +1,15 @@
locals {
  repo_root = dirname(abspath(path.root))

  # Talos Linux
  talos_linux_iso_image_filename = "talos-linux-qemu-guest-agent-amd64.iso"

  # K8s control plane
  k8s_control_plane_ip_address = "192.168.1.150"

  # K8s worker 1
  k8s_worker_1_ip_address = "192.168.1.151"

  # K8s worker 2
  k8s_worker_2_ip_address = "192.168.1.152"
}

9  outputs.tf  Normal file

@@ -0,0 +1,9 @@
output "talosconfig" {
value = data.talos_client_configuration.this.talos_config
sensitive = true
}
output "kubeconfig" {
value = talos_cluster_kubeconfig.this.kubeconfig_raw
sensitive = true
}
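Both outputs are marked sensitive, so they are redacted in plan/apply output. After a successful apply they can be written to files like so:

tofu output -raw talosconfig > talosconfig
tofu output -raw kubeconfig > kubeconfig

# For example, verify the cluster answers:
kubectl --kubeconfig ./kubeconfig get nodes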

22  providers.tf  Normal file

@@ -0,0 +1,22 @@
terraform {
  required_providers {
    proxmox = {
      source  = "telmate/proxmox"
      version = "3.0.1-rc4"
    }
    talos = {
      source  = "siderolabs/talos"
      version = "0.7.0-alpha.0"
    }
  }
}

provider "proxmox" {
  pm_api_url          = var.proxmox_api_url
  pm_api_token_id     = var.proxmox_api_token_id
  pm_api_token_secret = var.proxmox_api_token_secret
  pm_tls_insecure     = true
  # Switching off parallelism is necessary here, as VM provisioning fails otherwise
  # see: https://github.com/Telmate/terraform-provider-proxmox/issues/173
  pm_parallel = 1
}
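The API token variables are declared sensitive in variables.tf; rather than committing them to a tfvars file, they can be supplied through OpenTofu's standard TF_VAR_ environment mechanism. All values below are placeholders:

export TF_VAR_proxmox_api_url="https://pve.example.com:8006/api2/json"
export TF_VAR_proxmox_api_token_id="terraform@pve!tokenid"
export TF_VAR_proxmox_api_token_secret="00000000-0000-0000-0000-000000000000"
export TF_VAR_proxmox_target_node="pve"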

63  talos_linux.tf  Normal file

@@ -0,0 +1,63 @@
resource "talos_machine_secrets" "this" {}
data "talos_machine_configuration" "controlplane" {
cluster_name = var.cluster_name
cluster_endpoint = var.cluster_endpoint
machine_type = "controlplane"
machine_secrets = talos_machine_secrets.this.machine_secrets
}
data "talos_machine_configuration" "worker" {
cluster_name = var.cluster_name
cluster_endpoint = var.cluster_endpoint
machine_type = "worker"
machine_secrets = talos_machine_secrets.this.machine_secrets
}
data "talos_client_configuration" "this" {
cluster_name = var.cluster_name
client_configuration = talos_machine_secrets.this.client_configuration
endpoints = [for k, v in var.node_data.controlplanes : k]
}
resource "talos_machine_configuration_apply" "controlplane" {
depends_on = [proxmox_vm_qemu.k8s_control_plane]
client_configuration = talos_machine_secrets.this.client_configuration
machine_configuration_input = data.talos_machine_configuration.controlplane.machine_configuration
for_each = var.node_data.controlplanes
node = each.key
config_patches = [
templatefile("${path.module}/templates/machine_disk_and_hostname.yaml.tftmpl", {
hostname = each.value.hostname == null ? format("%s-cp-%s", var.cluster_name, index(keys(var.node_data.controlplanes), each.key)) : each.value.hostname
install_disk = each.value.install_disk
}),
file("${path.module}/files/cp-scheduling.yaml"),
]
}
resource "talos_machine_configuration_apply" "worker" {
depends_on = [proxmox_vm_qemu.k8s_worker_1, proxmox_vm_qemu.k8s_worker_2]
client_configuration = talos_machine_secrets.this.client_configuration
machine_configuration_input = data.talos_machine_configuration.worker.machine_configuration
for_each = var.node_data.workers
node = each.key
config_patches = [
templatefile("${path.module}/templates/machine_disk_and_hostname.yaml.tftmpl", {
hostname = each.value.hostname == null ? format("%s-worker-%s", var.cluster_name, index(keys(var.node_data.workers), each.key)) : each.value.hostname
install_disk = each.value.install_disk
})
]
}
resource "talos_machine_bootstrap" "this" {
depends_on = [talos_machine_configuration_apply.controlplane]
client_configuration = talos_machine_secrets.this.client_configuration
node = [for k, v in var.node_data.controlplanes : k][0]
}
resource "talos_cluster_kubeconfig" "this" {
depends_on = [talos_machine_bootstrap.this]
client_configuration = talos_machine_secrets.this.client_configuration
node = [for k, v in var.node_data.controlplanes : k][0]
}
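Once talos_machine_bootstrap has run, cluster health can be checked with talosctl. A sketch, assuming the talosconfig output has already been exported to ./talosconfig:

talosctl --talosconfig ./talosconfig \
  --endpoints 192.168.1.150 \
  --nodes 192.168.1.150 \
  health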

5  templates/machine_disk_and_hostname.yaml.tftmpl  Normal file

@@ -0,0 +1,5 @@
machine:
  install:
    disk: ${install_disk}
  network:
    hostname: ${hostname}
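For the control plane node at 192.168.1.150, which sets no explicit hostname, the template would render roughly as follows. The hostname comes from the format() fallback in talos_linux.tf; "talos" is an assumed cluster_name, since that variable has no default:

machine:
  install:
    disk: /dev/vda
  network:
    hostname: talos-cp-0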

56  variables.tf  Normal file

@@ -0,0 +1,56 @@
variable "proxmox_api_url" {
type = string
}
variable "proxmox_api_token_id" {
type = string
sensitive = true
}
variable "proxmox_api_token_secret" {
type = string
sensitive = true
}
variable "proxmox_target_node" {
type = string
}
variable "cluster_name" {
description = "A name to provide for the Talos cluster"
type = string
}
variable "cluster_endpoint" {
description = "The endpoint for the Talos cluster"
type = string
}
variable "node_data" {
description = "A map of node data"
type = object({
controlplanes = map(object({
install_disk = string
hostname = optional(string)
}))
workers = map(object({
install_disk = string
hostname = optional(string)
}))
})
default = {
controlplanes = {
"192.168.1.150" = {
install_disk = "/dev/vda"
},
}
workers = {
"192.168.1.151" = {
install_disk = "/dev/vda"
},
"192.168.1.152" = {
install_disk = "/dev/vda"
}
}
}
}
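A matching terraform.tfvars could look like the following; every value is a placeholder, and the node_data default above already covers the three nodes:

proxmox_api_url          = "https://pve.example.com:8006/api2/json"
proxmox_api_token_id     = "terraform@pve!tokenid"
proxmox_api_token_secret = "00000000-0000-0000-0000-000000000000"
proxmox_target_node      = "pve"
cluster_name             = "talos"
cluster_endpoint         = "https://192.168.1.150:6443"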

136  virtual_machines.tf  Normal file

@@ -0,0 +1,136 @@
resource "proxmox_vm_qemu" "k8s_control_plane" {
depends_on = [proxmox_storage_iso.talos_linux_iso_image]
name = "k8s-control-plane"
desc = "Control Node"
target_node = var.proxmox_target_node
agent = 1
vm_state = "running"
cores = 2
memory = 4096
boot = "order=virtio0;ide2"
vga {
type = "std"
}
disk {
slot = "ide0"
type = "cloudinit"
storage = "local-lvm"
}
disk {
slot = "ide2"
type = "cdrom"
iso = "local:iso/${local.talos_linux_iso_image_filename}"
}
disk {
slot = "virtio0"
type = "disk"
storage = "local-lvm"
size = "10240M"
discard = true
}
network {
model = "virtio"
bridge = "vmbr0"
}
# Cloud init setup
os_type = "cloud-init"
ipconfig0 = "ip=${local.k8s_control_plane_ip_address}/24,gw=192.168.1.1"
}
resource "proxmox_vm_qemu" "k8s_worker_1" {
depends_on = [proxmox_storage_iso.talos_linux_iso_image]
name = "k8s-worker-1"
desc = "Worker Node 1"
target_node = var.proxmox_target_node
agent = 1
vm_state = "running"
cores = 2
memory = 8192
boot = "order=virtio0;ide2"
vga {
type = "std"
}
disk {
slot = "ide0"
type = "cloudinit"
storage = "local-lvm"
}
disk {
slot = "ide2"
type = "cdrom"
iso = "local:iso/${local.talos_linux_iso_image_filename}"
}
disk {
slot = "virtio0"
type = "disk"
storage = "local-lvm"
size = "10240M"
discard = true
}
network {
model = "virtio"
bridge = "vmbr0"
}
# Cloud init setup
os_type = "cloud-init"
ipconfig0 = "ip=${local.k8s_worker_1_ip_address}/24,gw=192.168.1.1"
}
resource "proxmox_vm_qemu" "k8s_worker_2" {
depends_on = [proxmox_storage_iso.talos_linux_iso_image]
name = "k8s-worker-2"
desc = "Worker Node 2"
target_node = var.proxmox_target_node
agent = 1
vm_state = "running"
cores = 2
memory = 8192
boot = "order=virtio0;ide2"
vga {
type = "std"
}
disk {
slot = "ide0"
type = "cloudinit"
storage = "local-lvm"
}
disk {
slot = "ide2"
type = "cdrom"
iso = "local:iso/${local.talos_linux_iso_image_filename}"
}
disk {
slot = "virtio0"
type = "disk"
storage = "local-lvm"
size = "10240M"
discard = true
}
network {
model = "virtio"
bridge = "vmbr0"
}
# Cloud init setup
os_type = "cloud-init"
ipconfig0 = "ip=${local.k8s_worker_2_ip_address}/24,gw=192.168.1.1"
}
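The three resources differ only in name, description, memory, and IP address. If more nodes are added, a single resource with for_each over a node map could replace them. A sketch, with the caveat that the depends_on references in talos_linux.tf would have to be updated to the new resource address:

locals {
  k8s_nodes = {
    "k8s-control-plane" = { desc = "Control Node", memory = 4096, ip = local.k8s_control_plane_ip_address }
    "k8s-worker-1"      = { desc = "Worker Node 1", memory = 8192, ip = local.k8s_worker_1_ip_address }
    "k8s-worker-2"      = { desc = "Worker Node 2", memory = 8192, ip = local.k8s_worker_2_ip_address }
  }
}

resource "proxmox_vm_qemu" "k8s_node" {
  for_each    = local.k8s_nodes
  depends_on  = [proxmox_storage_iso.talos_linux_iso_image]
  name        = each.key
  desc        = each.value.desc
  memory      = each.value.memory
  ipconfig0   = "ip=${each.value.ip}/24,gw=192.168.1.1"
  target_node = var.proxmox_target_node
  # ...remaining arguments and blocks identical to the resources above
}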