add terraform project

This commit is contained in:
2025-12-25 20:25:54 +01:00
commit 81d66e91cd
15 changed files with 1166 additions and 0 deletions

33
terraform/.gitignore vendored Normal file
View File

@@ -0,0 +1,33 @@
# Terraform state (may contain secrets — never commit)
*.tfstate
*.tfstate.*

# Variable files (may contain credentials); keep the tracked example
*.tfvars
!terraform.tfvars.example

# Terraform working directory
.terraform/

# NOTE: .terraform.lock.hcl is deliberately NOT ignored — HashiCorp recommends
# committing the dependency lock file so provider versions stay reproducible.

# Crash logs
crash.log
crash.*.log

# Local override files
override.tf
override.tf.json
*_override.tf
*_override.tf.json

# Talos files
*.iso
talosconfig
kubeconfig

# Sensitive outputs
talos-config.yaml
kube-config.yaml

# IDE files
.idea/
.vscode/
*.swp
*.swo
*~

# OS files
.DS_Store
Thumbs.db

453
terraform/README.md Normal file
View File

@@ -0,0 +1,453 @@
# Talos Cluster on Proxmox - Terraform Configuration
This Terraform project creates and provisions a Talos Kubernetes cluster on Proxmox VE with integrated Proxmox Cloud Controller Manager (CCM) and Container Storage Interface (CSI) driver.
## Features
- 🚀 **Automated VM provisioning** on Proxmox VE
- ☁️ **Proxmox Cloud Controller Manager** - Native Proxmox integration for Kubernetes
- 💾 **Proxmox CSI Driver** - Dynamic volume provisioning using Proxmox storage
- 🔄 **High Availability** - Multi-node control plane with optional VIP
- 🌐 **Flexible networking** - DHCP or static IP configuration
- 📦 **Full stack deployment** - From VMs to running Kubernetes cluster
## Prerequisites
1. **Proxmox VE** server with API access
2. **Terraform** >= 1.0
3. **SSH access** to Proxmox node
4. **Network requirements**:
- Available IP addresses for VMs (DHCP or static)
- Network connectivity between VMs
- Access to download Talos ISO (for initial setup)
## Quick Start
### 1. Create terraform.tfvars
Create a `terraform.tfvars` file with your Proxmox and cluster configuration:
```hcl
# Proxmox Connection
proxmox_endpoint = "https://proxmox.example.com:8006"
proxmox_username = "root@pam"
proxmox_password = "your-password"
proxmox_node = "pve"
# Proxmox API Tokens (required for CCM/CSI)
proxmox_ccm_token_secret = "your-ccm-token-secret"
proxmox_csi_token_secret = "your-csi-token-secret"
# Cluster Configuration
cluster_name = "talos-cluster"
cluster_endpoint = "https://10.0.0.100:6443"
# VM Configuration
controlplane_count = 3
worker_count = 2
# Network (DHCP - IPs will be auto-assigned)
# For static IPs, see advanced configuration below
```
### 2. Initialize and Apply
```bash
terraform init
terraform plan
terraform apply
```
### 3. Get Cluster Access
```bash
# Get talosconfig
terraform output -raw talosconfig > ~/.talos/config
# Get kubeconfig
terraform output -raw kubeconfig > ~/.kube/config
# Verify cluster
talosctl version --nodes <controlplane-ip>
kubectl get nodes
```
### 4. Verify Proxmox Integration
```bash
# Check CCM is running
kubectl get pods -n kube-system | grep proxmox-cloud-controller
# Check CSI is running
kubectl get pods -n csi-proxmox
# View available storage classes
kubectl get storageclass
# Create a test PVC
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: test-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: proxmox-hdd-lvm
EOF
```
## Configuration Options
### Basic Configuration
| Variable | Description | Default |
|----------|-------------|---------|
| `proxmox_endpoint` | Proxmox API endpoint | - |
| `proxmox_username` | Proxmox username | `root@pam` |
| `proxmox_password` | Proxmox password | - |
| `proxmox_insecure` | Allow insecure Proxmox API connections | `true` |
| `proxmox_ssh_user` | SSH user for Proxmox node | `root` |
| `proxmox_node` | Proxmox node name | - |
| `proxmox_storage` | Storage location for VM disks | `local` |
| `proxmox_network_bridge` | Network bridge for VMs | `vmbr40` |
| `proxmox_ccm_token_secret` | Proxmox API token for CCM (sensitive) | - |
| `proxmox_csi_token_secret` | Proxmox API token for CSI (sensitive) | - |
| `cluster_name` | Talos cluster name | - |
| `cluster_endpoint` | Cluster API endpoint | - |
| `vm_id_prefix` | Starting VM ID prefix | `800` |
| `talos_version` | Talos version to use | `v1.9.1` |
| `talos_iso_url` | Custom Talos ISO URL | `""` (uses default) |
### Network Configuration
| Variable | Description | Default |
|----------|-------------|---------|
| `controlplane_ips` | Static IPs for control plane nodes | `[]` (DHCP) |
| `worker_ips` | Static IPs for worker nodes | `[]` (DHCP) |
| `gateway` | Default gateway (required for static IPs) | `""` |
| `netmask` | Network mask in CIDR notation | `24` |
| `nameservers` | DNS nameservers | `["1.1.1.1", "8.8.8.8"]` |
| `cluster_vip` | Virtual IP for HA control plane | `""` (disabled) |
### Proxmox Integration
| Variable | Description | Default |
|----------|-------------|---------|
| `proxmox_region` | Region identifier for CCM | `proxmox` |
### VM Resources
| Variable | Description | Default |
|----------|-------------|---------|
| `controlplane_count` | Number of control plane nodes | `3` |
| `worker_count` | Number of worker nodes | `2` |
| `controlplane_cpu` | CPU cores per control plane | `2` |
| `controlplane_memory` | Memory (MB) per control plane | `4096` |
| `controlplane_disk_size` | Disk size (GB) per control plane | `20` |
| `worker_cpu` | CPU cores per worker | `4` |
| `worker_memory` | Memory (MB) per worker | `8192` |
| `worker_disk_size` | Disk size (GB) per worker | `10` |
### Static IP Configuration
For production deployments, use static IPs. **All three parameters (IPs, gateway, and netmask) must be configured together:**
```hcl
# Control plane IPs
controlplane_ips = [
"10.0.0.101",
"10.0.0.102",
"10.0.0.103"
]
# Worker IPs
worker_ips = [
"10.0.0.104",
"10.0.0.105"
]
# Network settings (required for static IPs)
gateway = "10.0.0.1" # Default gateway
netmask = 24 # CIDR notation (e.g., 24 = 255.255.255.0)
nameservers = ["1.1.1.1", "8.8.8.8"] # DNS servers
# Use VIP for control plane endpoint
cluster_vip = "10.0.0.100"
cluster_endpoint = "https://10.0.0.100:6443"
```
**Important**: When using static IPs, you must configure:
- `controlplane_ips` and/or `worker_ips` - List of IP addresses
- `gateway` - Network gateway IP address
- `netmask` - Network mask in CIDR notation (default: 24)
- `nameservers` - DNS servers (default: ["1.1.1.1", "8.8.8.8"])
If any of these are missing, the nodes will use DHCP instead.
### High Availability Setup
For HA control plane, configure a virtual IP:
```hcl
cluster_vip = "10.0.0.100"
cluster_endpoint = "https://10.0.0.100:6443"
controlplane_count = 3 # Minimum 3 for HA
```
### Custom Talos Version
```hcl
talos_version = "v1.9.1"
# Or use custom ISO URL
talos_iso_url = "https://custom-mirror.com/talos.iso"
```
## Advanced Configuration
### Custom Storage Backend
```hcl
proxmox_storage = "ceph-storage" # or "nfs-backup", etc.
```
### Custom Network Bridge
```hcl
proxmox_network_bridge = "vmbr1"
```
### Custom VM ID Range
```hcl
vm_id_prefix = 1000 # VMs will be 1000, 1001, 1002, etc.
```
### Proxmox API Token Setup
The CCM and CSI drivers require Proxmox API tokens for authentication. Generate tokens in Proxmox:
1. Navigate to Datacenter → Permissions → API Tokens
2. Create a token for CCM with appropriate permissions
3. Create a token for CSI with storage permissions
4. Add the token secrets to your `terraform.tfvars`:
```hcl
proxmox_ccm_token_secret = "your-ccm-api-token-secret"
proxmox_csi_token_secret = "your-csi-api-token-secret"
```
## Architecture
The project creates:
1. **Control Plane VMs** (default: 3)
- Run Kubernetes control plane components
- Can schedule workload pods if configured
- Participate in etcd cluster
- Run Proxmox CCM for cloud provider integration
2. **Worker VMs** (default: 2)
- Run application workloads
- Join the cluster automatically
- Support CSI for dynamic volume provisioning
3. **Talos Configuration**
- Machine secrets and certificates
- Node-specific configurations
- Client configurations (talosconfig, kubeconfig)
- Cloud provider configuration for CCM integration
4. **Proxmox Integration**
- **CCM (Cloud Controller Manager)**: Provides node lifecycle management and metadata
- **CSI (Container Storage Interface)**: Enables dynamic PV provisioning from Proxmox storage
## Workflow
1. **VM Creation**: VMs are created in Proxmox with Talos ISO attached
2. **Boot to Maintenance**: VMs boot into Talos maintenance mode
3. **Configuration Apply**: Terraform applies Talos machine configurations with cloud-provider settings
4. **Cluster Bootstrap**: First control plane node bootstraps the cluster
5. **Node Join**: Remaining nodes join automatically
6. **Kubeconfig Generation**: Cluster credentials are generated
7. **CCM Installation**: Proxmox Cloud Controller Manager is deployed (if enabled)
8. **CSI Installation**: Proxmox CSI driver and storage class are deployed (if enabled)
## Proxmox Integration Details
### Cloud Controller Manager (CCM)
The CCM provides:
- **Node Management**: Automatic node registration with Proxmox metadata
- **Node Labels**: Topology labels (region, zone, instance-type)
- **Node Lifecycle**: Proper handling of node additions and removals
Nodes are automatically labeled with:
```yaml
node.kubernetes.io/instance-type: proxmox
topology.kubernetes.io/region: <proxmox_region>
topology.kubernetes.io/zone: <proxmox_node>
```
### Container Storage Interface (CSI)
The CSI driver provides:
- **Dynamic Provisioning**: Automatically create volumes in Proxmox storage
- **Volume Expansion**: Support for expanding PVCs
- **Multiple Storage Backends**: Use any Proxmox storage (LVM, ZFS, Ceph, NFS, etc.)
Example usage:
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: my-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 50Gi
storageClassName: proxmox-hdd-lvm
```
## Accessing the Cluster
### Talos CLI
```bash
# Export talosconfig
terraform output -raw talosconfig > ~/.talos/config
# Get nodes
talosctl get members
# Get service status
talosctl services
# Access logs
talosctl logs kubelet
```
### Kubernetes CLI
```bash
# Export kubeconfig
terraform output -raw kubeconfig > ~/.kube/config
# Get cluster info
kubectl cluster-info
kubectl get nodes -o wide
kubectl get pods -A
# Check Proxmox integrations
kubectl get pods -n kube-system | grep proxmox
kubectl get pods -n csi-proxmox
kubectl get storageclass
```
## Maintenance
### Upgrading Talos
```bash
# Update talos_version variable
talos_version = "v1.9.2"
# Apply changes
terraform apply
# Or upgrade manually
talosctl upgrade --image ghcr.io/siderolabs/installer:v1.9.2
```
### Scaling Workers
```bash
# Update worker_count
worker_count = 5
# Apply changes
terraform apply
```
### Removing the Cluster
```bash
terraform destroy
```
## Troubleshooting
### VMs not getting IP addresses
**For DHCP:**
- Check Proxmox network bridge configuration
- Verify DHCP server is running on the network
- Ensure VMs are connected to the correct network bridge
**For Static IPs:**
- Verify all required parameters are set: `controlplane_ips`/`worker_ips`, `gateway`, and `netmask`
- Check that IPs are in the correct subnet
- Ensure gateway IP is correct and reachable
- Verify no IP conflicts with existing devices
### Cannot connect to nodes
- Verify firewall rules allow port 50000 (Talos API)
- Check VM networking in Proxmox
- Ensure nodes are in maintenance mode: `talosctl version --nodes <ip>`
### Bootstrap fails
- Check control plane IPs are correct
- Verify cluster_endpoint is accessible
- Review logs: `talosctl logs etcd`
### ISO upload fails
- Verify SSH access to Proxmox node
- Check `/var/lib/vz/template/iso/` permissions
- Manually upload ISO if needed
### CCM/CSI not working
- Verify Proxmox API token secrets are correct
- Check that tokens have appropriate permissions in Proxmox
- Review template logs for CCM/CSI configuration
## Project Structure
```
.
├── main.tf # Main VM, Talos, CCM/CSI resources
├── variables.tf # Input variables
├── outputs.tf # Output values
├── versions.tf # Provider versions (Talos, Proxmox, Helm, K8s)
├── locals.tf # Local values
├── state.tf # Remote state backend (S3-compatible)
├── terraform.tfvars # Your configuration (create this)
├── templates/
│   ├── install-disk-and-hostname.yaml.tmpl
│   ├── proxmox-ccm.yaml.tmpl # CCM/CSI secrets and manifests
│   ├── static-ip.yaml.tmpl # Static IP configuration
│   ├── node-labels.yaml.tmpl
│   └── vip-config.yaml.tmpl
└── files/
├── cp-scheduling.yaml
└── cloud-provider.yaml
```
## References
- [Talos Documentation](https://www.talos.dev/)
- [Talos Terraform Provider](https://registry.terraform.io/providers/siderolabs/talos)
- [Proxmox Terraform Provider](https://registry.terraform.io/providers/bpg/proxmox)
- [Proxmox CCM](https://github.com/sergelogvinov/proxmox-cloud-controller-manager)
- [Proxmox CSI](https://github.com/sergelogvinov/proxmox-csi-plugin)
- [Siderolabs Contrib Examples](https://github.com/siderolabs/contrib/tree/main/examples/terraform)
## License
Based on examples from [siderolabs/contrib](https://github.com/siderolabs/contrib)

View File

@@ -0,0 +1,7 @@
# Talos machine-config patch: mark the kubelet as externally managed by a
# cloud provider (the Proxmox CCM registers the nodes) and allow the node IP
# to be picked from any subnet.
machine:
  kubelet:
    extraArgs:
      cloud-provider: external
    nodeIP:
      validSubnets:
        - 0.0.0.0/0

View File

@@ -0,0 +1,2 @@
# Talos cluster patch: keep regular workloads off the control plane nodes
# (applied to control planes by main.tf via files/cp-scheduling.yaml).
cluster:
  allowSchedulingOnControlPlanes: false

32
terraform/locals.tf Normal file
View File

@@ -0,0 +1,32 @@
locals {
  # DHCP-assigned IPs reported by the QEMU guest agent (computed attribute on
  # the VM resource). For each VM, flatten the per-interface address lists and
  # take the first address that is neither loopback nor empty — the agent can
  # report "" for interfaces that have no address yet, so both are filtered.
  controlplane_dhcp_ips = [
    for vm in proxmox_virtual_environment_vm.controlplane :
    flatten([
      for iface_ips in vm.ipv4_addresses :
      [for ip in iface_ips : ip if ip != "127.0.0.1" && ip != ""]
    ])[0]
  ]

  worker_dhcp_ips = [
    for vm in proxmox_virtual_environment_vm.worker :
    flatten([
      for iface_ips in vm.ipv4_addresses :
      [for ip in iface_ips : ip if ip != "127.0.0.1" && ip != ""]
    ])[0]
  ]

  # DHCP IPs are used for the initial connection and configuration apply.
  # If static IPs are configured via var.controlplane_ips / var.worker_ips,
  # Talos reconfigures the network after the initial apply, but first contact
  # always goes through the DHCP address.
  controlplane_ips = local.controlplane_dhcp_ips
  worker_ips       = local.worker_dhcp_ips

  # Endpoints for the client config and bootstrap: prefer static IPs when
  # configured, otherwise fall back to the DHCP-discovered addresses.
  controlplane_endpoints = length(var.controlplane_ips) > 0 ? var.controlplane_ips : local.controlplane_dhcp_ips

  # Proxmox API host with the URL scheme stripped (any port suffix is kept).
  # NOTE(review): not referenced anywhere else in this module — confirm before removing.
  proxmox_host = replace(var.proxmox_endpoint, "/^https?:\\/\\//", "")
}

260
terraform/main.tf Normal file
View File

@@ -0,0 +1,260 @@
# Download and upload Talos ISO to Proxmox
# resource "null_resource" "talos_iso" {
# provisioner "local-exec" {
# command = <<-EOT
# ISO_URL="${var.talos_iso_url != "" ? var.talos_iso_url : "https://github.com/siderolabs/talos/releases/download/${var.talos_version}/metal-amd64.iso"}"
# ISO_FILE="talos-${var.talos_version}-metal-amd64.iso"
#
# if [ ! -f "$ISO_FILE" ]; then
# echo "Downloading Talos ISO..."
# curl -L "$ISO_URL" -o "$ISO_FILE"
# fi
#
# echo "Uploading ISO to Proxmox..."
# scp "$ISO_FILE" ${var.proxmox_ssh_user}@${split("://", var.proxmox_endpoint)[1]}:/var/lib/vz/template/iso/
# EOT
# }
# }
# Control plane VMs — one QEMU VM per control plane node. Each VM boots the
# Talos ISO from ide2 while the disk is empty; boot_order tries scsi0 first,
# so once Talos is installed the node boots from disk.
resource "proxmox_virtual_environment_vm" "controlplane" {
  count       = var.controlplane_count
  name        = "${var.cluster_name}-cp-${count.index + 1}"
  node_name   = var.proxmox_node
  vm_id       = var.vm_id_prefix + count.index
  description = "Talos Control Plane ${count.index + 1}"
  started     = true
  on_boot     = true

  cpu {
    cores = var.controlplane_cpu
    type  = "host" # pass through host CPU features
  }

  memory {
    dedicated = var.controlplane_memory
  }

  disk {
    datastore_id = var.proxmox_storage
    interface    = "scsi0"
    size         = var.controlplane_disk_size
    file_format  = "raw"
    discard      = "on"
    ssd          = true
  }

  network_device {
    bridge = var.proxmox_network_bridge
    model  = "virtio"
  }

  cdrom {
    # enabled = true
    # NOTE(review): ISO filename is hardcoded and ignores var.talos_version /
    # var.talos_iso_url — confirm before relying on either variable.
    file_id   = "local:iso/talos-1.11.5-nocloud-amd64.iso"
    interface = "ide2"
  }

  operating_system {
    type = "l26" # Linux 2.6+ guest
  }

  # The QEMU guest agent must run inside the VM: locals.tf reads the
  # DHCP addresses it reports via ipv4_addresses.
  agent {
    enabled = true
  }

  boot_order = ["scsi0", "ide2"]

  # depends_on = [null_resource.talos_iso]
}
# Worker VMs — identical layout to the control plane VMs, sized by the
# worker_* variables. VM IDs continue after the control plane range.
resource "proxmox_virtual_environment_vm" "worker" {
  count       = var.worker_count
  name        = "${var.cluster_name}-worker-${count.index + 1}"
  node_name   = var.proxmox_node
  vm_id       = var.vm_id_prefix + var.controlplane_count + count.index
  description = "Talos Worker ${count.index + 1}"
  started     = true
  on_boot     = true

  cpu {
    cores = var.worker_cpu
    type  = "host" # pass through host CPU features
  }

  memory {
    dedicated = var.worker_memory
  }

  disk {
    datastore_id = var.proxmox_storage
    interface    = "scsi0"
    size         = var.worker_disk_size
    file_format  = "raw"
    discard      = "on"
    ssd          = true
  }

  network_device {
    bridge = var.proxmox_network_bridge
    model  = "virtio"
  }

  cdrom {
    # enabled = true
    # NOTE(review): ISO filename is hardcoded and ignores var.talos_version /
    # var.talos_iso_url — confirm before relying on either variable.
    file_id   = "local:iso/talos-1.11.5-nocloud-amd64.iso"
    interface = "ide2"
  }

  operating_system {
    type = "l26" # Linux 2.6+ guest
  }

  # The QEMU guest agent must run inside the VM: locals.tf reads the
  # DHCP addresses it reports via ipv4_addresses.
  agent {
    enabled = true
  }

  boot_order = ["scsi0", "ide2"]

  # depends_on = [null_resource.talos_iso]
}
# Give the VMs time to boot the Talos ISO into maintenance mode and for the
# QEMU guest agent to come up (locals.tf reads the agent-reported DHCP IPs).
resource "time_sleep" "wait_for_vms" {
  depends_on = [
    proxmox_virtual_environment_vm.controlplane,
    proxmox_virtual_environment_vm.worker
  ]
  create_duration = "60s"
}

# Cluster-wide Talos machine secrets (certificates, tokens) shared by all
# generated machine configurations below.
resource "talos_machine_secrets" "this" {
  depends_on = [time_sleep.wait_for_vms]
}
# Rendered Talos machine configuration for control plane nodes.
data "talos_machine_configuration" "controlplane" {
  cluster_name     = var.cluster_name
  cluster_endpoint = var.cluster_endpoint
  machine_type     = "controlplane"
  machine_secrets  = talos_machine_secrets.this.machine_secrets
}

# Rendered Talos machine configuration for worker nodes.
data "talos_machine_configuration" "worker" {
  cluster_name     = var.cluster_name
  cluster_endpoint = var.cluster_endpoint
  machine_type     = "worker"
  machine_secrets  = talos_machine_secrets.this.machine_secrets
}

# talosctl client configuration, exposed via the talosconfig output.
# Endpoints prefer static IPs when configured (see locals.tf).
data "talos_client_configuration" "this" {
  cluster_name         = var.cluster_name
  client_configuration = talos_machine_secrets.this.client_configuration
  endpoints            = local.controlplane_endpoints
}
# Apply the control plane machine configuration to each node over its
# DHCP-discovered address (the nodes are still in maintenance mode here).
resource "talos_machine_configuration_apply" "controlplane" {
  count                       = var.controlplane_count
  client_configuration        = talos_machine_secrets.this.client_configuration
  machine_configuration_input = data.talos_machine_configuration.controlplane.machine_configuration
  node                        = local.controlplane_dhcp_ips[count.index]
  config_patches = concat(
    [
      # Hostname and install-disk/installer-image patch.
      templatefile("${path.module}/templates/install-disk-and-hostname.yaml.tmpl", {
        hostname = "${var.cluster_name}-cp-${count.index + 1}"
      }),
      # CCM/CSI credential secrets and external cloud-provider manifests.
      templatefile("${path.module}/templates/proxmox-ccm.yaml.tmpl", {
        proxmox_url      = var.proxmox_endpoint
        proxmox_region   = var.proxmox_region
        ccm_token_secret = var.proxmox_ccm_token_secret
        csi_token_secret = var.proxmox_csi_token_secret
      }),
      # Keep regular workloads off the control plane.
      file("${path.module}/files/cp-scheduling.yaml")
    ],
    # Static addressing is only rendered when both the IP list and the
    # gateway are provided; otherwise the nodes keep using DHCP.
    length(var.controlplane_ips) > 0 && var.gateway != "" ? [
      templatefile("${path.module}/templates/static-ip.yaml.tmpl", {
        ip_address  = var.controlplane_ips[count.index]
        netmask     = var.netmask
        gateway     = var.gateway
        nameservers = var.nameservers
      })
    ] : [],
    # Optional shared VIP for the HA API endpoint.
    # NOTE(review): the VIP interface is hardcoded to eth0 — confirm it
    # matches the NIC name inside the Talos VMs.
    var.cluster_vip != "" ? [
      templatefile("${path.module}/templates/vip-config.yaml.tmpl", {
        vip       = var.cluster_vip
        interface = "eth0"
      })
    ] : []
  )
  depends_on = [time_sleep.wait_for_vms]
}
# Apply the worker machine configuration to each node over its
# DHCP-discovered address (same flow as the control plane apply, minus the
# control plane scheduling patch and the VIP).
resource "talos_machine_configuration_apply" "worker" {
  count                       = var.worker_count
  client_configuration        = talos_machine_secrets.this.client_configuration
  machine_configuration_input = data.talos_machine_configuration.worker.machine_configuration
  node                        = local.worker_dhcp_ips[count.index]
  config_patches = concat(
    [
      # Hostname and install-disk/installer-image patch.
      templatefile("${path.module}/templates/install-disk-and-hostname.yaml.tmpl", {
        hostname = "${var.cluster_name}-worker-${count.index + 1}"
      }),
      # CCM/CSI credential secrets and external cloud-provider manifests.
      templatefile("${path.module}/templates/proxmox-ccm.yaml.tmpl", {
        proxmox_url      = var.proxmox_endpoint
        proxmox_region   = var.proxmox_region
        ccm_token_secret = var.proxmox_ccm_token_secret
        csi_token_secret = var.proxmox_csi_token_secret
      })
    ],
    # Static addressing only when both the IP list and gateway are provided.
    length(var.worker_ips) > 0 && var.gateway != "" ? [
      templatefile("${path.module}/templates/static-ip.yaml.tmpl", {
        ip_address  = var.worker_ips[count.index]
        netmask     = var.netmask
        gateway     = var.gateway
        nameservers = var.nameservers
      })
    ] : []
  )
  depends_on = [time_sleep.wait_for_vms]
}
# Bootstrap etcd on the first control plane node (static IP if configured,
# otherwise its DHCP address — see local.controlplane_endpoints).
resource "talos_machine_bootstrap" "this" {
  depends_on           = [talos_machine_configuration_apply.controlplane]
  client_configuration = talos_machine_secrets.this.client_configuration
  node                 = local.controlplane_endpoints[0]
}

# Retrieve the cluster kubeconfig from the bootstrapped node; exposed via
# the kubeconfig output.
resource "talos_cluster_kubeconfig" "this" {
  depends_on           = [talos_machine_bootstrap.this]
  client_configuration = talos_machine_secrets.this.client_configuration
  node                 = local.controlplane_endpoints[0]
}

# Give the Kubernetes API time to come up after bootstrap before anything
# downstream consumes the kubeconfig.
resource "time_sleep" "wait_for_k8s_api" {
  depends_on      = [talos_cluster_kubeconfig.this]
  create_duration = "60s"
}

37
terraform/outputs.tf Normal file
View File

@@ -0,0 +1,37 @@
# Talos client configuration — write to ~/.talos/config for talosctl access.
output "talosconfig" {
  description = "Talos configuration for cluster access"
  value       = data.talos_client_configuration.this.talos_config
  sensitive   = true
}

# Kubernetes admin kubeconfig — write to ~/.kube/config for kubectl access.
output "kubeconfig" {
  description = "Kubernetes configuration for cluster access"
  value       = talos_cluster_kubeconfig.this.kubeconfig_raw
  sensitive   = true
}

# DHCP-discovered node addresses (see locals.tf).
output "controlplane_ips" {
  description = "Control plane node IPs"
  value       = local.controlplane_ips
}

output "worker_ips" {
  description = "Worker node IPs"
  value       = local.worker_ips
}

output "controlplane_vm_ids" {
  description = "Proxmox VM IDs for control plane nodes"
  value       = proxmox_virtual_environment_vm.controlplane[*].vm_id
}

output "worker_vm_ids" {
  description = "Proxmox VM IDs for worker nodes"
  value       = proxmox_virtual_environment_vm.worker[*].vm_id
}

output "cluster_endpoint" {
  description = "Kubernetes cluster endpoint"
  value       = var.cluster_endpoint
}

8
terraform/state.tf Normal file
View File

@@ -0,0 +1,8 @@
# Remote Terraform state in an S3-compatible backend.
terraform {
  backend "s3" {
    bucket = "terraform-states-fzeoh1h"
    key    = "talos-cluster-proxmox/terraform.tfstate"
    # NOTE(review): region is required by the s3 backend but unused here —
    # presumably a non-AWS S3 endpoint/credentials are supplied via
    # environment or CLI backend config; confirm.
    region = "eu-west-3" # required but unused
  }
}

View File

@@ -0,0 +1,5 @@
# Talos machine-config patch template (rendered by main.tf): sets the node
# hostname and the installer image Talos writes to disk.
machine:
  network:
    hostname: ${hostname}
  install:
    # factory.talos.dev "nocloud" installer; the schematic hash pins the
    # baked-in extensions — presumably includes qemu-guest-agent since
    # main.tf enables the agent; confirm against the factory schematic.
    # NOTE(review): installer is v1.11.0 while the boot ISO in main.tf is
    # 1.11.5 — confirm the version skew is intended.
    image: "factory.talos.dev/nocloud-installer/ce4c980550dd2ab1b17bbf2b08801c7eb59418eafe8f279833297925d67c7515:v1.11.0"

View File

@@ -0,0 +1,55 @@
# Talos cluster patch template (rendered by main.tf): injects the credential
# secrets for the Proxmox CCM and CSI plugin plus a default StorageClass as
# inline manifests, and enables the external cloud-provider bootstrap.
cluster:
  inlineManifests:
    # API credentials consumed by the Proxmox Cloud Controller Manager.
    # NOTE(review): insecure: true disables TLS verification against the
    # Proxmox API — confirm this is acceptable for the target environment.
    - name: proxmox-cloud-controller-manager
      contents: |-
        apiVersion: v1
        kind: Secret
        type: Opaque
        metadata:
          name: proxmox-cloud-controller-manager
          namespace: kube-system
        stringData:
          config.yaml: |
            clusters:
              - url: ${proxmox_url}/api2/json
                insecure: true
                token_id: "kubernetes@pve!ccm"
                token_secret: "${ccm_token_secret}"
                region: ${proxmox_region}
    # API credentials consumed by the Proxmox CSI plugin.
    - name: proxmox-csi-plugin
      contents: |-
        apiVersion: v1
        kind: Secret
        type: Opaque
        metadata:
          name: proxmox-csi-plugin
          namespace: csi-proxmox
        stringData:
          config.yaml: |
            clusters:
              - url: ${proxmox_url}/api2/json
                insecure: true
                token_id: "kubernetes-csi@pve!csi"
                token_secret: "${csi_token_secret}"
                region: ${proxmox_region}
    # Default StorageClass ("proxmox-hdd-lvm") backed by Proxmox "hdd" storage.
    - name: proxmox-hdd-sc
      contents: |-
        allowVolumeExpansion: true
        apiVersion: storage.k8s.io/v1
        kind: StorageClass
        metadata:
          annotations:
            storageclass.kubernetes.io/is-default-class: "true"
          name: proxmox-hdd-lvm
        parameters:
          csi.storage.k8s.io/fstype: xfs
          storage: hdd
        provisioner: csi.proxmox.sinextra.dev
        reclaimPolicy: Delete
        volumeBindingMode: WaitForFirstConsumer
  # Have Talos fetch and apply the upstream CCM/CSI manifests after bootstrap.
  externalCloudProvider:
    enabled: true
    manifests:
      - https://raw.githubusercontent.com/sergelogvinov/proxmox-cloud-controller-manager/main/docs/deploy/cloud-controller-manager.yml
      - https://raw.githubusercontent.com/sergelogvinov/proxmox-csi-plugin/main/docs/deploy/proxmox-csi-plugin.yml

View File

@@ -0,0 +1,14 @@
# Talos machine-config patch template (rendered by main.tf): static IPv4 on
# eth0 with a default route and DNS servers. Only rendered when static IPs
# AND a gateway are configured (see the conditionals in main.tf).
machine:
  network:
    interfaces:
      - interface: eth0
        addresses:
          - ${ip_address}/${netmask}
        routes:
          - network: 0.0.0.0/0
            gateway: ${gateway}
        dhcp: false
    nameservers:
%{ for ns in nameservers ~}
      - ${ns}
%{ endfor ~}

View File

@@ -0,0 +1,6 @@
# Talos machine-config patch template (rendered by main.tf): shared virtual
# IP announced on the given interface by the control plane nodes, providing
# the HA Kubernetes API endpoint.
machine:
  network:
    interfaces:
      - interface: ${interface}
        vip:
          ip: ${vip}

View File

@@ -0,0 +1,47 @@
# Example configuration for Talos cluster on Proxmox.
# Copy this file to terraform.tfvars and customize.
# (terraform.tfvars itself is gitignored so credentials never land in git.)

# Proxmox Connection
proxmox_endpoint = "https://proxmox.example.com:8006"
proxmox_username = "root@pam"
proxmox_password = "your-password-here"
proxmox_node     = "pve"

# Proxmox API Tokens for CCM/CSI (required)
proxmox_ccm_token_secret = "your-ccm-token-secret-here"
proxmox_csi_token_secret = "your-csi-token-secret-here"

# Cluster Configuration
cluster_name     = "talos-cluster"
cluster_endpoint = "https://10.0.0.100:6443"

# Optional: Use a VIP for HA control plane
# cluster_vip = "10.0.0.100"

# VM Resource Configuration
controlplane_count  = 3
worker_count        = 2
controlplane_cpu    = 2
controlplane_memory = 4096
worker_cpu          = 4
worker_memory       = 8192

# Optional: Static IP Configuration
# Uncomment and configure for static IPs instead of DHCP.
# The IP lists, gateway and netmask must all be set together.
# controlplane_ips = ["10.0.0.101", "10.0.0.102", "10.0.0.103"]
# worker_ips       = ["10.0.0.104", "10.0.0.105"]
# gateway          = "10.0.0.1"
# netmask          = 24
# nameservers      = ["1.1.1.1", "8.8.8.8"]

# Optional: Proxmox Configuration
# proxmox_storage        = "local-lvm"
# proxmox_network_bridge = "vmbr0"
# vm_id_prefix           = 800

# Optional: Talos Configuration
# talos_version = "v1.9.1"

# Optional: Proxmox CCM/CSI Configuration
# proxmox_region = "proxmox"

181
terraform/variables.tf Normal file
View File

@@ -0,0 +1,181 @@
# ---------------------------------------------------------------------------
# Proxmox connection
# ---------------------------------------------------------------------------

variable "proxmox_endpoint" {
  description = "The Proxmox API endpoint (e.g., https://proxmox.example.com:8006)"
  type        = string
}

variable "proxmox_username" {
  description = "The Proxmox username (e.g., root@pam)"
  type        = string
  default     = "root@pam"
}

variable "proxmox_password" {
  description = "The Proxmox password"
  type        = string
  sensitive   = true
}

variable "proxmox_insecure" {
  description = "Allow insecure connections to Proxmox API"
  type        = bool
  default     = true
}

variable "proxmox_ssh_user" {
  description = "SSH user for Proxmox node access"
  type        = string
  default     = "root"
}

variable "proxmox_node" {
  description = "The Proxmox node name where VMs will be created"
  type        = string
}

variable "proxmox_storage" {
  description = "The storage location for VM disks"
  type        = string
  default     = "local"
}

variable "proxmox_network_bridge" {
  description = "The network bridge to use for VMs"
  type        = string
  default     = "vmbr40"
}

# ---------------------------------------------------------------------------
# Talos ISO
# ---------------------------------------------------------------------------

variable "talos_version" {
  description = "The Talos version to use"
  type        = string
  default     = "v1.9.1"
}

variable "talos_iso_url" {
  description = "URL to download Talos ISO (leave empty to use default)"
  type        = string
  default     = ""
}

# ---------------------------------------------------------------------------
# Cluster
# ---------------------------------------------------------------------------

variable "cluster_name" {
  description = "A name to provide for the Talos cluster"
  type        = string
}

variable "cluster_endpoint" {
  description = "The endpoint for the Talos cluster (e.g., https://10.0.0.100:6443)"
  type        = string
}

variable "cluster_vip" {
  description = "Virtual IP for the cluster control plane (optional, for HA setup)"
  type        = string
  default     = ""
}

# ---------------------------------------------------------------------------
# VM sizing
# ---------------------------------------------------------------------------

variable "vm_id_prefix" {
  description = "Starting VM ID prefix (e.g., 800 will create VMs 800, 801, 802...)"
  type        = number
  default     = 800
}

variable "controlplane_count" {
  description = "Number of control plane nodes"
  type        = number
  default     = 3
}

variable "worker_count" {
  description = "Number of worker nodes"
  type        = number
  default     = 2
}

variable "controlplane_cpu" {
  description = "Number of CPU cores for control plane nodes"
  type        = number
  default     = 2
}

variable "controlplane_memory" {
  description = "Memory in MB for control plane nodes"
  type        = number
  default     = 4096
}

# The type is number and the provider interprets it as gigabytes, so the
# description says "in GB" rather than the misleading "20G" string form.
variable "controlplane_disk_size" {
  description = "Disk size in GB for control plane nodes (e.g., 20)"
  type        = number
  default     = 20
}

variable "worker_cpu" {
  description = "Number of CPU cores for worker nodes"
  type        = number
  default     = 4
}

variable "worker_memory" {
  description = "Memory in MB for worker nodes"
  type        = number
  default     = 8192
}

variable "worker_disk_size" {
  description = "Disk size in GB for worker nodes (e.g., 50)"
  type        = number
  default     = 10
}

# ---------------------------------------------------------------------------
# Networking
# ---------------------------------------------------------------------------

variable "controlplane_ips" {
  description = "List of static IPs for control plane nodes (leave empty for DHCP)"
  type        = list(string)
  default     = []
}

variable "worker_ips" {
  description = "List of static IPs for worker nodes (leave empty for DHCP)"
  type        = list(string)
  default     = []
}

variable "proxmox_region" {
  description = "Proxmox region identifier for CCM"
  type        = string
  default     = "proxmox"
}

variable "gateway" {
  description = "Default gateway for static IP configuration"
  type        = string
  default     = ""
}

variable "netmask" {
  description = "Network mask (CIDR notation, e.g., 24 for 255.255.255.0)"
  type        = number
  default     = 24
}

variable "nameservers" {
  description = "List of DNS nameservers for static IP configuration"
  type        = list(string)
  default     = ["1.1.1.1", "8.8.8.8"]
}

# ---------------------------------------------------------------------------
# Proxmox CCM/CSI API tokens
# ---------------------------------------------------------------------------

variable "proxmox_ccm_token_secret" {
  description = "Proxmox API token secret for Cloud Controller Manager"
  type        = string
  sensitive   = true
}

variable "proxmox_csi_token_secret" {
  description = "Proxmox API token secret for CSI driver"
  type        = string
  sensitive   = true
}

26
terraform/versions.tf Normal file
View File

@@ -0,0 +1,26 @@
terraform {
  # The README documents Terraform >= 1.0; pin it so older CLIs fail fast.
  required_version = ">= 1.0"

  required_providers {
    talos = {
      source  = "siderolabs/talos"
      version = "0.9.0"
    }
    proxmox = {
      source  = "bpg/proxmox"
      version = "~> 0.69"
    }
    # main.tf uses time_sleep resources; declare the provider explicitly
    # instead of relying on the implied hashicorp/time default.
    time = {
      source  = "hashicorp/time"
      version = "~> 0.9"
    }
  }
}

provider "talos" {}

provider "proxmox" {
  endpoint = var.proxmox_endpoint
  username = var.proxmox_username
  password = var.proxmox_password
  insecure = var.proxmox_insecure

  # SSH access to the Proxmox node, used by the bpg provider for operations
  # the API alone cannot do (e.g., file/ISO handling).
  ssh {
    agent    = true
    username = var.proxmox_ssh_user
  }
}