terraform: extend kubernetes a little bit

This commit is contained in:
Jan Novak
2026-01-02 23:17:43 +01:00
parent bdf82c7e49
commit d3697c8132
14 changed files with 610 additions and 10 deletions

View File

@@ -12,8 +12,35 @@ provider "libvirt" {
uri = "qemu+ssh://novakj@192.168.0.7/system" uri = "qemu+ssh://novakj@192.168.0.7/system"
} }
provider "libvirt" {
alias = "kvm-homer"
uri = "qemu+ssh://novakj@192.168.0.7/system"
}
resource "libvirt_volume" "ubuntu_base" { provider "libvirt" {
alias = "kvm-beelink"
uri = "qemu+ssh://novakj@192.168.0.6/system"
}
resource "libvirt_volume" "ubuntu_base_homer" {
provider = libvirt.kvm-homer
name = "ubuntu-24.04-base.qcow2"
pool = "default"
create = {
content = {
url = "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img"
}
}
target = {
format = {
type = "qcow2"
}
}
}
resource "libvirt_volume" "ubuntu_base_beelink" {
provider = libvirt.kvm-beelink
name = "ubuntu-24.04-base.qcow2" name = "ubuntu-24.04-base.qcow2"
pool = "default" pool = "default"
create = { create = {

View File

@@ -0,0 +1,45 @@
# kube-prometheus-stack Helm values (production baseline).
# As rendered, this file had lost all indentation (invalid YAML); structure
# restored from the identical heredoc copy in the accompanying README.
prometheus:
  prometheusSpec:
    retention: 60d
    storageSpec:
      volumeClaimTemplate:
        spec:
          # storageClassName: <your-storage-class>
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 20Gi
    resources:
      requests:
        memory: 0.5Gi
        cpu: 500m
      limits:
        memory: 4Gi
        cpu: 2
    # Critical for ServiceMonitor discovery across namespaces
    serviceMonitorSelectorNilUsesHelmValues: false
    podMonitorSelectorNilUsesHelmValues: false
    ruleSelectorNilUsesHelmValues: false
alertmanager:
  alertmanagerSpec:
    storage:
      volumeClaimTemplate:
        spec:
          # storageClassName: <your-storage-class>
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 3Gi
grafana:
  persistence:
    enabled: true
    # storageClassName: <your-storage-class>
    size: 10Gi
  # SECURITY: plaintext default password committed to the repo -- change it,
  # or source it from an existing Secret instead of values.
  adminPassword: admin
prometheusOperator:
  admissionWebhooks:
    certManager:
      enabled: false # Set true if using cert-manager

View File

@@ -0,0 +1,79 @@
```bash
# 1. Add repo
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
# 2. Install CRDs separately (production best practice - avoids Helm CRD lifecycle issues)
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
# 3. Create values file (production baseline)
cat <<EOF > kube-prometheus-values.yaml
prometheus:
prometheusSpec:
retention: 60d
storageSpec:
volumeClaimTemplate:
spec:
# storageClassName: <your-storage-class>
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 20Gi
resources:
requests:
memory: 0.5Gi
cpu: 500m
limits:
memory: 4Gi
cpu: 2
# Critical for ServiceMonitor discovery across namespaces
serviceMonitorSelectorNilUsesHelmValues: false
podMonitorSelectorNilUsesHelmValues: false
ruleSelectorNilUsesHelmValues: false
alertmanager:
alertmanagerSpec:
storage:
volumeClaimTemplate:
spec:
# storageClassName: <your-storage-class>
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 3Gi
grafana:
persistence:
enabled: true
# storageClassName: <your-storage-class>
size: 10Gi
adminPassword: admin  # CHANGE ME: do not keep the default admin password in a real deployment
prometheusOperator:
admissionWebhooks:
certManager:
enabled: false # Set true if using cert-manager
EOF
# 4. Install (CRDs were applied server-side in step 2, so tell Helm to skip
#    its bundled copies; note `prometheusOperator.createCustomResource` is not
#    a value recognized by current kube-prometheus-stack charts)
helm install kube-prometheus-stack prometheus-community/kube-prometheus-stack \
  -n monitoring --create-namespace \
  --skip-crds \
  -f kube-prometheus-values.yaml
# 5. Verify
kubectl -n monitoring get pods
kubectl -n monitoring get prometheuses
kubectl -n monitoring get servicemonitors --all-namespaces
```

View File

@@ -0,0 +1,11 @@
# Smoke-test PVC: verifies the democratic-csi freenas-iscsi StorageClass can
# dynamically provision a 10Gi ReadWriteOnce volume.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: freenas-iscsi # your SC name
  resources:
    requests:
      storage: 10Gi

View File

@@ -0,0 +1,64 @@
# democratic-csi
```bash
helm repo add democratic-csi https://democratic-csi.github.io/charts/
# Create the values file FIRST, then install -- the original ran
# `helm install ... -f values.yaml` before values.yaml existed.
cat <<'EOF' > values.yaml
controller:
  driver:
    image:
      registry: docker.io/democraticcsi/democratic-csi
      tag: next
node:
  driver:
    image:
      registry: docker.io/democraticcsi/democratic-csi
      tag: next
csiDriver:
  name: "org.democratic-csi.iscsi"
driver:
  config:
    driver: freenas-api-iscsi
    httpConnection:
      host: 192.168.0.40
      # SECURITY: a live TrueNAS API key was committed here in plaintext.
      # Rotate that key and inject the replacement at deploy time.
      apiKey: <TRUENAS-API-KEY>
    iscsi:
      targetPortal: "192.168.0.40:3260"
      namePrefix: "csi-"
      nameSuffix: "-k8s"
      targetGroups:
        - targetGroupPortalGroup: 1
          targetGroupInitiatorGroup: 1
          targetGroupAuthType: "None"
    zfs:
      datasetParentName: "pool-6g/tank/k8s/vols"
      detachedSnapshotsDatasetParentName: "pool-6g/tank/k8s/snaps"
storageClasses:
  - name: freenas-iscsi
    defaultClass: true
    reclaimPolicy: Delete
    volumeBindingMode: Immediate
    allowVolumeExpansion: true
    parameters:
      fsType: ext4
EOF
helm install zfs-nvmeof democratic-csi/democratic-csi -f values.yaml
cat <<'EOF' > pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: test-pvc
spec:
accessModes:
- ReadWriteOnce
storageClassName: freenas-iscsi # your SC name
resources:
requests:
storage: 10Gi
EOF
```

View File

@@ -0,0 +1,40 @@
# democratic-csi Helm values: FreeNAS/TrueNAS iSCSI backend via the REST API.
# As rendered, this file had lost all indentation; structure restored per the
# chart's documented value layout.
controller:
  driver:
    image:
      registry: docker.io/democraticcsi/democratic-csi
      tag: next
node:
  driver:
    image:
      registry: docker.io/democraticcsi/democratic-csi
      tag: next
csiDriver:
  name: "org.democratic-csi.iscsi"
driver:
  config:
    driver: freenas-api-iscsi
    httpConnection:
      host: 192.168.0.40
      # SECURITY: a live TrueNAS API key was committed here in plaintext.
      # Rotate that key, keep the real value out of git, and supply it at
      # deploy time (e.g. --set driver.config.httpConnection.apiKey=...).
      apiKey: <TRUENAS-API-KEY>
    iscsi:
      targetPortal: "192.168.0.40:3260"
      namePrefix: "csi-"
      nameSuffix: "-k8s"
      targetGroups:
        - targetGroupPortalGroup: 1
          targetGroupInitiatorGroup: 1
          targetGroupAuthType: "None"
    zfs:
      datasetParentName: "pool-6g/tank/k8s/vols"
      detachedSnapshotsDatasetParentName: "pool-6g/tank/k8s/snaps"
storageClasses:
  - name: freenas-iscsi
    defaultClass: true
    reclaimPolicy: Delete
    volumeBindingMode: Immediate
    allowVolumeExpansion: true
    parameters:
      fsType: ext4

View File

@@ -10,10 +10,11 @@ locals {
resource "libvirt_volume" "ubuntu_disk" { resource "libvirt_volume" "ubuntu_disk" {
provider = libvirt.kvm-homer
name = "${local.master_vm_name}.qcow2" name = "${local.master_vm_name}.qcow2"
pool = "default" pool = "default"
backing_store = { backing_store = {
path = libvirt_volume.ubuntu_base.path path = libvirt_volume.ubuntu_base_homer.path
format = { format = {
type = "qcow2" type = "qcow2"
} }
@@ -123,6 +124,10 @@ locals {
--- ---
apiVersion: kubeadm.k8s.io/v1beta3 apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration kind: ClusterConfiguration
extraArgs:
oidc-issuer-url: "https://idm.home.hrajfrisbee.cz/oauth2/openid/k8s"
oidc-client-id: "k8s"
oidc-signing-algs: "ES256"
networking: networking:
podSubnet: "10.244.0.0/16" podSubnet: "10.244.0.0/16"
--- ---
@@ -198,6 +203,7 @@ locals {
} }
resource "libvirt_cloudinit_disk" "commoninit" { resource "libvirt_cloudinit_disk" "commoninit" {
provider = libvirt.kvm-homer
name = "${local.master_vm_name}-cloudinit.iso" name = "${local.master_vm_name}-cloudinit.iso"
user_data = local.user_data user_data = local.user_data
meta_data = yamlencode({ meta_data = yamlencode({
@@ -209,6 +215,7 @@ resource "libvirt_cloudinit_disk" "commoninit" {
# Create a volume from the cloud-init ISO # Create a volume from the cloud-init ISO
resource "libvirt_volume" "cloudinit" { resource "libvirt_volume" "cloudinit" {
provider = libvirt.kvm-homer
name = "${local.master_vm_name}-cloudinit.iso" name = "${local.master_vm_name}-cloudinit.iso"
pool = "default" pool = "default"
create = { create = {
@@ -220,6 +227,7 @@ resource "libvirt_volume" "cloudinit" {
resource "libvirt_domain" "master" { resource "libvirt_domain" "master" {
provider = libvirt.kvm-homer
name = local.master_vm_name name = local.master_vm_name
memory = "2048" memory = "2048"
memory_unit = "MiB" memory_unit = "MiB"

View File

@@ -0,0 +1,314 @@
# Identity and addressing for the second worker VM.
locals {
  vm_node_02_name       = "kube-node-33"
  vm_node_02_ip_address = "192.168.0.33"
}
# Copy-on-write root disk for node 02, backed by the shared Ubuntu base
# image on the beelink hypervisor.
resource "libvirt_volume" "node_02_disk" {
  provider = libvirt.kvm-beelink
  name     = "${local.vm_node_02_name}.qcow2"
  pool     = "default"

  backing_store = {
    path = libvirt_volume.ubuntu_base_beelink.path
    format = {
      type = "qcow2"
    }
  }

  target = {
    format = {
      type = "qcow2"
    }
  }

  # 20 GiB, expressed in bytes.
  capacity = 21474836480
}
locals {
  # Cloud-init user data for worker node 02. Interpolates
  # local.zot_registry_ip and local.join_command, which are declared
  # elsewhere in this configuration.
  #
  # Fixes vs. the previous revision:
  #  * removed a duplicate write_files entry for /etc/containerd/config.toml
  #    (cloud-init wrote both; only the later one -- the one that also sets
  #    the registry config_path -- took effect, so the first was dead weight);
  #  * `apt install linux-modules-extra-...` now passes -y so the
  #    non-interactive cloud-init run cannot stall on a prompt.
  user_data_node_02 = <<-EOF
    #cloud-config
    hostname: ${local.vm_node_02_name}
    users:
      - name: ubuntu
        sudo: ALL=(ALL) NOPASSWD:ALL
        shell: /bin/bash
        ssh_authorized_keys:
          - ${file("~/.ssh/id_rsa.pub")}
    # TODO(review): plaintext default password committed to the repo --
    # change it, or drop password auth entirely and rely on the SSH key.
    chpasswd:
      list: |
        ubuntu:yourpassword
      expire: false
    ssh_pwauth: true
    package_update: true
    packages:
      - qemu-guest-agent
      - openssh-server
      - apt-transport-https
      - ca-certificates
      - curl
      - gnupg
      - nvme-cli
    write_files:
      - path: /etc/modules-load.d/k8s.conf
        content: |
          overlay
          br_netfilter
      - path: /etc/sysctl.d/k8s.conf
        content: |
          net.bridge.bridge-nf-call-iptables = 1
          net.bridge.bridge-nf-call-ip6tables = 1
          net.ipv4.ip_forward = 1
      # containerd: systemd cgroups + registry mirror config_path enabled
      - path: /etc/containerd/config.toml
        content: |
          version = 2
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          runtime_type = "io.containerd.runc.v2"
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
          SystemdCgroup = true
          [plugins."io.containerd.grpc.v1.cri".registry]
          config_path = "/etc/containerd/certs.d"
      # Mirror configs for each upstream registry (pull-through via Zot)
      - path: /etc/containerd/certs.d/docker.io/hosts.toml
        content: |
          server = "https://registry-1.docker.io"
          [host."http://${local.zot_registry_ip}:5000/v2/docker.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = true
          override_path = true
      - path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml
        content: |
          server = "https://registry.k8s.io"
          [host."http://${local.zot_registry_ip}:5000/v2/registry.k8s.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = true
          override_path = true
      - path: /etc/containerd/certs.d/ghcr.io/hosts.toml
        content: |
          server = "https://ghcr.io"
          [host."http://${local.zot_registry_ip}:5000/v2/ghcr.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = true
          override_path = true
      - path: /etc/containerd/certs.d/quay.io/hosts.toml
        content: |
          server = "https://quay.io"
          [host."http://${local.zot_registry_ip}:5000/v2/quay.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = true
          override_path = true
      - path: /root/kubeadm-config.yaml
        content: |
          apiVersion: kubeadm.k8s.io/v1beta3
          kind: InitConfiguration
          nodeRegistration:
            criSocket: unix:///run/containerd/containerd.sock
          ---
          apiVersion: kubeadm.k8s.io/v1beta3
          kind: ClusterConfiguration
          networking:
            podSubnet: "10.244.0.0/16"
          ---
          apiVersion: kubelet.config.k8s.io/v1beta1
          kind: KubeletConfiguration
          cgroupDriver: systemd
      - path: /etc/profile.d/kubectl.sh
        content: |
          alias k='kubectl'
          source <(kubectl completion bash)
          complete -o default -F __start_kubectl k
    runcmd:
      - systemctl enable --now qemu-guest-agent
      - systemctl enable --now ssh
      # needed for nvme-tcp module (-y: cloud-init runs non-interactively)
      - apt-get install -y linux-modules-extra-$(uname -r)
      - modprobe nvme-tcp
      - echo "nvme-tcp" >> /etc/modules-load.d/nvme-tcp.conf
      # relevant to kubernetes
      - modprobe overlay
      - modprobe br_netfilter
      - sysctl --system
      # containerd
      - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
      - echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list
      - apt-get update && apt-get install -y containerd.io
      - systemctl restart containerd
      # kubeadm/kubelet/kubectl v1.32
      - curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.32/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
      - echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.32/deb/ /" > /etc/apt/sources.list.d/kubernetes.list
      - apt-get update && apt-get install -y kubelet kubeadm kubectl
      - apt-mark hold kubelet kubeadm kubectl
      # join cluster (local.join_command is produced elsewhere in this module)
      - ${local.join_command}
  EOF

  # Netplan v2 config pinning the node's static address; cidr/gateway/dns
  # come from shared locals defined elsewhere in this module.
  network_config_node_02 = <<-EOF
    version: 2
    ethernets:
      eth0:
        match:
          driver: virtio_net
        addresses:
          - ${local.vm_node_02_ip_address}/${local.cidr}
        routes:
          - to: default
            via: ${local.gateway}
        nameservers:
          addresses:
            - ${local.dns}
  EOF
}
# Cloud-init seed ISO bundling user data, instance metadata, and the
# netplan network config for node 02.
resource "libvirt_cloudinit_disk" "commoninit_node_02" {
  provider  = libvirt.kvm-beelink
  name      = "${local.vm_node_02_name}-cloudinit.iso"
  user_data = local.user_data_node_02
  meta_data = yamlencode({
    instance-id    = local.vm_node_02_name
    local-hostname = local.vm_node_02_name
  })
  network_config = local.network_config_node_02
}
# Upload the generated cloud-init ISO into the default storage pool so the
# domain can attach it as a CD-ROM device.
resource "libvirt_volume" "cloudinit_node_02" {
  provider = libvirt.kvm-beelink
  name     = "${local.vm_node_02_name}-cloudinit.iso"
  pool     = "default"
  create = {
    content = {
      # NOTE(review): `url` is fed the local file path of the cloudinit disk;
      # confirm the provider accepts plain paths here (mirrors the node-01
      # setup, so presumably it does).
      url = libvirt_cloudinit_disk.commoninit_node_02.path
    }
  }
}
# Worker VM definition for node 02 on the beelink hypervisor:
# 8 GiB RAM, 4 vCPUs, virtio root disk plus the cloud-init ISO as a CD-ROM,
# bridged networking, and a serial console.
resource "libvirt_domain" "node_02" {
provider = libvirt.kvm-beelink
name = local.vm_node_02_name
memory = "8192"
memory_unit = "MiB"
vcpu = 4
type = "kvm"
autostart = true
running = true
os = {
type = "hvm"
type_arch = "x86_64"
type_machine = "q35"
}
devices = {
disks = [
# Root disk (vda): qcow2 volume created from the Ubuntu base image.
{
driver = {
name = "qemu"
type = "qcow2"
}
source = {
file = {
file = libvirt_volume.node_02_disk.path
}
}
target = {
dev = "vda"
bus = "virtio"
}
},
# Cloud-init seed ISO attached as a SATA CD-ROM (sda).
{
device = "cdrom"
driver = {
name = "qemu"
type = "raw"
}
source = {
file = {
file = libvirt_volume.cloudinit_node_02.path
}
}
target = {
dev = "sda"
bus = "sata"
}
}
]
# Single virtio NIC attached to the shared host bridge (local.bridge is
# defined elsewhere in this module).
interfaces = [
{
type = "bridge"
model = {
type = "virtio"
}
source = {
bridge = {
bridge = local.bridge
}
}
}
]
serials = [
{
type = "pty"
}
]
# XSLT patch applied to the generated domain XML: injects a serial
# console, VNC graphics, and the qemu-guest-agent virtio channel, which
# the provider schema does not expose directly.
xml = {
xslt = <<-XSLT
<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="yes"/>
<xsl:template match="@*|node()">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>
<xsl:template match="devices">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
<serial type="pty">
<target port="0"/>
</serial>
<console type="pty">
<target type="serial" port="0"/>
</console>
<graphics type="vnc" port="-1" autoport="yes" listen="0.0.0.0">
<listen type="address" address="0.0.0.0"/>
</graphics>
<channel type="unix">
<target type="virtio" name="org.qemu.guest_agent.0"/>
</channel>
</xsl:copy>
</xsl:template>
</xsl:stylesheet>
XSLT
}
}
}
# Static IP assigned to node 02, exposed for convenience (ssh, debugging).
output "node_02_ip" {
  value = local.vm_node_02_ip_address
}

View File

@@ -5,10 +5,11 @@ locals {
resource "libvirt_volume" "node_01_disk" { resource "libvirt_volume" "node_01_disk" {
provider = libvirt.kvm-homer
name = "${local.vm_node_01_name}.qcow2" name = "${local.vm_node_01_name}.qcow2"
pool = "default" pool = "default"
backing_store = { backing_store = {
path = libvirt_volume.ubuntu_base.path path = libvirt_volume.ubuntu_base_homer.path
format = { format = {
type = "qcow2" type = "qcow2"
} }
@@ -22,7 +23,7 @@ resource "libvirt_volume" "node_01_disk" {
} }
locals { locals {
user_data_node = <<-EOF user_data_node_01 = <<-EOF
#cloud-config #cloud-config
hostname: ${local.vm_node_01_name} hostname: ${local.vm_node_01_name}
users: users:
@@ -44,6 +45,7 @@ locals {
- ca-certificates - ca-certificates
- curl - curl
- gnupg - gnupg
- nvme-cli
write_files: write_files:
- path: /etc/modules-load.d/k8s.conf - path: /etc/modules-load.d/k8s.conf
@@ -135,6 +137,11 @@ locals {
- systemctl enable --now qemu-guest-agent - systemctl enable --now qemu-guest-agent
- systemctl enable --now ssh - systemctl enable --now ssh
# needed for nvme-tcp module
- apt install linux-modules-extra-$(uname -r)
- modprobe nvme-tcp
- echo "nvme-tcp" >> /etc/modules-load.d/nvme-tcp.conf
# relevant to kubernetes # relevant to kubernetes
- modprobe overlay - modprobe overlay
- modprobe br_netfilter - modprobe br_netfilter
@@ -175,8 +182,9 @@ locals {
} }
resource "libvirt_cloudinit_disk" "commoninit_node_01" { resource "libvirt_cloudinit_disk" "commoninit_node_01" {
provider = libvirt.kvm-homer
name = "${local.vm_node_01_name}-cloudinit.iso" name = "${local.vm_node_01_name}-cloudinit.iso"
user_data = local.user_data_node user_data = local.user_data_node_01
meta_data = yamlencode({ meta_data = yamlencode({
instance-id = local.vm_node_01_name instance-id = local.vm_node_01_name
local-hostname = local.vm_node_01_name local-hostname = local.vm_node_01_name
@@ -186,6 +194,7 @@ resource "libvirt_cloudinit_disk" "commoninit_node_01" {
# Create a volume from the cloud-init ISO # Create a volume from the cloud-init ISO
resource "libvirt_volume" "cloudinit_node_01" { resource "libvirt_volume" "cloudinit_node_01" {
provider = libvirt.kvm-homer
name = "${local.vm_node_01_name}-cloudinit.iso" name = "${local.vm_node_01_name}-cloudinit.iso"
pool = "default" pool = "default"
create = { create = {
@@ -197,10 +206,11 @@ resource "libvirt_volume" "cloudinit_node_01" {
resource "libvirt_domain" "node_01" { resource "libvirt_domain" "node_01" {
provider = libvirt.kvm-homer
name = local.vm_node_01_name name = local.vm_node_01_name
memory = "4096" memory = "8192"
memory_unit = "MiB" memory_unit = "MiB"
vcpu = 2 vcpu = 4
type = "kvm" type = "kvm"
autostart = true autostart = true
running = true running = true

View File

@@ -12,6 +12,8 @@
```bash ```bash
# recreate specific resources # recreate specific resources
tofu destroy -target=libvirt_domain.ubuntu_vm -target=libvirt_volume.cloudinit tofu destroy -target=libvirt_domain.ubuntu_vm -target=libvirt_volume.cloudinit
tofu destroy -target=null_resource.kubeadm_token
tofu destroy -target=libvirt_cloudinit_disk.commoninit_node_01 -target=libvirt_cloudinit_disk.commoninit_node_02
tofu apply tofu apply
# taint resource to have it recreated # taint resource to have it recreated

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long