Compare commits

...

4 Commits

Author SHA1 Message Date
Jan Novak 1096c7b603 gitops: plane - project management 2026-01-05 11:32:55 +01:00
Jan Novak d3697c8132 terraform: extend kubernetes a little bit 2026-01-02 23:17:43 +01:00
Jan Novak bdf82c7e49 gitops: cert-manager (semi manual deployment / incomplete) 2026-01-02 23:16:41 +01:00
Jan Novak 777772019c docker-30: kanidm deployment 2026-01-02 23:15:30 +01:00
22 changed files with 13047 additions and 14 deletions

View File

@@ -0,0 +1,74 @@
## add user to k8s group
based on: https://blog.kammel.dev/post/k8s_home_lab_2025_06/
```bash
export GROUP_NAME=k8s_users
kanidm group create ${GROUP_NAME}
kanidm group add-members ${GROUP_NAME} novakj
export OAUTH2_NAME=k8s
kanidm system oauth2 create-public ${OAUTH2_NAME} ${OAUTH2_NAME} http://localhost:8000
kanidm system oauth2 add-redirect-url ${OAUTH2_NAME} http://localhost:8000
kanidm system oauth2 update-scope-map ${OAUTH2_NAME} ${GROUP_NAME} email openid profile groups
kanidm system oauth2 enable-localhost-redirects ${OAUTH2_NAME}
kubectl oidc-login setup \
--oidc-issuer-url=https://idm.home.hrajfrisbee.cz/oauth2/openid/k8s \
--oidc-client-id=k8s
kubectl config set-credentials oidc \
--exec-api-version=client.authentication.k8s.io/v1 \
--exec-interactive-mode=Never \
--exec-command=kubectl \
--exec-arg=oidc-login \
--exec-arg=get-token \
--exec-arg="--oidc-issuer-url=https://idm.home.hrajfrisbee.cz/oauth2/openid/k8s" \
--exec-arg="--oidc-client-id=k8s"
kubectl create clusterrolebinding oidc-cluster-admin \
--clusterrole=cluster-admin \
--user='https://idm.home.hrajfrisbee.cz/oauth2/openid/k8s#35842461-a1c4-4ad6-8b29-697c5ddbfe84'
```
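To sanity-check the flow end to end, the `oidc` credential can be exercised directly; a minimal check, assuming the kubeconfig entries created above:
```bash
# first call triggers kubelogin's browser flow against kanidm
kubectl --user=oidc get nodes
# optionally make the OIDC user the default for the current context
kubectl config set-context --current --user=oidc
```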
## commands
```bash
# recover admin password
# on the docker host
docker exec -i -t kanidmd kanidmd recover-account admin
docker exec -i -t kanidmd kanidmd recover-account idm_admin
# kanidm management commands (can be run from any logged-in client)
kanidm person credential create-reset-token novakj
kanidm person get novakj | grep memberof
kanidm group get k8s_users
kanidm group get idm_all_accounts
kanidm group get idm_all_persons
kanidm group account-policy credential-type-minimum idm_all_accounts any
kanidm person get novakj | grep memberof
kanidm group get idm_people_self_name_write
```
```bash
docker run -d --name=kanidmd --restart=always \
-p '8443:8443' \
-p '3636:3636' \
--volume /srv/docker/kanidm/data:/data \
docker.io/kanidm/server:latest
docker run --rm -i -t \
-p '8443:8443' \
-p '3636:3636' \
--volume /srv/docker/kanidm/data:/data \
docker.io/kanidm/server:latest \
kanidmd cert-generate
```
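For the `kanidm` client commands above to reach this instance, the client has to be pointed at the server first; a minimal sketch, assuming the default client config path and this lab's domain:
```bash
# ~/.config/kanidm -- minimal client configuration (assumed path and URI)
cat <<'EOF' > ~/.config/kanidm
uri = "https://idm.home.hrajfrisbee.cz"
EOF
kanidm login --name idm_admin   # after this, the person/group commands above work
```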

View File

@@ -0,0 +1,136 @@
# The server configuration file version.
version = "2"
# The webserver bind address. Requires TLS certificates.
# If the port is set to 443 you may require the
# NET_BIND_SERVICE capability. This accepts a single address
# or an array of addresses to listen on.
# Defaults to "127.0.0.1:8443"
bindaddress = "0.0.0.0:8443"
#
# The read-only ldap server bind address. Requires
# TLS certificates. If set to 636 you may require the
# NET_BIND_SERVICE capability. This accepts a single address
# or an array of addresses to listen on.
# Defaults to "" (disabled)
# ldapbindaddress = "0.0.0.0:3636"
#
# The path to the kanidm database.
db_path = "/data/kanidm.db"
#
# If you have a known filesystem, kanidm can tune the
# database page size to match. Valid choices are:
# [zfs, other]
# If you are unsure about this leave it as the default
# (other). After changing this
# value you must run a vacuum task.
# - zfs:
# * sets database pagesize to 64k. You must set
# recordsize=64k on the zfs filesystem.
# - other:
# * sets database pagesize to 4k, matching most
# filesystems block sizes.
# db_fs_type = "zfs"
#
# The number of entries to store in the in-memory cache.
# Minimum value is 256. If unset
# an automatic heuristic is used to scale this.
# You should only adjust this value if you experience
# memory pressure on your system.
# db_arc_size = 2048
#
# TLS chain and key in pem format. Both must be present.
# If the server receives a SIGHUP, these files will be
# re-read and reloaded if their content is valid.
tls_chain = "/data/chain.pem"
tls_key = "/data/key.pem"
#
# The log level of the server. May be one of info, debug, trace
#
# NOTE: this can be overridden by the environment variable
# `KANIDM_LOG_LEVEL` at runtime
# Defaults to "info"
# log_level = "info"
#
# The DNS domain name of the server. This is used in a
# number of security-critical contexts
# such as webauthn, so it *must* match your DNS
# hostname. It is used to create
# security principal names such as `william@idm.example.com`
# so that in a (future) trust configuration it is possible
# to have unique Security Principal Names (spns) throughout
# the topology.
#
# ⚠️ WARNING ⚠️
#
# Changing this value WILL break many types of registered
# credentials for accounts including but not limited to
# webauthn, oauth tokens, and more.
# If you change this value you *must* run
# `kanidmd domain rename` immediately after.
domain = "idm.home.hrajfrisbee.cz"
#
# The origin for webauthn. This is the url to the server,
# with the port included if it is non-standard (any port
# except 443). This must match or be a descendent of the
# domain name you configure above. If these two items are
# not consistent, the server WILL refuse to start!
# origin = "https://idm.example.com"
# # OR
# origin = "https://idm.example.com:8443"
origin = "https://idm.home.hrajfrisbee.cz"
# HTTPS requests can be reverse proxied by a loadbalancer.
# To preserve the original IP of the caller, these systems
# will often add a header such as "Forwarded" or
# "X-Forwarded-For". Some other proxies can use the PROXY
# protocol v2 header. While we support the PROXY protocol
# v1 header, we STRONGLY discourage its use as it has
# significantly greater overheads compared to v2 during
# processing.
# This setting allows configuration of the list of trusted
# IPs or IP ranges which can supply this header information,
# and which format the information is provided in.
# Defaults to "none" (no trusted sources)
# Only one option can be used at a time.
# [http_client_address_info]
# proxy-v2 = ["127.0.0.1", "127.0.0.0/8"]
# # OR
# [http_client_address_info]
# x-forward-for = ["127.0.0.1", "127.0.0.0/8"]
# # OR
# [http_client_address_info]
# # AVOID IF POSSIBLE!!!
# proxy-v1 = ["127.0.0.1", "127.0.0.0/8"]
# LDAPS requests can be reverse proxied by a loadbalancer.
# To preserve the original IP of the caller, these systems
# can add a header such as the PROXY protocol v2 header.
# While we support the PROXY protocol v1 header, we STRONGLY
# discourage its use as it has significantly greater
# overheads compared to v2 during processing.
# This setting allows configuration of the list of trusted
# IPs or IP ranges which can supply this header information,
# and which format the information is provided in.
# Defaults to "none" (no trusted sources)
# [ldap_client_address_info]
# proxy-v2 = ["127.0.0.1", "127.0.0.0/8"]
# # OR
# [ldap_client_address_info]
# # AVOID IF POSSIBLE!!!
# proxy-v1 = ["127.0.0.1", "127.0.0.0/8"]
[online_backup]
# The path to the output folder for online backups
path = "/data/kanidm/backups/"
# The schedule to run online backups (see https://crontab.guru/)
# every day at 22:00 UTC (default)
schedule = "00 22 * * *"
# four times a day, at 3 minutes past every 6th hour
# schedule = "03 */6 * * *"
# We also support a non-standard cron syntax with the following format:
#   sec min hour day-of-month month day-of-week year
# (very similar to standard cron; it just allows specifying the seconds
# at the beginning and the year at the end)
# Number of backups to keep (default 7)
# versions = 7
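As the TLS comment above notes, the server re-reads `tls_chain`/`tls_key` on SIGHUP, so certificates can be rotated without a restart; with the container from the README this is just a signal (container name assumed from the `docker run` above):
```bash
# after replacing chain.pem / key.pem under /srv/docker/kanidm/data on the host
docker kill --signal=HUP kanidmd
```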

View File

@@ -0,0 +1,17 @@
# docker-30
## tailscale
```bash
# Add signing key
curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/$(lsb_release -cs).noarmor.gpg | sudo tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null
# Add repo
echo "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/ubuntu $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/tailscale.list
# Install
sudo apt update && sudo apt install tailscale
# Start
sudo tailscale up
```
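Once `tailscale up` has been authorised, connectivity can be checked from the same host:
```bash
tailscale status   # peers and their tailnet addresses
tailscale ip -4    # this machine's tailnet IPv4
```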

File diff suppressed because it is too large

View File

@@ -20,11 +20,12 @@ spec:
crds: CreateReplace
values:
crds:
enabled: true
prometheus:
enabled: false
prometheus:
enabled: true
extraObjects:
- apiVersion: cert-manager.io/v1
- |
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
@@ -38,7 +39,8 @@ spec:
- http01:
ingress:
ingressClassName: nginx
- apiVersion: cert-manager.io/v1
- |
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod

View File

@@ -0,0 +1,135 @@
# helmrelease.yaml
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: plane
namespace: plane
spec:
interval: 30m
chart:
spec:
chart: plane-ce
version: "1.16.0" # pin version, avoid 'stable'
sourceRef:
kind: HelmRepository
name: plane
namespace: flux-system
interval: 12h
timeout: 10m
install:
createNamespace: true
remediation:
retries: 3
upgrade:
remediation:
retries: 3
values:
planeVersion: "v1.16.0"
ingress:
enabled: true
appHost: "plane.lab.home.hrajfrisbee.cz"
minioHost: "plane-minio.lab.home.hrajfrisbee.cz"
rabbitmqHost: "plane-mq.lab.home.hrajfrisbee.cz" # optional
ingressClass: nginx
ingress_annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
# nginx.ingress.kubernetes.io/proxy-body-size: "10m"
# PostgreSQL - local stateful or external
postgres:
local_setup: true
storageClass: freenas-iscsi
volumeSize: 10Gi
# assign_cluster_ip: false
# nodeSelector: {}
# tolerations: []
# affinity: {}
# Redis/Valkey
redis:
local_setup: true
storageClass: freenas-iscsi
volumeSize: 2Gi
# RabbitMQ
rabbitmq:
local_setup: true
storageClass: freenas-iscsi
volumeSize: 1Gi
# MinIO (S3-compatible storage)
minio:
local_setup: true
storageClass: freenas-iscsi
volumeSize: 10Gi
env:
# Database credentials (change these!)
pgdb_username: plane
pgdb_password: plane-not-so-secret # TODO: do this properly
pgdb_name: plane
# Application secret (MUST change - used for encryption)
secret_key: 6u8w9T8P9zolcTMTC1DnErasyHnE6QGyB77tCPPFC/mnbPykb6DfiMW6id3Qy+Ly
# Storage
docstore_bucket: uploads
doc_upload_size_limit: 5242880
# Optional: External services (when local_setup: false)
# pgdb_remote_url: "postgresql://user:pass@host:5432/plane"
# remote_redis_url: "redis://host:6379/"
# aws_access_key: ""
# aws_secret_access_key: ""
# aws_region: ""
# aws_s3_endpoint_url: ""
# Workload resources (adjust based on cluster capacity)
web:
replicas: 2
memoryLimit: 1000Mi
cpuLimit: 500m
memoryRequest: 128Mi
cpuRequest: 100m
api:
replicas: 2
memoryLimit: 1000Mi
cpuLimit: 500m
memoryRequest: 128Mi
cpuRequest: 100m
worker:
replicas: 1
memoryLimit: 1000Mi
cpuLimit: 500m
beatworker:
replicas: 1
memoryLimit: 500Mi
cpuLimit: 250m
space:
replicas: 1
memoryLimit: 500Mi
cpuLimit: 250m
admin:
replicas: 1
memoryLimit: 500Mi
cpuLimit: 250m
live:
replicas: 1
memoryLimit: 500Mi
cpuLimit: 250m
# TLS (requires cert-manager)
ssl:
createIssuer: false
generateCerts: true
issuer: letsencrypt-prod
# email: admin@example.com
# server: https://acme-v02.api.letsencrypt.org/directory
# tls_secret_name: plane-tls # if using existing cert
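Once this is committed, Flux reconciles the release; a quick way to watch it land (standard flux CLI, assuming Flux is bootstrapped against this repo):
```bash
flux reconcile source helm plane -n flux-system   # refresh the HelmRepository index
flux reconcile helmrelease plane -n plane          # force a reconcile of the release
flux get helmreleases -n plane                     # Ready status / applied revision
kubectl -n plane get pods                          # workloads created by the chart
```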

View File

@@ -0,0 +1,8 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: plane
namespace: flux-system
spec:
interval: 1h
url: https://helm.plane.so/

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: plane

View File

@@ -12,8 +12,35 @@ provider "libvirt" {
uri = "qemu+ssh://novakj@192.168.0.7/system"
}
provider "libvirt" {
alias = "kvm-homer"
uri = "qemu+ssh://novakj@192.168.0.7/system"
}
resource "libvirt_volume" "ubuntu_base" {
provider "libvirt" {
alias = "kvm-beelink"
uri = "qemu+ssh://novakj@192.168.0.6/system"
}
resource "libvirt_volume" "ubuntu_base_homer" {
provider = libvirt.kvm-homer
name = "ubuntu-24.04-base.qcow2"
pool = "default"
create = {
content = {
url = "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img"
}
}
target = {
format = {
type = "qcow2"
}
}
}
resource "libvirt_volume" "ubuntu_base_beelink" {
provider = libvirt.kvm-beelink
name = "ubuntu-24.04-base.qcow2"
pool = "default"
create = {

View File

@@ -0,0 +1,45 @@
prometheus:
prometheusSpec:
retention: 60d
storageSpec:
volumeClaimTemplate:
spec:
# storageClassName: <your-storage-class>
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 20Gi
resources:
requests:
memory: 0.5Gi
cpu: 500m
limits:
memory: 4Gi
cpu: 2
# Critical for ServiceMonitor discovery across namespaces
serviceMonitorSelectorNilUsesHelmValues: false
podMonitorSelectorNilUsesHelmValues: false
ruleSelectorNilUsesHelmValues: false
alertmanager:
alertmanagerSpec:
storage:
volumeClaimTemplate:
spec:
# storageClassName: <your-storage-class>
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 3Gi
grafana:
persistence:
enabled: true
# storageClassName: <your-storage-class>
size: 10Gi
adminPassword: admin
prometheusOperator:
admissionWebhooks:
certManager:
enabled: false # Set true if using cert-manager

View File

@@ -0,0 +1,79 @@
```bash
# 1. Add repo
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
# 2. Install CRDs separately (production best practice - avoids Helm CRD lifecycle issues)
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
# 3. Create values file (production baseline)
cat <<EOF > kube-prometheus-values.yaml
prometheus:
prometheusSpec:
retention: 60d
storageSpec:
volumeClaimTemplate:
spec:
# storageClassName: <your-storage-class>
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 20Gi
resources:
requests:
memory: 0.5Gi
cpu: 500m
limits:
memory: 4Gi
cpu: 2
# Critical for ServiceMonitor discovery across namespaces
serviceMonitorSelectorNilUsesHelmValues: false
podMonitorSelectorNilUsesHelmValues: false
ruleSelectorNilUsesHelmValues: false
alertmanager:
alertmanagerSpec:
storage:
volumeClaimTemplate:
spec:
# storageClassName: <your-storage-class>
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 3Gi
grafana:
persistence:
enabled: true
# storageClassName: <your-storage-class>
size: 10Gi
adminPassword: admin
prometheusOperator:
admissionWebhooks:
certManager:
enabled: false # Set true if using cert-manager
EOF
# 4. Install
helm install kube-prometheus-stack prometheus-community/kube-prometheus-stack \
-n monitoring --create-namespace \
--set prometheusOperator.createCustomResource=false \
-f kube-prometheus-values.yaml
# 5. Verify
kubectl -n monitoring get pods
kubectl -n monitoring get prometheuses
kubectl -n monitoring get servicemonitors --all-namespaces
```
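Without an ingress, the UIs are easiest to reach with a port-forward; the service names below follow the chart's usual `<release>-grafana` / `<release>-prometheus` pattern for the release name used above, adjust if they differ:
```bash
# run each in its own terminal
kubectl -n monitoring port-forward svc/kube-prometheus-stack-grafana 3000:80
kubectl -n monitoring port-forward svc/kube-prometheus-stack-prometheus 9090:9090
# Grafana: http://localhost:3000 (admin / adminPassword from the values file)
# Prometheus: http://localhost:9090
```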

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: test-pvc
spec:
accessModes:
- ReadWriteOnce
storageClassName: freenas-iscsi # your SC name
resources:
requests:
storage: 10Gi
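Whether dynamic provisioning works end to end shows up as this claim going Bound:
```bash
kubectl get pvc test-pvc          # should move from Pending to Bound
kubectl get pv | grep test-pvc    # the dynamically created backing volume
```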

View File

@@ -0,0 +1,64 @@
# democratic-csi
```bash
helm repo add democratic-csi https://democratic-csi.github.io/charts/
helm install zfs-nvmeof democratic-csi/democratic-csi -f values.yaml
cat <<'EOF' > values.yaml
controller:
driver:
image:
registry: docker.io/democraticcsi/democratic-csi
tag: next
node:
driver:
image:
registry: docker.io/democraticcsi/democratic-csi
tag: next
csiDriver:
name: "org.democratic-csi.iscsi"
driver:
config:
driver: freenas-api-iscsi
httpConnection:
host: 192.168.0.40
apiKey: 1-0uvRlu1pca3Ed5HAAsEbs7nkx7Rxr6SpsxTd1431x9yhj68hD6qkXl7ovmGTxDTh
iscsi:
targetPortal: "192.168.0.40:3260"
namePrefix: "csi-"
nameSuffix: "-k8s"
targetGroups:
- targetGroupPortalGroup: 1
targetGroupInitiatorGroup: 1
targetGroupAuthType: "None"
zfs:
datasetParentName: "pool-6g/tank/k8s/vols"
detachedSnapshotsDatasetParentName: "pool-6g/tank/k8s/snaps"
storageClasses:
- name: freenas-iscsi
defaultClass: true
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
fsType: ext4
EOF
cat <<'EOF' > pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: test-pvc
spec:
accessModes:
- ReadWriteOnce
storageClassName: freenas-iscsi # your SC name
resources:
requests:
storage: 10Gi
EOF
```
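Before testing the PVC it is worth confirming that the driver and storage class registered; a few read-only checks (pod names include the release name from the install above):
```bash
kubectl get csidrivers                    # org.democratic-csi.iscsi should be listed
kubectl get storageclass freenas-iscsi    # the class from values.yaml, marked default
kubectl get pods | grep zfs-nvmeof        # controller and per-node driver pods
```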

View File

@@ -0,0 +1,40 @@
controller:
driver:
image:
registry: docker.io/democraticcsi/democratic-csi
tag: next
node:
driver:
image:
registry: docker.io/democraticcsi/democratic-csi
tag: next
csiDriver:
name: "org.democratic-csi.iscsi"
driver:
config:
driver: freenas-api-iscsi
httpConnection:
host: 192.168.0.40
apiKey: 1-0uvRlu1pca3Ed5HAAsEbs7nkx7Rxr6SpsxTd1431x9yhj68hD6qkXl7ovmGTxDTh
iscsi:
targetPortal: "192.168.0.40:3260"
namePrefix: "csi-"
nameSuffix: "-k8s"
targetGroups:
- targetGroupPortalGroup: 1
targetGroupInitiatorGroup: 1
targetGroupAuthType: "None"
zfs:
datasetParentName: "pool-6g/tank/k8s/vols"
detachedSnapshotsDatasetParentName: "pool-6g/tank/k8s/snaps"
storageClasses:
- name: freenas-iscsi
defaultClass: true
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
fsType: ext4

View File

@@ -10,10 +10,11 @@ locals {
resource "libvirt_volume" "ubuntu_disk" {
provider = libvirt.kvm-homer
name = "${local.master_vm_name}.qcow2"
pool = "default"
backing_store = {
path = libvirt_volume.ubuntu_base.path
path = libvirt_volume.ubuntu_base_homer.path
format = {
type = "qcow2"
}
@@ -123,6 +124,10 @@ locals {
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  extraArgs:
    oidc-issuer-url: "https://idm.home.hrajfrisbee.cz/oauth2/openid/k8s"
    oidc-client-id: "k8s"
    oidc-signing-algs: "ES256"
networking:
podSubnet: "10.244.0.0/16"
---
@@ -198,6 +203,7 @@ locals {
}
resource "libvirt_cloudinit_disk" "commoninit" {
provider = libvirt.kvm-homer
name = "${local.master_vm_name}-cloudinit.iso"
user_data = local.user_data
meta_data = yamlencode({
@@ -209,6 +215,7 @@ resource "libvirt_cloudinit_disk" "commoninit" {
# Create a volume from the cloud-init ISO
resource "libvirt_volume" "cloudinit" {
provider = libvirt.kvm-homer
name = "${local.master_vm_name}-cloudinit.iso"
pool = "default"
create = {
@@ -220,6 +227,7 @@ resource "libvirt_volume" "cloudinit" {
resource "libvirt_domain" "master" {
provider = libvirt.kvm-homer
name = local.master_vm_name
memory = "2048"
memory_unit = "MiB"
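The OIDC flags added to the kubeadm ClusterConfiguration above only take effect once kubeadm (re)generates the kube-apiserver static pod; whether they are live can be checked from the cluster:
```bash
# the static pod carries the standard component label; the flags should appear in its spec
kubectl -n kube-system get pods -l component=kube-apiserver -o yaml | grep oidc-
```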

View File

@@ -0,0 +1,314 @@
locals {
vm_node_02_name = "kube-node-33"
vm_node_02_ip_address = "192.168.0.33"
}
resource "libvirt_volume" "node_02_disk" {
provider = libvirt.kvm-beelink
name = "${local.vm_node_02_name}.qcow2"
pool = "default"
backing_store = {
path = libvirt_volume.ubuntu_base_beelink.path
format = {
type = "qcow2"
}
}
target = {
format = {
type = "qcow2"
}
}
capacity = 21474836480
}
locals {
user_data_node_02 = <<-EOF
#cloud-config
hostname: ${local.vm_node_02_name}
users:
- name: ubuntu
sudo: ALL=(ALL) NOPASSWD:ALL
shell: /bin/bash
ssh_authorized_keys:
- ${file("~/.ssh/id_rsa.pub")}
chpasswd:
list: |
ubuntu:yourpassword
expire: false
ssh_pwauth: true
package_update: true
packages:
- qemu-guest-agent
- openssh-server
- apt-transport-https
- ca-certificates
- curl
- gnupg
- nvme-cli
write_files:
- path: /etc/modules-load.d/k8s.conf
content: |
overlay
br_netfilter
- path: /etc/sysctl.d/k8s.conf
content: |
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
- path: /etc/containerd/config.toml
content: |
version = 2
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
# Update existing containerd config to enable registry config_path
- path: /etc/containerd/config.toml
content: |
version = 2
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
# Mirror configs for each upstream registry
- path: /etc/containerd/certs.d/docker.io/hosts.toml
content: |
server = "https://registry-1.docker.io"
[host."http://${local.zot_registry_ip}:5000/v2/docker.io"]
capabilities = ["pull", "resolve"]
skip_verify = true
override_path = true
- path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml
content: |
server = "https://registry.k8s.io"
[host."http://${local.zot_registry_ip}:5000/v2/registry.k8s.io"]
capabilities = ["pull", "resolve"]
skip_verify = true
override_path = true
- path: /etc/containerd/certs.d/ghcr.io/hosts.toml
content: |
server = "https://ghcr.io"
[host."http://${local.zot_registry_ip}:5000/v2/ghcr.io"]
capabilities = ["pull", "resolve"]
skip_verify = true
override_path = true
- path: /etc/containerd/certs.d/quay.io/hosts.toml
content: |
server = "https://quay.io"
[host."http://${local.zot_registry_ip}:5000/v2/quay.io"]
capabilities = ["pull", "resolve"]
skip_verify = true
override_path = true
- path: /root/kubeadm-config.yaml
content: |
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
networking:
podSubnet: "10.244.0.0/16"
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
- path: /etc/profile.d/kubectl.sh
content: |
alias k='kubectl'
source <(kubectl completion bash)
complete -o default -F __start_kubectl k
runcmd:
- systemctl enable --now qemu-guest-agent
- systemctl enable --now ssh
# needed for nvme-tcp module
- apt-get install -y linux-modules-extra-$(uname -r)
- modprobe nvme-tcp
- echo "nvme-tcp" >> /etc/modules-load.d/nvme-tcp.conf
# relevant to kubernetes
- modprobe overlay
- modprobe br_netfilter
- sysctl --system
# containerd
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
- echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list
- apt-get update && apt-get install -y containerd.io
- systemctl restart containerd
# kubeadm/kubelet/kubectl v1.32
- curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.32/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
- echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.32/deb/ /" > /etc/apt/sources.list.d/kubernetes.list
- apt-get update && apt-get install -y kubelet kubeadm kubectl
- apt-mark hold kubelet kubeadm kubectl
# join cluster
- ${local.join_command}
EOF
network_config_node_02 = <<-EOF
version: 2
ethernets:
eth0:
match:
driver: virtio_net
addresses:
- ${local.vm_node_02_ip_address}/${local.cidr}
routes:
- to: default
via: ${local.gateway}
nameservers:
addresses:
- ${local.dns}
EOF
}
resource "libvirt_cloudinit_disk" "commoninit_node_02" {
provider = libvirt.kvm-beelink
name = "${local.vm_node_02_name}-cloudinit.iso"
user_data = local.user_data_node_02
meta_data = yamlencode({
instance-id = local.vm_node_02_name
local-hostname = local.vm_node_02_name
})
network_config = local.network_config_node_02
}
# Create a volume from the cloud-init ISO
resource "libvirt_volume" "cloudinit_node_02" {
provider = libvirt.kvm-beelink
name = "${local.vm_node_02_name}-cloudinit.iso"
pool = "default"
create = {
content = {
url = libvirt_cloudinit_disk.commoninit_node_02.path
}
}
}
resource "libvirt_domain" "node_02" {
provider = libvirt.kvm-beelink
name = local.vm_node_02_name
memory = "8192"
memory_unit = "MiB"
vcpu = 4
type = "kvm"
autostart = true
running = true
os = {
type = "hvm"
type_arch = "x86_64"
type_machine = "q35"
}
devices = {
disks = [
{
driver = {
name = "qemu"
type = "qcow2"
}
source = {
file = {
file = libvirt_volume.node_02_disk.path
}
}
target = {
dev = "vda"
bus = "virtio"
}
},
{
device = "cdrom"
driver = {
name = "qemu"
type = "raw"
}
source = {
file = {
file = libvirt_volume.cloudinit_node_02.path
}
}
target = {
dev = "sda"
bus = "sata"
}
}
]
interfaces = [
{
type = "bridge"
model = {
type = "virtio"
}
source = {
bridge = {
bridge = local.bridge
}
}
}
]
serials = [
{
type = "pty"
}
]
xml = {
xslt = <<-XSLT
<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="yes"/>
<xsl:template match="@*|node()">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>
<xsl:template match="devices">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
<serial type="pty">
<target port="0"/>
</serial>
<console type="pty">
<target type="serial" port="0"/>
</console>
<graphics type="vnc" port="-1" autoport="yes" listen="0.0.0.0">
<listen type="address" address="0.0.0.0"/>
</graphics>
<channel type="unix">
<target type="virtio" name="org.qemu.guest_agent.0"/>
</channel>
</xsl:copy>
</xsl:template>
</xsl:stylesheet>
XSLT
}
}
}
output "node_02_ip" {
value = local.vm_node_02_ip_address
}
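After `tofu apply`, the join command in runcmd should bring the VM into the cluster; from the control plane:
```bash
kubectl get nodes -o wide            # kube-node-33 should appear and become Ready
kubectl describe node kube-node-33   # kubelet / CNI conditions if it stays NotReady
```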

View File

@@ -5,10 +5,11 @@ locals {
resource "libvirt_volume" "node_01_disk" {
provider = libvirt.kvm-homer
name = "${local.vm_node_01_name}.qcow2"
pool = "default"
backing_store = {
path = libvirt_volume.ubuntu_base.path
path = libvirt_volume.ubuntu_base_homer.path
format = {
type = "qcow2"
}
@@ -22,7 +23,7 @@ resource "libvirt_volume" "node_01_disk" {
}
locals {
user_data_node = <<-EOF
user_data_node_01 = <<-EOF
#cloud-config
hostname: ${local.vm_node_01_name}
users:
@@ -43,7 +44,8 @@ locals {
- apt-transport-https
- ca-certificates
- curl
- gnupg
- gnupg
- nvme-cli
write_files:
- path: /etc/modules-load.d/k8s.conf
@@ -135,6 +137,11 @@ locals {
- systemctl enable --now qemu-guest-agent
- systemctl enable --now ssh
# needed for nvme-tcp module
- apt-get install -y linux-modules-extra-$(uname -r)
- modprobe nvme-tcp
- echo "nvme-tcp" >> /etc/modules-load.d/nvme-tcp.conf
# relevant to kubernetes
- modprobe overlay
- modprobe br_netfilter
@@ -175,8 +182,9 @@ locals {
}
resource "libvirt_cloudinit_disk" "commoninit_node_01" {
provider = libvirt.kvm-homer
name = "${local.vm_node_01_name}-cloudinit.iso"
user_data = local.user_data_node
user_data = local.user_data_node_01
meta_data = yamlencode({
instance-id = local.vm_node_01_name
local-hostname = local.vm_node_01_name
@@ -186,6 +194,7 @@ resource "libvirt_cloudinit_disk" "commoninit_node_01" {
# Create a volume from the cloud-init ISO
resource "libvirt_volume" "cloudinit_node_01" {
provider = libvirt.kvm-homer
name = "${local.vm_node_01_name}-cloudinit.iso"
pool = "default"
create = {
@@ -197,10 +206,11 @@ resource "libvirt_volume" "cloudinit_node_01" {
resource "libvirt_domain" "node_01" {
provider = libvirt.kvm-homer
name = local.vm_node_01_name
memory = "4096"
memory = "8192"
memory_unit = "MiB"
vcpu = 2
vcpu = 4
type = "kvm"
autostart = true
running = true

View File

@@ -12,6 +12,8 @@
```bash
# recreate specific resources
tofu destroy -target=libvirt_domain.ubuntu_vm -target=libvirt_volume.cloudinit
tofu destroy -target=null_resource.kubeadm_token
tofu destroy -target=libvirt_cloudinit_disk.commoninit_node_01 -target=libvirt_cloudinit_disk.commoninit_node_02
tofu apply
# taint resource to have it recreated

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long