Compare commits

..

2 Commits

Author SHA1 Message Date
Jan Novak
0fee1b879a terraform: create ubuntu vms and install kubernetes with kubeadm 2025-12-29 14:32:30 +01:00
Jan Novak
acfe11bf74 vagrant: lower memory setup, remove extra disk on node 2025-12-28 17:53:18 +01:00
12 changed files with 822 additions and 5 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
.terraform/

View File

@@ -0,0 +1,57 @@
# This file is maintained automatically by "tofu init".
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/dmacvicar/libvirt" {
version = "0.9.1"
constraints = "~> 0.8"
hashes = [
"h1:94QTfgc4WIynVQPmag+oVlrEP0po7YttSzND7u56Q0c=",
"zh:0480649879c36dff007d4e510ff68b473c24703c66649622739f75fa6303c9df",
"zh:320ebb4758739d53054e81cd3eec1bb1dfebc9cda32c6b40b9820cd0a12b21ea",
"zh:339365c8bd5e02129daa210138af6c9c9e8e15622798d248db46963d4c39c0dd",
"zh:4f280769ba65eaee764a64c8d119d96b8ac53425a5298e94e1976c7bd00569ae",
"zh:7381c242accfc030bab88cdf8c06afa950ed5294c28278d101c7bbc93b57fb31",
"zh:83ea6c39bc62024958a3830d2d940210969eb80541a0288cd53d5dcb3a0d664a",
"zh:8c000e8b5d0526cabbf30fab6aad1fba3f828cb59b18244379999069ed51d605",
"zh:8e0d66ed50aa1737e2fb03b29694ddb5dcb93bbafbb521d1b571e78709f49c7d",
"zh:9161fddae518f7d6e0dbf8fcf7d6686d660ec47f8fa90dbcab22e293fb3ad785",
"zh:b2e80faecf4ba0b361a98036856027b0c08480a59581e8f461b0637b251bab2c",
"zh:b2fb50332d71dce8158be69ef53d3a87436bb5103ea3939c1ce81665298f9e48",
"zh:cf22d7c5b42cfcfc101392735a4c7f2ee8518942a257e524acd48a0a5e02ae08",
"zh:f5156222e08403b1df5a9b707bacf8f1caabaf9580b69794121b252e3bd93cd5",
"zh:f7eac58fdccc1ae378ee1c6c0bb9185455b18dcd9b367aeed7067f681ecc372f",
]
}
provider "registry.opentofu.org/hashicorp/local" {
version = "2.6.1"
hashes = [
"h1:+XfQ7VmNtYMp0eOnoQH6cZpSMk12IP1X6tEkMoMGQ/A=",
"zh:0416d7bf0b459a995cf48f202af7b7ffa252def7d23386fc05b34f67347a22ba",
"zh:24743d559026b59610eb3d9fa9ec7fbeb06399c0ef01272e46fe5c313eb5c6ff",
"zh:2561cdfbc90090fee7f844a5cb5cbed8472ce264f5d505acb18326650a5b563f",
"zh:3ebc3f2dc7a099bd83e5c4c2b6918e5b56ec746766c58a31a3f5d189cb837db5",
"zh:490e0ce925fc3848027e10017f960e9e19e7f9c3b620524f67ce54217d1c6390",
"zh:bf08934295877f831f2e5f17a0b3ebb51dd608b2509077f7b22afa7722e28950",
"zh:c298c0f72e1485588a73768cb90163863b6c3d4c71982908c219e9b87904f376",
"zh:cedbaed4967818903ef378675211ed541c8243c4597304161363e828c7dc3d36",
"zh:edda76726d7874128cf1e182640c332c5a5e6a66a053c0aa97e2a0e4267b3b92",
]
}
provider "registry.opentofu.org/hashicorp/null" {
version = "3.2.4"
hashes = [
"h1:i+WKhUHL2REY5EGmiHjfUljJB8UKZ9QdhdM5uTeUhC4=",
"zh:1769783386610bed8bb1e861a119fe25058be41895e3996d9216dd6bb8a7aee3",
"zh:32c62a9387ad0b861b5262b41c5e9ed6e940eda729c2a0e58100e6629af27ddb",
"zh:339bf8c2f9733fce068eb6d5612701144c752425cebeafab36563a16be460fb2",
"zh:36731f23343aee12a7e078067a98644c0126714c4fe9ac930eecb0f2361788c4",
"zh:3d106c7e32a929e2843f732625a582e562ff09120021e510a51a6f5d01175b8d",
"zh:74bcb3567708171ad83b234b92c9d63ab441ef882b770b0210c2b14fdbe3b1b6",
"zh:90b55bdbffa35df9204282251059e62c178b0ac7035958b93a647839643c0072",
"zh:ae24c0e5adc692b8f94cb23a000f91a316070fdc19418578dcf2134ff57cf447",
"zh:b5c10d4ad860c4c21273203d1de6d2f0286845edf1c64319fa2362df526b5f58",
"zh:e05bbd88e82e1d6234988c85db62fd66f11502645838fff594a2ec25352ecd80",
]
}

View File

@@ -0,0 +1,29 @@
terraform {
  required_providers {
    libvirt = {
      source  = "dmacvicar/libvirt"
      version = "~> 0.8"
    }
  }
}

provider "libvirt" {
  # Remote libvirt daemon on the hypervisor host, reached over SSH.
  uri = "qemu+ssh://novakj@192.168.0.7/system"
}

# Base Ubuntu 24.04 (noble) cloud image, downloaded into the default pool.
# Per-VM disks are created as qcow2 overlays backed by this volume.
resource "libvirt_volume" "ubuntu_base" {
  name = "ubuntu-24.04-base.qcow2"
  pool = "default"

  create = {
    content = {
      url = "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img"
    }
  }

  target = {
    format = {
      type = "qcow2"
    }
  }
}

View File

@@ -0,0 +1 @@
kubeadm join 192.168.0.31:6443 --token ps457c.7g4koxq4awqigdwy --discovery-token-ca-cert-hash sha256:bd969ebcf4c68c424a76c8fc483f194ab8b32a2fd52f1cf992e164f496907447

View File

@@ -0,0 +1,58 @@
# Wait for the control plane to come up, then generate a fresh worker join
# command and copy it to the local working directory.
resource "null_resource" "kubeadm_token" {
  depends_on = [libvirt_domain.master]

  provisioner "remote-exec" {
    inline = [
      "until sudo kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes; do sleep 5; done",
      "sudo kubeadm token create --print-join-command > /tmp/join-command.txt"
    ]
    connection {
      type        = "ssh"
      host        = local.master_ip
      user        = "ubuntu"
      private_key = file("~/.ssh/id_rsa")
      timeout     = "10m" # connection timeout
    }
  }

  provisioner "local-exec" {
    command = "scp -o StrictHostKeyChecking=no ubuntu@${local.master_ip}:/tmp/join-command.txt ./join-command.txt"
  }
}

# The join command written by null_resource.kubeadm_token above.
data "local_file" "join_command" {
  depends_on = [null_resource.kubeadm_token]
  filename   = "./join-command.txt"
}

# get kubeconfig and store it locally
resource "null_resource" "kubeconfig" {
  # Explicit dependency: local.master_ip is a plain string, so Terraform cannot
  # infer that this provisioner must run only after the master VM exists.
  depends_on = [libvirt_domain.master]

  provisioner "remote-exec" {
    inline = [
      "until sudo ls -la /etc/kubernetes/admin.conf; do sleep 5; done",
      "sudo cp /etc/kubernetes/admin.conf /tmp/admin.conf",
      "sudo chown $(id -u):$(id -g) /tmp/admin.conf",
      # Rewrite server address from localhost/internal to accessible IP
      "sudo sed -i 's|server: https://.*:6443|server: https://${local.master_ip}:6443|' /tmp/admin.conf"
    ]
    connection {
      type        = "ssh"
      host        = local.master_ip
      user        = "ubuntu"
      private_key = file("~/.ssh/id_rsa")
    }
  }

  provisioner "local-exec" {
    command = "scp -o StrictHostKeyChecking=no ubuntu@${local.master_ip}:/tmp/admin.conf ./kubeconfig"
  }
}

locals {
  # parse: kubeadm join 192.168.1.10:6443 --token xxx --discovery-token-ca-cert-hash sha256:yyy
  join_command = trimspace(data.local_file.join_command.content)
}

View File

@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCVENDQWUyZ0F3SUJBZ0lJSWh2TjVRZFNKUnd3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TlRFeU1qa3hNekUyTXpGYUZ3MHpOVEV5TWpjeE16SXhNekZhTUJVeApFekFSQmdOVkJBTVRDbXQxWW1WeWJtVjBaWE13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLCkFvSUJBUUR6c2NVNnpMUWd0RFVkeHV3TUdPeFZqdzVhNVNCRzJrWEtWcHJnWEsyT1Yrd0IrRFFUK3JsUjVCM1QKSTFpd3BUMG5EcG5FcCtOM0VNYkg0T0RhWERhZkNrMUIwcXFDbW9LSXViT1V5TEZ1bmNNd3BJdW9ZOXJDN3puOQpmYVFabFNYeks5b1gzcUc4WjZCWTE4L3c3WHMybVJMYWgxVDQ5cGVwOEp0dGYyKy90dkFqcVBUU3NtVDVHTHErCmNvOXlVNUNDQUMwM0ZPakpIZU9PMEsyTVYycEZPc2ZoUGMxamJsR1VjNHdiVDU0WTR1em1LNjJTM3EyYm9PenQKcHI2RHZkbmJwZHcrR1lJQXZjVWtjMnZtODl1MXVtd0RNWUhYd3A3UWM4N2x1MGZSeHByUjdqLyt0MEVrSlVTWApuV25YVzhnNnhtYm5rakVISDUwaTdXakRMMUw5QWdNQkFBR2pXVEJYTUE0R0ExVWREd0VCL3dRRUF3SUNwREFQCkJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJUTnBEYVptMmpSUmpjRVNhbDltMC92SkpZalhUQVYKQmdOVkhSRUVEakFNZ2dwcmRXSmxjbTVsZEdWek1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQ1ZvT25DUnlBLwowUXBMYkt5Y2VPallFeWFBYXRwbU5lTTJRNmJOVklzNnhlaHBPSWQ1NWQzZGdTR05TSXZtcllwVHYwNHNhZVo3CjF3VTBndHVtSVN6U2V5OGNiZXp1bGNIamlmT0dhYVpESS9OSXdoVStPSW5qcG9jTFNONTliZDJrNXV3VVV6dHMKU1UrWXNHT1NnNE84YlRyWUxwSUc3SEMyWGFiQmJCeXpLV3ZNc0t3bGtORFZPSnNLMENZRklBTnEvNWZmNC96NwpPd1c2bXJTTkwxM25wV2JLNVd0REtlUVlzcXdZVGhnM2E3MjJwVHc3U2l5bjFtSkFLUzVrMSttMG43WlNUc2dHCjVwLzg3Q2NrZ01xNHJaRml3K0VJL09mbW0vRk9scXhmYjVjR1RCdy9QZTRwQ1NNUUVYV2RjUTNHbVE1bnUvMHEKLy84L2RlL244VlFBCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
server: https://192.168.0.31:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubernetes-admin
name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLVENDQWhHZ0F3SUJBZ0lJR0lTZmJsakh4akV3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TlRFeU1qa3hNekUyTXpGYUZ3MHlOakV5TWpreE16SXhNekZhTUR3eApIekFkQmdOVkJBb1RGbXQxWW1WaFpHMDZZMngxYzNSbGNpMWhaRzFwYm5NeEdUQVhCZ05WQkFNVEVHdDFZbVZ5CmJtVjBaWE10WVdSdGFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDeGV1eHEKQkpxbmozR2R3ZWdsYVd1R0pKL0FFVTFNYTQ2V2Z3Si9FWTB1Z216VDdmREZvaS9vbUtWblUvZmF5ZHVjVmVQMApEYTd6d2lKODFZWTdMZTdwUis4MHlUNVJ6WWVsMmtIeEpQWS9OTXVEeXk2SzdZT2E4NVRSZVg2dmNjTlZncGt1CnkzSE5vbmNLcGNWay8vZlRKUXFaVmtpdmFNcS90bUszQjFvYlRLR2pzY1M4TmNpRnNFS09iU3ZWdUpXOTcySFcKUTBMNUFCNWU4SGd5TlFHeEE2VGE2by9EZFQyZmJGby91ZkRKUU4relhXQzRtTTRMajdZNDhxMnMyLzUrSUg3OQpHY2ZLcXp6bE5aZjFHSHFwczdOc1pYVUhqaFZIdHU5VnNteWpjY2JENWREYVNyRWowS0tMNWhyM3BtWDZoYlVQCnJWQVkyRS9PbU1xTUxjT1BBZ01CQUFHalZqQlVNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUsKQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRk0ya05wbWJhTkZHTndSSgpxWDJiVCs4a2xpTmRNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUNGMGdzUU9YM1ZYZTEwY1ZpVVplblpaQ1ZyCjkrdFhvT2pJcDlxRDU4eHhRSzZSMmhBTmhRa0s4Z2ZrbUpkaDJsNDcvaVgvQVF6MnhTdVhNVUhoRlpVYlEzb08KaXkvamZNZGJNZ24wRzdmYVQ3Vm0xYnIzTkpyMFJQbjVPRkNpZTRtclN4c0hXQVJnL3o4Q3hLalIxb1FwamU1dwpsNFdlcXhFckx2MHRDYnNneTZndldBL3I5eGVSeTBIbHE4bG9yOWdZejJlaUxJSFU3UStRRXdyU3B5akdDQjRNCnlhZGdqT0x5bzNidHhYWjRXQ2JMV2kwOFZCSWJIWThVWC83QmN6YU9wdm9oOTVDTE9VVWY0RlV3MitoUlVhWDIKbjRTbTJwdE1TSmNKdFBabDgzN3A2aGNtYkQ0Y2VnMHlMSGhZVGJSYnhkVFZTM0hxSHhFS0JXdTRwaTFwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBc1hyc2FnU2FwNDl4bmNIb0pXbHJoaVNmd0JGTlRHdU9sbjhDZnhHTkxvSnMwKzN3CnhhSXY2SmlsWjFQMzJzbmJuRlhqOUEydTg4SWlmTldHT3kzdTZVZnZOTWsrVWMySHBkcEI4U1QyUHpUTGc4c3UKaXUyRG12T1UwWGwrcjNIRFZZS1pMc3R4emFKM0NxWEZaUC8zMHlVS21WWklyMmpLdjdaaXR3ZGFHMHlobzdIRQp2RFhJaGJCQ2ptMHIxYmlWdmU5aDFrTkMrUUFlWHZCNE1qVUJzUU9rMnVxUHczVTluMnhhUDdud3lVRGZzMTFnCnVKak9DNCsyT1BLdHJOditmaUIrL1JuSHlxczg1VFdYOVJoNnFiT3piR1YxQjQ0VlI3YnZWYkpzbzNIR3crWFEKMmtxeEk5Q2lpK1lhOTZabCtvVzFENjFRR05oUHpwaktqQzNEandJREFRQUJBb0lCQUI3SnpVTmpBeFU0VENScQo1QVBCY3ZhMVdjVXUrekxib2NPM3lzNFNWR1NTR2FEMk1RT0g3RG0wYVZVd2V0bklsTUZkSVF0UWFGNCsrazZmCm1BYWlKRVR4eHF4b2tOZ09DSThiK3I2RGcvaFpCMGtZc1QxR2tNTUxxSEJpWERqQlNucUdDbDdVeWl3N0Y5MlAKL2x0VUVFRGZ0eVA4bE9WaEQzTU1oN2xxbkJZSi9qTzlDbitsWWJiZnJhcitEZUpTQzhiUDZWb1hjVFlacHpoSworeUZmNW1vdjZDMTdHeTMrb1E1ZDJMWG5aL3Exb2k5YjJ6VFBhUUREaTJndStnT0I4U0hWam5FMGFpeHRwc2NpCklGSW1sZlh0WXltSy9KQ3kvMCtkdDF1cEhIUnc1ZXRtMmRmdXNKUS9JdkhxU2JyVVRoVllVRjZ3RXZrWk9BaGYKQVNvVldxRUNnWUVBMExHaTA5ejA3dS91aFlPenNNQVV5eVFTakN3U3hBbTFLcmhqVXNsSis0TVJRci9QdlVRRwpmSGZTWm1UeTdLbjZIOS9PL2dmNTFQb25KOHpOWHVqUFcvdGRCWTlHT2Z2aE9LS1lieWo5QWpPUXdFREhTblFpClhuejBwV3BvYUIvcWU5cGdYSk44U000b2lEbW1XdmJKTk52TXpOTHlpQzl2Rkpwc1VtTmJXVEVDZ1lFQTJiWDUKL3BIWFh0WklZVkJEc2ZrenNwK0xSZVF0SWtrNjcwaVBMNGplbVdzcUQzc2N5ZllnUWYvSytKeURwWkd5SXhWSQprYVZxZlRmeUlqWEh1SllvOG45ZTFvVnRoT3E4dEVpSnd2MXg2WWc3anpIZjB4U1hHdEtwYytJTDQ3MFRMMWdiCitqRTQva2Q2SS9ybUhja2NPRStaTzAzRDZIZk9mU3FhWFZiYnVMOENnWUVBbHBLa0RjS05nbHZndDdwWGlyNTgKQWptTG5GRXNWUDlBRkl1bk5oTC9heFdjL1pFUlhOaFk2Yk0yUGlTMTFTV3F6eGFXN1ZocDNFMkV0TVdpbzhqTAoyK3pYcCt4QTJoTXU0OE1Fd3FkTExTeHhVeFVwOVVFbmp3OFBJTTVPZUZvV3Y3Zkc4T1NFdHE3d05hR0tzblQrCnBVRzFXVThXS0VZNUdoOTA2bGkvanZFQ2dZQWlEb0dJL1M5L3VtMmdLM29yZ1J4Qk9Rd0VadWxZajdRVWNubFQKcWttSUhtZDUvSHhJYTZRb0x1ZUZkSlp6ZEJNMXdjS1VaaGYrZHAzK0ZBT21NSGMvV0FWeDhYVnNjWHdMaDcxbApMMWgrZTcwMWJVdmlMVHBtQzhaT1JuSXRzZ29xUWRJK2xTeWhPblUwNDY3VHdmNFVJUCtMYzJMT0hzL0NiTU94CkRrRlk2UUtCZ0RYLzUwV1MzSVZuejM4TFl
kcnpCSC9OUXAyMzJpcmxaekwzZ0E2VVJ6SVpUU1ZKRWUrd2RmY3QKQXZPQlpEa2hYZ00rcm1XU2FjUmhCY3ZHVysrUDVTVy9jbkNFTUd4cC95L3VaeVpmS01Kd3VHZStzZWZhbVJkWgowVVpYZzRtdFhLdndCSCtCMks0dWJTSXZQYzI2TTJyVFNYVUtqTnAyUkwycjFCYmVoK1Q3Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==

View File

@@ -0,0 +1,327 @@
# Shared settings for the control-plane VM. gateway, dns, cidr, bridge and
# zot_registry_ip are also referenced by the worker-node configuration.
locals {
master_vm_name = "kube-master-31"
master_ip = "192.168.0.31"
gateway = "192.168.0.4"
dns = "8.8.8.8"
# network prefix length used in the netplan address (e.g. 192.168.0.31/24)
cidr = 24
# host bridge the VM NICs attach to
bridge = "br0"
zot_registry_ip = "192.168.0.30" # wherever zot is running
}
# Copy-on-write root disk for the master VM, layered on the shared base image.
resource "libvirt_volume" "ubuntu_disk" {
  name = "${local.master_vm_name}.qcow2"
  pool = "default"

  # qcow2 overlay on top of the Ubuntu cloud image volume
  backing_store = {
    path = libvirt_volume.ubuntu_base.path
    format = {
      type = "qcow2"
    }
  }

  target = {
    format = {
      type = "qcow2"
    }
  }

  capacity = 21474836480 # 20 GiB
}
locals {
  # cloud-init user-data for the control-plane node: base packages, containerd
  # configured with pull-through mirrors pointing at the local zot registry,
  # then kubeadm init and Cilium as CNI (replacing kube-proxy).
  user_data = <<-EOF
    #cloud-config
    hostname: ${local.master_vm_name}
    users:
      - name: ubuntu
        sudo: ALL=(ALL) NOPASSWD:ALL
        shell: /bin/bash
        ssh_authorized_keys:
          - ${file("~/.ssh/id_rsa.pub")}
    # NOTE(review): plaintext placeholder password with ssh_pwauth enabled --
    # replace or remove before using this outside an isolated lab network.
    chpasswd:
      list: |
        ubuntu:yourpassword
      expire: false
    ssh_pwauth: true
    package_update: true
    packages:
      - qemu-guest-agent
      - openssh-server
      - apt-transport-https
      - ca-certificates
      - curl
      - gnupg
    write_files:
      - path: /etc/modules-load.d/k8s.conf
        content: |
          overlay
          br_netfilter
      - path: /etc/sysctl.d/k8s.conf
        content: |
          net.bridge.bridge-nf-call-iptables = 1
          net.bridge.bridge-nf-call-ip6tables = 1
          net.ipv4.ip_forward = 1
      # containerd: systemd cgroups + registry mirror directory. (Previously
      # this path was listed twice; cloud-init only kept the last entry, so
      # the first one was dead weight and has been removed.)
      - path: /etc/containerd/config.toml
        content: |
          version = 2
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          runtime_type = "io.containerd.runc.v2"
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
          SystemdCgroup = true
          [plugins."io.containerd.grpc.v1.cri".registry]
          config_path = "/etc/containerd/certs.d"
      # Mirror configs for each upstream registry
      - path: /etc/containerd/certs.d/docker.io/hosts.toml
        content: |
          server = "https://registry-1.docker.io"
          [host."http://${local.zot_registry_ip}:5000/v2/docker.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = true
          override_path = true
      - path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml
        content: |
          server = "https://registry.k8s.io"
          [host."http://${local.zot_registry_ip}:5000/v2/registry.k8s.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = true
          override_path = true
      - path: /etc/containerd/certs.d/ghcr.io/hosts.toml
        content: |
          server = "https://ghcr.io"
          [host."http://${local.zot_registry_ip}:5000/v2/ghcr.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = true
          override_path = true
      - path: /etc/containerd/certs.d/quay.io/hosts.toml
        content: |
          server = "https://quay.io"
          [host."http://${local.zot_registry_ip}:5000/v2/quay.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = true
          override_path = true
      - path: /root/kubeadm-config.yaml
        content: |
          apiVersion: kubeadm.k8s.io/v1beta3
          kind: InitConfiguration
          nodeRegistration:
            criSocket: unix:///run/containerd/containerd.sock
          ---
          apiVersion: kubeadm.k8s.io/v1beta3
          kind: ClusterConfiguration
          networking:
            podSubnet: "10.244.0.0/16"
          ---
          apiVersion: kubelet.config.k8s.io/v1beta1
          kind: KubeletConfiguration
          cgroupDriver: systemd
      - path: /etc/profile.d/kubectl.sh
        content: |
          alias k='kubectl'
          source <(kubectl completion bash)
          complete -o default -F __start_kubectl k
    runcmd:
      - systemctl enable --now qemu-guest-agent
      - systemctl enable --now ssh
      # kernel modules and sysctls required by kubernetes
      - modprobe overlay
      - modprobe br_netfilter
      - sysctl --system
      # containerd from the Docker apt repository
      - mkdir -p /etc/apt/keyrings
      - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
      - echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list
      - apt-get update && apt-get install -y containerd.io
      - systemctl restart containerd
      # kubeadm/kubelet/kubectl v1.32
      - curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.32/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
      - echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.32/deb/ /" > /etc/apt/sources.list.d/kubernetes.list
      - apt-get update && apt-get install -y kubelet kubeadm kubectl
      - apt-mark hold kubelet kubeadm kubectl
      # init cluster (kube-proxy skipped; Cilium replaces it below)
      - kubeadm init --config=/root/kubeadm-config.yaml --skip-phases=addon/kube-proxy
      # kubeconfig for root
      - mkdir -p /root/.kube && cp /etc/kubernetes/admin.conf /root/.kube/config
      # wait for API server
      - |
        echo "Waiting for API server..."
        until kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes ; do
          echo "Waiting for API server..."
          sleep 5
        done
      # CNI (cilium example, swap for flannel/calico as needed)
      - |
        CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
        curl -L --remote-name-all https://github.com/cilium/cilium-cli/releases/download/$${CILIUM_CLI_VERSION}/cilium-linux-amd64.tar.gz
        tar xzvf cilium-linux-amd64.tar.gz -C /usr/local/bin
        cilium install --kubeconfig=/etc/kubernetes/admin.conf --set kubeProxyReplacement=true --wait
    EOF

  # netplan v2 network-config: static address on the bridged virtio NIC.
  network_config = <<-EOF
    version: 2
    ethernets:
      eth0:
        match:
          driver: virtio_net
        addresses:
          - ${local.master_ip}/${local.cidr}
        routes:
          - to: default
            via: ${local.gateway}
        nameservers:
          addresses:
            - ${local.dns}
    EOF
}
# Cloud-init seed ISO for the master (user-data + meta-data + network-config).
resource "libvirt_cloudinit_disk" "commoninit" {
  name      = "${local.master_vm_name}-cloudinit.iso"
  user_data = local.user_data
  meta_data = yamlencode({
    instance-id    = local.master_vm_name
    local-hostname = local.master_vm_name
  })
  network_config = local.network_config
}

# Create a volume from the cloud-init ISO
resource "libvirt_volume" "cloudinit" {
  name = "${local.master_vm_name}-cloudinit.iso"
  pool = "default"

  create = {
    content = {
      url = libvirt_cloudinit_disk.commoninit.path
    }
  }
}
# Control-plane VM: root disk overlay + cloud-init ISO, bridged virtio NIC,
# serial console, and an XSLT patch for VNC / guest-agent devices.
resource "libvirt_domain" "master" {
name = local.master_vm_name
memory = "2048"
memory_unit = "MiB"
vcpu = 2
type = "kvm"
autostart = true
running = true
os = {
type = "hvm"
type_arch = "x86_64"
type_machine = "q35"
}
devices = {
# vda: root disk (qcow2 overlay); sda: cloud-init seed ISO on SATA.
disks = [
{
driver = {
name = "qemu"
type = "qcow2"
}
source = {
file = {
file = libvirt_volume.ubuntu_disk.path
}
}
target = {
dev = "vda"
bus = "virtio"
}
},
{
device = "cdrom"
driver = {
name = "qemu"
type = "raw"
}
source = {
file = {
file = libvirt_volume.cloudinit.path
}
}
target = {
dev = "sda"
bus = "sata"
}
}
]
# Attach the NIC to the host bridge (local.bridge) so the VM gets a LAN IP.
interfaces = [
{
type = "bridge"
model = {
type = "virtio"
}
source = {
bridge = {
bridge = local.bridge
}
}
}
]
serials = [
{
type = "pty"
}
]
# Post-process the generated libvirt XML: add a serial console, a VNC
# graphics device listening on all addresses, and the qemu guest-agent
# channel (needed for `systemctl enable --now qemu-guest-agent` to be useful).
xml = {
xslt = <<-XSLT
<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="yes"/>
<xsl:template match="@*|node()">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>
<xsl:template match="devices">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
<serial type="pty">
<target port="0"/>
</serial>
<console type="pty">
<target type="serial" port="0"/>
</console>
<graphics type="vnc" port="-1" autoport="yes" listen="0.0.0.0">
<listen type="address" address="0.0.0.0"/>
</graphics>
<channel type="unix">
<target type="virtio" name="org.qemu.guest_agent.0"/>
</channel>
</xsl:copy>
</xsl:template>
</xsl:stylesheet>
XSLT
}
}
}
# Convenience output: the statically assigned control-plane address.
output "master_ip" {
value = local.master_ip
}

View File

@@ -0,0 +1,304 @@
# Worker node 01 identity; gateway/dns/cidr/bridge are reused from the
# locals defined alongside the master VM.
locals {
vm_node_01_name = "kube-node-32"
vm_node_01_ip_address = "192.168.0.32"
}
# Copy-on-write root disk for the worker, backed by the shared base image.
resource "libvirt_volume" "node_01_disk" {
name = "${local.vm_node_01_name}.qcow2"
pool = "default"
backing_store = {
path = libvirt_volume.ubuntu_base.path
format = {
type = "qcow2"
}
}
target = {
format = {
type = "qcow2"
}
}
# 20 GiB
capacity = 21474836480
}
locals {
  # cloud-init user-data for worker node 01: same base setup as the master
  # (containerd with zot mirrors, kubelet/kubeadm/kubectl), but instead of
  # `kubeadm init` it runs the join command generated on the master.
  user_data_node = <<-EOF
    #cloud-config
    hostname: ${local.vm_node_01_name}
    users:
      - name: ubuntu
        sudo: ALL=(ALL) NOPASSWD:ALL
        shell: /bin/bash
        ssh_authorized_keys:
          - ${file("~/.ssh/id_rsa.pub")}
    # NOTE(review): plaintext placeholder password with ssh_pwauth enabled --
    # replace or remove before using this outside an isolated lab network.
    chpasswd:
      list: |
        ubuntu:yourpassword
      expire: false
    ssh_pwauth: true
    package_update: true
    packages:
      - qemu-guest-agent
      - openssh-server
      - apt-transport-https
      - ca-certificates
      - curl
      - gnupg
    write_files:
      - path: /etc/modules-load.d/k8s.conf
        content: |
          overlay
          br_netfilter
      - path: /etc/sysctl.d/k8s.conf
        content: |
          net.bridge.bridge-nf-call-iptables = 1
          net.bridge.bridge-nf-call-ip6tables = 1
          net.ipv4.ip_forward = 1
      # containerd: systemd cgroups + registry mirror directory. (Previously
      # this path was listed twice; cloud-init only kept the last entry, so
      # the first one was dead weight and has been removed.)
      - path: /etc/containerd/config.toml
        content: |
          version = 2
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          runtime_type = "io.containerd.runc.v2"
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
          SystemdCgroup = true
          [plugins."io.containerd.grpc.v1.cri".registry]
          config_path = "/etc/containerd/certs.d"
      # Mirror configs for each upstream registry
      - path: /etc/containerd/certs.d/docker.io/hosts.toml
        content: |
          server = "https://registry-1.docker.io"
          [host."http://${local.zot_registry_ip}:5000/v2/docker.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = true
          override_path = true
      - path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml
        content: |
          server = "https://registry.k8s.io"
          [host."http://${local.zot_registry_ip}:5000/v2/registry.k8s.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = true
          override_path = true
      - path: /etc/containerd/certs.d/ghcr.io/hosts.toml
        content: |
          server = "https://ghcr.io"
          [host."http://${local.zot_registry_ip}:5000/v2/ghcr.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = true
          override_path = true
      - path: /etc/containerd/certs.d/quay.io/hosts.toml
        content: |
          server = "https://quay.io"
          [host."http://${local.zot_registry_ip}:5000/v2/quay.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = true
          override_path = true
      # NOTE(review): kubeadm-config.yaml appears unused on workers (the join
      # command below does not read it) -- kept for parity with the master;
      # confirm before removing.
      - path: /root/kubeadm-config.yaml
        content: |
          apiVersion: kubeadm.k8s.io/v1beta3
          kind: InitConfiguration
          nodeRegistration:
            criSocket: unix:///run/containerd/containerd.sock
          ---
          apiVersion: kubeadm.k8s.io/v1beta3
          kind: ClusterConfiguration
          networking:
            podSubnet: "10.244.0.0/16"
          ---
          apiVersion: kubelet.config.k8s.io/v1beta1
          kind: KubeletConfiguration
          cgroupDriver: systemd
      - path: /etc/profile.d/kubectl.sh
        content: |
          alias k='kubectl'
          source <(kubectl completion bash)
          complete -o default -F __start_kubectl k
    runcmd:
      - systemctl enable --now qemu-guest-agent
      - systemctl enable --now ssh
      # kernel modules and sysctls required by kubernetes
      - modprobe overlay
      - modprobe br_netfilter
      - sysctl --system
      # containerd from the Docker apt repository
      - mkdir -p /etc/apt/keyrings
      - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
      - echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list
      - apt-get update && apt-get install -y containerd.io
      - systemctl restart containerd
      # kubeadm/kubelet/kubectl v1.32
      - curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.32/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
      - echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.32/deb/ /" > /etc/apt/sources.list.d/kubernetes.list
      - apt-get update && apt-get install -y kubelet kubeadm kubectl
      - apt-mark hold kubelet kubeadm kubectl
      # join cluster; local.join_command is read from data.local_file.join_command,
      # which transitively orders this node after the master's kubeadm init.
      - ${local.join_command}
    EOF

  # netplan v2 network-config: static address on the bridged virtio NIC.
  network_config_node_01 = <<-EOF
    version: 2
    ethernets:
      eth0:
        match:
          driver: virtio_net
        addresses:
          - ${local.vm_node_01_ip_address}/${local.cidr}
        routes:
          - to: default
            via: ${local.gateway}
        nameservers:
          addresses:
            - ${local.dns}
    EOF
}
# Cloud-init seed ISO for worker node 01 (user-data + meta-data + network-config).
resource "libvirt_cloudinit_disk" "commoninit_node_01" {
name = "${local.vm_node_01_name}-cloudinit.iso"
user_data = local.user_data_node
meta_data = yamlencode({
instance-id = local.vm_node_01_name
local-hostname = local.vm_node_01_name
})
network_config = local.network_config_node_01
}
# Create a volume from the cloud-init ISO
resource "libvirt_volume" "cloudinit_node_01" {
name = "${local.vm_node_01_name}-cloudinit.iso"
pool = "default"
create = {
content = {
url = libvirt_cloudinit_disk.commoninit_node_01.path
}
}
}
# Worker VM: same device layout as the master (root overlay disk + cloud-init
# ISO, bridged virtio NIC, serial console, XSLT-injected VNC/guest-agent),
# but with 4 GiB of memory.
resource "libvirt_domain" "node_01" {
name = local.vm_node_01_name
memory = "4096"
memory_unit = "MiB"
vcpu = 2
type = "kvm"
autostart = true
running = true
os = {
type = "hvm"
type_arch = "x86_64"
type_machine = "q35"
}
devices = {
# vda: root disk (qcow2 overlay); sda: cloud-init seed ISO on SATA.
disks = [
{
driver = {
name = "qemu"
type = "qcow2"
}
source = {
file = {
file = libvirt_volume.node_01_disk.path
}
}
target = {
dev = "vda"
bus = "virtio"
}
},
{
device = "cdrom"
driver = {
name = "qemu"
type = "raw"
}
source = {
file = {
file = libvirt_volume.cloudinit_node_01.path
}
}
target = {
dev = "sda"
bus = "sata"
}
}
]
# Attach the NIC to the host bridge (local.bridge) so the VM gets a LAN IP.
interfaces = [
{
type = "bridge"
model = {
type = "virtio"
}
source = {
bridge = {
bridge = local.bridge
}
}
}
]
serials = [
{
type = "pty"
}
]
# Post-process the generated libvirt XML: add a serial console, a VNC
# graphics device listening on all addresses, and the qemu guest-agent channel.
xml = {
xslt = <<-XSLT
<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="yes"/>
<xsl:template match="@*|node()">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>
<xsl:template match="devices">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
<serial type="pty">
<target port="0"/>
</serial>
<console type="pty">
<target type="serial" port="0"/>
</console>
<graphics type="vnc" port="-1" autoport="yes" listen="0.0.0.0">
<listen type="address" address="0.0.0.0"/>
</graphics>
<channel type="unix">
<target type="virtio" name="org.qemu.guest_agent.0"/>
</channel>
</xsl:copy>
</xsl:template>
</xsl:stylesheet>
XSLT
}
}
}
# Convenience output: the statically assigned worker address.
output "node_01_ip" {
value = local.vm_node_01_ip_address
}

View File

@@ -0,0 +1,19 @@
# kubernetes
## 1: create VMs on hypervisor
## 2: install kubernetes with kubeadm
## 3: bootstrap flux
## 4: deploy the rest
### Random notes
```bash
# recreate specific resources (resource names must match the config:
# libvirt_domain.master / libvirt_domain.node_01, not libvirt_domain.ubuntu_vm)
tofu destroy -target=libvirt_domain.master -target=libvirt_volume.cloudinit
tofu apply
# taint a resource to have it recreated on the next apply
tofu taint libvirt_domain.master
```

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

10
vagrant/Vagrantfile vendored
View File

@@ -7,10 +7,10 @@ VAGRANT_BOX = "generic/ubuntu2204"
VAGRANT_BOX_VERSION = "4.3.12"
CPUS_MASTER_NODE = 2
CPUS_WORKER_NODE = 4
#MEMORY_MASTER_NODE = 2048
MEMORY_MASTER_NODE = 4096
#MEMORY_WORKER_NODE = 4096
MEMORY_WORKER_NODE = 8192
MEMORY_MASTER_NODE = 2048
#MEMORY_MASTER_NODE = 4096
MEMORY_WORKER_NODE = 4096
#MEMORY_WORKER_NODE = 8192
WORKER_NODES_COUNT = 3
@@ -86,7 +86,7 @@ Vagrant.configure(2) do |config|
libvirt.cpus = CPUS_WORKER_NODE
libvirt.storage_pool_path = '/srv/vms'
libvirt.storage :file, :size => '50G', :type => 'qcow2'
libvirt.storage :file, :size => '50G', :type => 'qcow2'
# libvirt.storage :file, :size => '50G', :type => 'qcow2'
end
node.vm.provision "shell", path: "bootstrap_kworker.sh"