# Single-node Kubernetes (v1.32) control-plane VM on libvirt, provisioned
# entirely via cloud-init. Container images are pulled through a local zot
# registry acting as a pull-through mirror for docker.io / registry.k8s.io /
# ghcr.io / quay.io.

locals {
  master_vm_name  = "kube-master-31"
  master_ip       = "192.168.0.31"
  gateway         = "192.168.0.4"
  dns             = "8.8.8.8"
  cidr            = 24
  bridge          = "br0"
  zot_registry_ip = "192.168.0.30" # wherever zot is running
}

# Root disk: qcow2 overlay backed by the shared Ubuntu base image
# (libvirt_volume.ubuntu_base is defined elsewhere in this configuration).
resource "libvirt_volume" "ubuntu_disk" {
  name = "${local.master_vm_name}.qcow2"
  pool = "default"

  backing_store = {
    path   = libvirt_volume.ubuntu_base.path
    format = { type = "qcow2" }
  }

  target = {
    format = { type = "qcow2" }
  }

  capacity = 21474836480 # bytes (20 GiB)
}

locals {
  # cloud-init user-data: users/SSH, kernel prereqs for Kubernetes,
  # containerd with zot registry mirrors, kubeadm init, and cilium as CNI.
  user_data = <<-EOF
    #cloud-config
    hostname: ${local.master_vm_name}
    users:
      - name: ubuntu
        sudo: ALL=(ALL) NOPASSWD:ALL
        shell: /bin/bash
        ssh_authorized_keys:
          - ${file("~/.ssh/id_rsa.pub")}
    # NOTE(review): plaintext placeholder password with ssh_pwauth enabled —
    # replace or remove before using outside a lab.
    chpasswd:
      list: |
        ubuntu:yourpassword
      expire: false
    ssh_pwauth: true
    package_update: true
    packages:
      - qemu-guest-agent
      - openssh-server
      - apt-transport-https
      - ca-certificates
      - curl
      - gnupg
    write_files:
      - path: /etc/modules-load.d/k8s.conf
        content: |
          overlay
          br_netfilter
      - path: /etc/sysctl.d/k8s.conf
        content: |
          net.bridge.bridge-nf-call-iptables = 1
          net.bridge.bridge-nf-call-ip6tables = 1
          net.ipv4.ip_forward = 1
      # containerd config: systemd cgroups + registry config_path for the
      # mirror definitions below. (This file was previously listed twice in
      # write_files; only this final, complete version is kept — the earlier
      # copy without the registry section was overwritten anyway.)
      - path: /etc/containerd/config.toml
        content: |
          version = 2
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
            runtime_type = "io.containerd.runc.v2"
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            SystemdCgroup = true
          [plugins."io.containerd.grpc.v1.cri".registry]
            config_path = "/etc/containerd/certs.d"
      # Mirror configs for each upstream registry
      - path: /etc/containerd/certs.d/docker.io/hosts.toml
        content: |
          server = "https://registry-1.docker.io"
          [host."http://${local.zot_registry_ip}:5000/v2/docker.io"]
            capabilities = ["pull", "resolve"]
            skip_verify = true
            override_path = true
      - path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml
        content: |
          server = "https://registry.k8s.io"
          [host."http://${local.zot_registry_ip}:5000/v2/registry.k8s.io"]
            capabilities = ["pull", "resolve"]
            skip_verify = true
            override_path = true
      - path: /etc/containerd/certs.d/ghcr.io/hosts.toml
        content: |
          server = "https://ghcr.io"
          [host."http://${local.zot_registry_ip}:5000/v2/ghcr.io"]
            capabilities = ["pull", "resolve"]
            skip_verify = true
            override_path = true
      - path: /etc/containerd/certs.d/quay.io/hosts.toml
        content: |
          server = "https://quay.io"
          [host."http://${local.zot_registry_ip}:5000/v2/quay.io"]
            capabilities = ["pull", "resolve"]
            skip_verify = true
            override_path = true
      - path: /root/kubeadm-config.yaml
        content: |
          apiVersion: kubeadm.k8s.io/v1beta3
          kind: InitConfiguration
          nodeRegistration:
            criSocket: unix:///run/containerd/containerd.sock
          ---
          apiVersion: kubeadm.k8s.io/v1beta3
          kind: ClusterConfiguration
          networking:
            podSubnet: "10.244.0.0/16"
          ---
          apiVersion: kubelet.config.k8s.io/v1beta1
          kind: KubeletConfiguration
          cgroupDriver: systemd
      - path: /etc/profile.d/kubectl.sh
        content: |
          alias k='kubectl'
          source <(kubectl completion bash)
          complete -o default -F __start_kubectl k
    runcmd:
      - systemctl enable --now qemu-guest-agent
      - systemctl enable --now ssh
      # kernel modules / sysctls required by kubernetes
      - modprobe overlay
      - modprobe br_netfilter
      - sysctl --system
      # containerd from the Docker apt repo; make sure the keyring
      # directory exists before writing into it
      - install -m 0755 -d /etc/apt/keyrings
      - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
      - echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list
      - apt-get update && apt-get install -y containerd.io
      - systemctl restart containerd
      # kubeadm/kubelet/kubectl v1.32
      - curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.32/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
      - echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.32/deb/ /" > /etc/apt/sources.list.d/kubernetes.list
      - apt-get update && apt-get install -y kubelet kubeadm kubectl
      - apt-mark hold kubelet kubeadm kubectl
      # init cluster; kube-proxy addon is skipped because cilium replaces it
      - kubeadm init --config=/root/kubeadm-config.yaml --skip-phases=addon/kube-proxy
      # kubeconfig for root
      - mkdir -p /root/.kube && cp /etc/kubernetes/admin.conf /root/.kube/config
      # wait for API server
      - |
        echo "Waiting for API server..."
        until kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes ; do
          echo "Waiting for API server..."
          sleep 5
        done
      # CNI (cilium example, swap for flannel/calico as needed)
      - |
        CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
        curl -L --remote-name-all https://github.com/cilium/cilium-cli/releases/download/$${CILIUM_CLI_VERSION}/cilium-linux-amd64.tar.gz
        tar xzvf cilium-linux-amd64.tar.gz -C /usr/local/bin
        cilium install --kubeconfig=/etc/kubernetes/admin.conf --set kubeProxyReplacement=true --wait
  EOF

  # netplan v2 static addressing; matches the virtio NIC attached below.
  network_config = <<-EOF
    version: 2
    ethernets:
      eth0:
        match:
          driver: virtio_net
        addresses:
          - ${local.master_ip}/${local.cidr}
        routes:
          - to: default
            via: ${local.gateway}
        nameservers:
          addresses:
            - ${local.dns}
  EOF
}

# cloud-init seed ISO (user-data + meta-data + network-config)
resource "libvirt_cloudinit_disk" "commoninit" {
  name      = "${local.master_vm_name}-cloudinit.iso"
  user_data = local.user_data
  meta_data = yamlencode({
    "instance-id"    = local.master_vm_name
    "local-hostname" = local.master_vm_name
  })
  network_config = local.network_config
}

# Create a volume from the cloud-init ISO so the domain can attach it as a CD.
resource "libvirt_volume" "cloudinit" {
  name = "${local.master_vm_name}-cloudinit.iso"
  pool = "default"

  create = {
    content = {
      url = libvirt_cloudinit_disk.commoninit.path
    }
  }
}

resource "libvirt_domain" "master" {
  name        = local.master_vm_name
  memory      = "2048"
  memory_unit = "MiB"
  vcpu        = 2
  type        = "kvm"
  autostart   = true
  running     = true

  os = {
    type         = "hvm"
    type_arch    = "x86_64"
    type_machine = "q35"
  }

  devices = {
    disks = [
      # Root disk
      {
        driver = { name = "qemu", type = "qcow2" }
        source = { file = { file = libvirt_volume.ubuntu_disk.path } }
        target = { dev = "vda", bus = "virtio" }
      },
      # cloud-init seed ISO
      {
        device = "cdrom"
        driver = { name = "qemu", type = "raw" }
        source = { file = { file = libvirt_volume.cloudinit.path } }
        target = { dev = "sda", bus = "sata" }
      },
    ]

    interfaces = [
      {
        type   = "bridge"
        model  = { type = "virtio" }
        source = { bridge = { bridge = local.bridge } }
      },
    ]

    serials = [
      { type = "pty" }
    ]

    # NOTE(review): the XSLT stylesheet below is empty — looks like a leftover
    # customization hook. Confirm the provider tolerates an empty document,
    # or remove this block.
    xml = {
      xslt = <<-XSLT
      XSLT
    }
  }
}

output "master_ip" {
  value = local.master_ip
}