Compare commits

...

16 Commits

Author SHA1 Message Date
Jan Novak
0eab64c954 hosting: some config files for host: shadow, some named conf for
utility-101-shadow vm
2026-02-20 02:16:16 +01:00
Jan Novak
be362a5ab7 gitops/cilium: configure gateway and wildcard certificate it needs 2026-02-20 02:15:02 +01:00
Jan Novak
bb9f2ae3ce docker-30: several new and forgotten config files relevant to services
running in docker
2026-02-20 02:13:55 +01:00
Jan Novak
dc947165a4 gitops/ghost: add httproute resource aka gatewayApi instead of ingress 2026-02-20 02:13:09 +01:00
Jan Novak
1cd7625220 gitops/cert-manager: add dns challenger cluster issuer, add
deployment/service with socat proxy that works around my internet
provider's meddling into dns traffic on port 53.
2026-02-20 02:11:50 +01:00
Jan Novak
409f8247e6 gitops/cert-manager: enable Gateway API support
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 01:43:04 +01:00
Jan Novak
8608696909 gitops/cilium: fix gateway.yaml indentation
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 01:04:18 +01:00
Jan Novak
6454c893cb gitops/cilium: move gateway listeners from helm values to Gateway resource
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 01:02:14 +01:00
Jan Novak
b2daa822a6 gitops/cilium: configure gateway listeners and allow routes from all namespaces
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 00:51:37 +01:00
Jan Novak
8ae7b086a5 gitops/00-crds: add Gateway API v1.2.0 CRDs for Cilium gateway support
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-17 12:17:46 +01:00
Jan Novak
4b7ed6085b gitops/cilium: enable Gateway API and add HTTPRoute for ghost
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-17 11:55:49 +01:00
Jan Novak
0d97a796e9 gitops/velero: add manifests and runbook - kustomization is yet to be
created
2026-01-17 00:07:03 +01:00
Jan Novak
b9f99c2950 gitops/plane: fix issuer on ingress 2026-01-16 13:21:15 +01:00
Jan Novak
a20ae55b8f gitops/cilium: specify which interfaces it handles to not clash with
tailscaled
2026-01-15 01:24:49 +01:00
Jan Novak
36f447c39c gitops: assorted leftovers and fixes 2026-01-14 14:49:54 +01:00
Jan Novak
76e3ff9d03 kubernetes/terraform: several updates 2026-01-14 14:49:19 +01:00
75 changed files with 13436 additions and 46 deletions

9
.gitignore vendored
View File

@@ -1,4 +1,7 @@
.terraform/
.DS_Store
./kubernetes-kvm-terraform/join-command.txt
./kubernetes-kvm-terraform/kubeconfig
.terraform/
.terraform.lock.hcl
kubernetes-kvm-terraform/join-command.txt
kubernetes-kvm-terraform/kubeconfig

View File

@@ -57,6 +57,15 @@ services:
- GITEA__server__ROOT_URL=https://gitea.home.hrajfrisbee.cz
- GITEA__security__SECRET_KEY=${GITEA_SECRET_KEY}
- GITEA__security__INTERNAL_TOKEN=${INTERNAL_TOKEN}
- GITEA__mailer__ENABLED=true
- GITEA__mailer__PROTOCOL=smtps
- GITEA__mailer__SMTP_ADDR=smtp.gmail.com
- GITEA__mailer__SMTP_PORT=465
- GITEA__mailer__USER=kacerr.cz@gmail.com
- GITEA__mailer__PASSWD=${GMAIL_GITEA_APP_PASSWORD}
- GITEA__mailer__FROM=kacerr.cz+gitea@gmail.com
- GITEA__packages__ENABLED=true
#- GITEA__storage__STORAGE_TYPE=minio
#- GITEA__storage__MINIO_ENDPOINT=minio:9000
#- GITEA__storage__MINIO_ACCESS_KEY_ID=gitea
@@ -83,7 +92,7 @@ services:
depends_on:
- gitea
environment:
GITEA_INSTANCE_URL: http://gitea:3000
GITEA_INSTANCE_URL: https://gitea.home.hrajfrisbee.cz/
GITEA_RUNNER_REGISTRATION_TOKEN: ${RUNNER_TOKEN}
volumes:
- ./runner-data:/data

View File

@@ -54,6 +54,50 @@ kanidm person get novakj | grep memberof
kanidm group get idm_people_self_name_write
```
## configure oauth proxy
```bash
kanidm system oauth2 create oauth2-proxy "OAuth2 Proxy" https://oauth2-proxy.lab.home.hrajfrisbee.cz/oauth2/callback
kanidm system oauth2 set-landing-url oauth2-proxy https://oauth2-proxy.lab.home.hrajfrisbee.cz
kanidm system oauth2 enable-pkce oauth2-proxy
kanidm system oauth2 warning-insecure-client-disable-pkce oauth2-proxy # if proxy doesn't support PKCE
kanidm system oauth2 get oauth2-proxy # note the client secret
# update incorrect urls if needed
remove-redirect-url
kanidm system oauth2 add-redirect-url oauth2-proxy https://oauth2-proxy.lab.home.hrajfrisbee.cz/oauth2/callback
kanidm system oauth2 set-landing-url oauth2-proxy https://oauth2-proxy.lab.home.hrajfrisbee.cz
# output
✔ Multiple authentication tokens exist. Please select one · idm_admin@idm.home.hrajfrisbee.cz
---
class: account
class: key_object
class: key_object_internal
class: key_object_jwe_a128gcm
class: key_object_jwt_es256
class: memberof
class: oauth2_resource_server
class: oauth2_resource_server_basic
class: object
displayname: OAuth2 Proxy
key_internal_data: 69df0a387991455f7c9800f13b881803: valid jwe_a128gcm 0
key_internal_data: c5f61c48a9c0eb61ba993a36748826cc: valid jws_es256 0
name: oauth2-proxy
oauth2_allow_insecure_client_disable_pkce: true
oauth2_rs_basic_secret: hidden
oauth2_rs_origin_landing: https://oauth2-proxylab.home.hrajfrisbee.cz/
oauth2_strict_redirect_uri: true
spn: oauth2-proxy@idm.home.hrajfrisbee.cz
uuid: d0dcbad5-90e4-4e36-a51b-653624069009
secret: 7KJbUe5x35NVCT1VbzZfhYBU19cz9Xe9Z1fvw4WazrkHX2c8
kanidm system oauth2 update-scope-map oauth2-proxy k8s_users openid profile email
```
```bash

View File

@@ -0,0 +1,46 @@
# nginx.conf
error_log /dev/stderr;
http {
server {
listen 9080;
location / {
proxy_pass http://192.168.0.35:80;
proxy_set_header Host $host;
}
}
log_format detailed '$remote_addr - [$time_local] '
'"$request_method $host$request_uri" '
'$status $body_bytes_sent '
'"$http_referer" "$http_user_agent"';
access_log /dev/stdout detailed;
}
stream {
# Stream doesn't log by default, enable explicitly:
log_format stream_log '$remote_addr [$time_local] '
'$protocol $ssl_preread_server_name '
'$status $bytes_sent $bytes_received $session_time';
access_log /dev/stdout stream_log;
# Nginx ingress in kubernetes
server {
listen 9443;
proxy_pass 192.168.0.35:443;
}
# Gateway provided by cilium/envoy
server {
listen 9444;
proxy_pass 192.168.0.36:443;
}
}
events {}

View File

@@ -0,0 +1,9 @@
docker rm -f lab-proxy || /usr/bin/true
docker run -d --name lab-proxy \
--restart unless-stopped \
-v /srv/docker/lab-proxy/nginx.conf:/etc/nginx/nginx.conf:ro \
-p 9443:9443 \
-p 9444:9444 \
-p 9080:9080 \
nginx:alpine

View File

@@ -0,0 +1,9 @@
#!/bin/bash
docker rm -f maru-hleda-byt
# gitea registry login with kacerr / token
docker run -d --name maru-hleda-byt \
-p 8080:8080 \
-v /srv/maru-hleda-byt/data:/app/data \
gitea.home.hrajfrisbee.cz/littlemeat/maru-hleda-byt:0.01

View File

@@ -0,0 +1,22 @@
server {
listen 443 ssl http2;
server_name gitea.home.hrajfrisbee.cz;
ssl_certificate /etc/letsencrypt/live/gitea.home.hrajfrisbee.cz/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/gitea.home.hrajfrisbee.cz/privkey.pem;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
location / {
proxy_pass http://192.168.0.30:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# Gitea Git over HTTP
client_max_body_size 512m;
}

View File

@@ -0,0 +1,35 @@
server {
listen 443 ssl http2;
server_name jellyfin.home.hrajfrisbee.cz;
ssl_certificate /etc/letsencrypt/live/gitea.home.hrajfrisbee.cz/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/gitea.home.hrajfrisbee.cz/privkey.pem;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
# Security headers for media streaming
add_header X-Frame-Options "SAMEORIGIN";
add_header X-XSS-Protection "1; mode=block";
add_header X-Content-Type-Options "nosniff";
# Increase body size for high-res movie posters
client_max_body_size 20M;
location / {
# Proxy to your Synology or VM IP and Jellyfin port (default 8096)
proxy_pass http://192.168.0.2:8096;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Protocol $scheme;
proxy_set_header X-Forwarded-Host $http_host;
# Disable buffering for smoother streaming
proxy_buffering off;
}
}

View File

@@ -29,10 +29,10 @@ log "Backup size: ${BACKUP_SIZE} bytes"
# --- Upload to MinIO ---
log "Uploading to ${MC_ALIAS}/${S3_BUCKET}..."
set -x
mc cp --quiet "${BACKUP_FILE}" "${MC_ALIAS}/${S3_BUCKET}/vault-backup-${TIMESTAMP}.tar.gz"
minio-cli cp --quiet "${BACKUP_FILE}" "${MC_ALIAS}/${S3_BUCKET}/vault-backup-${TIMESTAMP}.tar.gz"
# --- Prune old backups ---
log "Pruning backups older than ${RETENTION_DAYS} days..."
mc rm --quiet --recursive --force --older-than "${RETENTION_DAYS}d" "${MC_ALIAS}/${S3_BUCKET}/"
minio-cli rm --quiet --recursive --force --older-than "${RETENTION_DAYS}d" "${MC_ALIAS}/${S3_BUCKET}/"
log "Backup complete: vault-backup-${TIMESTAMP}.tar.gz"

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,22 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod-dns
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: kacerr.cz@gmail.com
privateKeySecretRef:
name: letsencrypt-dns-account-key
solvers:
- dns01:
rfc2136:
nameserver: dns-update-proxy.cert-manager.svc.cluster.local:53
tsigKeyName: acme-update-key
tsigAlgorithm: HMACSHA512
tsigSecretSecretRef:
name: acme-update-key
key: acme-update-key
selector:
dnsZones:
- "lab.home.hrajfrisbee.cz"

View File

@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: dns-update-proxy
namespace: cert-manager
spec:
replicas: 1
selector:
matchLabels:
app: dns-update-proxy
template:
metadata:
labels:
app: dns-update-proxy
spec:
containers:
- name: socat-tcp
image: alpine/socat
args:
- TCP-LISTEN:53,fork,reuseaddr
- TCP:87.236.195.209:5353
ports:
- containerPort: 53
protocol: TCP
- name: socat-udp
image: alpine/socat
args:
- -T5
- UDP-RECVFROM:53,fork,reuseaddr
- UDP:87.236.195.209:5353
ports:
- containerPort: 53
protocol: UDP

View File

@@ -0,0 +1,18 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: acme-update-key
namespace: cert-manager
spec:
refreshInterval: 1h
secretStoreRef:
name: vault-backend # or your store
kind: ClusterSecretStore
target:
name: acme-update-key
creationPolicy: Owner
data:
- secretKey: acme-update-key
remoteRef:
key: k8s_home/cert-manager
property: acme-update-key

View File

@@ -19,8 +19,14 @@ spec:
upgrade:
crds: CreateReplace
values:
global:
logLevel: 6
crds:
enabled: false
config:
apiVersion: controller.config.cert-manager.io/v1alpha1
kind: ControllerConfiguration
enableGatewayAPI: true
prometheus:
enabled: true
extraObjects:

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: dns-update-proxy
namespace: cert-manager
spec:
selector:
app: dns-update-proxy
ports:
- name: dns-tcp
port: 53
targetPort: 53
protocol: TCP
- name: dns-udp
port: 53
targetPort: 53
protocol: UDP

View File

@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: wildcard-lab-home-hrajfrisbee
namespace: kube-system
spec:
secretName: wildcard-lab-home-hrajfrisbee-tls
issuerRef:
name: letsencrypt-prod-dns
kind: ClusterIssuer
dnsNames:
- "*.lab.home.hrajfrisbee.cz"

View File

@@ -0,0 +1,27 @@
---
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: cilium-gateway
namespace: kube-system
spec:
gatewayClassName: cilium
listeners:
- name: http
port: 80
protocol: HTTP
allowedRoutes:
namespaces:
from: All
- name: lab-home-hrajfrisbee-https-wildcard
hostname: "*.lab.home.hrajfrisbee.cz"
port: 443
protocol: HTTPS
tls:
mode: Terminate
certificateRefs:
- kind: Secret
name: wildcard-lab-home-hrajfrisbee-tls
allowedRoutes:
namespaces:
from: All

View File

@@ -18,6 +18,7 @@ spec:
values:
cluster:
name: "home-kube"
devices: "eth+ bond+ en+"
hubble:
relay:
enabled: true
@@ -30,6 +31,13 @@ spec:
clusterPoolIPv4PodCIDRList: "10.96.0.0/16"
l2announcements:
enabled: true
gatewayAPI:
enabled: true
kubeProxyReplacement: true
k8sServiceHost: 192.168.0.31 # or LB IP
k8sServicePort: 6443
# disable envoy daemonset - i guess that is stupid idea anyway
# envoy:
# enabled: false
# l7Proxy: false

View File

@@ -6,13 +6,13 @@ metadata:
spec:
provider:
vault:
server: "https://vault.hrajfrisbee.cz:8200"
server: "https://vault.hrajfrisbee.cz"
path: "secret"
version: "v2"
auth:
appRole:
path: "approle"
roleId: "8833d0f8-d35d-d7ea-658b-c27837d121ab" # or reference a secret
roleId: "864e352d-2064-2bf9-2c73-dbd676a95368" # or reference a secret
secretRef:
name: vault-approle
key: secret-id

View File

@@ -6,5 +6,5 @@ metadata:
annotations:
kustomize.toolkit.fluxcd.io/reconcile: disabled
type: Opaque
data:
secret-id: # --- find me in keepass bro ---
stringData:
secret-id: --- fill in the secret_id ---

View File

@@ -1,18 +0,0 @@
apiVersion: external-secrets.io/v1
kind: SecretStore
metadata:
name: vault-backend
namespace: external-secrets
spec:
provider:
vault:
server: "https://vault.hrajfrisbee.cz:8200"
path: "secret"
version: "v2"
auth:
appRole:
path: "approle"
roleId: "864e352d-2064-2bf9-2c73-dbd676a95368" # or reference a secret
secretRef:
name: vault-approle
key: secret-id

View File

@@ -0,0 +1,30 @@
---
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: ghost-on-kubernetes-redirect
namespace: ghost-on-kubernetes
labels:
app: ghost-on-kubernetes
app.kubernetes.io/name: ghost-on-kubernetes-httproute
app.kubernetes.io/instance: ghost-on-kubernetes
app.kubernetes.io/version: '6.0'
app.kubernetes.io/component: httproute
app.kubernetes.io/part-of: ghost-on-kubernetes
spec:
parentRefs:
- name: cilium-gateway
namespace: kube-system
sectionName: http
hostnames:
- ghost.lab.home.hrajfrisbee.cz
rules:
- matches:
- path:
type: PathPrefix
value: /
filters:
- type: RequestRedirect
requestRedirect:
scheme: https
statusCode: 301

View File

@@ -0,0 +1,29 @@
---
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: ghost-on-kubernetes
namespace: ghost-on-kubernetes
labels:
app: ghost-on-kubernetes
app.kubernetes.io/name: ghost-on-kubernetes-httproute
app.kubernetes.io/instance: ghost-on-kubernetes
app.kubernetes.io/version: '6.0'
app.kubernetes.io/component: httproute
app.kubernetes.io/part-of: ghost-on-kubernetes
spec:
parentRefs:
- name: cilium-gateway
namespace: kube-system
sectionName: lab-home-hrajfrisbee-https-wildcard
hostnames:
- ghost.lab.home.hrajfrisbee.cz
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- name: ghost-on-kubernetes-service
namespace: ghost-on-kubernetes
port: 2368

View File

@@ -25,7 +25,7 @@ spec:
http:
paths:
- path: /
pathType: ImplementationSpecific
pathType: Prefix
backend:
service:
name: ghost-on-kubernetes-service

View File

@@ -11,11 +11,13 @@ spec:
sourceRef:
kind: HelmRepository
name: ingress-nginx
version: 4.12.0
version: 4.14.1
values:
controller:
admissionWebhooks:
enabled: false
patch:
enabled: false
config:
annotations-risk-level: "Critical"
interval: 5m0s

View File

@@ -17,7 +17,7 @@ data:
ttl 30
}
hosts {
192.168.0.30 vault.hrajfrisbee.cz
# 192.168.0.30 vault.hrajfrisbee.cz
fallthrough
}
prometheus :9153

View File

@@ -0,0 +1,19 @@
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: mariadb-operator-crds
namespace: mariadb-operator
spec:
interval: 1h
chart:
spec:
chart: mariadb-operator-crds
version: "25.10.*"
sourceRef:
kind: HelmRepository
name: mariadb-operator
namespace: flux-system
install:
crds: Create
upgrade:
crds: CreateReplace

View File

@@ -0,0 +1,31 @@
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: mariadb-operator
namespace: mariadb-operator
spec:
interval: 1h
dependsOn:
- name: mariadb-operator-crds
chart:
spec:
chart: mariadb-operator
version: "25.10.*"
sourceRef:
kind: HelmRepository
name: mariadb-operator
namespace: flux-system
values:
# uses built-in cert-controller for webhook TLS (no cert-manager dep)
webhook:
cert:
certManager:
enabled: false
# disable HA for operator itself (fine for testing)
ha:
enabled: false
# optional: enable metrics
metrics:
enabled: false
serviceMonitor:
enabled: false

View File

@@ -0,0 +1,8 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: mariadb-operator
namespace: flux-system
spec:
interval: 1h
url: https://helm.mariadb.com/mariadb-operator

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: mariadb-operator

View File

@@ -0,0 +1,34 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: nextcloud-secrets
namespace: nextcloud
spec:
refreshInterval: 1h
secretStoreRef:
name: vault-backend # or your store
kind: ClusterSecretStore
target:
name: nextcloud-secrets
creationPolicy: Owner
data:
- secretKey: nextcloud-password
remoteRef:
key: k8s_home/nextcloud/admin
property: password
- secretKey: nextcloud-username
remoteRef:
key: k8s_home/nextcloud/admin
property: username
- secretKey: db-username
remoteRef:
key: k8s_home/nextcloud/postgres
property: db-username
- secretKey: postgres-password
remoteRef:
key: k8s_home/nextcloud/postgres
property: password
- secretKey: redis-password
remoteRef:
key: k8s_home/nextcloud/redis
property: password

View File

@@ -0,0 +1,263 @@
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: nextcloud
namespace: nextcloud
spec:
interval: 30m
timeout: 15m # Nextcloud init can be slow
chart:
spec:
chart: nextcloud
version: "8.6.0" # Latest as of Jan 2025
sourceRef:
kind: HelmRepository
name: nextcloud
namespace: flux-system
interval: 12h
install:
crds: CreateReplace
remediation:
retries: 3
upgrade:
crds: CreateReplace
cleanupOnFail: true
remediation:
retries: 3
remediateLastFailure: true
# CRITICAL: Suspend during major version upgrades to prevent restart loops
# suspend: true
values:
image:
repository: nextcloud
tag: 32.0.3-apache # Latest as of Jan 2025. For fresh installs only.
# UPGRADE PATH: If upgrading from older version, go sequentially:
# 29.x → 30.0.x → 31.0.x → 32.0.x (one major at a time)
pullPolicy: IfNotPresent
replicaCount: 1 # >1 requires Redis, see below
nextcloud:
host: nextcloud.lab.home.hrajfrisbee.cz # Substitute or hardcode
# existingSecret: nextcloud-admin # Alternative to inline credentials
existingSecret:
enabled: true
secretName: nextcloud-secrets
# usernameKey: username
passwordKey: nextcloud-password
username: admin
# password set via valuesFrom secret
# PHP tuning - critical for stability
phpConfigs:
uploadLimit.ini: |
upload_max_filesize = 16G
post_max_size = 16G
max_input_time = 3600
max_execution_time = 3600
www-conf.ini: |
[www]
pm = dynamic
pm.max_children = 20
pm.start_servers = 4
pm.min_spare_servers = 2
pm.max_spare_servers = 6
pm.max_requests = 500
memory.ini: |
memory_limit = 1G
opcache.ini: |
opcache.enable = 1
opcache.interned_strings_buffer = 32
opcache.max_accelerated_files = 10000
opcache.memory_consumption = 256
opcache.save_comments = 1
opcache.revalidate_freq = 60
; Set to 0 if using ConfigMap-mounted configs
configs:
# Proxy and overwrite settings - CRITICAL for ingress
proxy.config.php: |-
<?php
$CONFIG = array (
'trusted_proxies' => array(
0 => '127.0.0.1',
1 => '10.0.0.0/8',
2 => '172.16.0.0/12',
3 => '192.168.0.0/16',
),
'forwarded_for_headers' => array('HTTP_X_FORWARDED_FOR'),
'overwriteprotocol' => 'https',
);
# Performance and maintenance
custom.config.php: |-
<?php
$CONFIG = array (
'default_phone_region' => 'US',
'maintenance_window_start' => 1,
'filelocking.enabled' => true,
'memcache.local' => '\\OC\\Memcache\\APCu',
'memcache.distributed' => '\\OC\\Memcache\\Redis',
'memcache.locking' => '\\OC\\Memcache\\Redis',
'redis' => array(
'host' => 'nextcloud-redis-master',
'port' => 6379,
'password' => getenv('REDIS_PASSWORD'),
),
);
extraEnv:
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: nextcloud-secrets
key: redis-password
# Ingress - adjust for your ingress controller
ingress:
enabled: true
className: nginx # or traefik, etc.
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: "16G"
nginx.ingress.kubernetes.io/proxy-connect-timeout: "3600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
nginx.ingress.kubernetes.io/server-snippet: |
server_tokens off;
proxy_hide_header X-Powered-By;
rewrite ^/.well-known/webfinger /index.php/.well-known/webfinger last;
rewrite ^/.well-known/nodeinfo /index.php/.well-known/nodeinfo last;
rewrite ^/.well-known/host-meta /public.php?service=host-meta last;
rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json;
location = /.well-known/carddav {
return 301 $scheme://$host/remote.php/dav;
}
location = /.well-known/caldav {
return 301 $scheme://$host/remote.php/dav;
}
cert-manager.io/cluster-issuer: letsencrypt-prod
tls:
- secretName: nextcloud-tls
hosts:
- nextcloud.lab.home.hrajfrisbee.cz
# PostgreSQL - strongly recommended over MariaDB for Nextcloud
postgresql:
enabled: true
global:
postgresql:
auth:
username: nextcloud
database: nextcloud
existingSecret: nextcloud-secrets
secretKeys:
userPasswordKey: postgres-password
primary:
persistence:
enabled: true
size: 8Gi
storageClass: "" # Use default or specify
resources:
requests:
memory: 256Mi
cpu: 100m
limits:
memory: 512Mi
# Redis - required for file locking and sessions
redis:
enabled: true
auth:
enabled: true
existingSecret: nextcloud-secrets
existingSecretPasswordKey: redis-password
architecture: standalone
master:
persistence:
enabled: true
size: 1Gi
# Disable built-in databases we're not using
mariadb:
enabled: false
internalDatabase:
enabled: false
externalDatabase:
enabled: true
type: postgresql
host: nextcloud-postgresql # Service name created by subchart
user: nextcloud
database: nextcloud
existingSecret:
enabled: true
secretName: nextcloud-secrets
passwordKey: postgres-password
# Cron job - CRITICAL: never use AJAX cron
cronjob:
enabled: true
schedule: "*/5 * * * *"
resources:
requests:
memory: 256Mi
cpu: 50m
limits:
memory: 512Mi
# Main persistence
persistence:
enabled: true
storageClass: "" # Specify your storage class
size: 100Gi
accessMode: ReadWriteOnce
# nextcloudData - separate PVC for user data (recommended)
nextcloudData:
enabled: true
storageClass: ""
size: 500Gi
accessMode: ReadWriteOnce
# Resource limits - tune based on usage
resources:
requests:
cpu: 200m
memory: 512Mi
limits:
memory: 2Gi
# Liveness/Readiness - tuned to prevent upgrade restart loops
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 6
successThreshold: 1
startupProbe:
enabled: true
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 30 # 15 minutes for upgrades
# Security context - avoid fsGroup recursive chown
securityContext:
fsGroupChangePolicy: OnRootMismatch
podSecurityContext:
fsGroup: 33 # www-data
# Metrics - optional but recommended
metrics:
enabled: false # Enable if you have Prometheus
# serviceMonitor:
# enabled: true

View File

@@ -0,0 +1,8 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: nextcloud
namespace: flux-system
spec:
interval: 24h
url: https://nextcloud.github.io/helm/

View File

@@ -0,0 +1,7 @@
apiVersion: v1
kind: Namespace
metadata:
name: nextcloud
labels:
pod-security.kubernetes.io/enforce: baseline
pod-security.kubernetes.io/warn: restricted

View File

@@ -4,7 +4,7 @@ metadata:
name: oauth2-proxy-secrets
namespace: oauth2-proxy
annotations:
kustomize.toolkit.fluxcd.io/reconcile: disabled
kustomize.toolkit.fluxcd.io/reconcile: disabled
stringData:
client-id: oauth2-proxy
client-secret: <REPLACE_WITH_KANIDM_SECRET>

View File

@@ -33,7 +33,7 @@ spec:
rabbitmqHost: "plane-mq.lab.home.hrajfrisbee.cz" # optional
ingressClass: nginx
ingress_annotations:
cert-manager.io/cluster-issuer: letsencrypt-production
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/auth-url: "https://oauth2-proxy.lab.home.hrajfrisbee.cz/oauth2/auth"
nginx.ingress.kubernetes.io/auth-signin: "https://oauth2-proxy.lab.home.hrajfrisbee.cz/oauth2/start?rd=$scheme://$host$escaped_request_uri"
nginx.ingress.kubernetes.io/auth-response-headers: "X-Auth-Request-User,X-Auth-Request-Email,Authorization"

View File

@@ -0,0 +1,30 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: seafile-secret
namespace: seafile
spec:
refreshInterval: 1h
secretStoreRef:
name: vault-backend # or your store
kind: ClusterSecretStore
target:
name: seafile-secret
creationPolicy: Owner
data:
- secretKey: JWT_PRIVATE_KEY
remoteRef:
key: k8s_home/seafile
property: JWT_PRIVATE_KEY
- secretKey: SEAFILE_MYSQL_DB_PASSWORD
remoteRef:
key: k8s_home/seafile
property: SEAFILE_MYSQL_DB_PASSWORD
- secretKey: INIT_SEAFILE_ADMIN_PASSWORD
remoteRef:
key: k8s_home/seafile
property: INIT_SEAFILE_ADMIN_PASSWORD
- secretKey: INIT_SEAFILE_MYSQL_ROOT_PASSWORD
remoteRef:
key: k8s_home/seafile
property: INIT_SEAFILE_MYSQL_ROOT_PASSWORD

View File

@@ -0,0 +1,114 @@
# apps/seafile/helmrelease.yaml
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: seafile
namespace: seafile
spec:
interval: 30m
chart:
spec:
chart: ce
version: "13.0.2"
sourceRef:
kind: HelmRepository
name: seafile
namespace: flux-system
install:
createNamespace: true
remediation:
retries: 3
upgrade:
remediation:
retries: 3
# Post-render patches
postRenderers:
- kustomize:
patches:
# Remove imagePullSecrets from all Deployments
- target:
kind: Deployment
patch: |
- op: remove
path: /spec/template/spec/imagePullSecrets
# Remove from StatefulSets (MariaDB, etc.)
- target:
kind: StatefulSet
patch: |
- op: remove
path: /spec/template/spec/imagePullSecrets
# Remove from Pods if any
- target:
kind: Pod
patch: |
- op: remove
path: /spec/imagePullSecrets
values:
seafile:
initMode: true
# The following are the configurations of seafile container
configs:
image: seafileltd/seafile-mc:13.0-latest
seafileDataVolume:
storage: 10Gi
# The following are environments of seafile services
env:
# for Seafile server
TIME_ZONE: "UTC"
SEAFILE_LOG_TO_STDOUT: "true"
SITE_ROOT: "/"
SEAFILE_SERVER_HOSTNAME: "seafile.lab.home.hrajfrisbee.cz"
SEAFILE_SERVER_PROTOCOL: "https"
# for database
SEAFILE_MYSQL_DB_HOST: "seafile-mariadb"
SEAFILE_MYSQL_DB_PORT: "3306"
SEAFILE_MYSQL_DB_USER: "seafile"
#SEAFILE_MYSQL_DB_CCNET_DB_NAME: "ccnet-db"
#SEAFILE_MYSQL_DB_SEAFILE_DB_NAME: "seafile-db"
#SEAFILE_MYSQL_DB_SEAHUB_DB_NAME: "seahub-db"
# for cache
CACHE_PROVIDER: "redis"
## for redis
REDIS_HOST: "redis"
REDIS_PORT: "6379"
## for memcached
#MEMCACHED_HOST: ""
#MEMCACHED_PORT: "11211"
# for notification
ENABLE_NOTIFICATION_SERVER: "false"
NOTIFICATION_SERVER_URL: ""
# for seadoc
ENABLE_SEADOC: "false"
SEADOC_SERVER_URL: "" # only valid in ENABLE_SEADOC = true
# for Seafile AI
ENABLE_SEAFILE_AI: "false"
SEAFILE_AI_SERVER_URL: ""
# for Metadata server
MD_FILE_COUNT_LIMIT: "100000"
# initialization (only valid in first-time deployment and initMode = true)
## for Seafile admin
INIT_SEAFILE_ADMIN_EMAIL: "kacerr.cz@gmail.com"
# if you are using another secret name / key for seafile or mysql, please make correct the following fields:
#secretsMap:
# DB_ROOT_PASSWD: # Env's name
# secret: seafile-secret # secret's name, `seafile-secret` if not specify
# key: INIT_SEAFILE_MYSQL_ROOT_PASSWORD # secret's key, `Env's name` if not specify
# extra configurations
extraResources: {}
extraEnv: []
extraVolumes: []

View File

@@ -0,0 +1,8 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: seafile
namespace: flux-system
spec:
interval: 1h
url: https://haiwen.github.io/seafile-helm-chart/repo

View File

@@ -0,0 +1,35 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
meta.helm.sh/release-name: seafile
meta.helm.sh/release-namespace: seafile
nginx.ingress.kubernetes.io/proxy-body-size: "100m" # 0 = unlimited, or "500m"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
labels:
app.kubernetes.io/component: app
app.kubernetes.io/instance: seafile
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: seafile
name: seafile
namespace: seafile
spec:
ingressClassName: nginx
rules:
- host: seafile.lab.home.hrajfrisbee.cz
http:
paths:
- backend:
service:
name: seafile
port:
number: 80
path: /
pathType: Prefix
tls:
- hosts:
- seafile.lab.home.hrajfrisbee.cz
secretName: seafile-tls

View File

@@ -0,0 +1,10 @@
apiVersion: k8s.mariadb.com/v1alpha1
kind: Database
metadata:
name: ccnet-db
namespace: seafile
spec:
mariaDbRef:
name: seafile-mariadb
characterSet: utf8mb4
collate: utf8mb4_general_ci

View File

@@ -0,0 +1,10 @@
apiVersion: k8s.mariadb.com/v1alpha1
kind: Database
metadata:
name: seafile-db
namespace: seafile
spec:
mariaDbRef:
name: seafile-mariadb
characterSet: utf8mb4
collate: utf8mb4_general_ci

View File

@@ -0,0 +1,10 @@
apiVersion: k8s.mariadb.com/v1alpha1
kind: Database
metadata:
name: seahub-db
namespace: seafile
spec:
mariaDbRef:
name: seafile-mariadb
characterSet: utf8mb4
collate: utf8mb4_general_ci

View File

@@ -0,0 +1,61 @@
apiVersion: k8s.mariadb.com/v1alpha1
kind: Grant
metadata:
name: all-privileges
spec:
mariaDbRef:
name: seafile-mariadb
username: seafile
database: "*"
table: "*"
privileges:
- ALL PRIVILEGES
grantOption: true
# ---
# apiVersion: k8s.mariadb.com/v1alpha1
# kind: Grant
# metadata:
# name: seafile-grant
# namespace: seafile
# spec:
# mariaDbRef:
# name: seafile-mariadb
# privileges:
# - ALL PRIVILEGES
# database: seafile-db
# table: "*"
# username: seafile
# host: "%"
# grantOption: false
# ---
# apiVersion: k8s.mariadb.com/v1alpha1
# kind: Grant
# metadata:
# name: seahub-grant
# namespace: seafile
# spec:
# mariaDbRef:
# name: seafile-mariadb
# privileges:
# - ALL PRIVILEGES
# database: seahub-db
# table: "*"
# username: seafile
# host: "%"
# grantOption: false
# ---
# apiVersion: k8s.mariadb.com/v1alpha1
# kind: Grant
# metadata:
# name: ccnet-grant
# namespace: seafile
# spec:
# mariaDbRef:
# name: seafile-mariadb
# privileges:
# - ALL PRIVILEGES
# database: ccnet-db
# table: "*"
# username: seafile
# host: "%"
# grantOption: false

View File

@@ -0,0 +1,13 @@
apiVersion: k8s.mariadb.com/v1alpha1
kind: User
metadata:
name: seafile
namespace: seafile
spec:
mariaDbRef:
name: seafile-mariadb
passwordSecretKeyRef:
name: seafile-secret
key: SEAFILE_MYSQL_DB_PASSWORD
maxUserConnections: 20
host: "%"

View File

@@ -0,0 +1,33 @@
apiVersion: k8s.mariadb.com/v1alpha1
kind: MariaDB
metadata:
name: seafile-mariadb
namespace: seafile
spec:
rootPasswordSecretKeyRef:
name: seafile-secret
key: INIT_SEAFILE_MYSQL_ROOT_PASSWORD
image: mariadb:11.4
port: 3306
storage:
size: 10Gi
# storageClassName: your-storage-class
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
memory: 1Gi
myCnf: |
[mariadb]
bind-address=*
default_storage_engine=InnoDB
binlog_format=row
innodb_autoinc_lock_mode=2
innodb_buffer_pool_size=256M
max_allowed_packet=256M

View File

@@ -0,0 +1,39 @@
# apiVersion: apps/v1
# kind: Deployment
# metadata:
# name: seafile-memcached
# namespace: seafile
# spec:
# replicas: 1
# selector:
# matchLabels:
# app: seafile-memcached
# template:
# metadata:
# labels:
# app: seafile-memcached
# spec:
# containers:
# - name: memcached
# image: memcached:1.6-alpine
# args: ["-m", "128"] # 128MB memory limit
# ports:
# - containerPort: 11211
# resources:
# requests:
# memory: 64Mi
# cpu: 25m
# limits:
# memory: 192Mi
# ---
# apiVersion: v1
# kind: Service
# metadata:
# name: seafile-memcached
# namespace: seafile
# spec:
# selector:
# app: seafile-memcached
# ports:
# - port: 11211
# targetPort: 11211

View File

@@ -0,0 +1,67 @@
# Helm values for the Seafile chart.
# NOTE(review): initMode must be flipped to false after the first successful start —
# see the deployment note in this directory's README.
seafile:
  initMode: true
  # The following are the configurations of the seafile container
  configs:
    image: seafileltd/seafile-mc:13.0-latest
    seafileDataVolume:
      storage: 10Gi
  # The following are environment variables of the seafile services
  env:
    # for Seafile server
    TIME_ZONE: "UTC"
    SEAFILE_LOG_TO_STDOUT: "true"
    SITE_ROOT: "/"
    SEAFILE_SERVER_HOSTNAME: "seafile.lab.home.hrajfrisbee.cz"
    SEAFILE_SERVER_PROTOCOL: "https"
    # for database
    SEAFILE_MYSQL_DB_HOST: "seafile-mariadb"
    SEAFILE_MYSQL_DB_PORT: "3306"
    SEAFILE_MYSQL_DB_USER: "seafile"
    SEAFILE_MYSQL_DB_CCNET_DB_NAME: "ccnet-db"
    SEAFILE_MYSQL_DB_SEAFILE_DB_NAME: "seafile-db"
    SEAFILE_MYSQL_DB_SEAHUB_DB_NAME: "seahub-db"
    # for cache
    CACHE_PROVIDER: "redis"
    ## for redis
    REDIS_HOST: "redis"
    REDIS_PORT: "6379"
    ## for memcached
    #MEMCACHED_HOST: ""
    #MEMCACHED_PORT: "11211"
    # for notification
    ENABLE_NOTIFICATION_SERVER: "false"
    NOTIFICATION_SERVER_URL: ""
    # for seadoc
    ENABLE_SEADOC: "false"
    SEADOC_SERVER_URL: "" # only valid when ENABLE_SEADOC = true
    # for Seafile AI
    ENABLE_SEAFILE_AI: "false"
    SEAFILE_AI_SERVER_URL: ""
    # for Metadata server
    MD_FILE_COUNT_LIMIT: "100000"
    # initialization (only valid in a first-time deployment with initMode = true)
    ## for Seafile admin
    INIT_SEAFILE_ADMIN_EMAIL: "kacerr.cz@gmail.com"
  # If you are using another secret name / key for seafile or mysql, correct the following fields:
  #secretsMap:
  #  DB_ROOT_PASSWD:                         # Env's name
  #    secret: seafile-secret                # secret's name, `seafile-secret` if not specified
  #    key: INIT_SEAFILE_MYSQL_ROOT_PASSWORD # secret's key, `Env's name` if not specified
  # extra configurations
  extraResources: {}
  extraEnv: []
  extraVolumes: []

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    # NOTE(review): kubernetes.io/metadata.name is set automatically by the API
    # server; keeping it here is harmless but redundant.
    kubernetes.io/metadata.name: seafile
  name: seafile

View File

@@ -0,0 +1,4 @@
## deployment
It looks like the Seafile deployment is not "straightforward": it first has to be started in initialization mode (`initMode: true`) and, after initialization completes, switched back into `normal` mode (`initMode: false`).

View File

@@ -0,0 +1,84 @@
---
# Redis cache for Seafile (CACHE_PROVIDER=redis in the seafile values).
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis-config
  namespace: seafile
data:
  redis.conf: |
    maxmemory 128mb
    maxmemory-policy allkeys-lru
    appendonly yes
    appendfsync everysec
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: seafile
  labels:
    app: redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  # Recreate: avoid two redis pods contending for the same data volume
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
        - name: redis
          image: redis:7-alpine
          args:
            - redis-server
            - /etc/redis/redis.conf
          ports:
            - containerPort: 6379
              name: redis
          resources:
            requests:
              cpu: 50m
              memory: 64Mi
            limits:
              memory: 256Mi
          volumeMounts:
            - name: redis-config
              mountPath: /etc/redis
            - name: redis-data
              mountPath: /data
          livenessProbe:
            exec:
              command: [redis-cli, ping]
            initialDelaySeconds: 5
            periodSeconds: 10
          readinessProbe:
            exec:
              command: [redis-cli, ping]
            initialDelaySeconds: 3
            periodSeconds: 5
      volumes:
        - name: redis-config
          configMap:
            name: redis-config
        # NOTE(review): redis.conf enables AOF persistence, but /data is an
        # emptyDir, so the AOF is lost whenever the pod is rescheduled. Fine
        # for a pure cache; use a PVC if persistence is actually wanted.
        - name: redis-data
          emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: redis
  namespace: seafile
  labels:
    app: redis
spec:
  selector:
    app: redis
  ports:
    - port: 6379
      targetPort: 6379
      name: redis
  type: ClusterIP

View File

@@ -0,0 +1,24 @@
# Flux HelmRelease for Tetragon (eBPF observability), used here to trace OOM kills.
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: tetragon
  namespace: kube-system
spec:
  interval: 1h
  chart:
    spec:
      chart: tetragon
      version: "1.6.0"
      sourceRef:
        kind: HelmRepository
        name: cilium
        namespace: flux-system
  values:
    export:
      stdout:
        enabledEvents:
          - PROCESS_EXEC
          - PROCESS_EXIT
          - PROCESS_TRACEPOINT # required for the oom tracepoint policy
    tetragon:
      btf: /sys/kernel/btf/vmlinux

View File

@@ -0,0 +1,16 @@
# Tetragon TracingPolicy: hook the oom subsystem tracepoint to log OOM-killed
# processes (pid + comm of the victim).
apiVersion: cilium.io/v1alpha1
kind: TracingPolicy
metadata:
  name: oom-kill
spec:
  tracepoints:
    - subsystem: oom
      # event: oom_kill
      event: mark_victim
      # NOTE(review): arg indices 4/5 depend on the kernel's tracepoint format
      # (/sys/kernel/debug/tracing/events/oom/mark_victim/format) — verify on
      # the target kernel version.
      args:
        - index: 4
          type: int32
          label: killed_pid
        - index: 5
          type: string
          label: killed_comm

View File

@@ -0,0 +1,141 @@
# Flux HelmRelease for Velero, backing up to MinIO (S3-compatible) with
# kopia filesystem backups via the node agent.
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: velero
  namespace: velero
spec:
  interval: 30m
  chart:
    spec:
      chart: velero
      version: "11.3.2" # Velero 1.16.x - latest stable as of Jan 2025
      sourceRef:
        kind: HelmRepository
        name: vmware-tanzu
        namespace: flux-system
  install:
    crds: CreateReplace
    remediation:
      retries: 3
  upgrade:
    crds: CreateReplace
    remediation:
      retries: 3
  values:
    # Node agent for filesystem backups (kopia/restic)
    deployNodeAgent: true
    nodeAgent:
      podVolumePath: /var/lib/kubelet/pods
      # nodeAgent.privileged removed in chart 8.x+, use containerSecurityContext instead
      containerSecurityContext:
        privileged: true
      resources:
        requests:
          cpu: 100m
          memory: 128Mi
        limits:
          memory: 1Gi
    configuration:
      # backupStorageLocation - note: provider is at same level as bucket, not nested
      backupStorageLocation:
        - name: default
          provider: aws
          bucket: velero-backups # create this bucket in minio first
          accessMode: ReadWrite
          default: true
          config:
            region: us-east-1 # minio ignores but required
            s3ForcePathStyle: "true"
            s3Url: http://192.168.0.2:9000 # adjust to your minio service
      # Volume snapshot location (for CSI snapshots, optional)
      volumeSnapshotLocation:
        - name: default
          provider: aws
          config:
            region: us-east-1
      # Use kopia for fs backups (restic deprecated, kopia is default in 1.14+)
      uploaderType: kopia
      # Default TTL for backups
      defaultBackupTTL: 720h # 30 days
      # Features
      defaultVolumesToFsBackup: false # opt-in via annotation per-pod
    # Credentials
    credentials:
      useSecret: true
      existingSecret: velero-minio-credentials
    # Velero server resources
    resources:
      requests:
        cpu: 100m
        memory: 128Mi
      limits:
        memory: 512Mi
    # Schedules
    schedules:
      daily-all-namespaces:
        disabled: false
        schedule: "0 3 * * *" # 3 AM daily
        useOwnerReferencesInBackup: false
        template:
          ttl: 168h # 7 days
          storageLocation: default
          includedNamespaces:
            - "*"
          excludedNamespaces:
            - kube-system
            - kube-public
            - kube-node-lease
            - flux-system
            - velero
          excludedResources:
            - events
            - events.events.k8s.io
          snapshotVolumes: false
          defaultVolumesToFsBackup: true
      # NOTE(review): unlike the daily schedule, weekly-full does NOT exclude
      # flux-system/velero — presumably intentional for a full backup; confirm.
      weekly-full:
        disabled: false
        schedule: "0 4 * * 0" # Sunday 4 AM
        template:
          ttl: 720h # 30 days
          storageLocation: default
          includedNamespaces:
            - "*"
          excludedNamespaces:
            - kube-system
            - kube-public
            - kube-node-lease
          snapshotVolumes: false
          defaultVolumesToFsBackup: true
    # Init containers for plugins - AWS plugin for S3-compatible storage
    # Note: CSI plugin merged into velero core in v1.14, no separate initContainer needed
    initContainers:
      - name: velero-plugin-for-aws
        image: velero/velero-plugin-for-aws:v1.11.0 # compatible with Velero 1.15/1.16
        imagePullPolicy: IfNotPresent
        volumeMounts:
          - mountPath: /target
            name: plugins
    # Metrics
    metrics:
      enabled: true
      serviceMonitor:
        enabled: false # set true if using prometheus-operator
      additionalLabels: {}
    # Disable volume snapshots if not using CSI snapshotter
    snapshotsEnabled: false
    # Pod annotations/labels
    podAnnotations: {}
    podLabels: {}

View File

@@ -0,0 +1,8 @@
# Flux HelmRepository source for the Velero chart.
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: vmware-tanzu
  namespace: flux-system
spec:
  interval: 24h
  url: https://vmware-tanzu.github.io/helm-charts

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: velero

View File

@@ -0,0 +1,10 @@
# WARNING(review): plaintext credentials committed to git — anyone with repo
# access (including git history) can read them. Rotate this MinIO access key
# and move the secret to SOPS / sealed-secrets / ExternalSecrets instead of
# storing it in the repository.
apiVersion: v1
kind: Secret
metadata:
  name: velero-minio-credentials
  namespace: velero
stringData:
  # AWS-style credentials file consumed by the velero chart (credentials.existingSecret)
  cloud: |
    [default]
    aws_access_key_id=k8s
    aws_secret_access_key=poh9ieceHohnum5e

View File

@@ -0,0 +1,625 @@
# Velero Backup & Recovery Runbook
## Quick Reference
| Operation | Command |
|-----------|---------|
| List backups | `velero backup get` |
| Backup status | `velero backup describe <name> --details` |
| Browse backup contents | `velero backup describe <name> --details \| grep -A100 "Resource List"` |
| Restore full namespace | `velero restore create --from-backup <name> --include-namespaces <ns>` |
| Restore single PVC | `velero restore create --from-backup <name> --include-resources pvc,pv --selector app=<label>` |
| Restore specific files | See [Specific File Restore](#specific-file-restore) |
---
## 1. Browsing Backup Contents
### List All Backups
```bash
# All backups with status
velero backup get
# Backups for specific schedule
velero backup get -l velero.io/schedule-name=daily-all-namespaces
# JSON output for scripting
velero backup get -o json | jq '.items[] | {name: .metadata.name, phase: .status.phase, started: .status.startTimestamp}'
```
### Inspect Backup Contents
```bash
# Full backup details including all resources
velero backup describe <backup-name> --details
# List backed-up namespaces
velero backup describe <backup-name> --details | grep -A 5 "Namespaces:"
# List all resources in backup
velero backup describe <backup-name> --details | grep -A 200 "Resource List:" | head -100
# Check which PVCs were backed up
velero backup describe <backup-name> --details | grep -i persistentvolumeclaim
# Check pod volume backups (kopia/restic)
velero backup describe <backup-name> --details | grep -A 50 "Pod Volume Backups"
```
### View Backup Logs
```bash
# Stream logs
velero backup logs <backup-name>
# Search for errors
velero backup logs <backup-name> | grep -i error
# Check specific namespace backup
velero backup logs <backup-name> | grep "namespace=seafile"
```
### Browse Kopia Repository Directly
For direct file-level inspection of kopia backups in MinIO:
```bash
# Get kopia repository password from velero secret
KOPIA_PASSWORD=$(kubectl get secret -n velero velero-repo-credentials -o jsonpath='{.data.repository-password}' | base64 -d)
# Connect to repository (run from a pod with minio access or port-forward)
kopia repository connect s3 \
--bucket=velero-backups \
--endpoint=minio.minio.svc.cluster.local:9000 \
--access-key=<MINIO_ACCESS_KEY> \
--secret-access-key=<MINIO_SECRET_KEY> \
--password="${KOPIA_PASSWORD}" \
--prefix=kopia/<cluster-name>/
# List snapshots
kopia snapshot list --all
# Browse specific snapshot
kopia snapshot list <snapshot-id>
kopia ls <snapshot-id>
# Mount for browsing (requires FUSE)
mkdir /tmp/kopia-mount
kopia mount <snapshot-id> /tmp/kopia-mount &
ls /tmp/kopia-mount/
```
---
## 2. Full Namespace Restore
### Restore to Same Cluster (Disaster Recovery)
```bash
# Restore entire namespace
velero restore create seafile-restore \
--from-backup daily-all-namespaces-20250115030000 \
--include-namespaces seafile \
--wait
# Monitor restore progress
velero restore describe seafile-restore --details
velero restore logs seafile-restore -f
```
### Restore to Different Namespace
```bash
velero restore create seafile-test-restore \
--from-backup daily-all-namespaces-20250115030000 \
--include-namespaces seafile \
--namespace-mappings seafile:seafile-restored \
--wait
```
### Restore with Resource Filtering
```bash
# Restore only specific resource types
velero restore create restore-pvcs-only \
--from-backup <backup-name> \
--include-namespaces seafile \
--include-resources persistentvolumeclaims,persistentvolumes \
--wait
# Exclude certain resources
velero restore create restore-no-secrets \
--from-backup <backup-name> \
--include-namespaces seafile \
--exclude-resources secrets \
--wait
# Restore by label selector
velero restore create restore-app \
--from-backup <backup-name> \
--selector app.kubernetes.io/name=seafile \
--wait
```
---
## 3. Single PVC/Volume Restore
### Restore Specific PVC
```bash
# First, scale down the workload using the PVC
kubectl scale deployment seafile -n seafile --replicas=0
# Delete the corrupted/problematic PVC (data will be restored)
kubectl delete pvc seafile-data -n seafile
# Restore just that PVC
velero restore create restore-seafile-pvc \
--from-backup <backup-name> \
--include-namespaces seafile \
--include-resources persistentvolumeclaims,persistentvolumes \
--selector app=seafile \
--wait
# Scale back up
kubectl scale deployment seafile -n seafile --replicas=1
```
### Restore PVC to New Name (Side-by-Side)
```bash
# Create restore with transforms
cat <<EOF | kubectl apply -f -
apiVersion: velero.io/v1
kind: Restore
metadata:
name: restore-pvc-new-name
namespace: velero
spec:
backupName: <backup-name>
includedNamespaces:
- seafile
includedResources:
- persistentvolumeclaims
- persistentvolumes
labelSelector:
matchLabels:
app: seafile
restorePVs: true
namespaceMapping:
seafile: seafile-recovery
EOF
# Or use restore hooks to rename
velero restore create restore-pvc-renamed \
--from-backup <backup-name> \
--include-namespaces seafile \
--namespace-mappings seafile:seafile-temp \
--wait
```
---
## 4. Specific File Restore
Velero doesn't support single-file restore natively. Use kopia directly:
### Method 1: Kopia Direct Restore
```bash
# Find the backup/snapshot containing your file
# First, get velero's kopia repo credentials
REPO_PASSWORD=$(kubectl get secret -n velero velero-repo-credentials \
-o jsonpath='{.data.repository-password}' | base64 -d)
# Run a debug pod with kopia
kubectl run kopia-restore --rm -it \
--image=kopia/kopia:latest \
--restart=Never \
--namespace=velero \
-- /bin/sh
# Inside the pod:
kopia repository connect s3 \
--bucket=velero-backups \
--endpoint=minio.minio.svc.cluster.local:9000 \
--access-key=<ACCESS_KEY> \
--secret-access-key=<SECRET_KEY> \
--password="<REPO_PASSWORD>" \
--prefix=kopia/<cluster>/
# List snapshots for specific PVC
kopia snapshot list --all | grep seafile
# Restore specific file
kopia restore <snapshot-id>/path/to/file.txt /tmp/restored-file.txt
# Restore specific directory
kopia restore <snapshot-id>/data/uploads/ /tmp/restored-uploads/
```
### Method 2: Mount and Copy
```bash
# Create a temporary pod that mounts the backup
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: backup-browser
namespace: velero
spec:
containers:
- name: browser
image: kopia/kopia:latest
command: ["sleep", "3600"]
env:
- name: KOPIA_PASSWORD
valueFrom:
secretKeyRef:
name: velero-repo-credentials
key: repository-password
volumeMounts:
- name: restore-target
mountPath: /restore
volumes:
- name: restore-target
emptyDir: {}
EOF
# Exec in and restore files
kubectl exec -it -n velero backup-browser -- /bin/sh
# ... run kopia commands inside
```
### Method 3: Full PVC Restore + Copy + Delete
```bash
# 1. Restore PVC to temp namespace
velero restore create temp-restore \
--from-backup <backup-name> \
--include-namespaces seafile \
--namespace-mappings seafile:temp-restore \
--include-resources pvc,pv \
--wait
# 2. Create a pod to access both PVCs
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: file-copier
namespace: seafile
spec:
containers:
- name: copier
image: alpine
command: ["sleep", "3600"]
volumeMounts:
- name: current
mountPath: /current
- name: restored
mountPath: /restored
volumes:
- name: current
persistentVolumeClaim:
claimName: seafile-data
- name: restored
persistentVolumeClaim:
claimName: seafile-data # in temp-restore namespace - need cross-ns mount or copy via node
EOF
# Alternative: use rsync between namespaces
kubectl exec -n temp-restore deployment/temp-pod -- tar cf - /data/specific-file.txt | \
kubectl exec -i -n seafile deployment/seafile -- tar xf - -C /
```
---
## 5. Database-Specific Recovery
### MariaDB (via mariadb-operator)
Velero fs-backup of running DB may be inconsistent. Prefer operator backups:
```bash
# List operator backups
kubectl get backups.k8s.mariadb.com -n mariadb
# Restore from operator backup
kubectl apply -f - <<EOF
apiVersion: k8s.mariadb.com/v1alpha1
kind: Restore
metadata:
name: mariadb-restore
namespace: mariadb
spec:
mariaDbRef:
name: mariadb
backupRef:
name: mariadb-backup-20250115
EOF
```
If you must restore from Velero:
```bash
# 1. Scale down mariadb
kubectl scale statefulset mariadb -n mariadb --replicas=0
# 2. Restore PVC
velero restore create mariadb-pvc-restore \
--from-backup <backup-name> \
--include-namespaces mariadb \
--include-resources pvc,pv \
--wait
# 3. Scale back up - DB will recover from WAL
kubectl scale statefulset mariadb -n mariadb --replicas=1
# 4. Verify data integrity
kubectl exec -it -n mariadb mariadb-0 -- mariadb -e "CHECK TABLE important_table;"
```
### Redis
```bash
# If Redis is persistent (RDB/AOF)
kubectl scale statefulset redis -n redis --replicas=0
velero restore create redis-restore \
--from-backup <backup-name> \
--include-namespaces redis \
--wait
kubectl scale statefulset redis -n redis --replicas=1
```
---
## 6. Backup Management
### Create On-Demand Backup
```bash
# Full backup
velero backup create manual-backup-$(date +%Y%m%d-%H%M%S) \
--default-volumes-to-fs-backup \
--snapshot-volumes=false \
--wait
# Specific namespace pre-maintenance
velero backup create pre-upgrade-seafile-$(date +%Y%m%d) \
--include-namespaces seafile \
--default-volumes-to-fs-backup \
--wait
```
### Delete Old Backups
```bash
# Delete specific backup
velero backup delete <backup-name> --confirm
# Delete backups older than 30 days (careful!)
velero backup get -o json | jq -r '.items[] | select(.status.startTimestamp < (now - 2592000 | todate)) | .metadata.name' | xargs -I {} velero backup delete {} --confirm
```
### Check Backup Storage Location Health
```bash
velero backup-location get
velero backup-location describe default
# Verify connectivity
kubectl logs -n velero deployment/velero | grep -i "backup storage location"
```
---
## 7. Disaster Recovery Procedures
### Complete Cluster Rebuild
```bash
# 1. Install Velero on new cluster with same config
helm upgrade --install velero vmware-tanzu/velero \
-n velero --create-namespace \
-f velero-values.yaml
# 2. Wait for velero to sync backup list from S3
sleep 60
velero backup get
# 3. Restore namespaces in order (dependencies first)
# Restore storage/infra
velero restore create restore-infra \
--from-backup <latest-backup> \
--include-namespaces minio,cert-manager \
--wait
# Restore databases
velero restore create restore-databases \
--from-backup <latest-backup> \
--include-namespaces mariadb,redis \
--wait
# Restore applications
velero restore create restore-apps \
--from-backup <latest-backup> \
--include-namespaces seafile,plane \
--wait
```
### Restore Schedule After Accidental Deletion
```bash
# Schedules are cluster resources, restore from backup
velero restore create restore-schedules \
--from-backup <backup-name> \
--include-resources schedules.velero.io \
--wait
```
---
## 8. Troubleshooting
### Backup Stuck/Failed
```bash
# Check velero logs
kubectl logs -n velero deployment/velero --tail=100
# Check node-agent on specific node
kubectl logs -n velero -l name=node-agent --tail=100
# Check backup details for errors
velero backup describe <backup-name> --details | grep -i -A5 "error\|warning\|failed"
# Common issues:
# - Node-agent not running on node with PV
kubectl get pods -n velero -l name=node-agent -o wide
# - Pod not annotated for backup (backup.velero.io/backup-volumes is a POD annotation, not a PVC one)
kubectl get pods -A -o json | jq '.items[] | select(.metadata.annotations["backup.velero.io/backup-volumes"] != null) | .metadata.name'
```
### Restore Not Restoring Volumes
```bash
# Check if backup has pod volume backups
velero backup describe <backup-name> --details | grep -A20 "Pod Volume Backups"
# Verify restore is configured to restore PVs
velero restore describe <restore-name> --details | grep -i "restorePVs"
# Force PV restore
velero restore create <name> \
--from-backup <backup> \
--restore-volumes=true \
--wait
```
### Kopia Repository Issues
```bash
# Check repository status
kubectl exec -n velero deployment/velero -- \
velero repo get
# Unlock stuck repository
kubectl exec -n velero deployment/velero -- \
velero repo unlock <repo-name>
# Maintenance (runs automatically, but can trigger manually)
kubectl exec -n velero deployment/velero -- \
velero repo maintenance run
```
---
## 9. Monitoring & Alerting
### Prometheus Metrics
Key metrics to monitor:
```promql
# Backup success rate
sum(velero_backup_success_total) / sum(velero_backup_attempt_total)
# Backup duration
velero_backup_duration_seconds{schedule="daily-all-namespaces"}
# Backup item count (Velero exposes item counts, not bytes — check bucket usage for actual size)
velero_backup_items_total{backup="<name>"}
# Failed backups in last 24h
increase(velero_backup_failure_total[24h])
```
### AlertManager Rules
```yaml
groups:
- name: velero
rules:
- alert: VeleroBackupFailed
expr: increase(velero_backup_failure_total[1h]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Velero backup failed"
- alert: VeleroBackupMissing
expr: time() - velero_backup_last_successful_timestamp{schedule="daily-all-namespaces"} > 86400
for: 1h
labels:
severity: warning
annotations:
summary: "No successful backup in 24h"
- alert: VeleroNodeAgentDown
expr: kube_daemonset_status_number_unavailable{daemonset="node-agent"} > 0
for: 15m
labels:
severity: warning
```
---
## 10. Regular Maintenance Tasks
### Weekly
```bash
# Verify recent backup integrity
velero backup describe $(velero backup get -o json | jq -r '.items | sort_by(.status.startTimestamp) | last | .metadata.name') --details
# Check backup storage usage
mc ls minio/velero-backups --summarize
```
### Monthly
```bash
# Test restore to scratch namespace
velero restore create monthly-test-$(date +%Y%m) \
--from-backup $(velero backup get -o json | jq -r '.items[0].metadata.name') \
--include-namespaces seafile \
--namespace-mappings seafile:restore-test \
--wait
# Verify restored data
kubectl exec -n restore-test deploy/seafile -- ls -la /data
# Cleanup test
kubectl delete namespace restore-test
velero restore delete monthly-test-$(date +%Y%m) --confirm
```
### Quarterly
- Full DR test: restore to separate cluster
- Review retention policies
- Audit backup coverage (new namespaces/PVCs added?)
- Update velero/plugin versions
---
## Appendix: Common Label Selectors
```bash
# Backup by app label
--selector app.kubernetes.io/name=seafile
# Backup by component
--selector app.kubernetes.io/component=database
# Exclude specific pods from backup
# (add to pod annotation)
kubectl annotate pod <pod> backup.velero.io/backup-volumes-excludes=cache,tmp
```

View File

@@ -0,0 +1,13 @@
```bash
flux bootstrap gitea \
--owner=kacerr \
--repository=home-kubernetes \
--branch=main \
--path=gitops/home-kubernetes \
--hostname=gitea.home.hrajfrisbee.cz \
--personal \
--token-auth
flux token: <REDACTED>  # NOTE(review): the token previously recorded here was committed to git — revoke it in Gitea and never store tokens in the repo
```

View File

@@ -0,0 +1,3 @@
```bash
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
```

View File

@@ -34,8 +34,8 @@ driver:
targetGroupInitiatorGroup: 1
targetGroupAuthType: "None"
zfs:
datasetParentName: "pool-6g/tank/k8s/vols"
detachedSnapshotsDatasetParentName: "pool-6g/tank/k8s/snaps"
datasetParentName: "raid-1-4g/tank/k8s/vols"
detachedSnapshotsDatasetParentName: "raid-1-4g/tank/k8s/snaps"
storageClasses:
- name: freenas-iscsi

View File

@@ -27,8 +27,8 @@ driver:
targetGroupInitiatorGroup: 1
targetGroupAuthType: "None"
zfs:
datasetParentName: "pool-6g/tank/k8s/vols"
detachedSnapshotsDatasetParentName: "pool-6g/tank/k8s/snaps"
datasetParentName: "raid-1-4g/tank/k8s/vols"
detachedSnapshotsDatasetParentName: "raid-1-4g/tank/k8s/snaps"
storageClasses:
- name: freenas-iscsi

View File

@@ -229,9 +229,9 @@ resource "libvirt_volume" "cloudinit" {
resource "libvirt_domain" "master" {
provider = libvirt.kvm-homer
name = local.master_vm_name
memory = "2048"
memory = "4096"
memory_unit = "MiB"
vcpu = 2
vcpu = 3
type = "kvm"
autostart = true
running = true

View File

@@ -19,7 +19,7 @@ resource "libvirt_volume" "node_02_disk" {
type = "qcow2"
}
}
capacity = 21474836480
capacity = 53687091200
}
locals {
@@ -131,7 +131,18 @@ locals {
content: |
alias k='kubectl'
source <(kubectl completion bash)
complete -o default -F __start_kubectl k
complete -o default -F __start_kubectl k
- path: /etc/systemd/system/kubelet.service.d/10-containerd.conf
content: |
[Unit]
After=containerd.service
Requires=containerd.service
[Service]
ExecStartPre=/bin/bash -c 'until [ -S /var/run/containerd/containerd.sock ]; do sleep 1; done'
ExecStartPre=/usr/bin/crictl info
runcmd:
- systemctl enable --now qemu-guest-agent
@@ -151,6 +162,16 @@ locals {
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
- echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list
- apt-get update && apt-get install -y containerd.io
- |
cat > /etc/containerd/config.toml <<'CONTAINERD'
version = 2
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
CONTAINERD
- systemctl restart containerd
# kubeadm/kubelet/kubectl v1.32
@@ -215,6 +236,10 @@ resource "libvirt_domain" "node_02" {
autostart = true
running = true
cpu = {
mode = "host-passthrough"
}
os = {
type = "hvm"
type_arch = "x86_64"

View File

@@ -19,7 +19,7 @@ resource "libvirt_volume" "node_01_disk" {
type = "qcow2"
}
}
capacity = 21474836480
capacity = 53687091200
}
locals {
@@ -131,7 +131,18 @@ locals {
content: |
alias k='kubectl'
source <(kubectl completion bash)
complete -o default -F __start_kubectl k
complete -o default -F __start_kubectl k
- path: /etc/systemd/system/kubelet.service.d/10-containerd.conf
content: |
[Unit]
After=containerd.service
Requires=containerd.service
[Service]
ExecStartPre=/bin/bash -c 'until [ -S /var/run/containerd/containerd.sock ]; do sleep 1; done'
ExecStartPre=/usr/bin/crictl info
runcmd:
- systemctl enable --now qemu-guest-agent
@@ -151,6 +162,16 @@ locals {
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
- echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list
- apt-get update && apt-get install -y containerd.io
- |
cat > /etc/containerd/config.toml <<'CONTAINERD'
version = 2
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
CONTAINERD
- systemctl restart containerd
# kubeadm/kubelet/kubectl v1.32
@@ -215,6 +236,9 @@ resource "libvirt_domain" "node_01" {
autostart = true
running = true
cpu = {
mode = "host-passthrough"
}
os = {
type = "hvm"
type_arch = "x86_64"

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

134
shadow/iptables/rules.v4 Normal file
View File

@@ -0,0 +1,134 @@
# Generated by iptables-save v1.8.10 (nf_tables) on Sun Nov 17 01:37:49 2024
# NOTE(review): hand-kept snapshot of iptables-save output for host "shadow";
# restore with `iptables-restore < rules.v4`.
*mangle
:PREROUTING ACCEPT [756:126788]
:INPUT ACCEPT [715:122089]
:FORWARD ACCEPT [40:4623]
:OUTPUT ACCEPT [420:58795]
:POSTROUTING ACCEPT [460:63418]
:LIBVIRT_PRT - [0:0]
-A POSTROUTING -j LIBVIRT_PRT
# libvirt: fix UDP checksums on DHCP replies to guests (virbr0/virbr100)
-A LIBVIRT_PRT -o virbr100 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A LIBVIRT_PRT -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
COMMIT
# Completed on Sun Nov 17 01:37:49 2024
# Generated by iptables-save v1.8.10 (nf_tables) on Sun Nov 17 01:37:49 2024
*filter
# Default-deny on INPUT and FORWARD; OUTPUT is open.
:INPUT DROP [387:104781]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [42:5859]
:DOCKER - [0:0]
:DOCKER-ISOLATION-STAGE-1 - [0:0]
:DOCKER-ISOLATION-STAGE-2 - [0:0]
:DOCKER-USER - [0:0]
:LIBVIRT_FWI - [0:0]
:LIBVIRT_FWO - [0:0]
:LIBVIRT_FWX - [0:0]
:LIBVIRT_INP - [0:0]
:LIBVIRT_OUT - [0:0]
:f2b-sshd - [0:0]
-A INPUT -j LIBVIRT_INP
# fail2ban screens SSH before the general accept rules
-A INPUT -p tcp -m multiport --dports 22 -j f2b-sshd
-A INPUT -p icmp -j ACCEPT
# virbr100 (VM network 192.168.123.0/24) is fully trusted on INPUT
-A INPUT -i virbr100 -j ACCEPT
# virbr0: DNS/mDNS/DHCP for libvirt default network
-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT
-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT
-A INPUT -i virbr0 -p udp -m udp --dport 5353 -j ACCEPT
-A INPUT -i virbr0 -p tcp -m tcp --dport 5353 -j ACCEPT
-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT
-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT
# Published host services: 22/80/443/1022/2022.
# NOTE(review): mixes `-m conntrack` (port 22) with legacy `-m state` (others);
# behavior is equivalent, but consider standardizing on conntrack.
-A INPUT -p tcp -m tcp --dport 22 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 80 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 443 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 1022 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 2022 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
# FORWARD: allow DNS/mDNS/WireGuard/OpenVPN arriving on eno1 — these match the
# DNAT rules in the nat table that hand the traffic to VM 192.168.123.101.
-A FORWARD -i eno1 -p tcp -m tcp --dport 53 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 53 -j ACCEPT
-A FORWARD -i eno1 -p tcp -m tcp --dport 5353 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 5353 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 51820 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 1194 -j ACCEPT
# Docker-managed forwarding chains
-A FORWARD -j DOCKER-USER
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
# libvirt forwarding chains
-A FORWARD -j LIBVIRT_FWX
-A FORWARD -j LIBVIRT_FWI
-A FORWARD -j LIBVIRT_FWO
-A FORWARD -o br-8be00fb1442a -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o br-8be00fb1442a -j DOCKER
-A FORWARD -i br-8be00fb1442a ! -o br-8be00fb1442a -j ACCEPT
-A FORWARD -i br-8be00fb1442a -o br-8be00fb1442a -j ACCEPT
# Matches the nat-table DNAT of eno1:21080 -> 192.168.123.141:80
-A FORWARD -d 192.168.123.141/32 -p tcp -m tcp --dport 80 -j ACCEPT
-A OUTPUT -j LIBVIRT_OUT
-A OUTPUT -p tcp -m tcp --sport 22 -m conntrack --ctstate ESTABLISHED -j ACCEPT
-A OUTPUT -o lo -j ACCEPT
-A OUTPUT -o virbr100 -j ACCEPT
-A OUTPUT -m conntrack --ctstate ESTABLISHED -j ACCEPT
-A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -i br-8be00fb1442a ! -o br-8be00fb1442a -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -j RETURN
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -o br-8be00fb1442a -j DROP
-A DOCKER-ISOLATION-STAGE-2 -j RETURN
-A DOCKER-USER -j RETURN
# libvirt: inbound to guests only for established flows; everything else rejected
-A LIBVIRT_FWI -d 192.168.123.0/24 -o virbr100 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A LIBVIRT_FWI -o virbr100 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWI -o virbr1 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWI -d 192.168.122.0/24 -o virbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A LIBVIRT_FWI -o virbr0 -j REJECT --reject-with icmp-port-unreachable
# libvirt: outbound from guest subnets allowed
-A LIBVIRT_FWO -s 192.168.123.0/24 -i virbr100 -j ACCEPT
-A LIBVIRT_FWO -i virbr100 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWO -i virbr1 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWO -s 192.168.122.0/24 -i virbr0 -j ACCEPT
-A LIBVIRT_FWO -i virbr0 -j REJECT --reject-with icmp-port-unreachable
# libvirt: intra-bridge traffic allowed
-A LIBVIRT_FWX -i virbr100 -o virbr100 -j ACCEPT
-A LIBVIRT_FWX -i virbr1 -o virbr1 -j ACCEPT
-A LIBVIRT_FWX -i virbr0 -o virbr0 -j ACCEPT
# libvirt: host-side DNS/mDNS/DHCP services for guests
-A LIBVIRT_INP -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_INP -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_INP -p udp -m udp --dport 5353 -j ACCEPT
-A LIBVIRT_INP -p tcp -m tcp --dport 5353 -j ACCEPT
-A LIBVIRT_INP -p udp -m udp --dport 67 -j ACCEPT
-A LIBVIRT_INP -p tcp -m tcp --dport 67 -j ACCEPT
-A LIBVIRT_OUT -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -p udp -m udp --dport 5353 -j ACCEPT
-A LIBVIRT_OUT -p tcp -m tcp --dport 5353 -j ACCEPT
-A LIBVIRT_OUT -p udp -m udp --dport 68 -j ACCEPT
-A LIBVIRT_OUT -p tcp -m tcp --dport 68 -j ACCEPT
# fail2ban appends per-ban REJECT rules above this RETURN at runtime
-A f2b-sshd -j RETURN
COMMIT
# Completed on Sun Nov 17 01:37:49 2024
# Generated by iptables-save v1.8.10 (nf_tables) on Sun Nov 17 01:37:49 2024
*nat
:PREROUTING ACCEPT [409:105569]
:INPUT ACCEPT [22:1288]
:OUTPUT ACCEPT [1:76]
:POSTROUTING ACCEPT [12:818]
:DOCKER - [0:0]
:LIBVIRT_PRT - [0:0]
# Port-forward DNS (53, and 5353 -> 53), WireGuard (51820) and OpenVPN (1194)
# from the uplink eno1 into the utility VM at 192.168.123.101.
-A PREROUTING -i eno1 -p tcp -m tcp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p tcp -m tcp --dport 5353 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 5353 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 51820 -j DNAT --to-destination 192.168.123.101:51820
-A PREROUTING -i eno1 -p udp -m udp --dport 1194 -j DNAT --to-destination 192.168.123.101:1194
# Expose 192.168.123.141:80 on host port 21080
-A PREROUTING -i eno1 -p tcp -m tcp --dport 21080 -j DNAT --to-destination 192.168.123.141:80
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -j LIBVIRT_PRT
-A POSTROUTING -s 172.18.0.0/16 ! -o br-8be00fb1442a -j MASQUERADE
-A DOCKER -i docker0 -j RETURN
-A DOCKER -i br-8be00fb1442a -j RETURN
# libvirt NAT for the 192.168.123.0/24 guest network (skip multicast/broadcast)
-A LIBVIRT_PRT -s 192.168.123.0/24 -d 224.0.0.0/24 -j RETURN
-A LIBVIRT_PRT -s 192.168.123.0/24 -d 255.255.255.255/32 -j RETURN
-A LIBVIRT_PRT -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
-A LIBVIRT_PRT -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
-A LIBVIRT_PRT -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
COMMIT
# Completed on Sun Nov 17 01:37:49 2024

View File

@@ -0,0 +1,248 @@
# Generated by iptables-save v1.8.10 (nf_tables) on Sun Nov 17 01:37:49 2024
*mangle
:PREROUTING ACCEPT [756:126788]
:INPUT ACCEPT [715:122089]
:FORWARD ACCEPT [40:4623]
:OUTPUT ACCEPT [420:58795]
:POSTROUTING ACCEPT [460:63418]
:LIBVIRT_PRT - [0:0]
-A POSTROUTING -j LIBVIRT_PRT
-A POSTROUTING -o virbr100 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A POSTROUTING -o virbr100 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A POSTROUTING -o virbr100 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A POSTROUTING -o virbr100 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A LIBVIRT_PRT -o virbr100 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A LIBVIRT_PRT -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
COMMIT
# Completed on Sun Nov 17 01:37:49 2024
# Generated by iptables-save v1.8.10 (nf_tables) on Sun Nov 17 01:37:49 2024
*filter
:INPUT DROP [387:104781]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [42:5859]
:DOCKER - [0:0]
:DOCKER-ISOLATION-STAGE-1 - [0:0]
:DOCKER-ISOLATION-STAGE-2 - [0:0]
:DOCKER-USER - [0:0]
:LIBVIRT_FWI - [0:0]
:LIBVIRT_FWO - [0:0]
:LIBVIRT_FWX - [0:0]
:LIBVIRT_INP - [0:0]
:LIBVIRT_OUT - [0:0]
:f2b-sshd - [0:0]
-A INPUT -j LIBVIRT_INP
-A INPUT -p tcp -m multiport --dports 22 -j f2b-sshd
-A INPUT -p tcp -m multiport --dports 22 -j f2b-sshd
-A INPUT -p icmp -j ACCEPT
-A INPUT -p tcp -m multiport --dports 22 -j f2b-sshd
-A INPUT -i virbr100 -p udp -m udp --dport 53 -j ACCEPT
-A INPUT -i virbr100 -p tcp -m tcp --dport 53 -j ACCEPT
-A INPUT -i virbr100 -p udp -m udp --dport 67 -j ACCEPT
-A INPUT -i virbr100 -p tcp -m tcp --dport 67 -j ACCEPT
-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT
-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT
-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT
-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT
-A INPUT -i virbr100 -j ACCEPT
-A INPUT -i virbr100 -p udp -m udp --dport 53 -j ACCEPT
-A INPUT -i virbr100 -p tcp -m tcp --dport 53 -j ACCEPT
-A INPUT -i virbr100 -p udp -m udp --dport 67 -j ACCEPT
-A INPUT -i virbr100 -p tcp -m tcp --dport 67 -j ACCEPT
-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT
-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT
-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT
-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT
-A INPUT -p tcp -m multiport --dports 22 -j f2b-sshd
-A INPUT -p tcp -m tcp --dport 22 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 80 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 443 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 80 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 1022 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 2022 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A INPUT -i virbr100 -j ACCEPT
-A FORWARD -i eno1 -p tcp -m tcp --dport 53 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 53 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 51820 -j ACCEPT
-A FORWARD -j DOCKER-USER
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
-A FORWARD -j LIBVIRT_FWX
-A FORWARD -j LIBVIRT_FWI
-A FORWARD -j LIBVIRT_FWO
-A FORWARD -i eno1 -p tcp -m tcp --dport 53 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 53 -j ACCEPT
-A FORWARD -d 192.168.123.0/24 -o virbr100 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -s 192.168.123.0/24 -i virbr100 -j ACCEPT
-A FORWARD -i virbr100 -o virbr100 -j ACCEPT
-A FORWARD -o virbr100 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -i virbr100 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -d 192.168.122.0/24 -o virbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT
-A FORWARD -i virbr0 -o virbr0 -j ACCEPT
-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -i eno1 -p udp -m udp --dport 1194 -j ACCEPT
-A FORWARD -i eno1 -p tcp -m tcp --dport 53 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 53 -j ACCEPT
-A FORWARD -d 192.168.123.0/24 -o virbr100 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -s 192.168.123.0/24 -i virbr100 -j ACCEPT
-A FORWARD -i virbr100 -o virbr100 -j ACCEPT
-A FORWARD -o virbr100 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -i virbr100 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -d 192.168.122.0/24 -o virbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT
-A FORWARD -i virbr0 -o virbr0 -j ACCEPT
-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -o br-8be00fb1442a -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o br-8be00fb1442a -j DOCKER
-A FORWARD -i br-8be00fb1442a ! -o br-8be00fb1442a -j ACCEPT
-A FORWARD -i br-8be00fb1442a -o br-8be00fb1442a -j ACCEPT
-A FORWARD -d 192.168.123.141/32 -p tcp -m tcp --dport 80 -j ACCEPT
-A OUTPUT -j LIBVIRT_OUT
-A OUTPUT -o virbr100 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -o virbr0 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -o virbr100 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -o virbr0 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -o virbr100 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -o virbr0 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -p tcp -m tcp --sport 22 -m conntrack --ctstate ESTABLISHED -j ACCEPT
-A OUTPUT -o virbr0 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -o virbr100 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -o lo -j ACCEPT
-A OUTPUT -o virbr100 -j ACCEPT
-A OUTPUT -m conntrack --ctstate ESTABLISHED -j ACCEPT
-A OUTPUT -o virbr100 -j ACCEPT
-A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -i br-8be00fb1442a ! -o br-8be00fb1442a -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -j RETURN
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -o br-8be00fb1442a -j DROP
-A DOCKER-ISOLATION-STAGE-2 -j RETURN
-A DOCKER-USER -j RETURN
-A LIBVIRT_FWI -d 192.168.123.0/24 -o virbr100 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A LIBVIRT_FWI -o virbr100 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWI -o virbr1 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWI -d 192.168.122.0/24 -o virbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A LIBVIRT_FWI -o virbr0 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWO -s 192.168.123.0/24 -i virbr100 -j ACCEPT
-A LIBVIRT_FWO -i virbr100 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWO -i virbr1 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWO -s 192.168.122.0/24 -i virbr0 -j ACCEPT
-A LIBVIRT_FWO -i virbr0 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWX -i virbr100 -o virbr100 -j ACCEPT
-A LIBVIRT_FWX -i virbr1 -o virbr1 -j ACCEPT
-A LIBVIRT_FWX -i virbr0 -o virbr0 -j ACCEPT
-A LIBVIRT_INP -i virbr100 -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_INP -i virbr100 -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_INP -i virbr100 -p udp -m udp --dport 67 -j ACCEPT
-A LIBVIRT_INP -i virbr100 -p tcp -m tcp --dport 67 -j ACCEPT
-A LIBVIRT_INP -i virbr1 -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_INP -i virbr1 -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_INP -i virbr1 -p udp -m udp --dport 67 -j ACCEPT
-A LIBVIRT_INP -i virbr1 -p tcp -m tcp --dport 67 -j ACCEPT
-A LIBVIRT_INP -i virbr0 -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_INP -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_INP -i virbr0 -p udp -m udp --dport 67 -j ACCEPT
-A LIBVIRT_INP -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT
-A LIBVIRT_OUT -o virbr100 -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -o virbr100 -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -o virbr100 -p udp -m udp --dport 68 -j ACCEPT
-A LIBVIRT_OUT -o virbr100 -p tcp -m tcp --dport 68 -j ACCEPT
-A LIBVIRT_OUT -o virbr1 -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -o virbr1 -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -o virbr1 -p udp -m udp --dport 68 -j ACCEPT
-A LIBVIRT_OUT -o virbr1 -p tcp -m tcp --dport 68 -j ACCEPT
-A LIBVIRT_OUT -o virbr0 -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -o virbr0 -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -o virbr0 -p udp -m udp --dport 68 -j ACCEPT
-A LIBVIRT_OUT -o virbr0 -p tcp -m tcp --dport 68 -j ACCEPT
-A f2b-sshd -s 222.187.254.41/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 207.46.227.197/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 125.77.23.30/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 222.186.175.216/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 94.200.202.26/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 103.80.36.218/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 62.234.126.132/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 106.52.248.175/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 104.248.5.69/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 129.211.49.227/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 112.85.42.176/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 222.186.15.62/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 222.186.30.112/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 222.186.175.167/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 222.186.52.39/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 207.154.215.119/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 36.91.76.171/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 134.175.19.71/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 144.217.243.216/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 210.206.92.137/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 222.186.30.76/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 49.51.90.173/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 222.186.190.2/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -j RETURN
-A f2b-sshd -j RETURN
-A f2b-sshd -j RETURN
-A f2b-sshd -j RETURN
COMMIT
# Completed on Sun Nov 17 01:37:49 2024
# Generated by iptables-save v1.8.10 (nf_tables) on Sun Nov 17 01:37:49 2024
*nat
:PREROUTING ACCEPT [409:105569]
:INPUT ACCEPT [22:1288]
:OUTPUT ACCEPT [1:76]
:POSTROUTING ACCEPT [12:818]
:DOCKER - [0:0]
:LIBVIRT_PRT - [0:0]
-A PREROUTING -i eno1 -p tcp -m tcp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 51820 -j DNAT --to-destination 192.168.123.101:51820
-A PREROUTING -i eno1 -p tcp -m tcp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 1194 -j DNAT --to-destination 192.168.123.101:1194
-A PREROUTING -i eno1 -p tcp -m tcp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p tcp -m tcp --dport 21080 -j DNAT --to-destination 192.168.123.141:80
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -j LIBVIRT_PRT
-A POSTROUTING -s 192.168.123.0/24 -d 224.0.0.0/24 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 -d 255.255.255.255/32 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
-A POSTROUTING -s 192.168.123.0/24 -d 224.0.0.0/24 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 -d 255.255.255.255/32 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
-A POSTROUTING -s 172.18.0.0/16 ! -o br-8be00fb1442a -j MASQUERADE
-A POSTROUTING -s 192.168.123.0/24 -d 224.0.0.0/24 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 -d 255.255.255.255/32 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
-A POSTROUTING -s 192.168.123.0/24 -d 224.0.0.0/24 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 -d 255.255.255.255/32 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
-A DOCKER -i docker0 -j RETURN
-A DOCKER -i br-8be00fb1442a -j RETURN
-A LIBVIRT_PRT -s 192.168.123.0/24 -d 224.0.0.0/24 -j RETURN
-A LIBVIRT_PRT -s 192.168.123.0/24 -d 255.255.255.255/32 -j RETURN
-A LIBVIRT_PRT -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
-A LIBVIRT_PRT -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
-A LIBVIRT_PRT -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
COMMIT
# Completed on Sun Nov 17 01:37:49 2024

View File

@@ -0,0 +1,363 @@
##
# You should look at the following URL's in order to grasp a solid understanding
# of Nginx configuration files in order to fully unleash the power of Nginx.
# https://www.nginx.com/resources/wiki/start/
# https://www.nginx.com/resources/wiki/start/topics/tutorials/config_pitfalls/
# https://wiki.debian.org/Nginx/DirectoryStructure
#
# In most cases, administrators will remove this file from sites-enabled/ and
# leave it as reference inside of sites-available where it will continue to be
# updated by the nginx packaging team.
#
# This file will automatically load configuration files provided by other
# applications, such as Drupal or Wordpress. These applications will be made
# available underneath a path with that package name, such as /drupal8.
#
# Please see /usr/share/doc/nginx-doc/examples/ for more detailed examples.
##
# Default server configuration
#
server {
listen 80 default_server;
# listen [::]:80 default_server;
# SSL configuration
#
# listen 443 ssl default_server;
# listen [::]:443 ssl default_server;
#
# Note: You should disable gzip for SSL traffic.
# See: https://bugs.debian.org/773332
#
# Read up on ssl_ciphers to ensure a secure configuration.
# See: https://bugs.debian.org/765782
#
# Self signed certs generated by the ssl-cert package
# Don't use them in a production server!
#
# include snippets/snakeoil.conf;
root /var/www/html;
# Add index.php to the list if you are using PHP
index index.html index.htm index.nginx-debian.html;
server_name _;
location / {
# First attempt to serve request as file, then
# as directory, then fall back to displaying a 404.
try_files $uri $uri/ =404;
}
# pass PHP scripts to FastCGI server
#
#location ~ \.php$ {
# include snippets/fastcgi-php.conf;
#
# # With php-fpm (or other unix sockets):
# fastcgi_pass unix:/run/php/php7.4-fpm.sock;
# # With php-cgi (or other tcp sockets):
# fastcgi_pass 127.0.0.1:9000;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
server {
listen 80;
server_name *.lab.home.hrajfrisbee.cz;
location / {
proxy_pass http://docker-30:9080;
proxy_set_header Host $host;
}
}
server {
# listen [::]:80 default_server;
# SSL configuration
#
# listen 443 ssl default_server;
# listen [::]:443 ssl default_server;
#
# Note: You should disable gzip for SSL traffic.
# See: https://bugs.debian.org/773332
#
# Read up on ssl_ciphers to ensure a secure configuration.
# See: https://bugs.debian.org/765782
#
# Self signed certs generated by the ssl-cert package
# Don't use them in a production server!
#
# include snippets/snakeoil.conf;
root /var/www/html;
# Add index.php to the list if you are using PHP
index index.html index.htm index.nginx-debian.html;
}
server {
server_name teleport.hrajfrisbee.cz; # managed by Certbot
location / {
proxy_pass https://192.168.123.26:443;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# WebSocket upgrade settings - CRITICAL for Teleport
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
# Disable buffering, which can cause issues with real-time connections
proxy_buffering off;
}
listen 8443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/teleport.hrajfrisbee.cz/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/teleport.hrajfrisbee.cz/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
if ($host = teleport.hrajfrisbee.cz) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80 ;
server_name teleport.hrajfrisbee.cz;
return 404; # managed by Certbot
}
server {
root /var/www/html;
# Add index.php to the list if you are using PHP
index index.html index.htm index.nginx-debian.html;
server_name gitea.home.hrajfrisbee.cz; # managed by Certbot
location / {
proxy_pass http://docker-30:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# Gitea Git over HTTP
client_max_body_size 512m;
listen 8443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/gitea.home.hrajfrisbee.cz/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/gitea.home.hrajfrisbee.cz/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
if ($host = gitea.home.hrajfrisbee.cz) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80 ;
server_name gitea.home.hrajfrisbee.cz;
return 404; # managed by Certbot
}
server {
server_name idm.home.hrajfrisbee.cz; # managed by Certbot
location / {
proxy_pass https://docker-30:8443;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
listen 8443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/idm.home.hrajfrisbee.cz/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/idm.home.hrajfrisbee.cz/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
if ($host = idm.home.hrajfrisbee.cz) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80 ;
server_name idm.home.hrajfrisbee.cz;
return 404; # managed by Certbot
}
server {
server_name jellyfin.home.hrajfrisbee.cz; # managed by Certbot
# Security headers for media streaming
add_header X-Frame-Options "SAMEORIGIN";
add_header X-XSS-Protection "1; mode=block";
add_header X-Content-Type-Options "nosniff";
# Increase body size for high-res movie posters
client_max_body_size 20M;
location / {
# Proxy to the Jellyfin host — TLS endpoint on docker-30:443
# (not the Jellyfin default HTTP port 8096)
proxy_pass https://docker-30:443;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Protocol $scheme;
proxy_set_header X-Forwarded-Host $http_host;
# Disable buffering for smoother streaming
proxy_buffering off;
}
listen 8443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/jellyfin.home.hrajfrisbee.cz/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/jellyfin.home.hrajfrisbee.cz/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
if ($host = jellyfin.home.hrajfrisbee.cz) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80 ;
server_name jellyfin.home.hrajfrisbee.cz;
return 404; # managed by Certbot
}
server {
root /srv/webs/random-shit;
server_name random-shit.hrajfrisbee.cz; # managed by Certbot
# Enable directory browsing
autoindex on;
# Optional: Show file sizes in MB/GB instead of bytes
autoindex_exact_size off;
# Optional: Show file timestamps in your local server time
autoindex_localtime on;
# Optional: Choose format (html, xml, json, or jsonp)
autoindex_format html;
listen 8443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/random-shit.hrajfrisbee.cz/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/random-shit.hrajfrisbee.cz/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
if ($host = random-shit.hrajfrisbee.cz) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80 ;
server_name random-shit.hrajfrisbee.cz;
return 404; # managed by Certbot
}
server {
root /var/www/html;
server_name vault.hrajfrisbee.cz; # managed by Certbot
location / {
proxy_pass http://docker-30:8200;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Required for Vault
proxy_buffering off;
proxy_request_buffering off;
proxy_http_version 1.1;
proxy_set_header Connection "";
# Timeouts for long-running ops
proxy_connect_timeout 300;
proxy_send_timeout 300;
proxy_read_timeout 300; }
listen 8443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/vault.hrajfrisbee.cz/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/vault.hrajfrisbee.cz/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
if ($host = vault.hrajfrisbee.cz) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80 ;
server_name vault.hrajfrisbee.cz;
return 404; # managed by Certbot
}
server {
server_name maru-hleda-byt.home.hrajfrisbee.cz; # managed by Certbot
location / {
proxy_pass http://docker-30:8080;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
listen 8443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/maru-hleda-byt.home.hrajfrisbee.cz/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/maru-hleda-byt.home.hrajfrisbee.cz/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
if ($host = maru-hleda-byt.home.hrajfrisbee.cz) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80 ;
server_name maru-hleda-byt.home.hrajfrisbee.cz;
return 404; # managed by Certbot
}

114
shadow/nginx.conf Normal file
View File

@@ -0,0 +1,114 @@
user www-data;
worker_processes auto;
pid /run/nginx.pid;
error_log /var/log/nginx/error.log;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1.2 TLSv1.3; # Dropping SSLv3/TLSv1/TLSv1.1 — deprecated and insecure (POODLE, RFC 8996)
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
stream {
map $ssl_preread_server_name $backend {
# Passthrough to K8s
ghost.lab.home.hrajfrisbee.cz k8s_gatewayapi;
~^.+\.lab\.home\.hrajfrisbee\.cz$ k8s_ingress;
lab.home.hrajfrisbee.cz k8s_ingress;
default local_https;
}
upstream k8s_ingress {
server docker-30:9443;
}
upstream k8s_gatewayapi {
server docker-30:9444;
}
upstream local_https {
server 127.0.0.1:8443; # Loop back to http block
}
server {
listen 443;
ssl_preread on;
proxy_pass $backend;
}
}
#mail {
# # See sample authentication script at:
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
# # auth_http localhost/auth.php;
# # pop3_capabilities "TOP" "USER";
# # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
# server {
# listen localhost:110;
# protocol pop3;
# proxy on;
# }
#
# server {
# listen localhost:143;
# protocol imap;
# proxy on;
# }
#}

View File

@@ -5,5 +5,5 @@ metadata:
namespace: kube-system
spec:
cidrs:
- start: "192.168.0.31"
- start: "192.168.0.35"
stop: "192.168.0.39"

View File

@@ -0,0 +1,54 @@
//
// Do any local configuration here
//
// Consider adding the 1918 zones here, if they are not used in your
// organization
//include "/etc/bind/zones.rfc1918";
key "acme-update-key" {
algorithm hmac-sha512;
// WARNING: this TSIG secret is committed in plaintext. Rotate it and load it
// from an untracked include file (e.g. include "/etc/bind/acme-update-key.conf";).
secret "T6R1TpLGegHwFWO/I1LwtdGePRD+w00Oe4mJECW7qfheKJ/7FxlINH+Yk2vMvJCVNojj8BWoFAyEFCwGBpGROQ==";
};
zone "czechultimate.cz" {
type master;
file "/etc/bind/zones/czechultimate.cz.dns";
inline-signing yes;
auto-dnssec maintain;
key-directory "/etc/bind/keys";
allow-transfer {87.236.197.83; 89.187.144.180; 87.236.196.85; };
also-notify {87.236.197.83; 89.187.144.180; 87.236.196.85; };
};
zone "hrajfrisbee.cz" {
type master;
file "/etc/bind/zones/hrajfrisbee.cz.dns";
allow-transfer {87.236.197.83; 89.187.144.180; 87.236.196.85; };
also-notify {87.236.197.83; 89.187.144.180; 87.236.196.85; };
update-policy {
// Allow ACME challenges only for lab.home subdomain
grant acme-update-key name _acme-challenge.lab.home.hrajfrisbee.cz. TXT;
// If you need wildcards under lab.home (e.g. _acme-challenge.foo.lab.home.hrajfrisbee.cz):
grant acme-update-key subdomain _acme-challenge.lab.home.hrajfrisbee.cz. TXT;
};
};
// NOTE: the registry (nic.cz) delegates this domain to the zlutazimnice
// nameservers, so serving it from here cannot currently work.
zone "fraktalbar.cz" {
type master;
file "/etc/bind/zones/fraktalbar.cz.dns";
allow-transfer {87.236.197.83; 89.187.144.180; 87.236.196.85; };
also-notify {87.236.197.83; 89.187.144.180; 87.236.196.85; };
};
// NOTE: the registry (nic.cz) delegates this domain to the zlutazimnice
// nameservers, so serving it from here cannot currently work.
zone "vegtral.cz" {
type master;
file "/etc/bind/zones/vegtral.cz.dns";
allow-transfer {87.236.197.83; 89.187.144.180; 87.236.196.85; };
also-notify {87.236.197.83; 89.187.144.180; 87.236.196.85; };
};

View File

@@ -0,0 +1,7 @@
## named tweaks
1. Generate TSIG key
```bash
tsig-keygen -a hmac-sha512 acme-update-key
```