Compare commits

...

12 Commits

Author SHA1 Message Date
Jan Novak
0eab64c954 hosting: some config files for host: shadow, some named conf for
utility-101-shadow vm
2026-02-20 02:16:16 +01:00
Jan Novak
be362a5ab7 gitops/cilium: configure gateway and wildcard certificate it needs 2026-02-20 02:15:02 +01:00
Jan Novak
bb9f2ae3ce docker-30: several new and forgotten config files relevant to services
running in docker
2026-02-20 02:13:55 +01:00
Jan Novak
dc947165a4 gitops/ghost: add httproute resource aka gatewayApi instead of ingress 2026-02-20 02:13:09 +01:00
Jan Novak
1cd7625220 gitops/cert-manager: add dns challenge cluster issuer, add
deployment/service with socat proxy that works around my internet
provider's meddling with dns traffic on port 53.
2026-02-20 02:11:50 +01:00
Jan Novak
409f8247e6 gitops/cert-manager: enable Gateway API support
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 01:43:04 +01:00
Jan Novak
8608696909 gitops/cilium: fix gateway.yaml indentation
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 01:04:18 +01:00
Jan Novak
6454c893cb gitops/cilium: move gateway listeners from helm values to Gateway resource
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 01:02:14 +01:00
Jan Novak
b2daa822a6 gitops/cilium: configure gateway listeners and allow routes from all namespaces
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 00:51:37 +01:00
Jan Novak
8ae7b086a5 gitops/00-crds: add Gateway API v1.2.0 CRDs for Cilium gateway support
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-17 12:17:46 +01:00
Jan Novak
4b7ed6085b gitops/cilium: enable Gateway API and add HTTPRoute for ghost
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-17 11:55:49 +01:00
Jan Novak
0d97a796e9 gitops/velero: add manifests and runbook - kustomization is yet to be
created
2026-01-17 00:07:03 +01:00
29 changed files with 11988 additions and 4 deletions

View File

@@ -0,0 +1,46 @@
# nginx.conf
error_log /dev/stderr;
http {
server {
listen 9080;
location / {
proxy_pass http://192.168.0.35:80;
proxy_set_header Host $host;
}
}
log_format detailed '$remote_addr - [$time_local] '
'"$request_method $host$request_uri" '
'$status $body_bytes_sent '
'"$http_referer" "$http_user_agent"';
access_log /dev/stdout detailed;
}
stream {
# Stream doesn't log by default, enable explicitly:
log_format stream_log '$remote_addr [$time_local] '
'$protocol $ssl_preread_server_name '
'$status $bytes_sent $bytes_received $session_time';
access_log /dev/stdout stream_log;
# Nginx ingress in kubernetes
server {
listen 9443;
proxy_pass 192.168.0.35:443;
}
# Gateway provided by cilium/envoy
server {
listen 9444;
proxy_pass 192.168.0.36:443;
}
}
events {}

View File

@@ -0,0 +1,9 @@
docker rm -f lab-proxy || /usr/bin/true
docker run -d --name lab-proxy \
--restart unless-stopped \
-v /srv/docker/lab-proxy/nginx.conf:/etc/nginx/nginx.conf:ro \
-p 9443:9443 \
-p 9444:9444 \
-p 9080:9080 \
nginx:alpine

View File

@@ -0,0 +1,9 @@
#!/bin/bash
docker rm -f maru-hleda-byt
# gitea registry login with kacerr / token
docker run -d --name maru-hleda-byt \
-p 8080:8080 \
-v /srv/maru-hleda-byt/data:/app/data \
gitea.home.hrajfrisbee.cz/littlemeat/maru-hleda-byt:0.01

View File

@@ -0,0 +1,22 @@
server {
listen 443 ssl http2;
server_name gitea.home.hrajfrisbee.cz;
ssl_certificate /etc/letsencrypt/live/gitea.home.hrajfrisbee.cz/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/gitea.home.hrajfrisbee.cz/privkey.pem;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
location / {
proxy_pass http://192.168.0.30:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# Gitea Git over HTTP
client_max_body_size 512m;
}

View File

@@ -0,0 +1,35 @@
server {
listen 443 ssl http2;
server_name jellyfin.home.hrajfrisbee.cz;
ssl_certificate /etc/letsencrypt/live/gitea.home.hrajfrisbee.cz/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/gitea.home.hrajfrisbee.cz/privkey.pem;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
# Security headers for media streaming
add_header X-Frame-Options "SAMEORIGIN";
add_header X-XSS-Protection "1; mode=block";
add_header X-Content-Type-Options "nosniff";
# Increase body size for high-res movie posters
client_max_body_size 20M;
location / {
# Proxy to your Synology or VM IP and Jellyfin port (default 8096)
proxy_pass http://192.168.0.2:8096;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Protocol $scheme;
proxy_set_header X-Forwarded-Host $http_host;
# Disable buffering for smoother streaming
proxy_buffering off;
}
}

View File

@@ -29,10 +29,10 @@ log "Backup size: ${BACKUP_SIZE} bytes"
# --- Upload to MinIO ---
log "Uploading to ${MC_ALIAS}/${S3_BUCKET}..."
set -x
mc cp --quiet "${BACKUP_FILE}" "${MC_ALIAS}/${S3_BUCKET}/vault-backup-${TIMESTAMP}.tar.gz"
minio-cli cp --quiet "${BACKUP_FILE}" "${MC_ALIAS}/${S3_BUCKET}/vault-backup-${TIMESTAMP}.tar.gz"
# --- Prune old backups ---
log "Pruning backups older than ${RETENTION_DAYS} days..."
mc rm --quiet --recursive --force --older-than "${RETENTION_DAYS}d" "${MC_ALIAS}/${S3_BUCKET}/"
minio-cli rm --quiet --recursive --force --older-than "${RETENTION_DAYS}d" "${MC_ALIAS}/${S3_BUCKET}/"
log "Backup complete: vault-backup-${TIMESTAMP}.tar.gz"

File diff suppressed because it is too large

View File

@@ -0,0 +1,22 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod-dns
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: kacerr.cz@gmail.com
    privateKeySecretRef:
      name: letsencrypt-dns-account-key
    solvers:
      - dns01:
          rfc2136:
            nameserver: dns-update-proxy.cert-manager.svc.cluster.local:53
            tsigKeyName: acme-update-key
            tsigAlgorithm: HMACSHA512
            tsigSecretSecretRef:
              name: acme-update-key
              key: acme-update-key
        selector:
          dnsZones:
            - "lab.home.hrajfrisbee.cz"

View File

@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dns-update-proxy
  namespace: cert-manager
spec:
  replicas: 1
  selector:
    matchLabels:
      app: dns-update-proxy
  template:
    metadata:
      labels:
        app: dns-update-proxy
    spec:
      containers:
        - name: socat-tcp
          image: alpine/socat
          args:
            - TCP-LISTEN:53,fork,reuseaddr
            - TCP:87.236.195.209:5353
          ports:
            - containerPort: 53
              protocol: TCP
        - name: socat-udp
          image: alpine/socat
          args:
            - -T5
            - UDP-RECVFROM:53,fork,reuseaddr
            - UDP:87.236.195.209:5353
          ports:
            - containerPort: 53
              protocol: UDP
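
The two socat containers relay in-cluster port-53 traffic to the upstream BIND listening on 87.236.195.209:5353, so the RFC2136 solver's updates never cross the ISP's port-53 filtering. A quick in-cluster sanity check might look like this (a sketch; the debug image and record name are illustrative):

```bash
# Resolve through the proxy Service from a throwaway pod (assumes the
# alpine/bind-tools image; any image shipping dig works).
kubectl run dns-test --rm -it --restart=Never --image=alpine/bind-tools -- \
  dig @dns-update-proxy.cert-manager.svc.cluster.local \
  _acme-challenge.lab.home.hrajfrisbee.cz TXT +tcp
```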

View File

@@ -0,0 +1,18 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: acme-update-key
  namespace: cert-manager
spec:
  refreshInterval: 1h
  secretStoreRef:
    name: vault-backend # or your store
    kind: ClusterSecretStore
  target:
    name: acme-update-key
    creationPolicy: Owner
  data:
    - secretKey: acme-update-key
      remoteRef:
        key: k8s_home/cert-manager
        property: acme-update-key
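
The ExternalSecret pulls the TSIG key material from `k8s_home/cert-manager` under the property `acme-update-key`. Assuming `vault-backend` points at a HashiCorp Vault KV engine mounted at `k8s_home/` (both the backend type and the mount path are assumptions here), seeding it could look like:

```bash
# Extract the base64 secret from tsig-keygen output and store it in Vault.
# GNU grep with -P is assumed.
SECRET=$(tsig-keygen -a hmac-sha512 acme-update-key | grep -oP 'secret "\K[^"]+')
vault kv put k8s_home/cert-manager acme-update-key="$SECRET"
```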

View File

@@ -19,8 +19,14 @@ spec:
  upgrade:
    crds: CreateReplace
  values:
    global:
      logLevel: 6
    crds:
      enabled: false
    config:
      apiVersion: controller.config.cert-manager.io/v1alpha1
      kind: ControllerConfiguration
      enableGatewayAPI: true
    prometheus:
      enabled: true
    extraObjects:

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  name: dns-update-proxy
  namespace: cert-manager
spec:
  selector:
    app: dns-update-proxy
  ports:
    - name: dns-tcp
      port: 53
      targetPort: 53
      protocol: TCP
    - name: dns-udp
      port: 53
      targetPort: 53
      protocol: UDP

View File

@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: wildcard-lab-home-hrajfrisbee
  namespace: kube-system
spec:
  secretName: wildcard-lab-home-hrajfrisbee-tls
  issuerRef:
    name: letsencrypt-prod-dns
    kind: ClusterIssuer
  dnsNames:
    - "*.lab.home.hrajfrisbee.cz"

View File

@@ -0,0 +1,27 @@
---
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: cilium-gateway
namespace: kube-system
spec:
gatewayClassName: cilium
listeners:
- name: http
port: 80
protocol: HTTP
allowedRoutes:
namespaces:
from: All
- name: lab-home-hrajfrisbee-https-wildcard
hostname: "*.lab.home.hrajfrisbee.cz"
port: 443
protocol: HTTPS
tls:
mode: Terminate
certificateRefs:
- kind: Secret
name: wildcard-lab-home-hrajfrisbee-tls
allowedRoutes:
namespaces:
from: All

View File

@@ -31,6 +31,13 @@ spec:
        clusterPoolIPv4PodCIDRList: "10.96.0.0/16"
    l2announcements:
      enabled: true
    gatewayAPI:
      enabled: true
    kubeProxyReplacement: true
    k8sServiceHost: 192.168.0.31 # or LB IP
    k8sServicePort: 6443
    # disable envoy daemonset - i guess that is a stupid idea anyway
    # envoy:
    #   enabled: false
    # l7Proxy: false
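
With `gatewayAPI.enabled` flipped on (and the Gateway API CRDs from gitops/00-crds in place), Cilium should register a GatewayClass and program the Gateway defined above:

```bash
# GatewayClass should report Accepted; the Gateway should get an address
# (192.168.0.36, per the "Gateway provided by cilium/envoy" upstream in lab-proxy).
kubectl get gatewayclass cilium
kubectl get gateway -n kube-system cilium-gateway
```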

View File

@@ -0,0 +1,30 @@
---
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: ghost-on-kubernetes-redirect
namespace: ghost-on-kubernetes
labels:
app: ghost-on-kubernetes
app.kubernetes.io/name: ghost-on-kubernetes-httproute
app.kubernetes.io/instance: ghost-on-kubernetes
app.kubernetes.io/version: '6.0'
app.kubernetes.io/component: httproute
app.kubernetes.io/part-of: ghost-on-kubernetes
spec:
parentRefs:
- name: cilium-gateway
namespace: kube-system
sectionName: http
hostnames:
- ghost.lab.home.hrajfrisbee.cz
rules:
- matches:
- path:
type: PathPrefix
value: /
filters:
- type: RequestRedirect
requestRedirect:
scheme: https
statusCode: 301

View File

@@ -0,0 +1,29 @@
---
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: ghost-on-kubernetes
namespace: ghost-on-kubernetes
labels:
app: ghost-on-kubernetes
app.kubernetes.io/name: ghost-on-kubernetes-httproute
app.kubernetes.io/instance: ghost-on-kubernetes
app.kubernetes.io/version: '6.0'
app.kubernetes.io/component: httproute
app.kubernetes.io/part-of: ghost-on-kubernetes
spec:
parentRefs:
- name: cilium-gateway
namespace: kube-system
sectionName: lab-home-hrajfrisbee-https-wildcard
hostnames:
- ghost.lab.home.hrajfrisbee.cz
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- name: ghost-on-kubernetes-service
namespace: ghost-on-kubernetes
port: 2368
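
Together the two routes form the usual redirect-then-serve pair. A hedged end-to-end check from a machine that resolves the lab zone:

```bash
# Plain HTTP should answer 301 via the redirect route; HTTPS should return
# Ghost through the wildcard HTTPS listener.
curl -si http://ghost.lab.home.hrajfrisbee.cz/ | head -n 3
curl -sk https://ghost.lab.home.hrajfrisbee.cz/ -o /dev/null -w '%{http_code}\n'
```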

View File

@@ -25,7 +25,7 @@ spec:
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            pathType: Prefix
            backend:
              service:
                name: ghost-on-kubernetes-service

View File

@@ -0,0 +1,141 @@
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: velero
namespace: velero
spec:
interval: 30m
chart:
spec:
chart: velero
version: "11.3.2" # Velero 1.16.x - latest stable as of Jan 2025
sourceRef:
kind: HelmRepository
name: vmware-tanzu
namespace: flux-system
install:
crds: CreateReplace
remediation:
retries: 3
upgrade:
crds: CreateReplace
remediation:
retries: 3
values:
# Node agent for filesystem backups (kopia/restic)
deployNodeAgent: true
nodeAgent:
podVolumePath: /var/lib/kubelet/pods
# nodeAgent.privileged removed in chart 8.x+, use containerSecurityContext instead
containerSecurityContext:
privileged: true
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
memory: 1Gi
configuration:
# backupStorageLocation - note: provider is at same level as bucket, not nested
backupStorageLocation:
- name: default
provider: aws
bucket: velero-backups # create this bucket in minio first
accessMode: ReadWrite
default: true
config:
region: us-east-1 # minio ignores but required
s3ForcePathStyle: "true"
s3Url: http://192.168.0.2:9000 # adjust to your minio service
# Volume snapshot location (for CSI snapshots, optional)
volumeSnapshotLocation:
- name: default
provider: aws
config:
region: us-east-1
# Use kopia for fs backups (restic deprecated, kopia is default in 1.14+)
uploaderType: kopia
# Default TTL for backups
defaultBackupTTL: 720h # 30 days
# Features
defaultVolumesToFsBackup: false # opt-in via annotation per-pod
# Credentials
credentials:
useSecret: true
existingSecret: velero-minio-credentials
# Velero server resources
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
memory: 512Mi
# Schedules
schedules:
daily-all-namespaces:
disabled: false
schedule: "0 3 * * *" # 3 AM daily
useOwnerReferencesInBackup: false
template:
ttl: 168h # 7 days
storageLocation: default
includedNamespaces:
- "*"
excludedNamespaces:
- kube-system
- kube-public
- kube-node-lease
- flux-system
- velero
excludedResources:
- events
- events.events.k8s.io
snapshotVolumes: false
defaultVolumesToFsBackup: true
weekly-full:
disabled: false
schedule: "0 4 * * 0" # Sunday 4 AM
template:
ttl: 720h # 30 days
storageLocation: default
includedNamespaces:
- "*"
excludedNamespaces:
- kube-system
- kube-public
- kube-node-lease
snapshotVolumes: false
defaultVolumesToFsBackup: true
# Init containers for plugins - AWS plugin for S3-compatible storage
# Note: CSI plugin merged into velero core in v1.14, no separate initContainer needed
initContainers:
- name: velero-plugin-for-aws
image: velero/velero-plugin-for-aws:v1.11.0 # compatible with Velero 1.15/1.16
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /target
name: plugins
# Metrics
metrics:
enabled: true
serviceMonitor:
enabled: false # set true if using prometheus-operator
additionalLabels: {}
# Disable volume snapshots if not using CSI snapshotter
snapshotsEnabled: false
# Pod annotations/labels
podAnnotations: {}
podLabels: {}

View File

@@ -0,0 +1,8 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: vmware-tanzu
  namespace: flux-system
spec:
  interval: 24h
  url: https://vmware-tanzu.github.io/helm-charts

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: velero

View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
  name: velero-minio-credentials
  namespace: velero
stringData:
  cloud: |
    [default]
    aws_access_key_id=k8s
    aws_secret_access_key=poh9ieceHohnum5e

View File

@@ -0,0 +1,625 @@
# Velero Backup & Recovery Runbook
## Quick Reference
| Operation | Command |
|-----------|---------|
| List backups | `velero backup get` |
| Backup status | `velero backup describe <name> --details` |
| Browse backup contents | `velero backup describe <name> --details \| grep -A100 "Resource List"` |
| Restore full namespace | `velero restore create --from-backup <name> --include-namespaces <ns>` |
| Restore single PVC | `velero restore create --from-backup <name> --include-resources pvc,pv --selector app=<label>` |
| Restore specific files | See [Specific File Restore](#specific-file-restore) |
---
## 1. Browsing Backup Contents
### List All Backups
```bash
# All backups with status
velero backup get
# Backups for specific schedule
velero backup get -l velero.io/schedule-name=daily-all-namespaces
# JSON output for scripting
velero backup get -o json | jq '.items[] | {name: .metadata.name, phase: .status.phase, started: .status.startTimestamp}'
```
### Inspect Backup Contents
```bash
# Full backup details including all resources
velero backup describe <backup-name> --details
# List backed-up namespaces
velero backup describe <backup-name> --details | grep -A 5 "Namespaces:"
# List all resources in backup
velero backup describe <backup-name> --details | grep -A 200 "Resource List:" | head -100
# Check which PVCs were backed up
velero backup describe <backup-name> --details | grep -i persistentvolumeclaim
# Check pod volume backups (kopia/restic)
velero backup describe <backup-name> --details | grep -A 50 "Pod Volume Backups"
```
### View Backup Logs
```bash
# Stream logs
velero backup logs <backup-name>
# Search for errors
velero backup logs <backup-name> | grep -i error
# Check specific namespace backup
velero backup logs <backup-name> | grep "namespace=seafile"
```
### Browse Kopia Repository Directly
For direct file-level inspection of kopia backups in MinIO:
```bash
# Get kopia repository password from velero secret
KOPIA_PASSWORD=$(kubectl get secret -n velero velero-repo-credentials -o jsonpath='{.data.repository-password}' | base64 -d)
# Connect to repository (run from a pod with minio access or port-forward)
kopia repository connect s3 \
--bucket=velero-backups \
--endpoint=minio.minio.svc.cluster.local:9000 \
--access-key=<MINIO_ACCESS_KEY> \
--secret-access-key=<MINIO_SECRET_KEY> \
--password="${KOPIA_PASSWORD}" \
--prefix=kopia/<cluster-name>/
# List snapshots
kopia snapshot list --all
# Browse specific snapshot
kopia snapshot list <snapshot-id>
kopia ls <snapshot-id>
# Mount for browsing (requires FUSE)
mkdir /tmp/kopia-mount
kopia mount <snapshot-id> /tmp/kopia-mount &
ls /tmp/kopia-mount/
```
---
## 2. Full Namespace Restore
### Restore to Same Cluster (Disaster Recovery)
```bash
# Restore entire namespace
velero restore create seafile-restore \
--from-backup daily-all-namespaces-20250115030000 \
--include-namespaces seafile \
--wait
# Monitor restore progress
velero restore describe seafile-restore --details
velero restore logs seafile-restore -f
```
### Restore to Different Namespace
```bash
velero restore create seafile-test-restore \
--from-backup daily-all-namespaces-20250115030000 \
--include-namespaces seafile \
--namespace-mappings seafile:seafile-restored \
--wait
```
### Restore with Resource Filtering
```bash
# Restore only specific resource types
velero restore create restore-pvcs-only \
--from-backup <backup-name> \
--include-namespaces seafile \
--include-resources persistentvolumeclaims,persistentvolumes \
--wait
# Exclude certain resources
velero restore create restore-no-secrets \
--from-backup <backup-name> \
--include-namespaces seafile \
--exclude-resources secrets \
--wait
# Restore by label selector
velero restore create restore-app \
--from-backup <backup-name> \
--selector app.kubernetes.io/name=seafile \
--wait
```
---
## 3. Single PVC/Volume Restore
### Restore Specific PVC
```bash
# First, scale down the workload using the PVC
kubectl scale deployment seafile -n seafile --replicas=0
# Delete the corrupted/problematic PVC (data will be restored)
kubectl delete pvc seafile-data -n seafile
# Restore just that PVC
velero restore create restore-seafile-pvc \
--from-backup <backup-name> \
--include-namespaces seafile \
--include-resources persistentvolumeclaims,persistentvolumes \
--selector app=seafile \
--wait
# Scale back up
kubectl scale deployment seafile -n seafile --replicas=1
```
### Restore PVC to New Name (Side-by-Side)
```bash
# Create restore with transforms
cat <<EOF | kubectl apply -f -
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: restore-pvc-new-name
  namespace: velero
spec:
  backupName: <backup-name>
  includedNamespaces:
    - seafile
  includedResources:
    - persistentvolumeclaims
    - persistentvolumes
  labelSelector:
    matchLabels:
      app: seafile
  restorePVs: true
  namespaceMapping:
    seafile: seafile-recovery
EOF
# Or use restore hooks to rename
velero restore create restore-pvc-renamed \
--from-backup <backup-name> \
--include-namespaces seafile \
--namespace-mappings seafile:seafile-temp \
--wait
```
---
## 4. Specific File Restore
Velero doesn't support single-file restore natively. Use kopia directly:
### Method 1: Kopia Direct Restore
```bash
# Find the backup/snapshot containing your file
# First, get velero's kopia repo credentials
REPO_PASSWORD=$(kubectl get secret -n velero velero-repo-credentials \
-o jsonpath='{.data.repository-password}' | base64 -d)
# Run a debug pod with kopia
kubectl run kopia-restore --rm -it \
--image=kopia/kopia:latest \
--restart=Never \
--namespace=velero \
-- /bin/sh
# Inside the pod:
kopia repository connect s3 \
--bucket=velero-backups \
--endpoint=minio.minio.svc.cluster.local:9000 \
--access-key=<ACCESS_KEY> \
--secret-access-key=<SECRET_KEY> \
--password="<REPO_PASSWORD>" \
--prefix=kopia/<cluster>/
# List snapshots for specific PVC
kopia snapshot list --all | grep seafile
# Restore specific file
kopia restore <snapshot-id>/path/to/file.txt /tmp/restored-file.txt
# Restore specific directory
kopia restore <snapshot-id>/data/uploads/ /tmp/restored-uploads/
```
### Method 2: Mount and Copy
```bash
# Create a temporary pod that mounts the backup
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: backup-browser
  namespace: velero
spec:
  containers:
    - name: browser
      image: kopia/kopia:latest
      command: ["sleep", "3600"]
      env:
        - name: KOPIA_PASSWORD
          valueFrom:
            secretKeyRef:
              name: velero-repo-credentials
              key: repository-password
      volumeMounts:
        - name: restore-target
          mountPath: /restore
  volumes:
    - name: restore-target
      emptyDir: {}
EOF
# Exec in and restore files
kubectl exec -it -n velero backup-browser -- /bin/sh
# ... run kopia commands inside
```
### Method 3: Full PVC Restore + Copy + Delete
```bash
# 1. Restore PVC to temp namespace
velero restore create temp-restore \
--from-backup <backup-name> \
--include-namespaces seafile \
--namespace-mappings seafile:temp-restore \
--include-resources pvc,pv \
--wait
# 2. Create a pod to access both PVCs
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: file-copier
  namespace: seafile
spec:
  containers:
    - name: copier
      image: alpine
      command: ["sleep", "3600"]
      volumeMounts:
        - name: current
          mountPath: /current
        - name: restored
          mountPath: /restored
  volumes:
    - name: current
      persistentVolumeClaim:
        claimName: seafile-data
    - name: restored
      persistentVolumeClaim:
        claimName: seafile-data # in temp-restore namespace - need cross-ns mount or copy via node
EOF
# Alternative: use rsync between namespaces
kubectl exec -n temp-restore deployment/temp-pod -- tar cf - /data/specific-file.txt | \
kubectl exec -i -n seafile deployment/seafile -- tar xf - -C /
```
---
## 5. Database-Specific Recovery
### MariaDB (via mariadb-operator)
Velero fs-backup of running DB may be inconsistent. Prefer operator backups:
```bash
# List operator backups
kubectl get backups.k8s.mariadb.com -n mariadb
# Restore from operator backup
kubectl apply -f - <<EOF
apiVersion: k8s.mariadb.com/v1alpha1
kind: Restore
metadata:
  name: mariadb-restore
  namespace: mariadb
spec:
  mariaDbRef:
    name: mariadb
  backupRef:
    name: mariadb-backup-20250115
EOF
```
If you must restore from Velero:
```bash
# 1. Scale down mariadb
kubectl scale statefulset mariadb -n mariadb --replicas=0
# 2. Restore PVC
velero restore create mariadb-pvc-restore \
--from-backup <backup-name> \
--include-namespaces mariadb \
--include-resources pvc,pv \
--wait
# 3. Scale back up - DB will recover from WAL
kubectl scale statefulset mariadb -n mariadb --replicas=1
# 4. Verify data integrity
kubectl exec -it -n mariadb mariadb-0 -- mariadb -e "CHECK TABLE important_table;"
```
### Redis
```bash
# If Redis is persistent (RDB/AOF)
kubectl scale statefulset redis -n redis --replicas=0
velero restore create redis-restore \
--from-backup <backup-name> \
--include-namespaces redis \
--wait
kubectl scale statefulset redis -n redis --replicas=1
```
---
## 6. Backup Management
### Create On-Demand Backup
```bash
# Full backup
velero backup create manual-backup-$(date +%Y%m%d-%H%M%S) \
--default-volumes-to-fs-backup \
--snapshot-volumes=false \
--wait
# Specific namespace pre-maintenance
velero backup create pre-upgrade-seafile-$(date +%Y%m%d) \
--include-namespaces seafile \
--default-volumes-to-fs-backup \
--wait
```
### Delete Old Backups
```bash
# Delete specific backup
velero backup delete <backup-name> --confirm
# Delete backups older than 30 days (careful!)
velero backup get -o json | jq -r '.items[] | select(.status.startTimestamp < (now - 2592000 | todate)) | .metadata.name' | xargs -I {} velero backup delete {} --confirm
```
### Check Backup Storage Location Health
```bash
velero backup-location get
velero backup-location describe default
# Verify connectivity
kubectl logs -n velero deployment/velero | grep -i "backup storage location"
```
---
## 7. Disaster Recovery Procedures
### Complete Cluster Rebuild
```bash
# 1. Install Velero on new cluster with same config
helm upgrade --install velero vmware-tanzu/velero \
-n velero --create-namespace \
-f velero-values.yaml
# 2. Wait for velero to sync backup list from S3
sleep 60
velero backup get
# 3. Restore namespaces in order (dependencies first)
# Restore storage/infra
velero restore create restore-infra \
--from-backup <latest-backup> \
--include-namespaces minio,cert-manager \
--wait
# Restore databases
velero restore create restore-databases \
--from-backup <latest-backup> \
--include-namespaces mariadb,redis \
--wait
# Restore applications
velero restore create restore-apps \
--from-backup <latest-backup> \
--include-namespaces seafile,plane \
--wait
```
### Restore Schedule After Accidental Deletion
```bash
# Schedules are cluster resources, restore from backup
velero restore create restore-schedules \
--from-backup <backup-name> \
--include-resources schedules.velero.io \
--wait
```
---
## 8. Troubleshooting
### Backup Stuck/Failed
```bash
# Check velero logs
kubectl logs -n velero deployment/velero --tail=100
# Check node-agent on specific node
kubectl logs -n velero -l name=node-agent --tail=100
# Check backup details for errors
velero backup describe <backup-name> --details | grep -i -A5 "error\|warning\|failed"
# Common issues:
# - Node-agent not running on node with PV
kubectl get pods -n velero -l name=node-agent -o wide
# - PVC not annotated for backup
kubectl get pvc -A -o json | jq '.items[] | select(.metadata.annotations["backup.velero.io/backup-volumes"] != null)'
```
### Restore Not Restoring Volumes
```bash
# Check if backup has pod volume backups
velero backup describe <backup-name> --details | grep -A20 "Pod Volume Backups"
# Verify restore is configured to restore PVs
velero restore describe <restore-name> --details | grep -i "restorePVs"
# Force PV restore
velero restore create <name> \
--from-backup <backup> \
--restore-volumes=true \
--wait
```
### Kopia Repository Issues
```bash
# Check repository status
kubectl exec -n velero deployment/velero -- \
velero repo get
# Unlock stuck repository
kubectl exec -n velero deployment/velero -- \
velero repo unlock <repo-name>
# Maintenance (runs automatically, but can trigger manually)
kubectl exec -n velero deployment/velero -- \
velero repo maintenance run
```
---
## 9. Monitoring & Alerting
### Prometheus Metrics
Key metrics to monitor:
```promql
# Backup success rate
sum(velero_backup_success_total) / sum(velero_backup_attempt_total)
# Backup duration
velero_backup_duration_seconds{schedule="daily-all-namespaces"}
# Backup size
velero_backup_items_total{backup="<name>"}
# Failed backups in last 24h
increase(velero_backup_failure_total[24h])
```
### AlertManager Rules
```yaml
groups:
  - name: velero
    rules:
      - alert: VeleroBackupFailed
        expr: increase(velero_backup_failure_total[1h]) > 0
        for: 5m
        labels:
          severity: critical
        annotations:
          summary: "Velero backup failed"
      - alert: VeleroBackupMissing
        expr: time() - velero_backup_last_successful_timestamp{schedule="daily-all-namespaces"} > 86400
        for: 1h
        labels:
          severity: warning
        annotations:
          summary: "No successful backup in 24h"
      - alert: VeleroNodeAgentDown
        expr: kube_daemonset_status_number_unavailable{daemonset="node-agent"} > 0
        for: 15m
        labels:
          severity: warning
```
---
## 10. Regular Maintenance Tasks
### Weekly
```bash
# Verify recent backup integrity
velero backup describe $(velero backup get -o json | jq -r '.items | sort_by(.status.startTimestamp) | last | .metadata.name') --details
# Check backup storage usage
mc ls minio/velero-backups --summarize
```
### Monthly
```bash
# Test restore to scratch namespace
velero restore create monthly-test-$(date +%Y%m) \
--from-backup $(velero backup get -o json | jq -r '.items[0].metadata.name') \
--include-namespaces seafile \
--namespace-mappings seafile:restore-test \
--wait
# Verify restored data
kubectl exec -n restore-test deploy/seafile -- ls -la /data
# Cleanup test
kubectl delete namespace restore-test
velero restore delete monthly-test-$(date +%Y%m) --confirm
```
### Quarterly
- Full DR test: restore to separate cluster (see the sketch after this list)
- Review retention policies
- Audit backup coverage (new namespaces/PVCs added?)
- Update velero/plugin versions
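
A minimal sketch of that quarterly DR drill, assuming a scratch-cluster kubeconfig and the same `velero-values.yaml` used in section 7 (backup name is a placeholder):

```bash
# Install Velero on the scratch cluster with the production values.
export KUBECONFIG=~/.kube/dr-test-cluster
helm upgrade --install velero vmware-tanzu/velero \
  -n velero --create-namespace -f velero-values.yaml

# Wait for backup metadata to sync from MinIO, then restore one app end-to-end.
velero backup get
velero restore create dr-test-$(date +%Y%m) \
  --from-backup <latest-backup> \
  --include-namespaces seafile \
  --wait
kubectl exec -n seafile deploy/seafile -- ls /data
```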
---
## Appendix: Common Label Selectors
```bash
# Backup by app label
--selector app.kubernetes.io/name=seafile
# Backup by component
--selector app.kubernetes.io/component=database
# Exclude specific pods from backup
# (add to pod annotation)
kubectl annotate pod <pod> backup.velero.io/backup-volumes-excludes=cache,tmp
```

shadow/iptables/rules.v4 Normal file
View File

@@ -0,0 +1,134 @@
# Generated by iptables-save v1.8.10 (nf_tables) on Sun Nov 17 01:37:49 2024
*mangle
:PREROUTING ACCEPT [756:126788]
:INPUT ACCEPT [715:122089]
:FORWARD ACCEPT [40:4623]
:OUTPUT ACCEPT [420:58795]
:POSTROUTING ACCEPT [460:63418]
:LIBVIRT_PRT - [0:0]
-A POSTROUTING -j LIBVIRT_PRT
-A LIBVIRT_PRT -o virbr100 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A LIBVIRT_PRT -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
COMMIT
# Completed on Sun Nov 17 01:37:49 2024
# Generated by iptables-save v1.8.10 (nf_tables) on Sun Nov 17 01:37:49 2024
*filter
:INPUT DROP [387:104781]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [42:5859]
:DOCKER - [0:0]
:DOCKER-ISOLATION-STAGE-1 - [0:0]
:DOCKER-ISOLATION-STAGE-2 - [0:0]
:DOCKER-USER - [0:0]
:LIBVIRT_FWI - [0:0]
:LIBVIRT_FWO - [0:0]
:LIBVIRT_FWX - [0:0]
:LIBVIRT_INP - [0:0]
:LIBVIRT_OUT - [0:0]
:f2b-sshd - [0:0]
-A INPUT -j LIBVIRT_INP
-A INPUT -p tcp -m multiport --dports 22 -j f2b-sshd
-A INPUT -p icmp -j ACCEPT
-A INPUT -i virbr100 -j ACCEPT
-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT
-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT
-A INPUT -i virbr0 -p udp -m udp --dport 5353 -j ACCEPT
-A INPUT -i virbr0 -p tcp -m tcp --dport 5353 -j ACCEPT
-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT
-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT
-A INPUT -p tcp -m tcp --dport 22 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 80 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 443 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 1022 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 2022 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -i eno1 -p tcp -m tcp --dport 53 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 53 -j ACCEPT
-A FORWARD -i eno1 -p tcp -m tcp --dport 5353 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 5353 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 51820 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 1194 -j ACCEPT
-A FORWARD -j DOCKER-USER
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
-A FORWARD -j LIBVIRT_FWX
-A FORWARD -j LIBVIRT_FWI
-A FORWARD -j LIBVIRT_FWO
-A FORWARD -o br-8be00fb1442a -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o br-8be00fb1442a -j DOCKER
-A FORWARD -i br-8be00fb1442a ! -o br-8be00fb1442a -j ACCEPT
-A FORWARD -i br-8be00fb1442a -o br-8be00fb1442a -j ACCEPT
-A FORWARD -d 192.168.123.141/32 -p tcp -m tcp --dport 80 -j ACCEPT
-A OUTPUT -j LIBVIRT_OUT
-A OUTPUT -p tcp -m tcp --sport 22 -m conntrack --ctstate ESTABLISHED -j ACCEPT
-A OUTPUT -o lo -j ACCEPT
-A OUTPUT -o virbr100 -j ACCEPT
-A OUTPUT -m conntrack --ctstate ESTABLISHED -j ACCEPT
-A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -i br-8be00fb1442a ! -o br-8be00fb1442a -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -j RETURN
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -o br-8be00fb1442a -j DROP
-A DOCKER-ISOLATION-STAGE-2 -j RETURN
-A DOCKER-USER -j RETURN
-A LIBVIRT_FWI -d 192.168.123.0/24 -o virbr100 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A LIBVIRT_FWI -o virbr100 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWI -o virbr1 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWI -d 192.168.122.0/24 -o virbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A LIBVIRT_FWI -o virbr0 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWO -s 192.168.123.0/24 -i virbr100 -j ACCEPT
-A LIBVIRT_FWO -i virbr100 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWO -i virbr1 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWO -s 192.168.122.0/24 -i virbr0 -j ACCEPT
-A LIBVIRT_FWO -i virbr0 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWX -i virbr100 -o virbr100 -j ACCEPT
-A LIBVIRT_FWX -i virbr1 -o virbr1 -j ACCEPT
-A LIBVIRT_FWX -i virbr0 -o virbr0 -j ACCEPT
-A LIBVIRT_INP -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_INP -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_INP -p udp -m udp --dport 5353 -j ACCEPT
-A LIBVIRT_INP -p tcp -m tcp --dport 5353 -j ACCEPT
-A LIBVIRT_INP -p udp -m udp --dport 67 -j ACCEPT
-A LIBVIRT_INP -p tcp -m tcp --dport 67 -j ACCEPT
-A LIBVIRT_OUT -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -p udp -m udp --dport 5353 -j ACCEPT
-A LIBVIRT_OUT -p tcp -m tcp --dport 5353 -j ACCEPT
-A LIBVIRT_OUT -p udp -m udp --dport 68 -j ACCEPT
-A LIBVIRT_OUT -p tcp -m tcp --dport 68 -j ACCEPT
-A f2b-sshd -j RETURN
COMMIT
# Completed on Sun Nov 17 01:37:49 2024
# Generated by iptables-save v1.8.10 (nf_tables) on Sun Nov 17 01:37:49 2024
*nat
:PREROUTING ACCEPT [409:105569]
:INPUT ACCEPT [22:1288]
:OUTPUT ACCEPT [1:76]
:POSTROUTING ACCEPT [12:818]
:DOCKER - [0:0]
:LIBVIRT_PRT - [0:0]
-A PREROUTING -i eno1 -p tcp -m tcp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p tcp -m tcp --dport 5353 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 5353 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 51820 -j DNAT --to-destination 192.168.123.101:51820
-A PREROUTING -i eno1 -p udp -m udp --dport 1194 -j DNAT --to-destination 192.168.123.101:1194
-A PREROUTING -i eno1 -p tcp -m tcp --dport 21080 -j DNAT --to-destination 192.168.123.141:80
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -j LIBVIRT_PRT
-A POSTROUTING -s 172.18.0.0/16 ! -o br-8be00fb1442a -j MASQUERADE
-A DOCKER -i docker0 -j RETURN
-A DOCKER -i br-8be00fb1442a -j RETURN
-A LIBVIRT_PRT -s 192.168.123.0/24 -d 224.0.0.0/24 -j RETURN
-A LIBVIRT_PRT -s 192.168.123.0/24 -d 255.255.255.255/32 -j RETURN
-A LIBVIRT_PRT -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
-A LIBVIRT_PRT -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
-A LIBVIRT_PRT -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
COMMIT
# Completed on Sun Nov 17 01:37:49 2024

View File

@@ -0,0 +1,248 @@
# Generated by iptables-save v1.8.10 (nf_tables) on Sun Nov 17 01:37:49 2024
*mangle
:PREROUTING ACCEPT [756:126788]
:INPUT ACCEPT [715:122089]
:FORWARD ACCEPT [40:4623]
:OUTPUT ACCEPT [420:58795]
:POSTROUTING ACCEPT [460:63418]
:LIBVIRT_PRT - [0:0]
-A POSTROUTING -j LIBVIRT_PRT
-A POSTROUTING -o virbr100 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A POSTROUTING -o virbr100 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A POSTROUTING -o virbr100 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A POSTROUTING -o virbr100 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A LIBVIRT_PRT -o virbr100 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
-A LIBVIRT_PRT -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
COMMIT
# Completed on Sun Nov 17 01:37:49 2024
# Generated by iptables-save v1.8.10 (nf_tables) on Sun Nov 17 01:37:49 2024
*filter
:INPUT DROP [387:104781]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [42:5859]
:DOCKER - [0:0]
:DOCKER-ISOLATION-STAGE-1 - [0:0]
:DOCKER-ISOLATION-STAGE-2 - [0:0]
:DOCKER-USER - [0:0]
:LIBVIRT_FWI - [0:0]
:LIBVIRT_FWO - [0:0]
:LIBVIRT_FWX - [0:0]
:LIBVIRT_INP - [0:0]
:LIBVIRT_OUT - [0:0]
:f2b-sshd - [0:0]
-A INPUT -j LIBVIRT_INP
-A INPUT -p tcp -m multiport --dports 22 -j f2b-sshd
-A INPUT -p tcp -m multiport --dports 22 -j f2b-sshd
-A INPUT -p icmp -j ACCEPT
-A INPUT -p tcp -m multiport --dports 22 -j f2b-sshd
-A INPUT -i virbr100 -p udp -m udp --dport 53 -j ACCEPT
-A INPUT -i virbr100 -p tcp -m tcp --dport 53 -j ACCEPT
-A INPUT -i virbr100 -p udp -m udp --dport 67 -j ACCEPT
-A INPUT -i virbr100 -p tcp -m tcp --dport 67 -j ACCEPT
-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT
-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT
-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT
-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT
-A INPUT -i virbr100 -j ACCEPT
-A INPUT -i virbr100 -p udp -m udp --dport 53 -j ACCEPT
-A INPUT -i virbr100 -p tcp -m tcp --dport 53 -j ACCEPT
-A INPUT -i virbr100 -p udp -m udp --dport 67 -j ACCEPT
-A INPUT -i virbr100 -p tcp -m tcp --dport 67 -j ACCEPT
-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT
-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT
-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT
-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT
-A INPUT -p tcp -m multiport --dports 22 -j f2b-sshd
-A INPUT -p tcp -m tcp --dport 22 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 80 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 443 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 80 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 1022 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m tcp --dport 2022 -m state --state NEW,ESTABLISHED -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A INPUT -i virbr100 -j ACCEPT
-A FORWARD -i eno1 -p tcp -m tcp --dport 53 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 53 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 51820 -j ACCEPT
-A FORWARD -j DOCKER-USER
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
-A FORWARD -j LIBVIRT_FWX
-A FORWARD -j LIBVIRT_FWI
-A FORWARD -j LIBVIRT_FWO
-A FORWARD -i eno1 -p tcp -m tcp --dport 53 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 53 -j ACCEPT
-A FORWARD -d 192.168.123.0/24 -o virbr100 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -s 192.168.123.0/24 -i virbr100 -j ACCEPT
-A FORWARD -i virbr100 -o virbr100 -j ACCEPT
-A FORWARD -o virbr100 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -i virbr100 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -d 192.168.122.0/24 -o virbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT
-A FORWARD -i virbr0 -o virbr0 -j ACCEPT
-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -i eno1 -p udp -m udp --dport 1194 -j ACCEPT
-A FORWARD -i eno1 -p tcp -m tcp --dport 53 -j ACCEPT
-A FORWARD -i eno1 -p udp -m udp --dport 53 -j ACCEPT
-A FORWARD -d 192.168.123.0/24 -o virbr100 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -s 192.168.123.0/24 -i virbr100 -j ACCEPT
-A FORWARD -i virbr100 -o virbr100 -j ACCEPT
-A FORWARD -o virbr100 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -i virbr100 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -d 192.168.122.0/24 -o virbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT
-A FORWARD -i virbr0 -o virbr0 -j ACCEPT
-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
-A FORWARD -o br-8be00fb1442a -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o br-8be00fb1442a -j DOCKER
-A FORWARD -i br-8be00fb1442a ! -o br-8be00fb1442a -j ACCEPT
-A FORWARD -i br-8be00fb1442a -o br-8be00fb1442a -j ACCEPT
-A FORWARD -d 192.168.123.141/32 -p tcp -m tcp --dport 80 -j ACCEPT
-A OUTPUT -j LIBVIRT_OUT
-A OUTPUT -o virbr100 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -o virbr0 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -o virbr100 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -o virbr0 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -o virbr100 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -o virbr0 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -p tcp -m tcp --sport 22 -m conntrack --ctstate ESTABLISHED -j ACCEPT
-A OUTPUT -o virbr0 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -o virbr100 -p udp -m udp --dport 68 -j ACCEPT
-A OUTPUT -o lo -j ACCEPT
-A OUTPUT -o virbr100 -j ACCEPT
-A OUTPUT -m conntrack --ctstate ESTABLISHED -j ACCEPT
-A OUTPUT -o virbr100 -j ACCEPT
-A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -i br-8be00fb1442a ! -o br-8be00fb1442a -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -j RETURN
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -o br-8be00fb1442a -j DROP
-A DOCKER-ISOLATION-STAGE-2 -j RETURN
-A DOCKER-USER -j RETURN
-A LIBVIRT_FWI -d 192.168.123.0/24 -o virbr100 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A LIBVIRT_FWI -o virbr100 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWI -o virbr1 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWI -d 192.168.122.0/24 -o virbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A LIBVIRT_FWI -o virbr0 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWO -s 192.168.123.0/24 -i virbr100 -j ACCEPT
-A LIBVIRT_FWO -i virbr100 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWO -i virbr1 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWO -s 192.168.122.0/24 -i virbr0 -j ACCEPT
-A LIBVIRT_FWO -i virbr0 -j REJECT --reject-with icmp-port-unreachable
-A LIBVIRT_FWX -i virbr100 -o virbr100 -j ACCEPT
-A LIBVIRT_FWX -i virbr1 -o virbr1 -j ACCEPT
-A LIBVIRT_FWX -i virbr0 -o virbr0 -j ACCEPT
-A LIBVIRT_INP -i virbr100 -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_INP -i virbr100 -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_INP -i virbr100 -p udp -m udp --dport 67 -j ACCEPT
-A LIBVIRT_INP -i virbr100 -p tcp -m tcp --dport 67 -j ACCEPT
-A LIBVIRT_INP -i virbr1 -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_INP -i virbr1 -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_INP -i virbr1 -p udp -m udp --dport 67 -j ACCEPT
-A LIBVIRT_INP -i virbr1 -p tcp -m tcp --dport 67 -j ACCEPT
-A LIBVIRT_INP -i virbr0 -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_INP -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_INP -i virbr0 -p udp -m udp --dport 67 -j ACCEPT
-A LIBVIRT_INP -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT
-A LIBVIRT_OUT -o virbr100 -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -o virbr100 -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -o virbr100 -p udp -m udp --dport 68 -j ACCEPT
-A LIBVIRT_OUT -o virbr100 -p tcp -m tcp --dport 68 -j ACCEPT
-A LIBVIRT_OUT -o virbr1 -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -o virbr1 -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -o virbr1 -p udp -m udp --dport 68 -j ACCEPT
-A LIBVIRT_OUT -o virbr1 -p tcp -m tcp --dport 68 -j ACCEPT
-A LIBVIRT_OUT -o virbr0 -p udp -m udp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -o virbr0 -p tcp -m tcp --dport 53 -j ACCEPT
-A LIBVIRT_OUT -o virbr0 -p udp -m udp --dport 68 -j ACCEPT
-A LIBVIRT_OUT -o virbr0 -p tcp -m tcp --dport 68 -j ACCEPT
-A f2b-sshd -s 222.187.254.41/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 207.46.227.197/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 125.77.23.30/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 222.186.175.216/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 94.200.202.26/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 103.80.36.218/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 62.234.126.132/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 106.52.248.175/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 104.248.5.69/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 129.211.49.227/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 112.85.42.176/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 222.186.15.62/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 222.186.30.112/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 222.186.175.167/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 222.186.52.39/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 207.154.215.119/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 36.91.76.171/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 134.175.19.71/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 144.217.243.216/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 210.206.92.137/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 222.186.30.76/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 49.51.90.173/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -s 222.186.190.2/32 -j REJECT --reject-with icmp-port-unreachable
-A f2b-sshd -j RETURN
-A f2b-sshd -j RETURN
-A f2b-sshd -j RETURN
-A f2b-sshd -j RETURN
COMMIT
# Completed on Sun Nov 17 01:37:49 2024
# Generated by iptables-save v1.8.10 (nf_tables) on Sun Nov 17 01:37:49 2024
*nat
:PREROUTING ACCEPT [409:105569]
:INPUT ACCEPT [22:1288]
:OUTPUT ACCEPT [1:76]
:POSTROUTING ACCEPT [12:818]
:DOCKER - [0:0]
:LIBVIRT_PRT - [0:0]
-A PREROUTING -i eno1 -p tcp -m tcp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 51820 -j DNAT --to-destination 192.168.123.101:51820
-A PREROUTING -i eno1 -p tcp -m tcp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 1194 -j DNAT --to-destination 192.168.123.101:1194
-A PREROUTING -i eno1 -p tcp -m tcp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p udp -m udp --dport 53 -j DNAT --to-destination 192.168.123.101:53
-A PREROUTING -i eno1 -p tcp -m tcp --dport 21080 -j DNAT --to-destination 192.168.123.141:80
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -j LIBVIRT_PRT
-A POSTROUTING -s 192.168.123.0/24 -d 224.0.0.0/24 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 -d 255.255.255.255/32 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
-A POSTROUTING -s 192.168.123.0/24 -d 224.0.0.0/24 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 -d 255.255.255.255/32 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
-A POSTROUTING -s 172.18.0.0/16 ! -o br-8be00fb1442a -j MASQUERADE
-A POSTROUTING -s 192.168.123.0/24 -d 224.0.0.0/24 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 -d 255.255.255.255/32 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
-A POSTROUTING -s 192.168.123.0/24 -d 224.0.0.0/24 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 -d 255.255.255.255/32 -j RETURN
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
-A DOCKER -i docker0 -j RETURN
-A DOCKER -i br-8be00fb1442a -j RETURN
-A LIBVIRT_PRT -s 192.168.123.0/24 -d 224.0.0.0/24 -j RETURN
-A LIBVIRT_PRT -s 192.168.123.0/24 -d 255.255.255.255/32 -j RETURN
-A LIBVIRT_PRT -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
-A LIBVIRT_PRT -s 192.168.123.0/24 ! -d 192.168.123.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
-A LIBVIRT_PRT -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
COMMIT
# Completed on Sun Nov 17 01:37:49 2024

View File

@@ -212,6 +212,88 @@ server {
    return 404; # managed by Certbot
}
server {
    server_name jellyfin.home.hrajfrisbee.cz; # managed by Certbot

    # Security headers for media streaming
    add_header X-Frame-Options "SAMEORIGIN";
    add_header X-XSS-Protection "1; mode=block";
    add_header X-Content-Type-Options "nosniff";

    # Increase body size for high-res movie posters
    client_max_body_size 20M;

    location / {
        # Proxy to your Synology or VM IP and Jellyfin port (default 8096)
        proxy_pass https://docker-30:443;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Protocol $scheme;
        proxy_set_header X-Forwarded-Host $http_host;

        # Disable buffering for smoother streaming
        proxy_buffering off;
    }

    listen 8443 ssl; # managed by Certbot
    ssl_certificate /etc/letsencrypt/live/jellyfin.home.hrajfrisbee.cz/fullchain.pem; # managed by Certbot
    ssl_certificate_key /etc/letsencrypt/live/jellyfin.home.hrajfrisbee.cz/privkey.pem; # managed by Certbot
    include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
    if ($host = jellyfin.home.hrajfrisbee.cz) {
        return 301 https://$host$request_uri;
    } # managed by Certbot

    listen 80;
    server_name jellyfin.home.hrajfrisbee.cz;
    return 404; # managed by Certbot
}
server {
    root /srv/webs/random-shit;
    server_name random-shit.hrajfrisbee.cz; # managed by Certbot

    # Enable directory browsing
    autoindex on;
    # Optional: Show file sizes in MB/GB instead of bytes
    autoindex_exact_size off;
    # Optional: Show file timestamps in your local server time
    autoindex_localtime on;
    # Optional: Choose format (html, xml, json, or jsonp)
    autoindex_format html;

    listen 8443 ssl; # managed by Certbot
    ssl_certificate /etc/letsencrypt/live/random-shit.hrajfrisbee.cz/fullchain.pem; # managed by Certbot
    ssl_certificate_key /etc/letsencrypt/live/random-shit.hrajfrisbee.cz/privkey.pem; # managed by Certbot
    include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
    if ($host = random-shit.hrajfrisbee.cz) {
        return 301 https://$host$request_uri;
    } # managed by Certbot

    listen 80;
    server_name random-shit.hrajfrisbee.cz;
    return 404; # managed by Certbot
}
server {
@@ -240,8 +322,8 @@ server {
    ssl_certificate_key /etc/letsencrypt/live/vault.hrajfrisbee.cz/privkey.pem; # managed by Certbot
    include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
    if ($host = vault.hrajfrisbee.cz) {
        return 301 https://$host$request_uri;
@@ -250,6 +332,32 @@ server {
    listen 80;
    server_name vault.hrajfrisbee.cz;
    return 404; # managed by Certbot
}
server {
    server_name maru-hleda-byt.home.hrajfrisbee.cz; # managed by Certbot

    location / {
        proxy_pass http://docker-30:8080;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    listen 8443 ssl; # managed by Certbot
    ssl_certificate /etc/letsencrypt/live/maru-hleda-byt.home.hrajfrisbee.cz/fullchain.pem; # managed by Certbot
    ssl_certificate_key /etc/letsencrypt/live/maru-hleda-byt.home.hrajfrisbee.cz/privkey.pem; # managed by Certbot
    include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
    if ($host = maru-hleda-byt.home.hrajfrisbee.cz) {
        return 301 https://$host$request_uri;
    } # managed by Certbot

    listen 80;
    server_name maru-hleda-byt.home.hrajfrisbee.cz;
    return 404; # managed by Certbot
}

View File

@@ -64,7 +64,10 @@ http {
stream {
    map $ssl_preread_server_name $backend {
        # Passthrough to K8s
        ghost.lab.home.hrajfrisbee.cz k8s_gatewayapi;
        ~^.+\.lab\.home\.hrajfrisbee\.cz$ k8s_ingress;
        lab\.home\.hrajfrisbee\.cz$ k8s_ingress;
        default local_https;
    }
@@ -73,6 +76,10 @@ stream {
        server docker-30:9443;
    }
    upstream k8s_gatewayapi {
        server docker-30:9444;
    }
    upstream local_https {
        server 127.0.0.1:8443; # Loop back to http block
    }
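
Since routing keys off `$ssl_preread_server_name`, the map can be exercised by connecting with different SNI values (a sketch; `<proxy-host>` is a placeholder for the machine running this nginx, and port 443 is assumed for the stream listener):

```bash
# ghost.lab... should land on the Gateway API backend (docker-30:9444);
# other *.lab... names should hit the nginx ingress backend (docker-30:9443).
openssl s_client -connect <proxy-host>:443 -servername ghost.lab.home.hrajfrisbee.cz </dev/null 2>/dev/null | openssl x509 -noout -subject
openssl s_client -connect <proxy-host>:443 -servername foo.lab.home.hrajfrisbee.cz </dev/null 2>/dev/null | openssl x509 -noout -subject
```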

View File

@@ -0,0 +1,54 @@
//
// Do any local configuration here
//
// Consider adding the 1918 zones here, if they are not used in your
// organization
//include "/etc/bind/zones.rfc1918";

key "acme-update-key" {
    algorithm hmac-sha512;
    secret "T6R1TpLGegHwFWO/I1LwtdGePRD+w00Oe4mJECW7qfheKJ/7FxlINH+Yk2vMvJCVNojj8BWoFAyEFCwGBpGROQ==";
};

zone "czechultimate.cz" {
    type master;
    file "/etc/bind/zones/czechultimate.cz.dns";
    inline-signing yes;
    auto-dnssec maintain;
    key-directory "/etc/bind/keys";
    allow-transfer { 87.236.197.83; 89.187.144.180; 87.236.196.85; };
    also-notify { 87.236.197.83; 89.187.144.180; 87.236.196.85; };
};

zone "hrajfrisbee.cz" {
    type master;
    file "/etc/bind/zones/hrajfrisbee.cz.dns";
    allow-transfer { 87.236.197.83; 89.187.144.180; 87.236.196.85; };
    also-notify { 87.236.197.83; 89.187.144.180; 87.236.196.85; };
    update-policy {
        // Allow ACME challenges only for lab.home subdomain
        grant acme-update-key name _acme-challenge.lab.home.hrajfrisbee.cz. TXT;
        // If you need wildcards under lab.home (e.g. _acme-challenge.foo.lab.home.hrajfrisbee.cz):
        grant acme-update-key subdomain _acme-challenge.lab.home.hrajfrisbee.cz. TXT;
    };
};

// points at zlutazimnice nameservers @nic.cz - cannot be working
zone "fraktalbar.cz" {
    type master;
    file "/etc/bind/zones/fraktalbar.cz.dns";
    allow-transfer { 87.236.197.83; 89.187.144.180; 87.236.196.85; };
    also-notify { 87.236.197.83; 89.187.144.180; 87.236.196.85; };
};

// points at zlutazimnice nameservers @nic.cz - cannot be working
zone "vegtral.cz" {
    type master;
    file "/etc/bind/zones/vegtral.cz.dns";
    allow-transfer { 87.236.197.83; 89.187.144.180; 87.236.196.85; };
    also-notify { 87.236.197.83; 89.187.144.180; 87.236.196.85; };
};

View File

@@ -0,0 +1,7 @@
## named tweaks
1. Generate TSIG key
```bash
tsig-keygen -a hmac-sha512 acme-update-key
```
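2. Test a dynamic update with the new key (a sketch; assumes the key from step 1 is saved as `acme-update-key.conf` and that named answers on 87.236.195.209:5353, as the socat proxy expects)
```bash
nsupdate -k acme-update-key.conf <<'EOF'
server 87.236.195.209 5353
zone hrajfrisbee.cz
update add _acme-challenge.lab.home.hrajfrisbee.cz. 60 TXT "test-token"
send
EOF
# Verify the record landed, then clean up with an 'update delete'.
dig @87.236.195.209 -p 5353 _acme-challenge.lab.home.hrajfrisbee.cz TXT +short
```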