terraform: extend kubernetes a little bit

This commit is contained in:
Jan Novak
2026-01-02 23:17:43 +01:00
parent bdf82c7e49
commit d3697c8132
14 changed files with 610 additions and 10 deletions

View File

@@ -0,0 +1,45 @@
# Production-baseline values for the kube-prometheus-stack Helm chart.
# NOTE(review): indentation reconstructed from the canonical chart schema —
# the committed copy had all nesting stripped; verify against `helm show values`.
prometheus:
  prometheusSpec:
    retention: 60d
    storageSpec:
      volumeClaimTemplate:
        spec:
          # storageClassName: <your-storage-class>
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 20Gi
    # Container resources for the Prometheus pod itself.
    resources:
      requests:
        memory: 512Mi  # canonical form of 0.5Gi
        cpu: 500m
      limits:
        memory: 4Gi
        cpu: 2
    # Critical for ServiceMonitor discovery across namespaces:
    # nil selectors would otherwise be replaced by release-scoped selectors.
    serviceMonitorSelectorNilUsesHelmValues: false
    podMonitorSelectorNilUsesHelmValues: false
    ruleSelectorNilUsesHelmValues: false

alertmanager:
  alertmanagerSpec:
    storage:
      volumeClaimTemplate:
        spec:
          # storageClassName: <your-storage-class>
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 3Gi

grafana:
  persistence:
    enabled: true
    # storageClassName: <your-storage-class>
    size: 10Gi
  # SECURITY(review): default credential committed to VCS — change before any
  # non-lab deployment, or source it from a Secret / --set at install time.
  adminPassword: admin

prometheusOperator:
  admissionWebhooks:
    certManager:
      enabled: false  # Set true if using cert-manager

View File

@@ -0,0 +1,79 @@
```bash
# 1. Add repo
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update

# 2. Install CRDs separately (production best practice - avoids Helm CRD lifecycle issues)
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml

# 3. Create values file (production baseline)
# Quoted delimiter: prevents any accidental shell expansion inside the YAML.
cat <<'EOF' > kube-prometheus-values.yaml
prometheus:
  prometheusSpec:
    retention: 60d
    storageSpec:
      volumeClaimTemplate:
        spec:
          # storageClassName: <your-storage-class>
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 20Gi
    resources:
      requests:
        memory: 512Mi
        cpu: 500m
      limits:
        memory: 4Gi
        cpu: 2
    # Critical for ServiceMonitor discovery across namespaces
    serviceMonitorSelectorNilUsesHelmValues: false
    podMonitorSelectorNilUsesHelmValues: false
    ruleSelectorNilUsesHelmValues: false
alertmanager:
  alertmanagerSpec:
    storage:
      volumeClaimTemplate:
        spec:
          # storageClassName: <your-storage-class>
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 3Gi
grafana:
  persistence:
    enabled: true
    # storageClassName: <your-storage-class>
    size: 10Gi
  adminPassword: admin  # CHANGE for anything beyond a lab install
prometheusOperator:
  admissionWebhooks:
    certManager:
      enabled: false  # Set true if using cert-manager
EOF

# 4. Install.
# --skip-crds: the CRDs were applied manually in step 2. The old
# `--set prometheusOperator.createCustomResource=false` flag belonged to the
# deprecated stable/prometheus-operator chart and is ignored by
# kube-prometheus-stack.
helm install kube-prometheus-stack prometheus-community/kube-prometheus-stack \
  -n monitoring --create-namespace \
  --skip-crds \
  -f kube-prometheus-values.yaml

# 5. Verify
kubectl -n monitoring get pods
kubectl -n monitoring get prometheuses
# cluster-wide: -n would conflict with --all-namespaces
kubectl get servicemonitors --all-namespaces
```

View File

@@ -0,0 +1,11 @@
# Minimal PVC to smoke-test dynamic provisioning via the democratic-csi
# StorageClass. Indentation restored: accessModes/storageClassName/resources
# must nest under spec, which the committed (flattened) copy lost.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: freenas-iscsi  # your SC name
  resources:
    requests:
      storage: 10Gi

View File

@@ -0,0 +1,64 @@
# democratic-csi
```bash
helm repo add democratic-csi https://democratic-csi.github.io/charts/
helm install zfs-nvmeof democratic-csi/democratic-csi -f values.yaml
cat <<'EOF' > values.yaml
controller:
driver:
image:
registry: docker.io/democraticcsi/democratic-csi
tag: next
node:
driver:
image:
registry: docker.io/democraticcsi/democratic-csi
tag: next
csiDriver:
name: "org.democratic-csi.iscsi"
driver:
config:
driver: freenas-api-iscsi
httpConnection:
host: 192.168.0.40
apiKey: 1-0uvRlu1pca3Ed5HAAsEbs7nkx7Rxr6SpsxTd1431x9yhj68hD6qkXl7ovmGTxDTh
iscsi:
targetPortal: "192.168.0.40:3260"
namePrefix: "csi-"
nameSuffix: "-k8s"
targetGroups:
- targetGroupPortalGroup: 1
targetGroupInitiatorGroup: 1
targetGroupAuthType: "None"
zfs:
datasetParentName: "pool-6g/tank/k8s/vols"
detachedSnapshotsDatasetParentName: "pool-6g/tank/k8s/snaps"
storageClasses:
- name: freenas-iscsi
defaultClass: true
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
fsType: ext4
EOF
cat <<'EOF' > pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: test-pvc
spec:
accessModes:
- ReadWriteOnce
storageClassName: freenas-iscsi # your SC name
resources:
requests:
storage: 10Gi
EOF
```

View File

@@ -0,0 +1,40 @@
# Helm values for democratic-csi using the freenas-api-iscsi driver.
# NOTE(review): nesting reconstructed from the chart's documented layout —
# the committed copy was flattened to column 0, which is invalid YAML
# (duplicate `driver:` keys at top level).
controller:
  driver:
    image:
      registry: docker.io/democraticcsi/democratic-csi
      tag: next
node:
  driver:
    image:
      registry: docker.io/democraticcsi/democratic-csi
      tag: next
csiDriver:
  name: "org.democratic-csi.iscsi"
driver:
  config:
    driver: freenas-api-iscsi
    httpConnection:
      host: 192.168.0.40
      # SECURITY: plaintext TrueNAS API key in VCS — rotate this key and
      # supply it out-of-band (Secret, --set-file, sops) instead.
      apiKey: 1-0uvRlu1pca3Ed5HAAsEbs7nkx7Rxr6SpsxTd1431x9yhj68hD6qkXl7ovmGTxDTh
    iscsi:
      targetPortal: "192.168.0.40:3260"
      namePrefix: "csi-"
      nameSuffix: "-k8s"
      targetGroups:
        - targetGroupPortalGroup: 1
          targetGroupInitiatorGroup: 1
          targetGroupAuthType: "None"
    zfs:
      datasetParentName: "pool-6g/tank/k8s/vols"
      detachedSnapshotsDatasetParentName: "pool-6g/tank/k8s/snaps"
storageClasses:
  - name: freenas-iscsi
    defaultClass: true
    reclaimPolicy: Delete
    volumeBindingMode: Immediate
    allowVolumeExpansion: true
    parameters:
      fsType: ext4