Compare commits


23 Commits

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| baschno | 71348ad7f5 | kubeprom | 2026-02-02 23:51:48 +01:00 |
| baschno | 40eae4f567 | add pg stuff | 2026-02-01 22:09:00 +01:00 |
| baschno | e7f648cf57 | ext-secrets initial | 2026-01-25 20:23:01 +01:00 |
| baschno | dce92aeb28 | authentik initial | 2026-01-25 20:22:36 +01:00 |
| baschno | 07e4ae31e3 | kube-prom-stack | 2026-01-25 20:22:18 +01:00 |
| baschno | 5e86aafa09 | update vault readme | 2026-01-25 20:21:19 +01:00 |
| baschno | 4444296443 | postgres | 2026-01-25 20:20:50 +01:00 |
| baschno | 9aafb940e9 | adding extsecrets + postgres to just | 2026-01-12 21:27:22 +01:00 |
| baschno | 4075203b1e | initial add of enabling k8s with vault | 2026-01-11 20:27:54 +01:00 |
| baschno | 92decafc3f | adding vault client | 2026-01-11 20:27:28 +01:00 |
| baschno | 09e1bbbc52 | longhorn savegame | 2026-01-11 10:21:14 +01:00 |
| baschno | 48d930fedc | longhorn savegame | 2026-01-03 20:35:36 +01:00 |
| baschno | 1f82ce8d02 | longhorn savegame | 2026-01-03 20:35:10 +01:00 |
| baschno | a551f2e4ca | Longhorn: use values yaml for helm to reduce replicas | 2025-12-30 20:10:56 +01:00 |
| baschno | a80dce42b0 | add support for Longhorn setup | 2025-12-30 20:03:23 +01:00 |
| baschno | 63243c6d2e | fix formatting | 2025-12-29 23:57:28 +01:00 |
| baschno | 1f9f7e275c | add justfile for test deployment | 2025-12-29 18:41:02 +01:00 |
| baschno | 09026d6812 | move test deployment to different justfile | 2025-12-29 18:33:46 +01:00 |
| baschno | 24991fce90 | add setup-cluster-issuer | 2025-12-28 17:04:24 +01:00 |
| baschno | 65a59d2d0c | WIP: cert manager | 2025-12-28 16:19:08 +01:00 |
| baschno | 85fb620e39 | add module traefik | 2025-12-28 11:19:30 +01:00 |
| baschno | b56e02d2ed | fix formatting | 2025-12-28 11:19:12 +01:00 |
| baschno | 15cb2ce903 | adding test deployment | 2025-12-28 11:18:46 +01:00 |
49 changed files with 1732 additions and 62 deletions


@@ -0,0 +1,22 @@
set fallback := true
export PROMETHEUS_NAMESPACE := env("PROMETHEUS_NAMESPACE", "prometheus")
[private]
default:
@just --list --unsorted --list-submodules
add-helm-repo:
@helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
@helm repo update
install:
@just add-helm-repo
@helm upgrade --cleanup-on-fail --install kube-prometheus-stack prometheus-community/kube-prometheus-stack \
--wait \
-f kube-stack-config-values.yaml
uninstall:
helm uninstall kube-prometheus-stack
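A quick way to verify the stack after `just install` is to port-forward Grafana. A minimal sketch, assuming the chart's default naming for the release `kube-prometheus-stack`; the justfile does not pass `--namespace`, so adjust `-n` to wherever the release actually landed:

```bash
# Grafana service name follows the chart's default <release>-grafana naming (an assumption here)
kubectl port-forward svc/kube-prometheus-stack-grafana 3000:80

# Then open http://localhost:3000 and log in as admin with the adminPassword
# from kube-stack-config-values.yaml (prom-operator in this values file).
```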


@@ -0,0 +1,78 @@
#
# Copyright © contributors to CloudNativePG, established as
# CloudNativePG a Series of LF Projects, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
# -- here you can pass the whole values directly to the kube-prometheus-stack chart
enabled: true
kubeControllerManager:
enabled: false
nodeExporter:
enabled: false
defaultRules:
create: true
rules:
alertmanager: false
etcd: false
configReloaders: false
general: false
k8s: true
kubeApiserver: false
kubeApiserverAvailability: false
kubeApiserverSlos: false
kubelet: true
kubeProxy: false
kubePrometheusGeneral: false
kubePrometheusNodeRecording: false
kubernetesApps: false
kubernetesResources: false
kubernetesStorage: false
kubernetesSystem: false
kubeScheduler: false
kubeStateMetrics: false
network: false
node: true
nodeExporterAlerting: false
nodeExporterRecording: true
prometheus: false
prometheusOperator: false
#nodeSelector:
#workload: monitor
prometheus:
prometheusSpec:
podMonitorSelectorNilUsesHelmValues: false
ruleSelectorNilUsesHelmValues: false
serviceMonitorSelectorNilUsesHelmValues: false
probeSelectorNilUsesHelmValues: false
#nodeSelector:
#workload: monitor
grafana:
enabled: true
# -- the grafana admin password
adminPassword: prom-operator
defaultDashboardsEnabled: false
sidecar:
dashboards:
enabled: true
#nodeSelector:
#workload: monitor
alertmanager:
enabled: true
#alertManagerSpec:
#nodeSelector:
#workload: monitor


@@ -129,3 +129,137 @@ Use the below command to check the vault secrets from the pod volume
```
kubectl exec -it vault-test-84d9dc9986-gcxfv -n vault -- sh -c "cat /vault/secrets/login && cat /vault/secrets/my-first-secret"
```
----
Once you have configured Kubernetes with Vault, you enable a **secure integration between your Kubernetes cluster and HashiCorp Vault**. These are the most important scenarios and benefits:
## Key Features
### 1. **Automatic Pod Authentication**
Pods can authenticate against Vault automatically, without you having to distribute credentials by hand. Vault uses Kubernetes service accounts to verify their identity.
### 2. **Dynamic Secrets for Applications**
Applications can fetch secrets from Vault at runtime instead of storing them in ConfigMaps or Kubernetes Secrets.
## Practical Scenarios
### **Scenario 1: Vault Agent Sidecar Injection**
Vault automatically injects a sidecar container that fetches secrets and makes them available to your app:
```yaml
apiVersion: v1
kind: Pod
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/role: "myapp"
vault.hashicorp.com/agent-inject-secret-database: "database/creds/myapp-role"
spec:
serviceAccountName: myapp
containers:
- name: app
image: myapp:latest
```
**Result:** database credentials are automatically provided at `/vault/secrets/database`.
### **Scenario 2: Dynamic Database Credentials**
Instead of using static DB passwords, Vault generates temporary credentials (see the sketch after this list):
- Each pod gets its own DB credentials
- Credentials are time-limited (e.g. 24h)
- Automatic rotation
- Easy revocation if compromised
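A minimal sketch of how this could be wired up with Vault's database secrets engine, matching the `database/creds/myapp-role` path used in Scenario 1. The connection details (the CNPG service `postgres-cluster-rw.postgres.svc` and the superuser credentials) are assumptions based on the Postgres setup in this repository:

```bash
# Enable the database secrets engine (mount path and names are assumptions)
vault secrets enable database

# Tell Vault how to reach Postgres; {{username}}/{{password}} are Vault's own templating
vault write database/config/postgres \
  plugin_name=postgresql-database-plugin \
  allowed_roles="myapp-role" \
  connection_url="postgresql://{{username}}:{{password}}@postgres-cluster-rw.postgres.svc:5432/postgres" \
  username="postgres" \
  password="$(just postgres::admin-password)"

# Role that creates short-lived database users on demand
vault write database/roles/myapp-role \
  db_name=postgres \
  creation_statements="CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';" \
  default_ttl=24h \
  max_ttl=24h

# Every read returns a fresh, expiring username/password pair
vault read database/creds/myapp-role
```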
### **Scenario 3: External Secrets Operator (ESO)**
Secrets are synchronized into native Kubernetes Secrets:
```yaml
apiVersion: external-secrets.io/v1beta1
kind: SecretStore
metadata:
name: vault-backend
spec:
provider:
vault:
server: "https://vault.test.k8s.schnrbs.work"
path: "secret"
auth:
kubernetes:
mountPath: "kubernetes"
role: "myapp"
```
### **Scenario 4: Encryption as a Service**
Applications can use Vault's Transit engine:
```bash
# Encrypt data without ever knowing the key
vault write transit/encrypt/my-key plaintext=$(base64 <<< "sensitive data")
# Decrypt data
vault write transit/decrypt/my-key ciphertext="vault:v1:abc..."
```
### **Scenario 5: PKI / Certificate Management**
Automatic issuance of TLS certificates for service-to-service communication (see the sketch after this list):
- Short-lived certificates (e.g. 1h)
- Automatic rotation
- Zero-trust networking
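A minimal sketch of the PKI engine, assuming an internal CA mounted at `pki/`; the mount path, role name, domains, and TTLs are illustrative, not part of this setup:

```bash
# Enable and configure an internal CA (all names/TTLs are assumptions)
vault secrets enable pki
vault secrets tune -max-lease-ttl=8760h pki
vault write pki/root/generate/internal common_name="svc.cluster.local" ttl=8760h

# Role that issues short-lived certificates for in-cluster services
vault write pki/roles/internal-services \
  allowed_domains="svc.cluster.local" \
  allow_subdomains=true \
  max_ttl=1h

# Issue a 1-hour certificate for a service
vault write pki/issue/internal-services \
  common_name="myapp.default.svc.cluster.local" ttl=1h
```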
### **Scenario 6: Multi-Tenancy**
Different namespaces/teams get isolated access (a policy sketch follows below):
```bash
# Team A may only access secret/team-a/*
# Team B may only access secret/team-b/*
```
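A sketch of what that isolation could look like, assuming the KV v2 mount at `secret/` used elsewhere in this repository; the team names and role bindings are illustrative:

```bash
# Policy limited to team-a's subtree (KV v2 adds the data/ prefix)
vault policy write team-a - <<EOF
path "secret/data/team-a/*" {
  capabilities = ["create", "read", "update", "delete", "list"]
}
EOF

# Bind the policy to service accounts in team-a's namespace via Kubernetes auth
vault write auth/kubernetes/role/team-a \
  bound_service_account_names="*" \
  bound_service_account_namespaces=team-a \
  policies=team-a \
  ttl=1h
```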
## Advantages over Kubernetes Secrets
| Aspect | Kubernetes Secrets | Vault integration |
|--------|-------------------|-------------------|
| Encryption at rest | Optional, at the etcd level | Always, additionally encrypted |
| Secret rotation | Manual | Automatic/dynamic |
| Audit log | Limited | Detailed for every access |
| Dynamic secrets | No | Yes (DB, cloud, etc.) |
| Granular policies | Limited | Very fine-grained |
| Encryption as a Service | No | Yes |
## Typical Workflow after Configuration
1. **Create a policy:** define who may access which secrets
2. **Create a role:** link Kubernetes service accounts to Vault policies
3. **Provide secrets:** use Vault Agent injection or the CSI driver
4. **Deploy the application:** pods authenticate automatically
## Best-Practice Setup
After enabling Kubernetes auth, you should:
```bash
# 1. Create a policy
vault policy write myapp - <<EOF
path "secret/data/myapp/*" {
capabilities = ["read"]
}
EOF
# 2. Create a role
vault write auth/kubernetes/role/myapp \
bound_service_account_names=myapp \
bound_service_account_namespaces=production \
policies=myapp \
ttl=1h
# 3. Create the service account in Kubernetes
kubectl create serviceaccount myapp -n production
```


@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
name: vault-auth-token
annotations:
kubernetes.io/service-account.name: vault-auth
type: kubernetes.io/service-account-token
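Once applied, the token controller populates this Secret with a `token` and `ca.crt` for the `vault-auth` service account. A quick check that the token has actually been issued (a sketch, assuming the `vault` namespace used by the justfile):

```bash
# The data is filled in asynchronously; an empty result means the token has not been issued yet
kubectl get secret vault-auth-token -n vault -o jsonpath='{.data.token}' | base64 --decode | cut -c1-40; echo
```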

08_Vault/justfile

@@ -0,0 +1,126 @@
set fallback := true
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
export VAULT_CHART_VERSION := env("VAULT_CHART_VERSION", "0.31.0")
export VAULT_HOST := env("VAULT_HOST", "")
export VAULT_ADDR := "https://" + VAULT_HOST
export VAULT_DEBUG := env("VAULT_DEBUG", "false")
SECRET_PATH := "secret"
[private]
default:
@just --list --unsorted --list-submodules
# Add Helm repository
add-helm-repo:
helm repo add hashicorp https://helm.releases.hashicorp.com
helm repo update
# Remove Helm repository
remove-helm-repo:
helm repo remove hashicorp
# Create Vault namespace
create-namespace:
@kubectl get namespace ${K8S_VAULT_NAMESPACE} > /dev/null || kubectl create namespace ${K8S_VAULT_NAMESPACE}
# Delete Vault namespace
delete-namespace:
@kubectl delete namespace ${K8S_VAULT_NAMESPACE} --ignore-not-found
install:
#!/bin/bash
set -eu
just create-namespace
just add-helm-repo
gomplate -f vault-values.gomplate.yaml -o vault-values.yaml
helm upgrade \
--cleanup-on-fail \
--install \
vault \
hashicorp/vault \
--namespace ${K8S_VAULT_NAMESPACE} \
--wait \
-f vault-values.yaml
kubectl wait pod --for=condition=PodReadyToStartContainers \
-n ${K8S_VAULT_NAMESPACE} vault-0 --timeout=5m
# Wait for Vault service to be ready to accept connections
echo "Waiting for Vault service to be ready..."
for i in {1..30}; do
if kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
vault status 2>&1 | grep -qE "(Initialized|Sealed)"; then
echo "✓ Vault service is ready"
break
fi
if [ $i -eq 30 ]; then
echo "Error: Timeout waiting for Vault service to be ready"
exit 1
fi
sleep 3
done
init_output=$(kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
vault operator init -key-shares=1 -key-threshold=1 -format=json || true)
root_token=""
if echo "${init_output}" | grep -q "Vault is already initialized"; then
echo "Vault is already initialized"
while [ -z "${root_token}" ]; do
root_token=$(gum input --prompt="Vault root token: " --password --width=100)
done
else
unseal_key=$(echo "${init_output}" | jq -r '.unseal_keys_b64[0]')
root_token=$(echo "${init_output}" | jq -r '.root_token')
kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
vault operator unseal "${unseal_key}"
echo "Vault initialized and unsealed successfully"
echo "Root Token: ${root_token}"
echo "Unseal Key: ${unseal_key}"
echo "Please save these credentials securely!"
fi
# Wait for all vault instances to pass readiness checks and be ready to serve requests
kubectl wait pod --for=condition=ready -n ${K8S_VAULT_NAMESPACE} \
-l app.kubernetes.io/name=vault --timeout=5m
just setup-kubernetes-auth "${root_token}"
# Uninstall Vault
uninstall delete-ns='false':
#!/bin/bash
set -euo pipefail
helm uninstall vault -n ${K8S_VAULT_NAMESPACE} --ignore-not-found --wait
just delete-namespace
# Setup Kubernetes authentication
setup-kubernetes-auth root_token='':
#!/bin/bash
set -euo pipefail
export VAULT_TOKEN="{{ root_token }}"
while [ -z "${VAULT_TOKEN}" ]; do
VAULT_TOKEN=$(gum input --prompt="Vault root token: " --password --width=100)
done
gomplate -f ./serviceaccount.gomplate.yaml | kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f -
gomplate -f ./rolebinding.gomplate.yaml | kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f -
kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f ./auth-token-secret.yaml
SA_SECRET="vault-auth-token"
SA_JWT=$(kubectl get secret -n ${K8S_VAULT_NAMESPACE} ${SA_SECRET} -o jsonpath='{.data.token}' | base64 --decode)
SA_CA=$(kubectl get secret -n ${K8S_VAULT_NAMESPACE} ${SA_SECRET} -o jsonpath='{.data.ca\.crt}' | base64 --decode)
vault auth list -format=json | jq -e '.["kubernetes/"]' >/dev/null 2>&1 || \
vault auth enable kubernetes
vault write auth/kubernetes/config \
token_reviewer_jwt="${SA_JWT}" \
kubernetes_host="https://kubernetes.default.svc" \
kubernetes_ca_cert="${SA_CA}"
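One way to sanity-check the Kubernetes auth method after `setup-kubernetes-auth` is to exchange a service account token for a Vault token. A sketch, assuming `VAULT_ADDR` is exported as in the justfile and that a role such as the `external-secrets` role created in 09_ExternalSecrets already exists:

```bash
# Read back the auth backend configuration
vault read auth/kubernetes/config

# Request a short-lived projected token for the bound service account (audience must match the role)
SA_TOKEN=$(kubectl create token external-secrets -n external-secrets --audience=vault)

# A successful login returns a client token scoped to the role's policies
vault write auth/kubernetes/login role=external-secrets jwt="${SA_TOKEN}"
```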


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: vault-auth-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: vault-auth
namespace: {{ .Env.K8S_VAULT_NAMESPACE }}


@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault-auth
namespace: {{ .Env.K8S_VAULT_NAMESPACE }}


@@ -0,0 +1,16 @@
server:
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
ingressClassName: traefik
hosts:
- host: {{ .Env.VAULT_HOST }}
paths:
- /
tls:
- hosts:
- {{ .Env.VAULT_HOST }}
dataStorage:
storageClass: longhorn


@@ -0,0 +1,16 @@
server:
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
ingressClassName: traefik
hosts:
- host: vault.test.k8s.schnrbs.work
paths:
- /
tls:
- hosts:
- vault.test.k8s.schnrbs.work
dataStorage:
storageClass: longhorn


@@ -0,0 +1,51 @@
# External Secrets Operator resource configuration
# Based on Goldilocks recommendations (Burstable QoS)
podSecurityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
seccompProfile:
type: RuntimeDefault
# Main controller
resources:
requests:
cpu: 15m
memory: 192Mi
limits:
cpu: 50m
memory: 256Mi
certController:
podSecurityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
seccompProfile:
type: RuntimeDefault
resources:
requests:
cpu: 15m
memory: 192Mi
limits:
cpu: 50m
memory: 256Mi
webhook:
podSecurityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
seccompProfile:
type: RuntimeDefault
resources:
requests:
cpu: 15m
memory: 128Mi
limits:
cpu: 50m
memory: 256Mi


@@ -0,0 +1,65 @@
set fallback := true
export EXTERNAL_SECRETS_NAMESPACE := env("EXTERNAL_SECRETS_NAMESPACE", "external-secrets")
export EXTERNAL_SECRETS_CHART_VERSION := env("EXTERNAL_SECRETS_CHART_VERSION", "1.1.0")
export EXTERNAL_SECRETS_REFRESH_INTERVAL := env("EXTERNAL_SECRETS_REFRESH_INTERVAL", "1800")
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
export VAULT_HOST := env("VAULT_HOST", "")
export VAULT_ADDR := "https://" + VAULT_HOST
[private]
default:
@just --list --unsorted --list-submodules
# Add Helm repository
add-helm-repo:
helm repo add external-secrets https://charts.external-secrets.io
helm repo update
# Remove Helm repository
remove-helm-repo:
helm repo remove external-secrets
# Install External Secrets
install:
just add-helm-repo
helm upgrade --cleanup-on-fail \
--install external-secrets external-secrets/external-secrets \
--version ${EXTERNAL_SECRETS_CHART_VERSION} -n ${EXTERNAL_SECRETS_NAMESPACE} \
--create-namespace --wait \
-f external-secrets-values.yaml
kubectl label namespace ${EXTERNAL_SECRETS_NAMESPACE} \
pod-security.kubernetes.io/enforce=restricted --overwrite
just create-external-secrets-role
just create-vault-secret-store
# Uninstall External Secrets
uninstall:
just delete-vault-secret-store
helm uninstall external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} --wait
kubectl delete namespace ${EXTERNAL_SECRETS_NAMESPACE} --ignore-not-found
# Create Vault Secret Store for External Secrets
create-vault-secret-store:
gomplate -f ./vault-secret-store.gomplate.yaml | kubectl apply -f -
# Delete Vault Secret Store for External Secrets
delete-vault-secret-store:
gomplate -f ./vault-secret-store.gomplate.yaml | kubectl delete --ignore-not-found -f -
# Create Vault role for External Secrets
create-external-secrets-role root_token='':
#!/bin/bash
set -euo pipefail
export VAULT_TOKEN="{{ root_token }}"
while [ -z "${VAULT_TOKEN}" ]; do
VAULT_TOKEN=$(gum input --prompt="Vault root token: " --password --width=100)
done
vault write auth/kubernetes/role/external-secrets \
bound_service_account_names=external-secrets \
bound_service_account_namespaces=${EXTERNAL_SECRETS_NAMESPACE} \
audience=vault \
policies=admin \
ttl=1h
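The role above attaches the `admin` policy to the ESO service account. A narrower policy scoped to the KV mount would look roughly like this (a sketch; the policy name is an assumption on top of the `secret/` KV v2 mount used here):

```bash
# Read-only access to the KV v2 mount for External Secrets
vault policy write external-secrets-read - <<EOF
path "secret/data/*" {
  capabilities = ["read"]
}
path "secret/metadata/*" {
  capabilities = ["read", "list"]
}
EOF

# Then reference it in the role instead of admin:
#   vault write auth/kubernetes/role/external-secrets ... policies=external-secrets-read ...
```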


@@ -0,0 +1,22 @@
apiVersion: external-secrets.io/v1
kind: ClusterSecretStore
metadata:
name: vault-secret-store
spec:
provider:
vault:
server: http://vault.{{ .Env.K8S_VAULT_NAMESPACE }}:8200
path: secret
version: v2
auth:
kubernetes:
role: external-secrets
mountPath: kubernetes
serviceAccountRef:
name: external-secrets
namespace: {{ .Env.EXTERNAL_SECRETS_NAMESPACE }}
# Audience must match the audience configured in Vault Kubernetes auth role
# Required for Vault 1.21+ compatibility
audiences:
- vault
refreshInterval: {{ .Env.EXTERNAL_SECRETS_REFRESH_INTERVAL }}
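With the ClusterSecretStore in place, an ExternalSecret can mirror a Vault KV entry into a namespace. A sketch, assuming a KV v2 entry at `postgres/admin` like the one the Postgres justfile writes; the target namespace and secret name are illustrative:

```yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: postgres-admin
  namespace: default
spec:
  refreshInterval: 30m
  secretStoreRef:
    kind: ClusterSecretStore
    name: vault-secret-store
  target:
    name: postgres-admin
  data:
    - secretKey: password
      remoteRef:
        key: postgres/admin
        property: password
```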


@@ -0,0 +1,27 @@
# Pod Security Context for restricted Pod Security Standards
#podSecurityContext:
# runAsNonRoot: true
# seccompProfile:
# type: RuntimeDefault
# fsGroup: 10001
#
## Container Security Context for restricted Pod Security Standards
#containerSecurityContext:
# allowPrivilegeEscalation: false
# readOnlyRootFilesystem: true
# runAsUser: 10001
# runAsGroup: 10001
# seccompProfile:
# type: RuntimeDefault
# capabilities:
# drop:
# - ALL
#
resources:
requests:
cpu: 50m
memory: 128Mi
limits:
cpu: 100m
memory: 256Mi

10_Postgres/justfile

@@ -0,0 +1,647 @@
set fallback := true
export CNPG_NAMESPACE := env("CNPG_NAMESPACE", "postgres")
export CNPG_CHART_VERSION := env("CNPG_CHART_VERSION", "0.26.1")
export CNPG_CLUSTER_CHART_VERSION := env("CNPG_CLUSTER_CHART_VERSION", "0.3.1")
export POSTGRES_STORAGE_SIZE := env("POSTGRES_STORAGE_SIZE", "20Gi")
export POSTGRES_MAX_CONNECTIONS := env("POSTGRES_MAX_CONNECTIONS", "200")
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
export EXTERNAL_SECRETS_NAMESPACE := env("EXTERNAL_SECRETS_NAMESPACE", "external-secrets")
[private]
default:
@just --list --unsorted --list-submodules
# Add Helm repository
add-helm-repo:
@helm repo add cnpg https://cloudnative-pg.github.io/charts
@helm repo update
# Remove Helm repository
remove-helm-repo:
@helm repo remove cnpg
# Install CloudNativePG and create a cluster
install:
@just install-cnpg
@just create-cluster
# Uninstall CloudNativePG and delete the cluster
uninstall:
@just delete-cluster
@just uninstall-cnpg
# Install CloudNativePG
install-cnpg:
@just add-helm-repo
@helm upgrade --cleanup-on-fail --install cnpg cnpg/cloudnative-pg \
--version ${CNPG_CHART_VERSION} \
-n ${CNPG_NAMESPACE} --create-namespace --wait \
-f cnpg-values.yaml
@kubectl label namespace ${CNPG_NAMESPACE} \
pod-security.kubernetes.io/enforce=restricted --overwrite
# Uninstall CloudNativePG
uninstall-cnpg:
@helm uninstall cnpg -n ${CNPG_NAMESPACE} --wait
@kubectl delete namespace ${CNPG_NAMESPACE} --ignore-not-found
# Create Postgres cluster
create-cluster:
#!/bin/bash
set -euo pipefail
if helm status external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} &>/dev/null; then
echo "External Secrets Operator detected. Creating admin credentials via ExternalSecret..."
password=$(just utils::random-password)
just vault::put-root postgres/admin username=postgres password="${password}"
kubectl delete externalsecret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
gomplate -f postgres-superuser-external-secret.gomplate.yaml | kubectl apply -f -
echo "Waiting for ExternalSecret to sync..."
kubectl wait --for=condition=Ready externalsecret/postgres-cluster-superuser \
-n ${CNPG_NAMESPACE} --timeout=60s
else
echo "External Secrets Operator not found. Creating superuser secret directly..."
password=$(just utils::random-password)
kubectl delete secret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
kubectl create secret generic postgres-cluster-superuser -n ${CNPG_NAMESPACE} \
--from-literal=username=postgres \
--from-literal=password="${password}"
if helm status vault -n ${K8S_VAULT_NAMESPACE} &>/dev/null; then
just vault::put-root postgres/admin username=postgres password="${password}"
fi
fi
gomplate -f postgres-cluster-values.gomplate.yaml -o postgres-cluster-values.yaml
helm upgrade --install postgres-cluster cnpg/cluster \
--version ${CNPG_CLUSTER_CHART_VERSION} \
-n ${CNPG_NAMESPACE} --wait -f postgres-cluster-values.yaml
echo "Waiting for PostgreSQL cluster to be ready..."
kubectl wait --for=condition=Ready clusters.postgresql.cnpg.io/postgres-cluster \
-n ${CNPG_NAMESPACE} --timeout=300s
# Delete Postgres cluster
delete-cluster:
@helm uninstall postgres-cluster -n ${CNPG_NAMESPACE} --ignore-not-found --wait
@kubectl delete externalsecret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
@kubectl delete secret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
# Print Postgres username
admin-username:
@echo "postgres"
# Print Postgres password
admin-password:
@kubectl get -n ${CNPG_NAMESPACE} secret postgres-cluster-superuser \
-o jsonpath="{.data.password}" | base64 --decode
@echo
# Create Postgres database
create-db db_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-{{ db_name }}}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
if just db-exists ${DB_NAME} &>/dev/null; then
echo "Database ${DB_NAME} already exists" >&2
exit
fi
echo "Creating database ${DB_NAME}..."
just psql -c "\"CREATE DATABASE ${DB_NAME};\""
echo "Database ${DB_NAME} created."
# Delete Postgres database
delete-db db_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-{{ db_name }}}
if ! just db-exists ${DB_NAME} &>/dev/null; then
echo "Database ${DB_NAME} does not exist." >&2
exit
fi
# Terminate all connections to the database
just psql -c "\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity
WHERE datname = '${DB_NAME}' AND pid <> pg_backend_pid();\""
# Force disconnect if needed
just psql -c "\"UPDATE pg_database SET datallowconn = false WHERE datname = '${DB_NAME}';\""
just psql -c "\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity
WHERE datname = '${DB_NAME}';\""
just psql -c "\"DROP DATABASE ${DB_NAME};\""
echo "Database ${DB_NAME} deleted."
# Check if database exists
[no-exit-message]
db-exists db_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-{{ db_name }}}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
if echo '\l' | just postgres::psql | grep -E "^ *${DB_NAME} *\|" &>/dev/null; then
echo "Database ${DB_NAME} exists."
else
echo "Database ${DB_NAME} does not exist." >&2
exit 1
fi
# Create Postgres user
create-user username='' password='':
#!/bin/bash
set -euo pipefail
USERNAME=${USERNAME:-"{{ username }}"}
PASSWORD=${PASSWORD:-"{{ password }}"}
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username: " --width=100)
done
if just user-exists ${USERNAME} &>/dev/null; then
echo "User ${USERNAME} already exists" >&2
exit
fi
if [ -z "${PASSWORD}" ]; then
PASSWORD=$(gum input --prompt="Password: " --password --width=100 \
--placeholder="Empty to generate a random password")
fi
if [ -z "${PASSWORD}" ]; then
PASSWORD=$(just random-password)
echo "Generated random password: ${PASSWORD}"
fi
just psql -c "\"CREATE USER ${USERNAME} WITH LOGIN PASSWORD '${PASSWORD}';\""
echo "User ${USERNAME} created."
# Delete Postgres user
delete-user username='':
#!/bin/bash
set -euo pipefail
USERNAME=${USERNAME:-"{{ username }}"}
if ! just user-exists ${USERNAME} &>/dev/null; then
echo "User ${USERNAME} does not exist." >&2
exit
fi
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON TABLES FROM ${USERNAME};\""
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON SEQUENCES FROM ${USERNAME};\""
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON FUNCTIONS FROM ${USERNAME};\""
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON TYPES FROM ${USERNAME};\""
just psql -c "\"ALTER SCHEMA public OWNER TO postgres;\""
just psql -c "\"DROP USER ${USERNAME};\""
echo "User ${USERNAME} deleted."
# Check if user exists
[no-exit-message]
user-exists username='':
#!/bin/bash
set -euo pipefail
USERNAME=${USERNAME:-"{{ username }}"}
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username: " --width=100)
done
if echo '\du' | just postgres::psql | grep -E "^ *${USERNAME} *\|" &>/dev/null; then
echo "User ${USERNAME} exists."
else
echo "User ${USERNAME} does not exist." >&2
exit 1
fi
# Change user password
change-password username='' password='':
#!/bin/bash
set -euo pipefail
USERNAME=${USERNAME:-"{{ username }}"}
PASSWORD=${PASSWORD:-"{{ password }}"}
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username: " --width=100)
done
if ! just user-exists ${USERNAME} &>/dev/null; then
echo "User ${USERNAME} does not exist." >&2
exit 1
fi
if [ -z "${PASSWORD}" ]; then
PASSWORD=$(gum input --prompt="New password: " --password --width=100 \
--placeholder="Empty to generate a random password")
fi
if [ -z "${PASSWORD}" ]; then
PASSWORD=$(just utils::random-password)
echo "Generated random password: ${PASSWORD}"
fi
just psql -c "\"ALTER USER ${USERNAME} WITH PASSWORD '${PASSWORD}';\""
echo "Password changed for user ${USERNAME}."
# Grant all privileges on database to user
grant db_name='' username='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
USERNAME=${USERNAME:-"{{ username }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username: " --width=100)
done
if ! just psql ${DB_NAME} -U postgres -P pager=off -c "\"SELECT 1;\""; then
echo "Database ${DB_NAME} does not exist." >&2
exit 1
fi
just psql -c "\"GRANT ALL PRIVILEGES ON DATABASE ${DB_NAME} TO ${USERNAME};\""
# Grant CREATE permission on public schema (needed for PostgreSQL 15+)
just psql -d ${DB_NAME} -c "\"GRANT CREATE ON SCHEMA public TO ${USERNAME};\""
echo "Privileges granted."
# Revoke all privileges on database from user
revoke db_name='' username='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
USERNAME=${USERNAME:-"{{ username }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username: " --width=100)
done
if ! just psql -U postgres ${DB_NAME} -P pager=off -c "\"SELECT 1;\""; then
echo "Database ${DB_NAME} does not exist." >&2
exit 1
fi
just psql -c "\"REVOKE ALL PRIVILEGES ON DATABASE ${DB_NAME} FROM ${USERNAME};\""
echo "Privileges revoked."
# Create Postgres database and user
create-user-and-db username='' db_name='' password='':
@just create-db "{{ db_name }}"
@just create-user "{{ username }}" "{{ password }}"
@just grant "{{ db_name }}" "{{ username }}"
# Delete Postgres database and user
delete-user-and-db username='' db_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
USERNAME=${USERNAME:-"{{ username }}"}
if just db-exists ${DB_NAME} &>/dev/null; then
if just user-exists ${USERNAME} &>/dev/null; then
just revoke "${DB_NAME}" "${USERNAME}"
else
echo "User ${USERNAME} does not exist, skipping revoke."
fi
just delete-db "${DB_NAME}"
else
echo "Database ${DB_NAME} does not exist, skipping database deletion."
fi
if just user-exists ${USERNAME} &>/dev/null; then
just delete-user "${USERNAME}"
else
echo "User ${USERNAME} does not exist, skipping user deletion."
fi
echo "Cleanup completed."
# Create logical replication slot for CDC
create-replication-slot slot_name='' db_name='postgres' plugin='pgoutput':
#!/bin/bash
set -euo pipefail
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
PLUGIN=${PLUGIN:-"{{ plugin }}"}
while [ -z "${SLOT_NAME}" ]; do
SLOT_NAME=$(gum input --prompt="Replication slot name: " --width=100 \
--placeholder="e.g., airbyte_slot")
done
if kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT slot_name FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" | grep -q "${SLOT_NAME}"; then
echo "Replication slot '${SLOT_NAME}' already exists."
exit 0
fi
echo "Creating replication slot '${SLOT_NAME}' with plugin '${PLUGIN}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"SELECT pg_create_logical_replication_slot('${SLOT_NAME}', '${PLUGIN}');"
echo "Replication slot '${SLOT_NAME}' created."
# Delete replication slot
delete-replication-slot slot_name='' db_name='postgres':
#!/bin/bash
set -euo pipefail
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
while [ -z "${SLOT_NAME}" ]; do
SLOT_NAME=$(gum input --prompt="Replication slot name to delete: " --width=100)
done
if ! kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT slot_name FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" | grep -q "${SLOT_NAME}"; then
echo "Replication slot '${SLOT_NAME}' does not exist."
exit 1
fi
echo "Deleting replication slot '${SLOT_NAME}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"SELECT pg_drop_replication_slot('${SLOT_NAME}');"
echo "Replication slot '${SLOT_NAME}' deleted."
# List all replication slots
list-replication-slots:
@echo "Replication slots:"
@kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d postgres -c \
"SELECT slot_name, plugin, slot_type, database, active, restart_lsn FROM pg_replication_slots;"
# Create publication for CDC
create-publication pub_name='' db_name='' tables='':
#!/bin/bash
set -euo pipefail
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
TABLES="${TABLES:-{{ tables }}}"
while [ -z "${PUB_NAME}" ]; do
PUB_NAME=$(gum input --prompt="Publication name: " --width=100 \
--placeholder="e.g., airbyte_publication")
done
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
if kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT pubname FROM pg_publication WHERE pubname = '${PUB_NAME}';" | grep -q "${PUB_NAME}"; then
echo "Publication '${PUB_NAME}' already exists in database '${DB_NAME}'."
exit 0
fi
if [ -z "${TABLES}" ]; then
echo "Select tables to include in publication:"
echo "1) All tables (ALL TABLES)"
echo "2) All user tables (exclude system/internal tables)"
echo "3) Specific tables (comma-separated list)"
CHOICE=$(gum choose "All tables" "User tables only" "Specific tables")
case "${CHOICE}" in
"All tables")
TABLES="ALL TABLES"
;;
"User tables only")
# Get list of user tables (excluding _airbyte* and other system tables)
USER_TABLES=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT string_agg(tablename, ', ') FROM pg_tables
WHERE schemaname = 'public'
AND tablename NOT LIKE '\_%'
AND tablename NOT LIKE 'pg_%';")
if [ -z "${USER_TABLES}" ]; then
echo "No user tables found in database '${DB_NAME}'"
exit 1
fi
TABLES="TABLE ${USER_TABLES}"
echo "Including tables: ${USER_TABLES}"
;;
"Specific tables")
TABLES=$(gum input --prompt="Enter table names (comma-separated): " --width=100 \
--placeholder="e.g., users, products, orders")
TABLES="TABLE ${TABLES}"
;;
esac
elif [ "${TABLES}" = "ALL" ]; then
TABLES="ALL TABLES"
fi
echo "Creating publication '${PUB_NAME}' in database '${DB_NAME}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"CREATE PUBLICATION ${PUB_NAME} FOR ${TABLES};"
if [ "${TABLES}" != "ALL TABLES" ]; then
echo "Setting REPLICA IDENTITY for included tables..."
TABLE_LIST=$(echo "${TABLES}" | sed 's/TABLE //')
IFS=',' read -ra TABLE_ARRAY <<< "${TABLE_LIST}"
for table in "${TABLE_ARRAY[@]}"; do
table=$(echo "$table" | xargs) # trim whitespace
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"ALTER TABLE ${table} REPLICA IDENTITY FULL;" 2>/dev/null || true
done
fi
echo "Publication '${PUB_NAME}' created."
# Delete publication
delete-publication pub_name='' db_name='':
#!/bin/bash
set -euo pipefail
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
while [ -z "${PUB_NAME}" ]; do
PUB_NAME=$(gum input --prompt="Publication name to delete: " --width=100)
done
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
if ! kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT pubname FROM pg_publication WHERE pubname = '${PUB_NAME}';" | grep -q "${PUB_NAME}"; then
echo "Publication '${PUB_NAME}' does not exist in database '${DB_NAME}'."
exit 1
fi
echo "Deleting publication '${PUB_NAME}' from database '${DB_NAME}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"DROP PUBLICATION ${PUB_NAME};"
echo "Publication '${PUB_NAME}' deleted."
# List all publications in a database
list-publications db_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
echo "Publications in database '${DB_NAME}':"
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"SELECT pubname, puballtables, pubinsert, pubupdate, pubdelete FROM pg_publication;"
# Grant CDC privileges to user
grant-cdc-privileges username='' db_name='':
#!/bin/bash
set -euo pipefail
USERNAME=${USERNAME:-"{{ username }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username to grant CDC privileges: " --width=100)
done
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
echo "Granting CDC privileges to user '${USERNAME}' on database '${DB_NAME}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c "ALTER USER ${USERNAME} WITH REPLICATION;"
echo "Granting schema and table privileges..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"GRANT USAGE ON SCHEMA public TO ${USERNAME};
GRANT CREATE ON SCHEMA public TO ${USERNAME};
GRANT SELECT ON ALL TABLES IN SCHEMA public TO ${USERNAME};
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO ${USERNAME};"
echo "Granting pg_read_all_data role..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c "GRANT pg_read_all_data TO ${USERNAME};" 2>/dev/null || true
echo "CDC privileges granted to user '${USERNAME}'"
# Setup CDC (Change Data Capture)
setup-cdc db_name='' slot_name='' pub_name='' username='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
USERNAME=${USERNAME:-"{{ username }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name for CDC setup: " --width=100)
done
while [ -z "${SLOT_NAME}" ]; do
SLOT_NAME=$(gum input --prompt="Replication slot name: " --width=100 \
--placeholder="e.g., demo_slot")
done
while [ -z "${PUB_NAME}" ]; do
PUB_NAME=$(gum input --prompt="Publication name: " --width=100 \
--placeholder="e.g., demo_pub")
done
echo "Setting up CDC on database '${DB_NAME}'..."
WAL_LEVEL=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d postgres -tAc "SHOW wal_level;")
if [ "${WAL_LEVEL}" != "logical" ]; then
echo "WARNING: wal_level is '${WAL_LEVEL}', should be 'logical' for CDC"
echo "Please ensure PostgreSQL is configured with wal_level=logical"
exit 1
fi
just create-replication-slot "${SLOT_NAME}" "${DB_NAME}"
just create-publication "${PUB_NAME}" "${DB_NAME}"
if [ -n "${USERNAME}" ]; then
echo ""
just grant-cdc-privileges "${USERNAME}" "${DB_NAME}"
fi
echo ""
echo "CDC setup completed for database '${DB_NAME}'"
echo " Replication Method: Logical Replication (CDC)"
echo " Replication Slot: ${SLOT_NAME}"
echo " Publication: ${PUB_NAME}"
if [ -n "${USERNAME}" ]; then
echo " User with CDC privileges: ${USERNAME}"
fi
# Cleanup CDC (removes slot and publication)
cleanup-cdc db_name='' slot_name='' pub_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name for CDC cleanup: " --width=100)
done
while [ -z "${SLOT_NAME}" ]; do
SLOT_NAME=$(gum input --prompt="Replication slot name to delete: " --width=100 \
--placeholder="e.g., demo_slot")
done
while [ -z "${PUB_NAME}" ]; do
PUB_NAME=$(gum input --prompt="Publication name to delete: " --width=100 \
--placeholder="e.g., demo_pub")
done
echo "Cleaning up CDC configuration for database '${DB_NAME}'..."
# Check if slot is active
SLOT_ACTIVE=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d postgres -tAc \
"SELECT active FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" 2>/dev/null || echo "")
if [ "${SLOT_ACTIVE}" = "t" ]; then
echo "WARNING: Replication slot '${SLOT_NAME}' is currently active!"
echo "Please stop any active replication connections first."
if ! gum confirm "Proceed with deletion anyway?"; then
echo "Cleanup cancelled"
exit 1
fi
fi
# Delete in correct order: Slot first, then Publication
echo "Step 1: Deleting replication slot '${SLOT_NAME}'..."
just delete-replication-slot "${SLOT_NAME}" "${DB_NAME}" || \
echo "Replication slot '${SLOT_NAME}' not found or already deleted"
echo "Step 2: Deleting publication '${PUB_NAME}'..."
just delete-publication "${PUB_NAME}" "${DB_NAME}" || \
echo "Publication '${PUB_NAME}' not found or already deleted"
echo "CDC cleanup completed for database '${DB_NAME}'"
# Run psql
[no-exit-message]
psql *args='':
@kubectl exec -it -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- psql {{ args }}
# Dump Postgres database by pg_dump
[no-cd]
dump db_name file exclude_tables='':
#!/bin/bash
set -euo pipefail
DUMP_OPTIONS="-Fc"
if [ -n "{{ exclude_tables }}" ]; then
IFS=',' read -ra TABLES <<< "{{ exclude_tables }}"
for table in "${TABLES[@]}"; do
DUMP_OPTIONS="$DUMP_OPTIONS --exclude-table=$table"
done
fi
kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- bash -c \
"pg_dump -d postgresql://$(just postgres::admin-username):$(just postgres::admin-password)@localhost/{{ db_name }} $DUMP_OPTIONS > \
/var/lib/postgresql/data/db.dump"
kubectl cp -n ${CNPG_NAMESPACE} -c postgres \
postgres-cluster-1:/var/lib/postgresql/data/db.dump {{ file }}
kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- rm /var/lib/postgresql/data/db.dump
# Restore Postgres database by pg_restore
[no-cd]
restore db_name file:
just postgres::create-db {{ db_name }}
kubectl cp {{ file }} -n ${CNPG_NAMESPACE} -c postgres \
postgres-cluster-1:/var/lib/postgresql/data/db.dump
kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- bash -c \
"pg_restore --clean --if-exists \
-d postgresql://$(just postgres::admin-username):$(just postgres::admin-password)@localhost/{{ db_name }} \
/var/lib/postgresql/data/db.dump"
# Enable Prometheus monitoring
enable-monitoring:
#!/bin/bash
set -euo pipefail
echo "Enabling Prometheus PodMonitor for PostgreSQL cluster..."
# Label namespace to enable monitoring
kubectl label namespace ${CNPG_NAMESPACE} buun.channel/enable-monitoring=true --overwrite
# Enable PodMonitor
kubectl patch cluster postgres-cluster -n ${CNPG_NAMESPACE} --type=merge -p '{"spec":{"monitoring":{"enablePodMonitor":true}}}'
echo "Waiting for PodMonitor to be created..."
sleep 3
# Add release label to PodMonitor
kubectl label podmonitor postgres-cluster -n ${CNPG_NAMESPACE} release=kube-prometheus-stack --overwrite
kubectl get podmonitor -n ${CNPG_NAMESPACE} -l cnpg.io/cluster=postgres-cluster
echo "✓ PostgreSQL monitoring enabled"
# Disable Prometheus monitoring
disable-monitoring:
#!/bin/bash
set -euo pipefail
echo "Disabling Prometheus PodMonitor for PostgreSQL cluster..."
# Disable PodMonitor
kubectl patch cluster postgres-cluster -n ${CNPG_NAMESPACE} --type=merge -p '{"spec":{"monitoring":{"enablePodMonitor":false}}}'
# Remove namespace label
kubectl label namespace ${CNPG_NAMESPACE} buun.channel/enable-monitoring- --ignore-not-found
echo "✓ PostgreSQL monitoring disabled"


@@ -0,0 +1,9 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: cluster-example
spec:
instances: 3
storage:
size: 1Gi


@@ -0,0 +1,9 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: foo-pv
spec:
storageClassName: "longhorn"
claimRef:
name: foo-pvc
namespace: foo


@@ -9,7 +9,7 @@ spec:
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain # Optionally 'Delete' or 'Recycle'
persistentVolumeReclaimPolicy: Delete # Optionally 'Delete' or 'Recycle'
storageClassName: longhorn # Use the Longhorn StorageClass name
csi:
driver: driver.longhorn.io # The Longhorn CSI driver


@@ -1,16 +1,42 @@
apiVersion: v1
kind: Namespace
metadata:
name: foo
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: longhorn-nginx-pvc
namespace: foo
spec:
storageClassName: longhorn # The same StorageClass as in the PV
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi # The requested size should match that of the PV
# volumeName: longhorn-test-pv # The name of the PV to use for this PVC
---
apiVersion: v1
kind: Pod
metadata:
name: longhorn-demo
namespace: default
namespace: foo
spec:
containers:
- name: demo-container
image: nginx:latest
resources:
requests:
memory: "64Mi"
cpu: "250m"
limits:
memory: "128Mi"
cpu: "500m"
volumeMounts:
- mountPath: /usr/share/nginx/html
name: longhorn-volume
volumes:
- name: longhorn-volume
persistentVolumeClaim:
claimName: longhorn-test-pvc
claimName: longhorn-nginx-pvc

12_Authentik/README.md

@@ -0,0 +1 @@
https://docs.goauthentik.io/install-config/install/kubernetes/#install-authentik-helm-chart


@@ -0,0 +1,10 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: authentik-pgdb
namespace: authentik
spec:
instances: 3
storage:
size: 1Gi


@@ -0,0 +1,21 @@
authentik:
secret_key: "PleaseGenerateASecureKey"
# This sends anonymous usage-data, stack traces on errors and
# performance data to sentry.io, and is fully opt-in
error_reporting:
enabled: true
postgresql:
password: "ThisIsNotASecurePassword"
server:
ingress:
# Specify kubernetes ingress controller class name
ingressClassName: nginx | traefik | kong
enabled: true
hosts:
- authentik.domain.tld
postgresql:
enabled: true
auth:
password: "ThisIsNotASecurePassword"

12_Authentik/justfile

@@ -0,0 +1,28 @@
set fallback := true
export AUTHENTIK_NAMESPACE := env("AUTHENTIK_NAMESPACE", "authentik")
[private]
default:
@just --list --unsorted --list-submodules
# Add Helm repository
add-helm-repo:
@helm repo add authentik https://charts.goauthentik.io
@helm repo update
# Remove Helm repository
remove-helm-repo:
@helm repo remove authentik
install:
@just add-helm-repo
@helm upgrade --cleanup-on-fail --install authentik authentik/authentik \
-n ${AUTHENTIK_NAMESPACE} --create-namespace --wait \
-f authentik-values.yaml
uninstall:
@helm uninstall authentik -n ${AUTHENTIK_NAMESPACE} --wait
@kubectl delete namespace ${AUTHENTIK_NAMESPACE} --ignore-not-found


@@ -167,7 +167,8 @@ With these steps you have a Persistent Volume (PV) and a Persistent Vol
## Disable Localpath as default
```
kubectl get storageclass
kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
```

Longhorn/auth

@@ -0,0 +1 @@
basti:$apr1$N23gJpBe$CYlDcwTfp8YsQMq0UcADQ0

Longhorn/justfile

@@ -0,0 +1,67 @@
set fallback:=true
export LONGHORN_NAMESPACE := env("LONGHORN_NAMESPACE","longhorn-system")
export LONGHORN_VERSION := env("LONGHORN_VERSION","1.10.1")
add-helm-repo:
helm repo add longhorn https://charts.longhorn.io --force-update
helm repo update
# Delete namespace
delete-namespace:
#!/bin/bash
set -euo pipefail
if kubectl get namespace ${LONGHORN_NAMESPACE} &>/dev/null; then
kubectl delete namespace ${LONGHORN_NAMESPACE} --ignore-not-found
else
echo "Namespace ${LONGHORN_NAMESPACE} does not exist."
fi
install:
#!/bin/bash
set -euo pipefail
just env::check
just add-helm-repo
helm upgrade longhorn longhorn/longhorn \
--install \
--cleanup-on-fail \
--namespace ${LONGHORN_NAMESPACE} \
--create-namespace \
--version ${LONGHORN_VERSION} \
--values longhorn-values.yaml
# remove default storage class annotation from local-path storage class
kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
uninstall:
#!/bin/bash
set -euo pipefail
for crd in $(kubectl get crd -o name | grep longhorn); do
kubectl patch $crd -p '{"metadata":{"finalizers":[]}}' --type=merge
done
kubectl -n ${LONGHORN_NAMESPACE} patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag || true
helm uninstall longhorn --namespace ${LONGHORN_NAMESPACE} || true
just delete-namespace
install-dashboard-ingress:
#!/bin/bash
set -euo pipefail
just env::check
echo "Deploying Longhorn Dashboard Ingress with EXTERNAL_DOMAIN=${EXTERNAL_DOMAIN}"
gomplate -f longhorn-certificate-gomplate.yaml | kubectl apply -f -
gomplate -f longhorn-ingressroute-gomplate.yaml | kubectl apply -f -
uninstall-dashboard-ingress:
#!/bin/bash
set -euo pipefail
kubectl delete -f longhorn-ingressroute-gomplate.yaml || true
kubectl delete -f longhorn-certificate-gomplate.yaml || true


@@ -7,7 +7,7 @@ metadata:
spec:
secretName: longhorn-web-ui-tls
dnsNames:
- longhorn-dashboard.k8s.schnrbs.work
- longhorn-dashboard.{{.Env.EXTERNAL_DOMAIN}}
issuerRef:
name: cloudflare-cluster-issuer
kind: ClusterIssuer


@@ -7,7 +7,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`longhorn-dashboard.k8s.schnrbs.work`)
- match: Host(`longhorn-dashboard.{{.Env.EXTERNAL_DOMAIN}}`)
kind: Rule
services:
- name: longhorn-frontend


@@ -1,18 +1,6 @@
global:
nodeSelector:
node.longhorn.io/create-default-disk: "true"
service:
ui:
type: NodePort
nodePort: 30050
manager:
type: ClusterIP
# Replica count for the default Longhorn StorageClass.
persistence:
defaultClass: false
defaultFsType: ext4
defaultClassReplicaCount: 2
reclaimPolicy: Delete
@@ -25,12 +13,10 @@ csi:
# Default replica count and storage path
defaultSettings:
upgradeChecker: false
kubernetesClusterAutoscalerEnabled: false
allowCollectingLonghornUsageMetrics: false
createDefaultDiskLabeledNodes: true
defaultReplicaCount: 2
defaultDataPath: "/k8s-data"
# defaultDataPath: "/k8s-data"
longhornUI:
replicas: 1


@@ -0,0 +1,40 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: longhorn-volv-pvc
namespace: default
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 2Gi
---
apiVersion: v1
kind: Pod
metadata:
name: volume-test
namespace: default
spec:
restartPolicy: Always
containers:
- name: volume-test
image: nginx:stable-alpine
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
command:
- ls
- /data/lost+found
initialDelaySeconds: 5
periodSeconds: 5
volumeMounts:
- name: volv
mountPath: /data
ports:
- containerPort: 80
volumes:
- name: volv
persistentVolumeClaim:
claimName: longhorn-volv-pvc


@@ -45,3 +45,22 @@ uninstall:
just env::check
kubectl get namespace metallb-system &>/dev/null && kubectl delete ns metallb-system
test-deployment:
#!/bin/bash
set -euo pipefail
just env::check
kubectl apply -f test-deployment.yaml
echo "Test deployment created. You can check the service with 'kubectl get svc nginx -o wide -n test'."
echo "To clean up, run 'just test-deployment-cleanup'."
test-deployment-cleanup:
#!/bin/bash
set -euo pipefail
just env::check
kubectl delete -f test-deployment.yaml
echo "Test deployment and service deleted."


@@ -9,4 +9,4 @@ spec:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- schnipo.k8s.schnrbs.work
- schnipo.{{.Env.EXTERNAL_DOMAIN}}


@@ -0,0 +1,43 @@
apiVersion: v1
kind: Namespace
metadata:
name: dishes
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dish-schnipo
namespace: dishes
labels:
app: dishes
spec:
replicas: 3
selector:
matchLabels:
app: dishes
template:
metadata:
labels:
app: dishes
spec:
containers:
- name: dish-schnipo
image: bschnorbus/dish-schnipo
ports:
- containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
name: dish-schnipo
namespace: dishes
spec:
type: ClusterIP
selector:
app: dishes
ports:
- port: 80
targetPort: 8080
protocol: TCP


@@ -7,10 +7,12 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`schnipo.k8s.schnrbs.work`)
- match: Host(`schnipo.{{.Env.EXTERNAL_DOMAIN}}`)
kind: Rule
services:
- name: schnipo
port: 8080
port: 80
targetPort: 8080
tls:
secretName: schnipo-certificate-secret

Test-Deployment/justfile

@@ -0,0 +1,37 @@
set fallback:=true
export EXTERNAL := env("EXTERNAL_DOMAIN", "")
install-nginx:
#!/bin/bash
set -euo pipefail
just env::check
if [ -z "${EXTERNAL}" ]; then
echo "ERROR: EXTERNAL_DOMAIN environment variable is not set."
exit 1
fi
kubectl apply -f nginx-deployment.yaml
gomplate -f nginx-certificate-gomplate.yaml | kubectl apply -f -
gomplate -f nginx-ingress-route-gomplate.yaml | kubectl apply -f -
install-dishes:
#!/bin/bash
set -euo pipefail
just env::check
if [ -z "${EXTERNAL}" ]; then
echo "ERROR: EXTERNAL_DOMAIN environment variable is not set."
exit 1
fi
kubectl apply -f dishes-deployment.yaml
gomplate -f dishes-certificate-gomplate.yaml | kubectl apply -f -
gomplate -f dishes-ingress-route-gomplate.yaml | kubectl apply -f -
remove-nginx:
kubectl delete ns test || true
remove-dishes:
kubectl delete ns dishes || true


@@ -9,4 +9,4 @@ spec:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- nginx-test.k8s.schnrbs.work
- nginx-test.{{.Env.EXTERNAL_DOMAIN}}


@@ -0,0 +1,43 @@
apiVersion: v1
kind: Namespace
metadata:
name: test
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
namespace: test
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:latest
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx
namespace: test
spec:
type: LoadBalancer
selector:
app: nginx
ports:
- port: 80
targetPort: 80
protocol: TCP


@@ -7,7 +7,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`nginx-test.k8s.schnrbs.work`)
- match: Host(`nginx-test.{{.Env.EXTERNAL_DOMAIN}}`)
kind: Rule
services:
- name: nginx


@@ -7,7 +7,7 @@ metadata:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
rules:
- host: nginx-test.k8s.schnrbs.work
- host: nginx-test.int.schnrbs.work
http:
paths:
- path: /
@@ -19,5 +19,5 @@ spec:
number: 80
tls:
- hosts:
- nginx-test.k8s.schnrbs.work
- nginx-test.int.schnrbs.work
secretName: nginx-certificate-secret


@@ -4,7 +4,7 @@ metadata:
name: cloudflare-cluster-issuer
spec:
acme:
email: hello@schnorbus.net
email: {{ .Env.ACME_EMAIL }}
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: cloudflare-acme-key


@@ -5,4 +5,4 @@ metadata:
namespace: cert-manager
type: Opaque
stringData:
api-token: DgU4SMUpQVAoS8IisGxnSQCUI7PbclhvegdqF9I1
api-token: {{ .Env.CLOUDFLARE_API_TOKEN }}

Traefik/justfile

@@ -0,0 +1,62 @@
set fallback:=true
export CERT_MANAGER_NAMESPACE := env("CERT_MANAGER_NAMESPACE", "cert-manager")
export TRAEFIK_NAMESPACE := env("TRAEFIK_NAMESPACE", "traefik")
add-helm-repos:
helm repo add traefik https://helm.traefik.io/traefik --force-update
helm repo add jetstack https://charts.jetstack.io --force-update
helm repo update
install:
#!/bin/bash
set -euo pipefail
just env::check
just add-helm-repos
helm upgrade traefik traefik/traefik \
--install \
--cleanup-on-fail \
--namespace ${TRAEFIK_NAMESPACE} \
--create-namespace \
--values traefik-values.yaml
helm upgrade cert-manager jetstack/cert-manager \
--install \
--cleanup-on-fail \
--namespace ${CERT_MANAGER_NAMESPACE} \
--create-namespace \
--values cert-manager-values.yaml
uninstall:
#!/bin/bash
set -euo pipefail
just env::check
helm uninstall traefik --namespace ${TRAEFIK_NAMESPACE} || true
helm uninstall cert-manager --namespace ${CERT_MANAGER_NAMESPACE} || true
setup-cluster-issuer:
#!/bin/bash
set -euo pipefail
just env::check
gomplate -f cert-manager-issuer-secret-gomplate.yaml | kubectl apply -f -
gomplate -f cert-manager-cluster-issuer-gomplate.yaml | kubectl apply -f -
# Get status of cert-manager components
status:
#!/bin/bash
set -euo pipefail
echo "=== cert-manager Components Status ==="
echo ""
echo "Namespace: ${CERT_MANAGER_NAMESPACE}"
echo ""
echo "Pods:"
kubectl get pods -n ${CERT_MANAGER_NAMESPACE}
echo ""
echo "Services:"
kubectl get services -n ${CERT_MANAGER_NAMESPACE}
echo ""
echo "CRDs:"
kubectl get crd | grep cert-manager.io


@@ -11,5 +11,5 @@ ingressRoute:
dashboard:
enabled: true
entryPoints: [web, websecure]
matchRule: Host(`traefik-dashboard.k8s.schnrbs.work`)
matchRule: Host(`traefik-dashboard.{{ .Env.EXTERNAL_DOMAIN }}`)


@@ -4,3 +4,8 @@ K8S_MASTER_NODE_NAME={{ .Env.K8S_MASTER_NODE_NAME }}
SERVER_IP={{ .Env.SERVER_IP }}
AGENT_IP={{ .Env.AGENT_IP }}
METALLB_ADDRESS_RANGE={{ .Env.METALLB_ADDRESS_RANGE }}
CLOUDFLARE_API_TOKEN={{ .Env.CLOUDFLARE_API_TOKEN}}
ACME_EMAIL={{ .Env.ACME_EMAIL}}
EXTERNAL_DOMAIN={{ .Env.EXTERNAL_DOMAIN }}
VAULT_HOST={{ .Env.VAULT_HOST }}
AUTHENTIK_HOST={{ .Env.AUTHENTIK_HOST }}

env/justfile

@@ -90,6 +90,55 @@ setup:
fi
done
while [ -z "${CLOUDFLARE_API_TOKEN}" ]; do
if ! CLOUDFLARE_API_TOKEN=$(
gum input --prompt="Cloudflare API Token: " \
--width=100 --placeholder="API Token" --password
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${ACME_EMAIL}" ]; do
if ! ACME_EMAIL=$(
gum input --prompt="ACME Email for Cert-Manager: " \
--width=100 --placeholder="Email"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${EXTERNAL_DOMAIN}" ]; do
if ! EXTERNAL_DOMAIN=$(
gum input --prompt="External Domain: " \
--width=100 --placeholder="Domain"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${VAULT_HOST}" ]; do
if ! VAULT_HOST=$(
gum input --prompt="Vault hostname: " \
--width=100 --placeholder="vault"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${AUTHENTIK_HOST}" ]; do
if ! AUTHENTIK_HOST=$(
gum input --prompt="Authentik hostname: " \
--width=100 --placeholder="authentik"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
echo "Generating .env.local file..."
rm -f ../.env.local
gomplate -f env.local.gomplate -o ../.env.local


@@ -9,3 +9,9 @@ default:
mod env
mod BasicSetup '01_Basic_Setup'
mod MetalLbSetup 'Metallb_Setup'
mod Traefik
mod Longhorn
mod Vault '08_Vault'
mod ExternalSecrets '09_ExternalSecrets'
mod Postgres '10_Postgres'
mod KubePrometheusStack '07_KubePrometheusStack'


@@ -1,25 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: longhorn-web-ui
namespace: longhorn-system
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
rules:
- host: longhorn.k8s.internal.schnrbs.work
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: longhorn-frontend
port:
number: 80
tls:
- hosts:
- longhorn.k8s.internal.schnrbs.work
secretName: longhorn-web-ui-tls


@@ -5,3 +5,4 @@ helm = '3.19.0'
gum = '0.16.2'
gomplate = '4.3.3'
just = "1.42.4"
vault = "1.20.2"