Compare commits

..

6 Commits

Author SHA1 Message Date
baschno
e7f648cf57 ext-secrets initial 2026-01-25 20:23:01 +01:00
baschno
dce92aeb28 authentik initial 2026-01-25 20:22:36 +01:00
baschno
07e4ae31e3 kube-prom-stack 2026-01-25 20:22:18 +01:00
baschno
5e86aafa09 update vault readme 2026-01-25 20:21:19 +01:00
baschno
4444296443 postgres 2026-01-25 20:20:50 +01:00
baschno
9aafb940e9 adding extsecrets + postgres to just 2026-01-12 21:27:22 +01:00
14 changed files with 1116 additions and 2 deletions

View File

@@ -0,0 +1,19 @@
set fallback := true

export PROMETHEUS_NAMESPACE := env("PROMETHEUS_NAMESPACE", "prometheus")

[private]
default:
    @just --list --unsorted --list-submodules

# Add the prometheus-community Helm repository
add-helm-repo:
    @helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
    @helm repo update

# Install kube-prometheus-stack from the local values file
# NOTE(review): no `-n ${PROMETHEUS_NAMESPACE} --create-namespace` is passed, so the
# release lands in the current kubectl context namespace even though
# PROMETHEUS_NAMESPACE is exported above — confirm intended.
install:
    @just add-helm-repo
    @helm upgrade --cleanup-on-fail --install kube-prometheus-stack prometheus-community/kube-prometheus-stack \
        --wait \
        -f kube-stack-config-values.yaml

View File

@@ -0,0 +1,78 @@
#
# Copyright © contributors to CloudNativePG, established as
# CloudNativePG a Series of LF Projects, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
# -- here you can pass the whole values directly to the kube-prometheus-stack chart
# NOTE(review): this top-level `enabled: true` had no visible parent key in the
# pasted source — confirm which chart component it was meant to enable.
enabled: true
kubeControllerManager:
  enabled: false
nodeExporter:
  enabled: false
defaultRules:
  create: true
  # Only the kubelet/k8s/node rule groups are enabled; everything else is off.
  rules:
    alertmanager: false
    etcd: false
    configReloaders: false
    general: false
    k8s: true
    kubeApiserver: false
    kubeApiserverAvailability: false
    kubeApiserverSlos: false
    kubelet: true
    kubeProxy: false
    kubePrometheusGeneral: false
    kubePrometheusNodeRecording: false
    kubernetesApps: false
    kubernetesResources: false
    kubernetesStorage: false
    kubernetesSystem: false
    kubeScheduler: false
    kubeStateMetrics: false
    network: false
    node: true
    nodeExporterAlerting: false
    nodeExporterRecording: true
    prometheus: false
    prometheusOperator: false
# nodeSelector:
#   workload: monitor
prometheus:
  prometheusSpec:
    # Nil-uses-helm-values set to false so monitors/rules from ALL namespaces
    # are picked up, not only those created by this release.
    podMonitorSelectorNilUsesHelmValues: false
    ruleSelectorNilUsesHelmValues: false
    serviceMonitorSelectorNilUsesHelmValues: false
    probeSelectorNilUsesHelmValues: false
    # nodeSelector:
    #   workload: monitor
grafana:
  enabled: true
  # -- the grafana admin password
  # NOTE(review): chart default password committed to VCS — override via
  # `admin.existingSecret` for anything beyond a local lab.
  adminPassword: prom-operator
  defaultDashboardsEnabled: false
  sidecar:
    dashboards:
      enabled: true
  # nodeSelector:
  #   workload: monitor
alertmanager:
  enabled: true
  # alertmanagerSpec:          # (chart key is lowercase-m `alertmanagerSpec`)
  #   nodeSelector:
  #     workload: monitor

View File

@@ -128,4 +128,138 @@ These annotations are used to configure the Vault Agent to inject secrets into t
Use the below command to check the vault secrets from the pod volume
```
kubectl exec -it vault-test-84d9dc9986-gcxfv -n vault -- sh -c "cat /vault/secrets/login && cat /vault/secrets/my-first-secret"
```
----
Wenn du Kubernetes mit Vault konfiguriert hast, ermöglichst du eine **sichere Integration zwischen deinem Kubernetes-Cluster und HashiCorp Vault**. Hier sind die wichtigsten Szenarien und Vorteile:
## Hauptfunktionen
### 1. **Automatische Pod-Authentifizierung**
Pods können sich automatisch bei Vault authentifizieren, ohne dass du Credentials manuell verteilen musst. Vault nutzt Kubernetes Service Accounts zur Identitätsverifizierung.
### 2. **Dynamische Secrets für Anwendungen**
Anwendungen können zur Laufzeit Secrets von Vault abrufen, statt sie in ConfigMaps oder Kubernetes Secrets zu speichern.
## Praktische Szenarien
### **Szenario 1: Vault Agent Sidecar Injection**
Vault injiziert automatisch einen Sidecar-Container, der Secrets abruft und für deine App bereitstellt:
```yaml
apiVersion: v1
kind: Pod
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/role: "myapp"
vault.hashicorp.com/agent-inject-secret-database: "database/creds/myapp-role"
spec:
serviceAccountName: myapp
containers:
- name: app
image: myapp:latest
```
**Ergebnis:** Datenbank-Credentials werden automatisch in `/vault/secrets/database` bereitgestellt.
### **Szenario 2: Dynamische Datenbank-Credentials**
Statt statische DB-Passwörter zu verwenden, generiert Vault temporäre Credentials:
- Jeder Pod bekommt eigene DB-Credentials
- Credentials sind zeitlich begrenzt (z.B. 24h)
- Automatische Rotation
- Einfaches Widerrufen bei Kompromittierung
### **Szenario 3: Externe Secrets Operator (ESO)**
Secrets werden als native Kubernetes Secrets synchronisiert:
```yaml
apiVersion: external-secrets.io/v1beta1
kind: SecretStore
metadata:
name: vault-backend
spec:
provider:
vault:
server: "https://vault.test.k8s.schnrbs.work"
path: "secret"
auth:
kubernetes:
mountPath: "kubernetes"
role: "myapp"
```
### **Szenario 4: Verschlüsselung als Service**
Anwendungen können Vault's Transit Engine nutzen:
```bash
# Daten verschlüsseln ohne den Key zu kennen
vault write transit/encrypt/my-key plaintext=$(base64 <<< "sensitive data")
# Daten entschlüsseln
vault write transit/decrypt/my-key ciphertext="vault:v1:abc..."
```
### **Szenario 5: PKI/Zertifikats-Management**
Automatische Ausstellung von TLS-Zertifikaten für Service-to-Service-Kommunikation:
- Kurzlebige Zertifikate (z.B. 1h)
- Automatische Rotation
- Zero-Trust-Netzwerk
### **Szenario 6: Multi-Tenancy**
Verschiedene Namespaces/Teams haben isolierten Zugriff:
```bash
# Team A darf nur auf secret/team-a/* zugreifen
# Team B darf nur auf secret/team-b/* zugreifen
```
## Vorteile gegenüber Kubernetes Secrets
| Aspekt | Kubernetes Secrets | Vault Integration |
|--------|-------------------|-------------------|
| Verschlüsselung at rest | Optional, etcd-Ebene | Immer, zusätzlich verschlüsselt |
| Secret Rotation | Manuell | Automatisch/dynamisch |
| Audit Log | Begrenzt | Detailliert für jeden Zugriff |
| Dynamische Secrets | Nein | Ja (DB, Cloud, etc.) |
| Granulare Policies | Begrenzt | Sehr feinkörnig |
| Encryption-as-a-Service | Nein | Ja |
## Typischer Workflow nach der Konfiguration
1. **Policy erstellen:** Definiere, wer auf welche Secrets zugreifen darf
2. **Role erstellen:** Verknüpfe Kubernetes Service Accounts mit Vault Policies
3. **Secrets bereitstellen:** Nutze Vault Agent Injection oder CSI Driver
4. **Anwendung deployen:** Pods authentifizieren sich automatisch
## Best Practice Setup
Nach der Kubernetes Auth-Aktivierung solltest du:
```bash
# 1. Policy erstellen
vault policy write myapp - <<EOF
path "secret/data/myapp/*" {
capabilities = ["read"]
}
EOF
# 2. Role erstellen
vault write auth/kubernetes/role/myapp \
bound_service_account_names=myapp \
bound_service_account_namespaces=production \
policies=myapp \
ttl=1h
# 3. Service Account in K8s erstellen
kubectl create serviceaccount myapp -n production
```
Möchtest du ein spezifisches Szenario genauer erkunden oder brauchst du Hilfe bei der Konfiguration eines bestimmten Use Cases?

View File

@@ -0,0 +1,51 @@
# External Secrets Operator resource configuration
# Based on Goldilocks recommendations (Burstable QoS)

# Security context for the main controller pod (restricted PSS compliant)
podSecurityContext:
  runAsNonRoot: true
  runAsUser: 1000
  runAsGroup: 1000
  fsGroup: 1000
  seccompProfile:
    type: RuntimeDefault

# Main controller
resources:
  requests:
    cpu: 15m
    memory: 192Mi
  limits:
    cpu: 50m
    memory: 256Mi

certController:
  podSecurityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
    fsGroup: 1000
    seccompProfile:
      type: RuntimeDefault
  resources:
    requests:
      cpu: 15m
      memory: 192Mi
    limits:
      cpu: 50m
      memory: 256Mi

webhook:
  podSecurityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
    fsGroup: 1000
    seccompProfile:
      type: RuntimeDefault
  resources:
    requests:
      cpu: 15m
      memory: 128Mi
    limits:
      cpu: 50m
      memory: 256Mi

View File

@@ -0,0 +1,65 @@
set fallback := true

export EXTERNAL_SECRETS_NAMESPACE := env("EXTERNAL_SECRETS_NAMESPACE", "external-secrets")
export EXTERNAL_SECRETS_CHART_VERSION := env("EXTERNAL_SECRETS_CHART_VERSION", "1.1.0")
export EXTERNAL_SECRETS_REFRESH_INTERVAL := env("EXTERNAL_SECRETS_REFRESH_INTERVAL", "1800")
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
export VAULT_HOST := env("VAULT_HOST", "")
export VAULT_ADDR := "https://" + VAULT_HOST

[private]
default:
    @just --list --unsorted --list-submodules

# Add Helm repository
add-helm-repo:
    helm repo add external-secrets https://charts.external-secrets.io
    helm repo update

# Remove Helm repository
remove-helm-repo:
    helm repo remove external-secrets

# Install External Secrets
install:
    just add-helm-repo
    helm upgrade --cleanup-on-fail \
        --install external-secrets external-secrets/external-secrets \
        --version ${EXTERNAL_SECRETS_CHART_VERSION} -n ${EXTERNAL_SECRETS_NAMESPACE} \
        --create-namespace --wait \
        -f external-secrets-values.yaml
    kubectl label namespace ${EXTERNAL_SECRETS_NAMESPACE} \
        pod-security.kubernetes.io/enforce=restricted --overwrite
    just create-external-secrets-role
    just create-vault-secret-store

# Uninstall External Secrets
uninstall:
    just delete-vault-secret-store
    helm uninstall external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} --wait
    kubectl delete namespace ${EXTERNAL_SECRETS_NAMESPACE} --ignore-not-found

# Create Vault Secret Store for External Secrets
create-vault-secret-store:
    gomplate -f ./vault-secret-store.gomplate.yaml | kubectl apply -f -

# Delete Vault Secret Store for External Secrets
delete-vault-secret-store:
    gomplate -f ./vault-secret-store.gomplate.yaml | kubectl delete --ignore-not-found -f -

# Create Vault role for External Secrets
create-external-secrets-role root_token='':
    #!/bin/bash
    set -euo pipefail
    export VAULT_TOKEN="{{ root_token }}"
    while [ -z "${VAULT_TOKEN}" ]; do
        VAULT_TOKEN=$(gum input --prompt="Vault root token: " --password --width=100)
    done
    # NOTE(review): binding the `admin` policy grants this service account full
    # Vault access — consider a scoped read-only policy for the secrets it needs.
    vault write auth/kubernetes/role/external-secrets \
        bound_service_account_names=external-secrets \
        bound_service_account_namespaces=${EXTERNAL_SECRETS_NAMESPACE} \
        audience=vault \
        policies=admin \
        ttl=1h

View File

@@ -0,0 +1,22 @@
apiVersion: external-secrets.io/v1
kind: ClusterSecretStore
metadata:
  name: vault-secret-store
spec:
  provider:
    vault:
      # In-cluster Vault service address; plain HTTP inside the cluster.
      # Templated values quoted so an empty/odd expansion cannot change the
      # YAML type of the scalar.
      server: "http://vault.{{ .Env.K8S_VAULT_NAMESPACE }}:8200"
      path: secret
      version: v2
      auth:
        kubernetes:
          role: external-secrets
          mountPath: kubernetes
          serviceAccountRef:
            name: external-secrets
            namespace: "{{ .Env.EXTERNAL_SECRETS_NAMESPACE }}"
          # Audience must match the audience configured in Vault Kubernetes auth role
          # Required for Vault 1.21+ compatibility
          audiences:
            - vault
  # Left unquoted: the ESO schema expects an integer (seconds) here.
  refreshInterval: {{ .Env.EXTERNAL_SECRETS_REFRESH_INTERVAL }}

View File

@@ -0,0 +1,27 @@
# Pod Security Context for restricted Pod Security Standards
#podSecurityContext:
#  runAsNonRoot: true
#  seccompProfile:
#    type: RuntimeDefault
#  fsGroup: 10001
#
## Container Security Context for restricted Pod Security Standards
#containerSecurityContext:
#  allowPrivilegeEscalation: false
#  readOnlyRootFilesystem: true
#  runAsUser: 10001
#  runAsGroup: 10001
#  seccompProfile:
#    type: RuntimeDefault
#  capabilities:
#    drop:
#      - ALL
#
# CloudNativePG operator resource limits (Burstable QoS)
resources:
  requests:
    cpu: 50m
    memory: 128Mi
  limits:
    cpu: 100m
    memory: 256Mi

647
10_Postgres/justfile Normal file
View File

@@ -0,0 +1,647 @@
set fallback := true

export CNPG_NAMESPACE := env("CNPG_NAMESPACE", "postgres")
export CNPG_CHART_VERSION := env("CNPG_CHART_VERSION", "0.26.1")
export CNPG_CLUSTER_CHART_VERSION := env("CNPG_CLUSTER_CHART_VERSION", "0.3.1")
export POSTGRES_STORAGE_SIZE := env("POSTGRES_STORAGE_SIZE", "20Gi")
export POSTGRES_MAX_CONNECTIONS := env("POSTGRES_MAX_CONNECTIONS", "200")
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
export EXTERNAL_SECRETS_NAMESPACE := env("EXTERNAL_SECRETS_NAMESPACE", "external-secrets")

[private]
default:
    @just --list --unsorted --list-submodules
# Add Helm repository
add-helm-repo:
    @helm repo add cnpg https://cloudnative-pg.github.io/charts
    @helm repo update

# Remove Helm repository
remove-helm-repo:
    @helm repo remove cnpg

# Install CloudNativePG and create a cluster
install:
    @just install-cnpg
    @just create-cluster

# Uninstall CloudNativePG and delete the cluster
uninstall:
    @just delete-cluster
    @just uninstall-cnpg

# Install CloudNativePG
install-cnpg:
    @just add-helm-repo
    @helm upgrade --cleanup-on-fail --install cnpg cnpg/cloudnative-pg \
        --version ${CNPG_CHART_VERSION} \
        -n ${CNPG_NAMESPACE} --create-namespace --wait \
        -f cnpg-values.yaml
    @kubectl label namespace ${CNPG_NAMESPACE} \
        pod-security.kubernetes.io/enforce=restricted --overwrite

# Uninstall CloudNativePG
uninstall-cnpg:
    @helm uninstall cnpg -n ${CNPG_NAMESPACE} --wait
    @kubectl delete namespace ${CNPG_NAMESPACE} --ignore-not-found
# Create Postgres cluster
create-cluster:
    #!/bin/bash
    set -euo pipefail
    # Prefer syncing the superuser secret from Vault via External Secrets when
    # the operator is installed; otherwise create the Kubernetes secret directly.
    if helm status external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} &>/dev/null; then
        echo "External Secrets Operator detected. Creating admin credentials via ExternalSecret..."
        password=$(just utils::random-password)
        just vault::put-root postgres/admin username=postgres password="${password}"
        kubectl delete externalsecret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
        gomplate -f postgres-superuser-external-secret.gomplate.yaml | kubectl apply -f -
        echo "Waiting for ExternalSecret to sync..."
        kubectl wait --for=condition=Ready externalsecret/postgres-cluster-superuser \
            -n ${CNPG_NAMESPACE} --timeout=60s
    else
        echo "External Secrets Operator not found. Creating superuser secret directly..."
        password=$(just utils::random-password)
        kubectl delete secret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
        kubectl create secret generic postgres-cluster-superuser -n ${CNPG_NAMESPACE} \
            --from-literal=username=postgres \
            --from-literal=password="${password}"
        # Mirror the password into Vault too, when Vault happens to be installed.
        if helm status vault -n ${K8S_VAULT_NAMESPACE} &>/dev/null; then
            just vault::put-root postgres/admin username=postgres password="${password}"
        fi
    fi
    # Render cluster values and install/upgrade the CNPG cluster chart.
    gomplate -f postgres-cluster-values.gomplate.yaml -o postgres-cluster-values.yaml
    helm upgrade --install postgres-cluster cnpg/cluster \
        --version ${CNPG_CLUSTER_CHART_VERSION} \
        -n ${CNPG_NAMESPACE} --wait -f postgres-cluster-values.yaml
    echo "Waiting for PostgreSQL cluster to be ready..."
    kubectl wait --for=condition=Ready clusters.postgresql.cnpg.io/postgres-cluster \
        -n ${CNPG_NAMESPACE} --timeout=300s
# Delete Postgres cluster
delete-cluster:
    @helm uninstall postgres-cluster -n ${CNPG_NAMESPACE} --ignore-not-found --wait
    @kubectl delete externalsecret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
    @kubectl delete secret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found

# Print Postgres username
admin-username:
    @echo "postgres"

# Print Postgres password
admin-password:
    @kubectl get -n ${CNPG_NAMESPACE} secret postgres-cluster-superuser \
        -o jsonpath="{.data.password}" | base64 --decode
    @echo
# Create Postgres database
create-db db_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-{{ db_name }}}
    while [ -z "${DB_NAME}" ]; do
        DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    # Idempotent: succeed silently-ish when the database is already there.
    if just db-exists ${DB_NAME} &>/dev/null; then
        echo "Database ${DB_NAME} already exists" >&2
        exit
    fi
    echo "Creating database ${DB_NAME}..."
    just psql -c "\"CREATE DATABASE ${DB_NAME};\""
    echo "Database ${DB_NAME} created."
# Delete Postgres database
delete-db db_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-{{ db_name }}}
    # Fix: prompt when the name is empty. Previously an empty DB_NAME slipped
    # through — `just db-exists` prompts in its own subprocess, so the answer
    # never reached this scope and an invalid "DROP DATABASE ;" was issued.
    while [ -z "${DB_NAME}" ]; do
        DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    if ! just db-exists ${DB_NAME} &>/dev/null; then
        echo "Database ${DB_NAME} does not exist." >&2
        exit
    fi
    # Terminate all connections to the database
    just psql -c "\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity
        WHERE datname = '${DB_NAME}' AND pid <> pg_backend_pid();\""
    # Block new connections, then force-terminate any stragglers
    just psql -c "\"UPDATE pg_database SET datallowconn = false WHERE datname = '${DB_NAME}';\""
    just psql -c "\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity
        WHERE datname = '${DB_NAME}';\""
    just psql -c "\"DROP DATABASE ${DB_NAME};\""
    echo "Database ${DB_NAME} deleted."
# Check if database exists
[no-exit-message]
db-exists db_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-{{ db_name }}}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
if echo '\l' | just postgres::psql | grep -E "^ *${DB_NAME} *\|" &>/dev/null; then
echo "Database ${DB_NAME} exists."
else
echo "Database ${DB_NAME} does not exist." >&2
exit 1
fi
# Create Postgres user
create-user username='' password='':
    #!/bin/bash
    set -euo pipefail
    USERNAME=${USERNAME:-"{{ username }}"}
    PASSWORD=${PASSWORD:-"{{ password }}"}
    while [ -z "${USERNAME}" ]; do
        USERNAME=$(gum input --prompt="Username: " --width=100)
    done
    if just user-exists ${USERNAME} &>/dev/null; then
        echo "User ${USERNAME} already exists" >&2
        exit
    fi
    if [ -z "${PASSWORD}" ]; then
        PASSWORD=$(gum input --prompt="Password: " --password --width=100 \
            --placeholder="Empty to generate a random password")
    fi
    if [ -z "${PASSWORD}" ]; then
        # Fix: use the namespaced recipe — the rest of this file calls
        # `utils::random-password`; a bare `random-password` is not defined here.
        PASSWORD=$(just utils::random-password)
        echo "Generated random password: ${PASSWORD}"
    fi
    just psql -c "\"CREATE USER ${USERNAME} WITH LOGIN PASSWORD '${PASSWORD}';\""
    echo "User ${USERNAME} created."
# Delete Postgres user
delete-user username='':
#!/bin/bash
set -euo pipefail
USERNAME=${USERNAME:-"{{ username }}"}
if ! just user-exists ${USERNAME} &>/dev/null; then
echo "User ${USERNAME} does not exist." >&2
exit
fi
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON TABLES FROM ${USERNAME};\""
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON SEQUENCES FROM ${USERNAME};\""
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON FUNCTIONS FROM ${USERNAME};\""
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON TYPES FROM ${USERNAME};\""
just psql -c "\"ALTER SCHEMA public OWNER TO postgres;\""
just psql -c "\"DROP USER ${USERNAME};\""
echo "User ${USERNAME} deleted."
# Check if user exists
[no-exit-message]
user-exists username='':
#!/bin/bash
set -euo pipefail
USERNAME=${USERNAME:-"{{ username }}"}
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username: " --width=100)
done
if echo '\du' | just postgres::psql | grep -E "^ *${USERNAME} *\|" &>/dev/null; then
echo "User ${USERNAME} exists."
else
echo "User ${USERNAME} does not exist." >&2
exit 1
fi
# Change user password
change-password username='' password='':
#!/bin/bash
set -euo pipefail
USERNAME=${USERNAME:-"{{ username }}"}
PASSWORD=${PASSWORD:-"{{ password }}"}
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username: " --width=100)
done
if ! just user-exists ${USERNAME} &>/dev/null; then
echo "User ${USERNAME} does not exist." >&2
exit 1
fi
if [ -z "${PASSWORD}" ]; then
PASSWORD=$(gum input --prompt="New password: " --password --width=100 \
--placeholder="Empty to generate a random password")
fi
if [ -z "${PASSWORD}" ]; then
PASSWORD=$(just utils::random-password)
echo "Generated random password: ${PASSWORD}"
fi
just psql -c "\"ALTER USER ${USERNAME} WITH PASSWORD '${PASSWORD}';\""
echo "Password changed for user ${USERNAME}."
# Grant all privileges on database to user
grant db_name='' username='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
USERNAME=${USERNAME:-"{{ username }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username: " --width=100)
done
if ! just psql ${DB_NAME} -U postgres -P pager=off -c "\"SELECT 1;\""; then
echo "Database ${DB_NAME} does not exist." >&2
exit 1
fi
just psql -c "\"GRANT ALL PRIVILEGES ON DATABASE ${DB_NAME} TO ${USERNAME};\""
# Grant CREATE permission on public schema (needed for PostgreSQL 15+)
just psql -d ${DB_NAME} -c "\"GRANT CREATE ON SCHEMA public TO ${USERNAME};\""
echo "Privileges granted."
# Revoke all privileges on database from user
revoke db_name='' username='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
USERNAME=${USERNAME:-"{{ username }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username: " --width=100)
done
if ! just psql -U postgres ${DB_NAME} -P pager=off -c "\"SELECT 1;\""; then
echo "Database ${DB_NAME} does not exist." >&2
exit 1
fi
just psql -c "\"REVOKE ALL PRIVILEGES ON DATABASE ${DB_NAME} FROM ${USERNAME};\""
echo "Privileges revoked."
# Create Postgres database and user
create-user-and-db username='' db_name='' password='':
@just create-db "{{ db_name }}"
@just create-user "{{ username }}" "{{ password }}"
@just grant "{{ db_name }}" "{{ username }}"
# Delete Postgres database and user
delete-user-and-db username='' db_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
USERNAME=${USERNAME:-"{{ username }}"}
if just db-exists ${DB_NAME} &>/dev/null; then
if just user-exists ${USERNAME} &>/dev/null; then
just revoke "${DB_NAME}" "${USERNAME}"
else
echo "User ${USERNAME} does not exist, skipping revoke."
fi
just delete-db "${DB_NAME}"
else
echo "Database ${DB_NAME} does not exist, skipping database deletion."
fi
if just user-exists ${USERNAME} &>/dev/null; then
just delete-user "${USERNAME}"
else
echo "User ${USERNAME} does not exist, skipping user deletion."
fi
echo "Cleanup completed."
# Create logical replication slot for CDC
create-replication-slot slot_name='' db_name='postgres' plugin='pgoutput':
#!/bin/bash
set -euo pipefail
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
PLUGIN=${PLUGIN:-"{{ plugin }}"}
while [ -z "${SLOT_NAME}" ]; do
SLOT_NAME=$(gum input --prompt="Replication slot name: " --width=100 \
--placeholder="e.g., airbyte_slot")
done
if kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT slot_name FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" | grep -q "${SLOT_NAME}"; then
echo "Replication slot '${SLOT_NAME}' already exists."
exit 0
fi
echo "Creating replication slot '${SLOT_NAME}' with plugin '${PLUGIN}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"SELECT pg_create_logical_replication_slot('${SLOT_NAME}', '${PLUGIN}');"
echo "Replication slot '${SLOT_NAME}' created."
# Delete replication slot
delete-replication-slot slot_name='' db_name='postgres':
#!/bin/bash
set -euo pipefail
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
while [ -z "${SLOT_NAME}" ]; do
SLOT_NAME=$(gum input --prompt="Replication slot name to delete: " --width=100)
done
if ! kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT slot_name FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" | grep -q "${SLOT_NAME}"; then
echo "Replication slot '${SLOT_NAME}' does not exist."
exit 1
fi
echo "Deleting replication slot '${SLOT_NAME}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"SELECT pg_drop_replication_slot('${SLOT_NAME}');"
echo "Replication slot '${SLOT_NAME}' deleted."
# List all replication slots
list-replication-slots:
@echo "Replication slots:"
@kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d postgres -c \
"SELECT slot_name, plugin, slot_type, database, active, restart_lsn FROM pg_replication_slots;"
# Create publication for CDC
create-publication pub_name='' db_name='' tables='':
#!/bin/bash
set -euo pipefail
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
TABLES="${TABLES:-{{ tables }}}"
while [ -z "${PUB_NAME}" ]; do
PUB_NAME=$(gum input --prompt="Publication name: " --width=100 \
--placeholder="e.g., airbyte_publication")
done
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
if kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT pubname FROM pg_publication WHERE pubname = '${PUB_NAME}';" | grep -q "${PUB_NAME}"; then
echo "Publication '${PUB_NAME}' already exists in database '${DB_NAME}'."
exit 0
fi
if [ -z "${TABLES}" ]; then
echo "Select tables to include in publication:"
echo "1) All tables (ALL TABLES)"
echo "2) All user tables (exclude system/internal tables)"
echo "3) Specific tables (comma-separated list)"
CHOICE=$(gum choose "All tables" "User tables only" "Specific tables")
case "${CHOICE}" in
"All tables")
TABLES="ALL TABLES"
;;
"User tables only")
# Get list of user tables (excluding _airbyte* and other system tables)
USER_TABLES=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT string_agg(tablename, ', ') FROM pg_tables
WHERE schemaname = 'public'
AND tablename NOT LIKE '\_%'
AND tablename NOT LIKE 'pg_%';")
if [ -z "${USER_TABLES}" ]; then
echo "No user tables found in database '${DB_NAME}'"
exit 1
fi
TABLES="TABLE ${USER_TABLES}"
echo "Including tables: ${USER_TABLES}"
;;
"Specific tables")
TABLES=$(gum input --prompt="Enter table names (comma-separated): " --width=100 \
--placeholder="e.g., users, products, orders")
TABLES="TABLE ${TABLES}"
;;
esac
elif [ "${TABLES}" = "ALL" ]; then
TABLES="ALL TABLES"
fi
echo "Creating publication '${PUB_NAME}' in database '${DB_NAME}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"CREATE PUBLICATION ${PUB_NAME} FOR ${TABLES};"
if [ "${TABLES}" != "ALL TABLES" ]; then
echo "Setting REPLICA IDENTITY for included tables..."
TABLE_LIST=$(echo "${TABLES}" | sed 's/TABLE //')
IFS=',' read -ra TABLE_ARRAY <<< "${TABLE_LIST}"
for table in "${TABLE_ARRAY[@]}"; do
table=$(echo "$table" | xargs) # trim whitespace
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"ALTER TABLE ${table} REPLICA IDENTITY FULL;" 2>/dev/null || true
done
fi
echo "Publication '${PUB_NAME}' created."
# Delete publication
delete-publication pub_name='' db_name='':
#!/bin/bash
set -euo pipefail
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
while [ -z "${PUB_NAME}" ]; do
PUB_NAME=$(gum input --prompt="Publication name to delete: " --width=100)
done
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
if ! kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT pubname FROM pg_publication WHERE pubname = '${PUB_NAME}';" | grep -q "${PUB_NAME}"; then
echo "Publication '${PUB_NAME}' does not exist in database '${DB_NAME}'."
exit 1
fi
echo "Deleting publication '${PUB_NAME}' from database '${DB_NAME}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"DROP PUBLICATION ${PUB_NAME};"
echo "Publication '${PUB_NAME}' deleted."
# List all publications in a database
list-publications db_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
echo "Publications in database '${DB_NAME}':"
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"SELECT pubname, puballtables, pubinsert, pubupdate, pubdelete FROM pg_publication;"
# Grant CDC privileges to user
grant-cdc-privileges username='' db_name='':
#!/bin/bash
set -euo pipefail
USERNAME=${USERNAME:-"{{ username }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username to grant CDC privileges: " --width=100)
done
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
echo "Granting CDC privileges to user '${USERNAME}' on database '${DB_NAME}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c "ALTER USER ${USERNAME} WITH REPLICATION;"
echo "Granting schema and table privileges..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"GRANT USAGE ON SCHEMA public TO ${USERNAME};
GRANT CREATE ON SCHEMA public TO ${USERNAME};
GRANT SELECT ON ALL TABLES IN SCHEMA public TO ${USERNAME};
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO ${USERNAME};"
echo "Granting pg_read_all_data role..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c "GRANT pg_read_all_data TO ${USERNAME};" 2>/dev/null || true
echo "CDC privileges granted to user '${USERNAME}'"
# Setup CDC (Change Data Capture)
setup-cdc db_name='' slot_name='' pub_name='' username='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
USERNAME=${USERNAME:-"{{ username }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name for CDC setup: " --width=100)
done
while [ -z "${SLOT_NAME}" ]; do
SLOT_NAME=$(gum input --prompt="Replication slot name: " --width=100 \
--placeholder="e.g., demo_slot")
done
while [ -z "${PUB_NAME}" ]; do
PUB_NAME=$(gum input --prompt="Publication name: " --width=100 \
--placeholder="e.g., demo_pub")
done
echo "Setting up CDC on database '${DB_NAME}'..."
WAL_LEVEL=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d postgres -tAc "SHOW wal_level;")
if [ "${WAL_LEVEL}" != "logical" ]; then
echo "WARNING: wal_level is '${WAL_LEVEL}', should be 'logical' for CDC"
echo "Please ensure PostgreSQL is configured with wal_level=logical"
exit 1
fi
just create-replication-slot "${SLOT_NAME}" "${DB_NAME}"
just create-publication "${PUB_NAME}" "${DB_NAME}"
if [ -n "${USERNAME}" ]; then
echo ""
just grant-cdc-privileges "${USERNAME}" "${DB_NAME}"
fi
echo ""
echo "CDC setup completed for database '${DB_NAME}'"
echo " Replication Method: Logical Replication (CDC)"
echo " Replication Slot: ${SLOT_NAME}"
echo " Publication: ${PUB_NAME}"
if [ -n "${USERNAME}" ]; then
echo " User with CDC privileges: ${USERNAME}"
fi
# Cleanup CDC (removes slot and publication)
cleanup-cdc db_name='' slot_name='' pub_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name for CDC cleanup: " --width=100)
done
while [ -z "${SLOT_NAME}" ]; do
SLOT_NAME=$(gum input --prompt="Replication slot name to delete: " --width=100 \
--placeholder="e.g., demo_slot")
done
while [ -z "${PUB_NAME}" ]; do
PUB_NAME=$(gum input --prompt="Publication name to delete: " --width=100 \
--placeholder="e.g., demo_pub")
done
echo "Cleaning up CDC configuration for database '${DB_NAME}'..."
# Check if slot is active
SLOT_ACTIVE=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d postgres -tAc \
"SELECT active FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" 2>/dev/null || echo "")
if [ "${SLOT_ACTIVE}" = "t" ]; then
echo "WARNING: Replication slot '${SLOT_NAME}' is currently active!"
echo "Please stop any active replication connections first."
if ! gum confirm "Proceed with deletion anyway?"; then
echo "Cleanup cancelled"
exit 1
fi
fi
# Delete in correct order: Slot first, then Publication
echo "Step 1: Deleting replication slot '${SLOT_NAME}'..."
just delete-replication-slot "${SLOT_NAME}" "${DB_NAME}" || \
echo "Replication slot '${SLOT_NAME}' not found or already deleted"
echo "Step 2: Deleting publication '${PUB_NAME}'..."
just delete-publication "${PUB_NAME}" "${DB_NAME}" || \
echo "Publication '${PUB_NAME}' not found or already deleted"
echo "CDC cleanup completed for database '${DB_NAME}'"
# Run psql
# Opens an interactive psql session inside the primary Postgres pod.
# Extra arguments are forwarded verbatim to psql (e.g. `just psql -d mydb`).
[no-exit-message]
psql *args='':
    @kubectl exec -it -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- psql {{ args }}
# Dump Postgres database by pg_dump
#
# Dumps `db_name` (custom -Fc format) inside the primary pod, copies the
# result to the local path `file`, and removes the temporary dump file from
# the pod. `exclude_tables` is an optional comma-separated list of tables
# to skip. Assumes `postgres::admin-username`/`postgres::admin-password`
# recipes exist in the Postgres module — TODO confirm against that justfile.
[no-cd]
dump db_name file exclude_tables='':
    #!/bin/bash
    set -euo pipefail
    DUMP_OPTIONS="-Fc"
    if [ -n "{{ exclude_tables }}" ]; then
        IFS=',' read -ra TABLES <<< "{{ exclude_tables }}"
        for table in "${TABLES[@]}"; do
            DUMP_OPTIONS="$DUMP_OPTIONS --exclude-table=$table"
        done
    fi
    # Always remove the temporary dump file inside the pod, even when
    # `kubectl cp` fails — with `set -e` a plain trailing `rm` would be
    # skipped on error and leave the dump lying in the data directory.
    trap 'kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- rm -f /var/lib/postgresql/data/db.dump' EXIT
    kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- bash -c \
        "pg_dump -d postgresql://$(just postgres::admin-username):$(just postgres::admin-password)@localhost/{{ db_name }} $DUMP_OPTIONS > \
        /var/lib/postgresql/data/db.dump"
    kubectl cp -n ${CNPG_NAMESPACE} -c postgres \
        postgres-cluster-1:/var/lib/postgresql/data/db.dump {{ file }}
# Restore Postgres database by pg_restore
#
# Creates the target database (if missing), copies the local dump file
# `file` into the primary pod, restores it with --clean --if-exists, and —
# consistent with the `dump` recipe — removes the temporary dump file from
# the pod afterwards (the original left it behind in the data directory).
[no-cd]
restore db_name file:
    just postgres::create-db {{ db_name }}
    kubectl cp {{ file }} -n ${CNPG_NAMESPACE} -c postgres \
        postgres-cluster-1:/var/lib/postgresql/data/db.dump
    kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- bash -c \
        "pg_restore --clean --if-exists \
        -d postgresql://$(just postgres::admin-username):$(just postgres::admin-password)@localhost/{{ db_name }} \
        /var/lib/postgresql/data/db.dump"
    kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- rm /var/lib/postgresql/data/db.dump
# Enable Prometheus monitoring
#
# Turns on the CloudNativePG-managed PodMonitor for the cluster and labels
# it so the kube-prometheus-stack Prometheus release discovers it.
enable-monitoring:
    #!/bin/bash
    set -euo pipefail
    echo "Enabling Prometheus PodMonitor for PostgreSQL cluster..."
    # Label namespace to enable monitoring
    kubectl label namespace ${CNPG_NAMESPACE} buun.channel/enable-monitoring=true --overwrite
    # Enable PodMonitor on the Cluster resource; the operator then creates
    # the PodMonitor object asynchronously.
    kubectl patch cluster postgres-cluster -n ${CNPG_NAMESPACE} --type=merge -p '{"spec":{"monitoring":{"enablePodMonitor":true}}}'
    echo "Waiting for PodMonitor to be created..."
    # Poll for the PodMonitor instead of a fixed `sleep 3`: a fixed delay
    # races the operator, and under `set -e` the label command below would
    # abort the recipe if the object does not exist yet.
    for _ in $(seq 1 30); do
        if kubectl get podmonitor postgres-cluster -n ${CNPG_NAMESPACE} >/dev/null 2>&1; then
            break
        fi
        sleep 1
    done
    # Add release label to PodMonitor so Prometheus' selector matches it
    kubectl label podmonitor postgres-cluster -n ${CNPG_NAMESPACE} release=kube-prometheus-stack --overwrite
    kubectl get podmonitor -n ${CNPG_NAMESPACE} -l cnpg.io/cluster=postgres-cluster
    echo "✓ PostgreSQL monitoring enabled"
# Disable Prometheus monitoring
#
# Reverses `enable-monitoring`: turns the PodMonitor off on the Cluster
# resource (the operator removes the object) and drops the namespace label.
disable-monitoring:
    #!/bin/bash
    set -euo pipefail
    echo "Disabling Prometheus PodMonitor for PostgreSQL cluster..."
    # Disable PodMonitor
    kubectl patch cluster postgres-cluster -n ${CNPG_NAMESPACE} --type=merge -p '{"spec":{"monitoring":{"enablePodMonitor":false}}}'
    # Remove namespace label
    kubectl label namespace ${CNPG_NAMESPACE} buun.channel/enable-monitoring- --ignore-not-found
    echo "✓ PostgreSQL monitoring disabled"

View File

@@ -0,0 +1,9 @@
# Minimal CloudNativePG Cluster example: a 3-instance Postgres cluster with
# 1Gi of storage per instance (no storageClass set, so the cluster's default
# storage class is used).
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  storage:
    size: 1Gi

View File

@@ -0,0 +1,21 @@
# Helm values for the authentik chart.
authentik:
  # Signing key for cookies/tokens; generate a unique random value
  # (e.g. `openssl rand -base64 60`) — never deploy this placeholder.
  secret_key: "PleaseGenerateASecureKey"
  # This sends anonymous usage-data, stack traces on errors and
  # performance data to sentry.io, and is fully opt-in
  error_reporting:
    enabled: true
  postgresql:
    # Must match postgresql.auth.password below; replace the placeholder.
    password: "ThisIsNotASecurePassword"
server:
  ingress:
    # Specify kubernetes ingress controller class name.
    # The original placeholder "nginx | traefik | kong" is not a valid class
    # name; `traefik` matches this repo's Traefik module — change to
    # `nginx`/`kong` etc. if you run a different controller.
    ingressClassName: traefik
    enabled: true
    hosts:
      - authentik.domain.tld
# Bundled PostgreSQL sub-chart (disable if using an external database).
postgresql:
  enabled: true
  auth:
    password: "ThisIsNotASecurePassword"

28
12_Authentik/justfile Normal file
View File

@@ -0,0 +1,28 @@
set fallback := true

# Target namespace; override via the AUTHENTIK_NAMESPACE environment variable.
export AUTHENTIK_NAMESPACE := env("AUTHENTIK_NAMESPACE", "authentik")

[private]
default:
    @just --list --unsorted --list-submodules

# Add Helm repository
add-helm-repo:
    @helm repo add authentik https://charts.goauthentik.io
    @helm repo update

# Remove Helm repository
remove-helm-repo:
    @helm repo remove authentik

# Install (or upgrade) authentik into ${AUTHENTIK_NAMESPACE} using authentik-values.yaml
install:
    @just add-helm-repo
    @helm upgrade --cleanup-on-fail --install authentik authentik/authentik \
        -n ${AUTHENTIK_NAMESPACE} --create-namespace --wait \
        -f authentik-values.yaml

# Uninstall the Helm release and delete its namespace
uninstall:
    @helm uninstall authentik -n ${AUTHENTIK_NAMESPACE} --wait
    @kubectl delete namespace ${AUTHENTIK_NAMESPACE} --ignore-not-found

View File

@@ -8,3 +8,4 @@ CLOUDFLARE_API_TOKEN={{ .Env.CLOUDFLARE_API_TOKEN}}
ACME_EMAIL={{ .Env.ACME_EMAIL}}
EXTERNAL_DOMAIN={{ .Env.EXTERNAL_DOMAIN }}
VAULT_HOST={{ .Env.VAULT_HOST }}
AUTHENTIK_HOST={{ .Env.AUTHENTIK_HOST }}

9
env/justfile vendored
View File

@@ -129,6 +129,15 @@ setup:
exit 1
fi
done
while [ -z "${AUTHENTIK_HOST}" ]; do
if ! AUTHENTIK_HOST=$(
gum input --prompt="Authentik hostname: " \
--width=100 --placeholder="authentik"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
echo "Generating .env.local file..."
rm -f ../.env.local

View File

@@ -11,4 +11,7 @@ mod BasicSetup '01_Basic_Setup'
mod MetalLbSetup 'Metallb_Setup'
mod Traefik
mod Longhorn
mod Vault '08_Vault'
mod Vault '08_Vault'
mod ExternalSecrets '09_ExternalSecrets'
mod Postgres '10_Postgres'
mod KubePrometheusStack '07_KubePrometheusStack'