Compare commits
32 Commits
29674ae504...master
| SHA1 |
|---|
| 71348ad7f5 |
| 40eae4f567 |
| e7f648cf57 |
| dce92aeb28 |
| 07e4ae31e3 |
| 5e86aafa09 |
| 4444296443 |
| 9aafb940e9 |
| 4075203b1e |
| 92decafc3f |
| 09e1bbbc52 |
| 48d930fedc |
| 1f82ce8d02 |
| a551f2e4ca |
| a80dce42b0 |
| 63243c6d2e |
| 1f9f7e275c |
| 09026d6812 |
| 24991fce90 |
| 65a59d2d0c |
| 85fb620e39 |
| b56e02d2ed |
| 15cb2ce903 |
| b47fe8f66b |
| c5810661e5 |
| 7ddc08d622 |
| c5aa7f8105 |
| 0c6cfedcde |
| 2be83a977a |
| 4f5a18c84c |
| 7a54346331 |
| 5abc0de38a |
.gitignore (vendored, new file, +1)

@@ -0,0 +1 @@
.env.local

@@ -40,3 +40,24 @@ Rancher Installation

```
kubectl taint nodes master node-role.kubernetes.io/master=:NoSchedule
```

# Just Setup // K3sup

export SERVER_IP=192.168.178.45
export AGENT_IP=192.168.178.75
export USER=basti

k3sup install \
  --cluster \
  --ip $SERVER_IP \
  --user $USER \
  --merge \
  --local-path $HOME/.kube/config \
  --context my-k3s

k3sup join \
  --ip $AGENT_IP \
  --server-ip $SERVER_IP \
  --user $USER

01_Basic_Setup/justfile (new file, +148)

@@ -0,0 +1,148 @@
set fallback := true

export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export EXTERNAL_K8S_HOST := env("EXTERNAL_K8S_HOST", "")
export KEYCLOAK_HOST := env("KEYCLOAK_HOST", "")
export KEYCLOAK_REALM := env("KEYCLOAK_REALM", "buunstack")
export K8S_OIDC_CLIENT_ID := env('K8S_OIDC_CLIENT_ID', "k8s")
export K3S_ENABLE_REGISTRY := env("K3S_ENABLE_REGISTRY", "true")
export SERVER_IP := env("K3S_SERVER_IP", "192.168.178.45")
export AGENT_IP := env("K3S_AGENT_IP", "192.168.178.75")
export USER := env("K3S_USER", "basti")

[private]
default:
    @just --list --unsorted --list-submodules

install:
    #!/bin/bash
    set -euo pipefail
    just env::check
    username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
    kubeconfig=""
    context=""
    if gum confirm "Update KUBECONFIG?"; then
        kubeconfig=$(
            gum input --prompt="KUBECONFIG file: " --value="${HOME}/.kube/config" --width=100
        )
        context=$(
            gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
        )
    fi

    args=(
        "install"
        "--context" "${context}"
        "--host" "${K8S_MASTER_NODE_NAME}"
        "--user" "${username}"
        "--no-extras" #
    )

    if [ -n "${kubeconfig}" ]; then
        mkdir -p "$(dirname "${kubeconfig}")"
        args+=("--local-path" "${kubeconfig}" "--merge")
    fi
    echo "Running: k3sup ${args[@]}"
    k3sup "${args[@]}"

    if [ -n "${context}" ]; then
        kubectl config use-context "${context}"
    fi

    if [ "${K3S_ENABLE_REGISTRY}" = "true" ]; then
        echo "Setting up local Docker registry..."

        # Deploy Docker registry to cluster
        kubectl apply -f ./registry/registry.yaml

        # Set Pod Security Standard for registry namespace
        kubectl label namespace registry pod-security.kubernetes.io/enforce=restricted --overwrite

        # Wait for registry deployment
        echo "Waiting for registry to be ready..."
        kubectl wait --for=condition=available --timeout=60s deployment/registry -n registry

        # Configure registries.yaml for k3s
        just configure-registry

        echo "✓ Local Docker registry deployed and configured"
        echo ""
        echo "Registry accessible at:"
        echo "  localhost:30500"
        echo ""
        echo "Usage:"
        echo "  export DOCKER_HOST=ssh://${K8S_MASTER_NODE_NAME}"
        echo "  docker build -t localhost:30500/myapp:latest ."
        echo "  docker push localhost:30500/myapp:latest"
        echo "  kubectl run myapp --image=localhost:30500/myapp:latest"
    fi

    echo "k3s cluster installed on ${K8S_MASTER_NODE_NAME}."

uninstall:
    #!/bin/bash
    set -euo pipefail
    if gum confirm "Uninstall k3s from ${K8S_MASTER_NODE_NAME}?"; then

        if gum confirm "Also remove Agent node at ${AGENT_IP}?"; then
            echo "Removing Agent node at ${AGENT_IP}..."
            ssh "${AGENT_IP}" "/usr/local/bin/k3s-agent-uninstall.sh"
        fi

        echo "Removing content of Server node..."
        ssh "${K8S_MASTER_NODE_NAME}" "/usr/local/bin/k3s-uninstall.sh"
        echo "Cleaning up kubeconfig entries..."
        cluster_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.cluster // empty")
        user_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.user // empty")
        if kubectl config get-contexts "${K8S_CONTEXT}" &>/dev/null; then
            kubectl config delete-context "${K8S_CONTEXT}"
            echo "Deleted context: ${K8S_CONTEXT}"
        fi
        if [ -n "${cluster_name}" ] && kubectl config get-clusters | grep -q "^${cluster_name}$"; then
            kubectl config delete-cluster "${cluster_name}"
            echo "Deleted cluster: ${cluster_name}"
        fi
        if [ -n "${user_name}" ] && kubectl config get-users | grep -q "^${user_name}$"; then
            kubectl config delete-user "${user_name}"
            echo "Deleted user: ${user_name}"
        fi
        echo "k3s cluster uninstalled from ${K8S_CONTEXT}."
    else
        echo "Uninstallation cancelled." >&2
        exit 1
    fi

add-agent:
    #!/bin/bash
    set -euo pipefail
    just env::check

    username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
    new_agent_ip=$(gum input --prompt="Agent IP to join cluster: " --value="${AGENT_IP}" --width=100)

    args=(
        "join"
        "--ip" "${new_agent_ip}"
        "--server-ip" "${SERVER_IP}"
        "--user" "${username}"
    )

    echo "Running: k3sup ${args[*]}"
    k3sup "${args[@]}"
    echo "Agent node at ${new_agent_ip} added to cluster."

# Configure k3s to use local registry
configure-registry:
    #!/bin/bash
    set -euo pipefail
    echo "Configuring k3s registries.yaml..."

    ssh "${K8S_MASTER_NODE_NAME}" "sudo mkdir -p /etc/rancher/k3s"
    gomplate -f ./registry/registries.gomplate.yaml | ssh "${K8S_MASTER_NODE_NAME}" "sudo tee /etc/rancher/k3s/registries.yaml > /dev/null"

    echo "Restarting k3s to apply registry configuration..."
    ssh "${K8S_MASTER_NODE_NAME}" "sudo systemctl restart k3s"
    echo "✓ Registry configuration applied"

01_Basic_Setup/registry/registries.gomplate.yaml (new file, +4)

@@ -0,0 +1,4 @@
configs:
  "localhost:30500":
    tls:
      insecure_skip_verify: true

01_Basic_Setup/registry/registry.yaml (new file, +109)

@@ -0,0 +1,109 @@
apiVersion: v1
kind: Namespace
metadata:
  name: registry
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry
  namespace: registry
  labels:
    app: registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
  template:
    metadata:
      labels:
        app: registry
    spec:
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
        fsGroup: 65534
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: registry
          image: registry:2
          ports:
            - containerPort: 5000
              name: http
          resources:
            requests:
              cpu: 25m
              memory: 128Mi
            limits:
              cpu: 2000m
              memory: 20Gi
          env:
            - name: REGISTRY_STORAGE_DELETE_ENABLED
              value: "true"
            - name: REGISTRY_HTTP_ADDR
              value: "0.0.0.0:5000"
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 65534
            capabilities:
              drop:
                - ALL
          volumeMounts:
            - name: registry-data
              mountPath: /var/lib/registry
            - name: tmp
              mountPath: /tmp
          livenessProbe:
            httpGet:
              path: /v2/
              port: 5000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /v2/
              port: 5000
            initialDelaySeconds: 5
            periodSeconds: 5
      volumes:
        - name: registry-data
          emptyDir: {}
        - name: tmp
          emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: registry
  namespace: registry
  labels:
    app: registry
spec:
  selector:
    app: registry
  ports:
    - port: 5000
      targetPort: 5000
      name: http
  type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
  name: registry-nodeport
  namespace: registry
  labels:
    app: registry
spec:
  selector:
    app: registry
  ports:
    - port: 5000
      targetPort: 5000
      nodePort: 30500
      name: http
  type: NodePort

07_KubePrometheusStack/justfile (new file, +22)

@@ -0,0 +1,22 @@
set fallback := true

export PROMETHEUS_NAMESPACE := env("PROMETHEUS_NAMESPACE", "prometheus")

[private]
default:
    @just --list --unsorted --list-submodules

add-helm-repo:
    @helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
    @helm repo update

install:
    @just add-helm-repo
    @helm upgrade --cleanup-on-fail --install kube-prometheus-stack prometheus-community/kube-prometheus-stack \
        --wait \
        -f kube-stack-config-values.yaml

uninstall:
    helm uninstall kube-prometheus-stack

07_KubePrometheusStack/kube-stack-config-values.yaml (new file, +78)

@@ -0,0 +1,78 @@
#
# Copyright © contributors to CloudNativePG, established as
# CloudNativePG a Series of LF Projects, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#

# -- here you can pass the whole values directly to the kube-prometheus-stack chart
enabled: true
kubeControllerManager:
  enabled: false
nodeExporter:
  enabled: false
defaultRules:
  create: true
  rules:
    alertmanager: false
    etcd: false
    configReloaders: false
    general: false
    k8s: true
    kubeApiserver: false
    kubeApiserverAvailability: false
    kubeApiserverSlos: false
    kubelet: true
    kubeProxy: false
    kubePrometheusGeneral: false
    kubePrometheusNodeRecording: false
    kubernetesApps: false
    kubernetesResources: false
    kubernetesStorage: false
    kubernetesSystem: false
    kubeScheduler: false
    kubeStateMetrics: false
    network: false
    node: true
    nodeExporterAlerting: false
    nodeExporterRecording: true
    prometheus: false
    prometheusOperator: false

#nodeSelector:
#  workload: monitor
prometheus:
  prometheusSpec:
    podMonitorSelectorNilUsesHelmValues: false
    ruleSelectorNilUsesHelmValues: false
    serviceMonitorSelectorNilUsesHelmValues: false
    probeSelectorNilUsesHelmValues: false
    #nodeSelector:
    #  workload: monitor
grafana:
  enabled: true
  # -- the grafana admin password
  adminPassword: prom-operator
  defaultDashboardsEnabled: false
  sidecar:
    dashboards:
      enabled: true
  #nodeSelector:
  #  workload: monitor
alertmanager:
  enabled: true
  #alertManagerSpec:
  #  nodeSelector:
  #    workload: monitor

@@ -129,3 +129,137 @@ Use the below command to check the vault secrets from the pod volume

```
kubectl exec -it vault-test-84d9dc9986-gcxfv -- sh -c "cat /vault/secrets/login && cat /vault/secrets/my-first-secret" -n vault
```

----

Once you have configured Kubernetes with Vault, you enable a **secure integration between your Kubernetes cluster and HashiCorp Vault**. These are the most important scenarios and benefits:

## Main features

### 1. **Automatic pod authentication**
Pods can authenticate against Vault automatically, without you having to distribute credentials by hand. Vault uses Kubernetes service accounts to verify identity.

### 2. **Dynamic secrets for applications**
Applications can fetch secrets from Vault at runtime instead of storing them in ConfigMaps or Kubernetes Secrets.

## Practical scenarios

### **Scenario 1: Vault Agent sidecar injection**
Vault automatically injects a sidecar container that fetches secrets and makes them available to your app:

```yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    vault.hashicorp.com/agent-inject: "true"
    vault.hashicorp.com/role: "myapp"
    vault.hashicorp.com/agent-inject-secret-database: "database/creds/myapp-role"
spec:
  serviceAccountName: myapp
  containers:
    - name: app
      image: myapp:latest
```

**Result:** Database credentials are provided automatically under `/vault/secrets/database`.

### **Scenario 2: Dynamic database credentials**
Instead of using static DB passwords, Vault generates temporary credentials (see the sketch after this list):

- Each pod gets its own DB credentials
- Credentials are time-limited (e.g. 24h)
- Automatic rotation
- Easy revocation if compromised
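A minimal sketch of how this could be wired up with Vault's database secrets engine; the connection string, role name, and TTLs below are illustrative assumptions, not values from this repository:

```bash
# Assumption: database secrets engine mounted at its default path
vault secrets enable database

# Tell Vault how to reach Postgres so it can manage user accounts
# (host, database, and admin credentials are placeholders)
vault write database/config/my-postgres \
    plugin_name=postgresql-database-plugin \
    allowed_roles="myapp-role" \
    connection_url="postgresql://{{username}}:{{password}}@postgres-cluster.postgres:5432/postgres" \
    username="postgres" \
    password="${POSTGRES_ADMIN_PASSWORD}"

# Define how Vault creates the short-lived user (24h lease, as above)
vault write database/roles/myapp-role \
    db_name=my-postgres \
    creation_statements="CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';" \
    default_ttl="24h" \
    max_ttl="48h"

# Every read returns a fresh username/password pair with its own lease
vault read database/creds/myapp-role
```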
### **Scenario 3: External Secrets Operator (ESO)**
Secrets are synced into native Kubernetes Secrets:

```yaml
apiVersion: external-secrets.io/v1beta1
kind: SecretStore
metadata:
  name: vault-backend
spec:
  provider:
    vault:
      server: "https://vault.test.k8s.schnrbs.work"
      path: "secret"
      auth:
        kubernetes:
          mountPath: "kubernetes"
          role: "myapp"
```
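To complete the picture, a sketch of a hypothetical ExternalSecret that pulls a value through the `vault-backend` store above into a native Kubernetes Secret; the secret names and key paths are made up for illustration:

```bash
# Sketch: sync myapp/db (below the store's base path "secret") into a
# Kubernetes Secret named myapp-db
kubectl apply -f - <<EOF
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: myapp-db
spec:
  refreshInterval: 1h
  secretStoreRef:
    name: vault-backend
    kind: SecretStore
  target:
    name: myapp-db
  data:
    - secretKey: password
      remoteRef:
        key: myapp/db
        property: password
EOF
```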
### **Scenario 4: Encryption as a service**
Applications can use Vault's Transit engine:

```bash
# Encrypt data without ever seeing the key
vault write transit/encrypt/my-key plaintext=$(base64 <<< "sensitive data")

# Decrypt data
vault write transit/decrypt/my-key ciphertext="vault:v1:abc..."
```

### **Scenario 5: PKI / certificate management**
Automatic issuance of TLS certificates for service-to-service communication (a sketch follows this list):

- Short-lived certificates (e.g. 1h)
- Automatic rotation
- Zero-trust networking
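A rough sketch of what this could look like with Vault's PKI engine; the CA, role, and domain names are placeholders, not part of this setup:

```bash
# Enable the PKI engine and generate an internal root CA (1-year TTL)
vault secrets enable pki
vault secrets tune -max-lease-ttl=8760h pki
vault write pki/root/generate/internal \
    common_name="svc.cluster.local" ttl=8760h

# Role that issues short-lived (1h) certificates for in-cluster services
vault write pki/roles/internal-services \
    allowed_domains="svc.cluster.local" \
    allow_subdomains=true \
    max_ttl="1h"

# Each call returns a fresh certificate plus private key
vault write pki/issue/internal-services \
    common_name="myapp.default.svc.cluster.local"
```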
### **Scenario 6: Multi-tenancy**
Different namespaces/teams get isolated access:

```bash
# Team A may only access secret/team-a/*
# Team B may only access secret/team-b/*
```
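The isolation comes from per-team policies bound to per-namespace Kubernetes auth roles; a minimal sketch for Team A, assuming the path layout from the comments above:

```bash
# Policy restricting Team A to its own KV v2 subtree
vault policy write team-a - <<EOF
path "secret/data/team-a/*" {
  capabilities = ["create", "read", "update", "delete", "list"]
}
EOF

# Bind the policy to service accounts in Team A's namespace
vault write auth/kubernetes/role/team-a \
    bound_service_account_names=default \
    bound_service_account_namespaces=team-a \
    policies=team-a \
    ttl=1h
```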
## Advantages over Kubernetes Secrets

| Aspect | Kubernetes Secrets | Vault integration |
|--------|-------------------|-------------------|
| Encryption at rest | Optional, at the etcd level | Always, additionally encrypted |
| Secret rotation | Manual | Automatic/dynamic |
| Audit log | Limited | Detailed, for every access |
| Dynamic secrets | No | Yes (DB, cloud, etc.) |
| Granular policies | Limited | Very fine-grained |
| Encryption-as-a-Service | No | Yes |

## Typical workflow after configuration

1. **Create a policy:** Define who may access which secrets
2. **Create a role:** Link Kubernetes service accounts to Vault policies
3. **Provide secrets:** Use Vault Agent injection or the CSI driver
4. **Deploy the application:** Pods authenticate automatically

## Best Practice Setup

After enabling Kubernetes auth, you should:

```bash
# 1. Create a policy
vault policy write myapp - <<EOF
path "secret/data/myapp/*" {
  capabilities = ["read"]
}
EOF

# 2. Create a role
vault write auth/kubernetes/role/myapp \
    bound_service_account_names=myapp \
    bound_service_account_namespaces=production \
    policies=myapp \
    ttl=1h

# 3. Create the service account in K8s
kubectl create serviceaccount myapp -n production
```

08_Vault/auth-token-secret.yaml (new file, +8)

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: vault-auth-token
  annotations:
    kubernetes.io/service-account.name: vault-auth
type: kubernetes.io/service-account-token

08_Vault/justfile (new file, +126)

@@ -0,0 +1,126 @@
set fallback := true

export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
export VAULT_CHART_VERSION := env("VAULT_CHART_VERSION", "0.31.0")
export VAULT_HOST := env("VAULT_HOST", "")
export VAULT_ADDR := "https://" + VAULT_HOST
export VAULT_DEBUG := env("VAULT_DEBUG", "false")
SECRET_PATH := "secret"

[private]
default:
    @just --list --unsorted --list-submodules

# Add Helm repository
add-helm-repo:
    helm repo add hashicorp https://helm.releases.hashicorp.com
    helm repo update

# Remove Helm repository
remove-helm-repo:
    helm repo remove hashicorp

# Create Vault namespace
create-namespace:
    @kubectl get namespace ${K8S_VAULT_NAMESPACE} > /dev/null || kubectl create namespace ${K8S_VAULT_NAMESPACE}

# Delete Vault namespace
delete-namespace:
    @kubectl delete namespace ${K8S_VAULT_NAMESPACE} --ignore-not-found

install:
    #!/bin/bash
    set -eu
    just create-namespace
    just add-helm-repo

    gomplate -f vault-values.gomplate.yaml -o vault-values.yaml

    helm upgrade \
        --cleanup-on-fail \
        --install \
        vault \
        hashicorp/vault \
        --namespace ${K8S_VAULT_NAMESPACE} \
        --wait \
        -f vault-values.yaml

    kubectl wait pod --for=condition=PodReadyToStartContainers \
        -n ${K8S_VAULT_NAMESPACE} vault-0 --timeout=5m

    # Wait for Vault service to be ready to accept connections
    echo "Waiting for Vault service to be ready..."
    for i in {1..30}; do
        if kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
            vault status 2>&1 | grep -qE "(Initialized|Sealed)"; then
            echo "✓ Vault service is ready"
            break
        fi
        if [ $i -eq 30 ]; then
            echo "Error: Timeout waiting for Vault service to be ready"
            exit 1
        fi
        sleep 3
    done

    init_output=$(kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
        vault operator init -key-shares=1 -key-threshold=1 -format=json || true)

    root_token=""
    if echo "${init_output}" | grep -q "Vault is already initialized"; then
        echo "Vault is already initialized"
        while [ -z "${root_token}" ]; do
            root_token=$(gum input --prompt="Vault root token: " --password --width=100)
        done
    else
        unseal_key=$(echo "${init_output}" | jq -r '.unseal_keys_b64[0]')
        root_token=$(echo "${init_output}" | jq -r '.root_token')
        kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
            vault operator unseal "${unseal_key}"
        echo "Vault initialized and unsealed successfully"
        echo "Root Token: ${root_token}"
        echo "Unseal Key: ${unseal_key}"
        echo "Please save these credentials securely!"
    fi

    # Wait for all vault instances to pass readiness checks and be ready to serve requests
    kubectl wait pod --for=condition=ready -n ${K8S_VAULT_NAMESPACE} \
        -l app.kubernetes.io/name=vault --timeout=5m

    just setup-kubernetes-auth "${root_token}"

# Uninstall Vault
uninstall delete-ns='false':
    #!/bin/bash
    set -euo pipefail
    helm uninstall vault -n ${K8S_VAULT_NAMESPACE} --ignore-not-found --wait
    just delete-namespace

# Setup Kubernetes authentication
setup-kubernetes-auth root_token='':
    #!/bin/bash
    set -euo pipefail
    export VAULT_TOKEN="{{ root_token }}"
    while [ -z "${VAULT_TOKEN}" ]; do
        VAULT_TOKEN=$(gum input --prompt="Vault root token: " --password --width=100)
    done

    gomplate -f ./serviceaccount.gomplate.yaml | kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f -
    gomplate -f ./rolebinding.gomplate.yaml | kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f -
    kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f ./auth-token-secret.yaml

    SA_SECRET="vault-auth-token"
    SA_JWT=$(kubectl get secret -n ${K8S_VAULT_NAMESPACE} ${SA_SECRET} -o jsonpath='{.data.token}' | base64 --decode)
    SA_CA=$(kubectl get secret -n ${K8S_VAULT_NAMESPACE} ${SA_SECRET} -o jsonpath='{.data.ca\.crt}' | base64 --decode)

    vault auth list -format=json | jq -e '.["kubernetes/"]' >/dev/null 2>&1 || \
        vault auth enable kubernetes

    vault write auth/kubernetes/config \
        token_reviewer_jwt="${SA_JWT}" \
        kubernetes_host="https://kubernetes.default.svc" \
        kubernetes_ca_cert="${SA_CA}"

08_Vault/rolebinding.gomplate.yaml (new file, +12)

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: vault-auth-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: vault-auth
    namespace: {{ .Env.K8S_VAULT_NAMESPACE }}

08_Vault/serviceaccount.gomplate.yaml (new file, +5)

@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: vault-auth
  namespace: {{ .Env.K8S_VAULT_NAMESPACE }}

08_Vault/vault-values.gomplate.yaml (new file, +16)

@@ -0,0 +1,16 @@
server:
  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: traefik
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
    ingressClassName: traefik
    hosts:
      - host: {{ .Env.VAULT_HOST }}
        paths:
          - /
    tls:
      - hosts:
          - {{ .Env.VAULT_HOST }}
  dataStorage:
    storageClass: longhorn

08_Vault/vault-values.yaml (new file, +16)

@@ -0,0 +1,16 @@
server:
  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: traefik
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
    ingressClassName: traefik
    hosts:
      - host: vault.test.k8s.schnrbs.work
        paths:
          - /
    tls:
      - hosts:
          - vault.test.k8s.schnrbs.work
  dataStorage:
    storageClass: longhorn

09_ExternalSecrets/external-secrets-values.yaml (new file, +51)

@@ -0,0 +1,51 @@
# External Secrets Operator resource configuration
# Based on Goldilocks recommendations (Burstable QoS)

podSecurityContext:
  runAsNonRoot: true
  runAsUser: 1000
  runAsGroup: 1000
  fsGroup: 1000
  seccompProfile:
    type: RuntimeDefault

# Main controller
resources:
  requests:
    cpu: 15m
    memory: 192Mi
  limits:
    cpu: 50m
    memory: 256Mi

certController:
  podSecurityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
    fsGroup: 1000
    seccompProfile:
      type: RuntimeDefault
  resources:
    requests:
      cpu: 15m
      memory: 192Mi
    limits:
      cpu: 50m
      memory: 256Mi

webhook:
  podSecurityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
    fsGroup: 1000
    seccompProfile:
      type: RuntimeDefault
  resources:
    requests:
      cpu: 15m
      memory: 128Mi
    limits:
      cpu: 50m
      memory: 256Mi

09_ExternalSecrets/justfile (new file, +65)

@@ -0,0 +1,65 @@
set fallback := true

export EXTERNAL_SECRETS_NAMESPACE := env("EXTERNAL_SECRETS_NAMESPACE", "external-secrets")
export EXTERNAL_SECRETS_CHART_VERSION := env("EXTERNAL_SECRETS_CHART_VERSION", "1.1.0")
export EXTERNAL_SECRETS_REFRESH_INTERVAL := env("EXTERNAL_SECRETS_REFRESH_INTERVAL", "1800")
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
export VAULT_HOST := env("VAULT_HOST", "")
export VAULT_ADDR := "https://" + VAULT_HOST

[private]
default:
    @just --list --unsorted --list-submodules

# Add Helm repository
add-helm-repo:
    helm repo add external-secrets https://charts.external-secrets.io
    helm repo update

# Remove Helm repository
remove-helm-repo:
    helm repo remove external-secrets

# Install External Secrets
install:
    just add-helm-repo
    helm upgrade --cleanup-on-fail \
        --install external-secrets external-secrets/external-secrets \
        --version ${EXTERNAL_SECRETS_CHART_VERSION} -n ${EXTERNAL_SECRETS_NAMESPACE} \
        --create-namespace --wait \
        -f external-secrets-values.yaml

    kubectl label namespace ${EXTERNAL_SECRETS_NAMESPACE} \
        pod-security.kubernetes.io/enforce=restricted --overwrite

    just create-external-secrets-role
    just create-vault-secret-store

# Uninstall External Secrets
uninstall:
    just delete-vault-secret-store
    helm uninstall external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} --wait
    kubectl delete namespace ${EXTERNAL_SECRETS_NAMESPACE} --ignore-not-found

# Create Vault Secret Store for External Secrets
create-vault-secret-store:
    gomplate -f ./vault-secret-store.gomplate.yaml | kubectl apply -f -

# Delete Vault Secret Store for External Secrets
delete-vault-secret-store:
    gomplate -f ./vault-secret-store.gomplate.yaml | kubectl delete --ignore-not-found -f -

# Create Vault role for External Secrets
create-external-secrets-role root_token='':
    #!/bin/bash
    set -euo pipefail
    export VAULT_TOKEN="{{ root_token }}"
    while [ -z "${VAULT_TOKEN}" ]; do
        VAULT_TOKEN=$(gum input --prompt="Vault root token: " --password --width=100)
    done
    vault write auth/kubernetes/role/external-secrets \
        bound_service_account_names=external-secrets \
        bound_service_account_namespaces=${EXTERNAL_SECRETS_NAMESPACE} \
        audience=vault \
        policies=admin \
        ttl=1h

09_ExternalSecrets/vault-secret-store.gomplate.yaml (new file, +22)

@@ -0,0 +1,22 @@
apiVersion: external-secrets.io/v1
kind: ClusterSecretStore
metadata:
  name: vault-secret-store
spec:
  provider:
    vault:
      server: http://vault.{{ .Env.K8S_VAULT_NAMESPACE }}:8200
      path: secret
      version: v2
      auth:
        kubernetes:
          role: external-secrets
          mountPath: kubernetes
          serviceAccountRef:
            name: external-secrets
            namespace: {{ .Env.EXTERNAL_SECRETS_NAMESPACE }}
            # Audience must match the audience configured in Vault Kubernetes auth role
            # Required for Vault 1.21+ compatibility
            audiences:
              - vault
  refreshInterval: {{ .Env.EXTERNAL_SECRETS_REFRESH_INTERVAL }}

10_Postgres/cnpg-values.yaml (new file, +27)

@@ -0,0 +1,27 @@
# Pod Security Context for restricted Pod Security Standards
#podSecurityContext:
#  runAsNonRoot: true
#  seccompProfile:
#    type: RuntimeDefault
#  fsGroup: 10001
#
## Container Security Context for restricted Pod Security Standards
#containerSecurityContext:
#  allowPrivilegeEscalation: false
#  readOnlyRootFilesystem: true
#  runAsUser: 10001
#  runAsGroup: 10001
#  seccompProfile:
#    type: RuntimeDefault
#  capabilities:
#    drop:
#      - ALL
#
resources:
  requests:
    cpu: 50m
    memory: 128Mi
  limits:
    cpu: 100m
    memory: 256Mi

647
10_Postgres/justfile
Normal file
647
10_Postgres/justfile
Normal file
@@ -0,0 +1,647 @@
|
||||
set fallback := true
|
||||
|
||||
export CNPG_NAMESPACE := env("CNPG_NAMESPACE", "postgres")
|
||||
export CNPG_CHART_VERSION := env("CNPG_CHART_VERSION", "0.26.1")
|
||||
export CNPG_CLUSTER_CHART_VERSION := env("CNPG_CLUSTER_CHART_VERSION", "0.3.1")
|
||||
export POSTGRES_STORAGE_SIZE := env("POSTGRES_STORAGE_SIZE", "20Gi")
|
||||
export POSTGRES_MAX_CONNECTIONS := env("POSTGRES_MAX_CONNECTIONS", "200")
|
||||
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
|
||||
export EXTERNAL_SECRETS_NAMESPACE := env("EXTERNAL_SECRETS_NAMESPACE", "external-secrets")
|
||||
|
||||
[private]
|
||||
default:
|
||||
@just --list --unsorted --list-submodules
|
||||
|
||||
# Add Helm repository
|
||||
add-helm-repo:
|
||||
@helm repo add cnpg https://cloudnative-pg.github.io/charts
|
||||
@helm repo update
|
||||
|
||||
# Remove Helm repository
|
||||
remove-helm-repo:
|
||||
@helm repo remove cnpg
|
||||
|
||||
# Install CloudNativePG and create a cluster
|
||||
install:
|
||||
@just install-cnpg
|
||||
@just create-cluster
|
||||
|
||||
# Uninstall CloudNativePG and delete the cluster
|
||||
uninstall:
|
||||
@just delete-cluster
|
||||
@just uninstall-cnpg
|
||||
|
||||
# Install CloudNativePG
|
||||
install-cnpg:
|
||||
@just add-helm-repo
|
||||
@helm upgrade --cleanup-on-fail --install cnpg cnpg/cloudnative-pg \
|
||||
--version ${CNPG_CHART_VERSION} \
|
||||
-n ${CNPG_NAMESPACE} --create-namespace --wait \
|
||||
-f cnpg-values.yaml
|
||||
|
||||
@kubectl label namespace ${CNPG_NAMESPACE} \
|
||||
pod-security.kubernetes.io/enforce=restricted --overwrite
|
||||
|
||||
# Uninstall CloudNativePG
|
||||
uninstall-cnpg:
|
||||
@helm uninstall cnpg -n ${CNPG_NAMESPACE} --wait
|
||||
@kubectl delete namespace ${CNPG_NAMESPACE} --ignore-not-found
|
||||
|
||||
# Create Postgres cluster
|
||||
create-cluster:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
if helm status external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} &>/dev/null; then
|
||||
echo "External Secrets Operator detected. Creating admin credentials via ExternalSecret..."
|
||||
password=$(just utils::random-password)
|
||||
just vault::put-root postgres/admin username=postgres password="${password}"
|
||||
|
||||
kubectl delete externalsecret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
|
||||
gomplate -f postgres-superuser-external-secret.gomplate.yaml | kubectl apply -f -
|
||||
|
||||
echo "Waiting for ExternalSecret to sync..."
|
||||
kubectl wait --for=condition=Ready externalsecret/postgres-cluster-superuser \
|
||||
-n ${CNPG_NAMESPACE} --timeout=60s
|
||||
else
|
||||
echo "External Secrets Operator not found. Creating superuser secret directly..."
|
||||
password=$(just utils::random-password)
|
||||
kubectl delete secret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
|
||||
kubectl create secret generic postgres-cluster-superuser -n ${CNPG_NAMESPACE} \
|
||||
--from-literal=username=postgres \
|
||||
--from-literal=password="${password}"
|
||||
|
||||
if helm status vault -n ${K8S_VAULT_NAMESPACE} &>/dev/null; then
|
||||
just vault::put-root postgres/admin username=postgres password="${password}"
|
||||
fi
|
||||
fi
|
||||
|
||||
gomplate -f postgres-cluster-values.gomplate.yaml -o postgres-cluster-values.yaml
|
||||
helm upgrade --install postgres-cluster cnpg/cluster \
|
||||
--version ${CNPG_CLUSTER_CHART_VERSION} \
|
||||
-n ${CNPG_NAMESPACE} --wait -f postgres-cluster-values.yaml
|
||||
|
||||
echo "Waiting for PostgreSQL cluster to be ready..."
|
||||
kubectl wait --for=condition=Ready clusters.postgresql.cnpg.io/postgres-cluster \
|
||||
-n ${CNPG_NAMESPACE} --timeout=300s
|
||||
|
||||
# Delete Postgres cluster
|
||||
delete-cluster:
|
||||
@helm uninstall postgres-cluster -n ${CNPG_NAMESPACE} --ignore-not-found --wait
|
||||
@kubectl delete externalsecret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
|
||||
@kubectl delete secret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
|
||||
|
||||
# Print Postgres username
|
||||
admin-username:
|
||||
@echo "postgres"
|
||||
|
||||
# Print Postgres password
|
||||
admin-password:
|
||||
@kubectl get -n ${CNPG_NAMESPACE} secret postgres-cluster-superuser \
|
||||
-o jsonpath="{.data.password}" | base64 --decode
|
||||
@echo
|
||||
|
||||
# Create Postgres database
|
||||
create-db db_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-{{ db_name }}}
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
if just db-exists ${DB_NAME} &>/dev/null; then
|
||||
echo "Database ${DB_NAME} already exists" >&2
|
||||
exit
|
||||
fi
|
||||
echo "Creating database ${DB_NAME}..."
|
||||
just psql -c "\"CREATE DATABASE ${DB_NAME};\""
|
||||
echo "Database ${DB_NAME} created."
|
||||
|
||||
# Delete Postgres database
|
||||
delete-db db_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-{{ db_name }}}
|
||||
if ! just db-exists ${DB_NAME} &>/dev/null; then
|
||||
echo "Database ${DB_NAME} does not exist." >&2
|
||||
exit
|
||||
fi
|
||||
# Terminate all connections to the database
|
||||
just psql -c "\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity
|
||||
WHERE datname = '${DB_NAME}' AND pid <> pg_backend_pid();\""
|
||||
# Force disconnect if needed
|
||||
just psql -c "\"UPDATE pg_database SET datallowconn = false WHERE datname = '${DB_NAME}';\""
|
||||
just psql -c "\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity
|
||||
WHERE datname = '${DB_NAME}';\""
|
||||
just psql -c "\"DROP DATABASE ${DB_NAME};\""
|
||||
echo "Database ${DB_NAME} deleted."
|
||||
|
||||
# Check if database exists
|
||||
[no-exit-message]
|
||||
db-exists db_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-{{ db_name }}}
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
if echo '\l' | just postgres::psql | grep -E "^ *${DB_NAME} *\|" &>/dev/null; then
|
||||
echo "Database ${DB_NAME} exists."
|
||||
else
|
||||
echo "Database ${DB_NAME} does not exist." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create Postgres user
|
||||
create-user username='' password='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
PASSWORD=${PASSWORD:-"{{ password }}"}
|
||||
while [ -z "${USERNAME}" ]; do
|
||||
USERNAME=$(gum input --prompt="Username: " --width=100)
|
||||
done
|
||||
if just user-exists ${USERNAME} &>/dev/null; then
|
||||
echo "User ${USERNAME} already exists" >&2
|
||||
exit
|
||||
fi
|
||||
if [ -z "${PASSWORD}" ]; then
|
||||
PASSWORD=$(gum input --prompt="Password: " --password --width=100 \
|
||||
--placeholder="Empty to generate a random password")
|
||||
fi
|
||||
if [ -z "${PASSWORD}" ]; then
|
||||
PASSWORD=$(just random-password)
|
||||
echo "Generated random password: ${PASSWORD}"
|
||||
fi
|
||||
just psql -c "\"CREATE USER ${USERNAME} WITH LOGIN PASSWORD '${PASSWORD}';\""
|
||||
echo "User ${USERNAME} created."
|
||||
|
||||
# Delete Postgres user
|
||||
delete-user username='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
if ! just user-exists ${USERNAME} &>/dev/null; then
|
||||
echo "User ${USERNAME} does not exist." >&2
|
||||
exit
|
||||
fi
|
||||
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON TABLES FROM ${USERNAME};\""
|
||||
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON SEQUENCES FROM ${USERNAME};\""
|
||||
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON FUNCTIONS FROM ${USERNAME};\""
|
||||
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON TYPES FROM ${USERNAME};\""
|
||||
just psql -c "\"ALTER SCHEMA public OWNER TO postgres;\""
|
||||
just psql -c "\"DROP USER ${USERNAME};\""
|
||||
echo "User ${USERNAME} deleted."
|
||||
|
||||
# Check if user exists
|
||||
[no-exit-message]
|
||||
user-exists username='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
while [ -z "${USERNAME}" ]; do
|
||||
USERNAME=$(gum input --prompt="Username: " --width=100)
|
||||
done
|
||||
if echo '\du' | just postgres::psql | grep -E "^ *${USERNAME} *\|" &>/dev/null; then
|
||||
echo "User ${USERNAME} exists."
|
||||
else
|
||||
echo "User ${USERNAME} does not exist." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Change user password
|
||||
change-password username='' password='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
PASSWORD=${PASSWORD:-"{{ password }}"}
|
||||
while [ -z "${USERNAME}" ]; do
|
||||
USERNAME=$(gum input --prompt="Username: " --width=100)
|
||||
done
|
||||
if ! just user-exists ${USERNAME} &>/dev/null; then
|
||||
echo "User ${USERNAME} does not exist." >&2
|
||||
exit 1
|
||||
fi
|
||||
if [ -z "${PASSWORD}" ]; then
|
||||
PASSWORD=$(gum input --prompt="New password: " --password --width=100 \
|
||||
--placeholder="Empty to generate a random password")
|
||||
fi
|
||||
if [ -z "${PASSWORD}" ]; then
|
||||
PASSWORD=$(just utils::random-password)
|
||||
echo "Generated random password: ${PASSWORD}"
|
||||
fi
|
||||
just psql -c "\"ALTER USER ${USERNAME} WITH PASSWORD '${PASSWORD}';\""
|
||||
echo "Password changed for user ${USERNAME}."
|
||||
|
||||
# Grant all privileges on database to user
|
||||
grant db_name='' username='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
while [ -z "${USERNAME}" ]; do
|
||||
USERNAME=$(gum input --prompt="Username: " --width=100)
|
||||
done
|
||||
if ! just psql ${DB_NAME} -U postgres -P pager=off -c "\"SELECT 1;\""; then
|
||||
echo "Database ${DB_NAME} does not exist." >&2
|
||||
exit 1
|
||||
fi
|
||||
just psql -c "\"GRANT ALL PRIVILEGES ON DATABASE ${DB_NAME} TO ${USERNAME};\""
|
||||
# Grant CREATE permission on public schema (needed for PostgreSQL 15+)
|
||||
just psql -d ${DB_NAME} -c "\"GRANT CREATE ON SCHEMA public TO ${USERNAME};\""
|
||||
echo "Privileges granted."
|
||||
|
||||
# Revoke all privileges on database from user
|
||||
revoke db_name='' username='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
while [ -z "${USERNAME}" ]; do
|
||||
USERNAME=$(gum input --prompt="Username: " --width=100)
|
||||
done
|
||||
if ! just psql -U postgres ${DB_NAME} -P pager=off -c "\"SELECT 1;\""; then
|
||||
echo "Database ${DB_NAME} does not exist." >&2
|
||||
exit 1
|
||||
fi
|
||||
just psql -c "\"REVOKE ALL PRIVILEGES ON DATABASE ${DB_NAME} FROM ${USERNAME};\""
|
||||
echo "Privileges revoked."
|
||||
|
||||
# Create Postgres database and user
|
||||
create-user-and-db username='' db_name='' password='':
|
||||
@just create-db "{{ db_name }}"
|
||||
@just create-user "{{ username }}" "{{ password }}"
|
||||
@just grant "{{ db_name }}" "{{ username }}"
|
||||
|
||||
# Delete Postgres database and user
|
||||
delete-user-and-db username='' db_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
if just db-exists ${DB_NAME} &>/dev/null; then
|
||||
if just user-exists ${USERNAME} &>/dev/null; then
|
||||
just revoke "${DB_NAME}" "${USERNAME}"
|
||||
else
|
||||
echo "User ${USERNAME} does not exist, skipping revoke."
|
||||
fi
|
||||
just delete-db "${DB_NAME}"
|
||||
else
|
||||
echo "Database ${DB_NAME} does not exist, skipping database deletion."
|
||||
fi
|
||||
if just user-exists ${USERNAME} &>/dev/null; then
|
||||
just delete-user "${USERNAME}"
|
||||
else
|
||||
echo "User ${USERNAME} does not exist, skipping user deletion."
|
||||
fi
|
||||
echo "Cleanup completed."
|
||||
|
||||
# Create logical replication slot for CDC
|
||||
create-replication-slot slot_name='' db_name='postgres' plugin='pgoutput':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
PLUGIN=${PLUGIN:-"{{ plugin }}"}
|
||||
while [ -z "${SLOT_NAME}" ]; do
|
||||
SLOT_NAME=$(gum input --prompt="Replication slot name: " --width=100 \
|
||||
--placeholder="e.g., airbyte_slot")
|
||||
done
|
||||
if kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -tAc \
|
||||
"SELECT slot_name FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" | grep -q "${SLOT_NAME}"; then
|
||||
echo "Replication slot '${SLOT_NAME}' already exists."
|
||||
exit 0
|
||||
fi
|
||||
echo "Creating replication slot '${SLOT_NAME}' with plugin '${PLUGIN}'..."
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c \
|
||||
"SELECT pg_create_logical_replication_slot('${SLOT_NAME}', '${PLUGIN}');"
|
||||
echo "Replication slot '${SLOT_NAME}' created."
|
||||
|
||||
# Delete replication slot
|
||||
delete-replication-slot slot_name='' db_name='postgres':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
while [ -z "${SLOT_NAME}" ]; do
|
||||
SLOT_NAME=$(gum input --prompt="Replication slot name to delete: " --width=100)
|
||||
done
|
||||
if ! kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -tAc \
|
||||
"SELECT slot_name FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" | grep -q "${SLOT_NAME}"; then
|
||||
echo "Replication slot '${SLOT_NAME}' does not exist."
|
||||
exit 1
|
||||
fi
|
||||
echo "Deleting replication slot '${SLOT_NAME}'..."
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c \
|
||||
"SELECT pg_drop_replication_slot('${SLOT_NAME}');"
|
||||
echo "Replication slot '${SLOT_NAME}' deleted."
|
||||
|
||||
# List all replication slots
|
||||
list-replication-slots:
|
||||
@echo "Replication slots:"
|
||||
@kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d postgres -c \
|
||||
"SELECT slot_name, plugin, slot_type, database, active, restart_lsn FROM pg_replication_slots;"
|
||||
|
||||
# Create publication for CDC
|
||||
create-publication pub_name='' db_name='' tables='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
TABLES="${TABLES:-{{ tables }}}"
|
||||
while [ -z "${PUB_NAME}" ]; do
|
||||
PUB_NAME=$(gum input --prompt="Publication name: " --width=100 \
|
||||
--placeholder="e.g., airbyte_publication")
|
||||
done
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
if kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -tAc \
|
||||
"SELECT pubname FROM pg_publication WHERE pubname = '${PUB_NAME}';" | grep -q "${PUB_NAME}"; then
|
||||
echo "Publication '${PUB_NAME}' already exists in database '${DB_NAME}'."
|
||||
exit 0
|
||||
fi
|
||||
if [ -z "${TABLES}" ]; then
|
||||
echo "Select tables to include in publication:"
|
||||
echo "1) All tables (ALL TABLES)"
|
||||
echo "2) All user tables (exclude system/internal tables)"
|
||||
echo "3) Specific tables (comma-separated list)"
|
||||
CHOICE=$(gum choose "All tables" "User tables only" "Specific tables")
|
||||
case "${CHOICE}" in
|
||||
"All tables")
|
||||
TABLES="ALL TABLES"
|
||||
;;
|
||||
"User tables only")
|
||||
# Get list of user tables (excluding _airbyte* and other system tables)
|
||||
USER_TABLES=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -tAc \
|
||||
"SELECT string_agg(tablename, ', ') FROM pg_tables
|
||||
WHERE schemaname = 'public'
|
||||
AND tablename NOT LIKE '\_%'
|
||||
AND tablename NOT LIKE 'pg_%';")
|
||||
if [ -z "${USER_TABLES}" ]; then
|
||||
echo "No user tables found in database '${DB_NAME}'"
|
||||
exit 1
|
||||
fi
|
||||
TABLES="TABLE ${USER_TABLES}"
|
||||
echo "Including tables: ${USER_TABLES}"
|
||||
;;
|
||||
"Specific tables")
|
||||
TABLES=$(gum input --prompt="Enter table names (comma-separated): " --width=100 \
|
||||
--placeholder="e.g., users, products, orders")
|
||||
TABLES="TABLE ${TABLES}"
|
||||
;;
|
||||
esac
|
||||
elif [ "${TABLES}" = "ALL" ]; then
|
||||
TABLES="ALL TABLES"
|
||||
fi
|
||||
echo "Creating publication '${PUB_NAME}' in database '${DB_NAME}'..."
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c \
|
||||
"CREATE PUBLICATION ${PUB_NAME} FOR ${TABLES};"
|
||||
if [ "${TABLES}" != "ALL TABLES" ]; then
|
||||
echo "Setting REPLICA IDENTITY for included tables..."
|
||||
TABLE_LIST=$(echo "${TABLES}" | sed 's/TABLE //')
|
||||
IFS=',' read -ra TABLE_ARRAY <<< "${TABLE_LIST}"
|
||||
for table in "${TABLE_ARRAY[@]}"; do
|
||||
table=$(echo "$table" | xargs) # trim whitespace
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c \
|
||||
"ALTER TABLE ${table} REPLICA IDENTITY FULL;" 2>/dev/null || true
|
||||
done
|
||||
fi
|
||||
echo "Publication '${PUB_NAME}' created."
|
||||
|
||||
# Delete publication
|
||||
delete-publication pub_name='' db_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
while [ -z "${PUB_NAME}" ]; do
|
||||
PUB_NAME=$(gum input --prompt="Publication name to delete: " --width=100)
|
||||
done
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
if ! kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -tAc \
|
||||
"SELECT pubname FROM pg_publication WHERE pubname = '${PUB_NAME}';" | grep -q "${PUB_NAME}"; then
|
||||
echo "Publication '${PUB_NAME}' does not exist in database '${DB_NAME}'."
|
||||
exit 1
|
||||
fi
|
||||
echo "Deleting publication '${PUB_NAME}' from database '${DB_NAME}'..."
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c \
|
||||
"DROP PUBLICATION ${PUB_NAME};"
|
||||
echo "Publication '${PUB_NAME}' deleted."
|
||||
|
||||
# List all publications in a database
|
||||
list-publications db_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
echo "Publications in database '${DB_NAME}':"
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c \
|
||||
"SELECT pubname, puballtables, pubinsert, pubupdate, pubdelete FROM pg_publication;"
|
||||
|
||||
# Grant CDC privileges to user
|
||||
grant-cdc-privileges username='' db_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
while [ -z "${USERNAME}" ]; do
|
||||
USERNAME=$(gum input --prompt="Username to grant CDC privileges: " --width=100)
|
||||
done
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
echo "Granting CDC privileges to user '${USERNAME}' on database '${DB_NAME}'..."
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c "ALTER USER ${USERNAME} WITH REPLICATION;"
|
||||
echo "Granting schema and table privileges..."
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c \
|
||||
"GRANT USAGE ON SCHEMA public TO ${USERNAME};
|
||||
GRANT CREATE ON SCHEMA public TO ${USERNAME};
|
||||
GRANT SELECT ON ALL TABLES IN SCHEMA public TO ${USERNAME};
|
||||
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO ${USERNAME};"
|
||||
echo "Granting pg_read_all_data role..."
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c "GRANT pg_read_all_data TO ${USERNAME};" 2>/dev/null || true
|
||||
echo "CDC privileges granted to user '${USERNAME}'"
|
||||
|
||||
# Setup CDC (Change Data Capture)
|
||||
setup-cdc db_name='' slot_name='' pub_name='' username='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
|
||||
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name for CDC setup: " --width=100)
|
||||
done
|
||||
while [ -z "${SLOT_NAME}" ]; do
|
||||
SLOT_NAME=$(gum input --prompt="Replication slot name: " --width=100 \
|
||||
--placeholder="e.g., demo_slot")
|
||||
done
|
||||
while [ -z "${PUB_NAME}" ]; do
|
||||
PUB_NAME=$(gum input --prompt="Publication name: " --width=100 \
|
||||
--placeholder="e.g., demo_pub")
|
||||
done
|
||||
echo "Setting up CDC on database '${DB_NAME}'..."
|
||||
WAL_LEVEL=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d postgres -tAc "SHOW wal_level;")
|
||||
if [ "${WAL_LEVEL}" != "logical" ]; then
|
||||
echo "WARNING: wal_level is '${WAL_LEVEL}', should be 'logical' for CDC"
|
||||
echo "Please ensure PostgreSQL is configured with wal_level=logical"
|
||||
exit 1
|
||||
fi
|
||||
just create-replication-slot "${SLOT_NAME}" "${DB_NAME}"
|
||||
just create-publication "${PUB_NAME}" "${DB_NAME}"
|
||||
if [ -n "${USERNAME}" ]; then
|
        echo ""
        just grant-cdc-privileges "${USERNAME}" "${DB_NAME}"
    fi
    echo ""
    echo "CDC setup completed for database '${DB_NAME}'"
    echo "  Replication Method: Logical Replication (CDC)"
    echo "  Replication Slot: ${SLOT_NAME}"
    echo "  Publication: ${PUB_NAME}"
    if [ -n "${USERNAME}" ]; then
        echo "  User with CDC privileges: ${USERNAME}"
    fi

# Cleanup CDC (removes slot and publication)
cleanup-cdc db_name='' slot_name='' pub_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
    PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}

    while [ -z "${DB_NAME}" ]; do
        DB_NAME=$(gum input --prompt="Database name for CDC cleanup: " --width=100)
    done
    while [ -z "${SLOT_NAME}" ]; do
        SLOT_NAME=$(gum input --prompt="Replication slot name to delete: " --width=100 \
            --placeholder="e.g., demo_slot")
    done
    while [ -z "${PUB_NAME}" ]; do
        PUB_NAME=$(gum input --prompt="Publication name to delete: " --width=100 \
            --placeholder="e.g., demo_pub")
    done
    echo "Cleaning up CDC configuration for database '${DB_NAME}'..."

    # Check if slot is active
    SLOT_ACTIVE=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d postgres -tAc \
        "SELECT active FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" 2>/dev/null || echo "")
    if [ "${SLOT_ACTIVE}" = "t" ]; then
        echo "WARNING: Replication slot '${SLOT_NAME}' is currently active!"
        echo "Please stop any active replication connections first."
        if ! gum confirm "Proceed with deletion anyway?"; then
            echo "Cleanup cancelled"
            exit 1
        fi
    fi

    # Delete in correct order: slot first, then publication
    echo "Step 1: Deleting replication slot '${SLOT_NAME}'..."
    just delete-replication-slot "${SLOT_NAME}" "${DB_NAME}" || \
        echo "Replication slot '${SLOT_NAME}' not found or already deleted"

    echo "Step 2: Deleting publication '${PUB_NAME}'..."
    just delete-publication "${PUB_NAME}" "${DB_NAME}" || \
        echo "Publication '${PUB_NAME}' not found or already deleted"

    echo "CDC cleanup completed for database '${DB_NAME}'"

# Run psql
[no-exit-message]
psql *args='':
    @kubectl exec -it -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- psql {{ args }}

# Dump Postgres database with pg_dump
[no-cd]
dump db_name file exclude_tables='':
    #!/bin/bash
    set -euo pipefail

    DUMP_OPTIONS="-Fc"
    if [ -n "{{ exclude_tables }}" ]; then
        IFS=',' read -ra TABLES <<< "{{ exclude_tables }}"
        for table in "${TABLES[@]}"; do
            DUMP_OPTIONS="$DUMP_OPTIONS --exclude-table=$table"
        done
    fi

    kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- bash -c \
        "pg_dump -d postgresql://$(just postgres::admin-username):$(just postgres::admin-password)@localhost/{{ db_name }} $DUMP_OPTIONS > \
        /var/lib/postgresql/data/db.dump"
    kubectl cp -n ${CNPG_NAMESPACE} -c postgres \
        postgres-cluster-1:/var/lib/postgresql/data/db.dump {{ file }}
    kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- rm /var/lib/postgresql/data/db.dump

# Restore Postgres database with pg_restore
[no-cd]
restore db_name file:
    just postgres::create-db {{ db_name }}
    kubectl cp {{ file }} -n ${CNPG_NAMESPACE} -c postgres \
        postgres-cluster-1:/var/lib/postgresql/data/db.dump
    kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- bash -c \
        "pg_restore --clean --if-exists \
        -d postgresql://$(just postgres::admin-username):$(just postgres::admin-password)@localhost/{{ db_name }} \
        /var/lib/postgresql/data/db.dump"

# Enable Prometheus monitoring
enable-monitoring:
    #!/bin/bash
    set -euo pipefail
    echo "Enabling Prometheus PodMonitor for PostgreSQL cluster..."

    # Label namespace to enable monitoring
    kubectl label namespace ${CNPG_NAMESPACE} buun.channel/enable-monitoring=true --overwrite

    # Enable PodMonitor
    kubectl patch cluster postgres-cluster -n ${CNPG_NAMESPACE} --type=merge -p '{"spec":{"monitoring":{"enablePodMonitor":true}}}'

    echo "Waiting for PodMonitor to be created..."
    sleep 3

    # Add release label to PodMonitor
    kubectl label podmonitor postgres-cluster -n ${CNPG_NAMESPACE} release=kube-prometheus-stack --overwrite

    kubectl get podmonitor -n ${CNPG_NAMESPACE} -l cnpg.io/cluster=postgres-cluster
    echo "✓ PostgreSQL monitoring enabled"

# Disable Prometheus monitoring
disable-monitoring:
    #!/bin/bash
    set -euo pipefail
    echo "Disabling Prometheus PodMonitor for PostgreSQL cluster..."

    # Disable PodMonitor
    kubectl patch cluster postgres-cluster -n ${CNPG_NAMESPACE} --type=merge -p '{"spec":{"monitoring":{"enablePodMonitor":false}}}'

    # Remove namespace label
    kubectl label namespace ${CNPG_NAMESPACE} buun.channel/enable-monitoring- --ignore-not-found

    echo "✓ PostgreSQL monitoring disabled"
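Usage sketch for the recipes above (database, slot, publication, and file names are illustrative, not taken from the commit):

```
just cleanup-cdc demo_db demo_slot demo_pub   # prompts for anything left empty

# dump to a local file, excluding two hypothetical tables, then restore elsewhere
just dump demo_db ./demo_db.dump audit_log,event_log
just restore demo_db_copy ./demo_db.dump

just enable-monitoring    # PodMonitor + kube-prometheus-stack release label
```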
9
10_Postgres/pgdb-example.yaml
Normal file
@@ -0,0 +1,9 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3

  storage:
    size: 1Gi
9
11_storage_tests/foo-pv.yaml
Normal file
@@ -0,0 +1,9 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: foo-pv
spec:
  storageClassName: "longhorn"
  claimRef:
    name: foo-pvc
    namespace: foo
0
11_storage_tests/foo-pvc.yaml
Normal file
@@ -9,7 +9,7 @@ spec:
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
-  persistentVolumeReclaimPolicy: Retain # optionally 'Delete' or 'Recycle'
+  persistentVolumeReclaimPolicy: Delete # optionally 'Delete' or 'Recycle'
  storageClassName: longhorn # use the Longhorn StorageClass name
  csi:
    driver: driver.longhorn.io # the Longhorn CSI driver
@@ -1,16 +1,42 @@
apiVersion: v1
kind: Namespace
metadata:
  name: foo
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: longhorn-nginx-pvc
  namespace: foo
spec:
  storageClassName: longhorn # the same StorageClass as in the PV
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi # the requested size should match that of the PV
  # volumeName: longhorn-test-pv # the name of the PV this PVC should bind to
---
apiVersion: v1
kind: Pod
metadata:
  name: longhorn-demo
-  namespace: default
+  namespace: foo
spec:
  containers:
    - name: demo-container
      image: nginx:latest
      resources:
        requests:
          memory: "64Mi"
          cpu: "250m"
        limits:
          memory: "128Mi"
          cpu: "500m"
      volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: longhorn-volume
  volumes:
    - name: longhorn-volume
      persistentVolumeClaim:
-        claimName: longhorn-test-pvc
+        claimName: longhorn-nginx-pvc
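Review note: `foo-pv` pre-binds to a claim named `foo-pvc` via `claimRef`, while the demo Pod consumes the separate, dynamically provisioned `longhorn-nginx-pvc`; the `claimName` fix above makes the Pod reference the claim that actually exists in namespace `foo`. A quick way to verify after applying (sketch):

```
kubectl get pv foo-pv                        # PVs are cluster-scoped
kubectl -n foo get pvc                       # claims should reach STATUS=Bound
kubectl -n foo describe pod longhorn-demo    # events show the Longhorn volume attaching
```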
1
12_Authentik/README.md
Normal file
@@ -0,0 +1 @@
https://docs.goauthentik.io/install-config/install/kubernetes/#install-authentik-helm-chart
10
12_Authentik/authentik-pgdb.yaml
Normal file
@@ -0,0 +1,10 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: authentik-pgdb
  namespace: authentik
spec:
  instances: 3

  storage:
    size: 1Gi
21
12_Authentik/authentik-values.gomplate.yaml
Normal file
@@ -0,0 +1,21 @@
authentik:
  secret_key: "PleaseGenerateASecureKey"
  # This sends anonymous usage-data, stack traces on errors and
  # performance data to sentry.io, and is fully opt-in
  error_reporting:
    enabled: true
  postgresql:
    password: "ThisIsNotASecurePassword"

server:
  ingress:
    # Specify kubernetes ingress controller class name
    ingressClassName: nginx | traefik | kong
    enabled: true
    hosts:
      - authentik.domain.tld

postgresql:
  enabled: true
  auth:
    password: "ThisIsNotASecurePassword"
28
12_Authentik/justfile
Normal file
@@ -0,0 +1,28 @@
set fallback := true

export AUTHENTIK_NAMESPACE := env("AUTHENTIK_NAMESPACE", "authentik")

[private]
default:
    @just --list --unsorted --list-submodules

# Add Helm repository
add-helm-repo:
    @helm repo add authentik https://charts.goauthentik.io
    @helm repo update

# Remove Helm repository
remove-helm-repo:
    @helm repo remove authentik


install:
    @just add-helm-repo
    @helm upgrade --cleanup-on-fail --install authentik authentik/authentik \
        -n ${AUTHENTIK_NAMESPACE} --create-namespace --wait \
        -f authentik-values.yaml


uninstall:
    @helm uninstall authentik -n ${AUTHENTIK_NAMESPACE} --wait
    @kubectl delete namespace ${AUTHENTIK_NAMESPACE} --ignore-not-found
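Review note: the values are committed as `authentik-values.gomplate.yaml`, but `install` passes `-f authentik-values.yaml`, so a render step is presumably expected first; also, `ingressClassName: nginx | traefik | kong` is the upstream docs placeholder and needs to be narrowed to a single class (traefik in this stack). A plausible pre-install step (assumed, not part of the commit):

```
# render the template once the environment is populated, then install
gomplate -f authentik-values.gomplate.yaml -o authentik-values.yaml
just install
```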
@@ -167,7 +167,8 @@ With these steps you have a Persistent Volume (PV) and a Persistent Vol

## Disable local-path as default
```
kubectl get storageclass

kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
```
1
Longhorn/auth
Normal file
@@ -0,0 +1 @@
basti:$apr1$N23gJpBe$CYlDcwTfp8YsQMq0UcADQ0
67
Longhorn/justfile
Normal file
@@ -0,0 +1,67 @@
set fallback:=true

export LONGHORN_NAMESPACE := env("LONGHORN_NAMESPACE","longhorn-system")
export LONGHORN_VERSION := env("LONGHORN_VERSION","1.10.1")

add-helm-repo:
    helm repo add longhorn https://charts.longhorn.io --force-update
    helm repo update

# Delete namespace
delete-namespace:
    #!/bin/bash
    set -euo pipefail
    if kubectl get namespace ${LONGHORN_NAMESPACE} &>/dev/null; then
        kubectl delete namespace ${LONGHORN_NAMESPACE} --ignore-not-found
    else
        echo "Namespace ${LONGHORN_NAMESPACE} does not exist."
    fi


install:
    #!/bin/bash
    set -euo pipefail
    just env::check

    just add-helm-repo

    helm upgrade longhorn longhorn/longhorn \
        --install \
        --cleanup-on-fail \
        --namespace ${LONGHORN_NAMESPACE} \
        --create-namespace \
        --version ${LONGHORN_VERSION} \
        --values longhorn-values.yaml

    # remove default storage class annotation from local-path storage class
    kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'

uninstall:
    #!/bin/bash
    set -euo pipefail

    for crd in $(kubectl get crd -o name | grep longhorn); do
        kubectl patch $crd -p '{"metadata":{"finalizers":[]}}' --type=merge
    done

    kubectl -n ${LONGHORN_NAMESPACE} patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag || true

    helm uninstall longhorn --namespace ${LONGHORN_NAMESPACE} || true
    just delete-namespace


install-dashboard-ingress:
    #!/bin/bash
    set -euo pipefail
    just env::check

    echo "Deploying Longhorn Dashboard Ingress with EXTERNAL_DOMAIN=${EXTERNAL_DOMAIN}"
    gomplate -f longhorn-certificate-gomplate.yaml | kubectl apply -f -
    gomplate -f longhorn-ingressroute-gomplate.yaml | kubectl apply -f -

uninstall-dashboard-ingress:
    #!/bin/bash
    set -euo pipefail

    kubectl delete -f longhorn-ingressroute-gomplate.yaml || true
    kubectl delete -f longhorn-certificate-gomplate.yaml || true
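Review note on `uninstall`: clearing the CRD finalizers and setting the `deleting-confirmation-flag` setting (`lhs` being the short name of Longhorn's Setting CRD) is what lets Longhorn's CRDs and namespace actually go away; without the flag, Longhorn refuses deletion. Typical flow (illustrative; module names as wired up in the root justfile):

```
just Longhorn::install                     # Helm install + unset local-path default
just Longhorn::install-dashboard-ingress   # requires EXTERNAL_DOMAIN in .env.local
just Longhorn::uninstall                   # force-removes finalizers, then the namespace
```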
@@ -7,7 +7,7 @@ metadata:
spec:
  secretName: longhorn-web-ui-tls
  dnsNames:
-    - longhorn-dashboard.k8s.schnrbs.work
+    - longhorn-dashboard.{{.Env.EXTERNAL_DOMAIN}}
  issuerRef:
    name: cloudflare-cluster-issuer
    kind: ClusterIssuer
@@ -7,7 +7,7 @@ spec:
  entryPoints:
    - websecure
  routes:
-    - match: Host(`longhorn-dashboard.k8s.schnrbs.work`)
+    - match: Host(`longhorn-dashboard.{{.Env.EXTERNAL_DOMAIN}}`)
      kind: Rule
      services:
        - name: longhorn-frontend
@@ -1,18 +1,6 @@
|
||||
global:
|
||||
nodeSelector:
|
||||
node.longhorn.io/create-default-disk: "true"
|
||||
|
||||
service:
|
||||
ui:
|
||||
type: NodePort
|
||||
nodePort: 30050
|
||||
manager:
|
||||
type: ClusterIP
|
||||
|
||||
# Replica count for the default Longhorn StorageClass.
|
||||
persistence:
|
||||
defaultClass: false
|
||||
defaultFsType: ext4
|
||||
defaultClassReplicaCount: 2
|
||||
reclaimPolicy: Delete
|
||||
|
||||
@@ -25,12 +13,10 @@ csi:
|
||||
|
||||
# Default replica count and storage path
|
||||
defaultSettings:
|
||||
upgradeChecker: false
|
||||
kubernetesClusterAutoscalerEnabled: false
|
||||
allowCollectingLonghornUsageMetrics: false
|
||||
createDefaultDiskLabeledNodes: true
|
||||
defaultReplicaCount: 2
|
||||
defaultDataPath: "/k8s-data"
|
||||
# defaultDataPath: "/k8s-data"
|
||||
|
||||
longhornUI:
|
||||
replicas: 1
|
||||
40
Longhorn/pod_with_pvc.yaml
Normal file
@@ -0,0 +1,40 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: longhorn-volv-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn
  resources:
    requests:
      storage: 2Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: volume-test
  namespace: default
spec:
  restartPolicy: Always
  containers:
    - name: volume-test
      image: nginx:stable-alpine
      imagePullPolicy: IfNotPresent
      livenessProbe:
        exec:
          command:
            - ls
            - /data/lost+found
        initialDelaySeconds: 5
        periodSeconds: 5
      volumeMounts:
        - name: volv
          mountPath: /data
      ports:
        - containerPort: 80
  volumes:
    - name: volv
      persistentVolumeClaim:
        claimName: longhorn-volv-pvc
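Review note: the `ls /data/lost+found` liveness probe is the standard Longhorn example trick: `lost+found` only exists on a formatted ext4 filesystem, so the probe fails (and the Pod restarts) whenever the Longhorn volume is not actually mounted at `/data`. A quick manual check after applying (sketch):

```
kubectl apply -f Longhorn/pod_with_pvc.yaml
kubectl exec volume-test -- df -h /data   # should show the 2Gi Longhorn volume, not the node root fs
```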
File diff suppressed because it is too large
@@ -5,5 +5,4 @@ metadata:
  namespace: metallb-system
spec:
  addresses:
-    # - 192.168.178.220-192.168.178.225 #pve-82
-    - 192.168.178.160-192.168.178.180 #pve-83
+    - {{ .Env.METALLB_ADDRESS_RANGE }}
66
Metallb_Setup/justfile
Normal file
@@ -0,0 +1,66 @@
set fallback := true

export K8S_CONTEXT := env("K8S_CONTEXT", "")
export SERVER_IP := env("K3S_SERVER_IP","192.168.178.45")
export USER := env("K3S_USER","basti")


[private]
default:
    @just --list --unsorted --list-submodules


install:
    #!/bin/bash
    set -euo pipefail
    just env::check

    METALLB_VERSION="v0.15.3"

    username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
    context=""
    if gum confirm "Update KUBECONFIG?"; then
        context=$(
            gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
        )
    fi

    if [ -n "${context}" ]; then
        kubectl config use-context "${context}"
    fi

    kubectl apply -f "https://raw.githubusercontent.com/metallb/metallb/${METALLB_VERSION}/config/manifests/metallb-native.yaml"
    gum spin --spinner dot --title "Waiting for MetalLB to be ready..." -- kubectl wait --namespace metallb-system --for=condition=available deployment --all --timeout=120s
    echo "MetalLB ${METALLB_VERSION} installed successfully."

    gomplate -f address-pool.gomplate.yaml | kubectl apply -f -
    echo "Address pool configured."

    kubectl apply -f advertisement.yaml
    echo "Advertisement created."

uninstall:
    #!/bin/bash
    set -euo pipefail
    just env::check

    kubectl get namespace metallb-system &>/dev/null && kubectl delete ns metallb-system

test-deployment:
    #!/bin/bash
    set -euo pipefail
    just env::check

    kubectl apply -f test-deployment.yaml

    echo "Test deployment created. You can check the service with 'kubectl get svc nginx -o wide -n test'."

    echo "To clean up, run 'just test-deployment-cleanup'."

test-deployment-cleanup:
    #!/bin/bash
    set -euo pipefail
    just env::check

    kubectl delete -f test-deployment.yaml
    echo "Test deployment and service deleted."
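Review notes: the `username` captured by `gum input` in `install` is never used afterwards and looks like a leftover from the k3sup recipe; the address pool itself is now driven entirely by `METALLB_ADDRESS_RANGE` from `.env.local`. A dry-run render of the pool (range value illustrative):

```
# preview the manifest MetalLB will get, without applying it
METALLB_ADDRESS_RANGE="192.168.178.160-192.168.178.180" \
    gomplate -f address-pool.gomplate.yaml
```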
@@ -9,4 +9,4 @@ spec:
    name: cloudflare-cluster-issuer
    kind: ClusterIssuer
  dnsNames:
-    - schnipo.k8s.schnrbs.work
+    - schnipo.{{.Env.EXTERNAL_DOMAIN}}
43
Test-Deployment/dishes-deployment.yaml
Normal file
@@ -0,0 +1,43 @@
apiVersion: v1
kind: Namespace
metadata:
  name: dishes

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dish-schnipo
  namespace: dishes
  labels:
    app: dishes
spec:
  replicas: 3
  selector:
    matchLabels:
      app: dishes
  template:
    metadata:
      labels:
        app: dishes
    spec:
      containers:
        - name: dish-schnipo
          image: bschnorbus/dish-schnipo
          ports:
            - containerPort: 8080

---
apiVersion: v1
kind: Service
metadata:
  name: dish-schnipo
  namespace: dishes
spec:
  type: ClusterIP
  selector:
    app: dishes
  ports:
    - port: 80
      targetPort: 8080
      protocol: TCP
@@ -7,10 +7,12 @@ spec:
  entryPoints:
    - websecure
  routes:
-    - match: Host(`schnipo.k8s.schnrbs.work`)
+    - match: Host(`schnipo.{{.Env.EXTERNAL_DOMAIN}}`)
      kind: Rule
      services:
        - name: schnipo
-          port: 8080
+          port: 80
+          targetPort: 8080
  tls:
    secretName: schnipo-certificate-secret
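Review note: with `port: 80` the route now addresses the Service's exposed port (which the `dish-schnipo` Service maps to container port 8080) instead of a port the Service does not expose. An end-to-end check once DNS and the certificate are in place (hostname depends on your EXTERNAL_DOMAIN):

```
curl -sk "https://schnipo.${EXTERNAL_DOMAIN}/" -o /dev/null -w '%{http_code}\n'
```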
37
Test-Deployment/justfile
Normal file
@@ -0,0 +1,37 @@
set fallback:=true

export EXTERNAL := env("EXTERNAL_DOMAIN", "")

install-nginx:
    #!/bin/bash
    set -euo pipefail
    just env::check

    if [ -z "${EXTERNAL}" ]; then
        echo "ERROR: EXTERNAL_DOMAIN environment variable is not set."
        exit 1
    fi

    kubectl apply -f nginx-deployment.yaml
    gomplate -f nginx-certificate-gomplate.yaml | kubectl apply -f -
    gomplate -f nginx-ingress-route-gomplate.yaml | kubectl apply -f -

install-dishes:
    #!/bin/bash
    set -euo pipefail
    just env::check

    if [ -z "${EXTERNAL}" ]; then
        echo "ERROR: EXTERNAL_DOMAIN environment variable is not set."
        exit 1
    fi

    kubectl apply -f dishes-deployment.yaml
    gomplate -f dishes-certificate-gomplate.yaml | kubectl apply -f -
    gomplate -f dishes-ingress-route-gomplate.yaml | kubectl apply -f -

remove-nginx:
    kubectl delete ns test || true

remove-dishes:
    kubectl delete ns dishes || true
@@ -9,4 +9,4 @@ spec:
    name: cloudflare-cluster-issuer
    kind: ClusterIssuer
  dnsNames:
-    - nginx-test.k8s.schnrbs.work
+    - nginx-test.{{.Env.EXTERNAL_DOMAIN}}
43
Test-Deployment/nginx-deployment.yaml
Normal file
@@ -0,0 +1,43 @@
apiVersion: v1
kind: Namespace
metadata:
  name: test

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: test
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:latest
          ports:
            - containerPort: 80

---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: test
spec:
  type: LoadBalancer
  selector:
    app: nginx
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
@@ -7,7 +7,7 @@ spec:
  entryPoints:
    - websecure
  routes:
-    - match: Host(`nginx-test.k8s.schnrbs.work`)
+    - match: Host(`nginx-test.{{.Env.EXTERNAL_DOMAIN}}`)
      kind: Rule
      services:
        - name: nginx
@@ -7,7 +7,7 @@ metadata:
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
  rules:
-    - host: nginx-test.k8s.schnrbs.work
+    - host: nginx-test.int.schnrbs.work
      http:
        paths:
          - path: /
@@ -19,5 +19,5 @@ spec:
              number: 80
  tls:
    - hosts:
-        - nginx-test.k8s.schnrbs.work
+        - nginx-test.int.schnrbs.work
      secretName: nginx-certificate-secret
@@ -24,13 +24,15 @@ i.e. general issuer for all namespaces in cluster.

## Test Deployment
```
k create ns test
kubectl create deploy nginx --image=nginx -n test
k create svc -n test clusterip nginx --tcp=80
k scale --replicas=3 deployment/nginx -n test
```

## Install Traefik & Cert-Manager
```
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml

@@ -40,23 +42,25 @@ helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml

-k apply cert-manager-issuer-secret.yaml
+k apply -f cert-manager-issuer-secret.yaml
k get secret -n cert-manager

k apply -f cert-manager-cluster-issuer.yaml
```

## Switch Test Deployment to https

```
k apply -f test/nginx-certificate.yaml
k apply -f test/nginx-ingress.yaml
```

## Troubleshooting steps

```
k get po -n test -o wide
k create svc -n test clusterip nginx
k create svc -n test clusterip nginx --tcp=80
@@ -70,12 +74,11 @@ k apply -f traefik_lempa/nginx-ingress.yaml
k get svc -n test
k get ingress
k get ingress -n test
```

```
k get svc ingressRoute
k get svc ingressRoutes
k get svc ingressroutes.traefik.io
@@ -90,3 +93,4 @@ k apply -f traefik_lempa/cert-manager-issuer-secret.yaml
k get secret
k get secrets
k get clusterissuers.cert-manager.io
```
@@ -4,7 +4,7 @@ metadata:
  name: cloudflare-cluster-issuer
spec:
  acme:
-    email: hello@schnorbus.net
+    email: {{ .Env.ACME_EMAIL }}
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: cloudflare-acme-key
@@ -5,4 +5,4 @@ metadata:
  namespace: cert-manager
type: Opaque
stringData:
-  api-token: DgU4SMUpQVAoS8IisGxnSQCUI7PbclhvegdqF9I1
+  api-token: {{ .Env.CLOUDFLARE_API_TOKEN }}
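Review note: moving the Cloudflare token into `{{ .Env.CLOUDFLARE_API_TOKEN }}` is the right direction, but the literal token removed here remains in git history, so it should be treated as leaked and rotated. Applying the templated secret then looks like this (sketch):

```
# the token comes from .env.local / the environment, never from the manifest
export CLOUDFLARE_API_TOKEN='<rotated-token>'
gomplate -f cert-manager-issuer-secret-gomplate.yaml | kubectl apply -f -
```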
62
Traefik/justfile
Normal file
@@ -0,0 +1,62 @@
set fallback:=true

export CERT_MANAGER_NAMESPACE := env("CERT_MANAGER_NAMESPACE", "cert-manager")
export TRAEFIK_NAMESPACE := env("TRAEFIK_NAMESPACE", "traefik")

add-helm-repos:
    helm repo add traefik https://helm.traefik.io/traefik --force-update
    helm repo add jetstack https://charts.jetstack.io --force-update
    helm repo update

install:
    #!/bin/bash
    set -euo pipefail
    just env::check

    just add-helm-repos

    helm upgrade traefik traefik/traefik \
        --install \
        --cleanup-on-fail \
        --namespace ${TRAEFIK_NAMESPACE} \
        --create-namespace \
        --values traefik-values.yaml

    helm upgrade cert-manager jetstack/cert-manager \
        --install \
        --cleanup-on-fail \
        --namespace ${CERT_MANAGER_NAMESPACE} \
        --create-namespace \
        --values cert-manager-values.yaml

uninstall:
    #!/bin/bash
    set -euo pipefail
    just env::check

    helm uninstall traefik --namespace ${TRAEFIK_NAMESPACE} || true
    helm uninstall cert-manager --namespace ${CERT_MANAGER_NAMESPACE} || true

setup-cluster-issuer:
    #!/bin/bash
    set -euo pipefail
    just env::check
    gomplate -f cert-manager-issuer-secret-gomplate.yaml | kubectl apply -f -
    gomplate -f cert-manager-cluster-issuer-gomplate.yaml | kubectl apply -f -

# Get status of cert-manager components
status:
    #!/bin/bash
    set -euo pipefail
    echo "=== cert-manager Components Status ==="
    echo ""
    echo "Namespace: ${CERT_MANAGER_NAMESPACE}"
    echo ""
    echo "Pods:"
    kubectl get pods -n ${CERT_MANAGER_NAMESPACE}
    echo ""
    echo "Services:"
    kubectl get services -n ${CERT_MANAGER_NAMESPACE}
    echo ""
    echo "CRDs:"
    kubectl get crd | grep cert-manager.io
@@ -11,5 +11,5 @@ ingressRoute:
  dashboard:
    enabled: true
    entryPoints: [web, websecure]
-    matchRule: Host(`traefik-dashboard.k8s.schnrbs.work`)
+    matchRule: Host(`traefik-dashboard.{{ .Env.EXTERNAL_DOMAIN }}`)
11
env/env.local.gomplate
vendored
Normal file
@@ -0,0 +1,11 @@
# shellcheck disable=all
K8S_CONTEXT={{ .Env.K8S_CONTEXT }}
K8S_MASTER_NODE_NAME={{ .Env.K8S_MASTER_NODE_NAME }}
SERVER_IP={{ .Env.SERVER_IP }}
AGENT_IP={{ .Env.AGENT_IP }}
METALLB_ADDRESS_RANGE={{ .Env.METALLB_ADDRESS_RANGE }}
CLOUDFLARE_API_TOKEN={{ .Env.CLOUDFLARE_API_TOKEN }}
ACME_EMAIL={{ .Env.ACME_EMAIL }}
EXTERNAL_DOMAIN={{ .Env.EXTERNAL_DOMAIN }}
VAULT_HOST={{ .Env.VAULT_HOST }}
AUTHENTIK_HOST={{ .Env.AUTHENTIK_HOST }}
144
env/justfile
vendored
Normal file
@@ -0,0 +1,144 @@
set fallback := true

export ENV_FILE := ".env.local"
export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export SERVER_IP := env("SERVER_IP", "")
export AGENT_IP := env("AGENT_IP", "")

check:
    #!/bin/bash
    set -euo pipefail
    if [ -z "${K8S_CONTEXT}" ]; then
        echo "K8S_CONTEXT is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi
    if [ -z "${K8S_MASTER_NODE_NAME}" ]; then
        echo "K8S_MASTER_NODE_NAME is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi
    if [ -z "${SERVER_IP}" ]; then
        echo "SERVER_IP is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi
    if [ -z "${AGENT_IP}" ]; then
        echo "AGENT_IP is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi

setup:
    #!/bin/bash
    set -euo pipefail
    if [ -f ../.env.local ]; then
        echo ".env.local already exists." >&2
        if gum confirm "Do you want to overwrite it?"; then
            K8S_CONTEXT=""
            SERVER_IP=""
            AGENT_IP=""
        elif [[ $? -eq 130 ]]; then
            echo "Setup cancelled by user." >&2
            exit 1
        else
            echo "Aborting setup." >&2
            exit 1
        fi
    fi
    while [ -z "${K8S_CONTEXT}" ]; do
        if ! K8S_CONTEXT=$(
            gum input --prompt="Context name: " \
                --width=100 --placeholder="context"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${K8S_MASTER_NODE_NAME}" ]; do
        if ! K8S_MASTER_NODE_NAME=$(
            gum input --prompt="Master Node Hostname: " \
                --width=100 --placeholder="Master Node Name"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${SERVER_IP}" ]; do
        if ! SERVER_IP=$(
            gum input --prompt="IP of Server/Master Node: " \
                --width=100 --placeholder="Master Node IP"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${AGENT_IP}" ]; do
        if ! AGENT_IP=$(
            gum input --prompt="IP of Agent Node: " \
                --width=100 --placeholder="Agent Node IP"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done

    while [ -z "${METALLB_ADDRESS_RANGE}" ]; do
        if ! METALLB_ADDRESS_RANGE=$(
            gum input --prompt="IP Range for LoadBalancer: " \
                --width=100 --placeholder="[x.x.x.x-y.y.y.y]"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done

    while [ -z "${CLOUDFLARE_API_TOKEN}" ]; do
        if ! CLOUDFLARE_API_TOKEN=$(
            gum input --prompt="Cloudflare API Token: " \
                --width=100 --placeholder="API Token" --password
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done

    while [ -z "${ACME_EMAIL}" ]; do
        if ! ACME_EMAIL=$(
            gum input --prompt="ACME Email for Cert-Manager: " \
                --width=100 --placeholder="Email"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done

    while [ -z "${EXTERNAL_DOMAIN}" ]; do
        if ! EXTERNAL_DOMAIN=$(
            gum input --prompt="External Domain: " \
                --width=100 --placeholder="Domain"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done

    while [ -z "${VAULT_HOST}" ]; do
        if ! VAULT_HOST=$(
            gum input --prompt="Vault hostname: " \
                --width=100 --placeholder="vault"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${AUTHENTIK_HOST}" ]; do
        if ! AUTHENTIK_HOST=$(
            gum input --prompt="Authentik hostname: " \
                --width=100 --placeholder="authentik"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done

    echo "Generating .env.local file..."
    rm -f ../.env.local
    gomplate -f env.local.gomplate -o ../.env.local
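Review note (possible bugs, not fixed in this commit): under `set -euo pipefail`, the first `[ -z "${METALLB_ADDRESS_RANGE}" ]` test aborts with an unbound-variable error, because that variable, unlike `K8S_CONTEXT`/`SERVER_IP`/`AGENT_IP`, has no top-level `export ... := env(...)` default; the same applies to `CLOUDFLARE_API_TOKEN`, `ACME_EMAIL`, `EXTERNAL_DOMAIN`, `VAULT_HOST`, and `AUTHENTIK_HOST`. The prompted values also need to be exported for gomplate's `.Env` lookups to see them. A minimal defensive pattern would be:

```
# default-expand so `set -u` does not abort on first use,
# and export so gomplate's .Env can read the value
METALLB_ADDRESS_RANGE="${METALLB_ADDRESS_RANGE:-}"
while [ -z "${METALLB_ADDRESS_RANGE}" ]; do
    METALLB_ADDRESS_RANGE=$(gum input --prompt="IP Range for LoadBalancer: " --width=100)
done
export METALLB_ADDRESS_RANGE
```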
@@ -14,3 +14,4 @@ flux bootstrap gitea --repository=k3s-homelab --branch=main --personal --owner b

https://bash.ghost.io/secure-kubernetes-secrets-disaster-recovery-with-sops-gitops-fluxcd/

+"Make a 4×4 grid starting with the 1880s. In each section, I should appear styled according to that decade (clothing, hairstyle, facial hair, accessories). Use colors, background, & film style accordingly."
17
justfile
Normal file
@@ -0,0 +1,17 @@
set dotenv-filename := ".env.local"

export PATH := "./node_modules/.bin:" + env_var('PATH')

[private]
default:
    @just --list --unsorted --list-submodules

mod env
mod BasicSetup '01_Basic_Setup'
mod MetalLbSetup 'Metallb_Setup'
mod Traefik
mod Longhorn
mod Vault '08_Vault'
mod ExternalSecrets '09_ExternalSecrets'
mod Postgres '10_Postgres'
mod KubePrometheusStack '07_KubePrometheusStack'
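With the root justfile in place, every component becomes a module invoked via the `::` path syntax, e.g.:

```
just env::setup              # interactive .env.local generation
just MetalLbSetup::install   # MetalLB + address pool + advertisement
just Postgres::psql          # open psql inside the CNPG primary
```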
@@ -1,25 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: longhorn-web-ui
  namespace: longhorn-system
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
  rules:
    - host: longhorn.k8s.internal.schnrbs.work
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: longhorn-frontend
                port:
                  number: 80
  tls:
    - hosts:
        - longhorn.k8s.internal.schnrbs.work
      secretName: longhorn-web-ui-tls