Compare commits
46 Commits
f58fad216a
...
wip/VPA
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9363e38267 | ||
|
|
7e47ce2787 | ||
|
|
2c23ac85ce | ||
|
|
c6d2b3de3c | ||
|
|
a5d220418e | ||
|
|
3226e527f4 | ||
|
|
71348ad7f5 | ||
|
|
40eae4f567 | ||
|
|
e7f648cf57 | ||
|
|
dce92aeb28 | ||
|
|
07e4ae31e3 | ||
|
|
5e86aafa09 | ||
|
|
4444296443 | ||
|
|
9aafb940e9 | ||
|
|
4075203b1e | ||
|
|
92decafc3f | ||
|
|
09e1bbbc52 | ||
|
|
48d930fedc | ||
|
|
1f82ce8d02 | ||
|
|
a551f2e4ca | ||
|
|
a80dce42b0 | ||
|
|
63243c6d2e | ||
|
|
1f9f7e275c | ||
|
|
09026d6812 | ||
|
|
24991fce90 | ||
|
|
65a59d2d0c | ||
|
|
85fb620e39 | ||
|
|
b56e02d2ed | ||
|
|
15cb2ce903 | ||
|
|
b47fe8f66b | ||
|
|
c5810661e5 | ||
|
|
7ddc08d622 | ||
|
|
c5aa7f8105 | ||
|
|
0c6cfedcde | ||
| 2be83a977a | |||
|
|
4f5a18c84c | ||
|
|
7a54346331 | ||
|
|
5abc0de38a | ||
|
|
29674ae504 | ||
|
|
6abe5d1a8f | ||
|
|
67a6c414f2 | ||
|
|
08212c26a6 | ||
|
|
e4adbfd0b2 | ||
|
|
d7db562a23 | ||
|
|
7896130d05 | ||
|
|
efcb4ee172 |
1
.gitignore
vendored
Normal file
1
.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
.env.local
|
||||
@@ -34,4 +34,30 @@ Rancher Installation
|
||||
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
|
||||
|
||||
|
||||
# Prevent scheduling on master (optional)
|
||||
|
||||
|
||||
```
|
||||
kubectl taint nodes master node-role.kubernetes.io/master=:NoSchedule
|
||||
```
|
||||
|
||||
# Just Setup // K3sup
|
||||
|
||||
export SERVER_IP=192.168.178.45
|
||||
export AGENT_IP=192.168.178.75
|
||||
export USER=basti
|
||||
|
||||
|
||||
k3sup install \
|
||||
--cluster \
|
||||
--ip 192.168.178.45 \
|
||||
--user $USER \
|
||||
--merge \
|
||||
--local-path $HOME/.kube/config \
|
||||
--context my-k3s
|
||||
|
||||
k3sup join \
|
||||
--ip $AGENT_IP \
|
||||
--server-ip $SERVER_IP \
|
||||
--user $USER
|
||||
|
||||
|
||||
148
01_Basic_Setup/justfile
Normal file
148
01_Basic_Setup/justfile
Normal file
@@ -0,0 +1,148 @@
|
||||
set fallback := true
|
||||
|
||||
export K8S_CONTEXT := env("K8S_CONTEXT", "")
|
||||
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
|
||||
export EXTERNAL_K8S_HOST := env("EXTERNAL_K8S_HOST", "")
|
||||
export KEYCLOAK_HOST := env("KEYCLOAK_HOST", "")
|
||||
export KEYCLOAK_REALM := env("KEYCLOAK_REALM", "buunstack")
|
||||
export K8S_OIDC_CLIENT_ID := env('K8S_OIDC_CLIENT_ID', "k8s")
|
||||
export K3S_ENABLE_REGISTRY := env("K3S_ENABLE_REGISTRY", "true")
|
||||
export SERVER_IP := env("K3S_SERVER_IP","192.168.178.45")
|
||||
export AGENT_IP := env("K3S_AGENT_IP","192.168.178.75")
|
||||
export USER := env("K3S_USER","basti")
|
||||
|
||||
[private]
|
||||
default:
|
||||
@just --list --unsorted --list-submodules
|
||||
|
||||
install:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
|
||||
kubeconfig=""
|
||||
context=""
|
||||
if gum confirm "Update KUBECONFIG?"; then
|
||||
kubeconfig=$(
|
||||
gum input --prompt="KUBECONFIG file: " --value="${HOME}/.kube/config" --width=100
|
||||
)
|
||||
context=$(
|
||||
gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
|
||||
)
|
||||
fi
|
||||
|
||||
args=(
|
||||
"install"
|
||||
"--context" "${context}"
|
||||
"--host" "${K8S_MASTER_NODE_NAME}"
|
||||
"--user" "${username}"
|
||||
"--no-extras" #
|
||||
)
|
||||
|
||||
if [ -n "${kubeconfig}" ]; then
|
||||
mkdir -p "$(dirname "${kubeconfig}")"
|
||||
args+=("--local-path" "${kubeconfig}" "--merge")
|
||||
fi
|
||||
echo "Running: k3sup ${args[@]}"
|
||||
k3sup "${args[@]}"
|
||||
|
||||
if [ -n "${context}" ]; then
|
||||
kubectl config use-context "${context}"
|
||||
fi
|
||||
|
||||
if [ "${K3S_ENABLE_REGISTRY}" = "true" ]; then
|
||||
echo "Setting up local Docker registry..."
|
||||
|
||||
# Deploy Docker registry to cluster
|
||||
kubectl apply -f ./registry/registry.yaml
|
||||
|
||||
# Set Pod Security Standard for registry namespace
|
||||
kubectl label namespace registry pod-security.kubernetes.io/enforce=restricted --overwrite
|
||||
|
||||
# Wait for registry deployment
|
||||
echo "Waiting for registry to be ready..."
|
||||
kubectl wait --for=condition=available --timeout=60s deployment/registry -n registry
|
||||
|
||||
# Configure registries.yaml for k3s
|
||||
just configure-registry
|
||||
|
||||
echo "✓ Local Docker registry deployed and configured"
|
||||
echo ""
|
||||
echo "Registry accessible at:"
|
||||
echo " localhost:30500"
|
||||
echo ""
|
||||
echo "Usage:"
|
||||
echo " export DOCKER_HOST=ssh://${K8S_MASTER_NODE_NAME}"
|
||||
echo " docker build -t localhost:30500/myapp:latest ."
|
||||
echo " docker push localhost:30500/myapp:latest"
|
||||
echo " kubectl run myapp --image=localhost:30500/myapp:latest"
|
||||
fi
|
||||
|
||||
echo "k3s cluster installed on ${K8S_MASTER_NODE_NAME}."
|
||||
|
||||
|
||||
uninstall:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
if gum confirm "Uninstall k3s from ${K8S_MASTER_NODE_NAME}?"; then
|
||||
|
||||
if gum confirm "Also remove Agent node at ${AGENT_IP}?"; then
|
||||
echo "Removing Agent node at ${AGENT_IP}..."
|
||||
ssh "${AGENT_IP}" "/usr/local/bin/k3s-agent-uninstall.sh"
|
||||
fi
|
||||
|
||||
echo "Removing content of Server node..."
|
||||
ssh "${K8S_MASTER_NODE_NAME}" "/usr/local/bin/k3s-uninstall.sh"
|
||||
echo "Cleaning up kubeconfig entries..."
|
||||
cluster_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.cluster // empty")
|
||||
user_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.user // empty")
|
||||
if kubectl config get-contexts "${K8S_CONTEXT}" &>/dev/null; then
|
||||
kubectl config delete-context "${K8S_CONTEXT}"
|
||||
echo "Deleted context: ${K8S_CONTEXT}"
|
||||
fi
|
||||
if [ -n "${cluster_name}" ] && kubectl config get-clusters | grep -q "^${cluster_name}$"; then
|
||||
kubectl config delete-cluster "${cluster_name}"
|
||||
echo "Deleted cluster: ${cluster_name}"
|
||||
fi
|
||||
if [ -n "${user_name}" ] && kubectl config get-users | grep -q "^${user_name}$"; then
|
||||
kubectl config delete-user "${user_name}"
|
||||
echo "Deleted user: ${user_name}"
|
||||
fi
|
||||
echo "k3s cluster uninstalled from ${K8S_CONTEXT}."
|
||||
else
|
||||
echo "Uninstallation cancelled." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
add-agent:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
|
||||
new_agent_ip=$(gum input --prompt="Agent IP to join cluster: " --value="${AGENT_IP}" --width=100)
|
||||
|
||||
args=(
|
||||
"join"
|
||||
"--ip" "${new_agent_ip}"
|
||||
"--server-ip" "${SERVER_IP}"
|
||||
"--user" "${username}"
|
||||
)
|
||||
|
||||
|
||||
echo "Running: k3sup ${args[*]}"
|
||||
k3sup "${args[@]}"
|
||||
echo "Agent node at ${new_agent_ip} added to cluster."
|
||||
|
||||
# Configure k3s to use local registry
|
||||
configure-registry:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
echo "Configuring k3s registries.yaml..."
|
||||
|
||||
ssh "${K8S_MASTER_NODE_NAME}" "sudo mkdir -p /etc/rancher/k3s"
|
||||
gomplate -f ./registry/registries.gomplate.yaml | ssh "${K8S_MASTER_NODE_NAME}" "sudo tee /etc/rancher/k3s/registries.yaml > /dev/null"
|
||||
|
||||
echo "Restarting k3s to apply registry configuration..."
|
||||
ssh "${K8S_MASTER_NODE_NAME}" "sudo systemctl restart k3s"
|
||||
echo "✓ Registry configuration applied"
|
||||
4
01_Basic_Setup/registry/registries.gomplate.yaml
Normal file
4
01_Basic_Setup/registry/registries.gomplate.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
configs:
|
||||
"localhost:30500":
|
||||
tls:
|
||||
insecure_skip_verify: true
|
||||
109
01_Basic_Setup/registry/registry.yaml
Normal file
109
01_Basic_Setup/registry/registry.yaml
Normal file
@@ -0,0 +1,109 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: registry
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: registry
|
||||
namespace: registry
|
||||
labels:
|
||||
app: registry
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: registry
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: registry
|
||||
spec:
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
fsGroup: 65534
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
containers:
|
||||
- name: registry
|
||||
image: registry:2
|
||||
ports:
|
||||
- containerPort: 5000
|
||||
name: http
|
||||
resources:
|
||||
requests:
|
||||
cpu: 25m
|
||||
memory: 128Mi
|
||||
limits:
|
||||
cpu: 2000m
|
||||
memory: 20Gi
|
||||
env:
|
||||
- name: REGISTRY_STORAGE_DELETE_ENABLED
|
||||
value: "true"
|
||||
- name: REGISTRY_HTTP_ADDR
|
||||
value: "0.0.0.0:5000"
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
volumeMounts:
|
||||
- name: registry-data
|
||||
mountPath: /var/lib/registry
|
||||
- name: tmp
|
||||
mountPath: /tmp
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /v2/
|
||||
port: 5000
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /v2/
|
||||
port: 5000
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
volumes:
|
||||
- name: registry-data
|
||||
emptyDir: {}
|
||||
- name: tmp
|
||||
emptyDir: {}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: registry
|
||||
namespace: registry
|
||||
labels:
|
||||
app: registry
|
||||
spec:
|
||||
selector:
|
||||
app: registry
|
||||
ports:
|
||||
- port: 5000
|
||||
targetPort: 5000
|
||||
name: http
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: registry-nodeport
|
||||
namespace: registry
|
||||
labels:
|
||||
app: registry
|
||||
spec:
|
||||
selector:
|
||||
app: registry
|
||||
ports:
|
||||
- port: 5000
|
||||
targetPort: 5000
|
||||
nodePort: 30500
|
||||
name: http
|
||||
type: NodePort
|
||||
12
07_KubePrometheusStack/grafana-certificate.gomplate.yaml
Normal file
12
07_KubePrometheusStack/grafana-certificate.gomplate.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: grafana-ingress-certificate
|
||||
namespace: {{.Env.PROMETHEUS_NAMESPACE}}
|
||||
spec:
|
||||
secretName: grafana-certificate-secret
|
||||
issuerRef:
|
||||
name: cloudflare-cluster-issuer
|
||||
kind: ClusterIssuer
|
||||
dnsNames:
|
||||
- {{.Env.GRAFANA_HOST}}
|
||||
38
07_KubePrometheusStack/justfile
Normal file
38
07_KubePrometheusStack/justfile
Normal file
@@ -0,0 +1,38 @@
|
||||
set fallback := true
|
||||
|
||||
export PROMETHEUS_NAMESPACE := env("PROMETHEUS_NAMESPACE", "monitoring")
|
||||
export GRAFANA_HOST := env("GRAFANA_HOST", "")
|
||||
|
||||
[private]
|
||||
default:
|
||||
@just --list --unsorted --list-submodules
|
||||
|
||||
|
||||
add-helm-repo:
|
||||
@helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
|
||||
@helm repo update
|
||||
|
||||
|
||||
install:
|
||||
just add-helm-repo
|
||||
|
||||
gomplate -f kube-stack-config-values.gomplate.yaml -o kube-stack-config-values.yaml
|
||||
|
||||
@helm upgrade --cleanup-on-fail --install kube-prometheus-stack prometheus-community/kube-prometheus-stack \
|
||||
--namespace ${PROMETHEUS_NAMESPACE} \
|
||||
--create-namespace \
|
||||
--wait \
|
||||
-f kube-stack-config-values.yaml
|
||||
|
||||
echo "kubectl port-forward svc/kube-prometheus-stack-grafana 8080:80 -n ${PROMETHEUS_NAMESPACE}"
|
||||
echo "kubectl port-forward svc/kube-prometheus-stack-prometheus 9090 -n ${PROMETHEUS_NAMESPACE}"
|
||||
echo "kubectl port-forward svc/kube-prometheus-stack-alertmanager 9093 -n ${PROMETHEUS_NAMESPACE}"
|
||||
|
||||
echo "Get Grafana Password:"
|
||||
echo "kubectl get secret --namespace monitoring -l app.kubernetes.io/component=admin-secret -o jsonpath=\"{.items[0].data.admin-password}\" | base64 --decode ; echo"
|
||||
|
||||
gomplate -f ./grafana-certificate.gomplate.yaml | kubectl apply -f -
|
||||
|
||||
|
||||
uninstall:
|
||||
helm uninstall kube-prometheus-stack -n ${PROMETHEUS_NAMESPACE}
|
||||
@@ -0,0 +1,23 @@
|
||||
grafana:
|
||||
enabled: true
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
ingressClassName: traefik
|
||||
annotations:
|
||||
traefik.ingress.kubernetes.io/router.entrypoints: websecure
|
||||
hosts:
|
||||
- {{ .Env.GRAFANA_HOST }}
|
||||
tls:
|
||||
- secretName: grafana-certificate-secret
|
||||
- hosts:
|
||||
- {{ .Env.GRAFANA_HOST }}
|
||||
annotations:
|
||||
traefik.ingress.kubernetes.io/router.tls: "true"
|
||||
traefik.ingress.kubernetes.io/router.tls.certresolver: "" # empty = use secretName, not its own resolver
|
||||
|
||||
grafana.ini:
|
||||
server:
|
||||
domain: {{ .Env.GRAFANA_HOST }}
|
||||
root_url: https://{{ .Env.GRAFANA_HOST }}
|
||||
serve_from_sub_path: false
|
||||
265
08_Vault/README.md
Normal file
265
08_Vault/README.md
Normal file
@@ -0,0 +1,265 @@
|
||||
# Helm
|
||||
|
||||
## Installation
|
||||
helm repo add hashicorp https://helm.releases.hashicorp.com
|
||||
|
||||
helm install vault hashicorp/vault \
|
||||
--set='server.dev.enabled=true' \
|
||||
--set='ui.enabled=true' \
|
||||
--set='ui.serviceType=LoadBalancer' \
|
||||
--namespace vault \
|
||||
--create-namespace
|
||||
|
||||
Running Vault in “dev” mode. This requires no further setup, no state management, and no initialization. This is useful for experimenting with Vault without needing to unseal, store keys, et. al. All data is lost on restart — do not use dev mode for anything other than experimenting. See https://developer.hashicorp.com/vault/docs/concepts/dev-server to know more
|
||||
|
||||
|
||||
## Output
|
||||
```
|
||||
$ kubectl get all -n vault
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
pod/vault-0 1/1 Running 0 2m39s
|
||||
pod/vault-agent-injector-8497dd4457-8jgcm 1/1 Running 0 2m39s
|
||||
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
service/vault ClusterIP 10.245.225.169 <none> 8200/TCP,8201/TCP 2m40s
|
||||
service/vault-agent-injector-svc ClusterIP 10.245.32.56 <none> 443/TCP 2m40s
|
||||
service/vault-internal ClusterIP None <none> 8200/TCP,8201/TCP 2m40s
|
||||
service/vault-ui LoadBalancer 10.245.103.246 24.132.59.59 8200:31764/TCP 2m40s
|
||||
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
deployment.apps/vault-agent-injector 1/1 1 1 2m40s
|
||||
|
||||
NAME DESIRED CURRENT READY AGE
|
||||
replicaset.apps/vault-agent-injector-8497dd4457 1 1 1 2m40s
|
||||
|
||||
NAME READY AGE
|
||||
statefulset.apps/vault 1/1 2m40s
|
||||
```
|
||||
|
||||
# Configuration
|
||||
|
||||
## Enter Pod
|
||||
|
||||
kubectl exec -it vault-0 -n vault -- /bin/sh
|
||||
|
||||
## Create policy
|
||||
```
|
||||
cat <<EOF > /home/vault/read-policy.hcl
|
||||
path "secret*" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
EOF
|
||||
```
|
||||
## Apply
|
||||
|
||||
```
|
||||
vault policy write read-policy /home/vault/read-policy.hcl
|
||||
```
|
||||
|
||||
## Enable Kubernetes
|
||||
```
|
||||
vault auth enable kubernetes
|
||||
```
|
||||
|
||||
## Configure Kubernetes Auth
|
||||
|
||||
Configure to communicate with API server
|
||||
```
|
||||
vault write auth/kubernetes/config \
|
||||
token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
|
||||
kubernetes_host=https://${KUBERNETES_PORT_443_TCP_ADDR}:443 \ kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
|
||||
```
|
||||
|
||||
## Create a Role
|
||||
Create a role(vault-role) that binds the above policy to a Kubernetes service account(vault-serviceaccount) in a specific namespace. This allows the service account to access secrets stored in Vault:
|
||||
|
||||
```
|
||||
vault write auth/kubernetes/role/vault-role \
|
||||
bound_service_account_names=vault-serviceaccount \
|
||||
bound_service_account_namespaces=vault \
|
||||
policies=read-policy \
|
||||
ttl=1h
|
||||
```
|
||||
|
||||
# Create Secrets
|
||||
|
||||
## Via CLI
|
||||
|
||||
```
|
||||
vault kv put secret/login pattoken=ytbuytbytbf765rb65u56rv
|
||||
```
|
||||
|
||||
## Via UI
|
||||
|
||||
Now you can login to vault using the Token method, initially use Token=`root` to login.
|
||||
|
||||
|
||||
# Accessing Secrets in Pods
|
||||
|
||||
Using the above steps, we have installed Vault and configured a Vault role(vault-role) to allow the service account(vault-serviceaccount) to access secrets stored in Vault.
|
||||
|
||||
Additionally, we have created two secrets: login and my-first-secret with key-value pairs. Now, let's create a simple Kubernetes deployment and try to access those secrets.
|
||||
|
||||
First, let’s create a service account named vault-serviceaccount in the vault namespace. This service account is granted permissions for the Vault role as defined in the "Create a Role" step above.
|
||||
|
||||
Apply the above manifest using the below command
|
||||
```
|
||||
kubectl apply -f vault-sa.yaml -n vault
|
||||
```
|
||||
|
||||
This deployment manifest creates a single replica of an Nginx pod configured to securely fetch secrets from Vault. The Vault Agent injects the secrets login and my-first-secret into the pod according to the specified templates. The secrets are stored in the pod's filesystem and can be accessed by the application running in the container. The vault-serviceaccount service account, which has the necessary permissions, is used to authenticate with Vault.
|
||||
|
||||
|
||||
```
|
||||
kubectl apply -f vault-secret-test-deploy.yaml -n vault
|
||||
```
|
||||
These annotations are used to configure the Vault Agent to inject secrets into the pod volume.
|
||||
|
||||
-`vault.hashicorp.com/agent-inject: “true”`: Enables Vault Agent injection for this pod.
|
||||
-`vault.hashicorp.com/agent-inject-status: “update”`: Ensures the status of secret injection is updated.
|
||||
-`vault.hashicorp.com/agent-inject-secret-login: “secret/login”`: Specifies that the secret stored at `secret/login` in Vault should be injected.
|
||||
-`vault.hashicorp.com/agent-inject-template-login`: Defines the template for the injected login secret, specifying the format in which the secret will be written.
|
||||
-`vault.hashicorp.com/agent-inject-secret-my-first-secret: “secret/my-first-secret”`: Specifies that the secret stored at secret/my-first-secret in Vault should be injected.
|
||||
-`vault.hashicorp.com/agent-inject-template-my-first-secret`: Defines the template for the injected `my-first-secret`, specifying the format in which the secret will be written.
|
||||
-`vault.hashicorp.com/role: “vault-role”`: Specifies the Vault role to be used for authentication.
|
||||
-`serviceAccountName`: Uses the service account `vault-serviceaccount` which has permissions to access Vault.
|
||||
|
||||
Use the below command to check the vault secrets from the pod volume
|
||||
```
|
||||
kubectl exec -it vault-test-84d9dc9986-gcxfv -- sh -c "cat /vault/secrets/login && cat /vault/secrets/my-first-secret" -n vault
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
----
|
||||
|
||||
|
||||
Wenn du Kubernetes mit Vault konfiguriert hast, ermöglichst du eine **sichere Integration zwischen deinem Kubernetes-Cluster und HashiCorp Vault**. Hier sind die wichtigsten Szenarien und Vorteile:
|
||||
|
||||
## Hauptfunktionen
|
||||
|
||||
### 1. **Automatische Pod-Authentifizierung**
|
||||
Pods können sich automatisch bei Vault authentifizieren, ohne dass du Credentials manuell verteilen musst. Vault nutzt Kubernetes Service Accounts zur Identitätsverifizierung.
|
||||
|
||||
### 2. **Dynamische Secrets für Anwendungen**
|
||||
Anwendungen können zur Laufzeit Secrets von Vault abrufen, statt sie in ConfigMaps oder Kubernetes Secrets zu speichern.
|
||||
|
||||
## Praktische Szenarien
|
||||
|
||||
### **Szenario 1: Vault Agent Sidecar Injection**
|
||||
Vault injiziert automatisch einen Sidecar-Container, der Secrets abruft und für deine App bereitstellt:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
annotations:
|
||||
vault.hashicorp.com/agent-inject: "true"
|
||||
vault.hashicorp.com/role: "myapp"
|
||||
vault.hashicorp.com/agent-inject-secret-database: "database/creds/myapp-role"
|
||||
spec:
|
||||
serviceAccountName: myapp
|
||||
containers:
|
||||
- name: app
|
||||
image: myapp:latest
|
||||
```
|
||||
|
||||
**Ergebnis:** Datenbank-Credentials werden automatisch in `/vault/secrets/database` bereitgestellt.
|
||||
|
||||
### **Szenario 2: Dynamische Datenbank-Credentials**
|
||||
Statt statische DB-Passwörter zu verwenden, generiert Vault temporäre Credentials:
|
||||
|
||||
- Jeder Pod bekommt eigene DB-Credentials
|
||||
- Credentials sind zeitlich begrenzt (z.B. 24h)
|
||||
- Automatische Rotation
|
||||
- Einfaches Widerrufen bei Kompromittierung
|
||||
|
||||
### **Szenario 3: Externe Secrets Operator (ESO)**
|
||||
Secrets werden als native Kubernetes Secrets synchronisiert:
|
||||
|
||||
```yaml
|
||||
apiVersion: external-secrets.io/v1beta1
|
||||
kind: SecretStore
|
||||
metadata:
|
||||
name: vault-backend
|
||||
spec:
|
||||
provider:
|
||||
vault:
|
||||
server: "https://vault.test.k8s.schnrbs.work"
|
||||
path: "secret"
|
||||
auth:
|
||||
kubernetes:
|
||||
mountPath: "kubernetes"
|
||||
role: "myapp"
|
||||
```
|
||||
|
||||
### **Szenario 4: Verschlüsselung als Service**
|
||||
Anwendungen können Vault's Transit Engine nutzen:
|
||||
|
||||
```bash
|
||||
# Daten verschlüsseln ohne den Key zu kennen
|
||||
vault write transit/encrypt/my-key plaintext=$(base64 <<< "sensitive data")
|
||||
|
||||
# Daten entschlüsseln
|
||||
vault write transit/decrypt/my-key ciphertext="vault:v1:abc..."
|
||||
```
|
||||
|
||||
### **Szenario 5: PKI/Zertifikats-Management**
|
||||
Automatische Ausstellung von TLS-Zertifikaten für Service-to-Service-Kommunikation:
|
||||
|
||||
- Kurzlebige Zertifikate (z.B. 1h)
|
||||
- Automatische Rotation
|
||||
- Zero-Trust-Netzwerk
|
||||
|
||||
### **Szenario 6: Multi-Tenancy**
|
||||
Verschiedene Namespaces/Teams haben isolierten Zugriff:
|
||||
|
||||
```bash
|
||||
# Team A darf nur auf secret/team-a/* zugreifen
|
||||
# Team B darf nur auf secret/team-b/* zugreifen
|
||||
```
|
||||
|
||||
## Vorteile gegenüber Kubernetes Secrets
|
||||
|
||||
| Aspekt | Kubernetes Secrets | Vault Integration |
|
||||
|--------|-------------------|-------------------|
|
||||
| Verschlüsselung at rest | Optional, etcd-Ebene | Immer, zusätzlich verschlüsselt |
|
||||
| Secret Rotation | Manuell | Automatisch/dynamisch |
|
||||
| Audit Log | Begrenzt | Detailliert für jeden Zugriff |
|
||||
| Dynamische Secrets | Nein | Ja (DB, Cloud, etc.) |
|
||||
| Granulare Policies | Begrenzt | Sehr feinkörnig |
|
||||
| Encryption-as-a-Service | Nein | Ja |
|
||||
|
||||
## Typischer Workflow nach der Konfiguration
|
||||
|
||||
1. **Policy erstellen:** Definiere, wer auf welche Secrets zugreifen darf
|
||||
2. **Role erstellen:** Verknüpfe Kubernetes Service Accounts mit Vault Policies
|
||||
3. **Secrets bereitstellen:** Nutze Vault Agent Injection oder CSI Driver
|
||||
4. **Anwendung deployen:** Pods authentifizieren sich automatisch
|
||||
|
||||
## Best Practice Setup
|
||||
|
||||
Nach der Kubernetes Auth-Aktivierung solltest du:
|
||||
|
||||
```bash
|
||||
# 1. Policy erstellen
|
||||
vault policy write myapp - <<EOF
|
||||
path "secret/data/myapp/*" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
EOF
|
||||
|
||||
# 2. Role erstellen
|
||||
vault write auth/kubernetes/role/myapp \
|
||||
bound_service_account_names=myapp \
|
||||
bound_service_account_namespaces=production \
|
||||
policies=myapp \
|
||||
ttl=1h
|
||||
|
||||
# 3. Service Account in K8s erstellen
|
||||
kubectl create serviceaccount myapp -n production
|
||||
```
|
||||
|
||||
Möchtest du ein spezifisches Szenario genauer erkunden oder brauchst du Hilfe bei der Konfiguration eines bestimmten Use Cases?
|
||||
8
08_Vault/auth-token-secret.yaml
Normal file
8
08_Vault/auth-token-secret.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: vault-auth-token
|
||||
annotations:
|
||||
kubernetes.io/service-account.name: vault-auth
|
||||
type: kubernetes.io/service-account-token
|
||||
|
||||
126
08_Vault/justfile
Normal file
126
08_Vault/justfile
Normal file
@@ -0,0 +1,126 @@
|
||||
set fallback := true
|
||||
|
||||
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
|
||||
export VAULT_CHART_VERSION := env("VAULT_CHART_VERSION", "0.31.0")
|
||||
export VAULT_HOST := env("VAULT_HOST", "")
|
||||
export VAULT_ADDR := "https://" + VAULT_HOST
|
||||
export VAULT_DEBUG := env("VAULT_DEBUG", "false")
|
||||
SECRET_PATH := "secret"
|
||||
|
||||
|
||||
[private]
|
||||
default:
|
||||
@just --list --unsorted --list-submodules
|
||||
|
||||
# Add Helm repository
|
||||
add-helm-repo:
|
||||
helm repo add hashicorp https://helm.releases.hashicorp.com
|
||||
helm repo update
|
||||
|
||||
# Remove Helm repository
|
||||
remove-helm-repo:
|
||||
helm repo remove hashicorp
|
||||
|
||||
|
||||
# Create Vault namespace
|
||||
create-namespace:
|
||||
@kubectl get namespace ${K8S_VAULT_NAMESPACE} > /dev/null || kubectl create namespace ${K8S_VAULT_NAMESPACE}
|
||||
|
||||
# Delete Vault namespace
|
||||
delete-namespace:
|
||||
@kubectl delete namespace ${K8S_VAULT_NAMESPACE} --ignore-not-found
|
||||
|
||||
install:
|
||||
#!/bin/bash
|
||||
set -eu
|
||||
just create-namespace
|
||||
just add-helm-repo
|
||||
|
||||
gomplate -f vault-values.gomplate.yaml -o vault-values.yaml
|
||||
|
||||
helm upgrade \
|
||||
--cleanup-on-fail \
|
||||
--install \
|
||||
vault \
|
||||
hashicorp/vault \
|
||||
--namespace ${K8S_VAULT_NAMESPACE} \
|
||||
--wait \
|
||||
-f vault-values.yaml
|
||||
|
||||
kubectl wait pod --for=condition=PodReadyToStartContainers \
|
||||
-n ${K8S_VAULT_NAMESPACE} vault-0 --timeout=5m
|
||||
|
||||
# Wait for Vault service to be ready to accept connections
|
||||
echo "Waiting for Vault service to be ready..."
|
||||
for i in {1..30}; do
|
||||
if kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
|
||||
vault status 2>&1 | grep -qE "(Initialized|Sealed)"; then
|
||||
echo "✓ Vault service is ready"
|
||||
break
|
||||
fi
|
||||
if [ $i -eq 30 ]; then
|
||||
echo "Error: Timeout waiting for Vault service to be ready"
|
||||
exit 1
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
|
||||
init_output=$(kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
|
||||
vault operator init -key-shares=1 -key-threshold=1 -format=json || true)
|
||||
|
||||
root_token=""
|
||||
if echo "${init_output}" | grep -q "Vault is already initialized"; then
|
||||
echo "Vault is already initialized"
|
||||
while [ -z "${root_token}" ]; do
|
||||
root_token=$(gum input --prompt="Vault root token: " --password --width=100)
|
||||
done
|
||||
else
|
||||
unseal_key=$(echo "${init_output}" | jq -r '.unseal_keys_b64[0]')
|
||||
root_token=$(echo "${init_output}" | jq -r '.root_token')
|
||||
kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
|
||||
vault operator unseal "${unseal_key}"
|
||||
echo "Vault initialized and unsealed successfully"
|
||||
echo "Root Token: ${root_token}"
|
||||
echo "Unseal Key: ${unseal_key}"
|
||||
echo "Please save these credentials securely!"
|
||||
fi
|
||||
|
||||
# Wait for all vault instances to pass readiness checks and be ready to serve requests
|
||||
kubectl wait pod --for=condition=ready -n ${K8S_VAULT_NAMESPACE} \
|
||||
-l app.kubernetes.io/name=vault --timeout=5m
|
||||
|
||||
just setup-kubernetes-auth "${root_token}"
|
||||
|
||||
|
||||
# Uninstall Vault
|
||||
uninstall delete-ns='false':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
helm uninstall vault -n ${K8S_VAULT_NAMESPACE} --ignore-not-found --wait
|
||||
just delete-namespace
|
||||
|
||||
|
||||
# Setup Kubernetes authentication
|
||||
setup-kubernetes-auth root_token='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
export VAULT_TOKEN="{{ root_token }}"
|
||||
while [ -z "${VAULT_TOKEN}" ]; do
|
||||
VAULT_TOKEN=$(gum input --prompt="Vault root token: " --password --width=100)
|
||||
done
|
||||
|
||||
gomplate -f ./serviceaccount.gomplate.yaml | kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f -
|
||||
gomplate -f ./rolebinding.gomplate.yaml | kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f -
|
||||
kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f ./auth-token-secret.yaml
|
||||
|
||||
SA_SECRET="vault-auth-token"
|
||||
SA_JWT=$(kubectl get secret -n ${K8S_VAULT_NAMESPACE} ${SA_SECRET} -o jsonpath='{.data.token}' | base64 --decode)
|
||||
SA_CA=$(kubectl get secret -n ${K8S_VAULT_NAMESPACE} ${SA_SECRET} -o jsonpath='{.data.ca\.crt}' | base64 --decode)
|
||||
|
||||
vault auth list -format=json | jq -e '.["kubernetes/"]' >/dev/null 2>&1 || \
|
||||
vault auth enable kubernetes
|
||||
|
||||
vault write auth/kubernetes/config \
|
||||
token_reviewer_jwt="${SA_JWT}" \
|
||||
kubernetes_host="https://kubernetes.default.svc" \
|
||||
kubernetes_ca_cert="${SA_CA}"
|
||||
12
08_Vault/rolebinding.gomplate.yaml
Normal file
12
08_Vault/rolebinding.gomplate.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: vault-auth-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:auth-delegator
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: vault-auth
|
||||
namespace: {{ .Env.K8S_VAULT_NAMESPACE }}
|
||||
5
08_Vault/serviceaccount.gomplate.yaml
Normal file
5
08_Vault/serviceaccount.gomplate.yaml
Normal file
@@ -0,0 +1,5 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: vault-auth
|
||||
namespace: {{ .Env.K8S_VAULT_NAMESPACE }}
|
||||
6
08_Vault/vault-sa.yaml
Normal file
6
08_Vault/vault-sa.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: vault-serviceaccount
|
||||
labels:
|
||||
app: read-vault-secret
|
||||
35
08_Vault/vault-secret-test-deploy.yaml
Normal file
35
08_Vault/vault-secret-test-deploy.yaml
Normal file
@@ -0,0 +1,35 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: vault-test
|
||||
labels:
|
||||
app: read-vault-secret
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: read-vault-secret
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
vault.hashicorp.com/agent-inject: "true"
|
||||
vault.hashicorp.com/agent-inject-status: "update"
|
||||
vault.hashicorp.com/agent-inject-secret-login: "secret/login"
|
||||
vault.hashicorp.com/agent-inject-template-login: |
|
||||
{{- with secret "secret/login" -}}
|
||||
pattoken={{ .Data.data.pattoken }}
|
||||
{{- end }}
|
||||
vault.hashicorp.com/agent-inject-secret-my-first-secret: "secret/my-first-secret"
|
||||
vault.hashicorp.com/agent-inject-template-my-first-secret: |
|
||||
{{- with secret "secret/my-first-secret" -}}
|
||||
username={{ .Data.data.username }}
|
||||
password={{ .Data.data.password }}
|
||||
{{- end }}
|
||||
vault.hashicorp.com/role: "vault-role"
|
||||
labels:
|
||||
app: read-vault-secret
|
||||
spec:
|
||||
serviceAccountName: vault-serviceaccount
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
16
08_Vault/vault-values.gomplate.yaml
Normal file
16
08_Vault/vault-values.gomplate.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
server:
|
||||
ingress:
|
||||
enabled: true
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: traefik
|
||||
traefik.ingress.kubernetes.io/router.entrypoints: websecure
|
||||
ingressClassName: traefik
|
||||
hosts:
|
||||
- host: {{ .Env.VAULT_HOST }}
|
||||
paths:
|
||||
- /
|
||||
tls:
|
||||
- hosts:
|
||||
- {{ .Env.VAULT_HOST }}
|
||||
dataStorage:
|
||||
storageClass: longhorn
|
||||
16
08_Vault/vault-values.yaml
Normal file
16
08_Vault/vault-values.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
server:
|
||||
ingress:
|
||||
enabled: true
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: traefik
|
||||
traefik.ingress.kubernetes.io/router.entrypoints: websecure
|
||||
ingressClassName: traefik
|
||||
hosts:
|
||||
- host: vault.test.k8s.schnrbs.work
|
||||
paths:
|
||||
- /
|
||||
tls:
|
||||
- hosts:
|
||||
- vault.test.k8s.schnrbs.work
|
||||
dataStorage:
|
||||
storageClass: longhorn
|
||||
51
09_ExternalSecrets/external-secrets-values.yaml
Normal file
51
09_ExternalSecrets/external-secrets-values.yaml
Normal file
@@ -0,0 +1,51 @@
|
||||
# External Secrets Operator resource configuration
|
||||
# Based on Goldilocks recommendations (Burstable QoS)
|
||||
|
||||
podSecurityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
fsGroup: 1000
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
|
||||
# Main controller
|
||||
resources:
|
||||
requests:
|
||||
cpu: 15m
|
||||
memory: 192Mi
|
||||
limits:
|
||||
cpu: 50m
|
||||
memory: 256Mi
|
||||
|
||||
certController:
|
||||
podSecurityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
fsGroup: 1000
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
resources:
|
||||
requests:
|
||||
cpu: 15m
|
||||
memory: 192Mi
|
||||
limits:
|
||||
cpu: 50m
|
||||
memory: 256Mi
|
||||
|
||||
webhook:
|
||||
podSecurityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
fsGroup: 1000
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
resources:
|
||||
requests:
|
||||
cpu: 15m
|
||||
memory: 128Mi
|
||||
limits:
|
||||
cpu: 50m
|
||||
memory: 256Mi
|
||||
65
09_ExternalSecrets/justfile
Normal file
65
09_ExternalSecrets/justfile
Normal file
@@ -0,0 +1,65 @@
|
||||
set fallback := true
|
||||
|
||||
export EXTERNAL_SECRETS_NAMESPACE := env("EXTERNAL_SECRETS_NAMESPACE", "external-secrets")
|
||||
export EXTERNAL_SECRETS_CHART_VERSION := env("EXTERNAL_SECRETS_CHART_VERSION", "1.1.0")
|
||||
export EXTERNAL_SECRETS_REFRESH_INTERVAL := env("EXTERNAL_SECRETS_REFRESH_INTERVAL", "1800")
|
||||
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
|
||||
export VAULT_HOST := env("VAULT_HOST", "")
|
||||
export VAULT_ADDR := "https://" + VAULT_HOST
|
||||
|
||||
[private]
|
||||
default:
|
||||
@just --list --unsorted --list-submodules
|
||||
|
||||
# Add Helm repository
|
||||
add-helm-repo:
|
||||
helm repo add external-secrets https://charts.external-secrets.io
|
||||
helm repo update
|
||||
|
||||
# Remove Helm repository
|
||||
remove-helm-repo:
|
||||
helm repo remove external-secrets
|
||||
|
||||
# Install External Secrets
|
||||
install:
|
||||
just add-helm-repo
|
||||
helm upgrade --cleanup-on-fail \
|
||||
--install external-secrets external-secrets/external-secrets \
|
||||
--version ${EXTERNAL_SECRETS_CHART_VERSION} -n ${EXTERNAL_SECRETS_NAMESPACE} \
|
||||
--create-namespace --wait \
|
||||
-f external-secrets-values.yaml
|
||||
|
||||
kubectl label namespace ${EXTERNAL_SECRETS_NAMESPACE} \
|
||||
pod-security.kubernetes.io/enforce=restricted --overwrite
|
||||
|
||||
just create-external-secrets-role
|
||||
just create-vault-secret-store
|
||||
|
||||
# Uninstall External Secrets
|
||||
uninstall:
|
||||
just delete-vault-secret-store
|
||||
helm uninstall external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} --wait
|
||||
kubectl delete namespace ${EXTERNAL_SECRETS_NAMESPACE} --ignore-not-found
|
||||
|
||||
# Create Vault Secret Store for External Secrets
|
||||
create-vault-secret-store:
|
||||
gomplate -f ./vault-secret-store.gomplate.yaml | kubectl apply -f -
|
||||
|
||||
# Delete Vault Secret Store for External Secrets
|
||||
delete-vault-secret-store:
|
||||
gomplate -f ./vault-secret-store.gomplate.yaml | kubectl delete --ignore-not-found -f -
|
||||
|
||||
# Create Vault role for External Secrets
|
||||
create-external-secrets-role root_token='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
export VAULT_TOKEN="{{ root_token }}"
|
||||
while [ -z "${VAULT_TOKEN}" ]; do
|
||||
VAULT_TOKEN=$(gum input --prompt="Vault root token: " --password --width=100)
|
||||
done
|
||||
vault write auth/kubernetes/role/external-secrets \
|
||||
bound_service_account_names=external-secrets \
|
||||
bound_service_account_namespaces=${EXTERNAL_SECRETS_NAMESPACE} \
|
||||
audience=vault \
|
||||
policies=admin \
|
||||
ttl=1h
|
||||
22
09_ExternalSecrets/vault-secret-store.gomplate.yaml
Normal file
22
09_ExternalSecrets/vault-secret-store.gomplate.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ClusterSecretStore
|
||||
metadata:
|
||||
name: vault-secret-store
|
||||
spec:
|
||||
provider:
|
||||
vault:
|
||||
server: http://vault.{{ .Env.K8S_VAULT_NAMESPACE }}:8200
|
||||
path: secret
|
||||
version: v2
|
||||
auth:
|
||||
kubernetes:
|
||||
role: external-secrets
|
||||
mountPath: kubernetes
|
||||
serviceAccountRef:
|
||||
name: external-secrets
|
||||
namespace: {{ .Env.EXTERNAL_SECRETS_NAMESPACE }}
|
||||
# Audience must match the audience configured in Vault Kubernetes auth role
|
||||
# Required for Vault 1.21+ compatibility
|
||||
audiences:
|
||||
- vault
|
||||
refreshInterval: {{ .Env.EXTERNAL_SECRETS_REFRESH_INTERVAL }}
|
||||
@@ -46,6 +46,13 @@ data:
|
||||
url: http://pi.hole
|
||||
version: 6
|
||||
key: 5ipI9bvB
|
||||
- Paperless NGX:
|
||||
icon: paperless-ng.png
|
||||
href: https://ppl.homeee.schnorbus.net
|
||||
widgets:
|
||||
- type: paperlessngx
|
||||
url: https://ppl.homeee.schnorbus.net
|
||||
token: 0cf8eb062d0ecfc0aa70611125427692cb577d68
|
||||
|
||||
|
||||
- My Second Group:
|
||||
@@ -61,24 +68,33 @@ data:
|
||||
icon: proxmox.png
|
||||
href: https://pve-83.fritz.box:8006
|
||||
description: Homepage is the best
|
||||
widgets:
|
||||
- type: proxmox
|
||||
url: https://pve-83.fritz.box:8006
|
||||
username: homepage_api@pam!homepage_api
|
||||
password: 7676925b-3ed4-4c8b-9df5-defb4a9a0871
|
||||
# widgets:
|
||||
# - type: proxmox
|
||||
# url: https://pve-83.fritz.box:8006
|
||||
# username: homepage_api@pam!homepage_api
|
||||
# password: 0cf8eb062d0ecfc0aa70611125427692cb577d68
|
||||
- Longhorn:
|
||||
icon: longhorn.png
|
||||
href: https://longhorn-dashboard.k8s.schnrbs.work
|
||||
description: Longhorn volume provisioning
|
||||
|
||||
- Party Time:
|
||||
- Immich:
|
||||
icon: immich.png
|
||||
href: https://immich.homeee.schnorbus.net
|
||||
description: Immich is awesome
|
||||
widgets:
|
||||
- type: immich
|
||||
url: https://immich.homeee.schnorbus.net
|
||||
key: deOT6z7AHok30eKWgF2bOSJuOIZXK0eONo7PrR0As
|
||||
version: 2
|
||||
- Linkwarden:
|
||||
icon: linkwarden.png
|
||||
href: https://lw.homeee.schnorbus.net
|
||||
description: Homepage isssss 😎
|
||||
widgets:
|
||||
- type: linkwarden
|
||||
url: https://lw.homeee.schnorbus.net
|
||||
url: http://docker-host-02.fritz.box:9595
|
||||
key: eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..bEvs2PcR0ZTNpb8b.Lhe1-00LlVVC97arojvhh7IK4VADR82AMAzK5sd7AcUhs2WUQmu8Q-cOAKFGVlgPgdk-w1Pa8CJJHF71opWJk85aJXkTcdl7jANwN8PqgHXsSPoqtvzX.5GFRIAMo31sw5GStVlznHQ
|
||||
- Nginx Proxy Manager:
|
||||
icon: nginx-proxy-manager.png
|
||||
|
||||
27
10_Postgres/cnpg-values.yaml
Normal file
27
10_Postgres/cnpg-values.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
# Pod Security Context for restricted Pod Security Standards
|
||||
#podSecurityContext:
|
||||
# runAsNonRoot: true
|
||||
# seccompProfile:
|
||||
# type: RuntimeDefault
|
||||
# fsGroup: 10001
|
||||
#
|
||||
## Container Security Context for restricted Pod Security Standards
|
||||
#containerSecurityContext:
|
||||
# allowPrivilegeEscalation: false
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsUser: 10001
|
||||
# runAsGroup: 10001
|
||||
# seccompProfile:
|
||||
# type: RuntimeDefault
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
#
|
||||
resources:
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 128Mi
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 256Mi
|
||||
|
||||
647
10_Postgres/justfile
Normal file
647
10_Postgres/justfile
Normal file
@@ -0,0 +1,647 @@
|
||||
set fallback := true
|
||||
|
||||
export CNPG_NAMESPACE := env("CNPG_NAMESPACE", "postgres")
|
||||
export CNPG_CHART_VERSION := env("CNPG_CHART_VERSION", "0.26.1")
|
||||
export CNPG_CLUSTER_CHART_VERSION := env("CNPG_CLUSTER_CHART_VERSION", "0.3.1")
|
||||
export POSTGRES_STORAGE_SIZE := env("POSTGRES_STORAGE_SIZE", "20Gi")
|
||||
export POSTGRES_MAX_CONNECTIONS := env("POSTGRES_MAX_CONNECTIONS", "200")
|
||||
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
|
||||
export EXTERNAL_SECRETS_NAMESPACE := env("EXTERNAL_SECRETS_NAMESPACE", "external-secrets")
|
||||
|
||||
[private]
|
||||
default:
|
||||
@just --list --unsorted --list-submodules
|
||||
|
||||
# Add Helm repository
|
||||
add-helm-repo:
|
||||
@helm repo add cnpg https://cloudnative-pg.github.io/charts
|
||||
@helm repo update
|
||||
|
||||
# Remove Helm repository
|
||||
remove-helm-repo:
|
||||
@helm repo remove cnpg
|
||||
|
||||
# Install CloudNativePG and create a cluster
|
||||
install:
|
||||
@just install-cnpg
|
||||
@just create-cluster
|
||||
|
||||
# Uninstall CloudNativePG and delete the cluster
|
||||
uninstall:
|
||||
@just delete-cluster
|
||||
@just uninstall-cnpg
|
||||
|
||||
# Install CloudNativePG
|
||||
install-cnpg:
|
||||
@just add-helm-repo
|
||||
@helm upgrade --cleanup-on-fail --install cnpg cnpg/cloudnative-pg \
|
||||
--version ${CNPG_CHART_VERSION} \
|
||||
-n ${CNPG_NAMESPACE} --create-namespace --wait \
|
||||
-f cnpg-values.yaml
|
||||
|
||||
@kubectl label namespace ${CNPG_NAMESPACE} \
|
||||
pod-security.kubernetes.io/enforce=restricted --overwrite
|
||||
|
||||
# Uninstall CloudNativePG
|
||||
uninstall-cnpg:
|
||||
@helm uninstall cnpg -n ${CNPG_NAMESPACE} --wait
|
||||
@kubectl delete namespace ${CNPG_NAMESPACE} --ignore-not-found
|
||||
|
||||
# Create Postgres cluster
|
||||
create-cluster:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
if helm status external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} &>/dev/null; then
|
||||
echo "External Secrets Operator detected. Creating admin credentials via ExternalSecret..."
|
||||
password=$(just utils::random-password)
|
||||
just vault::put-root postgres/admin username=postgres password="${password}"
|
||||
|
||||
kubectl delete externalsecret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
|
||||
gomplate -f postgres-superuser-external-secret.gomplate.yaml | kubectl apply -f -
|
||||
|
||||
echo "Waiting for ExternalSecret to sync..."
|
||||
kubectl wait --for=condition=Ready externalsecret/postgres-cluster-superuser \
|
||||
-n ${CNPG_NAMESPACE} --timeout=60s
|
||||
else
|
||||
echo "External Secrets Operator not found. Creating superuser secret directly..."
|
||||
password=$(just utils::random-password)
|
||||
kubectl delete secret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
|
||||
kubectl create secret generic postgres-cluster-superuser -n ${CNPG_NAMESPACE} \
|
||||
--from-literal=username=postgres \
|
||||
--from-literal=password="${password}"
|
||||
|
||||
if helm status vault -n ${K8S_VAULT_NAMESPACE} &>/dev/null; then
|
||||
just vault::put-root postgres/admin username=postgres password="${password}"
|
||||
fi
|
||||
fi
|
||||
|
||||
gomplate -f postgres-cluster-values.gomplate.yaml -o postgres-cluster-values.yaml
|
||||
helm upgrade --install postgres-cluster cnpg/cluster \
|
||||
--version ${CNPG_CLUSTER_CHART_VERSION} \
|
||||
-n ${CNPG_NAMESPACE} --wait -f postgres-cluster-values.yaml
|
||||
|
||||
echo "Waiting for PostgreSQL cluster to be ready..."
|
||||
kubectl wait --for=condition=Ready clusters.postgresql.cnpg.io/postgres-cluster \
|
||||
-n ${CNPG_NAMESPACE} --timeout=300s
|
||||
|
||||
# Delete Postgres cluster
|
||||
delete-cluster:
|
||||
@helm uninstall postgres-cluster -n ${CNPG_NAMESPACE} --ignore-not-found --wait
|
||||
@kubectl delete externalsecret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
|
||||
@kubectl delete secret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
|
||||
|
||||
# Print Postgres username
|
||||
admin-username:
|
||||
@echo "postgres"
|
||||
|
||||
# Print Postgres password
|
||||
admin-password:
|
||||
@kubectl get -n ${CNPG_NAMESPACE} secret postgres-cluster-superuser \
|
||||
-o jsonpath="{.data.password}" | base64 --decode
|
||||
@echo
|
||||
|
||||
# Create Postgres database
|
||||
create-db db_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-{{ db_name }}}
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
if just db-exists ${DB_NAME} &>/dev/null; then
|
||||
echo "Database ${DB_NAME} already exists" >&2
|
||||
exit
|
||||
fi
|
||||
echo "Creating database ${DB_NAME}..."
|
||||
just psql -c "\"CREATE DATABASE ${DB_NAME};\""
|
||||
echo "Database ${DB_NAME} created."
|
||||
|
||||
# Delete Postgres database
|
||||
delete-db db_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-{{ db_name }}}
|
||||
if ! just db-exists ${DB_NAME} &>/dev/null; then
|
||||
echo "Database ${DB_NAME} does not exist." >&2
|
||||
exit
|
||||
fi
|
||||
# Terminate all connections to the database
|
||||
just psql -c "\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity
|
||||
WHERE datname = '${DB_NAME}' AND pid <> pg_backend_pid();\""
|
||||
# Force disconnect if needed
|
||||
just psql -c "\"UPDATE pg_database SET datallowconn = false WHERE datname = '${DB_NAME}';\""
|
||||
just psql -c "\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity
|
||||
WHERE datname = '${DB_NAME}';\""
|
||||
just psql -c "\"DROP DATABASE ${DB_NAME};\""
|
||||
echo "Database ${DB_NAME} deleted."
|
||||
|
||||
# Check if database exists
|
||||
[no-exit-message]
|
||||
db-exists db_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-{{ db_name }}}
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
if echo '\l' | just postgres::psql | grep -E "^ *${DB_NAME} *\|" &>/dev/null; then
|
||||
echo "Database ${DB_NAME} exists."
|
||||
else
|
||||
echo "Database ${DB_NAME} does not exist." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create Postgres user
|
||||
create-user username='' password='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
PASSWORD=${PASSWORD:-"{{ password }}"}
|
||||
while [ -z "${USERNAME}" ]; do
|
||||
USERNAME=$(gum input --prompt="Username: " --width=100)
|
||||
done
|
||||
if just user-exists ${USERNAME} &>/dev/null; then
|
||||
echo "User ${USERNAME} already exists" >&2
|
||||
exit
|
||||
fi
|
||||
if [ -z "${PASSWORD}" ]; then
|
||||
PASSWORD=$(gum input --prompt="Password: " --password --width=100 \
|
||||
--placeholder="Empty to generate a random password")
|
||||
fi
|
||||
if [ -z "${PASSWORD}" ]; then
|
||||
PASSWORD=$(just random-password)
|
||||
echo "Generated random password: ${PASSWORD}"
|
||||
fi
|
||||
just psql -c "\"CREATE USER ${USERNAME} WITH LOGIN PASSWORD '${PASSWORD}';\""
|
||||
echo "User ${USERNAME} created."
|
||||
|
||||
# Delete Postgres user
|
||||
delete-user username='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
if ! just user-exists ${USERNAME} &>/dev/null; then
|
||||
echo "User ${USERNAME} does not exist." >&2
|
||||
exit
|
||||
fi
|
||||
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON TABLES FROM ${USERNAME};\""
|
||||
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON SEQUENCES FROM ${USERNAME};\""
|
||||
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON FUNCTIONS FROM ${USERNAME};\""
|
||||
just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON TYPES FROM ${USERNAME};\""
|
||||
just psql -c "\"ALTER SCHEMA public OWNER TO postgres;\""
|
||||
just psql -c "\"DROP USER ${USERNAME};\""
|
||||
echo "User ${USERNAME} deleted."
|
||||
|
||||
# Check if user exists
|
||||
[no-exit-message]
|
||||
user-exists username='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
while [ -z "${USERNAME}" ]; do
|
||||
USERNAME=$(gum input --prompt="Username: " --width=100)
|
||||
done
|
||||
if echo '\du' | just postgres::psql | grep -E "^ *${USERNAME} *\|" &>/dev/null; then
|
||||
echo "User ${USERNAME} exists."
|
||||
else
|
||||
echo "User ${USERNAME} does not exist." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Change user password
|
||||
change-password username='' password='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
PASSWORD=${PASSWORD:-"{{ password }}"}
|
||||
while [ -z "${USERNAME}" ]; do
|
||||
USERNAME=$(gum input --prompt="Username: " --width=100)
|
||||
done
|
||||
if ! just user-exists ${USERNAME} &>/dev/null; then
|
||||
echo "User ${USERNAME} does not exist." >&2
|
||||
exit 1
|
||||
fi
|
||||
if [ -z "${PASSWORD}" ]; then
|
||||
PASSWORD=$(gum input --prompt="New password: " --password --width=100 \
|
||||
--placeholder="Empty to generate a random password")
|
||||
fi
|
||||
if [ -z "${PASSWORD}" ]; then
|
||||
PASSWORD=$(just utils::random-password)
|
||||
echo "Generated random password: ${PASSWORD}"
|
||||
fi
|
||||
just psql -c "\"ALTER USER ${USERNAME} WITH PASSWORD '${PASSWORD}';\""
|
||||
echo "Password changed for user ${USERNAME}."
|
||||
|
||||
# Grant all privileges on database to user
|
||||
grant db_name='' username='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
while [ -z "${USERNAME}" ]; do
|
||||
USERNAME=$(gum input --prompt="Username: " --width=100)
|
||||
done
|
||||
if ! just psql ${DB_NAME} -U postgres -P pager=off -c "\"SELECT 1;\""; then
|
||||
echo "Database ${DB_NAME} does not exist." >&2
|
||||
exit 1
|
||||
fi
|
||||
just psql -c "\"GRANT ALL PRIVILEGES ON DATABASE ${DB_NAME} TO ${USERNAME};\""
|
||||
# Grant CREATE permission on public schema (needed for PostgreSQL 15+)
|
||||
just psql -d ${DB_NAME} -c "\"GRANT CREATE ON SCHEMA public TO ${USERNAME};\""
|
||||
echo "Privileges granted."
|
||||
|
||||
# Revoke all privileges on database from user
|
||||
revoke db_name='' username='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
while [ -z "${USERNAME}" ]; do
|
||||
USERNAME=$(gum input --prompt="Username: " --width=100)
|
||||
done
|
||||
if ! just psql -U postgres ${DB_NAME} -P pager=off -c "\"SELECT 1;\""; then
|
||||
echo "Database ${DB_NAME} does not exist." >&2
|
||||
exit 1
|
||||
fi
|
||||
just psql -c "\"REVOKE ALL PRIVILEGES ON DATABASE ${DB_NAME} FROM ${USERNAME};\""
|
||||
echo "Privileges revoked."
|
||||
|
||||
# Create Postgres database and user
|
||||
create-user-and-db username='' db_name='' password='':
|
||||
@just create-db "{{ db_name }}"
|
||||
@just create-user "{{ username }}" "{{ password }}"
|
||||
@just grant "{{ db_name }}" "{{ username }}"
|
||||
|
||||
# Delete Postgres database and user
|
||||
delete-user-and-db username='' db_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
if just db-exists ${DB_NAME} &>/dev/null; then
|
||||
if just user-exists ${USERNAME} &>/dev/null; then
|
||||
just revoke "${DB_NAME}" "${USERNAME}"
|
||||
else
|
||||
echo "User ${USERNAME} does not exist, skipping revoke."
|
||||
fi
|
||||
just delete-db "${DB_NAME}"
|
||||
else
|
||||
echo "Database ${DB_NAME} does not exist, skipping database deletion."
|
||||
fi
|
||||
if just user-exists ${USERNAME} &>/dev/null; then
|
||||
just delete-user "${USERNAME}"
|
||||
else
|
||||
echo "User ${USERNAME} does not exist, skipping user deletion."
|
||||
fi
|
||||
echo "Cleanup completed."
|
||||
|
||||
# Create logical replication slot for CDC
|
||||
create-replication-slot slot_name='' db_name='postgres' plugin='pgoutput':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
PLUGIN=${PLUGIN:-"{{ plugin }}"}
|
||||
while [ -z "${SLOT_NAME}" ]; do
|
||||
SLOT_NAME=$(gum input --prompt="Replication slot name: " --width=100 \
|
||||
--placeholder="e.g., airbyte_slot")
|
||||
done
|
||||
if kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -tAc \
|
||||
"SELECT slot_name FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" | grep -q "${SLOT_NAME}"; then
|
||||
echo "Replication slot '${SLOT_NAME}' already exists."
|
||||
exit 0
|
||||
fi
|
||||
echo "Creating replication slot '${SLOT_NAME}' with plugin '${PLUGIN}'..."
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c \
|
||||
"SELECT pg_create_logical_replication_slot('${SLOT_NAME}', '${PLUGIN}');"
|
||||
echo "Replication slot '${SLOT_NAME}' created."
|
||||
|
||||
# Delete replication slot
|
||||
delete-replication-slot slot_name='' db_name='postgres':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
while [ -z "${SLOT_NAME}" ]; do
|
||||
SLOT_NAME=$(gum input --prompt="Replication slot name to delete: " --width=100)
|
||||
done
|
||||
if ! kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -tAc \
|
||||
"SELECT slot_name FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" | grep -q "${SLOT_NAME}"; then
|
||||
echo "Replication slot '${SLOT_NAME}' does not exist."
|
||||
exit 1
|
||||
fi
|
||||
echo "Deleting replication slot '${SLOT_NAME}'..."
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c \
|
||||
"SELECT pg_drop_replication_slot('${SLOT_NAME}');"
|
||||
echo "Replication slot '${SLOT_NAME}' deleted."
|
||||
|
||||
# List all replication slots
|
||||
list-replication-slots:
|
||||
@echo "Replication slots:"
|
||||
@kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d postgres -c \
|
||||
"SELECT slot_name, plugin, slot_type, database, active, restart_lsn FROM pg_replication_slots;"
|
||||
|
||||
# Create publication for CDC
|
||||
create-publication pub_name='' db_name='' tables='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
TABLES="${TABLES:-{{ tables }}}"
|
||||
while [ -z "${PUB_NAME}" ]; do
|
||||
PUB_NAME=$(gum input --prompt="Publication name: " --width=100 \
|
||||
--placeholder="e.g., airbyte_publication")
|
||||
done
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
if kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -tAc \
|
||||
"SELECT pubname FROM pg_publication WHERE pubname = '${PUB_NAME}';" | grep -q "${PUB_NAME}"; then
|
||||
echo "Publication '${PUB_NAME}' already exists in database '${DB_NAME}'."
|
||||
exit 0
|
||||
fi
|
||||
if [ -z "${TABLES}" ]; then
|
||||
echo "Select tables to include in publication:"
|
||||
echo "1) All tables (ALL TABLES)"
|
||||
echo "2) All user tables (exclude system/internal tables)"
|
||||
echo "3) Specific tables (comma-separated list)"
|
||||
CHOICE=$(gum choose "All tables" "User tables only" "Specific tables")
|
||||
case "${CHOICE}" in
|
||||
"All tables")
|
||||
TABLES="ALL TABLES"
|
||||
;;
|
||||
"User tables only")
|
||||
# Get list of user tables (excluding _airbyte* and other system tables)
|
||||
USER_TABLES=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -tAc \
|
||||
"SELECT string_agg(tablename, ', ') FROM pg_tables
|
||||
WHERE schemaname = 'public'
|
||||
AND tablename NOT LIKE '\_%'
|
||||
AND tablename NOT LIKE 'pg_%';")
|
||||
if [ -z "${USER_TABLES}" ]; then
|
||||
echo "No user tables found in database '${DB_NAME}'"
|
||||
exit 1
|
||||
fi
|
||||
TABLES="TABLE ${USER_TABLES}"
|
||||
echo "Including tables: ${USER_TABLES}"
|
||||
;;
|
||||
"Specific tables")
|
||||
TABLES=$(gum input --prompt="Enter table names (comma-separated): " --width=100 \
|
||||
--placeholder="e.g., users, products, orders")
|
||||
TABLES="TABLE ${TABLES}"
|
||||
;;
|
||||
esac
|
||||
elif [ "${TABLES}" = "ALL" ]; then
|
||||
TABLES="ALL TABLES"
|
||||
fi
|
||||
echo "Creating publication '${PUB_NAME}' in database '${DB_NAME}'..."
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c \
|
||||
"CREATE PUBLICATION ${PUB_NAME} FOR ${TABLES};"
|
||||
if [ "${TABLES}" != "ALL TABLES" ]; then
|
||||
echo "Setting REPLICA IDENTITY for included tables..."
|
||||
TABLE_LIST=$(echo "${TABLES}" | sed 's/TABLE //')
|
||||
IFS=',' read -ra TABLE_ARRAY <<< "${TABLE_LIST}"
|
||||
for table in "${TABLE_ARRAY[@]}"; do
|
||||
table=$(echo "$table" | xargs) # trim whitespace
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c \
|
||||
"ALTER TABLE ${table} REPLICA IDENTITY FULL;" 2>/dev/null || true
|
||||
done
|
||||
fi
|
||||
echo "Publication '${PUB_NAME}' created."
|
||||
|
||||
# Delete publication
|
||||
delete-publication pub_name='' db_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
while [ -z "${PUB_NAME}" ]; do
|
||||
PUB_NAME=$(gum input --prompt="Publication name to delete: " --width=100)
|
||||
done
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
if ! kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -tAc \
|
||||
"SELECT pubname FROM pg_publication WHERE pubname = '${PUB_NAME}';" | grep -q "${PUB_NAME}"; then
|
||||
echo "Publication '${PUB_NAME}' does not exist in database '${DB_NAME}'."
|
||||
exit 1
|
||||
fi
|
||||
echo "Deleting publication '${PUB_NAME}' from database '${DB_NAME}'..."
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c \
|
||||
"DROP PUBLICATION ${PUB_NAME};"
|
||||
echo "Publication '${PUB_NAME}' deleted."
|
||||
|
||||
# List all publications in a database
|
||||
list-publications db_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
echo "Publications in database '${DB_NAME}':"
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c \
|
||||
"SELECT pubname, puballtables, pubinsert, pubupdate, pubdelete FROM pg_publication;"
|
||||
|
||||
# Grant CDC privileges to user
|
||||
grant-cdc-privileges username='' db_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
while [ -z "${USERNAME}" ]; do
|
||||
USERNAME=$(gum input --prompt="Username to grant CDC privileges: " --width=100)
|
||||
done
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name: " --width=100)
|
||||
done
|
||||
echo "Granting CDC privileges to user '${USERNAME}' on database '${DB_NAME}'..."
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c "ALTER USER ${USERNAME} WITH REPLICATION;"
|
||||
echo "Granting schema and table privileges..."
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c \
|
||||
"GRANT USAGE ON SCHEMA public TO ${USERNAME};
|
||||
GRANT CREATE ON SCHEMA public TO ${USERNAME};
|
||||
GRANT SELECT ON ALL TABLES IN SCHEMA public TO ${USERNAME};
|
||||
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO ${USERNAME};"
|
||||
echo "Granting pg_read_all_data role..."
|
||||
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d ${DB_NAME} -c "GRANT pg_read_all_data TO ${USERNAME};" 2>/dev/null || true
|
||||
echo "CDC privileges granted to user '${USERNAME}'"
|
||||
|
||||
# Setup CDC (Change Data Capture)
|
||||
setup-cdc db_name='' slot_name='' pub_name='' username='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
|
||||
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
|
||||
USERNAME=${USERNAME:-"{{ username }}"}
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name for CDC setup: " --width=100)
|
||||
done
|
||||
while [ -z "${SLOT_NAME}" ]; do
|
||||
SLOT_NAME=$(gum input --prompt="Replication slot name: " --width=100 \
|
||||
--placeholder="e.g., demo_slot")
|
||||
done
|
||||
while [ -z "${PUB_NAME}" ]; do
|
||||
PUB_NAME=$(gum input --prompt="Publication name: " --width=100 \
|
||||
--placeholder="e.g., demo_pub")
|
||||
done
|
||||
echo "Setting up CDC on database '${DB_NAME}'..."
|
||||
WAL_LEVEL=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d postgres -tAc "SHOW wal_level;")
|
||||
if [ "${WAL_LEVEL}" != "logical" ]; then
|
||||
echo "WARNING: wal_level is '${WAL_LEVEL}', should be 'logical' for CDC"
|
||||
echo "Please ensure PostgreSQL is configured with wal_level=logical"
|
||||
exit 1
|
||||
fi
|
||||
just create-replication-slot "${SLOT_NAME}" "${DB_NAME}"
|
||||
just create-publication "${PUB_NAME}" "${DB_NAME}"
|
||||
if [ -n "${USERNAME}" ]; then
|
||||
echo ""
|
||||
just grant-cdc-privileges "${USERNAME}" "${DB_NAME}"
|
||||
fi
|
||||
echo ""
|
||||
echo "CDC setup completed for database '${DB_NAME}'"
|
||||
echo " Replication Method: Logical Replication (CDC)"
|
||||
echo " Replication Slot: ${SLOT_NAME}"
|
||||
echo " Publication: ${PUB_NAME}"
|
||||
if [ -n "${USERNAME}" ]; then
|
||||
echo " User with CDC privileges: ${USERNAME}"
|
||||
fi
|
||||
|
||||
# Cleanup CDC (removes slot and publication)
|
||||
cleanup-cdc db_name='' slot_name='' pub_name='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
DB_NAME=${DB_NAME:-"{{ db_name }}"}
|
||||
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
|
||||
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
|
||||
|
||||
while [ -z "${DB_NAME}" ]; do
|
||||
DB_NAME=$(gum input --prompt="Database name for CDC cleanup: " --width=100)
|
||||
done
|
||||
while [ -z "${SLOT_NAME}" ]; do
|
||||
SLOT_NAME=$(gum input --prompt="Replication slot name to delete: " --width=100 \
|
||||
--placeholder="e.g., demo_slot")
|
||||
done
|
||||
while [ -z "${PUB_NAME}" ]; do
|
||||
PUB_NAME=$(gum input --prompt="Publication name to delete: " --width=100 \
|
||||
--placeholder="e.g., demo_pub")
|
||||
done
|
||||
echo "Cleaning up CDC configuration for database '${DB_NAME}'..."
|
||||
|
||||
# Check if slot is active
|
||||
SLOT_ACTIVE=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
|
||||
psql -U postgres -d postgres -tAc \
|
||||
"SELECT active FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" 2>/dev/null || echo "")
|
||||
if [ "${SLOT_ACTIVE}" = "t" ]; then
|
||||
echo "WARNING: Replication slot '${SLOT_NAME}' is currently active!"
|
||||
echo "Please stop any active replication connections first."
|
||||
if ! gum confirm "Proceed with deletion anyway?"; then
|
||||
echo "Cleanup cancelled"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Delete in correct order: Slot first, then Publication
|
||||
echo "Step 1: Deleting replication slot '${SLOT_NAME}'..."
|
||||
just delete-replication-slot "${SLOT_NAME}" "${DB_NAME}" || \
|
||||
echo "Replication slot '${SLOT_NAME}' not found or already deleted"
|
||||
|
||||
echo "Step 2: Deleting publication '${PUB_NAME}'..."
|
||||
just delete-publication "${PUB_NAME}" "${DB_NAME}" || \
|
||||
echo "Publication '${PUB_NAME}' not found or already deleted"
|
||||
|
||||
echo "CDC cleanup completed for database '${DB_NAME}'"
|
||||
|
||||
# Run psql
|
||||
[no-exit-message]
|
||||
psql *args='':
|
||||
@kubectl exec -it -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- psql {{ args }}
|
||||
|
||||
# Dump Postgres database by pg_dump
|
||||
[no-cd]
|
||||
dump db_name file exclude_tables='':
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
DUMP_OPTIONS="-Fc"
|
||||
if [ -n "{{ exclude_tables }}" ]; then
|
||||
IFS=',' read -ra TABLES <<< "{{ exclude_tables }}"
|
||||
for table in "${TABLES[@]}"; do
|
||||
DUMP_OPTIONS="$DUMP_OPTIONS --exclude-table=$table"
|
||||
done
|
||||
fi
|
||||
|
||||
kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- bash -c \
|
||||
"pg_dump -d postgresql://$(just postgres::admin-username):$(just postgres::admin-password)@localhost/{{ db_name }} $DUMP_OPTIONS > \
|
||||
/var/lib/postgresql/data/db.dump"
|
||||
kubectl cp -n ${CNPG_NAMESPACE} -c postgres \
|
||||
postgres-cluster-1:/var/lib/postgresql/data/db.dump {{ file }}
|
||||
kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- rm /var/lib/postgresql/data/db.dump
|
||||
|
||||
# Restore Postgres database by pg_restore
|
||||
[no-cd]
|
||||
restore db_name file:
|
||||
just postgres::create-db {{ db_name }}
|
||||
kubectl cp {{ file }} -n ${CNPG_NAMESPACE} -c postgres \
|
||||
postgres-cluster-1:/var/lib/postgresql/data/db.dump
|
||||
kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- bash -c \
|
||||
"pg_restore --clean --if-exists \
|
||||
-d postgresql://$(just postgres::admin-username):$(just postgres::admin-password)@localhost/{{ db_name }} \
|
||||
/var/lib/postgresql/data/db.dump"
|
||||
|
||||
# Enable Prometheus monitoring
|
||||
enable-monitoring:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
echo "Enabling Prometheus PodMonitor for PostgreSQL cluster..."
|
||||
|
||||
# Label namespace to enable monitoring
|
||||
kubectl label namespace ${CNPG_NAMESPACE} buun.channel/enable-monitoring=true --overwrite
|
||||
|
||||
# Enable PodMonitor
|
||||
kubectl patch cluster postgres-cluster -n ${CNPG_NAMESPACE} --type=merge -p '{"spec":{"monitoring":{"enablePodMonitor":true}}}'
|
||||
|
||||
echo "Waiting for PodMonitor to be created..."
|
||||
sleep 3
|
||||
|
||||
# Add release label to PodMonitor
|
||||
kubectl label podmonitor postgres-cluster -n ${CNPG_NAMESPACE} release=kube-prometheus-stack --overwrite
|
||||
|
||||
kubectl get podmonitor -n ${CNPG_NAMESPACE} -l cnpg.io/cluster=postgres-cluster
|
||||
echo "✓ PostgreSQL monitoring enabled"
|
||||
|
||||
# Disable Prometheus monitoring
|
||||
disable-monitoring:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
echo "Disabling Prometheus PodMonitor for PostgreSQL cluster..."
|
||||
|
||||
# Disable PodMonitor
|
||||
kubectl patch cluster postgres-cluster -n ${CNPG_NAMESPACE} --type=merge -p '{"spec":{"monitoring":{"enablePodMonitor":false}}}'
|
||||
|
||||
# Remove namespace label
|
||||
kubectl label namespace ${CNPG_NAMESPACE} buun.channel/enable-monitoring- --ignore-not-found
|
||||
|
||||
echo "✓ PostgreSQL monitoring disabled"
|
||||
9
10_Postgres/pgdb-example.yaml
Normal file
9
10_Postgres/pgdb-example.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
apiVersion: postgresql.cnpg.io/v1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: cluster-example
|
||||
spec:
|
||||
instances: 3
|
||||
|
||||
storage:
|
||||
size: 1Gi
|
||||
9
11_storage_tests/foo-pv.yaml
Normal file
9
11_storage_tests/foo-pv.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: foo-pv
|
||||
spec:
|
||||
storageClassName: "longhorn"
|
||||
claimRef:
|
||||
name: foo-pvc
|
||||
namespace: foo
|
||||
0
11_storage_tests/foo-pvc.yaml
Normal file
0
11_storage_tests/foo-pvc.yaml
Normal file
@@ -9,7 +9,7 @@ spec:
|
||||
volumeMode: Filesystem
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Retain # Optionally, 'Delete' oder 'Recycle'
|
||||
persistentVolumeReclaimPolicy: Delete # Optionally, 'Delete' oder 'Recycle'
|
||||
storageClassName: longhorn # Verwende den Longhorn-StorageClass-Namen
|
||||
csi:
|
||||
driver: driver.longhorn.io # Der Longhorn CSI-Treiber
|
||||
|
||||
@@ -1,16 +1,42 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: foo
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: longhorn-nginx-pvc
|
||||
namespace: foo
|
||||
spec:
|
||||
storageClassName: longhorn # Die gleiche StorageClass wie im PV
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi # Die angeforderte Größe sollte mit der des PV übereinstimmen
|
||||
# volumeName: longhorn-test-pv # Der Name des PV, das für diesen PVC verwendet werden soll
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: longhorn-demo
|
||||
namespace: default
|
||||
namespace: foo
|
||||
spec:
|
||||
containers:
|
||||
- name: demo-container
|
||||
image: nginx:latest
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Mi"
|
||||
cpu: "250m"
|
||||
limits:
|
||||
memory: "128Mi"
|
||||
cpu: "500m"
|
||||
volumeMounts:
|
||||
- mountPath: /usr/share/nginx/html
|
||||
name: longhorn-volume
|
||||
volumes:
|
||||
- name: longhorn-volume
|
||||
persistentVolumeClaim:
|
||||
claimName: longhorn-test-pvc
|
||||
claimName: longhorn-nginx-pvc
|
||||
|
||||
4
12_Authentik/README.md
Normal file
4
12_Authentik/README.md
Normal file
@@ -0,0 +1,4 @@
|
||||
https://docs.goauthentik.io/install-config/install/kubernetes/#install-authentik-helm-chart
|
||||
|
||||
|
||||
https://nohup.no/posts/authentik-on-k8s/
|
||||
10
12_Authentik/authentik-pgdb.yaml
Normal file
10
12_Authentik/authentik-pgdb.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
apiVersion: postgresql.cnpg.io/v1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: authentik-pgdb
|
||||
namespace: authentik
|
||||
spec:
|
||||
instances: 3
|
||||
|
||||
storage:
|
||||
size: 1Gi
|
||||
71
12_Authentik/authentik-values.gomplate.yaml
Normal file
71
12_Authentik/authentik-values.gomplate.yaml
Normal file
@@ -0,0 +1,71 @@
|
||||
authentik:
|
||||
secret_key: "PleaseGenerateASecureKey"
|
||||
# This sends anonymous usage-data, stack traces on errors and
|
||||
# performance data to sentry.io, and is fully opt-in
|
||||
error_reporting:
|
||||
enabled: true
|
||||
postgresql:
|
||||
host: "your-cnpg-cluster-rw.namespace.svc.cluster.local"
|
||||
name: "authentik"
|
||||
port: 5432
|
||||
existingSecret: "authentik-credentials" # if you want to use a secret
|
||||
server:
|
||||
ingress:
|
||||
# Specify kubernetes ingress controller class name
|
||||
ingressClassName: nginx | traefik | kong
|
||||
enabled: true
|
||||
hosts:
|
||||
- authentik.domain.tld
|
||||
|
||||
# Disable the built-in PostgreSQL
|
||||
postgresql:
|
||||
enabled: false
|
||||
auth:
|
||||
password: "ThisIsNotASecurePassword" postgresql:
|
||||
host: "your-cnpg-cluster-rw.namespace.svc.cluster.local"
|
||||
name: "authentik"
|
||||
port: 5432
|
||||
existingSecret: "authentik-credentials" # if you want to use a secret
|
||||
server:
|
||||
ingress:
|
||||
# Specify kubernetes ingress controller class name
|
||||
ingressClassName: nginx | traefik | kong
|
||||
enabled: true postgresql:
|
||||
host: "your-cnpg-cluster-rw.namespace.svc.cluster.local"
|
||||
name: "authentik"
|
||||
port: 5432
|
||||
existingSecret: "authentik-credentials" # if you want to use a secret
|
||||
server:
|
||||
ingress:
|
||||
# Specify kubernetes ingress controller class name
|
||||
ingressClassName: nginx | traefik | kong
|
||||
enabled: true
|
||||
hosts:
|
||||
- authentik.domain.tld
|
||||
|
||||
# Disable the built-in PostgreSQL
|
||||
postgresql:
|
||||
enabled: false
|
||||
|
||||
hosts:
|
||||
- authentik.domain.tld
|
||||
|
||||
# Disable the built-in PostgreSQL
|
||||
postgresql:
|
||||
enabled: false
|
||||
postgresql:
|
||||
host: "your-cnpg-cluster-rw.namespace.svc.cluster.local"
|
||||
name: "authentik"
|
||||
port: 5432
|
||||
existingSecret: "authentik-credentials" # if you want to use a secret
|
||||
server:
|
||||
ingress:
|
||||
# Specify kubernetes ingress controller class name
|
||||
ingressClassName: nginx | traefik | kong
|
||||
enabled: true
|
||||
hosts:
|
||||
- authentik.domain.tld
|
||||
|
||||
# Disable the built-in PostgreSQL
|
||||
postgresql:
|
||||
enabled: false
|
||||
28
12_Authentik/justfile
Normal file
28
12_Authentik/justfile
Normal file
@@ -0,0 +1,28 @@
|
||||
set fallback := true
|
||||
|
||||
export AUTHENTIK_NAMESPACE := env("AUTHENTIK_NAMESPACE", "authentik")
|
||||
|
||||
[private]
|
||||
default:
|
||||
@just --list --unsorted --list-submodules
|
||||
|
||||
# Add Helm repository
|
||||
add-helm-repo:
|
||||
@helm repo add authentik https://charts.goauthentik.io
|
||||
@helm repo update
|
||||
|
||||
# Remove Helm repository
|
||||
remove-helm-repo:
|
||||
@helm repo remove authentik
|
||||
|
||||
|
||||
install:
|
||||
@just add-helm-repo
|
||||
@helm upgrade --cleanup-on-fail --install authentik authentik/authentik \
|
||||
-n ${AUTHENTIK_NAMESPACE} --create-namespace --wait \
|
||||
-f authentik-values.yaml
|
||||
|
||||
|
||||
uninstall:
|
||||
@helm uninstall authentik -n ${AUTHENTIK_NAMESPACE} --wait
|
||||
@kubectl delete namespace ${AUTHENTIK_NAMESPACE} --ignore-not-found
|
||||
@@ -2,3 +2,9 @@
|
||||
|
||||
helm install reloader stakater/reloader --namespace reloader --create-namespace
|
||||
|
||||
flux create source helm stakater --url https://stakater.github.io/stakater-charts --namespace reloader
|
||||
|
||||
flux create helmrelease my-reloader --chart stakater/reloader \
|
||||
--source HelmRepository/stakater \
|
||||
--chart-version 2.1.3 \
|
||||
--namespace reloader
|
||||
@@ -8,11 +8,12 @@ Zuerst solltest du sicherstellen, dass Longhorn auf deinem Cluster installiert i
|
||||
|
||||
#### Node Labeling
|
||||
|
||||
In the case not all nodes should provide disk
|
||||
In the case not all nodes should provide disk, e.g. certain nodes have special/fast disks.
|
||||
In this case the StorageClass needs to be adapted and added with a nodeselector [1].
|
||||
```
|
||||
k label nodes k3s-prod-worker-{1..3} node.longhorn.io/create-default-disk=true
|
||||
```
|
||||
|
||||
[1] https://longhorn.io/kb/tip-only-use-storage-on-a-set-of-nodes/
|
||||
|
||||
|
||||
#### Mit Helm:
|
||||
@@ -166,7 +167,8 @@ Mit diesen Schritten hast du ein Persistent Volume (PV) und einen Persistent Vol
|
||||
|
||||
|
||||
## Disable Localpath as default
|
||||
```
|
||||
kubectl get storageclass
|
||||
|
||||
kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
|
||||
|
||||
```
|
||||
1
Longhorn/auth
Normal file
1
Longhorn/auth
Normal file
@@ -0,0 +1 @@
|
||||
basti:$apr1$N23gJpBe$CYlDcwTfp8YsQMq0UcADQ0
|
||||
67
Longhorn/justfile
Normal file
67
Longhorn/justfile
Normal file
@@ -0,0 +1,67 @@
|
||||
set fallback:=true
|
||||
|
||||
export LONGHORN_NAMESPACE := env("LONGHORN_NAMESPACE","longhorn-system")
|
||||
export LONGHORN_VERSION := env("LONGHORN_VERSION","1.10.1")
|
||||
|
||||
add-helm-repo:
|
||||
helm repo add longhorn https://charts.longhorn.io --force-update
|
||||
helm repo update
|
||||
|
||||
# Delete namespace
|
||||
delete-namespace:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
if kubectl get namespace ${LONGHORN_NAMESPACE} &>/dev/null; then
|
||||
kubectl delete namespace ${LONGHORN_NAMESPACE} --ignore-not-found
|
||||
else
|
||||
echo "Namespace ${LONGHORN_NAMESPACE} does not exist."
|
||||
fi
|
||||
|
||||
|
||||
install:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
just add-helm-repo
|
||||
|
||||
helm upgrade longhorn longhorn/longhorn \
|
||||
--install \
|
||||
--cleanup-on-fail \
|
||||
--namespace ${LONGHORN_NAMESPACE} \
|
||||
--create-namespace \
|
||||
--version ${LONGHORN_VERSION} \
|
||||
--values longhorn-values.yaml
|
||||
|
||||
# remove default storage class annotation from local-path storage class
|
||||
kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
|
||||
|
||||
uninstall:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
for crd in $(kubectl get crd -o name | grep longhorn); do
|
||||
kubectl patch $crd -p '{"metadata":{"finalizers":[]}}' --type=merge
|
||||
done
|
||||
|
||||
kubectl -n ${LONGHORN_NAMESPACE} patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag || true
|
||||
|
||||
helm uninstall longhorn --namespace ${LONGHORN_NAMESPACE} || true
|
||||
just delete-namespace
|
||||
|
||||
|
||||
install-dashboard-ingress:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
echo "Deploying Longhorn Dashboard Ingress with EXTERNAL_DOMAIN=${EXTERNAL_DOMAIN}"
|
||||
gomplate -f longhorn-certificate-gomplate.yaml | kubectl apply -f -
|
||||
gomplate -f longhorn-ingressroute-gomplate.yaml | kubectl apply -f -
|
||||
|
||||
uninstall-dashboard-ingress:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
kubectl delete -f longhorn-ingressroute-gomplate.yaml || true
|
||||
kubectl delete -f longhorn-certificate-gomplate.yaml || true
|
||||
@@ -7,7 +7,7 @@ metadata:
|
||||
spec:
|
||||
secretName: longhorn-web-ui-tls
|
||||
dnsNames:
|
||||
- longhorn-dashboard.k8s.schnrbs.work
|
||||
- longhorn-dashboard.{{.Env.EXTERNAL_DOMAIN}}
|
||||
issuerRef:
|
||||
name: cloudflare-cluster-issuer
|
||||
kind: ClusterIssuer
|
||||
@@ -7,7 +7,7 @@ spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
- match: Host(`longhorn-dashboard.k8s.schnrbs.work`)
|
||||
- match: Host(`longhorn-dashboard.{{.Env.EXTERNAL_DOMAIN}}`)
|
||||
kind: Rule
|
||||
services:
|
||||
- name: longhorn-frontend
|
||||
@@ -1,18 +1,6 @@
|
||||
global:
|
||||
nodeSelector:
|
||||
node.longhorn.io/create-default-disk: "true"
|
||||
|
||||
service:
|
||||
ui:
|
||||
type: NodePort
|
||||
nodePort: 30050
|
||||
manager:
|
||||
type: ClusterIP
|
||||
|
||||
# Replica count for the default Longhorn StorageClass.
|
||||
persistence:
|
||||
defaultClass: false
|
||||
defaultFsType: ext4
|
||||
defaultClassReplicaCount: 2
|
||||
reclaimPolicy: Delete
|
||||
|
||||
@@ -25,12 +13,10 @@ csi:
|
||||
|
||||
# Default replica count and storage path
|
||||
defaultSettings:
|
||||
upgradeChecker: false
|
||||
kubernetesClusterAutoscalerEnabled: false
|
||||
allowCollectingLonghornUsageMetrics: false
|
||||
createDefaultDiskLabeledNodes: true
|
||||
defaultReplicaCount: 2
|
||||
defaultDataPath: "/k8s-data"
|
||||
# defaultDataPath: "/k8s-data"
|
||||
|
||||
longhornUI:
|
||||
replicas: 1
|
||||
40
Longhorn/pod_with_pvc.yaml
Normal file
40
Longhorn/pod_with_pvc.yaml
Normal file
@@ -0,0 +1,40 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: longhorn-volv-pvc
|
||||
namespace: default
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
storageClassName: longhorn
|
||||
resources:
|
||||
requests:
|
||||
storage: 2Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: volume-test
|
||||
namespace: default
|
||||
spec:
|
||||
restartPolicy: Always
|
||||
containers:
|
||||
- name: volume-test
|
||||
image: nginx:stable-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- ls
|
||||
- /data/lost+found
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
volumeMounts:
|
||||
- name: volv
|
||||
mountPath: /data
|
||||
ports:
|
||||
- containerPort: 80
|
||||
volumes:
|
||||
- name: volv
|
||||
persistentVolumeClaim:
|
||||
claimName: longhorn-volv-pvc
|
||||
File diff suppressed because it is too large
Load Diff
@@ -5,5 +5,4 @@ metadata:
|
||||
namespace: metallb-system
|
||||
spec:
|
||||
addresses:
|
||||
# - 192.168.178.220-192.168.178.225 #pve-82
|
||||
- 192.168.178.226-192.168.178.240 #pve-83
|
||||
- {{ .Env.METALLB_ADDRESS_RANGE }}
|
||||
66
Metallb_Setup/justfile
Normal file
66
Metallb_Setup/justfile
Normal file
@@ -0,0 +1,66 @@
|
||||
set fallback := true
|
||||
|
||||
export K8S_CONTEXT := env("K8S_CONTEXT", "")
|
||||
export SERVER_IP := env("K3S_SERVER_IP","192.168.178.45")
|
||||
export USER := env("K3S_USER","basti")
|
||||
|
||||
|
||||
[private]
|
||||
default:
|
||||
@just --list --unsorted --list-submodules
|
||||
|
||||
|
||||
install:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
METALLB_VERSION="v0.15.3"
|
||||
|
||||
username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
|
||||
context=""
|
||||
if gum confirm "Update KUBECONFIG?"; then
|
||||
context=$(
|
||||
gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
|
||||
)
|
||||
fi
|
||||
|
||||
if [ -n "${context}" ]; then
|
||||
kubectl config use-context "${context}"
|
||||
fi
|
||||
|
||||
kubectl apply -f "https://raw.githubusercontent.com/metallb/metallb/${METALLB_VERSION}/config/manifests/metallb-native.yaml"
|
||||
gum spin --spinner dot --title "Waiting for MetalLB to be ready..." -- kubectl wait --namespace metallb-system --for=condition=available deployment --all --timeout=120s
|
||||
echo "MetalLB ${METALLB_VERSION} installed successfully."
|
||||
|
||||
gomplate -f address-pool.gomplate.yaml | kubectl apply -f -
|
||||
echo "Address pool configured."
|
||||
|
||||
kubectl apply -f advertisement.yaml
|
||||
echo "Advertisement created."
|
||||
|
||||
uninstall:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
kubectl get namespace metallb-system &>/dev/null && kubectl delete ns metallb-system
|
||||
|
||||
test-deployment:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
kubectl apply -f test-deployment.yaml
|
||||
|
||||
echo "Test deployment created. You can check the service with 'kubectl get svc nginx -o wide -n test'."
|
||||
|
||||
echo "To clean up, run 'just test-deployment-cleanup'."
|
||||
|
||||
test-deployment-cleanup:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
kubectl delete -f test-deployment.yaml
|
||||
echo "Test deployment and service deleted."
|
||||
@@ -9,4 +9,4 @@ spec:
|
||||
name: cloudflare-cluster-issuer
|
||||
kind: ClusterIssuer
|
||||
dnsNames:
|
||||
- schnipo.k8s.schnrbs.work
|
||||
- schnipo.{{.Env.EXTERNAL_DOMAIN}}
|
||||
43
Test-Deployment/dishes-deployment.yaml
Normal file
43
Test-Deployment/dishes-deployment.yaml
Normal file
@@ -0,0 +1,43 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: dishes
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: dish-schnipo
|
||||
namespace: dishes
|
||||
labels:
|
||||
app: dishes
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: dishes
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: dishes
|
||||
spec:
|
||||
containers:
|
||||
- name: dish-schnipo
|
||||
image: bschnorbus/dish-schnipo
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: dish-schnipo
|
||||
namespace: dishes
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: dishes
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
@@ -7,10 +7,12 @@ spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
- match: Host(`schnipo.k8s.schnrbs.work`)
|
||||
- match: Host(`schnipo.{{.Env.EXTERNAL_DOMAIN}}`)
|
||||
kind: Rule
|
||||
services:
|
||||
- name: schnipo
|
||||
port: 8080
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
tls:
|
||||
secretName: schnipo-certificate-secret
|
||||
|
||||
37
Test-Deployment/justfile
Normal file
37
Test-Deployment/justfile
Normal file
@@ -0,0 +1,37 @@
|
||||
set fallback:=true
|
||||
|
||||
export EXTERNAL := env("EXTERNAL_DOMAIN", "")
|
||||
|
||||
install-nginx:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
if [ -z "${EXTERNAL}" ]; then
|
||||
echo "ERROR: EXTERNAL_DOMAIN environment variable is not set."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
kubectl apply -f nginx-deployment.yaml
|
||||
gomplate -f nginx-certificate-gomplate.yaml | kubectl apply -f -
|
||||
gomplate -f nginx-ingress-route-gomplate.yaml | kubectl apply -f -
|
||||
|
||||
install-dishes:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
if [ -z "${EXTERNAL}" ]; then
|
||||
echo "ERROR: EXTERNAL_DOMAIN environment variable is not set."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
kubectl apply -f dishes-deployment.yaml
|
||||
gomplate -f dishes-certificate-gomplate.yaml | kubectl apply -f -
|
||||
gomplate -f dishes-ingress-route-gomplate.yaml | kubectl apply -f -
|
||||
|
||||
remove-nginx:
|
||||
kubectl delete ns test || true
|
||||
|
||||
remove-dishes:
|
||||
kubectl delete ns dishes || true
|
||||
@@ -9,4 +9,4 @@ spec:
|
||||
name: cloudflare-cluster-issuer
|
||||
kind: ClusterIssuer
|
||||
dnsNames:
|
||||
- nginx-test.k8s.schnrbs.work
|
||||
- nginx-test.{{.Env.EXTERNAL_DOMAIN}}
|
||||
43
Test-Deployment/nginx-deployment.yaml
Normal file
43
Test-Deployment/nginx-deployment.yaml
Normal file
@@ -0,0 +1,43 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: test
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx
|
||||
namespace: test
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:latest
|
||||
ports:
|
||||
- containerPort: 80
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nginx
|
||||
namespace: test
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
selector:
|
||||
app: nginx
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
protocol: TCP
|
||||
@@ -7,7 +7,7 @@ spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
- match: Host(`nginx-test.k8s.schnrbs.work`)
|
||||
- match: Host(`nginx-test.{{.Env.EXTERNAL_DOMAIN}}`)
|
||||
kind: Rule
|
||||
services:
|
||||
- name: nginx
|
||||
@@ -7,7 +7,7 @@ metadata:
|
||||
traefik.ingress.kubernetes.io/router.entrypoints: websecure
|
||||
spec:
|
||||
rules:
|
||||
- host: nginx-test.k8s.schnrbs.work
|
||||
- host: nginx-test.int.schnrbs.work
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
@@ -19,5 +19,5 @@ spec:
|
||||
number: 80
|
||||
tls:
|
||||
- hosts:
|
||||
- nginx-test.k8s.schnrbs.work
|
||||
- nginx-test.int.schnrbs.work
|
||||
secretName: nginx-certificate-secret
|
||||
@@ -4,7 +4,7 @@
|
||||
helm repo add traefik https://helm.traefik.io/traefik
|
||||
|
||||
|
||||
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
|
||||
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
|
||||
|
||||
|
||||
## Cert-Manager
|
||||
@@ -24,13 +24,15 @@ i.e. general issuer for all namespaces in cluster.
|
||||
|
||||
|
||||
## Test Deployment
|
||||
```
|
||||
k create ns test
|
||||
kubectl create deploy nginx --image=nginx -n test
|
||||
k create svc -n test clusterip nginx --tcp=80
|
||||
k scale --replicas=3 deployment/nginx -n test
|
||||
|
||||
```
|
||||
|
||||
## Install Traefik & Cert-Manager
|
||||
```
|
||||
|
||||
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
|
||||
|
||||
@@ -40,23 +42,25 @@ helm repo add jetstack https://charts.jetstack.io --force-update
|
||||
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml
|
||||
|
||||
|
||||
k apply cert-manager-issuer-secret.yaml
|
||||
k apply -f cert-manager-issuer-secret.yaml
|
||||
k get secret -n cert-manager
|
||||
|
||||
k apply -f cert-manager-cluster-issuer.yaml
|
||||
```
|
||||
|
||||
|
||||
## Switch Test Deployment to https
|
||||
|
||||
```
|
||||
k apply -f test/nginx-certificate.yaml
|
||||
k apply -f test/nginx-ingress.yaml
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Troubleshooting steps
|
||||
|
||||
|
||||
|
||||
```
|
||||
k get po -n test -o wide
|
||||
k create svc -n test clusterip nginx
|
||||
k create svc -n test clusterip nginx --tcp=80
|
||||
@@ -70,12 +74,11 @@ k apply -f traefik_lempa/nginx-ingress.yaml
|
||||
k get svc -n test
|
||||
k get ingress
|
||||
k get ingress -n test
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
k get svc ingressRoute
|
||||
k get svc ingressRoutes
|
||||
k get svc ingressroutes.traefik.io
|
||||
@@ -90,3 +93,4 @@ k apply -f traefik_lempa/cert-manager-issuer-secret.yaml
|
||||
k get secret
|
||||
k get secrets
|
||||
k get clusterissuers.cert-manager.io
|
||||
```
|
||||
@@ -4,7 +4,7 @@ metadata:
|
||||
name: cloudflare-cluster-issuer
|
||||
spec:
|
||||
acme:
|
||||
email: hello@schnorbus.net
|
||||
email: {{ .Env.ACME_EMAIL }}
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
privateKeySecretRef:
|
||||
name: cloudflare-acme-key
|
||||
@@ -5,4 +5,4 @@ metadata:
|
||||
namespace: cert-manager
|
||||
type: Opaque
|
||||
stringData:
|
||||
api-token: DgU4SMUpQVAoS8IisGxnSQCUI7PbclhvegdqF9I1
|
||||
api-token: {{ .Env.CLOUDFLARE_API_TOKEN }}
|
||||
62
Traefik/justfile
Normal file
62
Traefik/justfile
Normal file
@@ -0,0 +1,62 @@
|
||||
set fallback:=true
|
||||
|
||||
export CERT_MANAGER_NAMESPACE := env("CERT_MANAGER_NAMESPACE", "cert-manager")
|
||||
export TRAEFIK_NAMESPACE := env("TRAEFIK_NAMESPACE", "traefik")
|
||||
|
||||
add-helm-repos:
|
||||
helm repo add traefik https://helm.traefik.io/traefik --force-update
|
||||
helm repo add jetstack https://charts.jetstack.io --force-update
|
||||
helm repo update
|
||||
|
||||
install:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
just add-helm-repos
|
||||
|
||||
helm upgrade traefik traefik/traefik \
|
||||
--install \
|
||||
--cleanup-on-fail \
|
||||
--namespace ${TRAEFIK_NAMESPACE} \
|
||||
--create-namespace \
|
||||
--values traefik-values.yaml
|
||||
|
||||
helm upgrade cert-manager jetstack/cert-manager \
|
||||
--install \
|
||||
--cleanup-on-fail \
|
||||
--namespace ${CERT_MANAGER_NAMESPACE} \
|
||||
--create-namespace \
|
||||
--values cert-manager-values.yaml
|
||||
|
||||
uninstall:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
helm uninstall traefik --namespace ${TRAEFIK_NAMESPACE} || true
|
||||
helm uninstall cert-manager --namespace ${CERT_MANAGER_NAMESPACE} || true
|
||||
|
||||
setup-cluster-issuer:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
gomplate -f cert-manager-issuer-secret-gomplate.yaml | kubectl apply -f -
|
||||
gomplate -f cert-manager-cluster-issuer-gomplate.yaml | kubectl apply -f -
|
||||
|
||||
# Get status of cert-manager components
|
||||
status:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
echo "=== cert-manager Components Status ==="
|
||||
echo ""
|
||||
echo "Namespace: ${CERT_MANAGER_NAMESPACE}"
|
||||
echo ""
|
||||
echo "Pods:"
|
||||
kubectl get pods -n ${CERT_MANAGER_NAMESPACE}
|
||||
echo ""
|
||||
echo "Services:"
|
||||
kubectl get services -n ${CERT_MANAGER_NAMESPACE}
|
||||
echo ""
|
||||
echo "CRDs:"
|
||||
kubectl get crd | grep cert-manager.io
|
||||
@@ -11,5 +11,5 @@ ingressRoute:
|
||||
dashboard:
|
||||
enabled: true
|
||||
entryPoints: [web, websecure]
|
||||
matchRule: Host(`traefik-dashboard.k8s.schnrbs.work`)
|
||||
matchRule: Host(`traefik-dashboard.{{ .Env.EXTERNAL_DOMAIN }}`)
|
||||
|
||||
647
VPA/justfile
Normal file
647
VPA/justfile
Normal file
@@ -0,0 +1,647 @@
|
||||
set fallback := true
|
||||
|
||||
export CNPG_NAMESPACE := env("CNPG_NAMESPACE", "postgres")
|
||||
export CNPG_CHART_VERSION := env("CNPG_CHART_VERSION", "0.26.1")
|
||||
export CNPG_CLUSTER_CHART_VERSION := env("CNPG_CLUSTER_CHART_VERSION", "0.3.1")
|
||||
export POSTGRES_STORAGE_SIZE := env("POSTGRES_STORAGE_SIZE", "20Gi")
|
||||
export POSTGRES_MAX_CONNECTIONS := env("POSTGRES_MAX_CONNECTIONS", "200")
|
||||
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
|
||||
export EXTERNAL_SECRETS_NAMESPACE := env("EXTERNAL_SECRETS_NAMESPACE", "external-secrets")
|
||||
|
||||
[private]
|
||||
default:
|
||||
@just --list --unsorted --list-submodules
|
||||
|
||||
# Add Helm repository
|
||||
add-helm-repo:
|
||||
@helm repo add autoscaler https://kubernetes.github.io/autoscaler
|
||||
@helm repo update
|
||||
|
||||
# Remove Helm repository
|
||||
remove-helm-repo:
|
||||
@helm repo remove autoscaler
|
||||
|
||||
# Install autoscaler
|
||||
install:
|
||||
@just install-cnpg
|
||||
@just create-cluster
|
||||
|
||||
# Uninstall CloudNativePG and delete the cluster
|
||||
uninstall:
|
||||
@just delete-cluster
|
||||
@just uninstall-cnpg
|
||||
|
||||
# Install CloudNativePG
|
||||
install-cnpg:
|
||||
@just add-helm-repo
|
||||
@helm upgrade --cleanup-on-fail --install cnpg cnpg/cloudnative-pg \
|
||||
--version ${CNPG_CHART_VERSION} \
|
||||
-n ${CNPG_NAMESPACE} --create-namespace --wait \
|
||||
-f cnpg-values.yaml
|
||||
|
||||
@kubectl label namespace ${CNPG_NAMESPACE} \
|
||||
pod-security.kubernetes.io/enforce=restricted --overwrite
|
||||
|
||||
# Uninstall CloudNativePG
|
||||
uninstall-cnpg:
|
||||
@helm uninstall cnpg -n ${CNPG_NAMESPACE} --wait
|
||||
@kubectl delete namespace ${CNPG_NAMESPACE} --ignore-not-found
|
||||
|
||||
# Create Postgres cluster
|
||||
create-cluster:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
if helm status external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} &>/dev/null; then
|
||||
echo "External Secrets Operator detected. Creating admin credentials via ExternalSecret..."
|
||||
password=$(just utils::random-password)
|
||||
just vault::put-root postgres/admin username=postgres password="${password}"
|
||||
|
||||
kubectl delete externalsecret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
|
||||
gomplate -f postgres-superuser-external-secret.gomplate.yaml | kubectl apply -f -
|
||||
|
||||
echo "Waiting for ExternalSecret to sync..."
|
||||
kubectl wait --for=condition=Ready externalsecret/postgres-cluster-superuser \
|
||||
-n ${CNPG_NAMESPACE} --timeout=60s
|
||||
else
|
||||
echo "External Secrets Operator not found. Creating superuser secret directly..."
|
||||
password=$(just utils::random-password)
|
||||
kubectl delete secret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
|
||||
kubectl create secret generic postgres-cluster-superuser -n ${CNPG_NAMESPACE} \
|
||||
--from-literal=username=postgres \
|
||||
--from-literal=password="${password}"
|
||||
|
||||
if helm status vault -n ${K8S_VAULT_NAMESPACE} &>/dev/null; then
|
||||
just vault::put-root postgres/admin username=postgres password="${password}"
|
||||
fi
|
||||
fi
|
||||
|
||||
gomplate -f postgres-cluster-values.gomplate.yaml -o postgres-cluster-values.yaml
|
||||
helm upgrade --install postgres-cluster cnpg/cluster \
|
||||
--version ${CNPG_CLUSTER_CHART_VERSION} \
|
||||
-n ${CNPG_NAMESPACE} --wait -f postgres-cluster-values.yaml
|
||||
|
||||
echo "Waiting for PostgreSQL cluster to be ready..."
|
||||
kubectl wait --for=condition=Ready clusters.postgresql.cnpg.io/postgres-cluster \
|
||||
-n ${CNPG_NAMESPACE} --timeout=300s
|
||||
|
||||
# Delete Postgres cluster
|
||||
delete-cluster:
|
||||
@helm uninstall postgres-cluster -n ${CNPG_NAMESPACE} --ignore-not-found --wait
|
||||
@kubectl delete externalsecret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
|
||||
@kubectl delete secret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
|
||||
|
||||
# Print Postgres username
|
||||
admin-username:
|
||||
@echo "postgres"
|
||||
|
||||
# Print the Postgres superuser password, decoded from the cluster secret.
admin-password:
    @kubectl get -n ${CNPG_NAMESPACE} secret postgres-cluster-superuser \
        -o jsonpath="{.data.password}" | base64 --decode
    @echo
|
||||
|
||||
# Create a Postgres database (prompts for the name when not supplied).
create-db db_name='':
    #!/bin/bash
    set -euo pipefail
    # DB_NAME env var wins over the recipe argument.
    DB_NAME=${DB_NAME:-{{ db_name }}}
    until [ -n "${DB_NAME}" ]; do
        DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    # Idempotent: creating an existing database is not an error.
    if just db-exists ${DB_NAME} &>/dev/null; then
        echo "Database ${DB_NAME} already exists" >&2
        exit
    fi
    echo "Creating database ${DB_NAME}..."
    just psql -c "\"CREATE DATABASE ${DB_NAME};\""
    echo "Database ${DB_NAME} created."
|
||||
|
||||
# Drop a Postgres database, forcibly disconnecting any remaining sessions first.
delete-db db_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-{{ db_name }}}
    if ! just db-exists ${DB_NAME} &>/dev/null; then
        echo "Database ${DB_NAME} does not exist." >&2
        exit
    fi
    # Terminate all connections to the database
    just psql -c "\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity
        WHERE datname = '${DB_NAME}' AND pid <> pg_backend_pid();\""
    # Force disconnect if needed: block new connections, then terminate again.
    just psql -c "\"UPDATE pg_database SET datallowconn = false WHERE datname = '${DB_NAME}';\""
    just psql -c "\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity
        WHERE datname = '${DB_NAME}';\""
    just psql -c "\"DROP DATABASE ${DB_NAME};\""
    echo "Database ${DB_NAME} deleted."
|
||||
|
||||
# Check if database exists (exit 0 when found, 1 otherwise).
[no-exit-message]
db-exists db_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-{{ db_name }}}
    until [ -n "${DB_NAME}" ]; do
        DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    # Scan the \l listing for a row starting with the database name.
    if echo '\l' | just postgres::psql | grep -E "^ *${DB_NAME} *\|" &>/dev/null; then
        echo "Database ${DB_NAME} exists."
    else
        echo "Database ${DB_NAME} does not exist." >&2
        exit 1
    fi
|
||||
|
||||
# Create a Postgres login role; prompts for missing values and can generate
# a random password when none is given.
create-user username='' password='':
    #!/bin/bash
    set -euo pipefail
    USERNAME=${USERNAME:-"{{ username }}"}
    PASSWORD=${PASSWORD:-"{{ password }}"}
    while [ -z "${USERNAME}" ]; do
        USERNAME=$(gum input --prompt="Username: " --width=100)
    done
    # Idempotent: an existing user is reported, not treated as a failure.
    if just user-exists ${USERNAME} &>/dev/null; then
        echo "User ${USERNAME} already exists" >&2
        exit
    fi
    if [ -z "${PASSWORD}" ]; then
        PASSWORD=$(gum input --prompt="Password: " --password --width=100 \
            --placeholder="Empty to generate a random password")
    fi
    if [ -z "${PASSWORD}" ]; then
        # FIX: the recipe lives in the utils module — call it with the module
        # path, consistent with change-password and the superuser setup
        # (a bare `just random-password` does not resolve from this module).
        PASSWORD=$(just utils::random-password)
        echo "Generated random password: ${PASSWORD}"
    fi
    just psql -c "\"CREATE USER ${USERNAME} WITH LOGIN PASSWORD '${PASSWORD}';\""
    echo "User ${USERNAME} created."
|
||||
|
||||
# Drop a Postgres role. Default privileges granted to the role must be
# revoked first, otherwise DROP USER fails with dependency errors.
delete-user username='':
    #!/bin/bash
    set -euo pipefail
    USERNAME=${USERNAME:-"{{ username }}"}
    if ! just user-exists ${USERNAME} &>/dev/null; then
        echo "User ${USERNAME} does not exist." >&2
        exit
    fi
    # Strip default privileges on every object class in the public schema.
    just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON TABLES FROM ${USERNAME};\""
    just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON SEQUENCES FROM ${USERNAME};\""
    just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON FUNCTIONS FROM ${USERNAME};\""
    just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON TYPES FROM ${USERNAME};\""
    # Hand the schema back to postgres in case the user owned it.
    just psql -c "\"ALTER SCHEMA public OWNER TO postgres;\""
    just psql -c "\"DROP USER ${USERNAME};\""
    echo "User ${USERNAME} deleted."
|
||||
|
||||
# Check if user exists (exit 0 when found, 1 otherwise).
[no-exit-message]
user-exists username='':
    #!/bin/bash
    set -euo pipefail
    USERNAME=${USERNAME:-"{{ username }}"}
    until [ -n "${USERNAME}" ]; do
        USERNAME=$(gum input --prompt="Username: " --width=100)
    done
    # Scan the \du role listing for a row starting with the username.
    if echo '\du' | just postgres::psql | grep -E "^ *${USERNAME} *\|" &>/dev/null; then
        echo "User ${USERNAME} exists."
    else
        echo "User ${USERNAME} does not exist." >&2
        exit 1
    fi
|
||||
|
||||
# Change a role's password; prompts for missing values and can generate a
# random password when the prompt is left empty.
change-password username='' password='':
    #!/bin/bash
    set -euo pipefail
    USERNAME=${USERNAME:-"{{ username }}"}
    PASSWORD=${PASSWORD:-"{{ password }}"}
    until [ -n "${USERNAME}" ]; do
        USERNAME=$(gum input --prompt="Username: " --width=100)
    done
    if ! just user-exists ${USERNAME} &>/dev/null; then
        echo "User ${USERNAME} does not exist." >&2
        exit 1
    fi
    if [ -z "${PASSWORD}" ]; then
        PASSWORD=$(gum input --prompt="New password: " --password --width=100 \
            --placeholder="Empty to generate a random password")
    fi
    if [ -z "${PASSWORD}" ]; then
        PASSWORD=$(just utils::random-password)
        echo "Generated random password: ${PASSWORD}"
    fi
    just psql -c "\"ALTER USER ${USERNAME} WITH PASSWORD '${PASSWORD}';\""
    echo "Password changed for user ${USERNAME}."
|
||||
|
||||
# Grant all privileges on a database (plus CREATE on its public schema) to a user.
grant db_name='' username='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    USERNAME=${USERNAME:-"{{ username }}"}
    until [ -n "${DB_NAME}" ]; do
        DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    until [ -n "${USERNAME}" ]; do
        USERNAME=$(gum input --prompt="Username: " --width=100)
    done
    # Probe the database with a trivial query to confirm it exists.
    if ! just psql ${DB_NAME} -U postgres -P pager=off -c "\"SELECT 1;\""; then
        echo "Database ${DB_NAME} does not exist." >&2
        exit 1
    fi
    just psql -c "\"GRANT ALL PRIVILEGES ON DATABASE ${DB_NAME} TO ${USERNAME};\""
    # Grant CREATE permission on public schema (needed for PostgreSQL 15+)
    just psql -d ${DB_NAME} -c "\"GRANT CREATE ON SCHEMA public TO ${USERNAME};\""
    echo "Privileges granted."
|
||||
|
||||
# Revoke all privileges on a database from a user.
revoke db_name='' username='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    USERNAME=${USERNAME:-"{{ username }}"}
    until [ -n "${DB_NAME}" ]; do
        DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    until [ -n "${USERNAME}" ]; do
        USERNAME=$(gum input --prompt="Username: " --width=100)
    done
    # Probe the database with a trivial query to confirm it exists.
    if ! just psql -U postgres ${DB_NAME} -P pager=off -c "\"SELECT 1;\""; then
        echo "Database ${DB_NAME} does not exist." >&2
        exit 1
    fi
    just psql -c "\"REVOKE ALL PRIVILEGES ON DATABASE ${DB_NAME} FROM ${USERNAME};\""
    echo "Privileges revoked."
|
||||
|
||||
# Convenience wrapper: create a database, create a user, grant the user
# full access to that database.
create-user-and-db username='' db_name='' password='':
    @just create-db "{{ db_name }}"
    @just create-user "{{ username }}" "{{ password }}"
    @just grant "{{ db_name }}" "{{ username }}"
|
||||
|
||||
# Convenience wrapper: revoke, drop the database, then drop the user —
# tolerating whichever of the two no longer exists.
delete-user-and-db username='' db_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    USERNAME=${USERNAME:-"{{ username }}"}
    if just db-exists ${DB_NAME} &>/dev/null; then
        # Revoke only when both sides of the grant still exist.
        if just user-exists ${USERNAME} &>/dev/null; then
            just revoke "${DB_NAME}" "${USERNAME}"
        else
            echo "User ${USERNAME} does not exist, skipping revoke."
        fi
        just delete-db "${DB_NAME}"
    else
        echo "Database ${DB_NAME} does not exist, skipping database deletion."
    fi
    if just user-exists ${USERNAME} &>/dev/null; then
        just delete-user "${USERNAME}"
    else
        echo "User ${USERNAME} does not exist, skipping user deletion."
    fi
    echo "Cleanup completed."
|
||||
|
||||
# Create a logical replication slot for CDC (idempotent: exits 0 if present).
create-replication-slot slot_name='' db_name='postgres' plugin='pgoutput':
    #!/bin/bash
    set -euo pipefail
    SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    PLUGIN=${PLUGIN:-"{{ plugin }}"}
    until [ -n "${SLOT_NAME}" ]; do
        SLOT_NAME=$(gum input --prompt="Replication slot name: " --width=100 \
            --placeholder="e.g., airbyte_slot")
    done
    # Slots are cluster-wide; query the catalog before creating.
    if kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d ${DB_NAME} -tAc \
        "SELECT slot_name FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" | grep -q "${SLOT_NAME}"; then
        echo "Replication slot '${SLOT_NAME}' already exists."
        exit 0
    fi
    echo "Creating replication slot '${SLOT_NAME}' with plugin '${PLUGIN}'..."
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d ${DB_NAME} -c \
        "SELECT pg_create_logical_replication_slot('${SLOT_NAME}', '${PLUGIN}');"
    echo "Replication slot '${SLOT_NAME}' created."
|
||||
|
||||
# Drop a replication slot (exits 1 if the slot does not exist).
delete-replication-slot slot_name='' db_name='postgres':
    #!/bin/bash
    set -euo pipefail
    SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    until [ -n "${SLOT_NAME}" ]; do
        SLOT_NAME=$(gum input --prompt="Replication slot name to delete: " --width=100)
    done
    if ! kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d ${DB_NAME} -tAc \
        "SELECT slot_name FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" | grep -q "${SLOT_NAME}"; then
        echo "Replication slot '${SLOT_NAME}' does not exist."
        exit 1
    fi
    echo "Deleting replication slot '${SLOT_NAME}'..."
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d ${DB_NAME} -c \
        "SELECT pg_drop_replication_slot('${SLOT_NAME}');"
    echo "Replication slot '${SLOT_NAME}' deleted."
|
||||
|
||||
# Show every replication slot in the cluster with its plugin and activity state.
list-replication-slots:
    @echo "Replication slots:"
    @kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d postgres -c \
        "SELECT slot_name, plugin, slot_type, database, active, restart_lsn FROM pg_replication_slots;"
|
||||
|
||||
# Create a publication for CDC. Table selection is interactive when neither
# the `tables` argument nor TABLES is provided; pass "ALL" for all tables.
create-publication pub_name='' db_name='' tables='':
    #!/bin/bash
    set -euo pipefail
    PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    TABLES="${TABLES:-{{ tables }}}"
    until [ -n "${PUB_NAME}" ]; do
        PUB_NAME=$(gum input --prompt="Publication name: " --width=100 \
            --placeholder="e.g., airbyte_publication")
    done
    until [ -n "${DB_NAME}" ]; do
        DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    # Idempotent: an existing publication is reported and we exit cleanly.
    if kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d ${DB_NAME} -tAc \
        "SELECT pubname FROM pg_publication WHERE pubname = '${PUB_NAME}';" | grep -q "${PUB_NAME}"; then
        echo "Publication '${PUB_NAME}' already exists in database '${DB_NAME}'."
        exit 0
    fi
    if [ -z "${TABLES}" ]; then
        echo "Select tables to include in publication:"
        echo "1) All tables (ALL TABLES)"
        echo "2) All user tables (exclude system/internal tables)"
        echo "3) Specific tables (comma-separated list)"
        CHOICE=$(gum choose "All tables" "User tables only" "Specific tables")
        case "${CHOICE}" in
            "All tables")
                TABLES="ALL TABLES"
                ;;
            "User tables only")
                # Get list of user tables (excluding _airbyte* and other system tables)
                USER_TABLES=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
                    psql -U postgres -d ${DB_NAME} -tAc \
                    "SELECT string_agg(tablename, ', ') FROM pg_tables
                     WHERE schemaname = 'public'
                     AND tablename NOT LIKE '\_%'
                     AND tablename NOT LIKE 'pg_%';")
                if [ -z "${USER_TABLES}" ]; then
                    echo "No user tables found in database '${DB_NAME}'"
                    exit 1
                fi
                TABLES="TABLE ${USER_TABLES}"
                echo "Including tables: ${USER_TABLES}"
                ;;
            "Specific tables")
                TABLES=$(gum input --prompt="Enter table names (comma-separated): " --width=100 \
                    --placeholder="e.g., users, products, orders")
                TABLES="TABLE ${TABLES}"
                ;;
        esac
    elif [ "${TABLES}" = "ALL" ]; then
        TABLES="ALL TABLES"
    fi
    echo "Creating publication '${PUB_NAME}' in database '${DB_NAME}'..."
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d ${DB_NAME} -c \
        "CREATE PUBLICATION ${PUB_NAME} FOR ${TABLES};"
    # For an explicit table list, set REPLICA IDENTITY FULL so UPDATE/DELETE
    # rows carry old values; errors are tolerated (e.g. table name typos).
    if [ "${TABLES}" != "ALL TABLES" ]; then
        echo "Setting REPLICA IDENTITY for included tables..."
        TABLE_LIST=$(echo "${TABLES}" | sed 's/TABLE //')
        IFS=',' read -ra TABLE_ARRAY <<< "${TABLE_LIST}"
        for table in "${TABLE_ARRAY[@]}"; do
            table=$(echo "$table" | xargs) # trim whitespace
            kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
                psql -U postgres -d ${DB_NAME} -c \
                "ALTER TABLE ${table} REPLICA IDENTITY FULL;" 2>/dev/null || true
        done
    fi
    echo "Publication '${PUB_NAME}' created."
|
||||
|
||||
# Drop a publication from a database (exits 1 when it does not exist).
delete-publication pub_name='' db_name='':
    #!/bin/bash
    set -euo pipefail
    PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    until [ -n "${PUB_NAME}" ]; do
        PUB_NAME=$(gum input --prompt="Publication name to delete: " --width=100)
    done
    until [ -n "${DB_NAME}" ]; do
        DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    if ! kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d ${DB_NAME} -tAc \
        "SELECT pubname FROM pg_publication WHERE pubname = '${PUB_NAME}';" | grep -q "${PUB_NAME}"; then
        echo "Publication '${PUB_NAME}' does not exist in database '${DB_NAME}'."
        exit 1
    fi
    echo "Deleting publication '${PUB_NAME}' from database '${DB_NAME}'..."
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d ${DB_NAME} -c \
        "DROP PUBLICATION ${PUB_NAME};"
    echo "Publication '${PUB_NAME}' deleted."
|
||||
|
||||
# List all publications in a database, with their operation flags.
list-publications db_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    until [ -n "${DB_NAME}" ]; do
        DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    echo "Publications in database '${DB_NAME}':"
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d ${DB_NAME} -c \
        "SELECT pubname, puballtables, pubinsert, pubupdate, pubdelete FROM pg_publication;"
|
||||
|
||||
# Grant everything a CDC consumer needs: the REPLICATION attribute, schema
# usage, SELECT on all (current and future) public tables, and — where the
# server supports it — the pg_read_all_data role.
grant-cdc-privileges username='' db_name='':
    #!/bin/bash
    set -euo pipefail
    USERNAME=${USERNAME:-"{{ username }}"}
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    until [ -n "${USERNAME}" ]; do
        USERNAME=$(gum input --prompt="Username to grant CDC privileges: " --width=100)
    done
    until [ -n "${DB_NAME}" ]; do
        DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    echo "Granting CDC privileges to user '${USERNAME}' on database '${DB_NAME}'..."
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d ${DB_NAME} -c "ALTER USER ${USERNAME} WITH REPLICATION;"
    echo "Granting schema and table privileges..."
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d ${DB_NAME} -c \
        "GRANT USAGE ON SCHEMA public TO ${USERNAME};
         GRANT CREATE ON SCHEMA public TO ${USERNAME};
         GRANT SELECT ON ALL TABLES IN SCHEMA public TO ${USERNAME};
         ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO ${USERNAME};"
    echo "Granting pg_read_all_data role..."
    # pg_read_all_data only exists on PostgreSQL 14+; tolerate failure elsewhere.
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d ${DB_NAME} -c "GRANT pg_read_all_data TO ${USERNAME};" 2>/dev/null || true
    echo "CDC privileges granted to user '${USERNAME}'"
|
||||
|
||||
# One-shot CDC setup: verify wal_level=logical, create the replication slot
# and publication, and optionally grant CDC privileges to a user.
setup-cdc db_name='' slot_name='' pub_name='' username='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
    PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
    USERNAME=${USERNAME:-"{{ username }}"}
    until [ -n "${DB_NAME}" ]; do
        DB_NAME=$(gum input --prompt="Database name for CDC setup: " --width=100)
    done
    until [ -n "${SLOT_NAME}" ]; do
        SLOT_NAME=$(gum input --prompt="Replication slot name: " --width=100 \
            --placeholder="e.g., demo_slot")
    done
    until [ -n "${PUB_NAME}" ]; do
        PUB_NAME=$(gum input --prompt="Publication name: " --width=100 \
            --placeholder="e.g., demo_pub")
    done
    echo "Setting up CDC on database '${DB_NAME}'..."
    # Logical decoding requires wal_level=logical; bail out early otherwise.
    WAL_LEVEL=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d postgres -tAc "SHOW wal_level;")
    if [ "${WAL_LEVEL}" != "logical" ]; then
        echo "WARNING: wal_level is '${WAL_LEVEL}', should be 'logical' for CDC"
        echo "Please ensure PostgreSQL is configured with wal_level=logical"
        exit 1
    fi
    just create-replication-slot "${SLOT_NAME}" "${DB_NAME}"
    just create-publication "${PUB_NAME}" "${DB_NAME}"
    if [ -n "${USERNAME}" ]; then
        echo ""
        just grant-cdc-privileges "${USERNAME}" "${DB_NAME}"
    fi
    echo ""
    echo "CDC setup completed for database '${DB_NAME}'"
    echo "  Replication Method: Logical Replication (CDC)"
    echo "  Replication Slot: ${SLOT_NAME}"
    echo "  Publication: ${PUB_NAME}"
    if [ -n "${USERNAME}" ]; then
        echo "  User with CDC privileges: ${USERNAME}"
    fi
|
||||
|
||||
# Undo a CDC setup: drop the replication slot, then the publication, warning
# (and asking for confirmation) when the slot still has an active consumer.
cleanup-cdc db_name='' slot_name='' pub_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
    PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}

    until [ -n "${DB_NAME}" ]; do
        DB_NAME=$(gum input --prompt="Database name for CDC cleanup: " --width=100)
    done
    until [ -n "${SLOT_NAME}" ]; do
        SLOT_NAME=$(gum input --prompt="Replication slot name to delete: " --width=100 \
            --placeholder="e.g., demo_slot")
    done
    until [ -n "${PUB_NAME}" ]; do
        PUB_NAME=$(gum input --prompt="Publication name to delete: " --width=100 \
            --placeholder="e.g., demo_pub")
    done
    echo "Cleaning up CDC configuration for database '${DB_NAME}'..."

    # Check if slot is active
    SLOT_ACTIVE=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d postgres -tAc \
        "SELECT active FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" 2>/dev/null || echo "")
    if [ "${SLOT_ACTIVE}" = "t" ]; then
        echo "WARNING: Replication slot '${SLOT_NAME}' is currently active!"
        echo "Please stop any active replication connections first."
        if ! gum confirm "Proceed with deletion anyway?"; then
            echo "Cleanup cancelled"
            exit 1
        fi
    fi

    # Delete in correct order: Slot first, then Publication
    echo "Step 1: Deleting replication slot '${SLOT_NAME}'..."
    just delete-replication-slot "${SLOT_NAME}" "${DB_NAME}" || \
        echo "Replication slot '${SLOT_NAME}' not found or already deleted"

    echo "Step 2: Deleting publication '${PUB_NAME}'..."
    just delete-publication "${PUB_NAME}" "${DB_NAME}" || \
        echo "Publication '${PUB_NAME}' not found or already deleted"

    echo "CDC cleanup completed for database '${DB_NAME}'"
|
||||
|
||||
# Open psql in the primary pod, passing any extra arguments straight through.
[no-exit-message]
psql *args='':
    @kubectl exec -it -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- psql {{ args }}
|
||||
|
||||
# Dump a database with pg_dump (custom format) to a local file.
# `exclude_tables` is a comma-separated list passed as --exclude-table flags.
[no-cd]
dump db_name file exclude_tables='':
    #!/bin/bash
    set -euo pipefail

    # Custom-format archive, suitable for pg_restore.
    DUMP_OPTIONS="-Fc"
    if [ -n "{{ exclude_tables }}" ]; then
        IFS=',' read -ra TABLES <<< "{{ exclude_tables }}"
        for table in "${TABLES[@]}"; do
            DUMP_OPTIONS="$DUMP_OPTIONS --exclude-table=$table"
        done
    fi

    # Dump inside the pod, copy the archive out, then remove the temp file.
    kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- bash -c \
        "pg_dump -d postgresql://$(just postgres::admin-username):$(just postgres::admin-password)@localhost/{{ db_name }} $DUMP_OPTIONS > \
        /var/lib/postgresql/data/db.dump"
    kubectl cp -n ${CNPG_NAMESPACE} -c postgres \
        postgres-cluster-1:/var/lib/postgresql/data/db.dump {{ file }}
    kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- rm /var/lib/postgresql/data/db.dump
|
||||
|
||||
# Restore a database from a local pg_dump archive: create the target DB if
# needed, copy the archive into the pod, and run pg_restore --clean.
[no-cd]
restore db_name file:
    just postgres::create-db {{ db_name }}
    kubectl cp {{ file }} -n ${CNPG_NAMESPACE} -c postgres \
        postgres-cluster-1:/var/lib/postgresql/data/db.dump
    kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- bash -c \
        "pg_restore --clean --if-exists \
        -d postgresql://$(just postgres::admin-username):$(just postgres::admin-password)@localhost/{{ db_name }} \
        /var/lib/postgresql/data/db.dump"
|
||||
|
||||
# Turn on the CNPG-managed PodMonitor and label it so the
# kube-prometheus-stack Prometheus instance picks it up.
enable-monitoring:
    #!/bin/bash
    set -euo pipefail
    echo "Enabling Prometheus PodMonitor for PostgreSQL cluster..."

    # Label namespace to enable monitoring
    kubectl label namespace ${CNPG_NAMESPACE} buun.channel/enable-monitoring=true --overwrite

    # Enable PodMonitor
    kubectl patch cluster postgres-cluster -n ${CNPG_NAMESPACE} --type=merge -p '{"spec":{"monitoring":{"enablePodMonitor":true}}}'

    echo "Waiting for PodMonitor to be created..."
    sleep 3

    # Add release label to PodMonitor
    kubectl label podmonitor postgres-cluster -n ${CNPG_NAMESPACE} release=kube-prometheus-stack --overwrite

    kubectl get podmonitor -n ${CNPG_NAMESPACE} -l cnpg.io/cluster=postgres-cluster
    echo "✓ PostgreSQL monitoring enabled"
|
||||
|
||||
# Turn the PodMonitor back off and remove the namespace monitoring label.
disable-monitoring:
    #!/bin/bash
    set -euo pipefail
    echo "Disabling Prometheus PodMonitor for PostgreSQL cluster..."

    # Disable PodMonitor
    kubectl patch cluster postgres-cluster -n ${CNPG_NAMESPACE} --type=merge -p '{"spec":{"monitoring":{"enablePodMonitor":false}}}'

    # Remove namespace label
    kubectl label namespace ${CNPG_NAMESPACE} buun.channel/enable-monitoring- --ignore-not-found

    echo "✓ PostgreSQL monitoring disabled"
|
||||
11
env/env.local.gomplate
vendored
Normal file
11
env/env.local.gomplate
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
# shellcheck disable=all
K8S_CONTEXT={{ .Env.K8S_CONTEXT }}
K8S_MASTER_NODE_NAME={{ .Env.K8S_MASTER_NODE_NAME }}
SERVER_IP={{ .Env.SERVER_IP }}
AGENT_IP={{ .Env.AGENT_IP }}
METALLB_ADDRESS_RANGE={{ .Env.METALLB_ADDRESS_RANGE }}
CLOUDFLARE_API_TOKEN={{ .Env.CLOUDFLARE_API_TOKEN }}
ACME_EMAIL={{ .Env.ACME_EMAIL }}
EXTERNAL_DOMAIN={{ .Env.EXTERNAL_DOMAIN }}
VAULT_HOST={{ .Env.VAULT_HOST }}
AUTHENTIK_HOST={{ .Env.AUTHENTIK_HOST }}
|
||||
144
env/justfile
vendored
Normal file
144
env/justfile
vendored
Normal file
@@ -0,0 +1,144 @@
|
||||
set fallback := true
|
||||
|
||||
export ENV_FILE := ".env.local"
|
||||
export K8S_CONTEXT := env("K8S_CONTEXT", "")
|
||||
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
|
||||
export SERVER_IP := env("SERVER_IP", "")
|
||||
export AGENT_IP := env("AGENT_IP", "")
|
||||
|
||||
check:
    #!/bin/bash
    set -euo pipefail
    # Verify that every required environment setting is present; each is
    # exported by this justfile (empty when not configured yet).
    for var in K8S_CONTEXT K8S_MASTER_NODE_NAME SERVER_IP AGENT_IP; do
        if [ -z "${!var}" ]; then
            echo "${var} is not set. Please execute 'just env::setup'" >&2
            exit 1
        fi
    done
|
||||
|
||||
# Interactive environment bootstrap: prompt for every setting that is still
# empty and render .env.local from the gomplate template.
setup:
    #!/bin/bash
    set -euo pipefail
    # FIX: only K8S_CONTEXT / K8S_MASTER_NODE_NAME / SERVER_IP / AGENT_IP are
    # exported by this justfile. The remaining settings may be completely
    # unset, and under `set -u` referencing them aborts the script with an
    # "unbound variable" error — give them empty defaults up front.
    METALLB_ADDRESS_RANGE=${METALLB_ADDRESS_RANGE:-}
    CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN:-}
    ACME_EMAIL=${ACME_EMAIL:-}
    EXTERNAL_DOMAIN=${EXTERNAL_DOMAIN:-}
    VAULT_HOST=${VAULT_HOST:-}
    AUTHENTIK_HOST=${AUTHENTIK_HOST:-}
    if [ -f ../.env.local ]; then
        echo ".env.local already exists." >&2
        if gum confirm "Do you want to overwrite it?"; then
            # Clear the exported values so the prompts below run again.
            # FIX: K8S_MASTER_NODE_NAME was previously left stale here,
            # inconsistent with the other three exported settings.
            K8S_CONTEXT=""
            K8S_MASTER_NODE_NAME=""
            SERVER_IP=""
            AGENT_IP=""
        elif [[ $? -eq 130 ]]; then
            # gum exits 130 when the user pressed Ctrl-C / Esc.
            echo "Setup cancelled by user." >&2
            exit 1
        else
            echo "Aborting setup." >&2
            exit 1
        fi
    fi
    while [ -z "${K8S_CONTEXT}" ]; do
        if ! K8S_CONTEXT=$(
            gum input --prompt="Context name: " \
                --width=100 --placeholder="context"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${K8S_MASTER_NODE_NAME}" ]; do
        if ! K8S_MASTER_NODE_NAME=$(
            gum input --prompt="Master Node Hostname: " \
                --width=100 --placeholder="Master Node Name"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${SERVER_IP}" ]; do
        if ! SERVER_IP=$(
            gum input --prompt="IP of Server/Master Node: " \
                --width=100 --placeholder="Master Node IP"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${AGENT_IP}" ]; do
        if ! AGENT_IP=$(
            gum input --prompt="IP of Agent Node: " \
                --width=100 --placeholder="Agent Node IP"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${METALLB_ADDRESS_RANGE}" ]; do
        if ! METALLB_ADDRESS_RANGE=$(
            gum input --prompt="IP Range for LoadBalancer: " \
                --width=100 --placeholder="[x.x.x.x-y.y.y.y]"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${CLOUDFLARE_API_TOKEN}" ]; do
        if ! CLOUDFLARE_API_TOKEN=$(
            gum input --prompt="Cloudflare API Token: " \
                --width=100 --placeholder="API Token" --password
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${ACME_EMAIL}" ]; do
        if ! ACME_EMAIL=$(
            gum input --prompt="ACME Email for Cert-Manager: " \
                --width=100 --placeholder="Email"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${EXTERNAL_DOMAIN}" ]; do
        if ! EXTERNAL_DOMAIN=$(
            gum input --prompt="External Domain: " \
                --width=100 --placeholder="Domain"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${VAULT_HOST}" ]; do
        if ! VAULT_HOST=$(
            gum input --prompt="Vault hostname: " \
                --width=100 --placeholder="vault"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${AUTHENTIK_HOST}" ]; do
        if ! AUTHENTIK_HOST=$(
            gum input --prompt="Authentik hostname: " \
                --width=100 --placeholder="authentik"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done

    echo "Generating .env.local file..."
    rm -f ../.env.local
    # gomplate reads the collected values from the environment of this shell.
    export METALLB_ADDRESS_RANGE CLOUDFLARE_API_TOKEN ACME_EMAIL \
        EXTERNAL_DOMAIN VAULT_HOST AUTHENTIK_HOST
    gomplate -f env.local.gomplate -o ../.env.local
|
||||
@@ -2,5 +2,16 @@ https://www.reddit.com/r/GitOps/comments/1ih3b4a/discussion_setting_up_fluxcd_on
|
||||
|
||||
https://bash.ghost.io/k8s-home-lab-gitops-with-fluxcd/
|
||||
|
||||
# Setup using internal Gitea server
|
||||
## Create a Gitea personal access token and export it as an env var
|
||||
```
|
||||
export GITEA_TOKEN=<my-token>
|
||||
```
|
||||
## Bootstrap
|
||||
```
|
||||
flux bootstrap gitea --repository=k3s-homelab --branch=main --personal --owner baschno --hostname gitty.homeee.schnorbus.net --ssh-hostname=gitty.fritz.box:2221 --verbose --path=./clusters/homelab
|
||||
```
|
||||
|
||||
https://bash.ghost.io/secure-kubernetes-secrets-disaster-recovery-with-sops-gitops-fluxcd/
|
||||
|
||||
"Make a 4×4 grid starting with the 1880s. In each section, I should appear styled according to that decade (clothing, hairstyle, facial hair, accessories). Use colors, background, & film style accordingly."
|
||||
17
justfile
Normal file
17
justfile
Normal file
@@ -0,0 +1,17 @@
|
||||
# Root justfile: loads .env.local and wires the per-component sub-modules.
set dotenv-filename := ".env.local"

# Make locally installed node binaries available to recipes.
export PATH := "./node_modules/.bin:" + env_var('PATH')

# Default action: show the recipe catalogue, including sub-modules.
[private]
default:
    @just --list --unsorted --list-submodules

mod env
mod BasicSetup '01_Basic_Setup'
mod MetalLbSetup 'Metallb_Setup'
mod Traefik
mod Longhorn
mod Vault '08_Vault'
mod ExternalSecrets '09_ExternalSecrets'
mod Postgres '10_Postgres'
mod KubePrometheusStack '07_KubePrometheusStack'
|
||||
@@ -1,25 +0,0 @@
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: longhorn-web-ui
|
||||
namespace: longhorn-system
|
||||
annotations:
|
||||
traefik.ingress.kubernetes.io/router.entrypoints: websecure
|
||||
spec:
|
||||
rules:
|
||||
- host: longhorn.k8s.internal.schnrbs.work
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: longhorn-frontend
|
||||
port:
|
||||
number: 80
|
||||
tls:
|
||||
- hosts:
|
||||
- longhorn.k8s.internal.schnrbs.work
|
||||
secretName: longhorn-web-ui-tls
|
||||
|
||||
8
mise.toml
Normal file
8
mise.toml
Normal file
@@ -0,0 +1,8 @@
|
||||
[tools]
|
||||
jq = '1.8.1'
|
||||
k3sup = '0.13.11'
|
||||
helm = '3.19.0'
|
||||
gum = '0.16.2'
|
||||
gomplate = '4.3.3'
|
||||
just = "1.42.4"
|
||||
vault = "1.20.2"
|
||||
@@ -19,14 +19,16 @@ helm upgrade --install prometheus prometheus-community/kube-prometheus-stack \
|
||||
|
||||
Accessing UIs via PortForwarding
|
||||
```
|
||||
kubectl port-forward svc/prometheus-grafana 8080:80 -n monitoring
|
||||
kubectl port-forward svc/kube-prometheus-stack-grafana 8080:80 -n monitoring
|
||||
kubectl port-forward svc/prometheus-kube-prometheus-prometheus 9090 -n monitoring
|
||||
kubectl port-forward svc/prometheus-kube-prometheus-alertmanager 9093 -n monitoring
|
||||
```
|
||||
|
||||
This will make Grafana accessible on http://localhost:8080, Prometheus on http://localhost:9090 and Alert Manager on http://localhost:9093
|
||||
|
||||
Get Grafana Password via:
|
||||
```
|
||||
kubectl get secret --namespace monitoring -l app.kubernetes.io/component=admin-secret -o jsonpath="{.items[0].data.admin-password}" | base64 --decode ; echo
|
||||
```
|
||||
Login for Grafana:
|
||||
**User:** admin
|
||||
**Pwd:** prom-operator
|
||||
|
||||
|
||||
Reference in New Issue
Block a user