68 Commits

Author SHA1 Message Date
baschno
9363e38267 wip/vpa 2026-03-20 15:44:04 +01:00
baschno
7e47ce2787 kubeprom 2026-03-20 15:43:24 +01:00
baschno
2c23ac85ce prometheus stack 2026-02-23 23:11:04 +01:00
baschno
c6d2b3de3c auth update 2026-02-12 22:20:11 +01:00
baschno
a5d220418e authentik update 2026-02-12 22:16:00 +01:00
baschno
3226e527f4 added doku 2026-02-05 20:53:14 +01:00
baschno
71348ad7f5 kubeprom 2026-02-02 23:51:48 +01:00
baschno
40eae4f567 add pg stuff 2026-02-01 22:09:00 +01:00
baschno
e7f648cf57 ext-secrets initial 2026-01-25 20:23:01 +01:00
baschno
dce92aeb28 authentik initial 2026-01-25 20:22:36 +01:00
baschno
07e4ae31e3 kube-prom-stack 2026-01-25 20:22:18 +01:00
baschno
5e86aafa09 update vault readme 2026-01-25 20:21:19 +01:00
baschno
4444296443 postgres 2026-01-25 20:20:50 +01:00
baschno
9aafb940e9 adding extsecrets + postgres to just 2026-01-12 21:27:22 +01:00
baschno
4075203b1e initial add of enabling k8s with vault 2026-01-11 20:27:54 +01:00
baschno
92decafc3f adding vault client 2026-01-11 20:27:28 +01:00
baschno
09e1bbbc52 longhorn savegame 2026-01-11 10:21:14 +01:00
baschno
48d930fedc longhorn savegame 2026-01-03 20:35:36 +01:00
baschno
1f82ce8d02 longhorn savegame 2026-01-03 20:35:10 +01:00
baschno
a551f2e4ca Longhorn: use values yaml for helm to reduce replicas 2025-12-30 20:10:56 +01:00
baschno
a80dce42b0 add support for Longhorn setup 2025-12-30 20:03:23 +01:00
baschno
63243c6d2e fix formatting 2025-12-29 23:57:28 +01:00
baschno
1f9f7e275c add justfile for test deployment 2025-12-29 18:41:02 +01:00
baschno
09026d6812 move test deployment to different justfile 2025-12-29 18:33:46 +01:00
baschno
24991fce90 add setup-cluster-issuer 2025-12-28 17:04:24 +01:00
baschno
65a59d2d0c WIP: cert manager 2025-12-28 16:19:08 +01:00
baschno
85fb620e39 add module traefik 2025-12-28 11:19:30 +01:00
baschno
b56e02d2ed fix formatting 2025-12-28 11:19:12 +01:00
baschno
15cb2ce903 adding test deployment 2025-12-28 11:18:46 +01:00
baschno
b47fe8f66b fix formatting 2025-12-27 20:38:12 +01:00
baschno
c5810661e5 Add support for metallb installation 2025-12-27 20:32:16 +01:00
baschno
7ddc08d622 add local docker registry config 2025-12-27 09:58:15 +01:00
baschno
c5aa7f8105 fix context name parameter 2025-12-26 20:15:41 +01:00
baschno
0c6cfedcde update manual readme 2025-12-22 20:48:17 +01:00
2be83a977a Merge pull request 'just enabled' (#1) from just into master
Reviewed-on: #1
2025-12-22 19:47:19 +00:00
baschno
4f5a18c84c install incl agent ready 2025-12-22 20:41:06 +01:00
baschno
7a54346331 add local container registry 2025-12-22 20:15:48 +01:00
baschno
5abc0de38a add just and mise tool support 2025-12-22 11:21:20 +01:00
baschno
29674ae504 adding vault in dev mode 2025-12-20 11:32:56 +01:00
baschno
6abe5d1a8f optiona 2025-11-22 19:39:35 +01:00
baschno
67a6c414f2 updating ip range 2025-11-22 19:39:26 +01:00
baschno
08212c26a6 taint 2025-11-22 09:33:41 +01:00
baschno
e4adbfd0b2 add few links 2025-08-31 17:16:55 +02:00
baschno
d7db562a23 helm and flux 2025-08-22 18:10:24 +02:00
baschno
7896130d05 longhorn nodeselector doku 2025-08-21 21:07:31 +02:00
baschno
efcb4ee172 . 2025-08-20 21:50:18 +02:00
baschno
f58fad216a add prometheus helm 2025-08-20 19:27:05 +02:00
baschno
90e0de0804 add reloader component 2025-08-20 19:27:05 +02:00
baschno
8cb83ffd9c updsate 2025-08-11 20:31:16 +02:00
baschno
cca6f599d5 add statefulset stuff 2025-06-13 21:26:58 +02:00
baschno
506a199c95 longorn other namespace 2025-06-13 21:26:58 +02:00
baschno
d2a16bd55b helm prometheus 2025-06-09 19:24:40 +02:00
baschno
d25c9227c7 longhorn configure additional disk 2025-06-08 23:09:39 +02:00
baschno
45c61d5130 streamlined homepage deployment 2025-05-23 19:46:11 +02:00
baschno
82c19ff12c updating steps for traefik 2025-05-23 19:10:27 +02:00
baschno
9695376a0a adding pihole to homepage 2025-05-19 21:48:21 +02:00
baschno
84fd560675 update docu 2025-05-19 21:47:01 +02:00
baschno
5708f841e7 add linkwarden to homepage 2025-05-19 19:47:34 +02:00
baschno
97ef02c1da adding proxmox widgets 2025-04-27 12:53:14 +02:00
baschno
65e99a9f83 enabling reloader component for homepage 2025-04-27 01:56:36 +02:00
baschno
77ad59eae5 fixing longhorn ui certificate 2025-04-26 23:15:31 +02:00
baschno
a13663754d fix nginx pm icon 2025-04-26 21:34:21 +02:00
baschno
5e30b1e83d adding services to homepage 2025-04-26 21:21:24 +02:00
baschno
5514b5687f longhorn and echopod tests 2025-04-26 19:57:56 +02:00
baschno
a3404bba2b homepage setup without helm 2025-04-26 19:56:50 +02:00
baschno
0e4ddcefdf longhorn nummer 2 2025-04-21 21:18:23 +02:00
baschno
12546a9669 neu ist der mai 2025-04-21 00:21:28 +02:00
baschno
a6ac7b84e4 savegame 2025-04-10 22:56:27 +02:00
87 changed files with 3970 additions and 2007 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
.env.local

View File

@@ -34,4 +34,30 @@ Rancher Installation
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
# Prevent scheduling on master (optional)
```
kubectl taint nodes master node-role.kubernetes.io/master=:NoSchedule
```
# Just Setup // K3sup

```bash
export SERVER_IP=192.168.178.45
export AGENT_IP=192.168.178.75
export USER=basti

k3sup install \
  --cluster \
  --ip 192.168.178.45 \
  --user $USER \
  --merge \
  --local-path $HOME/.kube/config \
  --context my-k3s

k3sup join \
  --ip $AGENT_IP \
  --server-ip $SERVER_IP \
  --user $USER
```

148
01_Basic_Setup/justfile Normal file
View File

@@ -0,0 +1,148 @@
set fallback := true

# Cluster / node coordinates (override via environment or .env.local)
export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export EXTERNAL_K8S_HOST := env("EXTERNAL_K8S_HOST", "")

# Keycloak / OIDC settings
export KEYCLOAK_HOST := env("KEYCLOAK_HOST", "")
export KEYCLOAK_REALM := env("KEYCLOAK_REALM", "buunstack")
export K8S_OIDC_CLIENT_ID := env("K8S_OIDC_CLIENT_ID", "k8s")

# k3s installation defaults
export K3S_ENABLE_REGISTRY := env("K3S_ENABLE_REGISTRY", "true")
export SERVER_IP := env("K3S_SERVER_IP", "192.168.178.45")
export AGENT_IP := env("K3S_AGENT_IP", "192.168.178.75")
export USER := env("K3S_USER", "basti")

# Hidden default: show the recipe list
[private]
default:
    @just --list --unsorted --list-submodules
# Install k3s on the master via k3sup; optionally merge kubeconfig and deploy
# an in-cluster Docker registry (controlled by K3S_ENABLE_REGISTRY).
install:
    #!/bin/bash
    set -euo pipefail
    just env::check
    username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
    kubeconfig=""
    context=""
    if gum confirm "Update KUBECONFIG?"; then
        kubeconfig=$(
            gum input --prompt="KUBECONFIG file: " --value="${HOME}/.kube/config" --width=100
        )
        context=$(
            gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
        )
    fi
    args=(
        "install"
        "--context" "${context}"
        "--host" "${K8S_MASTER_NODE_NAME}"
        "--user" "${username}"
        "--no-extras"
    )
    if [ -n "${kubeconfig}" ]; then
        mkdir -p "$(dirname "${kubeconfig}")"
        args+=("--local-path" "${kubeconfig}" "--merge")
    fi
    # ${args[*]} (not [@]) inside a quoted string: joins the array into one
    # word for logging — "${args[@]}" here triggers ShellCheck SC2145
    echo "Running: k3sup ${args[*]}"
    k3sup "${args[@]}"
    if [ -n "${context}" ]; then
        kubectl config use-context "${context}"
    fi
    if [ "${K3S_ENABLE_REGISTRY}" = "true" ]; then
        echo "Setting up local Docker registry..."
        # Deploy Docker registry to cluster
        kubectl apply -f ./registry/registry.yaml
        # Set Pod Security Standard for registry namespace
        kubectl label namespace registry pod-security.kubernetes.io/enforce=restricted --overwrite
        # Wait for registry deployment
        echo "Waiting for registry to be ready..."
        kubectl wait --for=condition=available --timeout=60s deployment/registry -n registry
        # Configure registries.yaml for k3s
        just configure-registry
        echo "✓ Local Docker registry deployed and configured"
        echo ""
        echo "Registry accessible at:"
        echo "  localhost:30500"
        echo ""
        echo "Usage:"
        echo "  export DOCKER_HOST=ssh://${K8S_MASTER_NODE_NAME}"
        echo "  docker build -t localhost:30500/myapp:latest ."
        echo "  docker push localhost:30500/myapp:latest"
        echo "  kubectl run myapp --image=localhost:30500/myapp:latest"
    fi
    echo "k3s cluster installed on ${K8S_MASTER_NODE_NAME}."
# Uninstall k3s from the server node (optionally the agent too), then remove
# the matching context/cluster/user entries from the local kubeconfig.
uninstall:
    #!/bin/bash
    set -euo pipefail
    if gum confirm "Uninstall k3s from ${K8S_MASTER_NODE_NAME}?"; then
        if gum confirm "Also remove Agent node at ${AGENT_IP}?"; then
            echo "Removing Agent node at ${AGENT_IP}..."
            ssh "${AGENT_IP}" "/usr/local/bin/k3s-agent-uninstall.sh"
        fi
        echo "Removing content of Server node..."
        ssh "${K8S_MASTER_NODE_NAME}" "/usr/local/bin/k3s-uninstall.sh"
        echo "Cleaning up kubeconfig entries..."
        # Resolve the cluster and user referenced by the context before deleting it
        cluster_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.cluster // empty")
        user_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.user // empty")
        if kubectl config get-contexts "${K8S_CONTEXT}" &>/dev/null; then
            kubectl config delete-context "${K8S_CONTEXT}"
            echo "Deleted context: ${K8S_CONTEXT}"
        fi
        # grep -Fxq: literal full-line match — names typically contain dots,
        # which "^${name}$" would treat as regex wildcards
        if [ -n "${cluster_name}" ] && kubectl config get-clusters | grep -Fxq "${cluster_name}"; then
            kubectl config delete-cluster "${cluster_name}"
            echo "Deleted cluster: ${cluster_name}"
        fi
        if [ -n "${user_name}" ] && kubectl config get-users | grep -Fxq "${user_name}"; then
            kubectl config delete-user "${user_name}"
            echo "Deleted user: ${user_name}"
        fi
        echo "k3s cluster uninstalled from ${K8S_CONTEXT}."
    else
        echo "Uninstallation cancelled." >&2
        exit 1
    fi
# Join an additional agent node to the existing cluster via k3sup.
add-agent:
    #!/bin/bash
    set -euo pipefail
    just env::check
    ssh_user=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
    joining_ip=$(gum input --prompt="Agent IP to join cluster: " --value="${AGENT_IP}" --width=100)
    join_args=(
        "join"
        "--ip" "${joining_ip}"
        "--server-ip" "${SERVER_IP}"
        "--user" "${ssh_user}"
    )
    echo "Running: k3sup ${join_args[*]}"
    k3sup "${join_args[@]}"
    echo "Agent node at ${joining_ip} added to cluster."
# Configure k3s to use local registry
configure-registry:
    #!/bin/bash
    set -euo pipefail
    master="${K8S_MASTER_NODE_NAME}"
    echo "Configuring k3s registries.yaml..."
    # Render the registries template locally and stream it to the node via sudo tee
    ssh "${master}" "sudo mkdir -p /etc/rancher/k3s"
    gomplate -f ./registry/registries.gomplate.yaml | ssh "${master}" "sudo tee /etc/rancher/k3s/registries.yaml > /dev/null"
    # k3s only rereads registries.yaml on startup
    echo "Restarting k3s to apply registry configuration..."
    ssh "${master}" "sudo systemctl restart k3s"
    echo "✓ Registry configuration applied"

View File

@@ -0,0 +1,4 @@
# k3s registries.yaml fragment: trust the in-cluster registry at localhost:30500
# despite its self-signed / absent TLS certificate.
configs:
  "localhost:30500":
    tls:
      insecure_skip_verify: true

View File

@@ -0,0 +1,109 @@
# In-cluster Docker registry: namespace, hardened single-replica Deployment,
# a ClusterIP Service for in-cluster pulls, and a NodePort (30500) for pushes
# from outside the cluster.
apiVersion: v1
kind: Namespace
metadata:
  name: registry
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry
  namespace: registry
  labels:
    app: registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
  template:
    metadata:
      labels:
        app: registry
    spec:
      # Runs as 'nobody' (65534) — satisfies the "restricted" Pod Security Standard
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
        fsGroup: 65534
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: registry
          image: registry:2
          ports:
            - containerPort: 5000
              name: http
          resources:
            requests:
              cpu: 25m
              memory: 128Mi
            limits:
              cpu: 2000m
              memory: 20Gi
          env:
            # Allow DELETE of manifests/blobs so old images can be removed
            - name: REGISTRY_STORAGE_DELETE_ENABLED
              value: "true"
            - name: REGISTRY_HTTP_ADDR
              value: "0.0.0.0:5000"
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 65534
            capabilities:
              drop:
                - ALL
          volumeMounts:
            # NOTE(review): emptyDir-backed — pushed images do not survive pod restarts
            - name: registry-data
              mountPath: /var/lib/registry
            - name: tmp
              mountPath: /tmp
          livenessProbe:
            httpGet:
              path: /v2/
              port: 5000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /v2/
              port: 5000
            initialDelaySeconds: 5
            periodSeconds: 5
      volumes:
        - name: registry-data
          emptyDir: {}
        - name: tmp
          emptyDir: {}
---
# Stable in-cluster endpoint for the registry
apiVersion: v1
kind: Service
metadata:
  name: registry
  namespace: registry
  labels:
    app: registry
spec:
  selector:
    app: registry
  ports:
    - port: 5000
      targetPort: 5000
      name: http
  type: ClusterIP
---
# NodePort exposure so nodes/hosts can push to localhost:30500
apiVersion: v1
kind: Service
metadata:
  name: registry-nodeport
  namespace: registry
  labels:
    app: registry
spec:
  selector:
    app: registry
  ports:
    - port: 5000
      targetPort: 5000
      nodePort: 30500
      name: http
  type: NodePort

View File

@@ -0,0 +1,12 @@
# cert-manager Certificate for the Grafana ingress (rendered by gomplate).
# The issued keypair lands in grafana-certificate-secret, which the Grafana
# ingress TLS configuration references.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: grafana-ingress-certificate
  namespace: {{.Env.PROMETHEUS_NAMESPACE}}
spec:
  secretName: grafana-certificate-secret
  issuerRef:
    name: cloudflare-cluster-issuer
    kind: ClusterIssuer
  dnsNames:
    - {{.Env.GRAFANA_HOST}}

View File

@@ -0,0 +1,38 @@
set fallback := true

export PROMETHEUS_NAMESPACE := env("PROMETHEUS_NAMESPACE", "monitoring")
export GRAFANA_HOST := env("GRAFANA_HOST", "")

[private]
default:
    @just --list --unsorted --list-submodules

# Register/refresh the prometheus-community chart repository
add-helm-repo:
    @helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
    @helm repo update

# Install kube-prometheus-stack, print access hints, and apply the Grafana certificate
install:
    just add-helm-repo
    gomplate -f kube-stack-config-values.gomplate.yaml -o kube-stack-config-values.yaml
    @helm upgrade --cleanup-on-fail --install kube-prometheus-stack prometheus-community/kube-prometheus-stack \
        --namespace ${PROMETHEUS_NAMESPACE} \
        --create-namespace \
        --wait \
        -f kube-stack-config-values.yaml
    echo "kubectl port-forward svc/kube-prometheus-stack-grafana 8080:80 -n ${PROMETHEUS_NAMESPACE}"
    echo "kubectl port-forward svc/kube-prometheus-stack-prometheus 9090 -n ${PROMETHEUS_NAMESPACE}"
    echo "kubectl port-forward svc/kube-prometheus-stack-alertmanager 9093 -n ${PROMETHEUS_NAMESPACE}"
    echo "Get Grafana Password:"
    # Fix: the hint previously hardcoded "--namespace monitoring" instead of
    # the configured ${PROMETHEUS_NAMESPACE}
    echo "kubectl get secret --namespace ${PROMETHEUS_NAMESPACE} -l app.kubernetes.io/component=admin-secret -o jsonpath=\"{.items[0].data.admin-password}\" | base64 --decode ; echo"
    gomplate -f ./grafana-certificate.gomplate.yaml | kubectl apply -f -

# Remove the kube-prometheus-stack release (namespace is kept)
uninstall:
    helm uninstall kube-prometheus-stack -n ${PROMETHEUS_NAMESPACE}

View File

@@ -0,0 +1,23 @@
# Grafana values for kube-prometheus-stack (rendered by gomplate)
grafana:
  enabled: true
  ingress:
    enabled: true
    ingressClassName: traefik
    # Fix: the original declared "annotations:" twice under ingress (a duplicate
    # mapping key, invalid YAML) — merged into one map
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      traefik.ingress.kubernetes.io/router.tls.certresolver: "" # empty = use secretName, not its own resolver
    hosts:
      - {{ .Env.GRAFANA_HOST }}
    tls:
      # Fix: secretName and hosts were split into two list items, leaving the
      # host entry without a certificate — they belong to one TLS entry
      - secretName: grafana-certificate-secret
        hosts:
          - {{ .Env.GRAFANA_HOST }}
  grafana.ini:
    server:
      domain: {{ .Env.GRAFANA_HOST }}
      root_url: https://{{ .Env.GRAFANA_HOST }}
      serve_from_sub_path: false

265
08_Vault/README.md Normal file
View File

@@ -0,0 +1,265 @@
# Helm
## Installation
helm repo add hashicorp https://helm.releases.hashicorp.com
helm install vault hashicorp/vault \
--set='server.dev.enabled=true' \
--set='ui.enabled=true' \
--set='ui.serviceType=LoadBalancer' \
--namespace vault \
--create-namespace
Running Vault in "dev" mode. This requires no further setup, no state management, and no initialization. This is useful for experimenting with Vault without needing to unseal, store keys, et al. All data is lost on restart — do not use dev mode for anything other than experimenting. See https://developer.hashicorp.com/vault/docs/concepts/dev-server for details.
## Output
```
$ kubectl get all -n vault
NAME READY STATUS RESTARTS AGE
pod/vault-0 1/1 Running 0 2m39s
pod/vault-agent-injector-8497dd4457-8jgcm 1/1 Running 0 2m39s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/vault ClusterIP 10.245.225.169 <none> 8200/TCP,8201/TCP 2m40s
service/vault-agent-injector-svc ClusterIP 10.245.32.56 <none> 443/TCP 2m40s
service/vault-internal ClusterIP None <none> 8200/TCP,8201/TCP 2m40s
service/vault-ui LoadBalancer 10.245.103.246 24.132.59.59 8200:31764/TCP 2m40s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/vault-agent-injector 1/1 1 1 2m40s
NAME DESIRED CURRENT READY AGE
replicaset.apps/vault-agent-injector-8497dd4457 1 1 1 2m40s
NAME READY AGE
statefulset.apps/vault 1/1 2m40s
```
# Configuration
## Enter Pod
kubectl exec -it vault-0 -n vault -- /bin/sh
## Create policy
```
cat <<EOF > /home/vault/read-policy.hcl
path "secret*" {
capabilities = ["read"]
}
EOF
```
## Apply
```
vault policy write read-policy /home/vault/read-policy.hcl
```
## Enable Kubernetes
```
vault auth enable kubernetes
```
## Configure Kubernetes Auth
Configure to communicate with API server
```
vault write auth/kubernetes/config \
token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
  kubernetes_host=https://${KUBERNETES_PORT_443_TCP_ADDR}:443 \
  kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
```
## Create a Role
Create a role(vault-role) that binds the above policy to a Kubernetes service account(vault-serviceaccount) in a specific namespace. This allows the service account to access secrets stored in Vault:
```
vault write auth/kubernetes/role/vault-role \
bound_service_account_names=vault-serviceaccount \
bound_service_account_namespaces=vault \
policies=read-policy \
ttl=1h
```
# Create Secrets
## Via CLI
```
vault kv put secret/login pattoken=ytbuytbytbf765rb65u56rv
```
## Via UI
Now you can login to vault using the Token method, initially use Token=`root` to login.
# Accessing Secrets in Pods
Using the above steps, we have installed Vault and configured a Vault role(vault-role) to allow the service account(vault-serviceaccount) to access secrets stored in Vault.
Additionally, we have created two secrets: login and my-first-secret with key-value pairs. Now, let's create a simple Kubernetes deployment and try to access those secrets.
First, lets create a service account named vault-serviceaccount in the vault namespace. This service account is granted permissions for the Vault role as defined in the "Create a Role" step above.
Apply the above manifest using the below command
```
kubectl apply -f vault-sa.yaml -n vault
```
This deployment manifest creates a single replica of an Nginx pod configured to securely fetch secrets from Vault. The Vault Agent injects the secrets login and my-first-secret into the pod according to the specified templates. The secrets are stored in the pod's filesystem and can be accessed by the application running in the container. The vault-serviceaccount service account, which has the necessary permissions, is used to authenticate with Vault.
```
kubectl apply -f vault-secret-test-deploy.yaml -n vault
```
These annotations are used to configure the Vault Agent to inject secrets into the pod volume.
- `vault.hashicorp.com/agent-inject: "true"`: Enables Vault Agent injection for this pod.
- `vault.hashicorp.com/agent-inject-status: "update"`: Ensures the status of secret injection is updated.
- `vault.hashicorp.com/agent-inject-secret-login: "secret/login"`: Specifies that the secret stored at `secret/login` in Vault should be injected.
- `vault.hashicorp.com/agent-inject-template-login`: Defines the template for the injected login secret, specifying the format in which the secret will be written.
- `vault.hashicorp.com/agent-inject-secret-my-first-secret: "secret/my-first-secret"`: Specifies that the secret stored at `secret/my-first-secret` in Vault should be injected.
- `vault.hashicorp.com/agent-inject-template-my-first-secret`: Defines the template for the injected `my-first-secret`, specifying the format in which the secret will be written.
- `vault.hashicorp.com/role: "vault-role"`: Specifies the Vault role to be used for authentication.
- `serviceAccountName`: Uses the service account `vault-serviceaccount` which has permissions to access Vault.
Use the below command to check the vault secrets from the pod volume
```
kubectl exec -it vault-test-84d9dc9986-gcxfv -- sh -c "cat /vault/secrets/login && cat /vault/secrets/my-first-secret" -n vault
```
----
Wenn du Kubernetes mit Vault konfiguriert hast, ermöglichst du eine **sichere Integration zwischen deinem Kubernetes-Cluster und HashiCorp Vault**. Hier sind die wichtigsten Szenarien und Vorteile:
## Hauptfunktionen
### 1. **Automatische Pod-Authentifizierung**
Pods können sich automatisch bei Vault authentifizieren, ohne dass du Credentials manuell verteilen musst. Vault nutzt Kubernetes Service Accounts zur Identitätsverifizierung.
### 2. **Dynamische Secrets für Anwendungen**
Anwendungen können zur Laufzeit Secrets von Vault abrufen, statt sie in ConfigMaps oder Kubernetes Secrets zu speichern.
## Praktische Szenarien
### **Szenario 1: Vault Agent Sidecar Injection**
Vault injiziert automatisch einen Sidecar-Container, der Secrets abruft und für deine App bereitstellt:
```yaml
apiVersion: v1
kind: Pod
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/role: "myapp"
vault.hashicorp.com/agent-inject-secret-database: "database/creds/myapp-role"
spec:
serviceAccountName: myapp
containers:
- name: app
image: myapp:latest
```
**Ergebnis:** Datenbank-Credentials werden automatisch in `/vault/secrets/database` bereitgestellt.
### **Szenario 2: Dynamische Datenbank-Credentials**
Statt statische DB-Passwörter zu verwenden, generiert Vault temporäre Credentials:
- Jeder Pod bekommt eigene DB-Credentials
- Credentials sind zeitlich begrenzt (z.B. 24h)
- Automatische Rotation
- Einfaches Widerrufen bei Kompromittierung
### **Szenario 3: Externe Secrets Operator (ESO)**
Secrets werden als native Kubernetes Secrets synchronisiert:
```yaml
apiVersion: external-secrets.io/v1beta1
kind: SecretStore
metadata:
name: vault-backend
spec:
provider:
vault:
server: "https://vault.test.k8s.schnrbs.work"
path: "secret"
auth:
kubernetes:
mountPath: "kubernetes"
role: "myapp"
```
### **Szenario 4: Verschlüsselung als Service**
Anwendungen können Vault's Transit Engine nutzen:
```bash
# Daten verschlüsseln ohne den Key zu kennen
vault write transit/encrypt/my-key plaintext=$(base64 <<< "sensitive data")
# Daten entschlüsseln
vault write transit/decrypt/my-key ciphertext="vault:v1:abc..."
```
### **Szenario 5: PKI/Zertifikats-Management**
Automatische Ausstellung von TLS-Zertifikaten für Service-to-Service-Kommunikation:
- Kurzlebige Zertifikate (z.B. 1h)
- Automatische Rotation
- Zero-Trust-Netzwerk
### **Szenario 6: Multi-Tenancy**
Verschiedene Namespaces/Teams haben isolierten Zugriff:
```bash
# Team A darf nur auf secret/team-a/* zugreifen
# Team B darf nur auf secret/team-b/* zugreifen
```
## Vorteile gegenüber Kubernetes Secrets
| Aspekt | Kubernetes Secrets | Vault Integration |
|--------|-------------------|-------------------|
| Verschlüsselung at rest | Optional, etcd-Ebene | Immer, zusätzlich verschlüsselt |
| Secret Rotation | Manuell | Automatisch/dynamisch |
| Audit Log | Begrenzt | Detailliert für jeden Zugriff |
| Dynamische Secrets | Nein | Ja (DB, Cloud, etc.) |
| Granulare Policies | Begrenzt | Sehr feinkörnig |
| Encryption-as-a-Service | Nein | Ja |
## Typischer Workflow nach der Konfiguration
1. **Policy erstellen:** Definiere, wer auf welche Secrets zugreifen darf
2. **Role erstellen:** Verknüpfe Kubernetes Service Accounts mit Vault Policies
3. **Secrets bereitstellen:** Nutze Vault Agent Injection oder CSI Driver
4. **Anwendung deployen:** Pods authentifizieren sich automatisch
## Best Practice Setup
Nach der Kubernetes Auth-Aktivierung solltest du:
```bash
# 1. Policy erstellen
vault policy write myapp - <<EOF
path "secret/data/myapp/*" {
capabilities = ["read"]
}
EOF
# 2. Role erstellen
vault write auth/kubernetes/role/myapp \
bound_service_account_names=myapp \
bound_service_account_namespaces=production \
policies=myapp \
ttl=1h
# 3. Service Account in K8s erstellen
kubectl create serviceaccount myapp -n production
```
Möchtest du ein spezifisches Szenario genauer erkunden oder brauchst du Hilfe bei der Konfiguration eines bestimmten Use Cases?

View File

@@ -0,0 +1,8 @@
# Long-lived ServiceAccount token for the vault-auth account. The justfile
# reads .data.token and .data.ca\.crt from this Secret to configure Vault's
# Kubernetes auth method (token_reviewer_jwt / kubernetes_ca_cert).
apiVersion: v1
kind: Secret
metadata:
  name: vault-auth-token
  annotations:
    kubernetes.io/service-account.name: vault-auth
type: kubernetes.io/service-account-token

126
08_Vault/justfile Normal file
View File

@@ -0,0 +1,126 @@
set fallback := true

# Vault deployment settings (override via environment)
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
export VAULT_CHART_VERSION := env("VAULT_CHART_VERSION", "0.31.0")
export VAULT_HOST := env("VAULT_HOST", "")
export VAULT_ADDR := "https://" + VAULT_HOST
export VAULT_DEBUG := env("VAULT_DEBUG", "false")

SECRET_PATH := "secret"

[private]
default:
    @just --list --unsorted --list-submodules

# Add Helm repository
add-helm-repo:
    helm repo add hashicorp https://helm.releases.hashicorp.com
    helm repo update

# Remove Helm repository
remove-helm-repo:
    helm repo remove hashicorp

# Create Vault namespace (no-op when it already exists)
create-namespace:
    @kubectl get namespace ${K8S_VAULT_NAMESPACE} > /dev/null || kubectl create namespace ${K8S_VAULT_NAMESPACE}

# Delete Vault namespace
delete-namespace:
    @kubectl delete namespace ${K8S_VAULT_NAMESPACE} --ignore-not-found
# Install Vault via Helm, initialize it (1 key share / threshold 1), unseal it,
# and configure Kubernetes authentication. Re-running against an already
# initialized Vault prompts for the existing root token instead.
install:
#!/bin/bash
# NOTE(review): 'set -eu' without pipefail looks deliberate — the
# 'vault status | grep' pipeline below must not abort the recipe while
# Vault is still sealed/uninitialized (vault status exits non-zero then).
set -eu
just create-namespace
just add-helm-repo
# Render chart values (VAULT_HOST etc.) from the environment before installing
gomplate -f vault-values.gomplate.yaml -o vault-values.yaml
helm upgrade \
--cleanup-on-fail \
--install \
vault \
hashicorp/vault \
--namespace ${K8S_VAULT_NAMESPACE} \
--wait \
-f vault-values.yaml
kubectl wait pod --for=condition=PodReadyToStartContainers \
-n ${K8S_VAULT_NAMESPACE} vault-0 --timeout=5m
# Wait for Vault service to be ready to accept connections
echo "Waiting for Vault service to be ready..."
# Poll up to 30 * 3s = 90s for 'vault status' to report Initialized/Sealed
for i in {1..30}; do
if kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
vault status 2>&1 | grep -qE "(Initialized|Sealed)"; then
echo "✓ Vault service is ready"
break
fi
if [ $i -eq 30 ]; then
echo "Error: Timeout waiting for Vault service to be ready"
exit 1
fi
sleep 3
done
# 'vault operator init' exits non-zero when Vault is already initialized;
# '|| true' keeps -e from aborting so we can branch on the output below
init_output=$(kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
vault operator init -key-shares=1 -key-threshold=1 -format=json || true)
root_token=""
if echo "${init_output}" | grep -q "Vault is already initialized"; then
echo "Vault is already initialized"
# Prompt for the existing root token — setup-kubernetes-auth needs it
while [ -z "${root_token}" ]; do
root_token=$(gum input --prompt="Vault root token: " --password --width=100)
done
else
unseal_key=$(echo "${init_output}" | jq -r '.unseal_keys_b64[0]')
root_token=$(echo "${init_output}" | jq -r '.root_token')
kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
vault operator unseal "${unseal_key}"
echo "Vault initialized and unsealed successfully"
# NOTE(review): root token and unseal key are printed to the console —
# acceptable for a homelab, but they end up in terminal scrollback/logs
echo "Root Token: ${root_token}"
echo "Unseal Key: ${unseal_key}"
echo "Please save these credentials securely!"
fi
# Wait for all vault instances to pass readiness checks and be ready to serve requests
kubectl wait pod --for=condition=ready -n ${K8S_VAULT_NAMESPACE} \
-l app.kubernetes.io/name=vault --timeout=5m
just setup-kubernetes-auth "${root_token}"
# Uninstall Vault. Pass delete-ns='false' to keep the namespace; the default
# 'true' matches the previous behavior, where the namespace was always removed.
uninstall delete-ns='true':
    #!/bin/bash
    set -euo pipefail
    helm uninstall vault -n ${K8S_VAULT_NAMESPACE} --ignore-not-found --wait
    # Fix: the delete-ns parameter was accepted but ignored — honor it now
    if [ "{{ delete-ns }}" = "true" ]; then
        just delete-namespace
    fi
# Setup Kubernetes authentication
setup-kubernetes-auth root_token='':
    #!/bin/bash
    set -euo pipefail
    export VAULT_TOKEN="{{ root_token }}"
    # Keep prompting until a token is supplied (the recipe argument may be empty)
    while [ -z "${VAULT_TOKEN}" ]; do
        VAULT_TOKEN=$(gum input --prompt="Vault root token: " --password --width=100)
    done
    # Render and apply the reviewer service account, its RBAC binding, and token secret
    gomplate -f ./serviceaccount.gomplate.yaml | kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f -
    gomplate -f ./rolebinding.gomplate.yaml | kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f -
    kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f ./auth-token-secret.yaml
    sa_secret="vault-auth-token"
    sa_jwt=$(kubectl get secret -n "${K8S_VAULT_NAMESPACE}" "${sa_secret}" -o jsonpath='{.data.token}' | base64 --decode)
    sa_ca=$(kubectl get secret -n "${K8S_VAULT_NAMESPACE}" "${sa_secret}" -o jsonpath='{.data.ca\.crt}' | base64 --decode)
    # Enable the kubernetes auth method only when it is not already mounted
    if ! vault auth list -format=json | jq -e '.["kubernetes/"]' >/dev/null 2>&1; then
        vault auth enable kubernetes
    fi
    vault write auth/kubernetes/config \
        token_reviewer_jwt="${sa_jwt}" \
        kubernetes_host="https://kubernetes.default.svc" \
        kubernetes_ca_cert="${sa_ca}"

View File

@@ -0,0 +1,12 @@
# Grants the vault-auth ServiceAccount the system:auth-delegator ClusterRole,
# which lets Vault submit TokenReview requests to validate pod JWTs.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: vault-auth-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: vault-auth
    namespace: {{ .Env.K8S_VAULT_NAMESPACE }}

View File

@@ -0,0 +1,5 @@
# ServiceAccount Vault uses as its token reviewer identity (rendered by gomplate)
apiVersion: v1
kind: ServiceAccount
metadata:
  name: vault-auth
  namespace: {{ .Env.K8S_VAULT_NAMESPACE }}

6
08_Vault/vault-sa.yaml Normal file
View File

@@ -0,0 +1,6 @@
# ServiceAccount bound to the Vault role "vault-role"; pods running under it
# may read secrets via the Vault Agent injector (see README).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: vault-serviceaccount
  labels:
    app: read-vault-secret

View File

@@ -0,0 +1,35 @@
# Test Deployment: an nginx pod whose Vault Agent sidecar injects two KV
# secrets (secret/login and secret/my-first-secret) under /vault/secrets/.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: vault-test
  labels:
    app: read-vault-secret
spec:
  selector:
    matchLabels:
      app: read-vault-secret
  replicas: 1
  template:
    metadata:
      annotations:
        vault.hashicorp.com/agent-inject: "true"
        vault.hashicorp.com/agent-inject-status: "update"
        vault.hashicorp.com/agent-inject-secret-login: "secret/login"
        vault.hashicorp.com/agent-inject-template-login: |
          {{- with secret "secret/login" -}}
          pattoken={{ .Data.data.pattoken }}
          {{- end }}
        vault.hashicorp.com/agent-inject-secret-my-first-secret: "secret/my-first-secret"
        vault.hashicorp.com/agent-inject-template-my-first-secret: |
          {{- with secret "secret/my-first-secret" -}}
          username={{ .Data.data.username }}
          password={{ .Data.data.password }}
          {{- end }}
        vault.hashicorp.com/role: "vault-role"
      labels:
        app: read-vault-secret
    spec:
      # Must be the SA bound to vault-role, or the agent cannot authenticate
      serviceAccountName: vault-serviceaccount
      containers:
        - name: nginx
          image: nginx

View File

@@ -0,0 +1,16 @@
# Helm values for the hashicorp/vault chart (rendered by gomplate from VAULT_HOST)
server:
  ingress:
    enabled: true
    annotations:
      # Legacy class annotation kept alongside ingressClassName below
      kubernetes.io/ingress.class: traefik
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
    ingressClassName: traefik
    hosts:
      - host: {{ .Env.VAULT_HOST }}
        paths:
          - /
    tls:
      - hosts:
          - {{ .Env.VAULT_HOST }}
  dataStorage:
    storageClass: longhorn

View File

@@ -0,0 +1,16 @@
# Helm values for the hashicorp/vault chart with the hostname baked in
# (static counterpart of vault-values.gomplate.yaml)
server:
  ingress:
    enabled: true
    annotations:
      # Legacy class annotation kept alongside ingressClassName below
      kubernetes.io/ingress.class: traefik
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
    ingressClassName: traefik
    hosts:
      - host: vault.test.k8s.schnrbs.work
        paths:
          - /
    tls:
      - hosts:
          - vault.test.k8s.schnrbs.work
  dataStorage:
    storageClass: longhorn

View File

@@ -0,0 +1,51 @@
# External Secrets Operator resource configuration
# Based on Goldilocks recommendations (Burstable QoS)

# Main controller
podSecurityContext:
  runAsNonRoot: true
  runAsUser: 1000
  runAsGroup: 1000
  fsGroup: 1000
  seccompProfile:
    type: RuntimeDefault
resources:
  requests:
    cpu: 15m
    memory: 192Mi
  limits:
    cpu: 50m
    memory: 256Mi

# cert-controller: maintains the webhook's TLS material
certController:
  podSecurityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
    fsGroup: 1000
    seccompProfile:
      type: RuntimeDefault
  resources:
    requests:
      cpu: 15m
      memory: 192Mi
    limits:
      cpu: 50m
      memory: 256Mi

# Admission/conversion webhook
webhook:
  podSecurityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
    fsGroup: 1000
    seccompProfile:
      type: RuntimeDefault
  resources:
    requests:
      cpu: 15m
      memory: 128Mi
    limits:
      cpu: 50m
      memory: 256Mi

View File

@@ -0,0 +1,65 @@
set fallback := true

# External Secrets Operator settings (override via environment)
export EXTERNAL_SECRETS_NAMESPACE := env("EXTERNAL_SECRETS_NAMESPACE", "external-secrets")
export EXTERNAL_SECRETS_CHART_VERSION := env("EXTERNAL_SECRETS_CHART_VERSION", "1.1.0")
export EXTERNAL_SECRETS_REFRESH_INTERVAL := env("EXTERNAL_SECRETS_REFRESH_INTERVAL", "1800")
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
export VAULT_HOST := env("VAULT_HOST", "")
export VAULT_ADDR := "https://" + VAULT_HOST

[private]
default:
    @just --list --unsorted --list-submodules

# Add Helm repository
add-helm-repo:
    helm repo add external-secrets https://charts.external-secrets.io
    helm repo update

# Remove Helm repository
remove-helm-repo:
    helm repo remove external-secrets

# Install External Secrets
install:
    just add-helm-repo
    helm upgrade --cleanup-on-fail \
        --install external-secrets external-secrets/external-secrets \
        --version ${EXTERNAL_SECRETS_CHART_VERSION} -n ${EXTERNAL_SECRETS_NAMESPACE} \
        --create-namespace --wait \
        -f external-secrets-values.yaml
    # Enforce the "restricted" Pod Security Standard on the namespace
    kubectl label namespace ${EXTERNAL_SECRETS_NAMESPACE} \
        pod-security.kubernetes.io/enforce=restricted --overwrite
    just create-external-secrets-role
    just create-vault-secret-store

# Uninstall External Secrets
uninstall:
    just delete-vault-secret-store
    helm uninstall external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} --wait
    kubectl delete namespace ${EXTERNAL_SECRETS_NAMESPACE} --ignore-not-found

# Create Vault Secret Store for External Secrets
create-vault-secret-store:
    gomplate -f ./vault-secret-store.gomplate.yaml | kubectl apply -f -

# Delete Vault Secret Store for External Secrets
delete-vault-secret-store:
    gomplate -f ./vault-secret-store.gomplate.yaml | kubectl delete --ignore-not-found -f -
# Create Vault role for External Secrets
create-external-secrets-role root_token='':
    #!/bin/bash
    set -euo pipefail
    export VAULT_TOKEN="{{ root_token }}"
    while [ -z "${VAULT_TOKEN}" ]; do
        VAULT_TOKEN=$(gum input --prompt="Vault root token: " --password --width=100)
    done
    # NOTE(review): this binds the broad "admin" policy; a dedicated read-only
    # policy scoped to what ESO actually syncs would be safer.
    # Fix: quote ${EXTERNAL_SECRETS_NAMESPACE} (SC2086)
    vault write auth/kubernetes/role/external-secrets \
        bound_service_account_names=external-secrets \
        bound_service_account_namespaces="${EXTERNAL_SECRETS_NAMESPACE}" \
        audience=vault \
        policies=admin \
        ttl=1h

View File

@@ -0,0 +1,22 @@
# gomplate template: ClusterSecretStore that points the External Secrets
# Operator at the in-cluster Vault. Rendered and applied by the justfile
# recipes create-vault-secret-store / delete-vault-secret-store.
apiVersion: external-secrets.io/v1
kind: ClusterSecretStore
metadata:
  name: vault-secret-store
spec:
  provider:
    vault:
      # In-cluster service DNS; plain HTTP, although the justfile's external
      # VAULT_ADDR is HTTPS. NOTE(review): confirm TLS inside the cluster is
      # intentionally skipped.
      server: http://vault.{{ .Env.K8S_VAULT_NAMESPACE }}:8200
      path: secret
      version: v2 # KV secrets engine version 2
      auth:
        kubernetes:
          role: external-secrets
          mountPath: kubernetes
          serviceAccountRef:
            name: external-secrets
            namespace: {{ .Env.EXTERNAL_SECRETS_NAMESPACE }}
          # Audience must match the audience configured in Vault Kubernetes auth role
          # Required for Vault 1.21+ compatibility
          audiences:
            - vault
  # Seconds between store revalidations (default 1800 from the justfile).
  refreshInterval: {{ .Env.EXTERNAL_SECRETS_REFRESH_INTERVAL }}

View File

@@ -0,0 +1,180 @@
---
# ServiceAccount the homepage pod runs as; its API token powers the
# in-app Kubernetes widgets (see the ClusterRole below).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
secrets:
  - name: homepage
---
# Explicit long-lived token Secret for the ServiceAccount (Kubernetes no
# longer auto-creates these for new ServiceAccounts).
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
  annotations:
    kubernetes.io/service-account.name: homepage
---
# Cluster-wide read-only (get/list) permissions needed by homepage's
# service discovery and resource widgets: core resources, ingresses
# (legacy + networking.k8s.io), Traefik IngressRoutes, Gateway API
# routes, and the metrics API for CPU/memory stats.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: homepage
  labels:
    app.kubernetes.io/name: homepage
rules:
  - apiGroups:
      - ""
    resources:
      - namespaces
      - pods
      - nodes
    verbs:
      - get
      - list
  - apiGroups:
      - extensions
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
  - apiGroups:
      - traefik.io
    resources:
      - ingressroutes
    verbs:
      - get
      - list
  - apiGroups:
      - gateway.networking.k8s.io
    resources:
      - httproutes
      - gateways
    verbs:
      - get
      - list
  - apiGroups:
      - metrics.k8s.io
    resources:
      - nodes
      - pods
    verbs:
      - get
      - list
---
# Bind the read-only ClusterRole to the homepage ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: homepage
  labels:
    app.kubernetes.io/name: homepage
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: homepage
subjects:
  - kind: ServiceAccount
    name: homepage
    namespace: homepage
---
# ClusterIP Service in front of the homepage pod; the Traefik IngressRoute
# forwards TLS traffic to this Service on port 3000.
# Fix: removed a dangling empty 'annotations:' key that serialized to null.
apiVersion: v1
kind: Service
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
spec:
  type: ClusterIP
  ports:
    - port: 3000
      targetPort: http # resolves to the container port named 'http'
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: homepage
---
# Single-replica homepage deployment; configuration files are projected
# from the 'homepage' ConfigMap via subPath mounts.
# NOTE(review): no resources, probes, or securityContext are set — consider
# adding them if the namespace enforces a Pod Security Standard.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
  annotations:
    # Stakater Reloader: restart the pod when the watched Secret changes.
    reloader.stakater.com/search: "true"
    secret.reloader.stakater.com/reload: "homepage"
spec:
  revisionHistoryLimit: 3
  replicas: 1
  strategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: homepage
  template:
    metadata:
      labels:
        app.kubernetes.io/name: homepage
    spec:
      serviceAccountName: homepage
      # Token mount is required for the in-app Kubernetes widgets.
      automountServiceAccountToken: true
      dnsPolicy: ClusterFirst
      enableServiceLinks: true
      containers:
        - name: homepage
          # NOTE(review): 'latest' + pull Always makes rollouts
          # non-reproducible; consider pinning a version tag.
          image: "ghcr.io/gethomepage/homepage:latest"
          imagePullPolicy: Always
          env:
            - name: HOMEPAGE_ALLOWED_HOSTS
              value: homepage.k8s.schnrbs.work # required, may need port. See gethomepage.dev/installation/#homepage_allowed_hosts
          ports:
            - name: http
              containerPort: 3000
              protocol: TCP
          volumeMounts:
            # One subPath mount per config file so the rest of /app/config
            # stays writable by the app.
            - mountPath: /app/config/custom.js
              name: homepage-config
              subPath: custom.js
            - mountPath: /app/config/custom.css
              name: homepage-config
              subPath: custom.css
            - mountPath: /app/config/bookmarks.yaml
              name: homepage-config
              subPath: bookmarks.yaml
            - mountPath: /app/config/docker.yaml
              name: homepage-config
              subPath: docker.yaml
            - mountPath: /app/config/kubernetes.yaml
              name: homepage-config
              subPath: kubernetes.yaml
            - mountPath: /app/config/services.yaml
              name: homepage-config
              subPath: services.yaml
            - mountPath: /app/config/settings.yaml
              name: homepage-config
              subPath: settings.yaml
            - mountPath: /app/config/widgets.yaml
              name: homepage-config
              subPath: widgets.yaml
            # Writable scratch dir for the app's log files.
            - mountPath: /app/config/logs
              name: logs
      volumes:
        - name: homepage-config
          configMap:
            name: homepage
        - name: logs
          emptyDir: {}

View File

@@ -9,4 +9,4 @@ spec:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- homepage.k8s.internal.schnrbs.work
- homepage.k8s.schnrbs.work

View File

@@ -0,0 +1,24 @@
# Traefik IngressRoute terminating TLS for the homepage UI.
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: homepage-ingress-route
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
  annotations:
    # gethomepage.dev/* annotations let homepage self-register this route
    # as a tile on its own dashboard.
    gethomepage.dev/description: Dynamically Detected Homepage
    gethomepage.dev/enabled: "true"
    gethomepage.dev/group: Cluster Management
    gethomepage.dev/icon: homepage.png
    gethomepage.dev/name: Homepage
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`homepage.k8s.schnrbs.work`)
      kind: Rule
      services:
        - name: homepage
          port: 3000
  tls:
    # Populated by cert-manager — presumably from the homepage Certificate
    # manifest; confirm the Certificate's secretName matches.
    secretName: homepage-certificate-secret

View File

@@ -0,0 +1,150 @@
# Homepage runtime configuration; each data key is mounted as a file under
# /app/config by the Deployment (subPath mounts).
# WARNING(review): live API keys / tokens / passwords are committed here in
# plain text (Home Assistant, Pi-hole, Paperless, Immich, Linkwarden, NPM,
# Plex). Move them into a Secret (e.g. via External Secrets + Vault) and
# rotate the exposed values.
apiVersion: v1
kind: ConfigMap
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
  annotations:
    # Stakater Reloader: a change here restarts the homepage pod.
    reloader.stakater.com/match: "true"
data:
  kubernetes.yaml: |
    mode: cluster
  settings.yaml: |
    background: https://images.unsplash.com/photo-1502790671504-542ad42d5189?auto=format&fit=crop&w=2560&q=80
    cardBlur: xs
    providers:
      longhorn:
        url: https://longhorn-dashboard.k8s.schnrbs.work
  custom.css: ""
  custom.js: ""
  bookmarks.yaml: |
    - Developer:
        - Github:
            - abbr: GH
              href: https://github.com/
  # Service tiles grouped by dashboard section.
  services.yaml: |
    - Smart Home:
        - Home Assistant:
            icon: home-assistant.png
            href: https://ha.homeee.schnorbus.net
            description: Home Assistant is awesome
            widgets:
              - type: homeassistant
                url: https://ha.homeee.schnorbus.net
                key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI3MTA1ZmE1MDA5ZTA0MDQxYTc0NzUxZmUwM2NhYWMwZiIsImlhdCI6MTc0NTcxMDY3OCwiZXhwIjoyMDYxMDcwNjc4fQ.EI6-Husovb1IYpVn5RBy8pJ7bcESQHDzIbS22_5abUs
        - Zigbee2MQTT:
            icon: zigbee2mqtt.png
            href: http://muckibude.fritz.box:8383
            description: Zigbee2MQTT is awesome
        - Pihole:
            icon: pi-hole.png
            href: http://pi.hole
            description: Pi-hole
            widgets:
              - type: pihole
                url: http://pi.hole
                version: 6
                key: 5ipI9bvB
        - Paperless NGX:
            icon: paperless-ng.png
            href: https://ppl.homeee.schnorbus.net
            widgets:
              - type: paperlessngx
                url: https://ppl.homeee.schnorbus.net
                token: 0cf8eb062d0ecfc0aa70611125427692cb577d68
    - My Second Group:
        - Proxmox pve-81:
            icon: proxmox.png
            href: http://pve-81.fritz.box:8006
            description: Homepage is the best
        - Proxmox pve-82:
            icon: proxmox.png
            href: http://pve-82.fritz.box:8006
            description: Homepage is the best
        - Proxmox pve-83:
            icon: proxmox.png
            href: https://pve-83.fritz.box:8006
            description: Homepage is the best
            # widgets:
            #   - type: proxmox
            #     url: https://pve-83.fritz.box:8006
            #     username: homepage_api@pam!homepage_api
            #     password: 0cf8eb062d0ecfc0aa70611125427692cb577d68
        - Longhorn:
            icon: longhorn.png
            href: https://longhorn-dashboard.k8s.schnrbs.work
            description: Longhorn volume provisioning
    - Party Time:
        - Immich:
            icon: immich.png
            href: https://immich.homeee.schnorbus.net
            description: Immich is awesome
            widgets:
              - type: immich
                url: https://immich.homeee.schnorbus.net
                key: deOT6z7AHok30eKWgF2bOSJuOIZXK0eONo7PrR0As
                version: 2
        - Linkwarden:
            icon: linkwarden.png
            href: https://lw.homeee.schnorbus.net
            description: Homepage isssss 😎
            widgets:
              - type: linkwarden
                url: http://docker-host-02.fritz.box:9595
                key: eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..bEvs2PcR0ZTNpb8b.Lhe1-00LlVVC97arojvhh7IK4VADR82AMAzK5sd7AcUhs2WUQmu8Q-cOAKFGVlgPgdk-w1Pa8CJJHF71opWJk85aJXkTcdl7jANwN8PqgHXsSPoqtvzX.5GFRIAMo31sw5GStVlznHQ
        - Nginx Proxy Manager:
            icon: nginx-proxy-manager.png
            href: http://192.168.178.42:8181
            description: Nginx Proxy Manager is awesome
            widgets:
              - type: npm
                url: http://192.168.178.42:8181
                username: bastian@schnorbus.net
                password: abcd1234
        - Plex:
            icon: plex.png
            href: http://diskstation.fritz.box:32400/web/index.html#!/
            description: Watch movies and TV shows.
            server: http://diskstation.fritz.box:32400/web/index.html#!/
            container: plex
            widgets:
              - type: plex
                url: http://diskstation.fritz.box:32400
                key: aNcUss31qsVsea5bsDf9
  # Info widgets shown in the dashboard header row.
  widgets.yaml: |
    - kubernetes:
        cluster:
          show: true
          cpu: true
          memory: true
          showLabel: true
          label: "cluster"
        nodes:
          show: true
          cpu: true
          memory: true
          showLabel: true
    - longhorn:
        # Show the expanded view
        expanded: true
        # Shows a node representing the aggregate values
        total: true
        # Shows the node names as labels
        labels: true
        # Show the nodes
        nodes: true
    - resources:
        backend: resources
        expanded: true
        cpu: true
        memory: true
        network: default
    - search:
        provider: duckduckgo
        target: _blank
  docker.yaml: ""

View File

@@ -1,6 +1,35 @@
Install via helm:
## Installation
### Install via helm
https://gethomepage.dev/installation/k8s/#install-with-helm
```
helm upgrade --install homepage jameswynn/homepage -f homepage-values.yaml --create-namespace --namespace homepage
```
### Install via deployment
```
k create ns homepage
k apply -f 01_homepage-deployment.yaml
```
## Setup Https & Certificate
```
k apply -f 02_homepage-certificate.yaml
k apply -f 03_homepage-ingress-route.yaml
```
## Upload Content
```
k apply -f 04_homepage-configmap.yaml
```
## Test
Open a browser and navigate to:
https://homepage.k8s.schnrbs.work

View File

@@ -1,16 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: homepage-ingress-route
namespace: homepage
spec:
entryPoints:
- websecure
routes:
- match: Host(`homepage.k8s.internal.schnrbs.work`)
kind: Rule
services:
- name: homepage
port: 3000
tls:
secretName: homepage-certificate-secret

View File

@@ -1,72 +0,0 @@
config:
bookmarks:
- Developer:
- Github:
- abbr: GH
href: https://github.com/
services:
- My First Group:
- My First Service:
href: http://localhost/
description: Homepage is awesome
- My Second Group:
- My Second Service:
href: http://localhost/
description: Homepage is the best
- My Third Group:
- My Third Service:
href: http://localhost/
description: Homepage is 😎
widgets:
# show the kubernetes widget, with the cluster summary and individual nodes
- kubernetes:
cluster:
show: true
cpu: true
memory: true
showLabel: true
label: "cluster"
nodes:
show: true
cpu: true
memory: true
showLabel: true
- pihole:
show: true
url: http://192.168.178.202
key: 1eae9e87f4b4710981639ee591b7d75734811d61697092110cb748c3244e01cc
- fritzbox:
show: true
url: http://192.168.178.1
- search:
provider: duckduckgo
target: _blank
kubernetes:
mode: cluster
settings:
# The service account is necessary to allow discovery of other services
serviceAccount:
create: true
name: homepage
# This enables the service account to access the necessary resources
enableRbac: true
ingress:
main:
enabled: false
annotations:
# Example annotations to add Homepage to your Homepage!
gethomepage.dev/enabled: "true"
gethomepage.dev/name: "Homepage"
gethomepage.dev/description: "Dynamically Detected Homepage"
gethomepage.dev/group: "Dynamic"
gethomepage.dev/icon: "homepage.png"
hosts:
- host: homepage.k8s.internal.schnrbs.work
paths:
- path: /
pathType: Prefix

View File

@@ -0,0 +1,27 @@
# Helm values override. The restricted-PSS security contexts below are kept
# commented out — presumably the chart's defaults are sufficient; re-enable
# them if the target namespace enforces pod-security.kubernetes.io/enforce=restricted.
# Pod Security Context for restricted Pod Security Standards
#podSecurityContext:
#  runAsNonRoot: true
#  seccompProfile:
#    type: RuntimeDefault
#  fsGroup: 10001
#
## Container Security Context for restricted Pod Security Standards
#containerSecurityContext:
#  allowPrivilegeEscalation: false
#  readOnlyRootFilesystem: true
#  runAsUser: 10001
#  runAsGroup: 10001
#  seccompProfile:
#    type: RuntimeDefault
#  capabilities:
#    drop:
#      - ALL
#
# Resource requests/limits for the chart's workload pod.
resources:
  requests:
    cpu: 50m
    memory: 128Mi
  limits:
    cpu: 100m
    memory: 256Mi

647
10_Postgres/justfile Normal file
View File

@@ -0,0 +1,647 @@
# justfile: manage CloudNativePG (operator + cluster) and day-2 Postgres tasks.
# 'fallback' lets recipes resolve names from the parent justfile (e.g. the
# utils:: and vault:: modules used below).
set fallback := true

# All settings overridable via environment variables.
export CNPG_NAMESPACE := env("CNPG_NAMESPACE", "postgres")
export CNPG_CHART_VERSION := env("CNPG_CHART_VERSION", "0.26.1")
export CNPG_CLUSTER_CHART_VERSION := env("CNPG_CLUSTER_CHART_VERSION", "0.3.1")
export POSTGRES_STORAGE_SIZE := env("POSTGRES_STORAGE_SIZE", "20Gi")
export POSTGRES_MAX_CONNECTIONS := env("POSTGRES_MAX_CONNECTIONS", "200")
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
export EXTERNAL_SECRETS_NAMESPACE := env("EXTERNAL_SECRETS_NAMESPACE", "external-secrets")
[private]
default:
    @just --list --unsorted --list-submodules

# Add Helm repository
add-helm-repo:
    @helm repo add cnpg https://cloudnative-pg.github.io/charts
    @helm repo update

# Remove Helm repository
remove-helm-repo:
    @helm repo remove cnpg

# Install CloudNativePG and create a cluster
install:
    @just install-cnpg
    @just create-cluster

# Uninstall CloudNativePG and delete the cluster
uninstall:
    @just delete-cluster
    @just uninstall-cnpg

# Install the CloudNativePG operator and enforce the 'restricted'
# Pod Security Standard on its namespace.
install-cnpg:
    @just add-helm-repo
    @helm upgrade --cleanup-on-fail --install cnpg cnpg/cloudnative-pg \
        --version ${CNPG_CHART_VERSION} \
        -n ${CNPG_NAMESPACE} --create-namespace --wait \
        -f cnpg-values.yaml
    @kubectl label namespace ${CNPG_NAMESPACE} \
        pod-security.kubernetes.io/enforce=restricted --overwrite

# Uninstall the CloudNativePG operator and remove its namespace.
uninstall-cnpg:
    @helm uninstall cnpg -n ${CNPG_NAMESPACE} --wait
    @kubectl delete namespace ${CNPG_NAMESPACE} --ignore-not-found
# Create the Postgres cluster. The superuser credential is provisioned via
# External Secrets + Vault when the operator is installed, otherwise as a
# plain Kubernetes Secret (still pushed to Vault if Vault is present).
create-cluster:
    #!/bin/bash
    set -euo pipefail
    if helm status external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} &>/dev/null; then
    echo "External Secrets Operator detected. Creating admin credentials via ExternalSecret..."
    password=$(just utils::random-password)
    # Store the credential in Vault; ESO syncs it back into the namespace.
    just vault::put-root postgres/admin username=postgres password="${password}"
    kubectl delete externalsecret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
    gomplate -f postgres-superuser-external-secret.gomplate.yaml | kubectl apply -f -
    echo "Waiting for ExternalSecret to sync..."
    kubectl wait --for=condition=Ready externalsecret/postgres-cluster-superuser \
    -n ${CNPG_NAMESPACE} --timeout=60s
    else
    echo "External Secrets Operator not found. Creating superuser secret directly..."
    password=$(just utils::random-password)
    kubectl delete secret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
    kubectl create secret generic postgres-cluster-superuser -n ${CNPG_NAMESPACE} \
    --from-literal=username=postgres \
    --from-literal=password="${password}"
    if helm status vault -n ${K8S_VAULT_NAMESPACE} &>/dev/null; then
    just vault::put-root postgres/admin username=postgres password="${password}"
    fi
    fi
    # Render the cluster chart values from the gomplate template, then deploy.
    gomplate -f postgres-cluster-values.gomplate.yaml -o postgres-cluster-values.yaml
    helm upgrade --install postgres-cluster cnpg/cluster \
    --version ${CNPG_CLUSTER_CHART_VERSION} \
    -n ${CNPG_NAMESPACE} --wait -f postgres-cluster-values.yaml
    echo "Waiting for PostgreSQL cluster to be ready..."
    kubectl wait --for=condition=Ready clusters.postgresql.cnpg.io/postgres-cluster \
    -n ${CNPG_NAMESPACE} --timeout=300s
# Delete the Postgres cluster plus the superuser credential objects.
delete-cluster:
    @helm uninstall postgres-cluster -n ${CNPG_NAMESPACE} --ignore-not-found --wait
    @kubectl delete externalsecret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
    @kubectl delete secret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found

# Print Postgres username
admin-username:
    @echo "postgres"

# Print Postgres password (decoded from the superuser Secret)
admin-password:
    @kubectl get -n ${CNPG_NAMESPACE} secret postgres-cluster-superuser \
        -o jsonpath="{.data.password}" | base64 --decode
    @echo
# Create Postgres database (prompts when db_name is empty)
create-db db_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-{{ db_name }}}
    while [ -z "${DB_NAME}" ]; do
    DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    if just db-exists ${DB_NAME} &>/dev/null; then
    echo "Database ${DB_NAME} already exists" >&2
    exit
    fi
    echo "Creating database ${DB_NAME}..."
    just psql -c "\"CREATE DATABASE ${DB_NAME};\""
    echo "Database ${DB_NAME} created."

# Delete Postgres database (prompts when db_name is empty)
delete-db db_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-{{ db_name }}}
    # Fix: prompt for the name like create-db does. Previously an empty
    # DB_NAME fell through to 'just db-exists ... &>/dev/null', which hid
    # gum's prompt and never propagated the entered name back here.
    while [ -z "${DB_NAME}" ]; do
    DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    if ! just db-exists ${DB_NAME} &>/dev/null; then
    echo "Database ${DB_NAME} does not exist." >&2
    exit
    fi
    # Terminate all connections to the database
    just psql -c "\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity
    WHERE datname = '${DB_NAME}' AND pid <> pg_backend_pid();\""
    # Force disconnect if needed
    just psql -c "\"UPDATE pg_database SET datallowconn = false WHERE datname = '${DB_NAME}';\""
    just psql -c "\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity
    WHERE datname = '${DB_NAME}';\""
    just psql -c "\"DROP DATABASE ${DB_NAME};\""
    echo "Database ${DB_NAME} deleted."

# Check if database exists (exit 0 when found, 1 otherwise)
[no-exit-message]
db-exists db_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-{{ db_name }}}
    while [ -z "${DB_NAME}" ]; do
    DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    # Fix: call the local 'psql' recipe (as every sibling recipe does)
    # instead of the parent-scoped 'postgres::psql'.
    if echo '\l' | just psql | grep -E "^ *${DB_NAME} *\|" &>/dev/null; then
    echo "Database ${DB_NAME} exists."
    else
    echo "Database ${DB_NAME} does not exist." >&2
    exit 1
    fi
# Create Postgres user (prompts for missing username/password)
create-user username='' password='':
    #!/bin/bash
    set -euo pipefail
    USERNAME=${USERNAME:-"{{ username }}"}
    PASSWORD=${PASSWORD:-"{{ password }}"}
    while [ -z "${USERNAME}" ]; do
    USERNAME=$(gum input --prompt="Username: " --width=100)
    done
    if just user-exists ${USERNAME} &>/dev/null; then
    echo "User ${USERNAME} already exists" >&2
    exit
    fi
    if [ -z "${PASSWORD}" ]; then
    PASSWORD=$(gum input --prompt="Password: " --password --width=100 \
    --placeholder="Empty to generate a random password")
    fi
    if [ -z "${PASSWORD}" ]; then
    # Fix: 'random-password' lives in the utils module (as used by
    # create-cluster and change-password); a bare 'just random-password'
    # has no matching recipe in this justfile.
    PASSWORD=$(just utils::random-password)
    echo "Generated random password: ${PASSWORD}"
    fi
    just psql -c "\"CREATE USER ${USERNAME} WITH LOGIN PASSWORD '${PASSWORD}';\""
    echo "User ${USERNAME} created."

# Delete Postgres user (prompts when username is empty)
delete-user username='':
    #!/bin/bash
    set -euo pipefail
    USERNAME=${USERNAME:-"{{ username }}"}
    # Fix: prompt like create-user does; an empty USERNAME previously hit
    # 'just user-exists ... &>/dev/null', hiding gum's prompt.
    while [ -z "${USERNAME}" ]; do
    USERNAME=$(gum input --prompt="Username: " --width=100)
    done
    if ! just user-exists ${USERNAME} &>/dev/null; then
    echo "User ${USERNAME} does not exist." >&2
    exit
    fi
    # Strip default-privilege grants and public-schema ownership first so
    # DROP USER is less likely to fail on dependent objects.
    just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON TABLES FROM ${USERNAME};\""
    just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON SEQUENCES FROM ${USERNAME};\""
    just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON FUNCTIONS FROM ${USERNAME};\""
    just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON TYPES FROM ${USERNAME};\""
    just psql -c "\"ALTER SCHEMA public OWNER TO postgres;\""
    just psql -c "\"DROP USER ${USERNAME};\""
    echo "User ${USERNAME} deleted."

# Check if user exists (exit 0 when found, 1 otherwise)
[no-exit-message]
user-exists username='':
    #!/bin/bash
    set -euo pipefail
    USERNAME=${USERNAME:-"{{ username }}"}
    while [ -z "${USERNAME}" ]; do
    USERNAME=$(gum input --prompt="Username: " --width=100)
    done
    # Fix: local 'psql' recipe instead of parent-scoped 'postgres::psql',
    # for consistency with the other recipes in this module.
    if echo '\du' | just psql | grep -E "^ *${USERNAME} *\|" &>/dev/null; then
    echo "User ${USERNAME} exists."
    else
    echo "User ${USERNAME} does not exist." >&2
    exit 1
    fi
# Change a Postgres user's password (prompts for missing values;
# generates a random password when left empty).
change-password username='' password='':
    #!/bin/bash
    set -euo pipefail
    USERNAME=${USERNAME:-"{{ username }}"}
    PASSWORD=${PASSWORD:-"{{ password }}"}
    while [ -z "${USERNAME}" ]; do
    USERNAME=$(gum input --prompt="Username: " --width=100)
    done
    if ! just user-exists ${USERNAME} &>/dev/null; then
    echo "User ${USERNAME} does not exist." >&2
    exit 1
    fi
    if [ -z "${PASSWORD}" ]; then
    PASSWORD=$(gum input --prompt="New password: " --password --width=100 \
    --placeholder="Empty to generate a random password")
    fi
    if [ -z "${PASSWORD}" ]; then
    PASSWORD=$(just utils::random-password)
    echo "Generated random password: ${PASSWORD}"
    fi
    just psql -c "\"ALTER USER ${USERNAME} WITH PASSWORD '${PASSWORD}';\""
    echo "Password changed for user ${USERNAME}."

# Grant all privileges on a database to a user.
grant db_name='' username='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    USERNAME=${USERNAME:-"{{ username }}"}
    while [ -z "${DB_NAME}" ]; do
    DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    while [ -z "${USERNAME}" ]; do
    USERNAME=$(gum input --prompt="Username: " --width=100)
    done
    # Existence probe: try to connect to the target database.
    if ! just psql ${DB_NAME} -U postgres -P pager=off -c "\"SELECT 1;\""; then
    echo "Database ${DB_NAME} does not exist." >&2
    exit 1
    fi
    just psql -c "\"GRANT ALL PRIVILEGES ON DATABASE ${DB_NAME} TO ${USERNAME};\""
    # Grant CREATE permission on public schema (needed for PostgreSQL 15+)
    just psql -d ${DB_NAME} -c "\"GRANT CREATE ON SCHEMA public TO ${USERNAME};\""
    echo "Privileges granted."
# Revoke all database-level privileges from a user.
revoke db_name='' username='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    USERNAME=${USERNAME:-"{{ username }}"}
    while [ -z "${DB_NAME}" ]; do
    DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    while [ -z "${USERNAME}" ]; do
    USERNAME=$(gum input --prompt="Username: " --width=100)
    done
    # Existence probe: try to connect to the target database.
    if ! just psql -U postgres ${DB_NAME} -P pager=off -c "\"SELECT 1;\""; then
    echo "Database ${DB_NAME} does not exist." >&2
    exit 1
    fi
    just psql -c "\"REVOKE ALL PRIVILEGES ON DATABASE ${DB_NAME} FROM ${USERNAME};\""
    echo "Privileges revoked."

# Convenience: create database + user, then grant the user full access.
create-user-and-db username='' db_name='' password='':
    @just create-db "{{ db_name }}"
    @just create-user "{{ username }}" "{{ password }}"
    @just grant "{{ db_name }}" "{{ username }}"

# Convenience: tear down a database and its user, tolerating partial state
# (missing DB or user is skipped rather than failing).
delete-user-and-db username='' db_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    USERNAME=${USERNAME:-"{{ username }}"}
    if just db-exists ${DB_NAME} &>/dev/null; then
    if just user-exists ${USERNAME} &>/dev/null; then
    just revoke "${DB_NAME}" "${USERNAME}"
    else
    echo "User ${USERNAME} does not exist, skipping revoke."
    fi
    just delete-db "${DB_NAME}"
    else
    echo "Database ${DB_NAME} does not exist, skipping database deletion."
    fi
    if just user-exists ${USERNAME} &>/dev/null; then
    just delete-user "${USERNAME}"
    else
    echo "User ${USERNAME} does not exist, skipping user deletion."
    fi
    echo "Cleanup completed."
# Create a logical replication slot for CDC (idempotent: exits 0 when the
# slot already exists). Runs psql inside the primary pod 'postgres-cluster-1'.
create-replication-slot slot_name='' db_name='postgres' plugin='pgoutput':
    #!/bin/bash
    set -euo pipefail
    SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    PLUGIN=${PLUGIN:-"{{ plugin }}"}
    while [ -z "${SLOT_NAME}" ]; do
    SLOT_NAME=$(gum input --prompt="Replication slot name: " --width=100 \
    --placeholder="e.g., airbyte_slot")
    done
    if kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d ${DB_NAME} -tAc \
    "SELECT slot_name FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" | grep -q "${SLOT_NAME}"; then
    echo "Replication slot '${SLOT_NAME}' already exists."
    exit 0
    fi
    echo "Creating replication slot '${SLOT_NAME}' with plugin '${PLUGIN}'..."
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d ${DB_NAME} -c \
    "SELECT pg_create_logical_replication_slot('${SLOT_NAME}', '${PLUGIN}');"
    echo "Replication slot '${SLOT_NAME}' created."

# Delete a replication slot (exit 1 when it does not exist).
delete-replication-slot slot_name='' db_name='postgres':
    #!/bin/bash
    set -euo pipefail
    SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    while [ -z "${SLOT_NAME}" ]; do
    SLOT_NAME=$(gum input --prompt="Replication slot name to delete: " --width=100)
    done
    if ! kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d ${DB_NAME} -tAc \
    "SELECT slot_name FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" | grep -q "${SLOT_NAME}"; then
    echo "Replication slot '${SLOT_NAME}' does not exist."
    exit 1
    fi
    echo "Deleting replication slot '${SLOT_NAME}'..."
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d ${DB_NAME} -c \
    "SELECT pg_drop_replication_slot('${SLOT_NAME}');"
    echo "Replication slot '${SLOT_NAME}' deleted."

# List all replication slots on the primary.
list-replication-slots:
    @echo "Replication slots:"
    @kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
        psql -U postgres -d postgres -c \
        "SELECT slot_name, plugin, slot_type, database, active, restart_lsn FROM pg_replication_slots;"
# Create a publication for CDC. 'tables' may be empty (interactive table
# selection via gum), the literal 'ALL' (publish all tables), or a
# comma-separated list of table names.
create-publication pub_name='' db_name='' tables='':
    #!/bin/bash
    set -euo pipefail
    PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    TABLES="${TABLES:-{{ tables }}}"
    while [ -z "${PUB_NAME}" ]; do
    PUB_NAME=$(gum input --prompt="Publication name: " --width=100 \
    --placeholder="e.g., airbyte_publication")
    done
    while [ -z "${DB_NAME}" ]; do
    DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    # Idempotence: succeed silently when the publication already exists.
    if kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d ${DB_NAME} -tAc \
    "SELECT pubname FROM pg_publication WHERE pubname = '${PUB_NAME}';" | grep -q "${PUB_NAME}"; then
    echo "Publication '${PUB_NAME}' already exists in database '${DB_NAME}'."
    exit 0
    fi
    if [ -z "${TABLES}" ]; then
    echo "Select tables to include in publication:"
    echo "1) All tables (ALL TABLES)"
    echo "2) All user tables (exclude system/internal tables)"
    echo "3) Specific tables (comma-separated list)"
    CHOICE=$(gum choose "All tables" "User tables only" "Specific tables")
    case "${CHOICE}" in
    "All tables")
    TABLES="ALL TABLES"
    ;;
    "User tables only")
    # Get list of user tables (excluding _airbyte* and other system tables)
    USER_TABLES=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d ${DB_NAME} -tAc \
    "SELECT string_agg(tablename, ', ') FROM pg_tables
    WHERE schemaname = 'public'
    AND tablename NOT LIKE '\_%'
    AND tablename NOT LIKE 'pg_%';")
    if [ -z "${USER_TABLES}" ]; then
    echo "No user tables found in database '${DB_NAME}'"
    exit 1
    fi
    TABLES="TABLE ${USER_TABLES}"
    echo "Including tables: ${USER_TABLES}"
    ;;
    "Specific tables")
    TABLES=$(gum input --prompt="Enter table names (comma-separated): " --width=100 \
    --placeholder="e.g., users, products, orders")
    TABLES="TABLE ${TABLES}"
    ;;
    esac
    elif [ "${TABLES}" = "ALL" ]; then
    TABLES="ALL TABLES"
    fi
    echo "Creating publication '${PUB_NAME}' in database '${DB_NAME}'..."
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d ${DB_NAME} -c \
    "CREATE PUBLICATION ${PUB_NAME} FOR ${TABLES};"
    # REPLICA IDENTITY FULL lets UPDATE/DELETE rows be replicated for tables
    # without a usable primary key; errors are ignored per table.
    if [ "${TABLES}" != "ALL TABLES" ]; then
    echo "Setting REPLICA IDENTITY for included tables..."
    TABLE_LIST=$(echo "${TABLES}" | sed 's/TABLE //')
    IFS=',' read -ra TABLE_ARRAY <<< "${TABLE_LIST}"
    for table in "${TABLE_ARRAY[@]}"; do
    table=$(echo "$table" | xargs) # trim whitespace
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d ${DB_NAME} -c \
    "ALTER TABLE ${table} REPLICA IDENTITY FULL;" 2>/dev/null || true
    done
    fi
    echo "Publication '${PUB_NAME}' created."

# Delete a publication (exit 1 when it does not exist).
delete-publication pub_name='' db_name='':
    #!/bin/bash
    set -euo pipefail
    PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    while [ -z "${PUB_NAME}" ]; do
    PUB_NAME=$(gum input --prompt="Publication name to delete: " --width=100)
    done
    while [ -z "${DB_NAME}" ]; do
    DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    if ! kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d ${DB_NAME} -tAc \
    "SELECT pubname FROM pg_publication WHERE pubname = '${PUB_NAME}';" | grep -q "${PUB_NAME}"; then
    echo "Publication '${PUB_NAME}' does not exist in database '${DB_NAME}'."
    exit 1
    fi
    echo "Deleting publication '${PUB_NAME}' from database '${DB_NAME}'..."
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d ${DB_NAME} -c \
    "DROP PUBLICATION ${PUB_NAME};"
    echo "Publication '${PUB_NAME}' deleted."

# List all publications in a database.
list-publications db_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    while [ -z "${DB_NAME}" ]; do
    DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    echo "Publications in database '${DB_NAME}':"
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d ${DB_NAME} -c \
    "SELECT pubname, puballtables, pubinsert, pubupdate, pubdelete FROM pg_publication;"
# Grant a user the privileges needed to consume CDC streams:
# REPLICATION attribute, read access to public schema tables (current and
# future), and the pg_read_all_data role (best-effort, ignored on failure).
grant-cdc-privileges username='' db_name='':
    #!/bin/bash
    set -euo pipefail
    USERNAME=${USERNAME:-"{{ username }}"}
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    while [ -z "${USERNAME}" ]; do
    USERNAME=$(gum input --prompt="Username to grant CDC privileges: " --width=100)
    done
    while [ -z "${DB_NAME}" ]; do
    DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    echo "Granting CDC privileges to user '${USERNAME}' on database '${DB_NAME}'..."
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d ${DB_NAME} -c "ALTER USER ${USERNAME} WITH REPLICATION;"
    echo "Granting schema and table privileges..."
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d ${DB_NAME} -c \
    "GRANT USAGE ON SCHEMA public TO ${USERNAME};
    GRANT CREATE ON SCHEMA public TO ${USERNAME};
    GRANT SELECT ON ALL TABLES IN SCHEMA public TO ${USERNAME};
    ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO ${USERNAME};"
    echo "Granting pg_read_all_data role..."
    kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d ${DB_NAME} -c "GRANT pg_read_all_data TO ${USERNAME};" 2>/dev/null || true
    echo "CDC privileges granted to user '${USERNAME}'"

# End-to-end CDC setup: verify wal_level=logical, then create slot and
# publication; optionally grant CDC privileges to a user.
setup-cdc db_name='' slot_name='' pub_name='' username='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
    PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
    USERNAME=${USERNAME:-"{{ username }}"}
    while [ -z "${DB_NAME}" ]; do
    DB_NAME=$(gum input --prompt="Database name for CDC setup: " --width=100)
    done
    while [ -z "${SLOT_NAME}" ]; do
    SLOT_NAME=$(gum input --prompt="Replication slot name: " --width=100 \
    --placeholder="e.g., demo_slot")
    done
    while [ -z "${PUB_NAME}" ]; do
    PUB_NAME=$(gum input --prompt="Publication name: " --width=100 \
    --placeholder="e.g., demo_pub")
    done
    echo "Setting up CDC on database '${DB_NAME}'..."
    # Logical decoding requires wal_level=logical; abort early otherwise.
    WAL_LEVEL=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d postgres -tAc "SHOW wal_level;")
    if [ "${WAL_LEVEL}" != "logical" ]; then
    echo "WARNING: wal_level is '${WAL_LEVEL}', should be 'logical' for CDC"
    echo "Please ensure PostgreSQL is configured with wal_level=logical"
    exit 1
    fi
    just create-replication-slot "${SLOT_NAME}" "${DB_NAME}"
    just create-publication "${PUB_NAME}" "${DB_NAME}"
    if [ -n "${USERNAME}" ]; then
    echo ""
    just grant-cdc-privileges "${USERNAME}" "${DB_NAME}"
    fi
    echo ""
    echo "CDC setup completed for database '${DB_NAME}'"
    echo " Replication Method: Logical Replication (CDC)"
    echo " Replication Slot: ${SLOT_NAME}"
    echo " Publication: ${PUB_NAME}"
    if [ -n "${USERNAME}" ]; then
    echo " User with CDC privileges: ${USERNAME}"
    fi

# Cleanup CDC: delete the slot, then the publication; warns (and asks for
# confirmation) when the slot still has an active consumer.
cleanup-cdc db_name='' slot_name='' pub_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-"{{ db_name }}"}
    SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
    PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
    while [ -z "${DB_NAME}" ]; do
    DB_NAME=$(gum input --prompt="Database name for CDC cleanup: " --width=100)
    done
    while [ -z "${SLOT_NAME}" ]; do
    SLOT_NAME=$(gum input --prompt="Replication slot name to delete: " --width=100 \
    --placeholder="e.g., demo_slot")
    done
    while [ -z "${PUB_NAME}" ]; do
    PUB_NAME=$(gum input --prompt="Publication name to delete: " --width=100 \
    --placeholder="e.g., demo_pub")
    done
    echo "Cleaning up CDC configuration for database '${DB_NAME}'..."
    # Check if slot is active
    SLOT_ACTIVE=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
    psql -U postgres -d postgres -tAc \
    "SELECT active FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" 2>/dev/null || echo "")
    if [ "${SLOT_ACTIVE}" = "t" ]; then
    echo "WARNING: Replication slot '${SLOT_NAME}' is currently active!"
    echo "Please stop any active replication connections first."
    if ! gum confirm "Proceed with deletion anyway?"; then
    echo "Cleanup cancelled"
    exit 1
    fi
    fi
    # Delete in correct order: Slot first, then Publication
    echo "Step 1: Deleting replication slot '${SLOT_NAME}'..."
    just delete-replication-slot "${SLOT_NAME}" "${DB_NAME}" || \
    echo "Replication slot '${SLOT_NAME}' not found or already deleted"
    echo "Step 2: Deleting publication '${PUB_NAME}'..."
    just delete-publication "${PUB_NAME}" "${DB_NAME}" || \
    echo "Publication '${PUB_NAME}' not found or already deleted"
    echo "CDC cleanup completed for database '${DB_NAME}'"

# Run psql inside the primary pod; extra args are passed straight through.
[no-exit-message]
psql *args='':
    @kubectl exec -it -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- psql {{ args }}
# Dump Postgres database by pg_dump
# Creates a custom-format (-Fc) dump of `db_name` inside the primary pod,
# copies it to the local `file` path, then removes the in-pod temp file.
# `exclude_tables` is an optional comma-separated list of tables to skip.
# NOTE(review): the dump is staged in /var/lib/postgresql/data — assumes
# enough free space on the data volume; the temp file is not cleaned up
# if an intermediate step fails (no trap) — TODO confirm acceptable.
[no-cd]
dump db_name file exclude_tables='':
#!/bin/bash
set -euo pipefail
DUMP_OPTIONS="-Fc"
# Translate the comma-separated list into repeated --exclude-table options.
if [ -n "{{ exclude_tables }}" ]; then
IFS=',' read -ra TABLES <<< "{{ exclude_tables }}"
for table in "${TABLES[@]}"; do
DUMP_OPTIONS="$DUMP_OPTIONS --exclude-table=$table"
done
fi
# Credentials are resolved locally via the postgres module's helper recipes
# and embedded in the connection URL; $DUMP_OPTIONS is intentionally unquoted
# in the remote command so each option word-splits into its own argument.
kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- bash -c \
"pg_dump -d postgresql://$(just postgres::admin-username):$(just postgres::admin-password)@localhost/{{ db_name }} $DUMP_OPTIONS > \
/var/lib/postgresql/data/db.dump"
kubectl cp -n ${CNPG_NAMESPACE} -c postgres \
postgres-cluster-1:/var/lib/postgresql/data/db.dump {{ file }}
kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- rm /var/lib/postgresql/data/db.dump
# Restore Postgres database by pg_restore
# Ensures the target database exists, copies the local dump `file` into the
# primary pod, and restores it with --clean --if-exists (drops existing
# objects before recreating them, ignoring ones that are absent).
# NOTE(review): unlike `dump`, the staged /var/lib/postgresql/data/db.dump
# is not removed after the restore — TODO confirm intentional.
[no-cd]
restore db_name file:
just postgres::create-db {{ db_name }}
kubectl cp {{ file }} -n ${CNPG_NAMESPACE} -c postgres \
postgres-cluster-1:/var/lib/postgresql/data/db.dump
kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- bash -c \
"pg_restore --clean --if-exists \
-d postgresql://$(just postgres::admin-username):$(just postgres::admin-password)@localhost/{{ db_name }} \
/var/lib/postgresql/data/db.dump"
# Enable Prometheus monitoring
# Turns on the CNPG-managed PodMonitor for the cluster and labels it so the
# kube-prometheus-stack Prometheus instance picks it up.
enable-monitoring:
#!/bin/bash
set -euo pipefail
echo "Enabling Prometheus PodMonitor for PostgreSQL cluster..."
# Label namespace to enable monitoring
kubectl label namespace ${CNPG_NAMESPACE} buun.channel/enable-monitoring=true --overwrite
# Enable PodMonitor on the Cluster resource; the operator then creates the
# PodMonitor object asynchronously.
kubectl patch cluster postgres-cluster -n ${CNPG_NAMESPACE} --type=merge -p '{"spec":{"monitoring":{"enablePodMonitor":true}}}'
echo "Waiting for PodMonitor to be created..."
# Poll for the PodMonitor instead of a blind `sleep 3`: on a slow operator
# the fixed sleep raced and the label command below failed under `set -e`.
found=0
for _ in {1..30}; do
  if kubectl get podmonitor postgres-cluster -n ${CNPG_NAMESPACE} &>/dev/null; then
    found=1
    break
  fi
  sleep 1
done
if [ "${found}" -ne 1 ]; then
  echo "ERROR: PodMonitor 'postgres-cluster' did not appear within 30s" >&2
  exit 1
fi
# Add release label so the kube-prometheus-stack Prometheus selects it.
kubectl label podmonitor postgres-cluster -n ${CNPG_NAMESPACE} release=kube-prometheus-stack --overwrite
kubectl get podmonitor -n ${CNPG_NAMESPACE} -l cnpg.io/cluster=postgres-cluster
echo "✓ PostgreSQL monitoring enabled"
# Disable Prometheus monitoring
# Turns the CNPG-managed PodMonitor off again and removes the namespace
# opt-in label.
disable-monitoring:
#!/bin/bash
set -euo pipefail
echo "Disabling Prometheus PodMonitor for PostgreSQL cluster..."
# Disable PodMonitor
kubectl patch cluster postgres-cluster -n ${CNPG_NAMESPACE} --type=merge -p '{"spec":{"monitoring":{"enablePodMonitor":false}}}'
# Remove namespace label. `kubectl label` does not support --ignore-not-found
# (that flag belongs to get/delete), so the previous invocation always errored
# and aborted the recipe under `set -e`; tolerate a missing label explicitly.
kubectl label namespace ${CNPG_NAMESPACE} buun.channel/enable-monitoring- || true
echo "✓ PostgreSQL monitoring disabled"

View File

@@ -0,0 +1,9 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: cluster-example
spec:
instances: 3
storage:
size: 1Gi

View File

@@ -0,0 +1,9 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: foo-pv
spec:
storageClassName: "longhorn"
claimRef:
name: foo-pvc
namespace: foo

View File

View File

@@ -2,14 +2,14 @@ apiVersion: v1
kind: PersistentVolume
metadata:
name: longhorn-test-pv
namespace: default
namespace: test
spec:
capacity:
storage: 10Gi # Setze die gewünschte Speichergröße
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain # Optionally, 'Delete' oder 'Recycle'
persistentVolumeReclaimPolicy: Delete # Optionally, 'Delete' oder 'Recycle'
storageClassName: longhorn # Verwende den Longhorn-StorageClass-Namen
csi:
driver: driver.longhorn.io # Der Longhorn CSI-Treiber

View File

@@ -2,7 +2,7 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: longhorn-test-pvc
namespace: default
namespace: test
spec:
accessModes:
- ReadWriteOnce

View File

@@ -1,16 +1,42 @@
apiVersion: v1
kind: Namespace
metadata:
name: foo
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: longhorn-nginx-pvc
namespace: foo
spec:
storageClassName: longhorn # Die gleiche StorageClass wie im PV
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi # Die angeforderte Größe sollte mit der des PV übereinstimmen
# volumeName: longhorn-test-pv # Der Name des PV, das für diesen PVC verwendet werden soll
---
apiVersion: v1
kind: Pod
metadata:
name: longhorn-demo
namespace: default
namespace: foo
spec:
containers:
- name: demo-container
image: nginx:latest
resources:
requests:
memory: "64Mi"
cpu: "250m"
limits:
memory: "128Mi"
cpu: "500m"
volumeMounts:
- mountPath: /usr/share/nginx/html
name: longhorn-volume
volumes:
- name: longhorn-volume
persistentVolumeClaim:
claimName: longhorn-test-pvc
claimName: longhorn-nginx-pvc

4
12_Authentik/README.md Normal file
View File

@@ -0,0 +1,4 @@
https://docs.goauthentik.io/install-config/install/kubernetes/#install-authentik-helm-chart
https://nohup.no/posts/authentik-on-k8s/

View File

@@ -0,0 +1,10 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: authentik-pgdb
namespace: authentik
spec:
instances: 3
storage:
size: 1Gi

View File

@@ -0,0 +1,71 @@
authentik:
secret_key: "PleaseGenerateASecureKey"
# This sends anonymous usage-data, stack traces on errors and
# performance data to sentry.io, and is fully opt-in
error_reporting:
enabled: true
postgresql:
host: "your-cnpg-cluster-rw.namespace.svc.cluster.local"
name: "authentik"
port: 5432
existingSecret: "authentik-credentials" # if you want to use a secret
server:
ingress:
# Specify kubernetes ingress controller class name
ingressClassName: nginx | traefik | kong
enabled: true
hosts:
- authentik.domain.tld
# Disable the built-in PostgreSQL
postgresql:
enabled: false
auth:
password: "ThisIsNotASecurePassword"
postgresql:
host: "your-cnpg-cluster-rw.namespace.svc.cluster.local"
name: "authentik"
port: 5432
existingSecret: "authentik-credentials" # if you want to use a secret
server:
ingress:
# Specify kubernetes ingress controller class name
ingressClassName: nginx | traefik | kong
enabled: true
postgresql:
host: "your-cnpg-cluster-rw.namespace.svc.cluster.local"
name: "authentik"
port: 5432
existingSecret: "authentik-credentials" # if you want to use a secret
server:
ingress:
# Specify kubernetes ingress controller class name
ingressClassName: nginx | traefik | kong
enabled: true
hosts:
- authentik.domain.tld
# Disable the built-in PostgreSQL
postgresql:
enabled: false
hosts:
- authentik.domain.tld
# Disable the built-in PostgreSQL
postgresql:
enabled: false
postgresql:
host: "your-cnpg-cluster-rw.namespace.svc.cluster.local"
name: "authentik"
port: 5432
existingSecret: "authentik-credentials" # if you want to use a secret
server:
ingress:
# Specify kubernetes ingress controller class name
ingressClassName: nginx | traefik | kong
enabled: true
hosts:
- authentik.domain.tld
# Disable the built-in PostgreSQL
postgresql:
enabled: false

28
12_Authentik/justfile Normal file
View File

@@ -0,0 +1,28 @@
set fallback := true
export AUTHENTIK_NAMESPACE := env("AUTHENTIK_NAMESPACE", "authentik")
[private]
default:
@just --list --unsorted --list-submodules
# Add Helm repository
add-helm-repo:
@helm repo add authentik https://charts.goauthentik.io
@helm repo update
# Remove Helm repository
remove-helm-repo:
@helm repo remove authentik
install:
@just add-helm-repo
@helm upgrade --cleanup-on-fail --install authentik authentik/authentik \
-n ${AUTHENTIK_NAMESPACE} --create-namespace --wait \
-f authentik-values.yaml
uninstall:
@helm uninstall authentik -n ${AUTHENTIK_NAMESPACE} --wait
@kubectl delete namespace ${AUTHENTIK_NAMESPACE} --ignore-not-found

10
12_reloader/README.md Normal file
View File

@@ -0,0 +1,10 @@
helm install reloader stakater/reloader --namespace reloader --create-namespace
flux create source helm stakater --url https://stakater.github.io/stakater-charts --namespace reloader
flux create helmrelease my-reloader --chart stakater/reloader \
--source HelmRepository/stakater \
--chart-version 2.1.3 \
--namespace reloader

View File

@@ -0,0 +1,2 @@
https://igeadetokunbo.medium.com/how-to-run-databases-on-kubernetes-an-8-step-guide-b75ce9117600

View File

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mysql
spec:
serviceName: "mysql"
replicas: 3
selector:
matchLabels:
app: mysql
template:
metadata:
labels:
app: mysql
spec:
containers:
- name: mysql
image: mysql:8.4.0-oraclelinux8
ports:
- containerPort: 3306
name: mysql
env:
- name: MYSQL_ROOT_PASSWORD
value: "your_password"
volumeMounts:
- name: mysql-storage
mountPath: /var/lib/mysql
volumeClaimTemplates:
- metadata:
name: mysql-storage
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 10Gi
storageClassName: longhorn

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: mysql-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: longhorn
hostPath:
path: /mnt/data # Specify a path in the host for storage

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mysql-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: longhorn

View File

@@ -0,0 +1,13 @@
# Headless service
apiVersion: v1
kind: Service
metadata:
name: mysql
labels:
app: mysql
spec:
ports:
- name: mysql
port: 3306
selector:
app: mysql

View File

@@ -133,6 +133,151 @@ spec:
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: zwavejs2mqtt-pvc
labels:
app: zwavejs2mqtt
namespace: home-assistant
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Mi
---
apiVersion: v1
kind: Namespace
metadata:
name: home-assistant
---
apiVersion: v1
kind: Service
metadata:
namespace: home-assistant
name: home-assistant
spec:
selector:
app: home-assistant
type: ClusterIP
ports:
- name: http
protocol: TCP
port: 80
targetPort: 8123
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: home-assistant
name: home-assistant
labels:
app: home-assistant
spec:
replicas: 1
selector:
matchLabels:
app: home-assistant
template:
metadata:
labels:
app: home-assistant
spec:
containers:
- name: bluez
image: ghcr.io/mysticrenji/bluez-service:v1.0.0
securityContext:
privileged: true
- name: home-assistant
image: ghcr.io/mysticrenji/homeassistant-arm64:2023.3.0
resources:
requests:
memory: "256Mi"
limits:
memory: "512Mi"
ports:
- containerPort: 8123
volumeMounts:
- mountPath: /config
name: config
- mountPath: /config/configuration.yaml
subPath: configuration.yaml
name: configmap-file
- mountPath: /config/automations.yaml
subPath: automations.yaml
name: configmap-file
- mountPath: /media
name: media-volume
# - mountPath: /run/dbus
# name: d-bus
# readOnly: true
- mountPath: /dev/ttyUSB1
name: zigbee
#- mountPath: /dev/video0
# name: cam
securityContext:
privileged: true
capabilities:
add:
- NET_ADMIN
- NET_RAW
- SYS_ADMIN
hostNetwork: true
volumes:
- name: config
persistentVolumeClaim:
claimName: home-assistant-pvc
- name: media-volume
hostPath:
path: /tmp/media
- name: configmap-file
configMap:
name: home-assistant-configmap
# hostPath:
# path: /tmp/home-assistant
# type: DirectoryOrCreate
# - name: d-bus
# hostPath:
# path: /run/dbus
- name: zigbee
hostPath:
path: /dev/ttyACM0
#- name: cam
# hostPath:
# path: /dev/video0
---
kind: ConfigMap
apiVersion: v1
metadata:
name: home-assistant-configmap
namespace: home-assistant
data:
known_devices.yaml: |
automations.yaml: |
configuration.yaml: |-
default_config:
frontend:
themes: !include_dir_merge_named themes
automation: !include automations.yaml
http:
use_x_forwarded_for: true
trusted_proxies:
- 10.10.0.0/16
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: home-assistant-pvc
labels:
app: home-assistant
namespace: home-assistant
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 9Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: zwavejs2mqtt-pvc
labels:

View File

@@ -5,15 +5,66 @@ Hier sind die Schritte, um ein Persistent Volume für Longhorn zu erstellen:
### 1. Stelle sicher, dass Longhorn installiert ist
Zuerst solltest du sicherstellen, dass Longhorn auf deinem Cluster installiert ist. Falls Longhorn noch nicht installiert ist, kannst du es mit Helm oder direkt aus den YAML-Dateien installieren.
#### Node Labeling
In the case not all nodes should provide disk, e.g. certain nodes have special/fast disks.
In this case the StorageClass needs to be adapted and added with a nodeselector [1].
```
k label nodes k3s-prod-worker-{1..3} node.longhorn.io/create-default-disk=true
```
[1] https://longhorn.io/kb/tip-only-use-storage-on-a-set-of-nodes/
#### Mit Helm:
```bash
helm repo add longhorn https://charts.longhorn.io
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --values longhorn-values.yaml
```
#### Mit kubectl:
```bash
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.2.2/deploy/install.yaml
#### Adding additional disks
https://medium.com/btech-engineering/longhorn-storage-solution-for-kubernetes-cluster-645bc1b98a5e
Add disk in Proxmox, which appears as:
Run in worker node:
```
$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 30G 0 disk
├─sda1 8:1 0 29G 0 part /
├─sda14 8:14 0 4M 0 part
├─sda15 8:15 0 106M 0 part /boot/efi
└─sda16 259:0 0 913M 0 part /boot
sdb 8:16 0 250G 0 disk
sr0 11:0 1 4M 0 rom
```
SDB...
```
fdisk /dev/sdb
# Hit n(new), p(primary), Enter, Enter
# w(write to disk and exit)
mkfs.ext4 /dev/sdb1
mkdir /mnt/nvmedisk1
nano /etc/fstab
->
/dev/sdb1 /mnt/nvmedisk1 ext4
systemctl daemon-reload
mount -a
```
### Check via UI
```
k port-forward svc/longhorn-frontend 8000:80 -n longhorn-system
```
### 2. Erstelle ein PersistentVolume (PV) und ein PersistentVolumeClaim (PVC)
@@ -116,7 +167,8 @@ Mit diesen Schritten hast du ein Persistent Volume (PV) und einen Persistent Vol
## Disable Localpath as default
```
kubectl get storageclass
kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
```

1
Longhorn/auth Normal file
View File

@@ -0,0 +1 @@
basti:$apr1$N23gJpBe$CYlDcwTfp8YsQMq0UcADQ0

67
Longhorn/justfile Normal file
View File

@@ -0,0 +1,67 @@
set fallback:=true
export LONGHORN_NAMESPACE := env("LONGHORN_NAMESPACE","longhorn-system")
export LONGHORN_VERSION := env("LONGHORN_VERSION","1.10.1")
add-helm-repo:
helm repo add longhorn https://charts.longhorn.io --force-update
helm repo update
# Delete namespace
delete-namespace:
#!/bin/bash
set -euo pipefail
if kubectl get namespace ${LONGHORN_NAMESPACE} &>/dev/null; then
kubectl delete namespace ${LONGHORN_NAMESPACE} --ignore-not-found
else
echo "Namespace ${LONGHORN_NAMESPACE} does not exist."
fi
install:
#!/bin/bash
set -euo pipefail
just env::check
just add-helm-repo
helm upgrade longhorn longhorn/longhorn \
--install \
--cleanup-on-fail \
--namespace ${LONGHORN_NAMESPACE} \
--create-namespace \
--version ${LONGHORN_VERSION} \
--values longhorn-values.yaml
# remove default storage class annotation from local-path storage class
kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
uninstall:
#!/bin/bash
set -euo pipefail
for crd in $(kubectl get crd -o name | grep longhorn); do
kubectl patch $crd -p '{"metadata":{"finalizers":[]}}' --type=merge
done
kubectl -n ${LONGHORN_NAMESPACE} patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag || true
helm uninstall longhorn --namespace ${LONGHORN_NAMESPACE} || true
just delete-namespace
install-dashboard-ingress:
#!/bin/bash
set -euo pipefail
just env::check
echo "Deploying Longhorn Dashboard Ingress with EXTERNAL_DOMAIN=${EXTERNAL_DOMAIN}"
gomplate -f longhorn-certificate-gomplate.yaml | kubectl apply -f -
gomplate -f longhorn-ingressroute-gomplate.yaml | kubectl apply -f -
uninstall-dashboard-ingress:
#!/bin/bash
set -euo pipefail
kubectl delete -f longhorn-ingressroute-gomplate.yaml || true
kubectl delete -f longhorn-certificate-gomplate.yaml || true

View File

@@ -7,7 +7,7 @@ metadata:
spec:
secretName: longhorn-web-ui-tls
dnsNames:
- longhorn.k8s.internal.schnrbs.work
- longhorn-dashboard.{{.Env.EXTERNAL_DOMAIN}}
issuerRef:
name: cloudflare-cluster-issuer
kind: ClusterIssuer

View File

@@ -1,18 +1,16 @@
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: longhorn-web-ui
name: longhorn-ingress-route
namespace: longhorn-system
spec:
properties:
entrypoints:
- websecure
entryPoints:
- websecure
routes:
- match: Host(`longhorn.k8s.internal.schnrbs.work`)
- match: Host(`longhorn-dashboard.{{.Env.EXTERNAL_DOMAIN}}`)
kind: Rule
services:
- name: longhorn-frontend
port: 80
tls:
secretName: longhorn-web-ui-tls
tls:
secretName: longhorn-web-ui-tls

View File

@@ -0,0 +1,22 @@
# Replica count for the default Longhorn StorageClass.
persistence:
defaultClassReplicaCount: 2
reclaimPolicy: Delete
# Replica counts for CSI Attacher, Provisioner, Resizer, Snapshotter
csi:
attacherReplicaCount: 2
provisionerReplicaCount: 2
resizerReplicaCount: 2
snapshotterReplicaCount: 2
# Default replica count and storage path
defaultSettings:
kubernetesClusterAutoscalerEnabled: false
allowCollectingLonghornUsageMetrics: false
defaultReplicaCount: 2
# defaultDataPath: "/k8s-data"
longhornUI:
replicas: 1

View File

@@ -0,0 +1,40 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: longhorn-volv-pvc
namespace: default
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 2Gi
---
apiVersion: v1
kind: Pod
metadata:
name: volume-test
namespace: default
spec:
restartPolicy: Always
containers:
- name: volume-test
image: nginx:stable-alpine
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
command:
- ls
- /data/lost+found
initialDelaySeconds: 5
periodSeconds: 5
volumeMounts:
- name: volv
mountPath: /data
ports:
- containerPort: 80
volumes:
- name: volv
persistentVolumeClaim:
claimName: longhorn-volv-pvc

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,16 @@
Metallb Installation
## Used IP Range
Metallb will advertise IPs of the range:
192.168.178.226-192.168.178.240
First Address x.x.x.226 will be the traefik reverse proxy deployment.
https://canthonyscott.com/setting-up-a-k3s-kubernetes-cluster-within-proxmox/
Following https://metallb.universe.tf/installation/ (0.14.3)
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.3/config/manifests/metallb-native.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.9/config/manifests/metallb-native.yaml

View File

@@ -5,4 +5,4 @@ metadata:
namespace: metallb-system
spec:
addresses:
- 192.168.178.220-192.168.178.250
- {{ .Env.METALLB_ADDRESS_RANGE }}

66
Metallb_Setup/justfile Normal file
View File

@@ -0,0 +1,66 @@
set fallback := true
export K8S_CONTEXT := env("K8S_CONTEXT", "")
export SERVER_IP := env("K3S_SERVER_IP","192.168.178.45")
export USER := env("K3S_USER","basti")
[private]
default:
@just --list --unsorted --list-submodules
install:
#!/bin/bash
set -euo pipefail
just env::check
METALLB_VERSION="v0.15.3"
username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
context=""
if gum confirm "Update KUBECONFIG?"; then
context=$(
gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
)
fi
if [ -n "${context}" ]; then
kubectl config use-context "${context}"
fi
kubectl apply -f "https://raw.githubusercontent.com/metallb/metallb/${METALLB_VERSION}/config/manifests/metallb-native.yaml"
gum spin --spinner dot --title "Waiting for MetalLB to be ready..." -- kubectl wait --namespace metallb-system --for=condition=available deployment --all --timeout=120s
echo "MetalLB ${METALLB_VERSION} installed successfully."
gomplate -f address-pool.gomplate.yaml | kubectl apply -f -
echo "Address pool configured."
kubectl apply -f advertisement.yaml
echo "Advertisement created."
uninstall:
#!/bin/bash
set -euo pipefail
just env::check
kubectl get namespace metallb-system &>/dev/null && kubectl delete ns metallb-system
test-deployment:
#!/bin/bash
set -euo pipefail
just env::check
kubectl apply -f test-deployment.yaml
echo "Test deployment created. You can check the service with 'kubectl get svc nginx -o wide -n test'."
echo "To clean up, run 'just test-deployment-cleanup'."
test-deployment-cleanup:
#!/bin/bash
set -euo pipefail
just env::check
kubectl delete -f test-deployment.yaml
echo "Test deployment and service deleted."

View File

@@ -27,7 +27,8 @@ kubectl expose deploy schnipo --port=80 --target-port=8080 --type=LoadBalancer -
```
#Create deploy
kubectl create deploy nginx --image=nginx
k create ns test
kubectl create deploy nginx --image=nginx -n test
kubectl scale --replicas=3 deployment/nginx -n test

View File

@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: schnipo-ingress-certificate
namespace: dishes
spec:
secretName: schnipo-certificate-secret
issuerRef:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- schnipo.{{.Env.EXTERNAL_DOMAIN}}

View File

@@ -0,0 +1,43 @@
apiVersion: v1
kind: Namespace
metadata:
name: dishes
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dish-schnipo
namespace: dishes
labels:
app: dishes
spec:
replicas: 3
selector:
matchLabels:
app: dishes
template:
metadata:
labels:
app: dishes
spec:
containers:
- name: dish-schnipo
image: bschnorbus/dish-schnipo
ports:
- containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
name: dish-schnipo
namespace: dishes
spec:
type: ClusterIP
selector:
app: dishes
ports:
- port: 80
targetPort: 8080
protocol: TCP

View File

@@ -0,0 +1,18 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: schnipo-ingress-route
namespace: dishes
spec:
entryPoints:
- websecure
routes:
- match: Host(`schnipo.{{.Env.EXTERNAL_DOMAIN}}`)
kind: Rule
services:
- name: schnipo
port: 80
targetPort: 8080
tls:
secretName: schnipo-certificate-secret

37
Test-Deployment/justfile Normal file
View File

@@ -0,0 +1,37 @@
set fallback:=true
export EXTERNAL := env("EXTERNAL_DOMAIN", "")
install-nginx:
#!/bin/bash
set -euo pipefail
just env::check
if [ -z "${EXTERNAL}" ]; then
echo "ERROR: EXTERNAL_DOMAIN environment variable is not set."
exit 1
fi
kubectl apply -f nginx-deployment.yaml
gomplate -f nginx-certificate-gomplate.yaml | kubectl apply -f -
gomplate -f nginx-ingress-route-gomplate.yaml | kubectl apply -f -
install-dishes:
#!/bin/bash
set -euo pipefail
just env::check
if [ -z "${EXTERNAL}" ]; then
echo "ERROR: EXTERNAL_DOMAIN environment variable is not set."
exit 1
fi
kubectl apply -f dishes-deployment.yaml
gomplate -f dishes-certificate-gomplate.yaml | kubectl apply -f -
gomplate -f dishes-ingress-route-gomplate.yaml | kubectl apply -f -
remove-nginx:
kubectl delete ns test || true
remove-dishes:
kubectl delete ns dishes || true

View File

@@ -9,4 +9,4 @@ spec:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- nginx-test.k8s.internal.schnrbs.work
- nginx-test.{{.Env.EXTERNAL_DOMAIN}}

View File

@@ -0,0 +1,43 @@
apiVersion: v1
kind: Namespace
metadata:
name: test
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
namespace: test
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:latest
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx
namespace: test
spec:
type: LoadBalancer
selector:
app: nginx
ports:
- port: 80
targetPort: 80
protocol: TCP

View File

@@ -7,10 +7,10 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`nginx-test.k8s.internal.schnrbs.work`)
- match: Host(`nginx-test.{{.Env.EXTERNAL_DOMAIN}}`)
kind: Rule
services:
- name: nginx
port: 80
tls:
secretName: nginx-certificate-secret
secretName: nginx-certificate-secret

View File

@@ -7,7 +7,7 @@ metadata:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
rules:
- host: nginx-test.k8s.internal.schnrbs.work
- host: nginx-test.int.schnrbs.work
http:
paths:
- path: /
@@ -19,5 +19,5 @@ spec:
number: 80
tls:
- hosts:
- nginx-test.k8s.internal.schnrbs.work
- nginx-test.int.schnrbs.work
secretName: nginx-certificate-secret

View File

@@ -4,14 +4,63 @@
helm repo add traefik https://helm.traefik.io/traefik
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
## Troubleshooting steps
## Cert-Manager
Cert Manager will be used as it will store certs in a secret, therefore accessible for every pod.
In contrast to this, Traefik stores certs on disk, so a volume would be needed in RWX mode (too much effort).
### Issuer - CA
An issuer is a CA. This can be done with 2 different kinds.
#### Issuer
can be used in the namespace they are created in.
#### Cluster Issuer
can be used throughout the whole cluster, not limited to a specific namespace.
i.e. general issuer for all namespaces in cluster.
## Test Deployment
```
k create ns test
kubectl create deploy nginx --image=nginx -n test
k create svc -n test clusterip nginx --tcp=80
k scale --replicas=3 deployment/nginx -n test
```
## Install Traefik & Cert-Manager
```
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
traefik-dashboard.k8s.schnrbs.work
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml
k apply -f cert-manager-issuer-secret.yaml
k get secret -n cert-manager
k apply -f cert-manager-cluster-issuer.yaml
```
## Switch Test Deployment to https
```
k apply -f test/nginx-certificate.yaml
k apply -f test/nginx-ingress.yaml
```
## Troubleshooting steps
```
k get po -n test -o wide
k create svc -n test clusterip nginx
k create svc -n test clusterip nginx --tcp=80
@@ -25,41 +74,23 @@ k apply -f traefik_lempa/nginx-ingress.yaml
k get svc -n test
k get ingress
k get ingress -n test
git staus
git status
git diff
git commit -am "wip thing"
git checkout master
git pull --rebase
git merge wip
git push
git log
git checkout master
cd traefik_lempa
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
cd ..
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
```
```
k get svc ingressRoute
k get svc ingressRoutes
k get svc ingressroutes.traefik.io
k get svc ingressroutes.traefik.io --all-namespaces
k get ingressroutes.traefik.io --all-namespaces
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
exit
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values traefik_lempa/cert-manager-values.yaml
cert-manager-values.yaml
echo -n 'P96My4uiHudZtiC2ymjSGQ0174CoRBnI9ztmA0Wh' | base64
k get po
alias k=kubectl
k get po
k apply traefik_lempa/cert-manager-issuer-secret.yaml
k apply -f traefik_lempa/cert-manager-issuer-secret.yaml
k get secret
k get secrets
k get secret -n cert-manager
k apply -f traefik_lempa/cert-manager-cluster-issuer.yaml
k get clusterissuers.cert-manager.io
k apply -f traefik_lempa/nginx-certificate.yaml
k apply -f traefik_lempa/nginx-ingress.yaml
k apply -f traefik_lempa/cert-manager-cluster-issuer.yaml
```

View File

@@ -4,7 +4,7 @@ metadata:
name: cloudflare-cluster-issuer
spec:
acme:
email: hello@schnorbus.net
email: {{ .Env.ACME_EMAIL }}
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: cloudflare-acme-key

View File

@@ -5,4 +5,4 @@ metadata:
namespace: cert-manager
type: Opaque
stringData:
api-token: DgU4SMUpQVAoS8IisGxnSQCUI7PbclhvegdqF9I1
api-token: {{ .Env.CLOUDFLARE_API_TOKEN }}

62
Traefik/justfile Normal file
View File

@@ -0,0 +1,62 @@
set fallback:=true
export CERT_MANAGER_NAMESPACE := env("CERT_MANAGER_NAMESPACE", "cert-manager")
export TRAEFIK_NAMESPACE := env("TRAEFIK_NAMESPACE", "traefik")
add-helm-repos:
helm repo add traefik https://helm.traefik.io/traefik --force-update
helm repo add jetstack https://charts.jetstack.io --force-update
helm repo update
install:
#!/bin/bash
set -euo pipefail
just env::check
just add-helm-repos
helm upgrade traefik traefik/traefik \
--install \
--cleanup-on-fail \
--namespace ${TRAEFIK_NAMESPACE} \
--create-namespace \
--values traefik-values.yaml
helm upgrade cert-manager jetstack/cert-manager \
--install \
--cleanup-on-fail \
--namespace ${CERT_MANAGER_NAMESPACE} \
--create-namespace \
--values cert-manager-values.yaml
uninstall:
#!/bin/bash
set -euo pipefail
just env::check
helm uninstall traefik --namespace ${TRAEFIK_NAMESPACE} || true
helm uninstall cert-manager --namespace ${CERT_MANAGER_NAMESPACE} || true
setup-cluster-issuer:
#!/bin/bash
set -euo pipefail
just env::check
gomplate -f cert-manager-issuer-secret-gomplate.yaml | kubectl apply -f -
gomplate -f cert-manager-cluster-issuer-gomplate.yaml | kubectl apply -f -
# Get status of cert-manager components
status:
#!/bin/bash
set -euo pipefail
echo "=== cert-manager Components Status ==="
echo ""
echo "Namespace: ${CERT_MANAGER_NAMESPACE}"
echo ""
echo "Pods:"
kubectl get pods -n ${CERT_MANAGER_NAMESPACE}
echo ""
echo "Services:"
kubectl get services -n ${CERT_MANAGER_NAMESPACE}
echo ""
echo "CRDs:"
kubectl get crd | grep cert-manager.io

View File

@@ -0,0 +1,15 @@
ports:
web:
redirections:
entryPoint:
to: websecure
scheme: https
logs:
general:
level: DEBUG
ingressRoute:
dashboard:
enabled: true
entryPoints: [web, websecure]
matchRule: Host(`traefik-dashboard.{{ .Env.EXTERNAL_DOMAIN }}`)

View File

@@ -1,10 +0,0 @@
ports:
web:
redirectTo:
port: websecure
ingressRoute:
dashboard:
enabled: true
entryPoints: [web, websecure]
matchRule: Host(`traefik-dashboard.k8s.redacted`)

647
VPA/justfile Normal file
View File

@@ -0,0 +1,647 @@
set fallback := true
export CNPG_NAMESPACE := env("CNPG_NAMESPACE", "postgres")
export CNPG_CHART_VERSION := env("CNPG_CHART_VERSION", "0.26.1")
export CNPG_CLUSTER_CHART_VERSION := env("CNPG_CLUSTER_CHART_VERSION", "0.3.1")
export POSTGRES_STORAGE_SIZE := env("POSTGRES_STORAGE_SIZE", "20Gi")
export POSTGRES_MAX_CONNECTIONS := env("POSTGRES_MAX_CONNECTIONS", "200")
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
export EXTERNAL_SECRETS_NAMESPACE := env("EXTERNAL_SECRETS_NAMESPACE", "external-secrets")
[private]
default:
@just --list --unsorted --list-submodules
# Add Helm repository
add-helm-repo:
@helm repo add autoscaler https://kubernetes.github.io/autoscaler
@helm repo update
# Remove Helm repository
remove-helm-repo:
@helm repo remove autoscaler
# Install autoscaler
install:
@just install-cnpg
@just create-cluster
# Uninstall CloudNativePG and delete the cluster
uninstall:
@just delete-cluster
@just uninstall-cnpg
# Install CloudNativePG operator via Helm and lock the namespace down to the
# "restricted" Pod Security Standard.
# NOTE(review): add-helm-repo only registers the `autoscaler` repo; the `cnpg`
# repo alias used below must already exist — confirm.
install-cnpg:
@just add-helm-repo
@helm upgrade --cleanup-on-fail --install cnpg cnpg/cloudnative-pg \
--version ${CNPG_CHART_VERSION} \
-n ${CNPG_NAMESPACE} --create-namespace --wait \
-f cnpg-values.yaml
@kubectl label namespace ${CNPG_NAMESPACE} \
pod-security.kubernetes.io/enforce=restricted --overwrite
# Uninstall CloudNativePG and remove its namespace
uninstall-cnpg:
@helm uninstall cnpg -n ${CNPG_NAMESPACE} --wait
@kubectl delete namespace ${CNPG_NAMESPACE} --ignore-not-found
# Create Postgres cluster: provision the superuser secret (via External
# Secrets + Vault when available, plain Secret otherwise), render the chart
# values with gomplate, then install the cnpg/cluster chart and wait for
# readiness.
create-cluster:
#!/bin/bash
set -euo pipefail
# Prefer the External Secrets Operator path: store the password in Vault and
# let an ExternalSecret materialize the k8s Secret.
if helm status external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} &>/dev/null; then
echo "External Secrets Operator detected. Creating admin credentials via ExternalSecret..."
password=$(just utils::random-password)
just vault::put-root postgres/admin username=postgres password="${password}"
# Recreate the ExternalSecret so it picks up the fresh Vault entry.
kubectl delete externalsecret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
gomplate -f postgres-superuser-external-secret.gomplate.yaml | kubectl apply -f -
echo "Waiting for ExternalSecret to sync..."
kubectl wait --for=condition=Ready externalsecret/postgres-cluster-superuser \
-n ${CNPG_NAMESPACE} --timeout=60s
else
# Fallback: create the Secret directly; still mirror it into Vault when a
# Vault release is installed.
echo "External Secrets Operator not found. Creating superuser secret directly..."
password=$(just utils::random-password)
kubectl delete secret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
kubectl create secret generic postgres-cluster-superuser -n ${CNPG_NAMESPACE} \
--from-literal=username=postgres \
--from-literal=password="${password}"
if helm status vault -n ${K8S_VAULT_NAMESPACE} &>/dev/null; then
just vault::put-root postgres/admin username=postgres password="${password}"
fi
fi
# Render chart values (storage size, max connections, ...) and deploy.
gomplate -f postgres-cluster-values.gomplate.yaml -o postgres-cluster-values.yaml
helm upgrade --install postgres-cluster cnpg/cluster \
--version ${CNPG_CLUSTER_CHART_VERSION} \
-n ${CNPG_NAMESPACE} --wait -f postgres-cluster-values.yaml
echo "Waiting for PostgreSQL cluster to be ready..."
kubectl wait --for=condition=Ready clusters.postgresql.cnpg.io/postgres-cluster \
-n ${CNPG_NAMESPACE} --timeout=300s
# Delete Postgres cluster and the superuser credentials (both the
# ExternalSecret and the plain Secret variant, whichever exists).
delete-cluster:
@helm uninstall postgres-cluster -n ${CNPG_NAMESPACE} --ignore-not-found --wait
@kubectl delete externalsecret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
@kubectl delete secret postgres-cluster-superuser -n ${CNPG_NAMESPACE} --ignore-not-found
# Print Postgres username (fixed superuser name used across this module)
admin-username:
@echo "postgres"
# Print Postgres password (decoded from the superuser Secret)
admin-password:
@kubectl get -n ${CNPG_NAMESPACE} secret postgres-cluster-superuser \
-o jsonpath="{.data.password}" | base64 --decode
@echo
# Create Postgres database; prompts interactively when no name is given.
# Exits successfully (no-op) when the database already exists.
create-db db_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-{{ db_name }}}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
if just db-exists ${DB_NAME} &>/dev/null; then
echo "Database ${DB_NAME} already exists" >&2
exit
fi
echo "Creating database ${DB_NAME}..."
# NOTE(review): DB_NAME is interpolated into SQL unquoted — fine for trusted
# operator input, but not injection-safe.
just psql -c "\"CREATE DATABASE ${DB_NAME};\""
echo "Database ${DB_NAME} created."
# Delete Postgres database; prompts interactively when no name is given.
# Terminates active connections first so DROP DATABASE cannot fail on
# "database is being accessed by other users".
delete-db db_name='':
    #!/bin/bash
    set -euo pipefail
    DB_NAME=${DB_NAME:-{{ db_name }}}
    # Prompt here (as create-db does): a name entered inside `just db-exists`
    # runs in a child process and would not propagate back, leaving DB_NAME
    # empty for the DROP below.
    while [ -z "${DB_NAME}" ]; do
    DB_NAME=$(gum input --prompt="Database name: " --width=100)
    done
    if ! just db-exists "${DB_NAME}" &>/dev/null; then
    echo "Database ${DB_NAME} does not exist." >&2
    exit
    fi
    # Terminate all connections to the database
    just psql -c "\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity
    WHERE datname = '${DB_NAME}' AND pid <> pg_backend_pid();\""
    # Force disconnect if needed
    just psql -c "\"UPDATE pg_database SET datallowconn = false WHERE datname = '${DB_NAME}';\""
    just psql -c "\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity
    WHERE datname = '${DB_NAME}';\""
    just psql -c "\"DROP DATABASE ${DB_NAME};\""
    echo "Database ${DB_NAME} deleted."
# Check if database exists: greps the database name out of psql's `\l`
# listing. Exit 0 when found, exit 1 otherwise ([no-exit-message] suppresses
# just's error banner on the failing case).
[no-exit-message]
db-exists db_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-{{ db_name }}}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
# Match the name as the first column of psql's aligned output.
if echo '\l' | just postgres::psql | grep -E "^ *${DB_NAME} *\|" &>/dev/null; then
echo "Database ${DB_NAME} exists."
else
echo "Database ${DB_NAME} does not exist." >&2
exit 1
fi
# Create Postgres user; prompts for missing username/password and generates a
# random password when the prompt is left empty. No-op if the user exists.
create-user username='' password='':
    #!/bin/bash
    set -euo pipefail
    USERNAME=${USERNAME:-"{{ username }}"}
    PASSWORD=${PASSWORD:-"{{ password }}"}
    while [ -z "${USERNAME}" ]; do
    USERNAME=$(gum input --prompt="Username: " --width=100)
    done
    if just user-exists "${USERNAME}" &>/dev/null; then
    echo "User ${USERNAME} already exists" >&2
    exit
    fi
    if [ -z "${PASSWORD}" ]; then
    PASSWORD=$(gum input --prompt="Password: " --password --width=100 \
    --placeholder="Empty to generate a random password")
    fi
    if [ -z "${PASSWORD}" ]; then
    # Use utils::random-password — the generator every other recipe in this
    # module calls; a plain `random-password` recipe does not exist here.
    PASSWORD=$(just utils::random-password)
    echo "Generated random password: ${PASSWORD}"
    fi
    just psql -c "\"CREATE USER ${USERNAME} WITH LOGIN PASSWORD '${PASSWORD}';\""
    echo "User ${USERNAME} created."
# Delete Postgres user; prompts interactively when no name is given. Revokes
# default privileges and resets schema ownership first so DROP USER cannot
# fail on dependent objects.
delete-user username='':
    #!/bin/bash
    set -euo pipefail
    USERNAME=${USERNAME:-"{{ username }}"}
    # Prompt here (as create-user does): a name entered inside
    # `just user-exists` would not propagate back, leaving USERNAME empty for
    # the SQL statements below.
    while [ -z "${USERNAME}" ]; do
    USERNAME=$(gum input --prompt="Username: " --width=100)
    done
    if ! just user-exists "${USERNAME}" &>/dev/null; then
    echo "User ${USERNAME} does not exist." >&2
    exit
    fi
    just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON TABLES FROM ${USERNAME};\""
    just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON SEQUENCES FROM ${USERNAME};\""
    just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON FUNCTIONS FROM ${USERNAME};\""
    just psql -c "\"ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE ALL ON TYPES FROM ${USERNAME};\""
    just psql -c "\"ALTER SCHEMA public OWNER TO postgres;\""
    just psql -c "\"DROP USER ${USERNAME};\""
    echo "User ${USERNAME} deleted."
# Check if user exists: greps the role name out of psql's `\du` listing.
# Exit 0 when found, exit 1 otherwise.
[no-exit-message]
user-exists username='':
#!/bin/bash
set -euo pipefail
USERNAME=${USERNAME:-"{{ username }}"}
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username: " --width=100)
done
# Match the role name as the first column of psql's aligned output.
if echo '\du' | just postgres::psql | grep -E "^ *${USERNAME} *\|" &>/dev/null; then
echo "User ${USERNAME} exists."
else
echo "User ${USERNAME} does not exist." >&2
exit 1
fi
# Change user password; prompts for missing values and generates a random
# password when the prompt is left empty.
change-password username='' password='':
#!/bin/bash
set -euo pipefail
USERNAME=${USERNAME:-"{{ username }}"}
PASSWORD=${PASSWORD:-"{{ password }}"}
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username: " --width=100)
done
if ! just user-exists ${USERNAME} &>/dev/null; then
echo "User ${USERNAME} does not exist." >&2
exit 1
fi
if [ -z "${PASSWORD}" ]; then
PASSWORD=$(gum input --prompt="New password: " --password --width=100 \
--placeholder="Empty to generate a random password")
fi
if [ -z "${PASSWORD}" ]; then
PASSWORD=$(just utils::random-password)
echo "Generated random password: ${PASSWORD}"
fi
just psql -c "\"ALTER USER ${USERNAME} WITH PASSWORD '${PASSWORD}';\""
echo "Password changed for user ${USERNAME}."
# Grant all privileges on database to user. Verifies the database exists by
# attempting a trivial query against it.
grant db_name='' username='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
USERNAME=${USERNAME:-"{{ username }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username: " --width=100)
done
# "SELECT 1" against the target database doubles as an existence check.
if ! just psql ${DB_NAME} -U postgres -P pager=off -c "\"SELECT 1;\""; then
echo "Database ${DB_NAME} does not exist." >&2
exit 1
fi
just psql -c "\"GRANT ALL PRIVILEGES ON DATABASE ${DB_NAME} TO ${USERNAME};\""
# Grant CREATE permission on public schema (needed for PostgreSQL 15+)
just psql -d ${DB_NAME} -c "\"GRANT CREATE ON SCHEMA public TO ${USERNAME};\""
echo "Privileges granted."
# Revoke all privileges on database from user.
revoke db_name='' username='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
USERNAME=${USERNAME:-"{{ username }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username: " --width=100)
done
if ! just psql -U postgres ${DB_NAME} -P pager=off -c "\"SELECT 1;\""; then
echo "Database ${DB_NAME} does not exist." >&2
exit 1
fi
just psql -c "\"REVOKE ALL PRIVILEGES ON DATABASE ${DB_NAME} FROM ${USERNAME};\""
echo "Privileges revoked."
# Create Postgres database and user, then grant the user full access.
create-user-and-db username='' db_name='' password='':
@just create-db "{{ db_name }}"
@just create-user "{{ username }}" "{{ password }}"
@just grant "{{ db_name }}" "{{ username }}"
# Delete Postgres database and user; each step is skipped gracefully when
# its target does not exist, so the cleanup is safe to re-run.
delete-user-and-db username='' db_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
USERNAME=${USERNAME:-"{{ username }}"}
if just db-exists ${DB_NAME} &>/dev/null; then
if just user-exists ${USERNAME} &>/dev/null; then
just revoke "${DB_NAME}" "${USERNAME}"
else
echo "User ${USERNAME} does not exist, skipping revoke."
fi
just delete-db "${DB_NAME}"
else
echo "Database ${DB_NAME} does not exist, skipping database deletion."
fi
if just user-exists ${USERNAME} &>/dev/null; then
just delete-user "${USERNAME}"
else
echo "User ${USERNAME} does not exist, skipping user deletion."
fi
echo "Cleanup completed."
# --- CDC (Change Data Capture) recipes -------------------------------------
# All recipes below run psql inside the postgres-cluster-1 pod via
# `kubectl exec`. SQL is built by shell interpolation — acceptable for
# trusted operator input, not injection-safe.
# Create logical replication slot for CDC (idempotent: exits 0 if present).
create-replication-slot slot_name='' db_name='postgres' plugin='pgoutput':
#!/bin/bash
set -euo pipefail
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
PLUGIN=${PLUGIN:-"{{ plugin }}"}
while [ -z "${SLOT_NAME}" ]; do
SLOT_NAME=$(gum input --prompt="Replication slot name: " --width=100 \
--placeholder="e.g., airbyte_slot")
done
if kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT slot_name FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" | grep -q "${SLOT_NAME}"; then
echo "Replication slot '${SLOT_NAME}' already exists."
exit 0
fi
echo "Creating replication slot '${SLOT_NAME}' with plugin '${PLUGIN}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"SELECT pg_create_logical_replication_slot('${SLOT_NAME}', '${PLUGIN}');"
echo "Replication slot '${SLOT_NAME}' created."
# Delete replication slot (exit 1 when it does not exist).
delete-replication-slot slot_name='' db_name='postgres':
#!/bin/bash
set -euo pipefail
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
while [ -z "${SLOT_NAME}" ]; do
SLOT_NAME=$(gum input --prompt="Replication slot name to delete: " --width=100)
done
if ! kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT slot_name FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" | grep -q "${SLOT_NAME}"; then
echo "Replication slot '${SLOT_NAME}' does not exist."
exit 1
fi
echo "Deleting replication slot '${SLOT_NAME}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"SELECT pg_drop_replication_slot('${SLOT_NAME}');"
echo "Replication slot '${SLOT_NAME}' deleted."
# List all replication slots
list-replication-slots:
@echo "Replication slots:"
@kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d postgres -c \
"SELECT slot_name, plugin, slot_type, database, active, restart_lsn FROM pg_replication_slots;"
# Create publication for CDC. When no table list is given, offers an
# interactive choice between ALL TABLES, public-schema user tables, or an
# explicit comma-separated list. Idempotent: exits 0 if it already exists.
create-publication pub_name='' db_name='' tables='':
#!/bin/bash
set -euo pipefail
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
TABLES="${TABLES:-{{ tables }}}"
while [ -z "${PUB_NAME}" ]; do
PUB_NAME=$(gum input --prompt="Publication name: " --width=100 \
--placeholder="e.g., airbyte_publication")
done
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
if kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT pubname FROM pg_publication WHERE pubname = '${PUB_NAME}';" | grep -q "${PUB_NAME}"; then
echo "Publication '${PUB_NAME}' already exists in database '${DB_NAME}'."
exit 0
fi
if [ -z "${TABLES}" ]; then
echo "Select tables to include in publication:"
echo "1) All tables (ALL TABLES)"
echo "2) All user tables (exclude system/internal tables)"
echo "3) Specific tables (comma-separated list)"
CHOICE=$(gum choose "All tables" "User tables only" "Specific tables")
case "${CHOICE}" in
"All tables")
TABLES="ALL TABLES"
;;
"User tables only")
# Get list of user tables (excluding _airbyte* and other system tables)
USER_TABLES=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT string_agg(tablename, ', ') FROM pg_tables
WHERE schemaname = 'public'
AND tablename NOT LIKE '\_%'
AND tablename NOT LIKE 'pg_%';")
if [ -z "${USER_TABLES}" ]; then
echo "No user tables found in database '${DB_NAME}'"
exit 1
fi
TABLES="TABLE ${USER_TABLES}"
echo "Including tables: ${USER_TABLES}"
;;
"Specific tables")
TABLES=$(gum input --prompt="Enter table names (comma-separated): " --width=100 \
--placeholder="e.g., users, products, orders")
TABLES="TABLE ${TABLES}"
;;
esac
elif [ "${TABLES}" = "ALL" ]; then
TABLES="ALL TABLES"
fi
echo "Creating publication '${PUB_NAME}' in database '${DB_NAME}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"CREATE PUBLICATION ${PUB_NAME} FOR ${TABLES};"
if [ "${TABLES}" != "ALL TABLES" ]; then
# REPLICA IDENTITY FULL lets UPDATE/DELETE rows be replicated for tables
# without a primary key; errors are ignored (|| true) per table.
echo "Setting REPLICA IDENTITY for included tables..."
TABLE_LIST=$(echo "${TABLES}" | sed 's/TABLE //')
IFS=',' read -ra TABLE_ARRAY <<< "${TABLE_LIST}"
for table in "${TABLE_ARRAY[@]}"; do
table=$(echo "$table" | xargs) # trim whitespace
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"ALTER TABLE ${table} REPLICA IDENTITY FULL;" 2>/dev/null || true
done
fi
echo "Publication '${PUB_NAME}' created."
# Delete publication (exit 1 when it does not exist).
delete-publication pub_name='' db_name='':
#!/bin/bash
set -euo pipefail
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
while [ -z "${PUB_NAME}" ]; do
PUB_NAME=$(gum input --prompt="Publication name to delete: " --width=100)
done
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
if ! kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -tAc \
"SELECT pubname FROM pg_publication WHERE pubname = '${PUB_NAME}';" | grep -q "${PUB_NAME}"; then
echo "Publication '${PUB_NAME}' does not exist in database '${DB_NAME}'."
exit 1
fi
echo "Deleting publication '${PUB_NAME}' from database '${DB_NAME}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"DROP PUBLICATION ${PUB_NAME};"
echo "Publication '${PUB_NAME}' deleted."
# List all publications in a database
list-publications db_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
echo "Publications in database '${DB_NAME}':"
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"SELECT pubname, puballtables, pubinsert, pubupdate, pubdelete FROM pg_publication;"
# Grant CDC privileges to user: REPLICATION attribute, schema/table read
# access, default privileges for future tables, and pg_read_all_data
# (best-effort; ignored on servers where the role is unavailable).
grant-cdc-privileges username='' db_name='':
#!/bin/bash
set -euo pipefail
USERNAME=${USERNAME:-"{{ username }}"}
DB_NAME=${DB_NAME:-"{{ db_name }}"}
while [ -z "${USERNAME}" ]; do
USERNAME=$(gum input --prompt="Username to grant CDC privileges: " --width=100)
done
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name: " --width=100)
done
echo "Granting CDC privileges to user '${USERNAME}' on database '${DB_NAME}'..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c "ALTER USER ${USERNAME} WITH REPLICATION;"
echo "Granting schema and table privileges..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c \
"GRANT USAGE ON SCHEMA public TO ${USERNAME};
GRANT CREATE ON SCHEMA public TO ${USERNAME};
GRANT SELECT ON ALL TABLES IN SCHEMA public TO ${USERNAME};
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO ${USERNAME};"
echo "Granting pg_read_all_data role..."
kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${DB_NAME} -c "GRANT pg_read_all_data TO ${USERNAME};" 2>/dev/null || true
echo "CDC privileges granted to user '${USERNAME}'"
# Setup CDC: verify wal_level=logical, then create slot + publication and
# optionally grant CDC privileges to a user.
setup-cdc db_name='' slot_name='' pub_name='' username='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
USERNAME=${USERNAME:-"{{ username }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name for CDC setup: " --width=100)
done
while [ -z "${SLOT_NAME}" ]; do
SLOT_NAME=$(gum input --prompt="Replication slot name: " --width=100 \
--placeholder="e.g., demo_slot")
done
while [ -z "${PUB_NAME}" ]; do
PUB_NAME=$(gum input --prompt="Publication name: " --width=100 \
--placeholder="e.g., demo_pub")
done
echo "Setting up CDC on database '${DB_NAME}'..."
# Logical decoding requires wal_level=logical; bail out early otherwise.
WAL_LEVEL=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d postgres -tAc "SHOW wal_level;")
if [ "${WAL_LEVEL}" != "logical" ]; then
echo "WARNING: wal_level is '${WAL_LEVEL}', should be 'logical' for CDC"
echo "Please ensure PostgreSQL is configured with wal_level=logical"
exit 1
fi
just create-replication-slot "${SLOT_NAME}" "${DB_NAME}"
just create-publication "${PUB_NAME}" "${DB_NAME}"
if [ -n "${USERNAME}" ]; then
echo ""
just grant-cdc-privileges "${USERNAME}" "${DB_NAME}"
fi
echo ""
echo "CDC setup completed for database '${DB_NAME}'"
echo "  Replication Method: Logical Replication (CDC)"
echo "  Replication Slot: ${SLOT_NAME}"
echo "  Publication: ${PUB_NAME}"
if [ -n "${USERNAME}" ]; then
echo "  User with CDC privileges: ${USERNAME}"
fi
# Cleanup CDC (removes slot and publication); warns and asks for confirmation
# when the slot is still active.
cleanup-cdc db_name='' slot_name='' pub_name='':
#!/bin/bash
set -euo pipefail
DB_NAME=${DB_NAME:-"{{ db_name }}"}
SLOT_NAME=${SLOT_NAME:-"{{ slot_name }}"}
PUB_NAME=${PUB_NAME:-"{{ pub_name }}"}
while [ -z "${DB_NAME}" ]; do
DB_NAME=$(gum input --prompt="Database name for CDC cleanup: " --width=100)
done
while [ -z "${SLOT_NAME}" ]; do
SLOT_NAME=$(gum input --prompt="Replication slot name to delete: " --width=100 \
--placeholder="e.g., demo_slot")
done
while [ -z "${PUB_NAME}" ]; do
PUB_NAME=$(gum input --prompt="Publication name to delete: " --width=100 \
--placeholder="e.g., demo_pub")
done
echo "Cleaning up CDC configuration for database '${DB_NAME}'..."
# Check if slot is active
SLOT_ACTIVE=$(kubectl exec -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- \
psql -U postgres -d postgres -tAc \
"SELECT active FROM pg_replication_slots WHERE slot_name = '${SLOT_NAME}';" 2>/dev/null || echo "")
if [ "${SLOT_ACTIVE}" = "t" ]; then
echo "WARNING: Replication slot '${SLOT_NAME}' is currently active!"
echo "Please stop any active replication connections first."
if ! gum confirm "Proceed with deletion anyway?"; then
echo "Cleanup cancelled"
exit 1
fi
fi
# Delete in correct order: Slot first, then Publication
echo "Step 1: Deleting replication slot '${SLOT_NAME}'..."
just delete-replication-slot "${SLOT_NAME}" "${DB_NAME}" || \
echo "Replication slot '${SLOT_NAME}' not found or already deleted"
echo "Step 2: Deleting publication '${PUB_NAME}'..."
just delete-publication "${PUB_NAME}" "${DB_NAME}" || \
echo "Publication '${PUB_NAME}' not found or already deleted"
echo "CDC cleanup completed for database '${DB_NAME}'"
# Run psql interactively inside the postgres-cluster-1 pod; extra arguments
# are passed straight through to psql.
[no-exit-message]
psql *args='':
@kubectl exec -it -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- psql {{ args }}
# Dump Postgres database by pg_dump (custom format, -Fc); the dump is written
# inside the pod, copied out with kubectl cp, then removed.
# NOTE(review): assumes /var/lib/postgresql/data in the pod is writable and
# has room for the dump — confirm for large databases.
[no-cd]
dump db_name file exclude_tables='':
#!/bin/bash
set -euo pipefail
DUMP_OPTIONS="-Fc"
# Optional comma-separated exclude list -> repeated --exclude-table flags.
if [ -n "{{ exclude_tables }}" ]; then
IFS=',' read -ra TABLES <<< "{{ exclude_tables }}"
for table in "${TABLES[@]}"; do
DUMP_OPTIONS="$DUMP_OPTIONS --exclude-table=$table"
done
fi
kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- bash -c \
"pg_dump -d postgresql://$(just postgres::admin-username):$(just postgres::admin-password)@localhost/{{ db_name }} $DUMP_OPTIONS > \
/var/lib/postgresql/data/db.dump"
kubectl cp -n ${CNPG_NAMESPACE} -c postgres \
postgres-cluster-1:/var/lib/postgresql/data/db.dump {{ file }}
kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- rm /var/lib/postgresql/data/db.dump
# Restore Postgres database by pg_restore: creates the database if missing,
# copies the dump into the pod, then restores with --clean --if-exists.
# NOTE(review): the temporary dump file inside the pod is not removed after
# the restore (unlike `dump`) — confirm whether that is intentional.
[no-cd]
restore db_name file:
just postgres::create-db {{ db_name }}
kubectl cp {{ file }} -n ${CNPG_NAMESPACE} -c postgres \
postgres-cluster-1:/var/lib/postgresql/data/db.dump
kubectl exec -i -n ${CNPG_NAMESPACE} postgres-cluster-1 -c postgres -- bash -c \
"pg_restore --clean --if-exists \
-d postgresql://$(just postgres::admin-username):$(just postgres::admin-password)@localhost/{{ db_name }} \
/var/lib/postgresql/data/db.dump"
# Enable Prometheus monitoring: label the namespace, switch on the CNPG
# PodMonitor, and tag it for the kube-prometheus-stack release selector.
enable-monitoring:
#!/bin/bash
set -euo pipefail
echo "Enabling Prometheus PodMonitor for PostgreSQL cluster..."
# Label namespace to enable monitoring
kubectl label namespace ${CNPG_NAMESPACE} buun.channel/enable-monitoring=true --overwrite
# Enable PodMonitor
kubectl patch cluster postgres-cluster -n ${CNPG_NAMESPACE} --type=merge -p '{"spec":{"monitoring":{"enablePodMonitor":true}}}'
echo "Waiting for PodMonitor to be created..."
# Fixed sleep: the operator creates the PodMonitor asynchronously.
sleep 3
# Add release label to PodMonitor
kubectl label podmonitor postgres-cluster -n ${CNPG_NAMESPACE} release=kube-prometheus-stack --overwrite
kubectl get podmonitor -n ${CNPG_NAMESPACE} -l cnpg.io/cluster=postgres-cluster
echo "✓ PostgreSQL monitoring enabled"
# Disable Prometheus monitoring (reverses enable-monitoring).
disable-monitoring:
#!/bin/bash
set -euo pipefail
echo "Disabling Prometheus PodMonitor for PostgreSQL cluster..."
# Disable PodMonitor
kubectl patch cluster postgres-cluster -n ${CNPG_NAMESPACE} --type=merge -p '{"spec":{"monitoring":{"enablePodMonitor":false}}}'
# Remove namespace label
kubectl label namespace ${CNPG_NAMESPACE} buun.channel/enable-monitoring- --ignore-not-found
echo "✓ PostgreSQL monitoring disabled"

View File

@@ -0,0 +1,12 @@
# cert-manager Certificate for the echopod ingress: asks the
# cloudflare-cluster-issuer (DNS-01 via Cloudflare) for a TLS cert and stores
# it in echopod-certificate-secret.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: echopod-ingress-certificate
namespace: default
spec:
secretName: echopod-certificate-secret
issuerRef:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- echopod.k8s.schnrbs.work

View File

@@ -30,10 +30,30 @@ kind: Service
metadata:
name: echopod-service
spec:
type: NodePort # Change to LoadBalancer if using a cloud provider
type: LoadBalancer # Change to LoadBalancer if using a cloud provider
# type: NodePort # Change to LoadBalancer if using a cloud provider
ports:
- port: 80
targetPort: 80
nodePort: 30080 # Port to expose on the node
# nodePort: 30080 # Port to expose on the node
selector:
app: echopod
app: echopod
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: echopod-ingress-route
namespace: default
spec:
entryPoints:
- websecure
routes:
- match: Host(`echopod.k8s.schnrbs.work`)
kind: Rule
services:
- name: echopod-service
port: 80
tls:
secretName: echopod-certificate-secret

11
env/env.local.gomplate vendored Normal file
View File

@@ -0,0 +1,11 @@
# shellcheck disable=all
K8S_CONTEXT={{ .Env.K8S_CONTEXT }}
K8S_MASTER_NODE_NAME={{ .Env.K8S_MASTER_NODE_NAME }}
SERVER_IP={{ .Env.SERVER_IP }}
AGENT_IP={{ .Env.AGENT_IP }}
METALLB_ADDRESS_RANGE={{ .Env.METALLB_ADDRESS_RANGE }}
CLOUDFLARE_API_TOKEN={{ .Env.CLOUDFLARE_API_TOKEN }}
ACME_EMAIL={{ .Env.ACME_EMAIL }}
EXTERNAL_DOMAIN={{ .Env.EXTERNAL_DOMAIN }}
VAULT_HOST={{ .Env.VAULT_HOST }}
AUTHENTIK_HOST={{ .Env.AUTHENTIK_HOST }}

144
env/justfile vendored Normal file
View File

@@ -0,0 +1,144 @@
set fallback := true
export ENV_FILE := ".env.local"
# Required connection settings, overridable via environment variables.
export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export SERVER_IP := env("SERVER_IP", "")
export AGENT_IP := env("AGENT_IP", "")
# The setup recipe also reads and re-assigns these. Without exported defaults
# (a) its `[ -z "${VAR}" ]` checks abort under `set -u` when a variable is
# absent from the environment, and (b) values entered via gum stay
# un-exported in bash, so gomplate's `.Env.*` lookups cannot see them.
export METALLB_ADDRESS_RANGE := env("METALLB_ADDRESS_RANGE", "")
export CLOUDFLARE_API_TOKEN := env("CLOUDFLARE_API_TOKEN", "")
export ACME_EMAIL := env("ACME_EMAIL", "")
export EXTERNAL_DOMAIN := env("EXTERNAL_DOMAIN", "")
export VAULT_HOST := env("VAULT_HOST", "")
export AUTHENTIK_HOST := env("AUTHENTIK_HOST", "")
# Verify the required environment variables are present; on the first missing
# one, print a hint to run `just env::setup` and fail.
check:
    #!/bin/bash
    set -euo pipefail
    # One loop instead of four copy-pasted if-blocks; ${!name} is bash
    # indirect expansion. Messages and exit status are unchanged.
    for name in K8S_CONTEXT K8S_MASTER_NODE_NAME SERVER_IP AGENT_IP; do
    if [ -z "${!name}" ]; then
    echo "${name} is not set. Please execute 'just env::setup'" >&2
    exit 1
    fi
    done
# Interactive environment setup: prompt (via gum) for every value still
# missing from the environment, then render ../.env.local with gomplate.
# NOTE(review): METALLB_ADDRESS_RANGE and the variables prompted after it are
# not exported/defaulted at the top of this justfile — under `set -u` their
# `[ -z ... ]` checks can abort, and un-exported bash assignments are
# invisible to gomplate's `.Env.*` — confirm.
setup:
#!/bin/bash
set -euo pipefail
if [ -f ../.env.local ]; then
echo ".env.local already exists." >&2
# On confirm, clear values so the prompts below re-ask.
# NOTE(review): K8S_MASTER_NODE_NAME is not cleared here, unlike the other
# three — it will keep its previous value on overwrite; confirm intent.
if gum confirm "Do you want to overwrite it?"; then
K8S_CONTEXT=""
SERVER_IP=""
AGENT_IP=""
elif [[ $? -eq 130 ]]; then
echo "Setup cancelled by user." >&2
exit 1
else
echo "Aborting setup." >&2
exit 1
fi
fi
while [ -z "${K8S_CONTEXT}" ]; do
if ! K8S_CONTEXT=$(
gum input --prompt="Context name: " \
--width=100 --placeholder="context"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${K8S_MASTER_NODE_NAME}" ]; do
if ! K8S_MASTER_NODE_NAME=$(
gum input --prompt="Master Node Hostname: " \
--width=100 --placeholder="Master Node Name"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${SERVER_IP}" ]; do
if ! SERVER_IP=$(
gum input --prompt="IP of Server/Master Node: " \
--width=100 --placeholder="Master Node IP"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${AGENT_IP}" ]; do
if ! AGENT_IP=$(
gum input --prompt="IP of Agent Node: " \
--width=100 --placeholder="Agent Node IP"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${METALLB_ADDRESS_RANGE}" ]; do
if ! METALLB_ADDRESS_RANGE=$(
gum input --prompt="IP Range for LoadBalancer: " \
--width=100 --placeholder="[x.x.x.x-y.y.y.y]"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${CLOUDFLARE_API_TOKEN}" ]; do
if ! CLOUDFLARE_API_TOKEN=$(
gum input --prompt="Cloudflare API Token: " \
--width=100 --placeholder="API Token" --password
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${ACME_EMAIL}" ]; do
if ! ACME_EMAIL=$(
gum input --prompt="ACME Email for Cert-Manager: " \
--width=100 --placeholder="Email"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${EXTERNAL_DOMAIN}" ]; do
if ! EXTERNAL_DOMAIN=$(
gum input --prompt="External Domain: " \
--width=100 --placeholder="Domain"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${VAULT_HOST}" ]; do
if ! VAULT_HOST=$(
gum input --prompt="Vault hostname: " \
--width=100 --placeholder="vault"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${AUTHENTIK_HOST}" ]; do
if ! AUTHENTIK_HOST=$(
gum input --prompt="Authentik hostname: " \
--width=100 --placeholder="authentik"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
echo "Generating .env.local file..."
rm -f ../.env.local
gomplate -f env.local.gomplate -o ../.env.local

17
gitops/README.md Normal file
View File

@@ -0,0 +1,17 @@
https://www.reddit.com/r/GitOps/comments/1ih3b4a/discussion_setting_up_fluxcd_on_k3s_for_home_labs/
https://bash.ghost.io/k8s-home-lab-gitops-with-fluxcd/
# Setup using internal Gitea server
## Create a Gitea personal access token and export it as an env var
```
export GITEA_TOKEN=<my-token>
```
## Bootstrap
```
flux bootstrap gitea --repository=k3s-homelab --branch=main --personal --owner baschno --hostname gitty.homeee.schnorbus.net --ssh-hostname=gitty.fritz.box:2221 --verbose --path=./clusters/homelab
```
https://bash.ghost.io/secure-kubernetes-secrets-disaster-recovery-with-sops-gitops-fluxcd/
"Make a 4×4 grid starting with the 1880s. In each section, I should appear styled according to that decade (clothing, hairstyle, facial hair, accessories). Use colors, background, & film style accordingly."

17
justfile Normal file
View File

@@ -0,0 +1,17 @@
# Root justfile: loads .env.local, prepends local node binaries to PATH, and
# wires up the per-component submodules.
set dotenv-filename := ".env.local"
export PATH := "./node_modules/.bin:" + env_var('PATH')
[private]
default:
@just --list --unsorted --list-submodules
# Submodules; the quoted argument is the directory the module lives in.
# NOTE(review): a VPA/justfile exists in the repo but has no `mod` entry
# here — confirm whether it should be registered.
mod env
mod BasicSetup '01_Basic_Setup'
mod MetalLbSetup 'Metallb_Setup'
mod Traefik
mod Longhorn
mod Vault '08_Vault'
mod ExternalSecrets '09_ExternalSecrets'
mod Postgres '10_Postgres'
mod KubePrometheusStack '07_KubePrometheusStack'

View File

@@ -1,25 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: longhorn-web-ui
namespace: longhorn-system
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
rules:
- host: longhorn.k8s.internal.schnrbs.work
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: longhorn-frontend
port:
number: 80
tls:
- hosts:
- longhorn.k8s.internal.schnrbs.work
secretName: longhorn-web-ui-tls

View File

@@ -1,2 +0,0 @@
longhornUI:
replicas: 1

8
mise.toml Normal file
View File

@@ -0,0 +1,8 @@
[tools]
jq = '1.8.1'
k3sup = '0.13.11'
helm = '3.19.0'
gum = '0.16.2'
gomplate = '4.3.3'
just = "1.42.4"
vault = "1.20.2"

View File

@@ -11,19 +11,24 @@ Use for `helm` values:
https://github.com/cablespaghetti/k3s-monitoring/blob/master/kube-prometheus-stack-values.yaml
```
helm upgrade --install prometheus prometheus-community/kube-prometheus-stack --create-namespace --namespace monitoring --values kube-prometheus-stack-values.yaml
helm upgrade --install prometheus prometheus-community/kube-prometheus-stack \
--create-namespace \
--namespace monitoring \
--values kube-prometheus-stack-values.yaml
```
Accessing UIs via PortForwarding
```
kubectl port-forward svc/prometheus-grafana 8080:80 -n monitoring
kubectl port-forward svc/kube-prometheus-stack-grafana 8080:80 -n monitoring
kubectl port-forward svc/prometheus-kube-prometheus-prometheus 9090 -n monitoring
kubectl port-forward svc/prometheus-kube-prometheus-alertmanager 9093 -n monitoring
```
This will make Grafana accessible on http://localhost:8080, Prometheus on http://localhost:9090 and Alert Manager on http://localhost:9093
Get Grafana Password via:
```
kubectl get secret --namespace monitoring -l app.kubernetes.io/component=admin-secret -o jsonpath="{.items[0].data.admin-password}" | base64 --decode ; echo
```
Login for Grafana:
**User:** admin
**Pwd:** prom-operator

View File

@@ -0,0 +1,19 @@
NAME: kube-prometheus-stack
LAST DEPLOYED: Wed Jun 11 19:32:51 2025
NAMESPACE: monitoring
STATUS: deployed
REVISION: 1
NOTES:
kube-prometheus-stack has been installed. Check its status by running:
kubectl --namespace monitoring get pods -l "release=kube-prometheus-stack"
Get Grafana 'admin' user password by running:
kubectl --namespace monitoring get secrets kube-prometheus-stack-grafana -o jsonpath="{.data.admin-password}" | base64 -d ; echo
Access Grafana local instance:
export POD_NAME=$(kubectl --namespace monitoring get pod -l "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=kube-prometheus-stack" -oname)
kubectl --namespace monitoring port-forward $POD_NAME 3000
Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.

37
statefulset/depl.yaml Normal file
View File

@@ -0,0 +1,37 @@
# Test StatefulSet: 3 nginx replicas in namespace "test", each with its own
# 1Gi Longhorn volume claimed via volumeClaimTemplates.
# NOTE(review): indentation appears stripped in this capture; nesting must be
# restored for valid YAML.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: web
namespace: test
spec:
selector:
matchLabels:
app: nginx # has to match .spec.template.metadata.labels
serviceName: "nginx"
replicas: 3 # by default is 1
minReadySeconds: 10 # by default is 0
template:
metadata:
labels:
app: nginx # has to match .spec.selector.matchLabels
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: registry.k8s.io/nginx-slim:0.24
ports:
- containerPort: 80
name: web
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html
# One PVC per replica, provisioned by the Longhorn storage class.
volumeClaimTemplates:
- metadata:
name: www
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "longhorn"
resources:
requests:
storage: 1Gi

18
statefulset/svc.yaml Normal file
View File

@@ -0,0 +1,18 @@
# Headless Service (clusterIP: None) for the StatefulSet's stable pod DNS.
apiVersion: v1
kind: Service
metadata:
name: nginx
namespace: test
labels:
app: nginx
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: nginx
# NOTE(review): a Service has no .spec.template field — the block below is
# invalid here and looks like it belongs in the StatefulSet pod spec; confirm.
template:
spec:
nodeSelector:
node.longhorn.io/create-default-disk: "true" # this is required to create a disk on the node