12 Commits

Author SHA1 Message Date
baschno
a9ea233c15 fixing traefik chart deployment! 2026-04-03 10:45:50 +02:00
baschno
bc69332ca5 working state 2026-04-02 21:14:53 +02:00
baschno
37fc96023a Adding show ports 2026-04-02 21:13:46 +02:00
baschno
24e56c658a advance setup 2026-04-02 21:13:14 +02:00
baschno
bb5add7a10 add k8s stop 2026-04-01 21:54:37 +02:00
baschno
7e47ce2787 kubeprom 2026-03-20 15:43:24 +01:00
baschno
2c23ac85ce prometheus stack 2026-02-23 23:11:04 +01:00
baschno
c6d2b3de3c auth update 2026-02-12 22:20:11 +01:00
baschno
a5d220418e authentik update 2026-02-12 22:16:00 +01:00
baschno
3226e527f4 added doku 2026-02-05 20:53:14 +01:00
baschno
71348ad7f5 kubeprom 2026-02-02 23:51:48 +01:00
baschno
40eae4f567 add pg stuff 2026-02-01 22:09:00 +01:00
12 changed files with 243 additions and 96 deletions

1
.gitignore vendored
View File

@@ -1 +1,2 @@
.env.local
traefik-values.yaml

View File

@@ -10,6 +10,7 @@ export K3S_ENABLE_REGISTRY := env("K3S_ENABLE_REGISTRY", "true")
export SERVER_IP := env("K3S_SERVER_IP","192.168.178.45")
export AGENT_IP := env("K3S_AGENT_IP","192.168.178.75")
export USER := env("K3S_USER","basti")
export LONGHORN_NAMESPACE := env("LONGHORN_NAMESPACE","longhorn-system")
[private]
default:
@@ -146,3 +147,75 @@ configure-registry:
echo "Restarting k3s to apply registry configuration..."
ssh "${K8S_MASTER_NODE_NAME}" "sudo systemctl restart k3s"
echo "✓ Registry configuration applied"
# Gracefully shut down the cluster: drain+cordon every node, wait for
# Longhorn volumes to detach, then stop and disable k3s on each node.
stop:
#!/bin/bash
set -euo pipefail
# Wall-clock start time, used by elapsed() for progress messages.
START_TIME=$(date +%s)
elapsed() {
echo "$(($(date +%s) - START_TIME))s"
}
# All node names in the cluster, space-separated.
nodenames=$(kubectl get nodes -o=jsonpath="{.items[*]['metadata.name']}")
for node in ${nodenames}; do
# Best-effort drain: `--disable-eviction` bypasses PodDisruptionBudgets and
# `|| true` ensures a stuck pod cannot block the shutdown sequence.
kubectl drain "${node}" --ignore-daemonsets --delete-emptydir-data --force --disable-eviction --grace-period=60 --timeout=180s 2>&1 || true
kubectl cordon "${node}"
echo "Node ${node} stopped."
done
echo "Drain complete. Nodes are cordoned and drained."
# Only wait for volume detachment when Longhorn is actually installed.
if helm status longhorn -n ${LONGHORN_NAMESPACE} &>/dev/null; then
echo "[$(elapsed)] Waiting for Longhorn volumes to be detached..."
TIMEOUT=90
ELAPSED=0
# Poll (2s interval, 90s cap) until no Longhorn volume reports state
# "attached" — stopping k3s while volumes are attached risks data issues.
while [ $ELAPSED -lt $TIMEOUT ]; do
ATTACHED=$(kubectl get volumes.longhorn.io -n ${LONGHORN_NAMESPACE} -o json 2>/dev/null | \
jq -r '.items[] | select(.status.state == "attached") | .metadata.name' 2>/dev/null || true)
if [ -z "$ATTACHED" ]; then
echo "[$(elapsed)] ✓ All Longhorn volumes detached successfully"
break
fi
ATTACHED_COUNT=$(echo "$ATTACHED" | grep -c . || echo 0)
echo " Still waiting for $ATTACHED_COUNT volume(s) to detach..."
sleep 2
ELAPSED=$((ELAPSED + 2))
done
# Timed out: warn, but continue with the shutdown anyway.
if [ $ELAPSED -ge $TIMEOUT ]; then
echo "[$(elapsed)] ⚠ Warning: Timeout waiting for volumes to detach"
fi
fi
# Stop and disable the k3s service on every node; `|| true` keeps one
# unreachable node from aborting the loop under `set -e`.
for node in ${nodenames}; do
echo "[$(elapsed)] Stopping and disabling k3s service..."
ssh "${node}" "sudo systemctl stop k3s 2>/dev/null || true"
ssh "${node}" "sudo systemctl disable k3s 2>/dev/null || true"
done
# Bring the cluster back after `stop`: restart the k3s service on every
# node, then uncordon any node still marked unschedulable.
start:
#!/bin/bash
set -euo pipefail
# Succeeds when the node is NOT marked unschedulable (i.e. not cordoned).
is_schedulable() {
node_name="$1"
! kubectl get node "$node_name" -o jsonpath='{.spec.unschedulable}' 2>/dev/null | grep -q "true"
}
# NOTE(review): kubectl needs a reachable API server at this point — this
# assumes the control-plane k3s service is already up; verify on a cold start.
nodenames=$(kubectl get nodes -o=jsonpath="{.items[*]['metadata.name']}")
for node in ${nodenames}; do
echo "Starting k3s service on ${node}..."
# Re-enable and start k3s: the `stop` recipe stopped AND disabled the
# service, and the original loop only echoed without starting anything.
ssh "${node}" "sudo systemctl enable k3s 2>/dev/null || true"
ssh "${node}" "sudo systemctl start k3s 2>/dev/null || true"
if is_schedulable "$node"; then
echo "✓ Node $node is already schedulable"
# Fix: was `exit 0`, which aborted the whole recipe on the first
# schedulable node and left all remaining nodes cordoned.
continue
fi
echo "Uncordoning node $node..."
kubectl uncordon "$node" 2>&1 || true
echo "Wait for every node to become Ready..."
done

View File

@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: grafana-ingress-certificate
namespace: {{.Env.PROMETHEUS_NAMESPACE}}
spec:
secretName: grafana-certificate-secret
issuerRef:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- {{.Env.GRAFANA_HOST}}

View File

@@ -1,6 +1,7 @@
set fallback := true
export PROMETHEUS_NAMESPACE := env("PROMETHEUS_NAMESPACE", "prometheus")
export PROMETHEUS_NAMESPACE := env("PROMETHEUS_NAMESPACE", "monitoring")
export GRAFANA_HOST := env("GRAFANA_HOST", "")
[private]
default:
@@ -13,7 +14,29 @@ add-helm-repo:
install:
@just add-helm-repo
just add-helm-repo
gomplate -f kube-stack-config-values.gomplate.yaml -o kube-stack-config-values.yaml
@helm upgrade --cleanup-on-fail --install kube-prometheus-stack prometheus-community/kube-prometheus-stack \
--namespace ${PROMETHEUS_NAMESPACE} \
--create-namespace \
--wait \
-f kube-stack-config-values.yaml
just KubePrometheusStack::show-ports
gomplate -f ./grafana-certificate.gomplate.yaml | kubectl apply -f -
uninstall:
helm uninstall kube-prometheus-stack -n ${PROMETHEUS_NAMESPACE}
show-ports:
@echo "kubectl port-forward svc/kube-prometheus-stack-grafana 8080:80 -n ${PROMETHEUS_NAMESPACE}"
@echo "kubectl port-forward svc/kube-prometheus-stack-prometheus 9090 -n ${PROMETHEUS_NAMESPACE}"
@echo "kubectl port-forward svc/kube-prometheus-stack-alertmanager 9093 -n ${PROMETHEUS_NAMESPACE}"
@echo "Get Grafana Password:"
@echo "kubectl get secret --namespace monitoring -l app.kubernetes.io/component=admin-secret -o jsonpath=\"{.items[0].data.admin-user}\" | base64 --decode ; echo"
@echo "kubectl get secret --namespace monitoring -l app.kubernetes.io/component=admin-secret -o jsonpath=\"{.items[0].data.admin-password}\" | base64 --decode ; echo"

View File

@@ -0,0 +1,23 @@
grafana:
enabled: true
ingress:
enabled: true
ingressClassName: traefik
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
hosts:
- {{ .Env.GRAFANA_HOST }}
tls:
- secretName: grafana-certificate-secret
  hosts:
- {{ .Env.GRAFANA_HOST }}
annotations:
traefik.ingress.kubernetes.io/router.tls: "true"
traefik.ingress.kubernetes.io/router.tls.certresolver: "" # empty = use secretName, not its own resolver
grafana.ini:
server:
domain: {{ .Env.GRAFANA_HOST }}
root_url: https://{{ .Env.GRAFANA_HOST }}
serve_from_sub_path: false

View File

@@ -1,78 +0,0 @@
#
# Copyright © contributors to CloudNativePG, established as
# CloudNativePG a Series of LF Projects, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
# -- here you can pass the whole values directly to the kube-prometheus-stack chart
enabled: true
kubeControllerManager:
enabled: false
nodeExporter:
enabled: false
defaultRules:
create: true
rules:
alertmanager: false
etcd: false
configReloaders: false
general: false
k8s: true
kubeApiserver: false
kubeApiserverAvailability: false
kubeApiserverSlos: false
kubelet: true
kubeProxy: false
kubePrometheusGeneral: false
kubePrometheusNodeRecording: false
kubernetesApps: false
kubernetesResources: false
kubernetesStorage: false
kubernetesSystem: false
kubeScheduler: false
kubeStateMetrics: false
network: false
node: true
nodeExporterAlerting: false
nodeExporterRecording: true
prometheus: false
prometheusOperator: false
#nodeSelector:
#workload: monitor
prometheus:
prometheusSpec:
podMonitorSelectorNilUsesHelmValues: false
ruleSelectorNilUsesHelmValues: false
serviceMonitorSelectorNilUsesHelmValues: false
probeSelectorNilUsesHelmValues: false
#nodeSelector:
#workload: monitor
grafana:
enabled: true
# -- the grafana admin password
adminPassword: prom-operator
defaultDashboardsEnabled: false
sidecar:
dashboards:
enabled: true
#nodeSelector:
#workload: monitor
alertmanager:
enabled: true
#alertManagerSpec:
#nodeSelector:
#workload: monitor

4
12_Authentik/README.md Normal file
View File

@@ -0,0 +1,4 @@
https://docs.goauthentik.io/install-config/install/kubernetes/#install-authentik-helm-chart
https://nohup.no/posts/authentik-on-k8s/

View File

@@ -0,0 +1,10 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: authentik-pgdb
namespace: authentik
spec:
instances: 3
storage:
size: 1Gi

View File

@@ -5,8 +5,10 @@ authentik:
error_reporting:
enabled: true
postgresql:
password: "ThisIsNotASecurePassword"
host: "your-cnpg-cluster-rw.namespace.svc.cluster.local"
name: "authentik"
port: 5432
existingSecret: "authentik-credentials" # if you want to use a secret
server:
ingress:
# Specify kubernetes ingress controller class name
@@ -15,7 +17,55 @@ server:
hosts:
- authentik.domain.tld
# Disable the built-in PostgreSQL
postgresql:
enabled: true
enabled: false
auth:
password: "ThisIsNotASecurePassword"
password: "ThisIsNotASecurePassword"
postgresql:
host: "your-cnpg-cluster-rw.namespace.svc.cluster.local"
name: "authentik"
port: 5432
existingSecret: "authentik-credentials" # if you want to use a secret
server:
ingress:
# Specify kubernetes ingress controller class name
ingressClassName: nginx | traefik | kong
enabled: true
postgresql:
host: "your-cnpg-cluster-rw.namespace.svc.cluster.local"
name: "authentik"
port: 5432
existingSecret: "authentik-credentials" # if you want to use a secret
server:
ingress:
# Specify kubernetes ingress controller class name
ingressClassName: nginx | traefik | kong
enabled: true
hosts:
- authentik.domain.tld
# Disable the built-in PostgreSQL
postgresql:
enabled: false
hosts:
- authentik.domain.tld
# Disable the built-in PostgreSQL
postgresql:
enabled: false
postgresql:
host: "your-cnpg-cluster-rw.namespace.svc.cluster.local"
name: "authentik"
port: 5432
existingSecret: "authentik-credentials" # if you want to use a secret
server:
ingress:
# Specify kubernetes ingress controller class name
ingressClassName: nginx | traefik | kong
enabled: true
hosts:
- authentik.domain.tld
# Disable the built-in PostgreSQL
postgresql:
enabled: false

View File

@@ -2,6 +2,7 @@ set fallback:=true
export CERT_MANAGER_NAMESPACE := env("CERT_MANAGER_NAMESPACE", "cert-manager")
export TRAEFIK_NAMESPACE := env("TRAEFIK_NAMESPACE", "traefik")
export TRAEFIK_CHART_VERSION := env("TRAEFIK_CHART_VERSION", "v39.0.7")
add-helm-repos:
helm repo add traefik https://helm.traefik.io/traefik --force-update
@@ -15,11 +16,14 @@ install:
just add-helm-repos
gomplate -f traefik-values-gomplate.yaml -o traefik-values.yaml
helm upgrade traefik traefik/traefik \
--install \
--cleanup-on-fail \
--namespace ${TRAEFIK_NAMESPACE} \
--create-namespace \
--version ${TRAEFIK_CHART_VERSION} \
--values traefik-values.yaml
helm upgrade cert-manager jetstack/cert-manager \
@@ -60,3 +64,8 @@ status:
echo ""
echo "CRDs:"
kubectl get crd | grep cert-manager.io
ingressroute:
#!/bin/bash
set -euo pipefail
gomplate -f traefik-ingressroute-gomplate.yaml | kubectl apply -f -

View File

@@ -1,15 +1,33 @@
additionalArguments:
- "--serversTransport.insecureSkipVerify=true"
- "--log.level=INFO"
deployment:
enabled: true
replicas: 1
annotations: {}
podAnnotations: {}
additionalContainers: []
initContainers: []
ports:
web:
redirections:
entryPoint:
to: websecure
scheme: https
http:
redirections:
entryPoint:
to: websecure
websecure:
http:
tls:
enabled: true
logs:
general:
level: DEBUG
ingressRoute:
dashboard:
enabled: true
entryPoints: [web, websecure]
matchRule: Host(`traefik-dashboard.{{ .Env.EXTERNAL_DOMAIN }}`)
entryPoints:
- websecure

View File

@@ -19,14 +19,16 @@ helm upgrade --install prometheus prometheus-community/kube-prometheus-stack \
Accessing UIs via PortForwarding
```
kubectl port-forward svc/prometheus-grafana 8080:80 -n monitoring
kubectl port-forward svc/kube-prometheus-stack-grafana 8080:80 -n monitoring
kubectl port-forward svc/prometheus-kube-prometheus-prometheus 9090 -n monitoring
kubectl port-forward svc/prometheus-kube-prometheus-alertmanager 9093 -n monitoring
```
This will make Grafana accessible on http://localhost:8080, Prometheus on http://localhost:9090 and Alert Manager on http://localhost:9093
Get Grafana Password via:
```
kubectl get secret --namespace monitoring -l app.kubernetes.io/component=admin-secret -o jsonpath="{.items[0].data.admin-password}" | base64 --decode ; echo
```
Login for Grafana:
**User:** admin
**Pwd:** prom-operator