Compare commits


32 Commits

Author SHA1 Message Date
baschno
09026d6812 move test deployment to different justfile 2025-12-29 18:33:46 +01:00
baschno
24991fce90 add setup-cluster-issuer 2025-12-28 17:04:24 +01:00
baschno
65a59d2d0c WIP: cert manager 2025-12-28 16:19:08 +01:00
baschno
85fb620e39 add module traefik 2025-12-28 11:19:30 +01:00
baschno
b56e02d2ed fix formatting 2025-12-28 11:19:12 +01:00
baschno
15cb2ce903 adding test deployment 2025-12-28 11:18:46 +01:00
baschno
b47fe8f66b fix formatting 2025-12-27 20:38:12 +01:00
baschno
c5810661e5 Add support for metallb installation 2025-12-27 20:32:16 +01:00
baschno
7ddc08d622 add local docker registry config 2025-12-27 09:58:15 +01:00
baschno
c5aa7f8105 fix context name parameter 2025-12-26 20:15:41 +01:00
baschno
0c6cfedcde update manual readme 2025-12-22 20:48:17 +01:00
2be83a977a Merge pull request 'just enabled' (#1) from just into master
Reviewed-on: #1
2025-12-22 19:47:19 +00:00
baschno
4f5a18c84c install incl agent ready 2025-12-22 20:41:06 +01:00
baschno
7a54346331 add local container registry 2025-12-22 20:15:48 +01:00
baschno
5abc0de38a add just and mise tool support 2025-12-22 11:21:20 +01:00
baschno
29674ae504 adding vault in dev mode 2025-12-20 11:32:56 +01:00
baschno
6abe5d1a8f optiona 2025-11-22 19:39:35 +01:00
baschno
67a6c414f2 updating ip range 2025-11-22 19:39:26 +01:00
baschno
08212c26a6 taint 2025-11-22 09:33:41 +01:00
baschno
e4adbfd0b2 add few links 2025-08-31 17:16:55 +02:00
baschno
d7db562a23 helm and flux 2025-08-22 18:10:24 +02:00
baschno
7896130d05 longhorn nodeselector doku 2025-08-21 21:07:31 +02:00
baschno
efcb4ee172 . 2025-08-20 21:50:18 +02:00
baschno
f58fad216a add prometheus helm 2025-08-20 19:27:05 +02:00
baschno
90e0de0804 add reloader component 2025-08-20 19:27:05 +02:00
baschno
8cb83ffd9c updsate 2025-08-11 20:31:16 +02:00
baschno
cca6f599d5 add statefulset stuff 2025-06-13 21:26:58 +02:00
baschno
506a199c95 longorn other namespace 2025-06-13 21:26:58 +02:00
baschno
d2a16bd55b helm prometheus 2025-06-09 19:24:40 +02:00
baschno
d25c9227c7 longhorn configure additional disk 2025-06-08 23:09:39 +02:00
baschno
45c61d5130 streamlined homepage deployment 2025-05-23 19:46:11 +02:00
baschno
82c19ff12c updating steps for traefik 2025-05-23 19:10:27 +02:00
41 changed files with 1086 additions and 1856 deletions

1
.gitignore vendored Normal file

@@ -0,0 +1 @@
.env.local

README.md

@@ -34,4 +34,30 @@ Rancher Installation
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
# Prevent scheduling on master (optional)
```
kubectl taint nodes master node-role.kubernetes.io/master=:NoSchedule
```
# Just Setup // K3sup
```
export SERVER_IP=192.168.178.45
export AGENT_IP=192.168.178.75
export USER=basti
k3sup install \
  --cluster \
  --ip $SERVER_IP \
  --user $USER \
  --merge \
  --local-path $HOME/.kube/config \
  --context my-k3s
k3sup join \
  --ip $AGENT_IP \
  --server-ip $SERVER_IP \
  --user $USER
```
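Once install and join have completed, a quick sanity check from the workstation (standard kubectl; node names differ per host):
```
kubectl config use-context my-k3s
kubectl get nodes -o wide   # the server and the joined agent should report Ready
```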

148
01_Basic_Setup/justfile Normal file

@@ -0,0 +1,148 @@
set fallback := true
export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export EXTERNAL_K8S_HOST := env("EXTERNAL_K8S_HOST", "")
export KEYCLOAK_HOST := env("KEYCLOAK_HOST", "")
export KEYCLOAK_REALM := env("KEYCLOAK_REALM", "buunstack")
export K8S_OIDC_CLIENT_ID := env('K8S_OIDC_CLIENT_ID', "k8s")
export K3S_ENABLE_REGISTRY := env("K3S_ENABLE_REGISTRY", "true")
export SERVER_IP := env("K3S_SERVER_IP","192.168.178.45")
export AGENT_IP := env("K3S_AGENT_IP","192.168.178.75")
export USER := env("K3S_USER","basti")
[private]
default:
@just --list --unsorted --list-submodules
install:
#!/bin/bash
set -euo pipefail
just env::check
username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
kubeconfig=""
context=""
if gum confirm "Update KUBECONFIG?"; then
kubeconfig=$(
gum input --prompt="KUBECONFIG file: " --value="${HOME}/.kube/config" --width=100
)
context=$(
gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
)
fi
args=(
    "install"
    "--host" "${K8S_MASTER_NODE_NAME}"
    "--user" "${username}"
    "--no-extras"
)
if [ -n "${context}" ]; then
    args+=("--context" "${context}")
fi
if [ -n "${kubeconfig}" ]; then
    mkdir -p "$(dirname "${kubeconfig}")"
    args+=("--local-path" "${kubeconfig}" "--merge")
fi
echo "Running: k3sup ${args[*]}"
k3sup "${args[@]}"
if [ -n "${context}" ]; then
kubectl config use-context "${context}"
fi
if [ "${K3S_ENABLE_REGISTRY}" = "true" ]; then
echo "Setting up local Docker registry..."
# Deploy Docker registry to cluster
kubectl apply -f ./registry/registry.yaml
# Set Pod Security Standard for registry namespace
kubectl label namespace registry pod-security.kubernetes.io/enforce=restricted --overwrite
# Wait for registry deployment
echo "Waiting for registry to be ready..."
kubectl wait --for=condition=available --timeout=60s deployment/registry -n registry
# Configure registries.yaml for k3s
just configure-registry
echo "✓ Local Docker registry deployed and configured"
echo ""
echo "Registry accessible at:"
echo " localhost:30500"
echo ""
echo "Usage:"
echo " export DOCKER_HOST=ssh://${K8S_MASTER_NODE_NAME}"
echo " docker build -t localhost:30500/myapp:latest ."
echo " docker push localhost:30500/myapp:latest"
echo " kubectl run myapp --image=localhost:30500/myapp:latest"
fi
echo "k3s cluster installed on ${K8S_MASTER_NODE_NAME}."
uninstall:
#!/bin/bash
set -euo pipefail
if gum confirm "Uninstall k3s from ${K8S_MASTER_NODE_NAME}?"; then
if gum confirm "Also remove Agent node at ${AGENT_IP}?"; then
echo "Removing Agent node at ${AGENT_IP}..."
ssh "${AGENT_IP}" "/usr/local/bin/k3s-agent-uninstall.sh"
fi
echo "Removing content of Server node..."
ssh "${K8S_MASTER_NODE_NAME}" "/usr/local/bin/k3s-uninstall.sh"
echo "Cleaning up kubeconfig entries..."
cluster_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.cluster // empty")
user_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.user // empty")
if kubectl config get-contexts "${K8S_CONTEXT}" &>/dev/null; then
kubectl config delete-context "${K8S_CONTEXT}"
echo "Deleted context: ${K8S_CONTEXT}"
fi
if [ -n "${cluster_name}" ] && kubectl config get-clusters | grep -q "^${cluster_name}$"; then
kubectl config delete-cluster "${cluster_name}"
echo "Deleted cluster: ${cluster_name}"
fi
if [ -n "${user_name}" ] && kubectl config get-users | grep -q "^${user_name}$"; then
kubectl config delete-user "${user_name}"
echo "Deleted user: ${user_name}"
fi
echo "k3s cluster uninstalled from ${K8S_CONTEXT}."
else
echo "Uninstallation cancelled." >&2
exit 1
fi
add-agent:
#!/bin/bash
set -euo pipefail
just env::check
username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
new_agent_ip=$(gum input --prompt="Agent IP to join cluster: " --value="${AGENT_IP}" --width=100)
args=(
"join"
"--ip" "${new_agent_ip}"
"--server-ip" "${SERVER_IP}"
"--user" "${username}"
)
echo "Running: k3sup ${args[*]}"
k3sup "${args[@]}"
echo "Agent node at ${new_agent_ip} added to cluster."
# Configure k3s to use local registry
configure-registry:
#!/bin/bash
set -euo pipefail
echo "Configuring k3s registries.yaml..."
ssh "${K8S_MASTER_NODE_NAME}" "sudo mkdir -p /etc/rancher/k3s"
gomplate -f ./registry/registries.gomplate.yaml | ssh "${K8S_MASTER_NODE_NAME}" "sudo tee /etc/rancher/k3s/registries.yaml > /dev/null"
echo "Restarting k3s to apply registry configuration..."
ssh "${K8S_MASTER_NODE_NAME}" "sudo systemctl restart k3s"
echo "✓ Registry configuration applied"

01_Basic_Setup/registry/registries.gomplate.yaml

@@ -0,0 +1,4 @@
configs:
"localhost:30500":
tls:
insecure_skip_verify: true
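For k3s to pull images by that name, a `mirrors` entry usually sits alongside the `configs` block. A sketch of the rendered `/etc/rancher/k3s/registries.yaml`, assuming the NodePort registry from registry.yaml is served over plain HTTP:
```
mirrors:
  "localhost:30500":
    endpoint:
      - "http://localhost:30500"   # assumption: no TLS on the local registry
configs:
  "localhost:30500":
    tls:
      insecure_skip_verify: true
```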

01_Basic_Setup/registry/registry.yaml

@@ -0,0 +1,109 @@
apiVersion: v1
kind: Namespace
metadata:
name: registry
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: registry
namespace: registry
labels:
app: registry
spec:
replicas: 1
selector:
matchLabels:
app: registry
template:
metadata:
labels:
app: registry
spec:
securityContext:
runAsNonRoot: true
runAsUser: 65534
fsGroup: 65534
seccompProfile:
type: RuntimeDefault
containers:
- name: registry
image: registry:2
ports:
- containerPort: 5000
name: http
resources:
requests:
cpu: 25m
memory: 128Mi
limits:
cpu: 2000m
memory: 20Gi
env:
- name: REGISTRY_STORAGE_DELETE_ENABLED
value: "true"
- name: REGISTRY_HTTP_ADDR
value: "0.0.0.0:5000"
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 65534
capabilities:
drop:
- ALL
volumeMounts:
- name: registry-data
mountPath: /var/lib/registry
- name: tmp
mountPath: /tmp
livenessProbe:
httpGet:
path: /v2/
port: 5000
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
httpGet:
path: /v2/
port: 5000
initialDelaySeconds: 5
periodSeconds: 5
volumes:
- name: registry-data
emptyDir: {}
- name: tmp
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: registry
namespace: registry
labels:
app: registry
spec:
selector:
app: registry
ports:
- port: 5000
targetPort: 5000
name: http
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: registry-nodeport
namespace: registry
labels:
app: registry
spec:
selector:
app: registry
ports:
- port: 5000
targetPort: 5000
nodePort: 30500
name: http
type: NodePort

131
08_Vault/README.md Normal file

@@ -0,0 +1,131 @@
# Helm
## Installation
```
helm repo add hashicorp https://helm.releases.hashicorp.com
helm install vault hashicorp/vault \
  --set='server.dev.enabled=true' \
  --set='ui.enabled=true' \
  --set='ui.serviceType=LoadBalancer' \
  --namespace vault \
  --create-namespace
```
This runs Vault in "dev" mode, which requires no further setup, no state management, and no initialization. It is useful for experimenting with Vault without needing to unseal it, store keys, and so on. All data is lost on restart, so do not use dev mode for anything other than experimenting. See https://developer.hashicorp.com/vault/docs/concepts/dev-server for details.
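For a quick smoke test from a workstation, port-forward the service and point the CLI at it (a sketch; in dev mode the root token is `root` unless overridden):
```
kubectl port-forward svc/vault -n vault 8200:8200 &
export VAULT_ADDR=http://127.0.0.1:8200
vault login root   # dev-mode root token
vault status       # a dev server reports Sealed: false
```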
## Output
```
$ kubectl get all -n vault
NAME READY STATUS RESTARTS AGE
pod/vault-0 1/1 Running 0 2m39s
pod/vault-agent-injector-8497dd4457-8jgcm 1/1 Running 0 2m39s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/vault ClusterIP 10.245.225.169 <none> 8200/TCP,8201/TCP 2m40s
service/vault-agent-injector-svc ClusterIP 10.245.32.56 <none> 443/TCP 2m40s
service/vault-internal ClusterIP None <none> 8200/TCP,8201/TCP 2m40s
service/vault-ui LoadBalancer 10.245.103.246 24.132.59.59 8200:31764/TCP 2m40s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/vault-agent-injector 1/1 1 1 2m40s
NAME DESIRED CURRENT READY AGE
replicaset.apps/vault-agent-injector-8497dd4457 1 1 1 2m40s
NAME READY AGE
statefulset.apps/vault 1/1 2m40s
```
# Configuration
## Enter Pod
```
kubectl exec -it vault-0 -n vault -- /bin/sh
```
## Create policy
```
cat <<EOF > /home/vault/read-policy.hcl
path "secret*" {
capabilities = ["read"]
}
EOF
```
## Apply
```
vault policy write read-policy /home/vault/read-policy.hcl
```
## Enable Kubernetes
```
vault auth enable kubernetes
```
## Configure Kubernetes Auth
Configure Vault to talk to the Kubernetes API server:
```
vault write auth/kubernetes/config \
token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
kubernetes_host=https://${KUBERNETES_PORT_443_TCP_ADDR}:443 \
kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
```
## Create a Role
Create a role (vault-role) that binds the above policy to a Kubernetes service account (vault-serviceaccount) in a specific namespace. This allows the service account to access secrets stored in Vault:
```
vault write auth/kubernetes/role/vault-role \
bound_service_account_names=vault-serviceaccount \
bound_service_account_namespaces=vault \
policies=read-policy \
ttl=1h
```
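The role can be verified from inside the pod with the standard Vault CLI; it prints the bound service account, namespace, and policies:
```
vault read auth/kubernetes/role/vault-role
```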
# Create Secrets
## Via CLI
```
vault kv put secret/login pattoken=ytbuytbytbf765rb65u56rv
```
## Via UI
You can also log in to the Vault UI using the Token method; in dev mode, the root token is `root`.
# Accessing Secrets in Pods
Using the above steps, we have installed Vault and configured a Vault role (vault-role) that allows the service account (vault-serviceaccount) to access secrets stored in Vault.
Additionally, we have created two secrets, login and my-first-secret, with key-value pairs. Now let's create a simple Kubernetes deployment and try to access those secrets.
First, let's create a service account named vault-serviceaccount in the vault namespace. It is granted the permissions bound to the Vault role in the "Create a Role" step above.
Apply the manifest:
```
kubectl apply -f vault-sa.yaml -n vault
```
This deployment manifest creates a single replica of an Nginx pod configured to securely fetch secrets from Vault. The Vault Agent injects the secrets login and my-first-secret into the pod according to the specified templates. The secrets are stored in the pod's filesystem and can be accessed by the application running in the container. The vault-serviceaccount service account, which has the necessary permissions, is used to authenticate with Vault.
```
kubectl apply -f vault-secret-test-deploy.yaml -n vault
```
These annotations configure the Vault Agent to inject secrets into the pod volume:
- `vault.hashicorp.com/agent-inject: "true"`: Enables Vault Agent injection for this pod.
- `vault.hashicorp.com/agent-inject-status: "update"`: Ensures the status of secret injection is updated.
- `vault.hashicorp.com/agent-inject-secret-login: "secret/login"`: Specifies that the secret stored at `secret/login` in Vault should be injected.
- `vault.hashicorp.com/agent-inject-template-login`: Defines the template for the injected login secret, i.e. the format in which it is written.
- `vault.hashicorp.com/agent-inject-secret-my-first-secret: "secret/my-first-secret"`: Specifies that the secret stored at `secret/my-first-secret` in Vault should be injected.
- `vault.hashicorp.com/agent-inject-template-my-first-secret`: Defines the template for the injected `my-first-secret`.
- `vault.hashicorp.com/role: "vault-role"`: Specifies the Vault role used for authentication.
- `serviceAccountName` (a pod spec field, not an annotation): Uses the service account `vault-serviceaccount`, which has permission to access Vault.
Check the injected secrets in the pod's filesystem (the pod name will differ):
```
kubectl exec -it vault-test-84d9dc9986-gcxfv -n vault -- sh -c "cat /vault/secrets/login && cat /vault/secrets/my-first-secret"
```

6
08_Vault/vault-sa.yaml Normal file

@@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault-serviceaccount
labels:
app: read-vault-secret

08_Vault/vault-secret-test-deploy.yaml

@@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: vault-test
labels:
app: read-vault-secret
spec:
selector:
matchLabels:
app: read-vault-secret
replicas: 1
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-inject-status: "update"
vault.hashicorp.com/agent-inject-secret-login: "secret/login"
vault.hashicorp.com/agent-inject-template-login: |
{{- with secret "secret/login" -}}
pattoken={{ .Data.data.pattoken }}
{{- end }}
vault.hashicorp.com/agent-inject-secret-my-first-secret: "secret/my-first-secret"
vault.hashicorp.com/agent-inject-template-my-first-secret: |
{{- with secret "secret/my-first-secret" -}}
username={{ .Data.data.username }}
password={{ .Data.data.password }}
{{- end }}
vault.hashicorp.com/role: "vault-role"
labels:
app: read-vault-secret
spec:
serviceAccountName: vault-serviceaccount
containers:
- name: nginx
image: nginx

04_homepage-configmap.yaml

@@ -46,6 +46,13 @@ data:
          url: http://pi.hole
          version: 6
          key: 5ipI9bvB
    - Paperless NGX:
        icon: paperless-ng.png
        href: https://ppl.homeee.schnorbus.net
        widgets:
          - type: paperlessngx
            url: https://ppl.homeee.schnorbus.net
            token: 0cf8eb062d0ecfc0aa70611125427692cb577d68
- My Second Group:
@@ -61,20 +68,33 @@ data:
        icon: proxmox.png
        href: https://pve-83.fritz.box:8006
        description: Homepage is the best
-       widgets:
-         - type: proxmox
-           url: https://pve-83.fritz.box:8006
-           username: homepage_api@pam!homepage_api
-           password: 7676925b-3ed4-4c8b-9df5-defb4a9a0871
+       # widgets:
+       #   - type: proxmox
+       #     url: https://pve-83.fritz.box:8006
+       #     username: homepage_api@pam!homepage_api
+       #     password: 0cf8eb062d0ecfc0aa70611125427692cb577d68
    - Longhorn:
        icon: longhorn.png
        href: https://longhorn-dashboard.k8s.schnrbs.work
        description: Longhorn volume provisioning
- Party Time:
    - Immich:
        icon: immich.png
        href: https://immich.homeee.schnorbus.net
        description: Immich is awesome
        widgets:
          - type: immich
            url: https://immich.homeee.schnorbus.net
            key: deOT6z7AHok30eKWgF2bOSJuOIZXK0eONo7PrR0As
            version: 2
    - Linkwarden:
        icon: linkwarden.png
        href: https://lw.homeee.schnorbus.net
-       description: Homepage is 😎
+       description: Homepage isssss 😎
        widgets:
          - type: linkwarden
-           url: https://lw.homeee.schnorbus.net
+           url: http://docker-host-02.fritz.box:9595
            key: eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..bEvs2PcR0ZTNpb8b.Lhe1-00LlVVC97arojvhh7IK4VADR82AMAzK5sd7AcUhs2WUQmu8Q-cOAKFGVlgPgdk-w1Pa8CJJHF71opWJk85aJXkTcdl7jANwN8PqgHXsSPoqtvzX.5GFRIAMo31sw5GStVlznHQ
    - Nginx Proxy Manager:
        icon: nginx-proxy-manager.png


@@ -1,6 +1,35 @@
-Install via helm:
+## Installation
+### Install via helm
https://gethomepage.dev/installation/k8s/#install-with-helm
```
helm upgrade --install homepage jameswynn/homepage -f homepage-values.yaml --create-namespace --namespace homepage
```
### Install via deployment
```
k create ns homepage
k apply -f 01_homepage-deployment.yaml
```
## Setup Https & Certificate
```
k apply -f 02_homepage-certificate.yaml
k apply -f 03_homepage-ingress-route.yaml
```
## Upload Content
```
k apply -f 04_homepage-configmap.yaml
```
## Test
Open a browser and navigate to:
https://homepage.k8s.schnrbs.work


@@ -2,7 +2,7 @@ apiVersion: v1
kind: PersistentVolume
metadata:
  name: longhorn-test-pv
- namespace: default
+ namespace: test
spec:
  capacity:
    storage: 10Gi # set the desired storage size


@@ -2,7 +2,7 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: longhorn-test-pvc
- namespace: default
+ namespace: test
spec:
  accessModes:
    - ReadWriteOnce

10
12_reloader/README.md Normal file

@@ -0,0 +1,10 @@
```
helm install reloader stakater/reloader --namespace reloader --create-namespace
```
Or via Flux:
```
flux create source helm stakater --url https://stakater.github.io/stakater-charts --namespace reloader
flux create helmrelease my-reloader --chart stakater/reloader \
  --source HelmRepository/stakater \
  --chart-version 2.1.3 \
  --namespace reloader
```

File diff suppressed because it is too large.

Metallb_Setup/address-pool.gomplate.yaml

@@ -5,5 +5,4 @@ metadata:
  namespace: metallb-system
spec:
  addresses:
-   # - 192.168.178.220-192.168.178.225 #pve-82
-   - 192.168.178.226-192.168.178.240 #pve-83
+   - {{ .Env.METALLB_ADDRESS_RANGE }}
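For reference, with `METALLB_ADDRESS_RANGE=192.168.178.226-192.168.178.240` the template renders to roughly the following manifest; the pool name is an assumption (it is not shown in the diff), and the L2Advertisement sketch stands in for the separate advertisement.yaml:
```
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default-pool        # assumed name
  namespace: metallb-system
spec:
  addresses:
    - 192.168.178.226-192.168.178.240
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2-advertisement    # assumed name
  namespace: metallb-system
```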

66
Metallb_Setup/justfile Normal file

@@ -0,0 +1,66 @@
set fallback := true
export K8S_CONTEXT := env("K8S_CONTEXT", "")
export SERVER_IP := env("K3S_SERVER_IP","192.168.178.45")
export USER := env("K3S_USER","basti")
[private]
default:
@just --list --unsorted --list-submodules
install:
#!/bin/bash
set -euo pipefail
just env::check
METALLB_VERSION="v0.15.3"
username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
context=""
if gum confirm "Update KUBECONFIG?"; then
context=$(
gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
)
fi
if [ -n "${context}" ]; then
kubectl config use-context "${context}"
fi
kubectl apply -f "https://raw.githubusercontent.com/metallb/metallb/${METALLB_VERSION}/config/manifests/metallb-native.yaml"
gum spin --spinner dot --title "Waiting for MetalLB to be ready..." -- kubectl wait --namespace metallb-system --for=condition=available deployment --all --timeout=120s
echo "MetalLB ${METALLB_VERSION} installed successfully."
gomplate -f address-pool.gomplate.yaml | kubectl apply -f -
echo "Address pool configured."
kubectl apply -f advertisement.yaml
echo "Advertisement created."
uninstall:
#!/bin/bash
set -euo pipefail
just env::check
kubectl get namespace metallb-system &>/dev/null && kubectl delete ns metallb-system
test-deployment:
#!/bin/bash
set -euo pipefail
just env::check
kubectl apply -f test-deployment.yaml
echo "Test deployment created. You can check the service with 'kubectl get svc nginx -o wide -n test'."
echo "To clean up, run 'just test-deployment-cleanup'."
test-deployment-cleanup:
#!/bin/bash
set -euo pipefail
just env::check
kubectl delete -f test-deployment.yaml
echo "Test deployment and service deleted."


@@ -9,4 +9,4 @@ spec:
    name: cloudflare-cluster-issuer
    kind: ClusterIssuer
  dnsNames:
-   - schnipo.k8s.schnrbs.work
+   - schnipo.{{.Env.EXTERNAL_DOMAIN}}


@@ -0,0 +1,43 @@
apiVersion: v1
kind: Namespace
metadata:
name: dishes
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dish-schnipo
namespace: dishes
labels:
app: dishes
spec:
replicas: 3
selector:
matchLabels:
app: dishes
template:
metadata:
labels:
app: dishes
spec:
containers:
- name: dish-schnipo
image: bschnorbus/dish-schnipo
ports:
- containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
name: dish-schnipo
namespace: dishes
spec:
type: ClusterIP
selector:
app: dishes
ports:
- port: 80
targetPort: 8080
protocol: TCP


@@ -7,10 +7,12 @@ spec:
  entryPoints:
    - websecure
  routes:
-   - match: Host(`schnipo.k8s.schnrbs.work`)
+   - match: Host(`schnipo.{{.Env.EXTERNAL_DOMAIN}}`)
      kind: Rule
      services:
        - name: schnipo
-         port: 8080
+         port: 80
+         targetPort: 8080
  tls:
    secretName: schnipo-certificate-secret

test/nginx-certificate.yaml

@@ -9,4 +9,4 @@ spec:
    name: cloudflare-cluster-issuer
    kind: ClusterIssuer
  dnsNames:
-   - nginx-test.k8s.schnrbs.work
+   - nginx-test.{{.Env.EXTERNAL_DOMAIN}}

Metallb_Setup/test-deployment.yaml

@@ -0,0 +1,43 @@
apiVersion: v1
kind: Namespace
metadata:
name: test
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
namespace: test
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:latest
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx
namespace: test
spec:
type: LoadBalancer
selector:
app: nginx
ports:
- port: 80
targetPort: 80
protocol: TCP


@@ -7,10 +7,10 @@ spec:
  entryPoints:
    - websecure
  routes:
-   - match: Host(`nginx-test.k8s.schnrbs.work`)
+   - match: Host(`nginx-test.{{.Env.EXTERNAL_DOMAIN}}`)
      kind: Rule
      services:
        - name: nginx
          port: 80
  tls:
    secretName: nginx-certificate-secret


@@ -7,7 +7,7 @@ metadata:
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
  rules:
-   - host: nginx-test.k8s.schnrbs.work
+   - host: nginx-test.int.schnrbs.work
      http:
        paths:
          - path: /
@@ -19,5 +19,5 @@ spec:
              number: 80
  tls:
    - hosts:
-       - nginx-test.k8s.schnrbs.work
+       - nginx-test.int.schnrbs.work
      secretName: nginx-certificate-secret

Traefik/README.md

@@ -4,7 +4,7 @@
helm repo add traefik https://helm.traefik.io/traefik
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
## Cert-Manager
@@ -23,11 +23,44 @@ can be used throughout the whole cluster, not limited to a specific namespace.
i.e. general issuer for all namespaces in cluster.
-## Troubleshooting steps
+## Test Deployment
```
k create ns test
kubectl create deploy nginx --image=nginx -n test
k create svc -n test clusterip nginx --tcp=80
k scale --replicas=3 deployment/nginx -n test
```
## Install Traefik & Cert-Manager
```
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
traefik-dashboard.k8s.schnrbs.work
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml
k apply -f cert-manager-issuer-secret.yaml
k get secret -n cert-manager
k apply -f cert-manager-cluster-issuer.yaml
```
## Switch Test Deployment to https
```
k apply -f test/nginx-certificate.yaml
k apply -f test/nginx-ingress.yaml
```
## Troubleshooting steps
```
k get po -n test -o wide
k create svc -n test clusterip nginx
k create svc -n test clusterip nginx --tcp=80
@@ -41,28 +74,23 @@ k apply -f traefik_lempa/nginx-ingress.yaml
k get svc -n test
k get ingress
k get ingress -n test
-helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
-helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
k get svc ingressRoute
k get svc ingressRoutes
k get svc ingressroutes.traefik.io
k get ingressroutes.traefik.io --all-namespaces
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
-exit
cert-manager-values.yaml
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values traefik_lempa/cert-manager-values.yaml
echo -n 'P96My4uiHudZtiC2ymjSGQ0174CoRBnI9ztmA0Wh' | base64
k get po
alias k=kubectl
k get po
-k apply traefik_lempa/cert-manager-issuer-secret.yaml
k apply -f traefik_lempa/cert-manager-issuer-secret.yaml
k get secret
k get secrets
-k get secret -n cert-manager
-k apply -f traefik_lempa/cert-manager-cluster-issuer.yaml
k get clusterissuers.cert-manager.io
-k apply -f traefik_lempa/nginx-certificate.yaml
-k apply -f traefik_lempa/nginx-ingress.yaml
-k apply -f traefik_lempa/cert-manager-cluster-issuer.yaml
```

Traefik/cert-manager-cluster-issuer-gomplate.yaml

@@ -4,7 +4,7 @@ metadata:
  name: cloudflare-cluster-issuer
spec:
  acme:
-   email: hello@schnorbus.net
+   email: {{ .Env.ACME_EMAIL }}
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: cloudflare-acme-key

Traefik/cert-manager-issuer-secret-gomplate.yaml

@@ -5,4 +5,4 @@ metadata:
  namespace: cert-manager
type: Opaque
stringData:
- api-token: DgU4SMUpQVAoS8IisGxnSQCUI7PbclhvegdqF9I1
+ api-token: {{ .Env.CLOUDFLARE_API_TOKEN }}
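Put together, `just setup-cluster-issuer` renders roughly the following ClusterIssuer; the dns01 solver block and the secret name are assumptions, since the diff only shows the changed lines:
```
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: cloudflare-cluster-issuer
spec:
  acme:
    email: {{ .Env.ACME_EMAIL }}
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: cloudflare-acme-key
    solvers:
      - dns01:
          cloudflare:
            apiTokenSecretRef:
              name: cloudflare-issuer-secret   # assumed: must match the Secret above
              key: api-token
```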

62
Traefik/justfile Normal file

@@ -0,0 +1,62 @@
set fallback:=true
export CERT_MANAGER_NAMESPACE := env("CERT_MANAGER_NAMESPACE", "cert-manager")
export TRAEFIK_NAMESPACE := env("TRAEFIK_NAMESPACE", "traefik")
add-helm-repos:
helm repo add traefik https://helm.traefik.io/traefik --force-update
helm repo add jetstack https://charts.jetstack.io --force-update
helm repo update
install:
#!/bin/bash
set -euo pipefail
just env::check
just add-helm-repos
helm upgrade traefik traefik/traefik \
--install \
--cleanup-on-fail \
--namespace ${TRAEFIK_NAMESPACE} \
--create-namespace \
--values traefik-values.yaml
helm upgrade cert-manager jetstack/cert-manager \
--install \
--cleanup-on-fail \
--namespace ${CERT_MANAGER_NAMESPACE} \
--create-namespace \
--values cert-manager-values.yaml
uninstall:
#!/bin/bash
set -euo pipefail
just env::check
helm uninstall traefik --namespace ${TRAEFIK_NAMESPACE} || true
helm uninstall cert-manager --namespace ${CERT_MANAGER_NAMESPACE} || true
setup-cluster-issuer:
#!/bin/bash
set -euo pipefail
just env::check
gomplate -f cert-manager-issuer-secret-gomplate.yaml | kubectl apply -f -
gomplate -f cert-manager-cluster-issuer-gomplate.yaml | kubectl apply -f -
# Get status of cert-manager components
status:
#!/bin/bash
set -euo pipefail
echo "=== cert-manager Components Status ==="
echo ""
echo "Namespace: ${CERT_MANAGER_NAMESPACE}"
echo ""
echo "Pods:"
kubectl get pods -n ${CERT_MANAGER_NAMESPACE}
echo ""
echo "Services:"
kubectl get services -n ${CERT_MANAGER_NAMESPACE}
echo ""
echo "CRDs:"
kubectl get crd | grep cert-manager.io

Traefik/traefik-values.yaml

@@ -11,5 +11,5 @@ ingressRoute:
  dashboard:
    enabled: true
    entryPoints: [web, websecure]
-   matchRule: Host(`traefik-dashboard.k8s.schnrbs.work`)
+   matchRule: Host(`traefik-dashboard.{{ .Env.EXTERNAL_DOMAIN }}`)

9
env/env.local.gomplate vendored Normal file

@@ -0,0 +1,9 @@
# shellcheck disable=all
K8S_CONTEXT={{ .Env.K8S_CONTEXT }}
K8S_MASTER_NODE_NAME={{ .Env.K8S_MASTER_NODE_NAME }}
SERVER_IP={{ .Env.SERVER_IP }}
AGENT_IP={{ .Env.AGENT_IP }}
METALLB_ADDRESS_RANGE={{ .Env.METALLB_ADDRESS_RANGE }}
CLOUDFLARE_API_TOKEN={{ .Env.CLOUDFLARE_API_TOKEN }}
ACME_EMAIL={{ .Env.ACME_EMAIL }}
EXTERNAL_DOMAIN={{ .Env.EXTERNAL_DOMAIN }}

125
env/justfile vendored Normal file

@@ -0,0 +1,125 @@
set fallback := true
export ENV_FILE := ".env.local"
export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export SERVER_IP := env("SERVER_IP", "")
export AGENT_IP := env("AGENT_IP", "")
check:
#!/bin/bash
set -euo pipefail
if [ -z "${K8S_CONTEXT}" ]; then
echo "K8S_CONTEXT is not set. Please execute 'just env::setup'" >&2
exit 1
fi
if [ -z "${K8S_MASTER_NODE_NAME}" ]; then
echo "K8S_MASTER_NODE_NAME is not set. Please execute 'just env::setup'" >&2
exit 1
fi
if [ -z "${SERVER_IP}" ]; then
echo "SERVER_IP is not set. Please execute 'just env::setup'" >&2
exit 1
fi
if [ -z "${AGENT_IP}" ]; then
echo "AGENT_IP is not set. Please execute 'just env::setup'" >&2
exit 1
fi
setup:
#!/bin/bash
set -euo pipefail
if [ -f ../.env.local ]; then
echo ".env.local already exists." >&2
if gum confirm "Do you want to overwrite it?"; then
K8S_CONTEXT=""
K8S_MASTER_NODE_NAME=""
SERVER_IP=""
AGENT_IP=""
elif [[ $? -eq 130 ]]; then
echo "Setup cancelled by user." >&2
exit 1
else
echo "Aborting setup." >&2
exit 1
fi
fi
while [ -z "${K8S_CONTEXT}" ]; do
if ! K8S_CONTEXT=$(
gum input --prompt="Context name: " \
--width=100 --placeholder="context"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${K8S_MASTER_NODE_NAME}" ]; do
if ! K8S_MASTER_NODE_NAME=$(
gum input --prompt="Master Node Hostname: " \
--width=100 --placeholder="Master Node Name"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${SERVER_IP}" ]; do
if ! SERVER_IP=$(
gum input --prompt="IP of Server/Master Node: " \
--width=100 --placeholder="Master Node IP"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${AGENT_IP}" ]; do
if ! AGENT_IP=$(
gum input --prompt="IP of Agent Node: " \
--width=100 --placeholder="Agent Node IP"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${METALLB_ADDRESS_RANGE}" ]; do
if ! METALLB_ADDRESS_RANGE=$(
gum input --prompt="IP Range for LoadBalancer: " \
--width=100 --placeholder="[x.x.x.x-y.y.y.y]"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${CLOUDFLARE_API_TOKEN}" ]; do
if ! CLOUDFLARE_API_TOKEN=$(
gum input --prompt="Cloudflare API Token: " \
--width=100 --placeholder="API Token" --password
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${ACME_EMAIL}" ]; do
if ! ACME_EMAIL=$(
gum input --prompt="ACME Email for Cert-Manager: " \
--width=100 --placeholder="Email"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${EXTERNAL_DOMAIN}" ]; do
if ! EXTERNAL_DOMAIN=$(
gum input --prompt="External Domain: " \
--width=100 --placeholder="Domain"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
echo "Generating .env.local file..."
rm -f ../.env.local
gomplate -f env.local.gomplate -o ../.env.local

17
gitops/README.md Normal file

@@ -0,0 +1,17 @@
https://www.reddit.com/r/GitOps/comments/1ih3b4a/discussion_setting_up_fluxcd_on_k3s_for_home_labs/
https://bash.ghost.io/k8s-home-lab-gitops-with-fluxcd/
# Setup using internal Gitea server
## Create a Gitea personal access token and export it as an env var
```
export GITEA_TOKEN=<my-token>
```
## Bootstrap
```
flux bootstrap gitea \
  --repository=k3s-homelab \
  --branch=main \
  --personal \
  --owner baschno \
  --hostname gitty.homeee.schnorbus.net \
  --ssh-hostname=gitty.fritz.box:2221 \
  --verbose \
  --path=./clusters/homelab
```
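After bootstrapping, the installation can be verified with the standard Flux commands:
```
flux check
flux get sources git
kubectl get pods -n flux-system
```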
https://bash.ghost.io/secure-kubernetes-secrets-disaster-recovery-with-sops-gitops-fluxcd/
"Make a 4×4 grid starting with the 1880s. In each section, I should appear styled according to that decade (clothing, hairstyle, facial hair, accessories). Use colors, background, & film style accordingly."

12
justfile Normal file

@@ -0,0 +1,12 @@
set dotenv-filename := ".env.local"
export PATH := "./node_modules/.bin:" + env_var('PATH')
[private]
default:
@just --list --unsorted --list-submodules
mod env
mod BasicSetup '01_Basic_Setup'
mod MetalLbSetup 'Metallb_Setup'
mod Traefik
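With the modules wired up, a typical bring-up runs from the repo root (recipe names as defined in the module justfiles above):
```
just env::setup            # generate .env.local interactively
just BasicSetup::install   # k3sup install, optional local registry
just MetalLbSetup::install # MetalLB plus address pool
just Traefik::install      # Traefik and cert-manager
```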


@@ -8,11 +8,12 @@ First, you should make sure that Longhorn is installed on your cluster
#### Node Labeling
-In the case not all nodes should provide disk
+In the case not all nodes should provide disk, e.g. certain nodes have special/fast disks.
+In this case the StorageClass needs to be adapted and given a nodeSelector [1]; see the values sketch below the link.
```
k label nodes k3s-prod-worker-{1..3} node.longhorn.io/create-default-disk=true
```
[1] https://longhorn.io/kb/tip-only-use-storage-on-a-set-of-nodes/
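A sketch of the matching Helm values fragment, assuming the chart exposes this under `defaultSettings` (see [1]); with it set, only labeled nodes get a default Longhorn disk:
```
# longhorn-values.yaml (fragment; key name assumed from [1])
defaultSettings:
  createDefaultDiskLabeledNodes: true
```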
#### With Helm:
@@ -21,6 +22,45 @@ helm repo add longhorn https://charts.longhorn.io
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --values longhorn-values.yaml
```
#### Adding additional disks
https://medium.com/btech-engineering/longhorn-storage-solution-for-kubernetes-cluster-645bc1b98a5e
Add a disk to the VM in Proxmox; on the worker node it shows up as a new block device. Check with:
```
$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 30G 0 disk
├─sda1 8:1 0 29G 0 part /
├─sda14 8:14 0 4M 0 part
├─sda15 8:15 0 106M 0 part /boot/efi
└─sda16 259:0 0 913M 0 part /boot
sdb 8:16 0 250G 0 disk
sr0 11:0 1 4M 0 rom
```
The new 250G disk is /dev/sdb. Partition, format, and mount it:
```
fdisk /dev/sdb
# Hit n(new), p(primary), Enter, Enter
# w(write to disk and exit)
mkfs.ext4 /dev/sdb1
mkdir /mnt/nvmedisk1
nano /etc/fstab
# then add this line to /etc/fstab:
/dev/sdb1 /mnt/nvmedisk1 ext4 defaults 0 0
systemctl daemon-reload
mount -a
```
### Check via UI
```

7
mise.toml Normal file

@@ -0,0 +1,7 @@
[tools]
jq = '1.8.1'
k3sup = '0.13.11'
helm = '3.19.0'
gum = '0.16.2'
gomplate = '4.3.3'
just = "1.42.4"
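With this file in place, the pinned tools install in one step (standard mise usage):
```
mise install   # installs jq, k3sup, helm, gum, gomplate and just at the pinned versions
mise ls        # show what is active in this directory
```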


@@ -0,0 +1,19 @@
NAME: kube-prometheus-stack
LAST DEPLOYED: Wed Jun 11 19:32:51 2025
NAMESPACE: monitoring
STATUS: deployed
REVISION: 1
NOTES:
kube-prometheus-stack has been installed. Check its status by running:
kubectl --namespace monitoring get pods -l "release=kube-prometheus-stack"
Get Grafana 'admin' user password by running:
kubectl --namespace monitoring get secrets kube-prometheus-stack-grafana -o jsonpath="{.data.admin-password}" | base64 -d ; echo
Access Grafana local instance:
export POD_NAME=$(kubectl --namespace monitoring get pod -l "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=kube-prometheus-stack" -oname)
kubectl --namespace monitoring port-forward $POD_NAME 3000
Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.

37
statefulset/depl.yaml Normal file

@@ -0,0 +1,37 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: web
namespace: test
spec:
selector:
matchLabels:
app: nginx # has to match .spec.template.metadata.labels
serviceName: "nginx"
replicas: 3 # by default is 1
minReadySeconds: 10 # by default is 0
template:
metadata:
labels:
app: nginx # has to match .spec.selector.matchLabels
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: registry.k8s.io/nginx-slim:0.24
ports:
- containerPort: 80
name: web
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html
volumeClaimTemplates:
- metadata:
name: www
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "longhorn"
resources:
requests:
storage: 1Gi
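Because of the volumeClaimTemplates above, each replica gets a stable name and its own PVC; a quick check after applying (standard kubectl; Longhorn must be installed for the claims to bind):
```
kubectl get pods -n test -l app=nginx   # web-0, web-1, web-2
kubectl get pvc -n test                 # www-web-0, www-web-1, www-web-2
```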

18
statefulset/svc.yaml Normal file

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
name: nginx
namespace: test
labels:
app: nginx
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: nginx
# Note: a Service has no pod template. The node selector below belongs in the
# StatefulSet's pod spec (statefulset/depl.yaml), not in this Service:
#   nodeSelector:
#     node.longhorn.io/create-default-disk: "true"