Compare commits

11 Commits

Author SHA1 Message Date
baschno 4f5a18c84c install incl agent ready 2025-12-22 20:41:06 +01:00
baschno 7a54346331 add local container registry 2025-12-22 20:15:48 +01:00
baschno 5abc0de38a add just and mise tool support 2025-12-22 11:21:20 +01:00
baschno 29674ae504 adding vault in dev mode 2025-12-20 11:32:56 +01:00
baschno 6abe5d1a8f optiona 2025-11-22 19:39:35 +01:00
baschno 67a6c414f2 updating ip range 2025-11-22 19:39:26 +01:00
baschno 08212c26a6 taint 2025-11-22 09:33:41 +01:00
baschno e4adbfd0b2 add few links 2025-08-31 17:16:55 +02:00
baschno d7db562a23 helm and flux 2025-08-22 18:10:24 +02:00
baschno 7896130d05 longhorn nodeselector doku 2025-08-21 21:07:31 +02:00
baschno efcb4ee172 . 2025-08-20 21:50:18 +02:00
17 changed files with 594 additions and 9 deletions

.gitignore vendored Normal file

@@ -0,0 +1 @@
.env.local


@@ -34,4 +34,30 @@ Rancher Installation
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
# Prevent scheduling on master (optional)
```
kubectl taint nodes master node-role.kubernetes.io/master=:NoSchedule
```

# Just Setup // K3sup
```
export SERVER_IP=192.168.178.45
export AGENT_IP=192.168.178.75
export USER=basti

k3sup install \
  --cluster \
  --ip $SERVER_IP \
  --user $USER \
  --merge \
  --local-path $HOME/.kube/config \
  --context my-k3s

k3sup join \
  --ip $AGENT_IP \
  --server-ip $SERVER_IP \
  --user $USER
```
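A quick sanity check after the join, assuming the merged kubeconfig and the `my-k3s` context from above:
```
kubectl config use-context my-k3s
kubectl get nodes -o wide   # both the server and the agent should report Ready
```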

01_Basic_Setup/justfile Normal file

@@ -0,0 +1,135 @@
set fallback := true

export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export EXTERNAL_K8S_HOST := env("EXTERNAL_K8S_HOST", "")
export KEYCLOAK_HOST := env("KEYCLOAK_HOST", "")
export KEYCLOAK_REALM := env("KEYCLOAK_REALM", "buunstack")
export K8S_OIDC_CLIENT_ID := env("K8S_OIDC_CLIENT_ID", "k8s")
export K3S_ENABLE_REGISTRY := env("K3S_ENABLE_REGISTRY", "false")
export SERVER_IP := env("K3S_SERVER_IP", "192.168.178.45")
export AGENT_IP := env("K3S_AGENT_IP", "192.168.178.75")
export USER := env("K3S_USER", "basti")

[private]
default:
    @just --list --unsorted --list-submodules

install:
    #!/bin/bash
    set -euo pipefail
    just env::check
    username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
    kubeconfig=""
    context=""
    if gum confirm "Update KUBECONFIG?"; then
        kubeconfig=$(
            gum input --prompt="KUBECONFIG file: " --value="${HOME}/.kube/config" --width=100
        )
        context=$(
            gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
        )
    fi
    args=(
        "install"
        "--context" "${K8S_CONTEXT}"
        "--host" "${K8S_MASTER_NODE_NAME}"
        "--user" "${username}"
    )
    if [ -n "${kubeconfig}" ]; then
        mkdir -p "$(dirname "${kubeconfig}")"
        args+=("--local-path" "${kubeconfig}" "--merge")
    fi
    echo "Running: k3sup ${args[*]}"
    k3sup "${args[@]}"
    if [ -n "${context}" ]; then
        kubectl config use-context "${context}"
    fi
    if [ "${K3S_ENABLE_REGISTRY}" = "true" ]; then
        echo "Setting up local Docker registry..."
        # Deploy Docker registry to cluster
        kubectl apply -f ./registry/registry.yaml
        # Set Pod Security Standard for registry namespace
        kubectl label namespace registry pod-security.kubernetes.io/enforce=restricted --overwrite
        # Wait for registry deployment
        echo "Waiting for registry to be ready..."
        kubectl wait --for=condition=available --timeout=60s deployment/registry -n registry
        # Configure registries.yaml for k3s
        just configure-registry
        echo "✓ Local Docker registry deployed and configured"
        echo ""
        echo "Registry accessible at:"
        echo "  localhost:30500"
        echo ""
        echo "Usage:"
        echo "  export DOCKER_HOST=ssh://${K8S_MASTER_NODE_NAME}"
        echo "  docker build -t localhost:30500/myapp:latest ."
        echo "  docker push localhost:30500/myapp:latest"
        echo "  kubectl run myapp --image=localhost:30500/myapp:latest"
    fi
    echo "k3s cluster installed on ${K8S_MASTER_NODE_NAME}."

uninstall:
    #!/bin/bash
    set -euo pipefail
    if gum confirm "Uninstall k3s from ${K8S_MASTER_NODE_NAME}?"; then
        if gum confirm "Also remove Agent node at ${AGENT_IP}?"; then
            echo "Removing Agent node at ${AGENT_IP}..."
            ssh "${AGENT_IP}" "/usr/local/bin/k3s-agent-uninstall.sh"
        fi
        echo "Removing content of Server node..."
        ssh "${K8S_MASTER_NODE_NAME}" "/usr/local/bin/k3s-uninstall.sh"
        echo "Cleaning up kubeconfig entries..."
        cluster_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.cluster // empty")
        user_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.user // empty")
        if kubectl config get-contexts "${K8S_CONTEXT}" &>/dev/null; then
            kubectl config delete-context "${K8S_CONTEXT}"
            echo "Deleted context: ${K8S_CONTEXT}"
        fi
        if [ -n "${cluster_name}" ] && kubectl config get-clusters | grep -q "^${cluster_name}$"; then
            kubectl config delete-cluster "${cluster_name}"
            echo "Deleted cluster: ${cluster_name}"
        fi
        if [ -n "${user_name}" ] && kubectl config get-users | grep -q "^${user_name}$"; then
            kubectl config delete-user "${user_name}"
            echo "Deleted user: ${user_name}"
        fi
        echo "k3s cluster uninstalled from ${K8S_CONTEXT}."
    else
        echo "Uninstallation cancelled." >&2
        exit 1
    fi

add-agent:
    #!/bin/bash
    set -euo pipefail
    just env::check
    username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
    new_agent_ip=$(gum input --prompt="Agent IP to join cluster: " --value="${AGENT_IP}" --width=100)
    args=(
        "join"
        "--ip" "${new_agent_ip}"
        "--server-ip" "${SERVER_IP}"
        "--user" "${username}"
    )
    echo "Running: k3sup ${args[*]}"
    k3sup "${args[@]}"
    echo "Agent node at ${new_agent_ip} added to cluster."


@@ -0,0 +1,4 @@
configs:
  "localhost:30500":
    tls:
      insecure_skip_verify: true
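k3s only reads this file from /etc/rancher/k3s/registries.yaml on each node and needs a restart to pick it up. The `configure-registry` recipe referenced in the justfile is not part of this diff; a manual sketch of the same step:
```
scp registries.yaml "${K8S_MASTER_NODE_NAME}:/tmp/registries.yaml"
ssh "${K8S_MASTER_NODE_NAME}" \
  "sudo mkdir -p /etc/rancher/k3s \
   && sudo mv /tmp/registries.yaml /etc/rancher/k3s/registries.yaml \
   && sudo systemctl restart k3s"
```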

01_Basic_Setup/registry/registry.yaml Normal file

@@ -0,0 +1,109 @@
apiVersion: v1
kind: Namespace
metadata:
  name: registry
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry
  namespace: registry
  labels:
    app: registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
  template:
    metadata:
      labels:
        app: registry
    spec:
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
        fsGroup: 65534
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: registry
          image: registry:2
          ports:
            - containerPort: 5000
              name: http
          resources:
            requests:
              cpu: 25m
              memory: 128Mi
            limits:
              cpu: 2000m
              memory: 20Gi
          env:
            - name: REGISTRY_STORAGE_DELETE_ENABLED
              value: "true"
            - name: REGISTRY_HTTP_ADDR
              value: "0.0.0.0:5000"
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 65534
            capabilities:
              drop:
                - ALL
          volumeMounts:
            - name: registry-data
              mountPath: /var/lib/registry
            - name: tmp
              mountPath: /tmp
          livenessProbe:
            httpGet:
              path: /v2/
              port: 5000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /v2/
              port: 5000
            initialDelaySeconds: 5
            periodSeconds: 5
      volumes:
        - name: registry-data
          emptyDir: {}
        - name: tmp
          emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: registry
  namespace: registry
  labels:
    app: registry
spec:
  selector:
    app: registry
  ports:
    - port: 5000
      targetPort: 5000
      name: http
  type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
  name: registry-nodeport
  namespace: registry
  labels:
    app: registry
spec:
  selector:
    app: registry
  ports:
    - port: 5000
      targetPort: 5000
      nodePort: 30500
      name: http
  type: NodePort
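Once the NodePort service is up, the registry's standard HTTP API gives a quick smoke test (run on or against a cluster node):
```
curl -s http://localhost:30500/v2/_catalog
# {"repositories":[]}   on a fresh registry
```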

08_Vault/README.md Normal file

@@ -0,0 +1,131 @@
# Helm
## Installation
```
helm repo add hashicorp https://helm.releases.hashicorp.com

helm install vault hashicorp/vault \
  --set='server.dev.enabled=true' \
  --set='ui.enabled=true' \
  --set='ui.serviceType=LoadBalancer' \
  --namespace vault \
  --create-namespace
```
This runs Vault in "dev" mode, which requires no further setup, no state management, and no initialization: there is nothing to unseal and no keys to store, which makes it useful for experimenting. All data is lost on restart, so do not use dev mode for anything other than experimenting. See https://developer.hashicorp.com/vault/docs/concepts/dev-server for details.
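Besides the LoadBalancer UI, the CLI can be reached through a port-forward; in dev mode the chart's default root token is `root` (a minimal sketch, assuming default chart values):
```
kubectl port-forward -n vault svc/vault 8200:8200 &
export VAULT_ADDR=http://127.0.0.1:8200
vault login root
```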
## Output
```
$ kubectl get all -n vault
NAME READY STATUS RESTARTS AGE
pod/vault-0 1/1 Running 0 2m39s
pod/vault-agent-injector-8497dd4457-8jgcm 1/1 Running 0 2m39s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/vault ClusterIP 10.245.225.169 <none> 8200/TCP,8201/TCP 2m40s
service/vault-agent-injector-svc ClusterIP 10.245.32.56 <none> 443/TCP 2m40s
service/vault-internal ClusterIP None <none> 8200/TCP,8201/TCP 2m40s
service/vault-ui LoadBalancer 10.245.103.246 24.132.59.59 8200:31764/TCP 2m40s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/vault-agent-injector 1/1 1 1 2m40s
NAME DESIRED CURRENT READY AGE
replicaset.apps/vault-agent-injector-8497dd4457 1 1 1 2m40s
NAME READY AGE
statefulset.apps/vault 1/1 2m40s
```
# Configuration
## Enter Pod
```
kubectl exec -it vault-0 -n vault -- /bin/sh
```
## Create policy
```
cat <<EOF > /home/vault/read-policy.hcl
path "secret*" {
capabilities = ["read"]
}
EOF
```
## Apply
```
vault policy write read-policy /home/vault/read-policy.hcl
```
## Enable Kubernetes
```
vault auth enable kubernetes
```
## Configure Kubernetes Auth
Configure Vault to communicate with the Kubernetes API server:
```
vault write auth/kubernetes/config \
  token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
  kubernetes_host=https://${KUBERNETES_PORT_443_TCP_ADDR}:443 \
  kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
```
## Create a Role
Create a role (vault-role) that binds the above policy to a Kubernetes service account (vault-serviceaccount) in a specific namespace. This allows the service account to access secrets stored in Vault:
```
vault write auth/kubernetes/role/vault-role \
  bound_service_account_names=vault-serviceaccount \
  bound_service_account_namespaces=vault \
  policies=read-policy \
  ttl=1h
```
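To confirm the binding, the role can be read back from inside the pod:
```
vault read auth/kubernetes/role/vault-role
```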
# Create Secrets
## Via CLI
```
vault kv put secret/login pattoken=ytbuytbytbf765rb65u56rv
```
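The test deployment below also expects a second secret, `my-first-secret`, with `username` and `password` keys; it can be created the same way (the values here are placeholders):
```
vault kv put secret/my-first-secret username=admin password=changeme
vault kv get secret/login   # verify what was stored
```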
## Via UI
Now you can log in to Vault using the Token method; in dev mode, use the initial root token `root`.
# Accessing Secrets in Pods
Using the steps above, we have installed Vault and configured a Vault role (vault-role) that allows the service account (vault-serviceaccount) to access secrets stored in Vault.
Additionally, we have created two secrets, login and my-first-secret, each with key-value pairs. Now let's create a simple Kubernetes deployment and try to access those secrets.
First, let's create a service account named vault-serviceaccount in the vault namespace. This service account is granted the permissions of the Vault role defined in the "Create a Role" step above.
Apply the manifest with the following command:
```
kubectl apply -f vault-sa.yaml -n vault
```
This deployment manifest creates a single replica of an Nginx pod configured to securely fetch secrets from Vault. The Vault Agent injects the secrets login and my-first-secret into the pod according to the specified templates. The secrets are stored in the pod's filesystem and can be accessed by the application running in the container. The vault-serviceaccount service account, which has the necessary permissions, is used to authenticate with Vault.
```
kubectl apply -f vault-secret-test-deploy.yaml -n vault
```
These annotations are used to configure the Vault Agent to inject secrets into the pod volume.
- `vault.hashicorp.com/agent-inject: "true"`: Enables Vault Agent injection for this pod.
- `vault.hashicorp.com/agent-inject-status: "update"`: Ensures the status of secret injection is updated.
- `vault.hashicorp.com/agent-inject-secret-login: "secret/login"`: Specifies that the secret stored at `secret/login` in Vault should be injected.
- `vault.hashicorp.com/agent-inject-template-login`: Defines the template for the injected `login` secret, specifying the format in which the secret will be written.
- `vault.hashicorp.com/agent-inject-secret-my-first-secret: "secret/my-first-secret"`: Specifies that the secret stored at `secret/my-first-secret` in Vault should be injected.
- `vault.hashicorp.com/agent-inject-template-my-first-secret`: Defines the template for the injected `my-first-secret`, specifying the format in which the secret will be written.
- `vault.hashicorp.com/role: "vault-role"`: Specifies the Vault role to be used for authentication.
- `serviceAccountName`: Uses the service account `vault-serviceaccount`, which has permission to access Vault.
Use the following command to read the injected secrets from the pod volume:
```
kubectl exec -it -n vault vault-test-84d9dc9986-gcxfv -- sh -c "cat /vault/secrets/login && cat /vault/secrets/my-first-secret"
```
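The pod name suffix is instance-specific; look it up via the deployment's label first:
```
kubectl get pods -n vault -l app=read-vault-secret
```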

08_Vault/vault-sa.yaml Normal file

@@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: vault-serviceaccount
  labels:
    app: read-vault-secret

08_Vault/vault-secret-test-deploy.yaml Normal file

@@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: vault-test
  labels:
    app: read-vault-secret
spec:
  selector:
    matchLabels:
      app: read-vault-secret
  replicas: 1
  template:
    metadata:
      annotations:
        vault.hashicorp.com/agent-inject: "true"
        vault.hashicorp.com/agent-inject-status: "update"
        vault.hashicorp.com/agent-inject-secret-login: "secret/login"
        vault.hashicorp.com/agent-inject-template-login: |
          {{- with secret "secret/login" -}}
          pattoken={{ .Data.data.pattoken }}
          {{- end }}
        vault.hashicorp.com/agent-inject-secret-my-first-secret: "secret/my-first-secret"
        vault.hashicorp.com/agent-inject-template-my-first-secret: |
          {{- with secret "secret/my-first-secret" -}}
          username={{ .Data.data.username }}
          password={{ .Data.data.password }}
          {{- end }}
        vault.hashicorp.com/role: "vault-role"
      labels:
        app: read-vault-secret
    spec:
      serviceAccountName: vault-serviceaccount
      containers:
        - name: nginx
          image: nginx


@@ -46,6 +46,13 @@ data:
url: http://pi.hole url: http://pi.hole
version: 6 version: 6
key: 5ipI9bvB key: 5ipI9bvB
- Paperless NGX:
icon: paperless-ng.png
href: https://ppl.homeee.schnorbus.net
widgets:
- type: paperlessngx
url: https://ppl.homeee.schnorbus.net
token: 0cf8eb062d0ecfc0aa70611125427692cb577d68
- My Second Group: - My Second Group:
@@ -61,24 +68,33 @@ data:
icon: proxmox.png icon: proxmox.png
href: https://pve-83.fritz.box:8006 href: https://pve-83.fritz.box:8006
description: Homepage is the best description: Homepage is the best
widgets: # widgets:
- type: proxmox # - type: proxmox
url: https://pve-83.fritz.box:8006 # url: https://pve-83.fritz.box:8006
username: homepage_api@pam!homepage_api # username: homepage_api@pam!homepage_api
password: 7676925b-3ed4-4c8b-9df5-defb4a9a0871 # password: 0cf8eb062d0ecfc0aa70611125427692cb577d68
- Longhorn: - Longhorn:
icon: longhorn.png icon: longhorn.png
href: https://longhorn-dashboard.k8s.schnrbs.work href: https://longhorn-dashboard.k8s.schnrbs.work
description: Longhorn volume provisioning description: Longhorn volume provisioning
- Party Time: - Party Time:
- Immich:
icon: immich.png
href: https://immich.homeee.schnorbus.net
description: Immich is awesome
widgets:
- type: immich
url: https://immich.homeee.schnorbus.net
key: deOT6z7AHok30eKWgF2bOSJuOIZXK0eONo7PrR0As
version: 2
- Linkwarden: - Linkwarden:
icon: linkwarden.png icon: linkwarden.png
href: https://lw.homeee.schnorbus.net href: https://lw.homeee.schnorbus.net
description: Homepage isssss 😎 description: Homepage isssss 😎
widgets: widgets:
- type: linkwarden - type: linkwarden
url: https://lw.homeee.schnorbus.net url: http://docker-host-02.fritz.box:9595
key: eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..bEvs2PcR0ZTNpb8b.Lhe1-00LlVVC97arojvhh7IK4VADR82AMAzK5sd7AcUhs2WUQmu8Q-cOAKFGVlgPgdk-w1Pa8CJJHF71opWJk85aJXkTcdl7jANwN8PqgHXsSPoqtvzX.5GFRIAMo31sw5GStVlznHQ key: eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..bEvs2PcR0ZTNpb8b.Lhe1-00LlVVC97arojvhh7IK4VADR82AMAzK5sd7AcUhs2WUQmu8Q-cOAKFGVlgPgdk-w1Pa8CJJHF71opWJk85aJXkTcdl7jANwN8PqgHXsSPoqtvzX.5GFRIAMo31sw5GStVlznHQ
- Nginx Proxy Manager: - Nginx Proxy Manager:
icon: nginx-proxy-manager.png icon: nginx-proxy-manager.png


@@ -2,3 +2,9 @@
helm install reloader stakater/reloader --namespace reloader --create-namespace
flux create source helm stakater --url https://stakater.github.io/stakater-charts --namespace reloader
flux create helmrelease my-reloader --chart stakater/reloader \
  --source HelmRepository/stakater \
  --chart-version 2.1.3 \
  --namespace reloader
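Reloader only acts on workloads that opt in; a deployment can be marked with its documented auto annotation (`my-app` is a placeholder name):
```
kubectl annotate deployment my-app reloader.stakater.com/auto="true"
```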


@@ -6,4 +6,4 @@ metadata:
spec:
  addresses:
  # - 192.168.178.220-192.168.178.225 #pve-82
-  - 192.168.178.226-192.168.178.240 #pve-83
+  - 192.168.178.160-192.168.178.180 #pve-83
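After applying the changed range, the pool can be checked (the namespace may differ depending on how MetalLB was installed):
```
kubectl get ipaddresspools.metallb.io -n metallb-system
```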

env/env.local.gomplate vendored Normal file

@@ -0,0 +1,5 @@
# shellcheck disable=all
K8S_CONTEXT={{ .Env.K8S_CONTEXT }}
K8S_MASTER_NODE_NAME={{ .Env.K8S_MASTER_NODE_NAME }}
SERVER_IP={{ .Env.SERVER_IP }}
AGENT_IP={{ .Env.AGENT_IP }}
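This template is rendered by `just env::setup` (next file); a manual render with explicit values would look like this (the hostname is a placeholder, the IPs are the repo defaults):
```
K8S_CONTEXT=my-k3s K8S_MASTER_NODE_NAME=k3s-master \
SERVER_IP=192.168.178.45 AGENT_IP=192.168.178.75 \
gomplate -f env.local.gomplate -o ../.env.local
```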

env/justfile vendored Normal file

@@ -0,0 +1,83 @@
set fallback := true

export ENV_FILE := ".env.local"
export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export SERVER_IP := env("SERVER_IP", "")
export AGENT_IP := env("AGENT_IP", "")

check:
    #!/bin/bash
    set -euo pipefail
    if [ -z "${K8S_CONTEXT}" ]; then
        echo "K8S_CONTEXT is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi
    if [ -z "${K8S_MASTER_NODE_NAME}" ]; then
        echo "K8S_MASTER_NODE_NAME is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi
    if [ -z "${SERVER_IP}" ]; then
        echo "SERVER_IP is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi
    if [ -z "${AGENT_IP}" ]; then
        echo "AGENT_IP is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi

setup:
    #!/bin/bash
    set -euo pipefail
    if [ -f ../.env.local ]; then
        echo ".env.local already exists." >&2
        if gum confirm "Do you want to overwrite it?"; then
            # Reset all values so every prompt below runs again
            K8S_CONTEXT=""
            K8S_MASTER_NODE_NAME=""
            SERVER_IP=""
            AGENT_IP=""
        elif [[ $? -eq 130 ]]; then
            echo "Setup cancelled by user." >&2
            exit 1
        else
            echo "Aborting setup." >&2
            exit 1
        fi
    fi
    while [ -z "${K8S_CONTEXT}" ]; do
        if ! K8S_CONTEXT=$(
            gum input --prompt="Context name: " \
                --width=100 --placeholder="context"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${K8S_MASTER_NODE_NAME}" ]; do
        if ! K8S_MASTER_NODE_NAME=$(
            gum input --prompt="Master Node Hostname: " \
                --width=100 --placeholder="Master Node Name"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${SERVER_IP}" ]; do
        if ! SERVER_IP=$(
            gum input --prompt="IP of Server/Master Node: " \
                --width=100 --placeholder="Master Node IP"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${AGENT_IP}" ]; do
        if ! AGENT_IP=$(
            gum input --prompt="IP of Agent Node: " \
                --width=100 --placeholder="Agent Node IP"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    rm -f ../.env.local
    gomplate -f env.local.gomplate -o ../.env.local
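Typical flow from the repo root, given the `mod env` declaration in the top-level justfile:
```
just env::setup   # interactively writes .env.local
just env::check   # fails fast if any required variable is missing
```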


@@ -2,5 +2,15 @@ https://www.reddit.com/r/GitOps/comments/1ih3b4a/discussion_setting_up_fluxcd_on
https://bash.ghost.io/k8s-home-lab-gitops-with-fluxcd/
# Setup using internal Gitea server
## Create a Gitea personal access token and export it as an env var
```
export GITEA_TOKEN=<my-token>
```
## Bootstrap
```
flux bootstrap gitea \
  --repository=k3s-homelab \
  --branch=main \
  --personal \
  --owner baschno \
  --hostname gitty.homeee.schnorbus.net \
  --ssh-hostname=gitty.fritz.box:2221 \
  --verbose \
  --path=./clusters/homelab
```
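After bootstrapping, Flux's own commands verify the controllers and the Git sync:
```
flux check
flux get kustomizations -A
```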
https://bash.ghost.io/secure-kubernetes-secrets-disaster-recovery-with-sops-gitops-fluxcd/

justfile Normal file

@@ -0,0 +1,10 @@
set dotenv-filename := ".env.local"

export PATH := "./node_modules/.bin:" + env_var('PATH')

[private]
default:
    @just --list --unsorted --list-submodules

mod env
mod BasicSetup '01_Basic_Setup'
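The two `mod` statements expose the submodules' recipes under namespaced paths:
```
just --list --list-submodules   # shows the env:: and BasicSetup:: recipes
just BasicSetup::install
```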


@@ -8,11 +8,12 @@ Zuerst solltest du sicherstellen, dass Longhorn auf deinem Cluster installiert i
#### Node Labeling
In case not all nodes should provide disks, e.g. when certain nodes have special/fast disks, the StorageClass needs to be adapted with a nodeSelector [1].
```
k label nodes k3s-prod-worker-{1..3} node.longhorn.io/create-default-disk=true
```
[1] https://longhorn.io/kb/tip-only-use-storage-on-a-set-of-nodes/
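Per the linked KB tip, labeling nodes is only half of it: Longhorn's "Create Default Disk on Labeled Nodes" setting must be enabled as well. A sketch, assuming the Helm chart exposes it under defaultSettings:
```
helm upgrade longhorn longhorn/longhorn -n longhorn-system \
  --set defaultSettings.createDefaultDiskLabeledNodes=true
```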
#### With Helm:

mise.toml Normal file

@@ -0,0 +1,7 @@
[tools]
jq = '1.8.1'
k3sup = '0.13.11'
helm = '3.19.0'
gum = '0.16.2'
gomplate = '4.3.3'
just = "1.42.4"