Compare commits

...

45 Commits

Author SHA1 Message Date
baschno
09026d6812 move test deployment to different justfile 2025-12-29 18:33:46 +01:00
baschno
24991fce90 add setup-cluster-issuer 2025-12-28 17:04:24 +01:00
baschno
65a59d2d0c WIP: cert manager 2025-12-28 16:19:08 +01:00
baschno
85fb620e39 add module traefik 2025-12-28 11:19:30 +01:00
baschno
b56e02d2ed fix formatting 2025-12-28 11:19:12 +01:00
baschno
15cb2ce903 adding test deployment 2025-12-28 11:18:46 +01:00
baschno
b47fe8f66b fix formatting 2025-12-27 20:38:12 +01:00
baschno
c5810661e5 Add support for metallb installation 2025-12-27 20:32:16 +01:00
baschno
7ddc08d622 add local docker registry config 2025-12-27 09:58:15 +01:00
baschno
c5aa7f8105 fix context name parameter 2025-12-26 20:15:41 +01:00
baschno
0c6cfedcde update manual readme 2025-12-22 20:48:17 +01:00
2be83a977a Merge pull request 'just enabled' (#1) from just into master
Reviewed-on: #1
2025-12-22 19:47:19 +00:00
baschno
4f5a18c84c install incl agent ready 2025-12-22 20:41:06 +01:00
baschno
7a54346331 add local container registry 2025-12-22 20:15:48 +01:00
baschno
5abc0de38a add just and mise tool support 2025-12-22 11:21:20 +01:00
baschno
29674ae504 adding vault in dev mode 2025-12-20 11:32:56 +01:00
baschno
6abe5d1a8f optiona 2025-11-22 19:39:35 +01:00
baschno
67a6c414f2 updating ip range 2025-11-22 19:39:26 +01:00
baschno
08212c26a6 taint 2025-11-22 09:33:41 +01:00
baschno
e4adbfd0b2 add few links 2025-08-31 17:16:55 +02:00
baschno
d7db562a23 helm and flux 2025-08-22 18:10:24 +02:00
baschno
7896130d05 longhorn nodeselector doku 2025-08-21 21:07:31 +02:00
baschno
efcb4ee172 . 2025-08-20 21:50:18 +02:00
baschno
f58fad216a add prometheus helm 2025-08-20 19:27:05 +02:00
baschno
90e0de0804 add reloader component 2025-08-20 19:27:05 +02:00
baschno
8cb83ffd9c updsate 2025-08-11 20:31:16 +02:00
baschno
cca6f599d5 add statefulset stuff 2025-06-13 21:26:58 +02:00
baschno
506a199c95 longorn other namespace 2025-06-13 21:26:58 +02:00
baschno
d2a16bd55b helm prometheus 2025-06-09 19:24:40 +02:00
baschno
d25c9227c7 longhorn configure additional disk 2025-06-08 23:09:39 +02:00
baschno
45c61d5130 streamlined homepage deployment 2025-05-23 19:46:11 +02:00
baschno
82c19ff12c updating steps for traefik 2025-05-23 19:10:27 +02:00
baschno
9695376a0a adding pihole to homepage 2025-05-19 21:48:21 +02:00
baschno
84fd560675 update docu 2025-05-19 21:47:01 +02:00
baschno
5708f841e7 add linkwarden to homepage 2025-05-19 19:47:34 +02:00
baschno
97ef02c1da adding proxmox widgets 2025-04-27 12:53:14 +02:00
baschno
65e99a9f83 enabling reloader component for homepage 2025-04-27 01:56:36 +02:00
baschno
77ad59eae5 fixing longhorn ui certificate 2025-04-26 23:15:31 +02:00
baschno
a13663754d fix nginx pm icon 2025-04-26 21:34:21 +02:00
baschno
5e30b1e83d adding services to homepage 2025-04-26 21:21:24 +02:00
baschno
5514b5687f longhorn and echopod tests 2025-04-26 19:57:56 +02:00
baschno
a3404bba2b homepage setup without helm 2025-04-26 19:56:50 +02:00
baschno
0e4ddcefdf longhorn nummer 2 2025-04-21 21:18:23 +02:00
baschno
12546a9669 neu ist der mai 2025-04-21 00:21:28 +02:00
baschno
a6ac7b84e4 savegame 2025-04-10 22:56:27 +02:00
58 changed files with 1794 additions and 1973 deletions

1
.gitignore vendored Normal file

@@ -0,0 +1 @@
.env.local


@@ -34,4 +34,30 @@ Rancher Installation
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
# Prevent scheduling on master (optional)
```
kubectl taint nodes master node-role.kubernetes.io/master=:NoSchedule
```
# Just Setup // K3sup
export SERVER_IP=192.168.178.45
export AGENT_IP=192.168.178.75
export USER=basti
k3sup install \
  --cluster \
  --ip 192.168.178.45 \
  --user $USER \
  --merge \
  --local-path $HOME/.kube/config \
  --context my-k3s

k3sup join \
  --ip $AGENT_IP \
  --server-ip $SERVER_IP \
  --user $USER
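To verify the install, list the nodes using the merged context (a quick sketch; assumes the `my-k3s` context name from the command above):
```
kubectl --context my-k3s get nodes -o wide
```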

148
01_Basic_Setup/justfile Normal file

@@ -0,0 +1,148 @@
set fallback := true
export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export EXTERNAL_K8S_HOST := env("EXTERNAL_K8S_HOST", "")
export KEYCLOAK_HOST := env("KEYCLOAK_HOST", "")
export KEYCLOAK_REALM := env("KEYCLOAK_REALM", "buunstack")
export K8S_OIDC_CLIENT_ID := env('K8S_OIDC_CLIENT_ID', "k8s")
export K3S_ENABLE_REGISTRY := env("K3S_ENABLE_REGISTRY", "true")
export SERVER_IP := env("K3S_SERVER_IP","192.168.178.45")
export AGENT_IP := env("K3S_AGENT_IP","192.168.178.75")
export USER := env("K3S_USER","basti")
[private]
default:
@just --list --unsorted --list-submodules
install:
#!/bin/bash
set -euo pipefail
just env::check
username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
kubeconfig=""
context=""
if gum confirm "Update KUBECONFIG?"; then
kubeconfig=$(
gum input --prompt="KUBECONFIG file: " --value="${HOME}/.kube/config" --width=100
)
context=$(
gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
)
fi
args=(
"install"
"--context" "${context}"
"--host" "${K8S_MASTER_NODE_NAME}"
"--user" "${username}"
"--no-extras" #
)
if [ -n "${kubeconfig}" ]; then
mkdir -p "$(dirname "${kubeconfig}")"
args+=("--local-path" "${kubeconfig}" "--merge")
fi
echo "Running: k3sup ${args[@]}"
k3sup "${args[@]}"
if [ -n "${context}" ]; then
kubectl config use-context "${context}"
fi
if [ "${K3S_ENABLE_REGISTRY}" = "true" ]; then
echo "Setting up local Docker registry..."
# Deploy Docker registry to cluster
kubectl apply -f ./registry/registry.yaml
# Set Pod Security Standard for registry namespace
kubectl label namespace registry pod-security.kubernetes.io/enforce=restricted --overwrite
# Wait for registry deployment
echo "Waiting for registry to be ready..."
kubectl wait --for=condition=available --timeout=60s deployment/registry -n registry
# Configure registries.yaml for k3s
just configure-registry
echo "✓ Local Docker registry deployed and configured"
echo ""
echo "Registry accessible at:"
echo " localhost:30500"
echo ""
echo "Usage:"
echo " export DOCKER_HOST=ssh://${K8S_MASTER_NODE_NAME}"
echo " docker build -t localhost:30500/myapp:latest ."
echo " docker push localhost:30500/myapp:latest"
echo " kubectl run myapp --image=localhost:30500/myapp:latest"
fi
echo "k3s cluster installed on ${K8S_MASTER_NODE_NAME}."
uninstall:
#!/bin/bash
set -euo pipefail
if gum confirm "Uninstall k3s from ${K8S_MASTER_NODE_NAME}?"; then
if gum confirm "Also remove Agent node at ${AGENT_IP}?"; then
echo "Removing Agent node at ${AGENT_IP}..."
ssh "${AGENT_IP}" "/usr/local/bin/k3s-agent-uninstall.sh"
fi
echo "Removing content of Server node..."
ssh "${K8S_MASTER_NODE_NAME}" "/usr/local/bin/k3s-uninstall.sh"
echo "Cleaning up kubeconfig entries..."
cluster_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.cluster // empty")
user_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.user // empty")
if kubectl config get-contexts "${K8S_CONTEXT}" &>/dev/null; then
kubectl config delete-context "${K8S_CONTEXT}"
echo "Deleted context: ${K8S_CONTEXT}"
fi
if [ -n "${cluster_name}" ] && kubectl config get-clusters | grep -q "^${cluster_name}$"; then
kubectl config delete-cluster "${cluster_name}"
echo "Deleted cluster: ${cluster_name}"
fi
if [ -n "${user_name}" ] && kubectl config get-users | grep -q "^${user_name}$"; then
kubectl config delete-user "${user_name}"
echo "Deleted user: ${user_name}"
fi
echo "k3s cluster uninstalled from ${K8S_CONTEXT}."
else
echo "Uninstallation cancelled." >&2
exit 1
fi
add-agent:
#!/bin/bash
set -euo pipefail
just env::check
username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
new_agent_ip=$(gum input --prompt="Agent IP to join cluster: " --value="${AGENT_IP}" --width=100)
args=(
"join"
"--ip" "${new_agent_ip}"
"--server-ip" "${SERVER_IP}"
"--user" "${username}"
)
echo "Running: k3sup ${args[*]}"
k3sup "${args[@]}"
echo "Agent node at ${new_agent_ip} added to cluster."
# Configure k3s to use local registry
configure-registry:
#!/bin/bash
set -euo pipefail
echo "Configuring k3s registries.yaml..."
ssh "${K8S_MASTER_NODE_NAME}" "sudo mkdir -p /etc/rancher/k3s"
gomplate -f ./registry/registries.gomplate.yaml | ssh "${K8S_MASTER_NODE_NAME}" "sudo tee /etc/rancher/k3s/registries.yaml > /dev/null"
echo "Restarting k3s to apply registry configuration..."
ssh "${K8S_MASTER_NODE_NAME}" "sudo systemctl restart k3s"
echo "✓ Registry configuration applied"


@@ -0,0 +1,4 @@
configs:
"localhost:30500":
tls:
insecure_skip_verify: true


@@ -0,0 +1,109 @@
apiVersion: v1
kind: Namespace
metadata:
name: registry
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: registry
namespace: registry
labels:
app: registry
spec:
replicas: 1
selector:
matchLabels:
app: registry
template:
metadata:
labels:
app: registry
spec:
securityContext:
runAsNonRoot: true
runAsUser: 65534
fsGroup: 65534
seccompProfile:
type: RuntimeDefault
containers:
- name: registry
image: registry:2
ports:
- containerPort: 5000
name: http
resources:
requests:
cpu: 25m
memory: 128Mi
limits:
cpu: 2000m
memory: 20Gi
env:
- name: REGISTRY_STORAGE_DELETE_ENABLED
value: "true"
- name: REGISTRY_HTTP_ADDR
value: "0.0.0.0:5000"
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 65534
capabilities:
drop:
- ALL
volumeMounts:
- name: registry-data
mountPath: /var/lib/registry
- name: tmp
mountPath: /tmp
livenessProbe:
httpGet:
path: /v2/
port: 5000
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
httpGet:
path: /v2/
port: 5000
initialDelaySeconds: 5
periodSeconds: 5
volumes:
- name: registry-data
emptyDir: {}
- name: tmp
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: registry
namespace: registry
labels:
app: registry
spec:
selector:
app: registry
ports:
- port: 5000
targetPort: 5000
name: http
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: registry-nodeport
namespace: registry
labels:
app: registry
spec:
selector:
app: registry
ports:
- port: 5000
targetPort: 5000
nodePort: 30500
name: http
type: NodePort

131
08_Vault/README.md Normal file

@@ -0,0 +1,131 @@
# Helm
## Installation
helm repo add hashicorp https://helm.releases.hashicorp.com
helm install vault hashicorp/vault \
--set='server.dev.enabled=true' \
--set='ui.enabled=true' \
--set='ui.serviceType=LoadBalancer' \
--namespace vault \
--create-namespace
This runs Vault in "dev" mode, which requires no further setup, state management, or initialization. It is useful for experimenting with Vault without needing to unseal, store keys, etc. All data is lost on restart; do not use dev mode for anything other than experimenting. See https://developer.hashicorp.com/vault/docs/concepts/dev-server for more details.
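As a quick sanity check (a sketch, not part of the chart output), port-forward the Vault service and log in with the dev root token, which is `root` by default and is also used for the UI login below:
```
kubectl port-forward svc/vault 8200:8200 -n vault &
export VAULT_ADDR=http://127.0.0.1:8200
vault login root
vault status
```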
## Output
```
$ kubectl get all -n vault
NAME READY STATUS RESTARTS AGE
pod/vault-0 1/1 Running 0 2m39s
pod/vault-agent-injector-8497dd4457-8jgcm 1/1 Running 0 2m39s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/vault ClusterIP 10.245.225.169 <none> 8200/TCP,8201/TCP 2m40s
service/vault-agent-injector-svc ClusterIP 10.245.32.56 <none> 443/TCP 2m40s
service/vault-internal ClusterIP None <none> 8200/TCP,8201/TCP 2m40s
service/vault-ui LoadBalancer 10.245.103.246 24.132.59.59 8200:31764/TCP 2m40s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/vault-agent-injector 1/1 1 1 2m40s
NAME DESIRED CURRENT READY AGE
replicaset.apps/vault-agent-injector-8497dd4457 1 1 1 2m40s
NAME READY AGE
statefulset.apps/vault 1/1 2m40s
```
# Configuration
## Enter Pod
kubectl exec -it vault-0 -n vault -- /bin/sh
## Create policy
```
cat <<EOF > /home/vault/read-policy.hcl
path "secret*" {
capabilities = ["read"]
}
EOF
```
## Apply
```
vault policy write read-policy /home/vault/read-policy.hcl
```
## Enable Kubernetes
```
vault auth enable kubernetes
```
## Configure Kubernetes Auth
Configure Vault to communicate with the Kubernetes API server:
```
vault write auth/kubernetes/config \
token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
kubernetes_host=https://${KUBERNETES_PORT_443_TCP_ADDR}:443 \
kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
```
## Create a Role
Create a role (vault-role) that binds the above policy to a Kubernetes service account (vault-serviceaccount) in a specific namespace. This allows the service account to access secrets stored in Vault:
```
vault write auth/kubernetes/role/vault-role \
bound_service_account_names=vault-serviceaccount \
bound_service_account_namespaces=vault \
policies=read-policy \
ttl=1h
```
# Create Secrets
## Via CLI
```
vault kv put secret/login pattoken=ytbuytbytbf765rb65u56rv
```
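The deployment used later also reads a second secret, `my-first-secret`, with `username` and `password` keys. A hedged example with placeholder values:
```
vault kv put secret/my-first-secret username=demo password=changeme
```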
## Via UI
You can also log in to the Vault UI using the Token method; in dev mode, use the token `root`.
# Accessing Secrets in Pods
Using the above steps, we have installed Vault and configured a Vault role (vault-role) that allows the service account (vault-serviceaccount) to access secrets stored in Vault.
Additionally, we have created two secrets, login and my-first-secret, with key-value pairs. Now, let's create a simple Kubernetes deployment and try to access those secrets.
First, let's create a service account named vault-serviceaccount in the vault namespace. This service account is granted permissions for the Vault role as defined in the "Create a Role" step above.
Apply the manifest using the command below:
```
kubectl apply -f vault-sa.yaml -n vault
```
This deployment manifest creates a single replica of an Nginx pod configured to securely fetch secrets from Vault. The Vault Agent injects the secrets login and my-first-secret into the pod according to the specified templates. The secrets are stored in the pod's filesystem and can be accessed by the application running in the container. The vault-serviceaccount service account, which has the necessary permissions, is used to authenticate with Vault.
```
kubectl apply -f vault-secret-test-deploy.yaml -n vault
```
These annotations are used to configure the Vault Agent to inject secrets into the pod volume.
- `vault.hashicorp.com/agent-inject: "true"`: Enables Vault Agent injection for this pod.
- `vault.hashicorp.com/agent-inject-status: "update"`: Ensures the status of secret injection is updated.
- `vault.hashicorp.com/agent-inject-secret-login: "secret/login"`: Specifies that the secret stored at `secret/login` in Vault should be injected.
- `vault.hashicorp.com/agent-inject-template-login`: Defines the template for the injected login secret, specifying the format in which the secret will be written.
- `vault.hashicorp.com/agent-inject-secret-my-first-secret: "secret/my-first-secret"`: Specifies that the secret stored at `secret/my-first-secret` in Vault should be injected.
- `vault.hashicorp.com/agent-inject-template-my-first-secret`: Defines the template for the injected `my-first-secret`, specifying the format in which the secret will be written.
- `vault.hashicorp.com/role: "vault-role"`: Specifies the Vault role to be used for authentication.
- `serviceAccountName`: Uses the service account `vault-serviceaccount`, which has permissions to access Vault.
Use the command below to check the Vault secrets in the pod volume (adjust the pod name to match your deployment):
```
kubectl exec -it vault-test-84d9dc9986-gcxfv -n vault -- sh -c "cat /vault/secrets/login && cat /vault/secrets/my-first-secret"
```

6
08_Vault/vault-sa.yaml Normal file

@@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault-serviceaccount
labels:
app: read-vault-secret


@@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: vault-test
labels:
app: read-vault-secret
spec:
selector:
matchLabels:
app: read-vault-secret
replicas: 1
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-inject-status: "update"
vault.hashicorp.com/agent-inject-secret-login: "secret/login"
vault.hashicorp.com/agent-inject-template-login: |
{{- with secret "secret/login" -}}
pattoken={{ .Data.data.pattoken }}
{{- end }}
vault.hashicorp.com/agent-inject-secret-my-first-secret: "secret/my-first-secret"
vault.hashicorp.com/agent-inject-template-my-first-secret: |
{{- with secret "secret/my-first-secret" -}}
username={{ .Data.data.username }}
password={{ .Data.data.password }}
{{- end }}
vault.hashicorp.com/role: "vault-role"
labels:
app: read-vault-secret
spec:
serviceAccountName: vault-serviceaccount
containers:
- name: nginx
image: nginx


@@ -0,0 +1,180 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: homepage
namespace: homepage
labels:
app.kubernetes.io/name: homepage
secrets:
- name: homepage
---
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: homepage
namespace: homepage
labels:
app.kubernetes.io/name: homepage
annotations:
kubernetes.io/service-account.name: homepage
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: homepage
labels:
app.kubernetes.io/name: homepage
rules:
- apiGroups:
- ""
resources:
- namespaces
- pods
- nodes
verbs:
- get
- list
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- apiGroups:
- traefik.io
resources:
- ingressroutes
verbs:
- get
- list
- apiGroups:
- gateway.networking.k8s.io
resources:
- httproutes
- gateways
verbs:
- get
- list
- apiGroups:
- metrics.k8s.io
resources:
- nodes
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: homepage
labels:
app.kubernetes.io/name: homepage
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: homepage
subjects:
- kind: ServiceAccount
name: homepage
namespace: homepage
---
apiVersion: v1
kind: Service
metadata:
name: homepage
namespace: homepage
labels:
app.kubernetes.io/name: homepage
annotations:
spec:
type: ClusterIP
ports:
- port: 3000
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: homepage
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: homepage
namespace: homepage
labels:
app.kubernetes.io/name: homepage
annotations:
reloader.stakater.com/search: "true"
secret.reloader.stakater.com/reload: "homepage"
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: homepage
template:
metadata:
labels:
app.kubernetes.io/name: homepage
spec:
serviceAccountName: homepage
automountServiceAccountToken: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
containers:
- name: homepage
image: "ghcr.io/gethomepage/homepage:latest"
imagePullPolicy: Always
env:
- name: HOMEPAGE_ALLOWED_HOSTS
value: homepage.k8s.schnrbs.work # required, may need port. See gethomepage.dev/installation/#homepage_allowed_hosts
ports:
- name: http
containerPort: 3000
protocol: TCP
volumeMounts:
- mountPath: /app/config/custom.js
name: homepage-config
subPath: custom.js
- mountPath: /app/config/custom.css
name: homepage-config
subPath: custom.css
- mountPath: /app/config/bookmarks.yaml
name: homepage-config
subPath: bookmarks.yaml
- mountPath: /app/config/docker.yaml
name: homepage-config
subPath: docker.yaml
- mountPath: /app/config/kubernetes.yaml
name: homepage-config
subPath: kubernetes.yaml
- mountPath: /app/config/services.yaml
name: homepage-config
subPath: services.yaml
- mountPath: /app/config/settings.yaml
name: homepage-config
subPath: settings.yaml
- mountPath: /app/config/widgets.yaml
name: homepage-config
subPath: widgets.yaml
- mountPath: /app/config/logs
name: logs
volumes:
- name: homepage-config
configMap:
name: homepage
- name: logs
emptyDir: {}


@@ -9,4 +9,4 @@ spec:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- homepage.k8s.internal.schnrbs.work
- homepage.k8s.schnrbs.work


@@ -0,0 +1,24 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: homepage-ingress-route
namespace: homepage
labels:
app.kubernetes.io/name: homepage
annotations:
gethomepage.dev/description: Dynamically Detected Homepage
gethomepage.dev/enabled: "true"
gethomepage.dev/group: Cluster Management
gethomepage.dev/icon: homepage.png
gethomepage.dev/name: Homepage
spec:
entryPoints:
- websecure
routes:
- match: Host(`homepage.k8s.schnrbs.work`)
kind: Rule
services:
- name: homepage
port: 3000
tls:
secretName: homepage-certificate-secret


@@ -0,0 +1,150 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: homepage
namespace: homepage
labels:
app.kubernetes.io/name: homepage
annotations:
reloader.stakater.com/match: "true"
data:
kubernetes.yaml: |
mode: cluster
settings.yaml: |
background: https://images.unsplash.com/photo-1502790671504-542ad42d5189?auto=format&fit=crop&w=2560&q=80
cardBlur: xs
providers:
longhorn:
url: https://longhorn-dashboard.k8s.schnrbs.work
custom.css: ""
custom.js: ""
bookmarks.yaml: |
- Developer:
- Github:
- abbr: GH
href: https://github.com/
services.yaml: |
- Smart Home:
- Home Assistant:
icon: home-assistant.png
href: https://ha.homeee.schnorbus.net
description: Home Assistant is awesome
widgets:
- type: homeassistant
url: https://ha.homeee.schnorbus.net
key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI3MTA1ZmE1MDA5ZTA0MDQxYTc0NzUxZmUwM2NhYWMwZiIsImlhdCI6MTc0NTcxMDY3OCwiZXhwIjoyMDYxMDcwNjc4fQ.EI6-Husovb1IYpVn5RBy8pJ7bcESQHDzIbS22_5abUs
- Zigbee2MQTT:
icon: zigbee2mqtt.png
href: http://muckibude.fritz.box:8383
description: Zigbee2MQTT is awesome
- Pihole:
icon: pi-hole.png
href: http://pi.hole
description: Pi-hole
widgets:
- type: pihole
url: http://pi.hole
version: 6
key: 5ipI9bvB
- Paperless NGX:
icon: paperless-ng.png
href: https://ppl.homeee.schnorbus.net
widgets:
- type: paperlessngx
url: https://ppl.homeee.schnorbus.net
token: 0cf8eb062d0ecfc0aa70611125427692cb577d68
- My Second Group:
- Proxmox pve-81:
icon: proxmox.png
href: http://pve-81.fritz.box:8006
description: Homepage is the best
- Proxmox pve-82:
icon: proxmox.png
href: http://pve-82.fritz.box:8006
description: Homepage is the best
- Proxmox pve-83:
icon: proxmox.png
href: https://pve-83.fritz.box:8006
description: Homepage is the best
# widgets:
# - type: proxmox
# url: https://pve-83.fritz.box:8006
# username: homepage_api@pam!homepage_api
# password: 0cf8eb062d0ecfc0aa70611125427692cb577d68
- Longhorn:
icon: longhorn.png
href: https://longhorn-dashboard.k8s.schnrbs.work
description: Longhorn volume provisioning
- Party Time:
- Immich:
icon: immich.png
href: https://immich.homeee.schnorbus.net
description: Immich is awesome
widgets:
- type: immich
url: https://immich.homeee.schnorbus.net
key: deOT6z7AHok30eKWgF2bOSJuOIZXK0eONo7PrR0As
version: 2
- Linkwarden:
icon: linkwarden.png
href: https://lw.homeee.schnorbus.net
description: Homepage isssss 😎
widgets:
- type: linkwarden
url: http://docker-host-02.fritz.box:9595
key: eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..bEvs2PcR0ZTNpb8b.Lhe1-00LlVVC97arojvhh7IK4VADR82AMAzK5sd7AcUhs2WUQmu8Q-cOAKFGVlgPgdk-w1Pa8CJJHF71opWJk85aJXkTcdl7jANwN8PqgHXsSPoqtvzX.5GFRIAMo31sw5GStVlznHQ
- Nginx Proxy Manager:
icon: nginx-proxy-manager.png
href: http://192.168.178.42:8181
description: Nginx Proxy Manager is awesome
widgets:
- type: npm
url: http://192.168.178.42:8181
username: bastian@schnorbus.net
password: abcd1234
- Plex:
icon: plex.png
href: http://diskstation.fritz.box:32400/web/index.html#!/
description: Watch movies and TV shows.
server: http://diskstation.fritz.box:32400/web/index.html#!/
container: plex
widgets:
- type: plex
url: http://diskstation.fritz.box:32400
key: aNcUss31qsVsea5bsDf9
widgets.yaml: |
- kubernetes:
cluster:
show: true
cpu: true
memory: true
showLabel: true
label: "cluster"
nodes:
show: true
cpu: true
memory: true
showLabel: true
- longhorn:
# Show the expanded view
expanded: true
# Shows a node representing the aggregate values
total: true
# Shows the node names as labels
labels: true
# Show the nodes
nodes: true
- resources:
backend: resources
expanded: true
cpu: true
memory: true
network: default
- search:
provider: duckduckgo
target: _blank
docker.yaml: ""


@@ -1,6 +1,35 @@
Install via helm:
## Installation
### Install via helm
https://gethomepage.dev/installation/k8s/#install-with-helm
```
helm upgrade --install homepage jameswynn/homepage -f homepage-values.yaml --create-namespace --namespace homepage
```
### Install via deployment
```
k create ns homepage
k apply -f 01_homepage-deployment.yaml
```
## Setup Https & Certificate
```
k apply -f 02_homepage-certificate.yaml
k apply -f 03_homepage-ingress-route.yaml
```
## Upload Content
```
k apply -f 04_homepage-configmap.yaml
```
## Test
Open a browser and navigate to:
https://homepage.k8s.schnrbs.work
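Alternatively, a quick check from the command line (assuming DNS resolves to the load balancer IP and the certificate has been issued):
```
kubectl get pods -n homepage
curl -I https://homepage.k8s.schnrbs.work
```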


@@ -1,16 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: homepage-ingress-route
namespace: homepage
spec:
entryPoints:
- websecure
routes:
- match: Host(`homepage.k8s.internal.schnrbs.work`)
kind: Rule
services:
- name: homepage
port: 3000
tls:
secretName: homepage-certificate-secret


@@ -1,72 +0,0 @@
config:
bookmarks:
- Developer:
- Github:
- abbr: GH
href: https://github.com/
services:
- My First Group:
- My First Service:
href: http://localhost/
description: Homepage is awesome
- My Second Group:
- My Second Service:
href: http://localhost/
description: Homepage is the best
- My Third Group:
- My Third Service:
href: http://localhost/
description: Homepage is 😎
widgets:
# show the kubernetes widget, with the cluster summary and individual nodes
- kubernetes:
cluster:
show: true
cpu: true
memory: true
showLabel: true
label: "cluster"
nodes:
show: true
cpu: true
memory: true
showLabel: true
- pihole:
show: true
url: http://192.168.178.202
key: 1eae9e87f4b4710981639ee591b7d75734811d61697092110cb748c3244e01cc
- fritzbox:
show: true
url: http://192.168.178.1
- search:
provider: duckduckgo
target: _blank
kubernetes:
mode: cluster
settings:
# The service account is necessary to allow discovery of other services
serviceAccount:
create: true
name: homepage
# This enables the service account to access the necessary resources
enableRbac: true
ingress:
main:
enabled: false
annotations:
# Example annotations to add Homepage to your Homepage!
gethomepage.dev/enabled: "true"
gethomepage.dev/name: "Homepage"
gethomepage.dev/description: "Dynamically Detected Homepage"
gethomepage.dev/group: "Dynamic"
gethomepage.dev/icon: "homepage.png"
hosts:
- host: homepage.k8s.internal.schnrbs.work
paths:
- path: /
pathType: Prefix


@@ -2,7 +2,7 @@ apiVersion: v1
kind: PersistentVolume
metadata:
name: longhorn-test-pv
namespace: default
namespace: test
spec:
capacity:
storage: 10Gi # set the desired storage size


@@ -2,7 +2,7 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: longhorn-test-pvc
namespace: default
namespace: test
spec:
accessModes:
- ReadWriteOnce

10
12_reloader/README.md Normal file

@@ -0,0 +1,10 @@
helm install reloader stakater/reloader --namespace reloader --create-namespace
flux create source helm stakater --url https://stakater.github.io/stakater-charts --namespace reloader
flux create helmrelease my-reloader --chart stakater/reloader \
--source HelmRepository/stakater \
--chart-version 2.1.3 \
--namespace reloader
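Reloader only acts on workloads that carry its annotations; the homepage deployment in this repo uses `reloader.stakater.com/search` and `secret.reloader.stakater.com/reload`. A hedged sketch of adding the annotations to an existing deployment from the CLI:
```
kubectl annotate deployment homepage -n homepage \
  reloader.stakater.com/search="true" \
  secret.reloader.stakater.com/reload="homepage" \
  --overwrite
```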


@@ -0,0 +1,2 @@
https://igeadetokunbo.medium.com/how-to-run-databases-on-kubernetes-an-8-step-guide-b75ce9117600


@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mysql
spec:
serviceName: "mysql"
replicas: 3
selector:
matchLabels:
app: mysql
template:
metadata:
labels:
app: mysql
spec:
containers:
- name: mysql
image: mysql:8.4.0-oraclelinux8
ports:
- containerPort: 3306
name: mysql
env:
- name: MYSQL_ROOT_PASSWORD
value: "your_password"
volumeMounts:
- name: mysql-storage
mountPath: /var/lib/mysql
volumeClaimTemplates:
- metadata:
name: mysql-storage
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 10Gi
storageClassName: longhorn


@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: mysql-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: longhorn
hostPath:
path: /mnt/data # Specify a path in the host for storage


@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mysql-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: longhorn


@@ -0,0 +1,13 @@
# Headless service
apiVersion: v1
kind: Service
metadata:
name: mysql
labels:
app: mysql
spec:
ports:
- name: mysql
port: 3306
selector:
app: mysql


@@ -133,6 +133,151 @@ spec:
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: zwavejs2mqtt-pvc
labels:
app: zwavejs2mqtt
namespace: home-assistant
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Mi
---
apiVersion: v1
kind: Namespace
metadata:
name: home-assistant
---
apiVersion: v1
kind: Service
metadata:
namespace: home-assistant
name: home-assistant
spec:
selector:
app: home-assistant
type: ClusterIP
ports:
- name: http
protocol: TCP
port: 80
targetPort: 8123
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: home-assistant
name: home-assistant
labels:
app: home-assistant
spec:
replicas: 1
selector:
matchLabels:
app: home-assistant
template:
metadata:
labels:
app: home-assistant
spec:
containers:
- name: bluez
image: ghcr.io/mysticrenji/bluez-service:v1.0.0
securityContext:
privileged: true
- name: home-assistant
image: ghcr.io/mysticrenji/homeassistant-arm64:2023.3.0
resources:
requests:
memory: "256Mi"
limits:
memory: "512Mi"
ports:
- containerPort: 8123
volumeMounts:
- mountPath: /config
name: config
- mountPath: /config/configuration.yaml
subPath: configuration.yaml
name: configmap-file
- mountPath: /config/automations.yaml
subPath: automations.yaml
name: configmap-file
- mountPath: /media
name: media-volume
# - mountPath: /run/dbus
# name: d-bus
# readOnly: true
- mountPath: /dev/ttyUSB1
name: zigbee
#- mountPath: /dev/video0
# name: cam
securityContext:
privileged: true
capabilities:
add:
- NET_ADMIN
- NET_RAW
- SYS_ADMIN
hostNetwork: true
volumes:
- name: config
persistentVolumeClaim:
claimName: home-assistant-pvc
- name: media-volume
hostPath:
path: /tmp/media
- name: configmap-file
configMap:
name: home-assistant-configmap
# hostPath:
# path: /tmp/home-assistant
# type: DirectoryOrCreate
# - name: d-bus
# hostPath:
# path: /run/dbus
- name: zigbee
hostPath:
path: /dev/ttyACM0
#- name: cam
# hostPath:
# path: /dev/video0
---
kind: ConfigMap
apiVersion: v1
metadata:
name: home-assistant-configmap
namespace: home-assistant
data:
known_devices.yaml: |
automations.yaml: |
configuration.yaml: |-
default_config:
frontend:
themes: !include_dir_merge_named themes
automation: !include automations.yaml
http:
use_x_forwarded_for: true
trusted_proxies:
- 10.10.0.0/16
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: home-assistant-pvc
labels:
app: home-assistant
namespace: home-assistant
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 9Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: zwavejs2mqtt-pvc
labels:

File diff suppressed because it is too large


@@ -1,8 +1,16 @@
Metallb Installation
## Used IP Range
Metallb will advertise IPs of the range:
192.168.178.226-192.168.178.240
The first address (x.x.x.226) will be used for the Traefik reverse proxy deployment.
https://canthonyscott.com/setting-up-a-k3s-kubernetes-cluster-within-proxmox/
Following https://metallb.universe.tf/installation/ (0.14.3)
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.3/config/manifests/metallb-native.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.9/config/manifests/metallb-native.yaml
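After applying the manifest and the address pool, the installation can be verified with the following commands (the pool and advertisement resources only appear once their manifests have been applied):
```
kubectl get pods -n metallb-system
kubectl get ipaddresspools.metallb.io -n metallb-system
kubectl get l2advertisements.metallb.io -n metallb-system
```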


@@ -5,4 +5,4 @@ metadata:
namespace: metallb-system
spec:
addresses:
- 192.168.178.220-192.168.178.250
- {{ .Env.METALLB_ADDRESS_RANGE }}

66
Metallb_Setup/justfile Normal file

@@ -0,0 +1,66 @@
set fallback := true
export K8S_CONTEXT := env("K8S_CONTEXT", "")
export SERVER_IP := env("K3S_SERVER_IP","192.168.178.45")
export USER := env("K3S_USER","basti")
[private]
default:
@just --list --unsorted --list-submodules
install:
#!/bin/bash
set -euo pipefail
just env::check
METALLB_VERSION="v0.15.3"
username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
context=""
if gum confirm "Update KUBECONFIG?"; then
context=$(
gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
)
fi
if [ -n "${context}" ]; then
kubectl config use-context "${context}"
fi
kubectl apply -f "https://raw.githubusercontent.com/metallb/metallb/${METALLB_VERSION}/config/manifests/metallb-native.yaml"
gum spin --spinner dot --title "Waiting for MetalLB to be ready..." -- kubectl wait --namespace metallb-system --for=condition=available deployment --all --timeout=120s
echo "MetalLB ${METALLB_VERSION} installed successfully."
gomplate -f address-pool.gomplate.yaml | kubectl apply -f -
echo "Address pool configured."
kubectl apply -f advertisement.yaml
echo "Advertisement created."
uninstall:
#!/bin/bash
set -euo pipefail
just env::check
kubectl get namespace metallb-system &>/dev/null && kubectl delete ns metallb-system
test-deployment:
#!/bin/bash
set -euo pipefail
just env::check
kubectl apply -f test-deployment.yaml
echo "Test deployment created. You can check the service with 'kubectl get svc nginx -o wide -n test'."
echo "To clean up, run 'just test-deployment-cleanup'."
test-deployment-cleanup:
#!/bin/bash
set -euo pipefail
just env::check
kubectl delete -f test-deployment.yaml
echo "Test deployment and service deleted."


@@ -27,7 +27,8 @@ kubectl expose deploy schnipo --port=80 --target-port=8080 --type=LoadBalancer -
```
#Create deploy
kubectl create deploy nginx --image=nginx
k create ns test
kubectl create deploy nginx --image=nginx -n test
kubectl scale --replicas=3 deployment/nginx -n test


@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: schnipo-ingress-certificate
namespace: dishes
spec:
secretName: schnipo-certificate-secret
issuerRef:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- schnipo.{{.Env.EXTERNAL_DOMAIN}}


@@ -0,0 +1,43 @@
apiVersion: v1
kind: Namespace
metadata:
name: dishes
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dish-schnipo
namespace: dishes
labels:
app: dishes
spec:
replicas: 3
selector:
matchLabels:
app: dishes
template:
metadata:
labels:
app: dishes
spec:
containers:
- name: dish-schnipo
image: bschnorbus/dish-schnipo
ports:
- containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
name: dish-schnipo
namespace: dishes
spec:
type: ClusterIP
selector:
app: dishes
ports:
- port: 80
targetPort: 8080
protocol: TCP


@@ -0,0 +1,18 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: schnipo-ingress-route
namespace: dishes
spec:
entryPoints:
- websecure
routes:
- match: Host(`schnipo.{{.Env.EXTERNAL_DOMAIN}}`)
kind: Rule
services:
- name: schnipo
port: 80
targetPort: 8080
tls:
secretName: schnipo-certificate-secret


@@ -9,4 +9,4 @@ spec:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- nginx-test.k8s.internal.schnrbs.work
- nginx-test.{{.Env.EXTERNAL_DOMAIN}}


@@ -0,0 +1,43 @@
apiVersion: v1
kind: Namespace
metadata:
name: test
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
namespace: test
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:latest
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx
namespace: test
spec:
type: LoadBalancer
selector:
app: nginx
ports:
- port: 80
targetPort: 80
protocol: TCP


@@ -7,10 +7,10 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`nginx-test.k8s.internal.schnrbs.work`)
- match: Host(`nginx-test.{{.Env.EXTERNAL_DOMAIN}}`)
kind: Rule
services:
- name: nginx
port: 80
tls:
secretName: nginx-certificate-secret
secretName: nginx-certificate-secret


@@ -7,7 +7,7 @@ metadata:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
rules:
- host: nginx-test.k8s.internal.schnrbs.work
- host: nginx-test.int.schnrbs.work
http:
paths:
- path: /
@@ -19,5 +19,5 @@ spec:
number: 80
tls:
- hosts:
- nginx-test.k8s.internal.schnrbs.work
- nginx-test.int.schnrbs.work
secretName: nginx-certificate-secret


@@ -4,14 +4,63 @@
helm repo add traefik https://helm.traefik.io/traefik
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
## Troubleshooting steps
## Cert-Manager
cert-manager will be used because it stores certificates in a Secret, which makes them accessible to every pod.
Traefik, in contrast, stores certificates on disk, so a volume in RWX mode would be needed (too much effort).
### Issuer - CA
An issuer represents a CA. There are two kinds:
#### Issuer
Can only be used in the namespace it is created in.
#### Cluster Issuer
Can be used throughout the whole cluster, not limited to a specific namespace, i.e. a general issuer for all namespaces in the cluster.
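To illustrate the scope difference, a minimal namespaced Issuer could look like the sketch below (self-signed, for testing only; this repo actually uses a Cloudflare-backed ClusterIssuer, see cert-manager-cluster-issuer.yaml):
```
kubectl apply -f - <<EOF
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: selfsigned-issuer
  namespace: test
spec:
  selfSigned: {}
EOF
kubectl get issuers -n test
kubectl get clusterissuers
```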
## Test Deployment
```
k create ns test
kubectl create deploy nginx --image=nginx -n test
k create svc -n test clusterip nginx --tcp=80
k scale --replicas=3 deployment/nginx -n test
```
## Install Traefik & Cert-Manager
```
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
traefik-dashboard.k8s.schnrbs.work
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml
k apply -f cert-manager-issuer-secret.yaml
k get secret -n cert-manager
k apply -f cert-manager-cluster-issuer.yaml
```
## Switch Test Deployment to https
```
k apply -f test/nginx-certificate.yaml
k apply -f test/nginx-ingress.yaml
```
## Troubleshooting steps
```
k get po -n test -o wide
k create svc -n test clusterip nginx
k create svc -n test clusterip nginx --tcp=80
@@ -25,41 +74,23 @@ k apply -f traefik_lempa/nginx-ingress.yaml
k get svc -n test
k get ingress
k get ingress -n test
git staus
git status
git diff
git commit -am "wip thing"
git checkout master
git pull --rebase
git merge wip
git push
git log
git checkout master
cd traefik_lempa
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
cd ..
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
```
```
k get svc ingressRoute
k get svc ingressRoutes
k get svc ingressroutes.traefik.io
k get svc ingressroutes.traefik.io --all-namespaces
k get ingressroutes.traefik.io --all-namespaces
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
exit
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values traefik_lempa/cert-manager-values.yaml
cert-manager-values.yaml
echo -n 'P96My4uiHudZtiC2ymjSGQ0174CoRBnI9ztmA0Wh' | base64
k get po
alias k=kubectl
k get po
k apply traefik_lempa/cert-manager-issuer-secret.yaml
k apply -f traefik_lempa/cert-manager-issuer-secret.yaml
k get secret
k get secrets
k get secret -n cert-manager
k apply -f traefik_lempa/cert-manager-cluster-issuer.yaml
k get clusterissuers.cert-manager.io
k apply -f traefik_lempa/nginx-certificate.yaml
k apply -f traefik_lempa/nginx-ingress.yaml
k apply -f traefik_lempa/cert-manager-cluster-issuer.yaml
```


@@ -4,7 +4,7 @@ metadata:
name: cloudflare-cluster-issuer
spec:
acme:
email: hello@schnorbus.net
email: {{ .Env.ACME_EMAIL }}
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: cloudflare-acme-key


@@ -5,4 +5,4 @@ metadata:
namespace: cert-manager
type: Opaque
stringData:
api-token: DgU4SMUpQVAoS8IisGxnSQCUI7PbclhvegdqF9I1
api-token: {{ .Env.CLOUDFLARE_API_TOKEN }}

62
Traefik/justfile Normal file

@@ -0,0 +1,62 @@
set fallback:=true
export CERT_MANAGER_NAMESPACE := env("CERT_MANAGER_NAMESPACE", "cert-manager")
export TRAEFIK_NAMESPACE := env("TRAEFIK_NAMESPACE", "traefik")
add-helm-repos:
helm repo add traefik https://helm.traefik.io/traefik --force-update
helm repo add jetstack https://charts.jetstack.io --force-update
helm repo update
install:
#!/bin/bash
set -euo pipefail
just env::check
just add-helm-repos
helm upgrade traefik traefik/traefik \
--install \
--cleanup-on-fail \
--namespace ${TRAEFIK_NAMESPACE} \
--create-namespace \
--values traefik-values.yaml
helm upgrade cert-manager jetstack/cert-manager \
--install \
--cleanup-on-fail \
--namespace ${CERT_MANAGER_NAMESPACE} \
--create-namespace \
--values cert-manager-values.yaml
uninstall:
#!/bin/bash
set -euo pipefail
just env::check
helm uninstall traefik --namespace ${TRAEFIK_NAMESPACE} || true
helm uninstall cert-manager --namespace ${CERT_MANAGER_NAMESPACE} || true
setup-cluster-issuer:
#!/bin/bash
set -euo pipefail
just env::check
gomplate -f cert-manager-issuer-secret-gomplate.yaml | kubectl apply -f -
gomplate -f cert-manager-cluster-issuer-gomplate.yaml | kubectl apply -f -
# Get status of cert-manager components
status:
#!/bin/bash
set -euo pipefail
echo "=== cert-manager Components Status ==="
echo ""
echo "Namespace: ${CERT_MANAGER_NAMESPACE}"
echo ""
echo "Pods:"
kubectl get pods -n ${CERT_MANAGER_NAMESPACE}
echo ""
echo "Services:"
kubectl get services -n ${CERT_MANAGER_NAMESPACE}
echo ""
echo "CRDs:"
kubectl get crd | grep cert-manager.io


@@ -0,0 +1,15 @@
ports:
web:
redirections:
entryPoint:
to: websecure
scheme: https
logs:
general:
level: DEBUG
ingressRoute:
dashboard:
enabled: true
entryPoints: [web, websecure]
matchRule: Host(`traefik-dashboard.{{ .Env.EXTERNAL_DOMAIN }}`)


@@ -1,10 +0,0 @@
ports:
web:
redirectTo:
port: websecure
ingressRoute:
dashboard:
enabled: true
entryPoints: [web, websecure]
matchRule: Host(`traefik-dashboard.k8s.redacted`)


@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: echopod-ingress-certificate
namespace: default
spec:
secretName: echopod-certificate-secret
issuerRef:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- echopod.k8s.schnrbs.work


@@ -30,10 +30,30 @@ kind: Service
metadata:
name: echopod-service
spec:
type: NodePort # Change to LoadBalancer if using a cloud provider
type: LoadBalancer # Change to LoadBalancer if using a cloud provider
# type: NodePort # Change to LoadBalancer if using a cloud provider
ports:
- port: 80
targetPort: 80
nodePort: 30080 # Port to expose on the node
# nodePort: 30080 # Port to expose on the node
selector:
app: echopod
app: echopod
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: echopod-ingress-route
namespace: default
spec:
entryPoints:
- websecure
routes:
- match: Host(`echopod.k8s.schnrbs.work`)
kind: Rule
services:
- name: echopod-service
port: 80
tls:
secretName: echopod-certificate-secret

9
env/env.local.gomplate vendored Normal file

@@ -0,0 +1,9 @@
# shellcheck disable=all
K8S_CONTEXT={{ .Env.K8S_CONTEXT }}
K8S_MASTER_NODE_NAME={{ .Env.K8S_MASTER_NODE_NAME }}
SERVER_IP={{ .Env.SERVER_IP }}
AGENT_IP={{ .Env.AGENT_IP }}
METALLB_ADDRESS_RANGE={{ .Env.METALLB_ADDRESS_RANGE }}
CLOUDFLARE_API_TOKEN={{ .Env.CLOUDFLARE_API_TOKEN}}
ACME_EMAIL={{ .Env.ACME_EMAIL}}
EXTERNAL_DOMAIN={{ .Env.EXTERNAL_DOMAIN }}

125
env/justfile vendored Normal file

@@ -0,0 +1,125 @@
set fallback := true
export ENV_FILE := ".env.local"
export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export SERVER_IP := env("SERVER_IP", "")
export AGENT_IP := env("AGENT_IP", "")
check:
#!/bin/bash
set -euo pipefail
if [ -z "${K8S_CONTEXT}" ]; then
echo "K8S_CONTEXT is not set. Please execute 'just env::setup'" >&2
exit 1
fi
if [ -z "${K8S_MASTER_NODE_NAME}" ]; then
echo "K8S_MASTER_NODE_NAME is not set. Please execute 'just env::setup'" >&2
exit 1
fi
if [ -z "${SERVER_IP}" ]; then
echo "SERVER_IP is not set. Please execute 'just env::setup'" >&2
exit 1
fi
if [ -z "${AGENT_IP}" ]; then
echo "AGENT_IP is not set. Please execute 'just env::setup'" >&2
exit 1
fi
setup:
#!/bin/bash
set -euo pipefail
if [ -f ../.env.local ]; then
echo ".env.local already exists." >&2
if gum confirm "Do you want to overwrite it?"; then
K8S_CONTEXT=""
SERVER_IP=""
AGENT_IP=""
elif [[ $? -eq 130 ]]; then
echo "Setup cancelled by user." >&2
exit 1
else
echo "Aborting setup." >&2
exit 1
fi
fi
while [ -z "${K8S_CONTEXT}" ]; do
if ! K8S_CONTEXT=$(
gum input --prompt="Context name: " \
--width=100 --placeholder="context"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${K8S_MASTER_NODE_NAME}" ]; do
if ! K8S_MASTER_NODE_NAME=$(
gum input --prompt="Master Node Hostname: " \
--width=100 --placeholder="Master Node Name"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${SERVER_IP}" ]; do
if ! SERVER_IP=$(
gum input --prompt="IP of Server/Master Node: " \
--width=100 --placeholder="Master Node IP"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${AGENT_IP}" ]; do
if ! AGENT_IP=$(
gum input --prompt="IP of Agent Node: " \
--width=100 --placeholder="Agent Node IP"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${METALLB_ADDRESS_RANGE}" ]; do
if ! METALLB_ADDRESS_RANGE=$(
gum input --prompt="IP Range for LoadBalancer: " \
--width=100 --placeholder="[x.x.x.x-y.y.y.y]"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${CLOUDFLARE_API_TOKEN}" ]; do
if ! CLOUDFLARE_API_TOKEN=$(
gum input --prompt="Cloudflare API Token: " \
--width=100 --placeholder="API Token" --password
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${ACME_EMAIL}" ]; do
if ! ACME_EMAIL=$(
gum input --prompt="ACME Email for Cert-Manager: " \
--width=100 --placeholder="Email"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
while [ -z "${EXTERNAL_DOMAIN}" ]; do
if ! EXTERNAL_DOMAIN=$(
gum input --prompt="External Domain: " \
--width=100 --placeholder="Domain"
); then
echo "Setup cancelled." >&2
exit 1
fi
done
echo "Generating .env.local file..."
rm -f ../.env.local
gomplate -f env.local.gomplate -o ../.env.local

17
gitops/README.md Normal file

@@ -0,0 +1,17 @@
https://www.reddit.com/r/GitOps/comments/1ih3b4a/discussion_setting_up_fluxcd_on_k3s_for_home_labs/
https://bash.ghost.io/k8s-home-lab-gitops-with-fluxcd/
# Setup using internal Gitea server
## Create a Gitea personal access token and export it as an env var
```
export GITEA_TOKEN=<my-token>
```
## Bootstrap
```
flux bootstrap gitea --repository=k3s-homelab --branch=main --personal --owner baschno --hostname gitty.homeee.schnorbus.net --ssh-hostname=gitty.fritz.box:2221 --verbose --path=./clusters/homelab
```
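Once bootstrapped, the installation can be checked with the standard Flux CLI commands (a sketch):
```
flux check
flux get sources git -A
flux get kustomizations -A
```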
https://bash.ghost.io/secure-kubernetes-secrets-disaster-recovery-with-sops-gitops-fluxcd/
"Make a 4×4 grid starting with the 1880s. In each section, I should appear styled according to that decade (clothing, hairstyle, facial hair, accessories). Use colors, background, & film style accordingly."

12
justfile Normal file

@@ -0,0 +1,12 @@
set dotenv-filename := ".env.local"
export PATH := "./node_modules/.bin:" + env_var('PATH')
[private]
default:
@just --list --unsorted --list-submodules
mod env
mod BasicSetup '01_Basic_Setup'
mod MetalLbSetup 'Metallb_Setup'
mod Traefik


@@ -5,15 +5,66 @@ Here are the steps to create a Persistent Volume for Longhorn:
### 1. Make sure Longhorn is installed
First, make sure Longhorn is installed on your cluster. If it is not installed yet, you can install it with Helm or directly from the YAML files.
#### Node Labeling
Use this when not all nodes should provide disks, e.g. when only certain nodes have special/fast disks.
In that case the StorageClass needs to be adapted with a node selector [1].
```
k label nodes k3s-prod-worker-{1..3} node.longhorn.io/create-default-disk=true
```
[1] https://longhorn.io/kb/tip-only-use-storage-on-a-set-of-nodes/
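To confirm which nodes carry the label (and will therefore receive a default Longhorn disk), a quick check:
```
kubectl get nodes -l node.longhorn.io/create-default-disk=true
```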
#### With Helm:
```bash
helm repo add longhorn https://charts.longhorn.io
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --values longhorn-values.yaml
```
#### With kubectl:
```bash
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.2.2/deploy/install.yaml
```
#### Adding additional disks
https://medium.com/btech-engineering/longhorn-storage-solution-for-kubernetes-cluster-645bc1b98a5e
Add a disk to the VM in Proxmox; it shows up on the worker node as below. Run on the worker node:
```
$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 30G 0 disk
├─sda1 8:1 0 29G 0 part /
├─sda14 8:14 0 4M 0 part
├─sda15 8:15 0 106M 0 part /boot/efi
└─sda16 259:0 0 913M 0 part /boot
sdb 8:16 0 250G 0 disk
sr0 11:0 1 4M 0 rom
```
Partition, format, and mount the new disk `/dev/sdb`:
```
fdisk /dev/sdb
# Hit n(new), p(primary), Enter, Enter
# w(write to disk and exit)
mkfs.ext4 /dev/sdb1
mkdir /mnt/nvmedisk1
nano /etc/fstab
# add the following line:
/dev/sdb1 /mnt/nvmedisk1 ext4 defaults 0 0
systemctl daemon-reload
mount -a
```
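Before adding the disk in the Longhorn UI, verify the partition and mount on the worker node (a sketch):
```
lsblk /dev/sdb
df -h /mnt/nvmedisk1
```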
### Check via UI
```
k port-forward svc/longhorn-frontend 8000:80 -n longhorn-system
```
### 2. Create a PersistentVolume (PV) and a PersistentVolumeClaim (PVC)


@@ -7,7 +7,7 @@ metadata:
spec:
secretName: longhorn-web-ui-tls
dnsNames:
- longhorn.k8s.internal.schnrbs.work
- longhorn-dashboard.k8s.schnrbs.work
issuerRef:
name: cloudflare-cluster-issuer
kind: ClusterIssuer


@@ -1,18 +1,16 @@
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: longhorn-web-ui
name: longhorn-ingress-route
namespace: longhorn-system
spec:
properties:
entrypoints:
- websecure
entryPoints:
- websecure
routes:
- match: Host(`longhorn.k8s.internal.schnrbs.work`)
- match: Host(`longhorn-dashboard.k8s.schnrbs.work`)
kind: Rule
services:
- name: longhorn-frontend
port: 80
tls:
secretName: longhorn-web-ui-tls
tls:
secretName: longhorn-web-ui-tls


@@ -1,2 +1,36 @@
global:
nodeSelector:
node.longhorn.io/create-default-disk: "true"
service:
ui:
type: NodePort
nodePort: 30050
manager:
type: ClusterIP
# Replica count for the default Longhorn StorageClass.
persistence:
defaultClass: false
defaultFsType: ext4
defaultClassReplicaCount: 2
reclaimPolicy: Delete
# Replica counts for CSI Attacher, Provisioner, Resizer, Snapshotter
csi:
attacherReplicaCount: 2
provisionerReplicaCount: 2
resizerReplicaCount: 2
snapshotterReplicaCount: 2
# Default replica count and storage path
defaultSettings:
upgradeChecker: false
kubernetesClusterAutoscalerEnabled: false
allowCollectingLonghornUsageMetrics: false
createDefaultDiskLabeledNodes: true
defaultReplicaCount: 2
defaultDataPath: "/k8s-data"
longhornUI:
replicas: 1

7
mise.toml Normal file

@@ -0,0 +1,7 @@
[tools]
jq = '1.8.1'
k3sup = '0.13.11'
helm = '3.19.0'
gum = '0.16.2'
gomplate = '4.3.3'
just = "1.42.4"


@@ -11,7 +11,10 @@ Use for `helm` values:
https://github.com/cablespaghetti/k3s-monitoring/blob/master/kube-prometheus-stack-values.yaml
```
helm upgrade --install prometheus prometheus-community/kube-prometheus-stack --create-namespace --namespace monitoring --values kube-prometheus-stack-values.yaml
helm upgrade --install prometheus prometheus-community/kube-prometheus-stack \
--create-namespace \
--namespace monitoring \
--values kube-prometheus-stack-values.yaml
```
Accessing UIs via PortForwarding
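A hedged sketch for the port-forwards; the service names assume the release name `prometheus` used above and may differ, so check with `kubectl get svc -n monitoring` first:
```
kubectl get svc -n monitoring
kubectl port-forward svc/prometheus-kube-prometheus-prometheus 9090:9090 -n monitoring
kubectl port-forward svc/prometheus-grafana 3000:80 -n monitoring
```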


@@ -0,0 +1,19 @@
NAME: kube-prometheus-stack
LAST DEPLOYED: Wed Jun 11 19:32:51 2025
NAMESPACE: monitoring
STATUS: deployed
REVISION: 1
NOTES:
kube-prometheus-stack has been installed. Check its status by running:
kubectl --namespace monitoring get pods -l "release=kube-prometheus-stack"
Get Grafana 'admin' user password by running:
kubectl --namespace monitoring get secrets kube-prometheus-stack-grafana -o jsonpath="{.data.admin-password}" | base64 -d ; echo
Access Grafana local instance:
export POD_NAME=$(kubectl --namespace monitoring get pod -l "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=kube-prometheus-stack" -oname)
kubectl --namespace monitoring port-forward $POD_NAME 3000
Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.

37
statefulset/depl.yaml Normal file

@@ -0,0 +1,37 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: web
namespace: test
spec:
selector:
matchLabels:
app: nginx # has to match .spec.template.metadata.labels
serviceName: "nginx"
replicas: 3 # by default is 1
minReadySeconds: 10 # by default is 0
template:
metadata:
labels:
app: nginx # has to match .spec.selector.matchLabels
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: registry.k8s.io/nginx-slim:0.24
ports:
- containerPort: 80
name: web
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html
volumeClaimTemplates:
- metadata:
name: www
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "longhorn"
resources:
requests:
storage: 1Gi

18
statefulset/svc.yaml Normal file

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
name: nginx
namespace: test
labels:
app: nginx
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: nginx
template:
spec:
nodeSelector:
node.longhorn.io/create-default-disk: "true" # this is required to create a disk on the node