Compare commits

...

18 Commits

Author SHA1 Message Date
baschno
6abe5d1a8f optiona 2025-11-22 19:39:35 +01:00
baschno
67a6c414f2 updating ip range 2025-11-22 19:39:26 +01:00
baschno
08212c26a6 taint 2025-11-22 09:33:41 +01:00
baschno
e4adbfd0b2 add few links 2025-08-31 17:16:55 +02:00
baschno
d7db562a23 helm and flux 2025-08-22 18:10:24 +02:00
baschno
7896130d05 longhorn nodeselector doku 2025-08-21 21:07:31 +02:00
baschno
efcb4ee172 . 2025-08-20 21:50:18 +02:00
baschno
f58fad216a add prometheus helm 2025-08-20 19:27:05 +02:00
baschno
90e0de0804 add reloader component 2025-08-20 19:27:05 +02:00
baschno
8cb83ffd9c updsate 2025-08-11 20:31:16 +02:00
baschno
cca6f599d5 add statefulset stuff 2025-06-13 21:26:58 +02:00
baschno
506a199c95 longorn other namespace 2025-06-13 21:26:58 +02:00
baschno
d2a16bd55b helm prometheus 2025-06-09 19:24:40 +02:00
baschno
d25c9227c7 longhorn configure additional disk 2025-06-08 23:09:39 +02:00
baschno
45c61d5130 streamlined homepage deployment 2025-05-23 19:46:11 +02:00
baschno
82c19ff12c updating steps for traefik 2025-05-23 19:10:27 +02:00
baschno
9695376a0a adding pihole to homepage 2025-05-19 21:48:21 +02:00
baschno
84fd560675 update docu 2025-05-19 21:47:01 +02:00
18 changed files with 269 additions and 30 deletions

View File

@@ -34,4 +34,9 @@ Rancher Installation
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
# Prevent scheduling on master (optional)
```
kubectl taint nodes master node-role.kubernetes.io/master=:NoSchedule
```

View File

@@ -35,8 +35,25 @@ data:
key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI3MTA1ZmE1MDA5ZTA0MDQxYTc0NzUxZmUwM2NhYWMwZiIsImlhdCI6MTc0NTcxMDY3OCwiZXhwIjoyMDYxMDcwNjc4fQ.EI6-Husovb1IYpVn5RBy8pJ7bcESQHDzIbS22_5abUs key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI3MTA1ZmE1MDA5ZTA0MDQxYTc0NzUxZmUwM2NhYWMwZiIsImlhdCI6MTc0NTcxMDY3OCwiZXhwIjoyMDYxMDcwNjc4fQ.EI6-Husovb1IYpVn5RBy8pJ7bcESQHDzIbS22_5abUs
- Zigbee2MQTT: - Zigbee2MQTT:
icon: zigbee2mqtt.png icon: zigbee2mqtt.png
href: http://muckibude.fritz.box:8383/#/ href: http://muckibude.fritz.box:8383
description: Zigbee2MQTT is awesome description: Zigbee2MQTT is awesome
- Pihole:
icon: pi-hole.png
href: http://pi.hole
description: Pi-hole
widgets:
- type: pihole
url: http://pi.hole
version: 6
key: 5ipI9bvB
- Paperless NGX:
icon: paperless-ng.png
href: https://ppl.homeee.schnorbus.net
widgets:
- type: paperlessngx
url: https://ppl.homeee.schnorbus.net
token: 0cf8eb062d0ecfc0aa70611125427692cb577d68
- My Second Group: - My Second Group:
- Proxmox pve-81: - Proxmox pve-81:
@@ -51,20 +68,33 @@ data:
icon: proxmox.png icon: proxmox.png
href: https://pve-83.fritz.box:8006 href: https://pve-83.fritz.box:8006
description: Homepage is the best description: Homepage is the best
widgets: # widgets:
- type: proxmox # - type: proxmox
url: https://pve-83.fritz.box:8006 # url: https://pve-83.fritz.box:8006
username: homepage_api@pam!homepage_api # username: homepage_api@pam!homepage_api
password: 7676925b-3ed4-4c8b-9df5-defb4a9a0871 # password: 0cf8eb062d0ecfc0aa70611125427692cb577d68
- Longhorn:
icon: longhorn.png
href: https://longhorn-dashboard.k8s.schnrbs.work
description: Longhorn volume provisioning
- Party Time: - Party Time:
- Immich:
icon: immich.png
href: https://immich.homeee.schnorbus.net
description: Immich is awesome
widgets:
- type: immich
url: https://immich.homeee.schnorbus.net
key: deOT6z7AHok30eKWgF2bOSJuOIZXK0eONo7PrR0As
version: 2
- Linkwarden: - Linkwarden:
icon: linkwarden.png icon: linkwarden.png
href: https://lw.homeee.schnorbus.net href: https://lw.homeee.schnorbus.net
description: Homepage is 😎 description: Homepage isssss 😎
widgets: widgets:
- type: linkwarden - type: linkwarden
url: https://lw.homeee.schnorbus.net url: http://docker-host-02.fritz.box:9595
key: eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..bEvs2PcR0ZTNpb8b.Lhe1-00LlVVC97arojvhh7IK4VADR82AMAzK5sd7AcUhs2WUQmu8Q-cOAKFGVlgPgdk-w1Pa8CJJHF71opWJk85aJXkTcdl7jANwN8PqgHXsSPoqtvzX.5GFRIAMo31sw5GStVlznHQ key: eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..bEvs2PcR0ZTNpb8b.Lhe1-00LlVVC97arojvhh7IK4VADR82AMAzK5sd7AcUhs2WUQmu8Q-cOAKFGVlgPgdk-w1Pa8CJJHF71opWJk85aJXkTcdl7jANwN8PqgHXsSPoqtvzX.5GFRIAMo31sw5GStVlznHQ
- Nginx Proxy Manager: - Nginx Proxy Manager:
icon: nginx-proxy-manager.png icon: nginx-proxy-manager.png

View File

@@ -1,6 +1,35 @@
Install via helm: ## Installation
### Install via helm
https://gethomepage.dev/installation/k8s/#install-with-helm https://gethomepage.dev/installation/k8s/#install-with-helm
```
helm upgrade --install homepage jameswynn/homepage -f homepage-values.yaml --create-namespace --namespace homepage helm upgrade --install homepage jameswynn/homepage -f homepage-values.yaml --create-namespace --namespace homepage
```
### Install via deployment
```
k create ns homepage
k apply -f 01_homepage-deployment.yaml
```
## Setup Https & Certificate
```
k apply -f 02_homepage-certificate.yaml
k apply -f 03_homepage-ingress-route.yaml
```
## Upload Content
```
k apply -f 04_homepage-configmap.yaml
```
## Test
Open Browser and navigate to:
https://homepage.k8s.schnrbs.work

View File

@@ -2,7 +2,7 @@ apiVersion: v1
kind: PersistentVolume kind: PersistentVolume
metadata: metadata:
name: longhorn-test-pv name: longhorn-test-pv
namespace: default namespace: test
spec: spec:
capacity: capacity:
storage: 10Gi # Setze die gewünschte Speichergröße storage: 10Gi # Setze die gewünschte Speichergröße

View File

@@ -2,7 +2,7 @@ apiVersion: v1
kind: PersistentVolumeClaim kind: PersistentVolumeClaim
metadata: metadata:
name: longhorn-test-pvc name: longhorn-test-pvc
namespace: default namespace: test
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce

10
12_reloader/README.md Normal file
View File

@@ -0,0 +1,10 @@
helm install reloader stakater/reloader --namespace reloader --create-namespace
flux create source helm stakater --url https://stakater.github.io/stakater-charts --namespace reloader
flux create helmrelease my-reloader --chart reloader \
--source HelmRepository/stakater \
--chart-version 2.1.3 \
--namespace reloader

View File

@@ -6,4 +6,4 @@ metadata:
spec: spec:
addresses: addresses:
# - 192.168.178.220-192.168.178.225 #pve-82 # - 192.168.178.220-192.168.178.225 #pve-82
- 192.168.178.226-192.168.178.240 #pve-83 - 192.168.178.160-192.168.178.180 #pve-83

View File

@@ -1,5 +1,13 @@
Metallb Installation Metallb Installation
## Used IP Range
Metallb will advertise IPs of the range:
192.168.178.160-192.168.178.180
The first address (x.x.x.160) is assigned to the traefik reverse-proxy service.
https://canthonyscott.com/setting-up-a-k3s-kubernetes-cluster-within-proxmox/ https://canthonyscott.com/setting-up-a-k3s-kubernetes-cluster-within-proxmox/
Following https://metallb.universe.tf/installation/ (0.14.3) Following https://metallb.universe.tf/installation/ (0.14.3)

View File

@@ -23,11 +23,40 @@ can be used throughout the whole cluster, not limited to a specific namespace.
i.e. general issuer for all namespaces in cluster. i.e. general issuer for all namespaces in cluster.
## Troubleshooting steps ## Test Deployment
k create ns test
kubectl create deploy nginx --image=nginx -n test kubectl create deploy nginx --image=nginx -n test
k create svc -n test clusterip nginx --tcp=80 k create svc -n test clusterip nginx --tcp=80
k scale --replicas=3 deployment/nginx -n test k scale --replicas=3 deployment/nginx -n test
## Install Traefik & Cert-Manager
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
traefik-dashboard.k8s.schnrbs.work
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml
k apply -f cert-manager-issuer-secret.yaml
k get secret -n cert-manager
k apply -f cert-manager-cluster-issuer.yaml
## Switch Test Deployment to https
k apply -f test/nginx-certificate.yaml
k apply -f test/nginx-ingress.yaml
## Troubleshooting steps
k get po -n test -o wide k get po -n test -o wide
k create svc -n test clusterip nginx k create svc -n test clusterip nginx
k create svc -n test clusterip nginx --tcp=80 k create svc -n test clusterip nginx --tcp=80
@@ -41,28 +70,23 @@ k apply -f traefik_lempa/nginx-ingress.yaml
k get svc -n test k get svc -n test
k get ingress k get ingress
k get ingress -n test k get ingress -n test
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
k get svc ingressRoute k get svc ingressRoute
k get svc ingressRoutes k get svc ingressRoutes
k get svc ingressroutes.traefik.io k get svc ingressroutes.traefik.io
k get ingressroutes.traefik.io --all-namespaces k get ingressroutes.traefik.io --all-namespaces
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
exit cert-manager-values.yaml
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values traefik_lempa/cert-manager-values.yaml
echo -n 'P96My4uiHudZtiC2ymjSGQ0174CoRBnI9ztmA0Wh' | base64 echo -n 'P96My4uiHudZtiC2ymjSGQ0174CoRBnI9ztmA0Wh' | base64
k get po k get po
alias k=kubectl alias k=kubectl
k get po k get po
k apply traefik_lempa/cert-manager-issuer-secret.yaml
k apply -f traefik_lempa/cert-manager-issuer-secret.yaml k apply -f traefik_lempa/cert-manager-issuer-secret.yaml
k get secret k get secret
k get secrets k get secrets
k get secret -n cert-manager
k apply -f traefik_lempa/cert-manager-cluster-issuer.yaml
k get clusterissuers.cert-manager.io k get clusterissuers.cert-manager.io
k apply -f traefik_lempa/nginx-certificate.yaml
k apply -f traefik_lempa/nginx-ingress.yaml
k apply -f traefik_lempa/cert-manager-cluster-issuer.yaml

16
gitops/README.md Normal file
View File

@@ -0,0 +1,16 @@
https://www.reddit.com/r/GitOps/comments/1ih3b4a/discussion_setting_up_fluxcd_on_k3s_for_home_labs/
https://bash.ghost.io/k8s-home-lab-gitops-with-fluxcd/
# Setup using internal Gitea server
## Create a Gitea personal access token and export it as an env var
```
export GITEA_TOKEN=<my-token>
```
## Bootstrap
```
flux bootstrap gitea --repository=k3s-homelab --branch=main --personal --owner baschno --hostname gitty.homeee.schnorbus.net --ssh-hostname=gitty.fritz.box:2221 --verbose --path=./clusters/homelab
```
https://bash.ghost.io/secure-kubernetes-secrets-disaster-recovery-with-sops-gitops-fluxcd/

View File

@@ -8,11 +8,12 @@ Zuerst solltest du sicherstellen, dass Longhorn auf deinem Cluster installiert i
#### Node Labeling #### Node Labeling
In the case not all nodes should provide disk In the case not all nodes should provide disk, e.g. certain nodes have special/fast disks.
In this case the StorageClass needs to be adapted and added with a nodeselector [1].
``` ```
k label nodes k3s-prod-worker-{1..3} node.longhorn.io/create-default-disk=true k label nodes k3s-prod-worker-{1..3} node.longhorn.io/create-default-disk=true
``` ```
[1] https://longhorn.io/kb/tip-only-use-storage-on-a-set-of-nodes/
#### Mit Helm: #### Mit Helm:
@@ -21,6 +22,45 @@ helm repo add longhorn https://charts.longhorn.io
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --values longhorn-values.yaml helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --values longhorn-values.yaml
``` ```
#### Adding additional disks
https://medium.com/btech-engineering/longhorn-storage-solution-for-kubernetes-cluster-645bc1b98a5e
Add disk in Proxmox, which appears as:
Run in worker node:
```
$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 30G 0 disk
├─sda1 8:1 0 29G 0 part /
├─sda14 8:14 0 4M 0 part
├─sda15 8:15 0 106M 0 part /boot/efi
└─sda16 259:0 0 913M 0 part /boot
sdb 8:16 0 250G 0 disk
sr0 11:0 1 4M 0 rom
```
Partition, format and mount the new disk `/dev/sdb`:
```
fdisk /dev/sdb
# Hit n(new), p(primary), Enter, Enter
# w(write to disk and exit)
mkfs.ext4 /dev/sdb1
mkdir /mnt/nvmedisk1
nano /etc/fstab
# add this line:
/dev/sdb1 /mnt/nvmedisk1 ext4 defaults 0 2
systemctl daemon-reload
mount -a
```
### Check via UI ### Check via UI
``` ```

View File

@@ -11,7 +11,10 @@ Use for `helm` values:
https://github.com/cablespaghetti/k3s-monitoring/blob/master/kube-prometheus-stack-values.yaml https://github.com/cablespaghetti/k3s-monitoring/blob/master/kube-prometheus-stack-values.yaml
``` ```
helm upgrade --install prometheus prometheus-community/kube-prometheus-stack --create-namespace --namespace monitoring --values kube-prometheus-stack-values.yaml helm upgrade --install prometheus prometheus-community/kube-prometheus-stack \
--create-namespace \
--namespace monitoring \
--values kube-prometheus-stack-values.yaml
``` ```
Accessing UIs via PortForwarding Accessing UIs via PortForwarding

View File

@@ -0,0 +1,19 @@
NAME: kube-prometheus-stack
LAST DEPLOYED: Wed Jun 11 19:32:51 2025
NAMESPACE: monitoring
STATUS: deployed
REVISION: 1
NOTES:
kube-prometheus-stack has been installed. Check its status by running:
kubectl --namespace monitoring get pods -l "release=kube-prometheus-stack"
Get Grafana 'admin' user password by running:
kubectl --namespace monitoring get secrets kube-prometheus-stack-grafana -o jsonpath="{.data.admin-password}" | base64 -d ; echo
Access Grafana local instance:
export POD_NAME=$(kubectl --namespace monitoring get pod -l "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=kube-prometheus-stack" -oname)
kubectl --namespace monitoring port-forward $POD_NAME 3000
Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.

37
statefulset/depl.yaml Normal file
View File

@@ -0,0 +1,37 @@
---
# statefulset/depl.yaml — 3-replica nginx StatefulSet backed by Longhorn volumes.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
  namespace: test
spec:
  selector:
    matchLabels:
      app: nginx  # has to match .spec.template.metadata.labels
  serviceName: "nginx"  # headless service governing the pods' network identity
  replicas: 3  # by default is 1
  minReadySeconds: 10  # by default is 0
  template:
    metadata:
      labels:
        app: nginx  # has to match .spec.selector.matchLabels
    spec:
      terminationGracePeriodSeconds: 10
      containers:
        - name: nginx
          image: registry.k8s.io/nginx-slim:0.24
          ports:
            - containerPort: 80
              name: web
          volumeMounts:
            - name: www
              mountPath: /usr/share/nginx/html
  # One PVC per replica, dynamically provisioned by the Longhorn storage class.
  volumeClaimTemplates:
    - metadata:
        name: www
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: "longhorn"
        resources:
          requests:
            storage: 1Gi

18
statefulset/svc.yaml Normal file
View File

@@ -0,0 +1,18 @@
---
# statefulset/svc.yaml — headless Service providing stable DNS identities
# (web-0.nginx, web-1.nginx, ...) for the StatefulSet pods.
apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: test
  labels:
    app: nginx
spec:
  ports:
    - port: 80
      name: web
  clusterIP: None  # headless: per-pod DNS records, no virtual IP
  selector:
    app: nginx
# NOTE(review): the original file ended with a stray
#   template: / spec: / nodeSelector: node.longhorn.io/create-default-disk: "true"
# fragment. A Service has no .spec.template field, so the API server would
# reject (or silently drop) it; that nodeSelector belongs in the StatefulSet
# pod template (statefulset/depl.yaml) and has been removed here.