Compare commits

..

29 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| baschno | 6abe5d1a8f | optiona | 2025-11-22 19:39:35 +01:00 |
| baschno | 67a6c414f2 | updating ip range | 2025-11-22 19:39:26 +01:00 |
| baschno | 08212c26a6 | taint | 2025-11-22 09:33:41 +01:00 |
| baschno | e4adbfd0b2 | add few links | 2025-08-31 17:16:55 +02:00 |
| baschno | d7db562a23 | helm and flux | 2025-08-22 18:10:24 +02:00 |
| baschno | 7896130d05 | longhorn nodeselector doku | 2025-08-21 21:07:31 +02:00 |
| baschno | efcb4ee172 | . | 2025-08-20 21:50:18 +02:00 |
| baschno | f58fad216a | add prometheus helm | 2025-08-20 19:27:05 +02:00 |
| baschno | 90e0de0804 | add reloader component | 2025-08-20 19:27:05 +02:00 |
| baschno | 8cb83ffd9c | updsate | 2025-08-11 20:31:16 +02:00 |
| baschno | cca6f599d5 | add statefulset stuff | 2025-06-13 21:26:58 +02:00 |
| baschno | 506a199c95 | longorn other namespace | 2025-06-13 21:26:58 +02:00 |
| baschno | d2a16bd55b | helm prometheus | 2025-06-09 19:24:40 +02:00 |
| baschno | d25c9227c7 | longhorn configure additional disk | 2025-06-08 23:09:39 +02:00 |
| baschno | 45c61d5130 | streamlined homepage deployment | 2025-05-23 19:46:11 +02:00 |
| baschno | 82c19ff12c | updating steps for traefik | 2025-05-23 19:10:27 +02:00 |
| baschno | 9695376a0a | adding pihole to homepage | 2025-05-19 21:48:21 +02:00 |
| baschno | 84fd560675 | update docu | 2025-05-19 21:47:01 +02:00 |
| baschno | 5708f841e7 | add linkwarden to homepage | 2025-05-19 19:47:34 +02:00 |
| baschno | 97ef02c1da | adding proxmox widgets | 2025-04-27 12:53:14 +02:00 |
| baschno | 65e99a9f83 | enabling reloader component for homepage | 2025-04-27 01:56:36 +02:00 |
| baschno | 77ad59eae5 | fixing longhorn ui certificate | 2025-04-26 23:15:31 +02:00 |
| baschno | a13663754d | fix nginx pm icon | 2025-04-26 21:34:21 +02:00 |
| baschno | 5e30b1e83d | adding services to homepage | 2025-04-26 21:21:24 +02:00 |
| baschno | 5514b5687f | longhorn and echopod tests | 2025-04-26 19:57:56 +02:00 |
| baschno | a3404bba2b | homepage setup without helm | 2025-04-26 19:56:50 +02:00 |
| baschno | 0e4ddcefdf | longhorn nummer 2 | 2025-04-21 21:18:23 +02:00 |
| baschno | 12546a9669 | neu ist der mai | 2025-04-21 00:21:28 +02:00 |
| baschno | a6ac7b84e4 | savegame | 2025-04-10 22:56:27 +02:00 |
38 changed files with 956 additions and 147 deletions

View File

@@ -34,4 +34,9 @@ Rancher Installation
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
```
Prevent scheduling on the master node (optional):
```
kubectl taint nodes master node-role.kubernetes.io/master=:NoSchedule
```
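A quick check that the taint is in place (a sanity check, not part of the original steps):
```
kubectl describe node master | grep Taints
# expect: node-role.kubernetes.io/master=:NoSchedule
```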

View File

@@ -0,0 +1,180 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: homepage
namespace: homepage
labels:
app.kubernetes.io/name: homepage
secrets:
- name: homepage
---
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: homepage
namespace: homepage
labels:
app.kubernetes.io/name: homepage
annotations:
kubernetes.io/service-account.name: homepage
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: homepage
labels:
app.kubernetes.io/name: homepage
rules:
- apiGroups:
- ""
resources:
- namespaces
- pods
- nodes
verbs:
- get
- list
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- apiGroups:
- traefik.io
resources:
- ingressroutes
verbs:
- get
- list
- apiGroups:
- gateway.networking.k8s.io
resources:
- httproutes
- gateways
verbs:
- get
- list
- apiGroups:
- metrics.k8s.io
resources:
- nodes
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: homepage
labels:
app.kubernetes.io/name: homepage
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: homepage
subjects:
- kind: ServiceAccount
name: homepage
namespace: homepage
---
apiVersion: v1
kind: Service
metadata:
name: homepage
namespace: homepage
labels:
app.kubernetes.io/name: homepage
annotations:
spec:
type: ClusterIP
ports:
- port: 3000
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: homepage
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: homepage
namespace: homepage
labels:
app.kubernetes.io/name: homepage
annotations:
reloader.stakater.com/search: "true"
secret.reloader.stakater.com/reload: "homepage"
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: homepage
template:
metadata:
labels:
app.kubernetes.io/name: homepage
spec:
serviceAccountName: homepage
automountServiceAccountToken: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
containers:
- name: homepage
image: "ghcr.io/gethomepage/homepage:latest"
imagePullPolicy: Always
env:
- name: HOMEPAGE_ALLOWED_HOSTS
value: homepage.k8s.schnrbs.work # required, may need port. See gethomepage.dev/installation/#homepage_allowed_hosts
ports:
- name: http
containerPort: 3000
protocol: TCP
volumeMounts:
- mountPath: /app/config/custom.js
name: homepage-config
subPath: custom.js
- mountPath: /app/config/custom.css
name: homepage-config
subPath: custom.css
- mountPath: /app/config/bookmarks.yaml
name: homepage-config
subPath: bookmarks.yaml
- mountPath: /app/config/docker.yaml
name: homepage-config
subPath: docker.yaml
- mountPath: /app/config/kubernetes.yaml
name: homepage-config
subPath: kubernetes.yaml
- mountPath: /app/config/services.yaml
name: homepage-config
subPath: services.yaml
- mountPath: /app/config/settings.yaml
name: homepage-config
subPath: settings.yaml
- mountPath: /app/config/widgets.yaml
name: homepage-config
subPath: widgets.yaml
- mountPath: /app/config/logs
name: logs
volumes:
- name: homepage-config
configMap:
name: homepage
- name: logs
emptyDir: {}

View File

@@ -9,4 +9,4 @@ spec:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- homepage.k8s.internal.schnrbs.work
- homepage.k8s.schnrbs.work

View File

@@ -0,0 +1,24 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: homepage-ingress-route
namespace: homepage
labels:
app.kubernetes.io/name: homepage
annotations:
gethomepage.dev/description: Dynamically Detected Homepage
gethomepage.dev/enabled: "true"
gethomepage.dev/group: Cluster Management
gethomepage.dev/icon: homepage.png
gethomepage.dev/name: Homepage
spec:
entryPoints:
- websecure
routes:
- match: Host(`homepage.k8s.schnrbs.work`)
kind: Rule
services:
- name: homepage
port: 3000
tls:
secretName: homepage-certificate-secret

View File

@@ -0,0 +1,150 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: homepage
namespace: homepage
labels:
app.kubernetes.io/name: homepage
annotations:
reloader.stakater.com/match: "true"
data:
kubernetes.yaml: |
mode: cluster
settings.yaml: |
background: https://images.unsplash.com/photo-1502790671504-542ad42d5189?auto=format&fit=crop&w=2560&q=80
cardBlur: xs
providers:
longhorn:
url: https://longhorn-dashboard.k8s.schnrbs.work
custom.css: ""
custom.js: ""
bookmarks.yaml: |
- Developer:
- Github:
- abbr: GH
href: https://github.com/
services.yaml: |
- Smart Home:
- Home Assistant:
icon: home-assistant.png
href: https://ha.homeee.schnorbus.net
description: Home Assistant is awesome
widgets:
- type: homeassistant
url: https://ha.homeee.schnorbus.net
key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI3MTA1ZmE1MDA5ZTA0MDQxYTc0NzUxZmUwM2NhYWMwZiIsImlhdCI6MTc0NTcxMDY3OCwiZXhwIjoyMDYxMDcwNjc4fQ.EI6-Husovb1IYpVn5RBy8pJ7bcESQHDzIbS22_5abUs
- Zigbee2MQTT:
icon: zigbee2mqtt.png
href: http://muckibude.fritz.box:8383
description: Zigbee2MQTT is awesome
- Pihole:
icon: pi-hole.png
href: http://pi.hole
description: Pi-hole
widgets:
- type: pihole
url: http://pi.hole
version: 6
key: 5ipI9bvB
- Paperless NGX:
icon: paperless-ng.png
href: https://ppl.homeee.schnorbus.net
widgets:
- type: paperlessngx
url: https://ppl.homeee.schnorbus.net
token: 0cf8eb062d0ecfc0aa70611125427692cb577d68
- My Second Group:
- Proxmox pve-81:
icon: proxmox.png
href: http://pve-81.fritz.box:8006
description: Homepage is the best
- Proxmox pve-82:
icon: proxmox.png
href: http://pve-82.fritz.box:8006
description: Homepage is the best
- Proxmox pve-83:
icon: proxmox.png
href: https://pve-83.fritz.box:8006
description: Homepage is the best
# widgets:
# - type: proxmox
# url: https://pve-83.fritz.box:8006
# username: homepage_api@pam!homepage_api
# password: 0cf8eb062d0ecfc0aa70611125427692cb577d68
- Longhorn:
icon: longhorn.png
href: https://longhorn-dashboard.k8s.schnrbs.work
description: Longhorn volume provisioning
- Party Time:
- Immich:
icon: immich.png
href: https://immich.homeee.schnorbus.net
description: Immich is awesome
widgets:
- type: immich
url: https://immich.homeee.schnorbus.net
key: deOT6z7AHok30eKWgF2bOSJuOIZXK0eONo7PrR0As
version: 2
- Linkwarden:
icon: linkwarden.png
href: https://lw.homeee.schnorbus.net
description: Homepage isssss 😎
widgets:
- type: linkwarden
url: http://docker-host-02.fritz.box:9595
key: eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..bEvs2PcR0ZTNpb8b.Lhe1-00LlVVC97arojvhh7IK4VADR82AMAzK5sd7AcUhs2WUQmu8Q-cOAKFGVlgPgdk-w1Pa8CJJHF71opWJk85aJXkTcdl7jANwN8PqgHXsSPoqtvzX.5GFRIAMo31sw5GStVlznHQ
- Nginx Proxy Manager:
icon: nginx-proxy-manager.png
href: http://192.168.178.42:8181
description: Nginx Proxy Manager is awesome
widgets:
- type: npm
url: http://192.168.178.42:8181
username: bastian@schnorbus.net
password: abcd1234
- Plex:
icon: plex.png
href: http://diskstation.fritz.box:32400/web/index.html#!/
description: Watch movies and TV shows.
server: http://diskstation.fritz.box:32400/web/index.html#!/
container: plex
widgets:
- type: plex
url: http://diskstation.fritz.box:32400
key: aNcUss31qsVsea5bsDf9
widgets.yaml: |
- kubernetes:
cluster:
show: true
cpu: true
memory: true
showLabel: true
label: "cluster"
nodes:
show: true
cpu: true
memory: true
showLabel: true
- longhorn:
# Show the expanded view
expanded: true
# Shows a node representing the aggregate values
total: true
# Shows the node names as labels
labels: true
# Show the nodes
nodes: true
- resources:
backend: resources
expanded: true
cpu: true
memory: true
network: default
- search:
provider: duckduckgo
target: _blank
docker.yaml: ""

View File

@@ -1,6 +1,35 @@
Install via helm:
## Installation
### Install via helm
https://gethomepage.dev/installation/k8s/#install-with-helm
```
helm upgrade --install homepage jameswynn/homepage -f homepage-values.yaml --create-namespace --namespace homepage
```
### Install via deployment
```
k create ns homepage
k apply -f 01_homepage-deployment.yaml
```
## Setup HTTPS & Certificate
```
k apply -f 02_homepage-certificate.yaml
k apply -f 03_homepage-ingress-route.yaml
```
## Upload Content
```
k apply -f 04_homepage-configmap.yaml
```
## Test
Open a browser and navigate to:
https://homepage.k8s.schnrbs.work
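If the page does not come up, a few quick checks (assuming the manifests above were applied unchanged):
```
kubectl -n homepage get pods,svc
kubectl -n homepage get ingressroute   # Traefik CRD
kubectl -n homepage logs deploy/homepage
```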

View File

@@ -1,16 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: homepage-ingress-route
namespace: homepage
spec:
entryPoints:
- websecure
routes:
- match: Host(`homepage.k8s.internal.schnrbs.work`)
kind: Rule
services:
- name: homepage
port: 3000
tls:
secretName: homepage-certificate-secret

View File

@@ -1,72 +0,0 @@
config:
bookmarks:
- Developer:
- Github:
- abbr: GH
href: https://github.com/
services:
- My First Group:
- My First Service:
href: http://localhost/
description: Homepage is awesome
- My Second Group:
- My Second Service:
href: http://localhost/
description: Homepage is the best
- My Third Group:
- My Third Service:
href: http://localhost/
description: Homepage is 😎
widgets:
# show the kubernetes widget, with the cluster summary and individual nodes
- kubernetes:
cluster:
show: true
cpu: true
memory: true
showLabel: true
label: "cluster"
nodes:
show: true
cpu: true
memory: true
showLabel: true
- pihole:
show: true
url: http://192.168.178.202
key: 1eae9e87f4b4710981639ee591b7d75734811d61697092110cb748c3244e01cc
- fritzbox:
show: true
url: http://192.168.178.1
- search:
provider: duckduckgo
target: _blank
kubernetes:
mode: cluster
settings:
# The service account is necessary to allow discovery of other services
serviceAccount:
create: true
name: homepage
# This enables the service account to access the necessary resources
enableRbac: true
ingress:
main:
enabled: false
annotations:
# Example annotations to add Homepage to your Homepage!
gethomepage.dev/enabled: "true"
gethomepage.dev/name: "Homepage"
gethomepage.dev/description: "Dynamically Detected Homepage"
gethomepage.dev/group: "Dynamic"
gethomepage.dev/icon: "homepage.png"
hosts:
- host: homepage.k8s.internal.schnrbs.work
paths:
- path: /
pathType: Prefix

View File

@@ -2,7 +2,7 @@ apiVersion: v1
kind: PersistentVolume
metadata:
name: longhorn-test-pv
namespace: default
namespace: test
spec:
capacity:
    storage: 10Gi # set the desired storage size

View File

@@ -2,7 +2,7 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: longhorn-test-pvc
namespace: default
namespace: test
spec:
accessModes:
- ReadWriteOnce

12_reloader/README.md (new file)
View File

@@ -0,0 +1,10 @@
Install directly via Helm:
```
helm install reloader stakater/reloader --namespace reloader --create-namespace
```
Or manage it through Flux:
```
flux create source helm stakater --url https://stakater.github.io/stakater-charts --namespace reloader
flux create helmrelease my-reloader --chart stakater/reloader \
  --source HelmRepository/stakater \
  --chart-version 2.1.3 \
  --namespace reloader
```
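For reference, the simplest way to opt a workload into Reloader is the `reloader.stakater.com/auto` annotation; the sketch below uses a hypothetical `my-app` Deployment (the homepage deployment in this repo uses the more selective `search`/`match` annotation pair instead):
```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app # hypothetical example workload
  annotations:
    reloader.stakater.com/auto: "true" # restart when a mounted ConfigMap/Secret changes
spec:
  replicas: 1
  selector:
    matchLabels:
      app: my-app
  template:
    metadata:
      labels:
        app: my-app
    spec:
      containers:
      - name: app
        image: nginx:1.27
        envFrom:
        - configMapRef:
            name: my-app-config # editing this ConfigMap triggers a rolling restart
```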

View File

@@ -0,0 +1,2 @@
https://igeadetokunbo.medium.com/how-to-run-databases-on-kubernetes-an-8-step-guide-b75ce9117600

View File

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mysql
spec:
serviceName: "mysql"
replicas: 3
selector:
matchLabels:
app: mysql
template:
metadata:
labels:
app: mysql
spec:
containers:
- name: mysql
image: mysql:8.4.0-oraclelinux8
ports:
- containerPort: 3306
name: mysql
env:
- name: MYSQL_ROOT_PASSWORD
value: "your_password"
volumeMounts:
- name: mysql-storage
mountPath: /var/lib/mysql
volumeClaimTemplates:
- metadata:
name: mysql-storage
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 10Gi
storageClassName: longhorn

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: mysql-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: longhorn
hostPath:
path: /mnt/data # Specify a path in the host for storage

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mysql-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: longhorn

View File

@@ -0,0 +1,13 @@
# Headless service
apiVersion: v1
kind: Service
metadata:
name: mysql
labels:
app: mysql
spec:
  clusterIP: None # headless: clients resolve the pod IPs directly
ports:
- name: mysql
port: 3306
selector:
app: mysql
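Because the Service is headless (`clusterIP: None`), each StatefulSet pod gets a stable DNS name of the form `<pod>.<service>`. A quick lookup from inside the cluster (assuming the default namespace):
```
kubectl run -it --rm dnstest --image=busybox:1.36 --restart=Never -- \
  nslookup mysql-0.mysql.default.svc.cluster.local
```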

View File

@@ -133,6 +133,151 @@ spec:
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: zwavejs2mqtt-pvc
labels:
app: zwavejs2mqtt
namespace: home-assistant
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
      storage: 500Mi
---
apiVersion: v1
kind: Namespace
metadata:
name: home-assistant
---
apiVersion: v1
kind: Service
metadata:
namespace: home-assistant
name: home-assistant
spec:
selector:
app: home-assistant
type: ClusterIP
ports:
- name: http
protocol: TCP
port: 80
targetPort: 8123
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: home-assistant
name: home-assistant
labels:
app: home-assistant
spec:
replicas: 1
selector:
matchLabels:
app: home-assistant
template:
metadata:
labels:
app: home-assistant
spec:
containers:
- name: bluez
image: ghcr.io/mysticrenji/bluez-service:v1.0.0
securityContext:
privileged: true
- name: home-assistant
image: ghcr.io/mysticrenji/homeassistant-arm64:2023.3.0
resources:
requests:
memory: "256Mi"
limits:
memory: "512Mi"
ports:
- containerPort: 8123
volumeMounts:
- mountPath: /config
name: config
- mountPath: /config/configuration.yaml
subPath: configuration.yaml
name: configmap-file
- mountPath: /config/automations.yaml
subPath: automations.yaml
name: configmap-file
- mountPath: /media
name: media-volume
# - mountPath: /run/dbus
# name: d-bus
# readOnly: true
- mountPath: /dev/ttyUSB1
name: zigbee
#- mountPath: /dev/video0
# name: cam
securityContext:
privileged: true
capabilities:
add:
- NET_ADMIN
- NET_RAW
- SYS_ADMIN
hostNetwork: true
volumes:
- name: config
persistentVolumeClaim:
claimName: home-assistant-pvc
- name: media-volume
hostPath:
path: /tmp/media
- name: configmap-file
configMap:
name: home-assistant-configmap
# hostPath:
# path: /tmp/home-assistant
# type: DirectoryOrCreate
# - name: d-bus
# hostPath:
# path: /run/dbus
- name: zigbee
hostPath:
path: /dev/ttyACM0
#- name: cam
# hostPath:
# path: /dev/video0
---
kind: ConfigMap
apiVersion: v1
metadata:
name: home-assistant-configmap
namespace: home-assistant
data:
known_devices.yaml: |
automations.yaml: |
configuration.yaml: |-
default_config:
frontend:
themes: !include_dir_merge_named themes
automation: !include automations.yaml
http:
use_x_forwarded_for: true
trusted_proxies:
- 10.10.0.0/16
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: home-assistant-pvc
labels:
app: home-assistant
namespace: home-assistant
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 9Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: zwavejs2mqtt-pvc
labels:

View File

@@ -5,4 +5,5 @@ metadata:
namespace: metallb-system
spec:
addresses:
- 192.168.178.220-192.168.178.250
# - 192.168.178.220-192.168.178.225 #pve-82
- 192.168.178.160-192.168.178.180 #pve-83

View File

@@ -1,8 +1,16 @@
Metallb Installation
## Used IP Range
MetalLB will advertise IPs from the range:
192.168.178.226-192.168.178.240
The first address, x.x.x.226, is assigned to the Traefik reverse proxy deployment.
https://canthonyscott.com/setting-up-a-k3s-kubernetes-cluster-within-proxmox/
Following https://metallb.universe.tf/installation/ (0.14.3)
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.3/config/manifests/metallb-native.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.9/config/manifests/metallb-native.yaml
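In L2 mode, MetalLB only answers ARP for a pool that is referenced by an L2Advertisement; a minimal sketch (the pool name `first-pool` is an assumption; use the name from the IPAddressPool manifest above):
```
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: homelab-l2
  namespace: metallb-system
spec:
  ipAddressPools:
  - first-pool # assumed; must match the IPAddressPool's metadata.name
```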

View File

@@ -27,7 +27,8 @@ kubectl expose deploy schnipo --port=80 --target-port=8080 --type=LoadBalancer -
```
# Create deployment
kubectl create deploy nginx --image=nginx
k create ns test
kubectl create deploy nginx --image=nginx -n test
kubectl scale --replicas=3 deployment/nginx -n test

View File

@@ -7,11 +7,56 @@
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
## Troubleshooting steps
## Cert-Manager
cert-manager will be used because it stores certificates in a Secret, making them accessible to every pod.
Traefik, by contrast, stores certificates on disk, so a volume in RWX mode would be needed (too much effort).
### Issuer - CA
An issuer represents a CA. It comes in two kinds:
#### Issuer
Can only be used in the namespace it is created in.
#### Cluster Issuer
Can be used throughout the whole cluster, not limited to a specific namespace,
i.e. a general issuer for all namespaces in the cluster.
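A sketch of what the `cloudflare-cluster-issuer` used below could look like; the ACME email, secret names, and key are assumptions, the real manifest lives in cert-manager-cluster-issuer.yaml:
```
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: cloudflare-cluster-issuer
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: admin@example.com # assumed
    privateKeySecretRef:
      name: cloudflare-cluster-issuer-account-key # assumed
    solvers:
    - dns01:
        cloudflare:
          apiTokenSecretRef:
            name: cloudflare-issuer-secret # assumed; created from cert-manager-issuer-secret.yaml
            key: api-token # assumed
```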
## Test Deployment
k create ns test
kubectl create deploy nginx --image=nginx -n test
k create svc -n test clusterip nginx --tcp=80
k scale --replicas=3 deployment/nginx -n test
## Install Traefik & Cert-Manager
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
traefik-dashboard.k8s.schnrbs.work
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml
k apply -f cert-manager-issuer-secret.yaml
k get secret -n cert-manager
k apply -f cert-manager-cluster-issuer.yaml
## Switch Test Deployment to https
k apply -f test/nginx-certificate.yaml
k apply -f test/nginx-ingress.yaml
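To verify the certificate was issued before testing in the browser (resource names depend on the manifests in test/):
```
k get certificate -n test # READY should turn True
curl -I https://nginx-test.k8s.schnrbs.work
```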
## Troubleshooting steps
k get po -n test -o wide
k create svc -n test clusterip nginx
k create svc -n test clusterip nginx --tcp=80
@@ -25,41 +70,23 @@ k apply -f traefik_lempa/nginx-ingress.yaml
k get svc -n test
k get ingress
k get ingress -n test
git staus
git status
git diff
git commit -am "wip thing"
git checkout master
git pull --rebase
git merge wip
git push
git log
git checkout master
cd traefik_lempa
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
cd ..
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
k get svc ingressRoute
k get svc ingressRoutes
k get svc ingressroutes.traefik.io
k get svc ingressroutes.traefik.io --all-namespaces
k get ingressroutes.traefik.io --all-namespaces
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
exit
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values traefik_lempa/cert-manager-values.yaml
cert-manager-values.yaml
echo -n 'P96My4uiHudZtiC2ymjSGQ0174CoRBnI9ztmA0Wh' | base64
k get po
alias k=kubectl
k get po
k apply traefik_lempa/cert-manager-issuer-secret.yaml
k apply -f traefik_lempa/cert-manager-issuer-secret.yaml
k get secret
k get secrets
k get secret -n cert-manager
k apply -f traefik_lempa/cert-manager-cluster-issuer.yaml
k get clusterissuers.cert-manager.io
k apply -f traefik_lempa/nginx-certificate.yaml
k apply -f traefik_lempa/nginx-ingress.yaml
k apply -f traefik_lempa/cert-manager-cluster-issuer.yaml

View File

@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: schnipo-ingress-certificate
namespace: dishes
spec:
secretName: schnipo-certificate-secret
issuerRef:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- schnipo.k8s.schnrbs.work

View File

@@ -0,0 +1,16 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: schnipo-ingress-route
namespace: dishes
spec:
entryPoints:
- websecure
routes:
- match: Host(`schnipo.k8s.schnrbs.work`)
kind: Rule
services:
- name: schnipo
port: 8080
tls:
secretName: schnipo-certificate-secret

View File

@@ -9,4 +9,4 @@ spec:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- nginx-test.k8s.internal.schnrbs.work
- nginx-test.k8s.schnrbs.work

View File

@@ -7,7 +7,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`nginx-test.k8s.internal.schnrbs.work`)
- match: Host(`nginx-test.k8s.schnrbs.work`)
kind: Rule
services:
- name: nginx

View File

@@ -7,7 +7,7 @@ metadata:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
rules:
- host: nginx-test.k8s.internal.schnrbs.work
- host: nginx-test.k8s.schnrbs.work
http:
paths:
- path: /
@@ -19,5 +19,5 @@ spec:
number: 80
tls:
- hosts:
- nginx-test.k8s.internal.schnrbs.work
- nginx-test.k8s.schnrbs.work
secretName: nginx-certificate-secret

View File

@@ -1,10 +1,15 @@
ports:
web:
redirectTo:
port: websecure
redirections:
entryPoint:
to: websecure
scheme: https
logs:
general:
level: DEBUG
ingressRoute:
dashboard:
enabled: true
entryPoints: [web, websecure]
matchRule: Host(`traefik-dashboard.k8s.redacted`)
matchRule: Host(`traefik-dashboard.k8s.schnrbs.work`)

View File

@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: echopod-ingress-certificate
namespace: default
spec:
secretName: echopod-certificate-secret
issuerRef:
name: cloudflare-cluster-issuer
kind: ClusterIssuer
dnsNames:
- echopod.k8s.schnrbs.work

View File

@@ -30,10 +30,30 @@ kind: Service
metadata:
name: echopod-service
spec:
type: NodePort # Change to LoadBalancer if using a cloud provider
  type: LoadBalancer # external IP assigned by MetalLB
# type: NodePort # Change to LoadBalancer if using a cloud provider
ports:
- port: 80
targetPort: 80
nodePort: 30080 # Port to expose on the node
# nodePort: 30080 # Port to expose on the node
selector:
app: echopod
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: echopod-ingress-route
namespace: default
spec:
entryPoints:
- websecure
routes:
- match: Host(`echopod.k8s.schnrbs.work`)
kind: Rule
services:
- name: echopod-service
port: 80
tls:
secretName: echopod-certificate-secret

gitops/README.md (new file)
View File

@@ -0,0 +1,16 @@
https://www.reddit.com/r/GitOps/comments/1ih3b4a/discussion_setting_up_fluxcd_on_k3s_for_home_labs/
https://bash.ghost.io/k8s-home-lab-gitops-with-fluxcd/
# Setup using internal Gitea server
## Create a Gitea personal access token and export it as an env var
```
export GITEA_TOKEN=<my-token>
```
## Bootstrap
```
flux bootstrap gitea --repository=k3s-homelab --branch=main --personal --owner baschno --hostname gitty.homeee.schnorbus.net --ssh-hostname=gitty.fritz.box:2221 --verbose --path=./clusters/homelab
```
https://bash.ghost.io/secure-kubernetes-secrets-disaster-recovery-with-sops-gitops-fluxcd/
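To confirm the bootstrap succeeded:
```
flux check
flux get sources git
flux get kustomizations
```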

View File

@@ -5,15 +5,66 @@ Here are the steps to create a Persistent Volume for Longhorn:
### 1. Make sure Longhorn is installed
First, make sure Longhorn is installed on your cluster. If it is not installed yet, you can install it with Helm or directly from the YAML files.
#### Node Labeling
If not all nodes should provide disks (e.g. only certain nodes have special/fast disks), the StorageClass needs to be adapted with a nodeSelector [1]. Label the nodes that should serve storage:
```
k label nodes k3s-prod-worker-{1..3} node.longhorn.io/create-default-disk=true
```
[1] https://longhorn.io/kb/tip-only-use-storage-on-a-set-of-nodes/
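A quick check of which nodes carry the label:
```
kubectl get nodes -l node.longhorn.io/create-default-disk=true
```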
#### With Helm:
```bash
helm repo add longhorn https://charts.longhorn.io
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --values longhorn-values.yaml
```
#### With kubectl:
```bash
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.2.2/deploy/install.yaml
```
#### Adding additional disks
https://medium.com/btech-engineering/longhorn-storage-solution-for-kubernetes-cluster-645bc1b98a5e
Add a disk to the VM in Proxmox; it shows up on the worker node as a new block device. Run on the worker node:
```
$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 30G 0 disk
├─sda1 8:1 0 29G 0 part /
├─sda14 8:14 0 4M 0 part
├─sda15 8:15 0 106M 0 part /boot/efi
└─sda16 259:0 0 913M 0 part /boot
sdb 8:16 0 250G 0 disk
sr0 11:0 1 4M 0 rom
```
Partition, format, and mount /dev/sdb:
```
fdisk /dev/sdb
# Hit n(new), p(primary), Enter, Enter
# w(write to disk and exit)
mkfs.ext4 /dev/sdb1
mkdir /mnt/nvmedisk1
nano /etc/fstab
# add the line:
/dev/sdb1 /mnt/nvmedisk1 ext4 defaults 0 0
systemctl daemon-reload
mount -a
```
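Verify the mount; after that the disk still has to be registered in Longhorn (e.g. via the UI under Node > Edit Node and Disks):
```
df -h /mnt/nvmedisk1
```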
### Check via UI
```
k port-forward svc/longhorn-frontend 8000:80 -n longhorn-system
```
### 2. Create a PersistentVolume (PV) and a PersistentVolumeClaim (PVC)

View File

@@ -7,7 +7,7 @@ metadata:
spec:
secretName: longhorn-web-ui-tls
dnsNames:
- longhorn.k8s.internal.schnrbs.work
- longhorn-dashboard.k8s.schnrbs.work
issuerRef:
name: cloudflare-cluster-issuer
kind: ClusterIssuer

View File

@@ -1,15 +1,13 @@
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: longhorn-web-ui
name: longhorn-ingress-route
namespace: longhorn-system
spec:
properties:
entrypoints:
entryPoints:
- websecure
routes:
- match: Host(`longhorn.k8s.internal.schnrbs.work`)
- match: Host(`longhorn-dashboard.k8s.schnrbs.work`)
kind: Rule
services:
- name: longhorn-frontend

View File

@@ -1,2 +1,36 @@
global:
nodeSelector:
node.longhorn.io/create-default-disk: "true"
service:
ui:
type: NodePort
nodePort: 30050
manager:
type: ClusterIP
# Replica count for the default Longhorn StorageClass.
persistence:
defaultClass: false
defaultFsType: ext4
defaultClassReplicaCount: 2
reclaimPolicy: Delete
# Replica counts for CSI Attacher, Provisioner, Resizer, Snapshotter
csi:
attacherReplicaCount: 2
provisionerReplicaCount: 2
resizerReplicaCount: 2
snapshotterReplicaCount: 2
# Default replica count and storage path
defaultSettings:
upgradeChecker: false
kubernetesClusterAutoscalerEnabled: false
allowCollectingLonghornUsageMetrics: false
createDefaultDiskLabeledNodes: true
defaultReplicaCount: 2
defaultDataPath: "/k8s-data"
longhornUI:
replicas: 1

View File

@@ -11,7 +11,10 @@ Use for `helm` values:
https://github.com/cablespaghetti/k3s-monitoring/blob/master/kube-prometheus-stack-values.yaml
```
helm upgrade --install prometheus prometheus-community/kube-prometheus-stack --create-namespace --namespace monitoring --values kube-prometheus-stack-values.yaml
helm upgrade --install prometheus prometheus-community/kube-prometheus-stack \
--create-namespace \
--namespace monitoring \
--values kube-prometheus-stack-values.yaml
```
Accessing UIs via PortForwarding
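For example (service names assume the release name `prometheus` used above; adjust if yours differs):
```
kubectl -n monitoring port-forward svc/prometheus-operated 9090:9090 # Prometheus UI
kubectl -n monitoring port-forward svc/prometheus-grafana 3000:80 # Grafana UI
```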

View File

@@ -0,0 +1,19 @@
NAME: kube-prometheus-stack
LAST DEPLOYED: Wed Jun 11 19:32:51 2025
NAMESPACE: monitoring
STATUS: deployed
REVISION: 1
NOTES:
kube-prometheus-stack has been installed. Check its status by running:
kubectl --namespace monitoring get pods -l "release=kube-prometheus-stack"
Get Grafana 'admin' user password by running:
kubectl --namespace monitoring get secrets kube-prometheus-stack-grafana -o jsonpath="{.data.admin-password}" | base64 -d ; echo
Access Grafana local instance:
export POD_NAME=$(kubectl --namespace monitoring get pod -l "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=kube-prometheus-stack" -oname)
kubectl --namespace monitoring port-forward $POD_NAME 3000
Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.

statefulset/depl.yaml (new file)
View File

@@ -0,0 +1,37 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: web
namespace: test
spec:
selector:
matchLabels:
app: nginx # has to match .spec.template.metadata.labels
serviceName: "nginx"
replicas: 3 # by default is 1
minReadySeconds: 10 # by default is 0
template:
metadata:
labels:
app: nginx # has to match .spec.selector.matchLabels
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: registry.k8s.io/nginx-slim:0.24
ports:
- containerPort: 80
name: web
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html
volumeClaimTemplates:
- metadata:
name: www
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "longhorn"
resources:
requests:
storage: 1Gi
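Each replica gets its own PVC from the volumeClaimTemplates; after the rollout there should be one PVC per pod:
```
kubectl get pvc -n test # expect www-web-0, www-web-1, www-web-2 bound to Longhorn volumes
```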

statefulset/svc.yaml (new file)
View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
name: nginx
namespace: test
labels:
app: nginx
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: nginx
# Note: a Service has no pod template; the Longhorn nodeSelector
# (node.longhorn.io/create-default-disk: "true") belongs in the
# StatefulSet pod spec, not in this Service.