Compare commits


10 Commits

Author   SHA1        Message                                   Date
baschno  97ef02c1da  adding proxmox widgets                    2025-04-27 12:53:14 +02:00
baschno  65e99a9f83  enabling reloader component for homepage  2025-04-27 01:56:36 +02:00
baschno  77ad59eae5  fixing longhorn ui certificate            2025-04-26 23:15:31 +02:00
baschno  a13663754d  fix nginx pm icon                         2025-04-26 21:34:21 +02:00
baschno  5e30b1e83d  adding services to homepage               2025-04-26 21:21:24 +02:00
baschno  5514b5687f  longhorn and echopod tests                2025-04-26 19:57:56 +02:00
baschno  a3404bba2b  homepage setup without helm               2025-04-26 19:56:50 +02:00
baschno  0e4ddcefdf  longhorn nummer 2                         2025-04-21 21:18:23 +02:00
baschno  12546a9669  neu ist der mai                           2025-04-21 00:21:28 +02:00
baschno  a6ac7b84e4  savegame                                  2025-04-10 22:56:27 +02:00
27 changed files with 678 additions and 113 deletions

View File

@@ -9,4 +9,4 @@ spec:
     name: cloudflare-cluster-issuer
     kind: ClusterIssuer
   dnsNames:
-  - homepage.k8s.internal.schnrbs.work
+  - homepage.k8s.schnrbs.work

View File

@@ -0,0 +1,115 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
  annotations:
    reloader.stakater.com/match: "true"
data:
  kubernetes.yaml: |
    mode: cluster
  settings.yaml: |
    background: https://images.unsplash.com/photo-1502790671504-542ad42d5189?auto=format&fit=crop&w=2560&q=80
    cardBlur: xs
    providers:
      longhorn:
        url: https://longhorn-dashboard.k8s.schnrbs.work
  custom.css: ""
  custom.js: ""
  bookmarks.yaml: |
    - Developer:
        - Github:
            - abbr: GH
              href: https://github.com/
  services.yaml: |
    - Smart Home:
        - Home Assistant:
            icon: home-assistant.png
            href: https://ha.homeee.schnorbus.net
            description: Home Assistant is awesome
            widgets:
              - type: homeassistant
                url: https://ha.homeee.schnorbus.net
                key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI3MTA1ZmE1MDA5ZTA0MDQxYTc0NzUxZmUwM2NhYWMwZiIsImlhdCI6MTc0NTcxMDY3OCwiZXhwIjoyMDYxMDcwNjc4fQ.EI6-Husovb1IYpVn5RBy8pJ7bcESQHDzIbS22_5abUs
        - Zigbee2MQTT:
            icon: zigbee2mqtt.png
            href: http://muckibude.fritz.box:8383/#/
            description: Zigbee2MQTT is awesome
    - My Second Group:
        - Proxmox pve-81:
            icon: proxmox.png
            href: http://pve-81.fritz.box:8006
            description: Homepage is the best
        - Proxmox pve-82:
            icon: proxmox.png
            href: http://pve-82.fritz.box:8006
            description: Homepage is the best
        - Proxmox pve-83:
            icon: proxmox.png
            href: https://pve-83.fritz.box:8006
            description: Homepage is the best
            widgets:
              - type: proxmox
                url: https://pve-83.fritz.box:8006
                username: homepage_api@pam!homepage_api
                password: 7676925b-3ed4-4c8b-9df5-defb4a9a0871
    - Party Time:
        - My Third Service:
            href: http://localhost/
            description: Homepage is 😎
        - Nginx Proxy Manager:
            icon: nginx-proxy-manager.png
            href: http://192.168.178.42:8181
            description: Nginx Proxy Manager is awesome
            widgets:
              - type: npm
                url: http://192.168.178.42:8181
                username: bastian@schnorbus.net
                password: abcd1234
        - Plex:
            icon: plex.png
            href: http://diskstation.fritz.box:32400/web/index.html#!/
            description: Watch movies and TV shows.
            server: http://diskstation.fritz.box:32400/web/index.html#!/
            container: plex
            widgets:
              - type: plex
                url: http://diskstation.fritz.box:32400
                key: aNcUss31qsVsea5bsDf9
  widgets.yaml: |
    - kubernetes:
        cluster:
          show: true
          cpu: true
          memory: true
          showLabel: true
          label: "cluster"
        nodes:
          show: true
          cpu: true
          memory: true
          showLabel: true
    - longhorn:
        # Show the expanded view
        expanded: true
        # Shows a node representing the aggregate values
        total: true
        # Shows the node names as labels
        labels: true
        # Show the nodes
        nodes: true
    - resources:
        backend: resources
        expanded: true
        cpu: true
        memory: true
        network: default
    - search:
        provider: duckduckgo
        target: _blank
  docker.yaml: ""
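
The `reloader.stakater.com/match: "true"` annotation pairs with the `reloader.stakater.com/search: "true"` annotation on the Deployment further down: Stakater Reloader watches this ConfigMap and rolls the pods whenever it changes, so config edits take effect without a manual restart. A quick way to see it in action (plain kubectl, nothing repo-specific):

```bash
# change any key in the ConfigMap, then watch the Deployment roll
kubectl -n homepage edit configmap homepage
kubectl -n homepage rollout status deployment/homepage
```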

View File

@@ -0,0 +1,180 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
secrets:
  - name: homepage
---
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
  annotations:
    kubernetes.io/service-account.name: homepage
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: homepage
  labels:
    app.kubernetes.io/name: homepage
rules:
  - apiGroups:
      - ""
    resources:
      - namespaces
      - pods
      - nodes
    verbs:
      - get
      - list
  - apiGroups:
      - extensions
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
  - apiGroups:
      - traefik.io
    resources:
      - ingressroutes
    verbs:
      - get
      - list
  - apiGroups:
      - gateway.networking.k8s.io
    resources:
      - httproutes
      - gateways
    verbs:
      - get
      - list
  - apiGroups:
      - metrics.k8s.io
    resources:
      - nodes
      - pods
    verbs:
      - get
      - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: homepage
  labels:
    app.kubernetes.io/name: homepage
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: homepage
subjects:
  - kind: ServiceAccount
    name: homepage
    namespace: homepage
---
apiVersion: v1
kind: Service
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
  annotations:
spec:
  type: ClusterIP
  ports:
    - port: 3000
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: homepage
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
  annotations:
    reloader.stakater.com/search: "true"
    secret.reloader.stakater.com/reload: "homepage"
spec:
  revisionHistoryLimit: 3
  replicas: 1
  strategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: homepage
  template:
    metadata:
      labels:
        app.kubernetes.io/name: homepage
    spec:
      serviceAccountName: homepage
      automountServiceAccountToken: true
      dnsPolicy: ClusterFirst
      enableServiceLinks: true
      containers:
        - name: homepage
          image: "ghcr.io/gethomepage/homepage:latest"
          imagePullPolicy: Always
          env:
            - name: HOMEPAGE_ALLOWED_HOSTS
              value: homepage.k8s.schnrbs.work # required, may need port. See gethomepage.dev/installation/#homepage_allowed_hosts
          ports:
            - name: http
              containerPort: 3000
              protocol: TCP
          volumeMounts:
            - mountPath: /app/config/custom.js
              name: homepage-config
              subPath: custom.js
            - mountPath: /app/config/custom.css
              name: homepage-config
              subPath: custom.css
            - mountPath: /app/config/bookmarks.yaml
              name: homepage-config
              subPath: bookmarks.yaml
            - mountPath: /app/config/docker.yaml
              name: homepage-config
              subPath: docker.yaml
            - mountPath: /app/config/kubernetes.yaml
              name: homepage-config
              subPath: kubernetes.yaml
            - mountPath: /app/config/services.yaml
              name: homepage-config
              subPath: services.yaml
            - mountPath: /app/config/settings.yaml
              name: homepage-config
              subPath: settings.yaml
            - mountPath: /app/config/widgets.yaml
              name: homepage-config
              subPath: widgets.yaml
            - mountPath: /app/config/logs
              name: logs
      volumes:
        - name: homepage-config
          configMap:
            name: homepage
        - name: logs
          emptyDir: {}
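
Whether the ClusterRole above actually grants what the dashboard's Kubernetes widget needs can be sanity-checked with standard `kubectl auth can-i` impersonation of the ServiceAccount:

```bash
kubectl auth can-i list pods --as=system:serviceaccount:homepage:homepage -A
kubectl auth can-i get ingressroutes.traefik.io --as=system:serviceaccount:homepage:homepage -A
```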

View File

@@ -3,11 +3,19 @@ kind: IngressRoute
 metadata:
   name: homepage-ingress-route
   namespace: homepage
+  labels:
+    app.kubernetes.io/name: homepage
+  annotations:
+    gethomepage.dev/description: Dynamically Detected Homepage
+    gethomepage.dev/enabled: "true"
+    gethomepage.dev/group: Cluster Management
+    gethomepage.dev/icon: homepage.png
+    gethomepage.dev/name: Homepage
 spec:
   entryPoints:
   - websecure
   routes:
-  - match: Host(`homepage.k8s.internal.schnrbs.work`)
+  - match: Host(`homepage.k8s.schnrbs.work`)
     kind: Rule
     services:
     - name: homepage

View File

@@ -1,72 +0,0 @@
config:
  bookmarks:
    - Developer:
        - Github:
            - abbr: GH
              href: https://github.com/
  services:
    - My First Group:
        - My First Service:
            href: http://localhost/
            description: Homepage is awesome
    - My Second Group:
        - My Second Service:
            href: http://localhost/
            description: Homepage is the best
    - My Third Group:
        - My Third Service:
            href: http://localhost/
            description: Homepage is 😎
  widgets:
    # show the kubernetes widget, with the cluster summary and individual nodes
    - kubernetes:
        cluster:
          show: true
          cpu: true
          memory: true
          showLabel: true
          label: "cluster"
        nodes:
          show: true
          cpu: true
          memory: true
          showLabel: true
    - pihole:
        show: true
        url: http://192.168.178.202
        key: 1eae9e87f4b4710981639ee591b7d75734811d61697092110cb748c3244e01cc
    - fritzbox:
        show: true
        url: http://192.168.178.1
    - search:
        provider: duckduckgo
        target: _blank
  kubernetes:
    mode: cluster
  settings:
# The service account is necessary to allow discovery of other services
serviceAccount:
  create: true
  name: homepage
# This enables the service account to access the necessary resources
enableRbac: true
ingress:
  main:
    enabled: false
    annotations:
      # Example annotations to add Homepage to your Homepage!
      gethomepage.dev/enabled: "true"
      gethomepage.dev/name: "Homepage"
      gethomepage.dev/description: "Dynamically Detected Homepage"
      gethomepage.dev/group: "Dynamic"
      gethomepage.dev/icon: "homepage.png"
    hosts:
      - host: homepage.k8s.internal.schnrbs.work
        paths:
          - path: /
            pathType: Prefix

View File

@@ -0,0 +1,2 @@
https://igeadetokunbo.medium.com/how-to-run-databases-on-kubernetes-an-8-step-guide-b75ce9117600

View File

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  serviceName: "mysql"
  replicas: 3
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: mysql:8.4.0-oraclelinux8
          ports:
            - containerPort: 3306
              name: mysql
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "your_password"
          volumeMounts:
            - name: mysql-storage
              mountPath: /var/lib/mysql
  volumeClaimTemplates:
    - metadata:
        name: mysql-storage
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 10Gi
        storageClassName: longhorn
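
The `volumeClaimTemplates` block provisions one Longhorn PVC per replica, named `<template>-<pod>`, rather than sharing a single volume across the set. After applying, three claims should exist:

```bash
kubectl get pvc
# expected: mysql-storage-mysql-0, mysql-storage-mysql-1, mysql-storage-mysql-2
```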

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: longhorn
  hostPath:
    path: /mnt/data # Specify a path on the host for storage

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: longhorn

View File

@@ -0,0 +1,13 @@
# Headless service (clusterIP: None) so each StatefulSet pod gets its own DNS record
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  clusterIP: None # without this the Service is not actually headless
  ports:
    - name: mysql
      port: 3306
  selector:
    app: mysql
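
With a headless service, every StatefulSet pod is resolvable individually (`mysql-0.mysql`, `mysql-1.mysql`, ...), which is what replication setups address pods by. A one-shot lookup, assuming the default namespace:

```bash
kubectl run dns-test --rm -it --restart=Never --image=busybox:1.36 -- \
  nslookup mysql-0.mysql.default.svc.cluster.local
```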

View File

@@ -133,6 +133,151 @@ spec:
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zwavejs2mqtt-pvc
  labels:
    app: zwavejs2mqtt
  namespace: home-assistant
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 500Mi
---
apiVersion: v1
kind: Namespace
metadata:
  name: home-assistant
---
apiVersion: v1
kind: Service
metadata:
  namespace: home-assistant
  name: home-assistant
spec:
  selector:
    app: home-assistant
  type: ClusterIP
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 8123
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: home-assistant
  name: home-assistant
  labels:
    app: home-assistant
spec:
  replicas: 1
  selector:
    matchLabels:
      app: home-assistant
  template:
    metadata:
      labels:
        app: home-assistant
    spec:
      containers:
        - name: bluez
          image: ghcr.io/mysticrenji/bluez-service:v1.0.0
          securityContext:
            privileged: true
        - name: home-assistant
          image: ghcr.io/mysticrenji/homeassistant-arm64:2023.3.0
          resources:
            requests:
              memory: "256Mi"
            limits:
              memory: "512Mi"
          ports:
            - containerPort: 8123
          volumeMounts:
            - mountPath: /config
              name: config
            - mountPath: /config/configuration.yaml
              subPath: configuration.yaml
              name: configmap-file
            - mountPath: /config/automations.yaml
              subPath: automations.yaml
              name: configmap-file
            - mountPath: /media
              name: media-volume
            # - mountPath: /run/dbus
            #   name: d-bus
            #   readOnly: true
            - mountPath: /dev/ttyUSB1
              name: zigbee
            # - mountPath: /dev/video0
            #   name: cam
          securityContext:
            privileged: true
            capabilities:
              add:
                - NET_ADMIN
                - NET_RAW
                - SYS_ADMIN
      hostNetwork: true
      volumes:
        - name: config
          persistentVolumeClaim:
            claimName: home-assistant-pvc
        - name: media-volume
          hostPath:
            path: /tmp/media
        - name: configmap-file
          configMap:
            name: home-assistant-configmap
          # hostPath:
          #   path: /tmp/home-assistant
          #   type: DirectoryOrCreate
        # - name: d-bus
        #   hostPath:
        #     path: /run/dbus
        - name: zigbee
          hostPath:
            path: /dev/ttyACM0
        # - name: cam
        #   hostPath:
        #     path: /dev/video0
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: home-assistant-configmap
  namespace: home-assistant
data:
  known_devices.yaml: |
  automations.yaml: |
  configuration.yaml: |-
    default_config:
    frontend:
      themes: !include_dir_merge_named themes
    automation: !include automations.yaml
    http:
      use_x_forwarded_for: true
      trusted_proxies:
        - 10.10.0.0/16
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: home-assistant-pvc
  labels:
    app: home-assistant
  namespace: home-assistant
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 9Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zwavejs2mqtt-pvc
  labels:
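
The pod runs with `hostNetwork` and privileged containers so that BlueZ and the Zigbee stick on `/dev/ttyACM0` are reachable from inside the cluster. A quick post-apply check:

```bash
kubectl -n home-assistant get pods -o wide
kubectl -n home-assistant logs deploy/home-assistant -c home-assistant --tail=50
```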

View File

@@ -5,4 +5,5 @@ metadata:
   namespace: metallb-system
 spec:
   addresses:
-  - 192.168.178.220-192.168.178.250
+  # - 192.168.178.220-192.168.178.225 #pve-82
+  - 192.168.178.226-192.168.178.240 #pve-83
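
After narrowing the pool to the pve-83 slice, the active configuration can be confirmed through the MetalLB CRDs (resource names as of v0.14):

```bash
kubectl -n metallb-system get ipaddresspools.metallb.io
kubectl -n metallb-system get l2advertisements.metallb.io
```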

View File

@@ -4,5 +4,5 @@ https://canthonyscott.com/setting-up-a-k3s-kubernetes-cluster-within-proxmox/
 Following https://metallb.universe.tf/installation/ (0.14.3)
-kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.3/config/manifests/metallb-native.yaml
+kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.9/config/manifests/metallb-native.yaml

View File

@@ -27,7 +27,8 @@ kubectl expose deploy schnipo --port=80 --target-port=8080 --type=LoadBalancer -
```
#Create deploy
kubectl create deploy nginx --image=nginx
k create ns test
kubectl create deploy nginx --image=nginx -n test
kubectl scale --replicas=3 deployment/nginx -n test

View File

@@ -7,6 +7,22 @@
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
## Cert-Manager
Cert-Manager is used because it stores certificates in a Secret, which makes them accessible to every pod.
Traefik, in contrast, stores certificates on disk, so a volume in RWX mode would be needed (too much effort).
### Issuer - CA
An issuer represents a CA. It comes in two kinds.
#### Issuer
Can only be used in the namespace it is created in.
#### Cluster Issuer
Can be used throughout the whole cluster, not limited to a specific namespace,
i.e. a general issuer for all namespaces in the cluster.
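
The Certificates in this compare all reference a `cloudflare-cluster-issuer`, whose manifest is not part of the diff. For orientation, a minimal ClusterIssuer sketch for the Cloudflare DNS-01 flow looks roughly like this (email, secret names, and token key are placeholders, not taken from the repo):

```yaml
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: cloudflare-cluster-issuer
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: admin@example.com # placeholder
    privateKeySecretRef:
      name: cloudflare-cluster-issuer-account-key # placeholder
    solvers:
      - dns01:
          cloudflare:
            apiTokenSecretRef:
              name: cloudflare-api-token # placeholder
              key: api-token # placeholder
```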
## Troubleshooting steps
kubectl create deploy nginx --image=nginx -n test
k create svc -n test clusterip nginx --tcp=80
@@ -25,24 +41,11 @@ k apply -f traefik_lempa/nginx-ingress.yaml
k get svc -n test
k get ingress
k get ingress -n test
git status
git diff
git commit -am "wip thing"
git checkout master
git pull --rebase
git merge wip
git push
git log
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
k get ingressroutes.traefik.io --all-namespaces

View File

@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: schnipo-ingress-certificate
  namespace: dishes
spec:
  secretName: schnipo-certificate-secret
  issuerRef:
    name: cloudflare-cluster-issuer
    kind: ClusterIssuer
  dnsNames:
    - schnipo.k8s.schnrbs.work

View File

@@ -0,0 +1,16 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: schnipo-ingress-route
  namespace: dishes
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`schnipo.k8s.schnrbs.work`)
      kind: Rule
      services:
        - name: schnipo
          port: 8080
  tls:
    secretName: schnipo-certificate-secret
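
Once both manifests are applied, cert-manager should drive the Certificate to `Ready` and Traefik should serve the route over TLS:

```bash
kubectl -n dishes get certificate schnipo-ingress-certificate
curl -sv https://schnipo.k8s.schnrbs.work -o /dev/null
```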

View File

@@ -9,4 +9,4 @@ spec:
     name: cloudflare-cluster-issuer
     kind: ClusterIssuer
   dnsNames:
-  - nginx-test.k8s.internal.schnrbs.work
+  - nginx-test.k8s.schnrbs.work

View File

@@ -7,7 +7,7 @@ spec:
   entryPoints:
   - websecure
   routes:
-  - match: Host(`nginx-test.k8s.internal.schnrbs.work`)
+  - match: Host(`nginx-test.k8s.schnrbs.work`)
     kind: Rule
     services:
     - name: nginx

View File

@@ -7,7 +7,7 @@ metadata:
   traefik.ingress.kubernetes.io/router.entrypoints: websecure
 spec:
   rules:
-  - host: nginx-test.k8s.internal.schnrbs.work
+  - host: nginx-test.k8s.schnrbs.work
     http:
       paths:
       - path: /
@@ -19,5 +19,5 @@ spec:
         number: 80
   tls:
   - hosts:
-    - nginx-test.k8s.internal.schnrbs.work
+    - nginx-test.k8s.schnrbs.work
     secretName: nginx-certificate-secret

View File

@@ -1,10 +1,15 @@
 ports:
   web:
-    redirectTo:
-      port: websecure
+    redirections:
+      entryPoint:
+        to: websecure
+        scheme: https
 logs:
   general:
     level: DEBUG
 ingressRoute:
   dashboard:
     enabled: true
     entryPoints: [web, websecure]
-    matchRule: Host(`traefik-dashboard.k8s.redacted`)
+    matchRule: Host(`traefik-dashboard.k8s.schnrbs.work`)

View File

@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: echopod-ingress-certificate
  namespace: default
spec:
  secretName: echopod-certificate-secret
  issuerRef:
    name: cloudflare-cluster-issuer
    kind: ClusterIssuer
  dnsNames:
    - echopod.k8s.schnrbs.work

View File

@@ -30,10 +30,30 @@ kind: Service
 metadata:
   name: echopod-service
 spec:
-  type: NodePort # Change to LoadBalancer if using a cloud provider
+  type: LoadBalancer # Change to LoadBalancer if using a cloud provider
+  # type: NodePort # Change to LoadBalancer if using a cloud provider
   ports:
   - port: 80
     targetPort: 80
-    nodePort: 30080 # Port to expose on the node
+    # nodePort: 30080 # Port to expose on the node
   selector:
     app: echopod
+---
+apiVersion: traefik.io/v1alpha1
+kind: IngressRoute
+metadata:
+  name: echopod-ingress-route
+  namespace: default
+spec:
+  entryPoints:
+  - websecure
+  routes:
+  - match: Host(`echopod.k8s.schnrbs.work`)
+    kind: Rule
+    services:
+    - name: echopod-service
+      port: 80
+  tls:
+    secretName: echopod-certificate-secret
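
With the Service switched from NodePort to LoadBalancer, MetalLB should now assign it an address from the pool configured earlier in this compare:

```bash
kubectl get svc echopod-service
# EXTERNAL-IP should come from the MetalLB range (192.168.178.226-240 here)
```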

View File

@@ -5,15 +5,26 @@ Here are the steps to create a Persistent Volume for Longhorn:
### 1. Make sure Longhorn is installed
First, make sure Longhorn is installed on your cluster. If it is not yet installed, you can install it with Helm or directly from the YAML files.
#### Node labeling
In case not all nodes should provide disks:
```
k label nodes k3s-prod-worker-{1..3} node.longhorn.io/create-default-disk=true
```
#### With Helm:
```bash
helm repo add longhorn https://charts.longhorn.io
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --values longhorn-values.yaml
```
#### With kubectl:
```bash
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.2.2/deploy/install.yaml
```
### Check via UI
```
k port-forward svc/longhorn-frontend 8000:80 -n longhorn-system
```
### 2. Create a PersistentVolume (PV) and a PersistentVolumeClaim (PVC)

View File

@@ -7,7 +7,7 @@ metadata:
 spec:
   secretName: longhorn-web-ui-tls
   dnsNames:
-  - longhorn.k8s.internal.schnrbs.work
+  - longhorn-dashboard.k8s.schnrbs.work
   issuerRef:
     name: cloudflare-cluster-issuer
     kind: ClusterIssuer

View File

@@ -1,15 +1,13 @@
 ---
 apiVersion: traefik.io/v1alpha1
 kind: IngressRoute
 metadata:
-  name: longhorn-web-ui
+  name: longhorn-ingress-route
   namespace: longhorn-system
 spec:
-  properties:
-  entrypoints:
+  entryPoints:
   - websecure
   routes:
-  - match: Host(`longhorn.k8s.internal.schnrbs.work`)
+  - match: Host(`longhorn-dashboard.k8s.schnrbs.work`)
     kind: Rule
     services:
     - name: longhorn-frontend

View File

@@ -1,2 +1,36 @@
global:
  nodeSelector:
    node.longhorn.io/create-default-disk: "true"
service:
  ui:
    type: NodePort
    nodePort: 30050
  manager:
    type: ClusterIP
# Replica count for the default Longhorn StorageClass.
persistence:
  defaultClass: false
  defaultFsType: ext4
  defaultClassReplicaCount: 2
  reclaimPolicy: Delete
# Replica counts for CSI Attacher, Provisioner, Resizer, Snapshotter
csi:
  attacherReplicaCount: 2
  provisionerReplicaCount: 2
  resizerReplicaCount: 2
  snapshotterReplicaCount: 2
# Default replica count and storage path
defaultSettings:
  upgradeChecker: false
  kubernetesClusterAutoscalerEnabled: false
  allowCollectingLonghornUsageMetrics: false
  createDefaultDiskLabeledNodes: true
  defaultReplicaCount: 2
  defaultDataPath: "/k8s-data"
longhornUI:
  replicas: 1
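
With `defaultClass: false` the chart should still create the `longhorn` StorageClass, just without marking it as the cluster default; the class and the labeled disk nodes can be inspected afterwards:

```bash
kubectl get storageclass longhorn
kubectl -n longhorn-system get nodes.longhorn.io
```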