Compare commits
23 Commits
f58fad216a
...
wip/certmg
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
09026d6812 | ||
|
|
24991fce90 | ||
|
|
65a59d2d0c | ||
|
|
85fb620e39 | ||
|
|
b56e02d2ed | ||
|
|
15cb2ce903 | ||
|
|
b47fe8f66b | ||
|
|
c5810661e5 | ||
|
|
7ddc08d622 | ||
|
|
c5aa7f8105 | ||
|
|
0c6cfedcde | ||
| 2be83a977a | |||
|
|
4f5a18c84c | ||
|
|
7a54346331 | ||
|
|
5abc0de38a | ||
|
|
29674ae504 | ||
|
|
6abe5d1a8f | ||
|
|
67a6c414f2 | ||
|
|
08212c26a6 | ||
|
|
e4adbfd0b2 | ||
|
|
d7db562a23 | ||
|
|
7896130d05 | ||
|
|
efcb4ee172 |
1
.gitignore
vendored
Normal file
1
.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
.env.local
|
||||
@@ -34,4 +34,30 @@ Rancher Installation
|
||||
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
|
||||
|
||||
|
||||
# Prevent scheduling on master (optional)
|
||||
|
||||
|
||||
```
|
||||
kubectl taint nodes master node-role.kubernetes.io/master=:NoSchedule
|
||||
```
|
||||
|
||||
# Just Setup // K3sup
|
||||
|
||||
export SERVER_IP=192.168.178.45
|
||||
export AGENT_IP=192.168.178.75
|
||||
export USER=basti
|
||||
|
||||
|
||||
k3sup install \
|
||||
--cluster \
|
||||
--ip 192.168.178.45 \
|
||||
--user $USER \
|
||||
--merge \
|
||||
--local-path $HOME/.kube/config \
|
||||
--context my-k3s
|
||||
|
||||
k3sup join \
|
||||
--ip $AGENT_IP \
|
||||
--server-ip $SERVER_IP \
|
||||
--user $USER
|
||||
|
||||
|
||||
148
01_Basic_Setup/justfile
Normal file
148
01_Basic_Setup/justfile
Normal file
@@ -0,0 +1,148 @@
|
||||
set fallback := true
|
||||
|
||||
export K8S_CONTEXT := env("K8S_CONTEXT", "")
|
||||
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
|
||||
export EXTERNAL_K8S_HOST := env("EXTERNAL_K8S_HOST", "")
|
||||
export KEYCLOAK_HOST := env("KEYCLOAK_HOST", "")
|
||||
export KEYCLOAK_REALM := env("KEYCLOAK_REALM", "buunstack")
|
||||
export K8S_OIDC_CLIENT_ID := env('K8S_OIDC_CLIENT_ID', "k8s")
|
||||
export K3S_ENABLE_REGISTRY := env("K3S_ENABLE_REGISTRY", "true")
|
||||
export SERVER_IP := env("K3S_SERVER_IP","192.168.178.45")
|
||||
export AGENT_IP := env("K3S_AGENT_IP","192.168.178.75")
|
||||
export USER := env("K3S_USER","basti")
|
||||
|
||||
[private]
|
||||
default:
|
||||
@just --list --unsorted --list-submodules
|
||||
|
||||
install:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
|
||||
kubeconfig=""
|
||||
context=""
|
||||
if gum confirm "Update KUBECONFIG?"; then
|
||||
kubeconfig=$(
|
||||
gum input --prompt="KUBECONFIG file: " --value="${HOME}/.kube/config" --width=100
|
||||
)
|
||||
context=$(
|
||||
gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
|
||||
)
|
||||
fi
|
||||
|
||||
args=(
|
||||
"install"
|
||||
"--context" "${context}"
|
||||
"--host" "${K8S_MASTER_NODE_NAME}"
|
||||
"--user" "${username}"
|
||||
"--no-extras" #
|
||||
)
|
||||
|
||||
if [ -n "${kubeconfig}" ]; then
|
||||
mkdir -p "$(dirname "${kubeconfig}")"
|
||||
args+=("--local-path" "${kubeconfig}" "--merge")
|
||||
fi
|
||||
echo "Running: k3sup ${args[@]}"
|
||||
k3sup "${args[@]}"
|
||||
|
||||
if [ -n "${context}" ]; then
|
||||
kubectl config use-context "${context}"
|
||||
fi
|
||||
|
||||
if [ "${K3S_ENABLE_REGISTRY}" = "true" ]; then
|
||||
echo "Setting up local Docker registry..."
|
||||
|
||||
# Deploy Docker registry to cluster
|
||||
kubectl apply -f ./registry/registry.yaml
|
||||
|
||||
# Set Pod Security Standard for registry namespace
|
||||
kubectl label namespace registry pod-security.kubernetes.io/enforce=restricted --overwrite
|
||||
|
||||
# Wait for registry deployment
|
||||
echo "Waiting for registry to be ready..."
|
||||
kubectl wait --for=condition=available --timeout=60s deployment/registry -n registry
|
||||
|
||||
# Configure registries.yaml for k3s
|
||||
just configure-registry
|
||||
|
||||
echo "✓ Local Docker registry deployed and configured"
|
||||
echo ""
|
||||
echo "Registry accessible at:"
|
||||
echo " localhost:30500"
|
||||
echo ""
|
||||
echo "Usage:"
|
||||
echo " export DOCKER_HOST=ssh://${K8S_MASTER_NODE_NAME}"
|
||||
echo " docker build -t localhost:30500/myapp:latest ."
|
||||
echo " docker push localhost:30500/myapp:latest"
|
||||
echo " kubectl run myapp --image=localhost:30500/myapp:latest"
|
||||
fi
|
||||
|
||||
echo "k3s cluster installed on ${K8S_MASTER_NODE_NAME}."
|
||||
|
||||
|
||||
uninstall:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
if gum confirm "Uninstall k3s from ${K8S_MASTER_NODE_NAME}?"; then
|
||||
|
||||
if gum confirm "Also remove Agent node at ${AGENT_IP}?"; then
|
||||
echo "Removing Agent node at ${AGENT_IP}..."
|
||||
ssh "${AGENT_IP}" "/usr/local/bin/k3s-agent-uninstall.sh"
|
||||
fi
|
||||
|
||||
echo "Removing content of Server node..."
|
||||
ssh "${K8S_MASTER_NODE_NAME}" "/usr/local/bin/k3s-uninstall.sh"
|
||||
echo "Cleaning up kubeconfig entries..."
|
||||
cluster_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.cluster // empty")
|
||||
user_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.user // empty")
|
||||
if kubectl config get-contexts "${K8S_CONTEXT}" &>/dev/null; then
|
||||
kubectl config delete-context "${K8S_CONTEXT}"
|
||||
echo "Deleted context: ${K8S_CONTEXT}"
|
||||
fi
|
||||
if [ -n "${cluster_name}" ] && kubectl config get-clusters | grep -q "^${cluster_name}$"; then
|
||||
kubectl config delete-cluster "${cluster_name}"
|
||||
echo "Deleted cluster: ${cluster_name}"
|
||||
fi
|
||||
if [ -n "${user_name}" ] && kubectl config get-users | grep -q "^${user_name}$"; then
|
||||
kubectl config delete-user "${user_name}"
|
||||
echo "Deleted user: ${user_name}"
|
||||
fi
|
||||
echo "k3s cluster uninstalled from ${K8S_CONTEXT}."
|
||||
else
|
||||
echo "Uninstallation cancelled." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
add-agent:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
|
||||
new_agent_ip=$(gum input --prompt="Agent IP to join cluster: " --value="${AGENT_IP}" --width=100)
|
||||
|
||||
args=(
|
||||
"join"
|
||||
"--ip" "${new_agent_ip}"
|
||||
"--server-ip" "${SERVER_IP}"
|
||||
"--user" "${username}"
|
||||
)
|
||||
|
||||
|
||||
echo "Running: k3sup ${args[*]}"
|
||||
k3sup "${args[@]}"
|
||||
echo "Agent node at ${new_agent_ip} added to cluster."
|
||||
|
||||
# Configure k3s to use local registry
|
||||
configure-registry:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
echo "Configuring k3s registries.yaml..."
|
||||
|
||||
ssh "${K8S_MASTER_NODE_NAME}" "sudo mkdir -p /etc/rancher/k3s"
|
||||
gomplate -f ./registry/registries.gomplate.yaml | ssh "${K8S_MASTER_NODE_NAME}" "sudo tee /etc/rancher/k3s/registries.yaml > /dev/null"
|
||||
|
||||
echo "Restarting k3s to apply registry configuration..."
|
||||
ssh "${K8S_MASTER_NODE_NAME}" "sudo systemctl restart k3s"
|
||||
echo "✓ Registry configuration applied"
|
||||
4
01_Basic_Setup/registry/registries.gomplate.yaml
Normal file
4
01_Basic_Setup/registry/registries.gomplate.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
configs:
|
||||
"localhost:30500":
|
||||
tls:
|
||||
insecure_skip_verify: true
|
||||
109
01_Basic_Setup/registry/registry.yaml
Normal file
109
01_Basic_Setup/registry/registry.yaml
Normal file
@@ -0,0 +1,109 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: registry
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: registry
|
||||
namespace: registry
|
||||
labels:
|
||||
app: registry
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: registry
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: registry
|
||||
spec:
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
fsGroup: 65534
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
containers:
|
||||
- name: registry
|
||||
image: registry:2
|
||||
ports:
|
||||
- containerPort: 5000
|
||||
name: http
|
||||
resources:
|
||||
requests:
|
||||
cpu: 25m
|
||||
memory: 128Mi
|
||||
limits:
|
||||
cpu: 2000m
|
||||
memory: 20Gi
|
||||
env:
|
||||
- name: REGISTRY_STORAGE_DELETE_ENABLED
|
||||
value: "true"
|
||||
- name: REGISTRY_HTTP_ADDR
|
||||
value: "0.0.0.0:5000"
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
volumeMounts:
|
||||
- name: registry-data
|
||||
mountPath: /var/lib/registry
|
||||
- name: tmp
|
||||
mountPath: /tmp
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /v2/
|
||||
port: 5000
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /v2/
|
||||
port: 5000
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
volumes:
|
||||
- name: registry-data
|
||||
emptyDir: {}
|
||||
- name: tmp
|
||||
emptyDir: {}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: registry
|
||||
namespace: registry
|
||||
labels:
|
||||
app: registry
|
||||
spec:
|
||||
selector:
|
||||
app: registry
|
||||
ports:
|
||||
- port: 5000
|
||||
targetPort: 5000
|
||||
name: http
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: registry-nodeport
|
||||
namespace: registry
|
||||
labels:
|
||||
app: registry
|
||||
spec:
|
||||
selector:
|
||||
app: registry
|
||||
ports:
|
||||
- port: 5000
|
||||
targetPort: 5000
|
||||
nodePort: 30500
|
||||
name: http
|
||||
type: NodePort
|
||||
131
08_Vault/README.md
Normal file
131
08_Vault/README.md
Normal file
@@ -0,0 +1,131 @@
|
||||
# Helm
|
||||
|
||||
## Installation
|
||||
helm repo add hashicorp https://helm.releases.hashicorp.com
|
||||
|
||||
helm install vault hashicorp/vault \
|
||||
--set='server.dev.enabled=true' \
|
||||
--set='ui.enabled=true' \
|
||||
--set='ui.serviceType=LoadBalancer' \
|
||||
--namespace vault \
|
||||
--create-namespace
|
||||
|
||||
Running Vault in “dev” mode. This requires no further setup, no state management, and no initialization. This is useful for experimenting with Vault without needing to unseal, store keys, et. al. All data is lost on restart — do not use dev mode for anything other than experimenting. See https://developer.hashicorp.com/vault/docs/concepts/dev-server to know more
|
||||
|
||||
|
||||
## Output
|
||||
```
|
||||
$ kubectl get all -n vault
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
pod/vault-0 1/1 Running 0 2m39s
|
||||
pod/vault-agent-injector-8497dd4457-8jgcm 1/1 Running 0 2m39s
|
||||
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
service/vault ClusterIP 10.245.225.169 <none> 8200/TCP,8201/TCP 2m40s
|
||||
service/vault-agent-injector-svc ClusterIP 10.245.32.56 <none> 443/TCP 2m40s
|
||||
service/vault-internal ClusterIP None <none> 8200/TCP,8201/TCP 2m40s
|
||||
service/vault-ui LoadBalancer 10.245.103.246 24.132.59.59 8200:31764/TCP 2m40s
|
||||
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
deployment.apps/vault-agent-injector 1/1 1 1 2m40s
|
||||
|
||||
NAME DESIRED CURRENT READY AGE
|
||||
replicaset.apps/vault-agent-injector-8497dd4457 1 1 1 2m40s
|
||||
|
||||
NAME READY AGE
|
||||
statefulset.apps/vault 1/1 2m40s
|
||||
```
|
||||
|
||||
# Configuration
|
||||
|
||||
## Enter Pod
|
||||
|
||||
kubectl exec -it vault-0 -n vault -- /bin/sh
|
||||
|
||||
## Create policy
|
||||
```
|
||||
cat <<EOF > /home/vault/read-policy.hcl
|
||||
path "secret*" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
EOF
|
||||
```
|
||||
## Apply
|
||||
|
||||
```
|
||||
vault policy write read-policy /home/vault/read-policy.hcl
|
||||
```
|
||||
|
||||
## Enable Kubernetes
|
||||
```
|
||||
vault auth enable kubernetes
|
||||
```
|
||||
|
||||
## Configure Kubernetes Auth
|
||||
|
||||
Configure to communicate with API server
|
||||
```
|
||||
vault write auth/kubernetes/config \
|
||||
token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
|
||||
kubernetes_host=https://${KUBERNETES_PORT_443_TCP_ADDR}:443 \ kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
|
||||
```
|
||||
|
||||
## Create a Role
|
||||
Create a role(vault-role) that binds the above policy to a Kubernetes service account(vault-serviceaccount) in a specific namespace. This allows the service account to access secrets stored in Vault:
|
||||
|
||||
```
|
||||
vault write auth/kubernetes/role/vault-role \
|
||||
bound_service_account_names=vault-serviceaccount \
|
||||
bound_service_account_namespaces=vault \
|
||||
policies=read-policy \
|
||||
ttl=1h
|
||||
```
|
||||
|
||||
# Create Secrets
|
||||
|
||||
## Via CLI
|
||||
|
||||
```
|
||||
vault kv put secret/login pattoken=ytbuytbytbf765rb65u56rv
|
||||
```
|
||||
|
||||
## Via UI
|
||||
|
||||
Now you can login to vault using the Token method, initially use Token=`root` to login.
|
||||
|
||||
|
||||
# Accessing Secrets in Pods
|
||||
|
||||
Using the above steps, we have installed Vault and configured a Vault role(vault-role) to allow the service account(vault-serviceaccount) to access secrets stored in Vault.
|
||||
|
||||
Additionally, we have created two secrets: login and my-first-secret with key-value pairs. Now, let's create a simple Kubernetes deployment and try to access those secrets.
|
||||
|
||||
First, let’s create a service account named vault-serviceaccount in the vault namespace. This service account is granted permissions for the Vault role as defined in the "Create a Role" step above.
|
||||
|
||||
Apply the above manifest using the below command
|
||||
```
|
||||
kubectl apply -f vault-sa.yaml -n vault
|
||||
```
|
||||
|
||||
This deployment manifest creates a single replica of an Nginx pod configured to securely fetch secrets from Vault. The Vault Agent injects the secrets login and my-first-secret into the pod according to the specified templates. The secrets are stored in the pod's filesystem and can be accessed by the application running in the container. The vault-serviceaccount service account, which has the necessary permissions, is used to authenticate with Vault.
|
||||
|
||||
|
||||
```
|
||||
kubectl apply -f vault-secret-test-deploy.yaml -n vault
|
||||
```
|
||||
These annotations are used to configure the Vault Agent to inject secrets into the pod volume.
|
||||
|
||||
-`vault.hashicorp.com/agent-inject: “true”`: Enables Vault Agent injection for this pod.
|
||||
-`vault.hashicorp.com/agent-inject-status: “update”`: Ensures the status of secret injection is updated.
|
||||
-`vault.hashicorp.com/agent-inject-secret-login: “secret/login”`: Specifies that the secret stored at `secret/login` in Vault should be injected.
|
||||
-`vault.hashicorp.com/agent-inject-template-login`: Defines the template for the injected login secret, specifying the format in which the secret will be written.
|
||||
-`vault.hashicorp.com/agent-inject-secret-my-first-secret: “secret/my-first-secret”`: Specifies that the secret stored at secret/my-first-secret in Vault should be injected.
|
||||
-`vault.hashicorp.com/agent-inject-template-my-first-secret`: Defines the template for the injected `my-first-secret`, specifying the format in which the secret will be written.
|
||||
-`vault.hashicorp.com/role: “vault-role”`: Specifies the Vault role to be used for authentication.
|
||||
-`serviceAccountName`: Uses the service account `vault-serviceaccount` which has permissions to access Vault.
|
||||
|
||||
Use the below command to check the vault secrets from the pod volume
|
||||
```
|
||||
kubectl exec -it vault-test-84d9dc9986-gcxfv -- sh -c "cat /vault/secrets/login && cat /vault/secrets/my-first-secret" -n vault
|
||||
```
|
||||
6
08_Vault/vault-sa.yaml
Normal file
6
08_Vault/vault-sa.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: vault-serviceaccount
|
||||
labels:
|
||||
app: read-vault-secret
|
||||
35
08_Vault/vault-secret-test-deploy.yaml
Normal file
35
08_Vault/vault-secret-test-deploy.yaml
Normal file
@@ -0,0 +1,35 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: vault-test
|
||||
labels:
|
||||
app: read-vault-secret
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: read-vault-secret
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
vault.hashicorp.com/agent-inject: "true"
|
||||
vault.hashicorp.com/agent-inject-status: "update"
|
||||
vault.hashicorp.com/agent-inject-secret-login: "secret/login"
|
||||
vault.hashicorp.com/agent-inject-template-login: |
|
||||
{{- with secret "secret/login" -}}
|
||||
pattoken={{ .Data.data.pattoken }}
|
||||
{{- end }}
|
||||
vault.hashicorp.com/agent-inject-secret-my-first-secret: "secret/my-first-secret"
|
||||
vault.hashicorp.com/agent-inject-template-my-first-secret: |
|
||||
{{- with secret "secret/my-first-secret" -}}
|
||||
username={{ .Data.data.username }}
|
||||
password={{ .Data.data.password }}
|
||||
{{- end }}
|
||||
vault.hashicorp.com/role: "vault-role"
|
||||
labels:
|
||||
app: read-vault-secret
|
||||
spec:
|
||||
serviceAccountName: vault-serviceaccount
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
@@ -46,6 +46,13 @@ data:
|
||||
url: http://pi.hole
|
||||
version: 6
|
||||
key: 5ipI9bvB
|
||||
- Paperless NGX:
|
||||
icon: paperless-ng.png
|
||||
href: https://ppl.homeee.schnorbus.net
|
||||
widgets:
|
||||
- type: paperlessngx
|
||||
url: https://ppl.homeee.schnorbus.net
|
||||
token: 0cf8eb062d0ecfc0aa70611125427692cb577d68
|
||||
|
||||
|
||||
- My Second Group:
|
||||
@@ -61,24 +68,33 @@ data:
|
||||
icon: proxmox.png
|
||||
href: https://pve-83.fritz.box:8006
|
||||
description: Homepage is the best
|
||||
widgets:
|
||||
- type: proxmox
|
||||
url: https://pve-83.fritz.box:8006
|
||||
username: homepage_api@pam!homepage_api
|
||||
password: 7676925b-3ed4-4c8b-9df5-defb4a9a0871
|
||||
# widgets:
|
||||
# - type: proxmox
|
||||
# url: https://pve-83.fritz.box:8006
|
||||
# username: homepage_api@pam!homepage_api
|
||||
# password: 0cf8eb062d0ecfc0aa70611125427692cb577d68
|
||||
- Longhorn:
|
||||
icon: longhorn.png
|
||||
href: https://longhorn-dashboard.k8s.schnrbs.work
|
||||
description: Longhorn volume provisioning
|
||||
|
||||
- Party Time:
|
||||
- Immich:
|
||||
icon: immich.png
|
||||
href: https://immich.homeee.schnorbus.net
|
||||
description: Immich is awesome
|
||||
widgets:
|
||||
- type: immich
|
||||
url: https://immich.homeee.schnorbus.net
|
||||
key: deOT6z7AHok30eKWgF2bOSJuOIZXK0eONo7PrR0As
|
||||
version: 2
|
||||
- Linkwarden:
|
||||
icon: linkwarden.png
|
||||
href: https://lw.homeee.schnorbus.net
|
||||
description: Homepage isssss 😎
|
||||
widgets:
|
||||
- type: linkwarden
|
||||
url: https://lw.homeee.schnorbus.net
|
||||
url: http://docker-host-02.fritz.box:9595
|
||||
key: eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..bEvs2PcR0ZTNpb8b.Lhe1-00LlVVC97arojvhh7IK4VADR82AMAzK5sd7AcUhs2WUQmu8Q-cOAKFGVlgPgdk-w1Pa8CJJHF71opWJk85aJXkTcdl7jANwN8PqgHXsSPoqtvzX.5GFRIAMo31sw5GStVlznHQ
|
||||
- Nginx Proxy Manager:
|
||||
icon: nginx-proxy-manager.png
|
||||
|
||||
@@ -2,3 +2,9 @@
|
||||
|
||||
helm install reloader stakater/reloader --namespace reloader --create-namespace
|
||||
|
||||
flux create source helm stakater --url https://stakater.github.io/stakater-charts --namespace reloader
|
||||
|
||||
flux create helmrelease my-reloader --chart stakater/reloader \
|
||||
--source HelmRepository/stakater \
|
||||
--chart-version 2.1.3 \
|
||||
--namespace reloader
|
||||
File diff suppressed because it is too large
Load Diff
@@ -5,5 +5,4 @@ metadata:
|
||||
namespace: metallb-system
|
||||
spec:
|
||||
addresses:
|
||||
# - 192.168.178.220-192.168.178.225 #pve-82
|
||||
- 192.168.178.226-192.168.178.240 #pve-83
|
||||
- {{ .Env.METALLB_ADDRESS_RANGE }}
|
||||
66
Metallb_Setup/justfile
Normal file
66
Metallb_Setup/justfile
Normal file
@@ -0,0 +1,66 @@
|
||||
set fallback := true
|
||||
|
||||
export K8S_CONTEXT := env("K8S_CONTEXT", "")
|
||||
export SERVER_IP := env("K3S_SERVER_IP","192.168.178.45")
|
||||
export USER := env("K3S_USER","basti")
|
||||
|
||||
|
||||
[private]
|
||||
default:
|
||||
@just --list --unsorted --list-submodules
|
||||
|
||||
|
||||
install:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
METALLB_VERSION="v0.15.3"
|
||||
|
||||
username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
|
||||
context=""
|
||||
if gum confirm "Update KUBECONFIG?"; then
|
||||
context=$(
|
||||
gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
|
||||
)
|
||||
fi
|
||||
|
||||
if [ -n "${context}" ]; then
|
||||
kubectl config use-context "${context}"
|
||||
fi
|
||||
|
||||
kubectl apply -f "https://raw.githubusercontent.com/metallb/metallb/${METALLB_VERSION}/config/manifests/metallb-native.yaml"
|
||||
gum spin --spinner dot --title "Waiting for MetalLB to be ready..." -- kubectl wait --namespace metallb-system --for=condition=available deployment --all --timeout=120s
|
||||
echo "MetalLB ${METALLB_VERSION} installed successfully."
|
||||
|
||||
gomplate -f address-pool.gomplate.yaml | kubectl apply -f -
|
||||
echo "Address pool configured."
|
||||
|
||||
kubectl apply -f advertisement.yaml
|
||||
echo "Advertisement created."
|
||||
|
||||
uninstall:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
kubectl get namespace metallb-system &>/dev/null && kubectl delete ns metallb-system
|
||||
|
||||
test-deployment:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
kubectl apply -f test-deployment.yaml
|
||||
|
||||
echo "Test deployment created. You can check the service with 'kubectl get svc nginx -o wide -n test'."
|
||||
|
||||
echo "To clean up, run 'just test-deployment-cleanup'."
|
||||
|
||||
test-deployment-cleanup:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
kubectl delete -f test-deployment.yaml
|
||||
echo "Test deployment and service deleted."
|
||||
@@ -9,4 +9,4 @@ spec:
|
||||
name: cloudflare-cluster-issuer
|
||||
kind: ClusterIssuer
|
||||
dnsNames:
|
||||
- schnipo.k8s.schnrbs.work
|
||||
- schnipo.{{.Env.EXTERNAL_DOMAIN}}
|
||||
43
Test-Deployment/dishes-deployment.yaml
Normal file
43
Test-Deployment/dishes-deployment.yaml
Normal file
@@ -0,0 +1,43 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: dishes
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: dish-schnipo
|
||||
namespace: dishes
|
||||
labels:
|
||||
app: dishes
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: dishes
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: dishes
|
||||
spec:
|
||||
containers:
|
||||
- name: dish-schnipo
|
||||
image: bschnorbus/dish-schnipo
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: dish-schnipo
|
||||
namespace: dishes
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: dishes
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
@@ -7,10 +7,12 @@ spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
- match: Host(`schnipo.k8s.schnrbs.work`)
|
||||
- match: Host(`schnipo.{{.Env.EXTERNAL_DOMAIN}}`)
|
||||
kind: Rule
|
||||
services:
|
||||
- name: schnipo
|
||||
port: 8080
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
tls:
|
||||
secretName: schnipo-certificate-secret
|
||||
|
||||
@@ -9,4 +9,4 @@ spec:
|
||||
name: cloudflare-cluster-issuer
|
||||
kind: ClusterIssuer
|
||||
dnsNames:
|
||||
- nginx-test.k8s.schnrbs.work
|
||||
- nginx-test.{{.Env.EXTERNAL_DOMAIN}}
|
||||
43
Test-Deployment/nginx-deployment.yaml
Normal file
43
Test-Deployment/nginx-deployment.yaml
Normal file
@@ -0,0 +1,43 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: test
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx
|
||||
namespace: test
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:latest
|
||||
ports:
|
||||
- containerPort: 80
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nginx
|
||||
namespace: test
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
selector:
|
||||
app: nginx
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
protocol: TCP
|
||||
@@ -7,10 +7,10 @@ spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
- match: Host(`nginx-test.k8s.schnrbs.work`)
|
||||
- match: Host(`nginx-test.{{.Env.EXTERNAL_DOMAIN}}`)
|
||||
kind: Rule
|
||||
services:
|
||||
- name: nginx
|
||||
port: 80
|
||||
tls:
|
||||
secretName: nginx-certificate-secret
|
||||
secretName: nginx-certificate-secret
|
||||
@@ -7,7 +7,7 @@ metadata:
|
||||
traefik.ingress.kubernetes.io/router.entrypoints: websecure
|
||||
spec:
|
||||
rules:
|
||||
- host: nginx-test.k8s.schnrbs.work
|
||||
- host: nginx-test.int.schnrbs.work
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
@@ -19,5 +19,5 @@ spec:
|
||||
number: 80
|
||||
tls:
|
||||
- hosts:
|
||||
- nginx-test.k8s.schnrbs.work
|
||||
- nginx-test.int.schnrbs.work
|
||||
secretName: nginx-certificate-secret
|
||||
@@ -4,7 +4,7 @@
|
||||
helm repo add traefik https://helm.traefik.io/traefik
|
||||
|
||||
|
||||
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
|
||||
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
|
||||
|
||||
|
||||
## Cert-Manager
|
||||
@@ -24,13 +24,15 @@ i.e. general issuer for all namespaces in cluster.
|
||||
|
||||
|
||||
## Test Deployment
|
||||
```
|
||||
k create ns test
|
||||
kubectl create deploy nginx --image=nginx -n test
|
||||
k create svc -n test clusterip nginx --tcp=80
|
||||
k scale --replicas=3 deployment/nginx -n test
|
||||
|
||||
```
|
||||
|
||||
## Install Traefik & Cert-Manager
|
||||
```
|
||||
|
||||
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
|
||||
|
||||
@@ -40,23 +42,25 @@ helm repo add jetstack https://charts.jetstack.io --force-update
|
||||
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml
|
||||
|
||||
|
||||
k apply cert-manager-issuer-secret.yaml
|
||||
k apply -f cert-manager-issuer-secret.yaml
|
||||
k get secret -n cert-manager
|
||||
|
||||
k apply -f cert-manager-cluster-issuer.yaml
|
||||
```
|
||||
|
||||
|
||||
## Switch Test Deployment to https
|
||||
|
||||
```
|
||||
k apply -f test/nginx-certificate.yaml
|
||||
k apply -f test/nginx-ingress.yaml
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Troubleshooting steps
|
||||
|
||||
|
||||
|
||||
```
|
||||
k get po -n test -o wide
|
||||
k create svc -n test clusterip nginx
|
||||
k create svc -n test clusterip nginx --tcp=80
|
||||
@@ -70,12 +74,11 @@ k apply -f traefik_lempa/nginx-ingress.yaml
|
||||
k get svc -n test
|
||||
k get ingress
|
||||
k get ingress -n test
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
k get svc ingressRoute
|
||||
k get svc ingressRoutes
|
||||
k get svc ingressroutes.traefik.io
|
||||
@@ -89,4 +92,5 @@ k get po
|
||||
k apply -f traefik_lempa/cert-manager-issuer-secret.yaml
|
||||
k get secret
|
||||
k get secrets
|
||||
k get clusterissuers.cert-manager.io
|
||||
k get clusterissuers.cert-manager.io
|
||||
```
|
||||
@@ -4,7 +4,7 @@ metadata:
|
||||
name: cloudflare-cluster-issuer
|
||||
spec:
|
||||
acme:
|
||||
email: hello@schnorbus.net
|
||||
email: {{ .Env.ACME_EMAIL }}
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
privateKeySecretRef:
|
||||
name: cloudflare-acme-key
|
||||
@@ -5,4 +5,4 @@ metadata:
|
||||
namespace: cert-manager
|
||||
type: Opaque
|
||||
stringData:
|
||||
api-token: DgU4SMUpQVAoS8IisGxnSQCUI7PbclhvegdqF9I1
|
||||
api-token: {{ .Env.CLOUDFLARE_API_TOKEN }}
|
||||
62
Traefik/justfile
Normal file
62
Traefik/justfile
Normal file
@@ -0,0 +1,62 @@
|
||||
set fallback:=true
|
||||
|
||||
export CERT_MANAGER_NAMESPACE := env("CERT_MANAGER_NAMESPACE", "cert-manager")
|
||||
export TRAEFIK_NAMESPACE := env("TRAEFIK_NAMESPACE", "traefik")
|
||||
|
||||
add-helm-repos:
|
||||
helm repo add traefik https://helm.traefik.io/traefik --force-update
|
||||
helm repo add jetstack https://charts.jetstack.io --force-update
|
||||
helm repo update
|
||||
|
||||
install:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
just add-helm-repos
|
||||
|
||||
helm upgrade traefik traefik/traefik \
|
||||
--install \
|
||||
--cleanup-on-fail \
|
||||
--namespace ${TRAEFIK_NAMESPACE} \
|
||||
--create-namespace \
|
||||
--values traefik-values.yaml
|
||||
|
||||
helm upgrade cert-manager jetstack/cert-manager \
|
||||
--install \
|
||||
--cleanup-on-fail \
|
||||
--namespace ${CERT_MANAGER_NAMESPACE} \
|
||||
--create-namespace \
|
||||
--values cert-manager-values.yaml
|
||||
|
||||
uninstall:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
|
||||
helm uninstall traefik --namespace ${TRAEFIK_NAMESPACE} || true
|
||||
helm uninstall cert-manager --namespace ${CERT_MANAGER_NAMESPACE} || true
|
||||
|
||||
setup-cluster-issuer:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
just env::check
|
||||
gomplate -f cert-manager-issuer-secret-gomplate.yaml | kubectl apply -f -
|
||||
gomplate -f cert-manager-cluster-issuer-gomplate.yaml | kubectl apply -f -
|
||||
|
||||
# Get status of cert-manager components
|
||||
status:
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
echo "=== cert-manager Components Status ==="
|
||||
echo ""
|
||||
echo "Namespace: ${CERT_MANAGER_NAMESPACE}"
|
||||
echo ""
|
||||
echo "Pods:"
|
||||
kubectl get pods -n ${CERT_MANAGER_NAMESPACE}
|
||||
echo ""
|
||||
echo "Services:"
|
||||
kubectl get services -n ${CERT_MANAGER_NAMESPACE}
|
||||
echo ""
|
||||
echo "CRDs:"
|
||||
kubectl get crd | grep cert-manager.io
|
||||
@@ -11,5 +11,5 @@ ingressRoute:
|
||||
dashboard:
|
||||
enabled: true
|
||||
entryPoints: [web, websecure]
|
||||
matchRule: Host(`traefik-dashboard.k8s.schnrbs.work`)
|
||||
matchRule: Host(`traefik-dashboard.{{ .Env.EXTERNAL_DOMAIN }}`)
|
||||
|
||||
9
env/env.local.gomplate
vendored
Normal file
9
env/env.local.gomplate
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
# shellcheck disable=all
|
||||
K8S_CONTEXT={{ .Env.K8S_CONTEXT }}
|
||||
K8S_MASTER_NODE_NAME={{ .Env.K8S_MASTER_NODE_NAME }}
|
||||
SERVER_IP={{ .Env.SERVER_IP }}
|
||||
AGENT_IP={{ .Env.AGENT_IP }}
|
||||
METALLB_ADDRESS_RANGE={{ .Env.METALLB_ADDRESS_RANGE }}
|
||||
CLOUDFLARE_API_TOKEN={{ .Env.CLOUDFLARE_API_TOKEN}}
|
||||
ACME_EMAIL={{ .Env.ACME_EMAIL}}
|
||||
EXTERNAL_DOMAIN={{ .Env.EXTERNAL_DOMAIN }}
|
||||
125
env/justfile
vendored
Normal file
125
env/justfile
vendored
Normal file
@@ -0,0 +1,125 @@
|
||||
set fallback := true

# Target file written by the 'setup' recipe (relative to the repo root).
export ENV_FILE := ".env.local"

export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export SERVER_IP := env("SERVER_IP", "")
export AGENT_IP := env("AGENT_IP", "")
# Also export every variable the 'setup' recipe prompts for and the gomplate
# template reads via .Env; without these the bash recipes (which run with
# 'set -u') fail with "unbound variable" when the values are not yet set.
export METALLB_ADDRESS_RANGE := env("METALLB_ADDRESS_RANGE", "")
export CLOUDFLARE_API_TOKEN := env("CLOUDFLARE_API_TOKEN", "")
export ACME_EMAIL := env("ACME_EMAIL", "")
export EXTERNAL_DOMAIN := env("EXTERNAL_DOMAIN", "")
|
||||
|
||||
# Fail fast when required environment variables are missing.
check:
    #!/bin/bash
    set -euo pipefail
    # Every variable collected by 'setup' (and consumed by the gomplate
    # templates) must be present. ${!var:-} is indirect expansion with a
    # default, so unset variables are tolerated under 'set -u' and reported
    # instead of crashing the check itself.
    for var in K8S_CONTEXT K8S_MASTER_NODE_NAME SERVER_IP AGENT_IP \
               METALLB_ADDRESS_RANGE CLOUDFLARE_API_TOKEN ACME_EMAIL EXTERNAL_DOMAIN; do
        if [ -z "${!var:-}" ]; then
            echo "${var} is not set. Please execute 'just env::setup'" >&2
            exit 1
        fi
    done
|
||||
|
||||
# Interactively collect all cluster settings with gum and render ../.env.local.
setup:
    #!/bin/bash
    set -euo pipefail
    # Default every variable: the parent justfile may not export all of them,
    # and referencing an unset name under 'set -u' would abort immediately.
    K8S_CONTEXT="${K8S_CONTEXT:-}"
    K8S_MASTER_NODE_NAME="${K8S_MASTER_NODE_NAME:-}"
    SERVER_IP="${SERVER_IP:-}"
    AGENT_IP="${AGENT_IP:-}"
    METALLB_ADDRESS_RANGE="${METALLB_ADDRESS_RANGE:-}"
    CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
    ACME_EMAIL="${ACME_EMAIL:-}"
    EXTERNAL_DOMAIN="${EXTERNAL_DOMAIN:-}"
    if [ -f ../.env.local ]; then
        echo ".env.local already exists." >&2
        if gum confirm "Do you want to overwrite it?"; then
            # Clear every collected value so the user is re-prompted for all
            # of them (previously K8S_MASTER_NODE_NAME and the cert/domain
            # variables kept stale values and were silently skipped).
            K8S_CONTEXT=""
            K8S_MASTER_NODE_NAME=""
            SERVER_IP=""
            AGENT_IP=""
            METALLB_ADDRESS_RANGE=""
            CLOUDFLARE_API_TOKEN=""
            ACME_EMAIL=""
            EXTERNAL_DOMAIN=""
        elif [[ $? -eq 130 ]]; then
            # gum confirm exits 130 on Ctrl-C.
            echo "Setup cancelled by user." >&2
            exit 1
        else
            echo "Aborting setup." >&2
            exit 1
        fi
    fi
    # prompt VAR_NAME LABEL PLACEHOLDER [extra gum flags...]
    # Re-prompts until the named variable is non-empty; exits on gum cancel.
    prompt() {
        local var="$1" label="$2" placeholder="$3"
        shift 3
        local value
        while [ -z "${!var}" ]; do
            if ! value=$(gum input --prompt="${label}: " \
                    --width=100 --placeholder="${placeholder}" "$@"); then
                echo "Setup cancelled." >&2
                exit 1
            fi
            printf -v "${var}" '%s' "${value}"
        done
    }
    prompt K8S_CONTEXT "Context name" "context"
    prompt K8S_MASTER_NODE_NAME "Master Node Hostname" "Master Node Name"
    prompt SERVER_IP "IP of Server/Master Node" "Master Node IP"
    prompt AGENT_IP "IP of Agent Node" "Agent Node IP"
    prompt METALLB_ADDRESS_RANGE "IP Range for LoadBalancer" "[x.x.x.x-y.y.y.y]"
    prompt CLOUDFLARE_API_TOKEN "Cloudflare API Token" "API Token" --password
    prompt ACME_EMAIL "ACME Email for Cert-Manager" "Email"
    prompt EXTERNAL_DOMAIN "External Domain" "Domain"
    # gomplate reads values through .Env, i.e. from its process environment;
    # plain shell assignments above are invisible to the child process unless
    # exported here.
    export K8S_CONTEXT K8S_MASTER_NODE_NAME SERVER_IP AGENT_IP \
           METALLB_ADDRESS_RANGE CLOUDFLARE_API_TOKEN ACME_EMAIL EXTERNAL_DOMAIN
    echo "Generating .env.local file..."
    rm -f ../.env.local
    gomplate -f env.local.gomplate -o ../.env.local
|
||||
@@ -2,5 +2,16 @@ https://www.reddit.com/r/GitOps/comments/1ih3b4a/discussion_setting_up_fluxcd_on
|
||||
|
||||
https://bash.ghost.io/k8s-home-lab-gitops-with-fluxcd/
|
||||
|
||||
# Setup using internal Gitea server
|
||||
## Create a Gitea personal access token and export it as an env var
|
||||
```
|
||||
export GITEA_TOKEN=<my-token>
|
||||
```
|
||||
## Bootstrap
|
||||
```
|
||||
flux bootstrap gitea --repository=k3s-homelab --branch=main --personal --owner baschno --hostname gitty.homeee.schnorbus.net --ssh-hostname=gitty.fritz.box:2221 --verbose --path=./clusters/homelab
|
||||
```
|
||||
|
||||
https://bash.ghost.io/secure-kubernetes-secrets-disaster-recovery-with-sops-gitops-fluxcd/
|
||||
|
||||
"Make a 4×4 grid starting with the 1880s. In each section, I should appear styled according to that decade (clothing, hairstyle, facial hair, accessories). Use colors, background, & film style accordingly."
|
||||
12
justfile
Normal file
12
justfile
Normal file
@@ -0,0 +1,12 @@
|
||||
# Root justfile: loads settings from .env.local and wires up the
# per-component submodules below.
set dotenv-filename := ".env.local"

# Prefer project-local node binaries over globally installed ones.
export PATH := "./node_modules/.bin:" + env_var('PATH')

# Default recipe: list all recipes (including submodule recipes) in file order.
[private]
default:
    @just --list --unsorted --list-submodules

mod env
mod BasicSetup '01_Basic_Setup'
mod MetalLbSetup 'Metallb_Setup'
mod Traefik
|
||||
@@ -8,11 +8,12 @@ Zuerst solltest du sicherstellen, dass Longhorn auf deinem Cluster installiert i
|
||||
|
||||
#### Node Labeling
|
||||
|
||||
In the case not all nodes should provide disk
|
||||
If not all nodes should provide disk storage — e.g. only certain nodes have special/fast disks —
|
||||
the StorageClass needs to be adapted by adding a node selector [1].
|
||||
```
|
||||
k label nodes k3s-prod-worker-{1..3} node.longhorn.io/create-default-disk=true
|
||||
```
|
||||
|
||||
[1] https://longhorn.io/kb/tip-only-use-storage-on-a-set-of-nodes/
|
||||
|
||||
|
||||
#### Mit Helm:
|
||||
|
||||
Reference in New Issue
Block a user