Compare commits

...

15 Commits

Author SHA1 Message Date
baschno
09026d6812 move test deployment to different justfile 2025-12-29 18:33:46 +01:00
baschno
24991fce90 add setup-cluster-issuer 2025-12-28 17:04:24 +01:00
baschno
65a59d2d0c WIP: cert manager 2025-12-28 16:19:08 +01:00
baschno
85fb620e39 add module traefik 2025-12-28 11:19:30 +01:00
baschno
b56e02d2ed fix formatting 2025-12-28 11:19:12 +01:00
baschno
15cb2ce903 adding test deployment 2025-12-28 11:18:46 +01:00
baschno
b47fe8f66b fix formatting 2025-12-27 20:38:12 +01:00
baschno
c5810661e5 Add support for metallb installation 2025-12-27 20:32:16 +01:00
baschno
7ddc08d622 add local docker registry config 2025-12-27 09:58:15 +01:00
baschno
c5aa7f8105 fix context name parameter 2025-12-26 20:15:41 +01:00
baschno
0c6cfedcde update manual readme 2025-12-22 20:48:17 +01:00
2be83a977a Merge pull request 'just enabled' (#1) from just into master
Reviewed-on: #1
2025-12-22 19:47:19 +00:00
baschno
4f5a18c84c install incl agent ready 2025-12-22 20:41:06 +01:00
baschno
7a54346331 add local container registry 2025-12-22 20:15:48 +01:00
baschno
5abc0de38a add just and mise tool support 2025-12-22 11:21:20 +01:00
26 changed files with 679 additions and 1839 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
.env.local

View File

@@ -39,4 +39,25 @@ Rancher Installation
``` ```
kubectl taint nodes master node-role.kubernetes.io/master=:NoSchedule kubectl taint nodes master node-role.kubernetes.io/master=:NoSchedule
``` ```
# Just Setup // K3sup
export SERVER_IP=192.168.178.45
export AGENT_IP=192.168.178.75
export USER=basti
k3sup install \
--cluster \
--ip 192.168.178.45 \
--user $USER \
--merge \
--local-path $HOME/.kube/config \
--context my-k3s
k3sup join \
--ip $AGENT_IP \
--server-ip $SERVER_IP \
--user $USER

148
01_Basic_Setup/justfile Normal file
View File

@@ -0,0 +1,148 @@
# 01_Basic_Setup — install/uninstall a k3s cluster with k3sup and optionally
# deploy a local in-cluster Docker registry (reachable via NodePort 30500).
set fallback := true

# Connection/config values; normally sourced from .env.local (see `just env::setup`).
export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export EXTERNAL_K8S_HOST := env("EXTERNAL_K8S_HOST", "")
export KEYCLOAK_HOST := env("KEYCLOAK_HOST", "")
export KEYCLOAK_REALM := env("KEYCLOAK_REALM", "buunstack")
export K8S_OIDC_CLIENT_ID := env('K8S_OIDC_CLIENT_ID', "k8s")
export K3S_ENABLE_REGISTRY := env("K3S_ENABLE_REGISTRY", "true")
export SERVER_IP := env("K3S_SERVER_IP", "192.168.178.45")
export AGENT_IP := env("K3S_AGENT_IP", "192.168.178.75")
# NOTE(review): this shadows the login $USER inside every recipe — confirm intended.
export USER := env("K3S_USER", "basti")

[private]
default:
    @just --list --unsorted --list-submodules

# Install k3s on the master node; optionally merge kubeconfig and deploy the registry.
install:
    #!/bin/bash
    set -euo pipefail
    just env::check
    username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
    kubeconfig=""
    context=""
    if gum confirm "Update KUBECONFIG?"; then
        kubeconfig=$(
            gum input --prompt="KUBECONFIG file: " --value="${HOME}/.kube/config" --width=100
        )
        context=$(
            gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
        )
    fi
    # Build the k3sup argument list as an array so values survive word-splitting.
    args=(
        "install"
        "--context" "${context}"
        "--host" "${K8S_MASTER_NODE_NAME}"
        "--user" "${username}"
        "--no-extras"
    )
    if [ -n "${kubeconfig}" ]; then
        mkdir -p "$(dirname "${kubeconfig}")"
        args+=("--local-path" "${kubeconfig}" "--merge")
    fi
    # ${args[*]} (not [@]) joins the array into one word for display (SC2145);
    # matches the add-agent recipe below.
    echo "Running: k3sup ${args[*]}"
    k3sup "${args[@]}"
    if [ -n "${context}" ]; then
        kubectl config use-context "${context}"
    fi
    if [ "${K3S_ENABLE_REGISTRY}" = "true" ]; then
        echo "Setting up local Docker registry..."
        # Deploy Docker registry to cluster
        kubectl apply -f ./registry/registry.yaml
        # Set Pod Security Standard for registry namespace
        kubectl label namespace registry pod-security.kubernetes.io/enforce=restricted --overwrite
        # Wait for registry deployment
        echo "Waiting for registry to be ready..."
        kubectl wait --for=condition=available --timeout=60s deployment/registry -n registry
        # Configure registries.yaml for k3s
        just configure-registry
        echo "✓ Local Docker registry deployed and configured"
        echo ""
        echo "Registry accessible at:"
        echo "  localhost:30500"
        echo ""
        echo "Usage:"
        echo "  export DOCKER_HOST=ssh://${K8S_MASTER_NODE_NAME}"
        echo "  docker build -t localhost:30500/myapp:latest ."
        echo "  docker push localhost:30500/myapp:latest"
        echo "  kubectl run myapp --image=localhost:30500/myapp:latest"
    fi
    echo "k3s cluster installed on ${K8S_MASTER_NODE_NAME}."

# Remove k3s from server (and optionally agent), then clean kubeconfig entries.
uninstall:
    #!/bin/bash
    set -euo pipefail
    if gum confirm "Uninstall k3s from ${K8S_MASTER_NODE_NAME}?"; then
        if gum confirm "Also remove Agent node at ${AGENT_IP}?"; then
            echo "Removing Agent node at ${AGENT_IP}..."
            ssh "${AGENT_IP}" "/usr/local/bin/k3s-agent-uninstall.sh"
        fi
        echo "Removing content of Server node..."
        ssh "${K8S_MASTER_NODE_NAME}" "/usr/local/bin/k3s-uninstall.sh"
        echo "Cleaning up kubeconfig entries..."
        # Resolve the cluster/user referenced by the context before deleting it.
        cluster_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.cluster // empty")
        user_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.user // empty")
        if kubectl config get-contexts "${K8S_CONTEXT}" &>/dev/null; then
            kubectl config delete-context "${K8S_CONTEXT}"
            echo "Deleted context: ${K8S_CONTEXT}"
        fi
        if [ -n "${cluster_name}" ] && kubectl config get-clusters | grep -q "^${cluster_name}$"; then
            kubectl config delete-cluster "${cluster_name}"
            echo "Deleted cluster: ${cluster_name}"
        fi
        if [ -n "${user_name}" ] && kubectl config get-users | grep -q "^${user_name}$"; then
            kubectl config delete-user "${user_name}"
            echo "Deleted user: ${user_name}"
        fi
        echo "k3s cluster uninstalled from ${K8S_CONTEXT}."
    else
        echo "Uninstallation cancelled." >&2
        exit 1
    fi

# Join an additional agent node to the existing cluster.
add-agent:
    #!/bin/bash
    set -euo pipefail
    just env::check
    username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
    new_agent_ip=$(gum input --prompt="Agent IP to join cluster: " --value="${AGENT_IP}" --width=100)
    args=(
        "join"
        "--ip" "${new_agent_ip}"
        "--server-ip" "${SERVER_IP}"
        "--user" "${username}"
    )
    echo "Running: k3sup ${args[*]}"
    k3sup "${args[@]}"
    echo "Agent node at ${new_agent_ip} added to cluster."

# Configure k3s to use local registry
configure-registry:
    #!/bin/bash
    set -euo pipefail
    echo "Configuring k3s registries.yaml..."
    ssh "${K8S_MASTER_NODE_NAME}" "sudo mkdir -p /etc/rancher/k3s"
    gomplate -f ./registry/registries.gomplate.yaml | ssh "${K8S_MASTER_NODE_NAME}" "sudo tee /etc/rancher/k3s/registries.yaml > /dev/null"
    echo "Restarting k3s to apply registry configuration..."
    ssh "${K8S_MASTER_NODE_NAME}" "sudo systemctl restart k3s"
    echo "✓ Registry configuration applied"

View File

@@ -0,0 +1,4 @@
# k3s registries.yaml fragment: trust the in-cluster registry on NodePort 30500
# without verifying its TLS certificate (local/self-signed setup).
configs:
  "localhost:30500":
    tls:
      insecure_skip_verify: true

View File

@@ -0,0 +1,109 @@
# In-cluster Docker registry (registry:2): Namespace + Deployment + two
# Services (ClusterIP on 5000, NodePort 30500 for pushes from the host).
apiVersion: v1
kind: Namespace
metadata:
  name: registry
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry
  namespace: registry
  labels:
    app: registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
  template:
    metadata:
      labels:
        app: registry
    spec:
      # Pod-level hardening: run as 'nobody' (65534) with the default seccomp
      # profile — compatible with the "restricted" Pod Security Standard that
      # the install recipe enforces on this namespace.
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
        fsGroup: 65534
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: registry
          image: registry:2
          ports:
            - containerPort: 5000
              name: http
          resources:
            requests:
              cpu: 25m
              memory: 128Mi
            # NOTE(review): 20Gi memory limit looks very generous for a single
            # registry:2 pod — confirm this is intentional.
            limits:
              cpu: 2000m
              memory: 20Gi
          env:
            # Allow deleting image blobs through the registry HTTP API.
            - name: REGISTRY_STORAGE_DELETE_ENABLED
              value: "true"
            - name: REGISTRY_HTTP_ADDR
              value: "0.0.0.0:5000"
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 65534
            capabilities:
              drop:
                - ALL
          volumeMounts:
            # emptyDir-backed image storage: contents are lost when the pod
            # is rescheduled.
            - name: registry-data
              mountPath: /var/lib/registry
            # Writable /tmp is required because the root filesystem is read-only.
            - name: tmp
              mountPath: /tmp
          livenessProbe:
            httpGet:
              path: /v2/
              port: 5000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /v2/
              port: 5000
            initialDelaySeconds: 5
            periodSeconds: 5
      volumes:
        - name: registry-data
          emptyDir: {}
        - name: tmp
          emptyDir: {}
---
# In-cluster access: registry.registry.svc:5000
apiVersion: v1
kind: Service
metadata:
  name: registry
  namespace: registry
  labels:
    app: registry
spec:
  selector:
    app: registry
  ports:
    - port: 5000
      targetPort: 5000
      name: http
  type: ClusterIP
---
# Host/node access for docker push/pull: localhost:30500 on every node.
apiVersion: v1
kind: Service
metadata:
  name: registry-nodeport
  namespace: registry
  labels:
    app: registry
spec:
  selector:
    app: registry
  ports:
    - port: 5000
      targetPort: 5000
      nodePort: 30500
      name: http
  type: NodePort

File diff suppressed because it is too large Load Diff

View File

@@ -5,5 +5,4 @@ metadata:
namespace: metallb-system namespace: metallb-system
spec: spec:
addresses: addresses:
# - 192.168.178.220-192.168.178.225 #pve-82 - {{ .Env.METALLB_ADDRESS_RANGE }}
- 192.168.178.160-192.168.178.180 #pve-83

66
Metallb_Setup/justfile Normal file
View File

@@ -0,0 +1,66 @@
# Metallb_Setup — install/uninstall MetalLB plus a LoadBalancer test deployment.
set fallback := true

export K8S_CONTEXT := env("K8S_CONTEXT", "")
export SERVER_IP := env("K3S_SERVER_IP", "192.168.178.45")
export USER := env("K3S_USER", "basti")

[private]
default:
    @just --list --unsorted --list-submodules

# Install MetalLB, then apply the templated address pool and the advertisement.
install:
    #!/bin/bash
    set -euo pipefail
    just env::check
    METALLB_VERSION="v0.15.3"
    context=""
    if gum confirm "Update KUBECONFIG?"; then
        context=$(
            gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
        )
    fi
    if [ -n "${context}" ]; then
        kubectl config use-context "${context}"
    fi
    kubectl apply -f "https://raw.githubusercontent.com/metallb/metallb/${METALLB_VERSION}/config/manifests/metallb-native.yaml"
    gum spin --spinner dot --title "Waiting for MetalLB to be ready..." -- kubectl wait --namespace metallb-system --for=condition=available deployment --all --timeout=120s
    echo "MetalLB ${METALLB_VERSION} installed successfully."
    # Pool range is injected from the environment by gomplate.
    gomplate -f address-pool.gomplate.yaml | kubectl apply -f -
    echo "Address pool configured."
    kubectl apply -f advertisement.yaml
    echo "Advertisement created."

# Remove MetalLB entirely; succeeds even when it was never installed
# (a bare `get ns && delete ns` would exit non-zero under `set -e`).
uninstall:
    #!/bin/bash
    set -euo pipefail
    just env::check
    if kubectl get namespace metallb-system &>/dev/null; then
        kubectl delete ns metallb-system
    fi

# Deploy the nginx LoadBalancer test workload.
test-deployment:
    #!/bin/bash
    set -euo pipefail
    just env::check
    kubectl apply -f test-deployment.yaml
    echo "Test deployment created. You can check the service with 'kubectl get svc nginx -o wide -n test'."
    echo "To clean up, run 'just test-deployment-cleanup'."

# Tear the test workload down again.
test-deployment-cleanup:
    #!/bin/bash
    set -euo pipefail
    just env::check
    kubectl delete -f test-deployment.yaml
    echo "Test deployment and service deleted."

View File

@@ -9,4 +9,4 @@ spec:
name: cloudflare-cluster-issuer name: cloudflare-cluster-issuer
kind: ClusterIssuer kind: ClusterIssuer
dnsNames: dnsNames:
- schnipo.k8s.schnrbs.work - schnipo.{{.Env.EXTERNAL_DOMAIN}}

View File

@@ -0,0 +1,43 @@
# Demo workload: 3 replicas of the dish-schnipo app behind a ClusterIP service.
apiVersion: v1
kind: Namespace
metadata:
  name: dishes
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dish-schnipo
  namespace: dishes
  labels:
    app: dishes
spec:
  replicas: 3
  selector:
    matchLabels:
      app: dishes
  template:
    metadata:
      labels:
        app: dishes
    spec:
      containers:
        - name: dish-schnipo
          # NOTE(review): no tag pinned — implicitly pulls :latest; confirm intended.
          image: bschnorbus/dish-schnipo
          ports:
            - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: dish-schnipo
  namespace: dishes
spec:
  type: ClusterIP
  selector:
    app: dishes
  ports:
    - port: 80          # service port
      targetPort: 8080  # container port
      protocol: TCP

View File

@@ -7,10 +7,12 @@ spec:
entryPoints: entryPoints:
- websecure - websecure
routes: routes:
- match: Host(`schnipo.k8s.schnrbs.work`) - match: Host(`schnipo.{{.Env.EXTERNAL_DOMAIN}}`)
kind: Rule kind: Rule
services: services:
- name: schnipo - name: schnipo
port: 8080 port: 80
targetPort: 8080
tls: tls:
secretName: schnipo-certificate-secret secretName: schnipo-certificate-secret

View File

@@ -9,4 +9,4 @@ spec:
name: cloudflare-cluster-issuer name: cloudflare-cluster-issuer
kind: ClusterIssuer kind: ClusterIssuer
dnsNames: dnsNames:
- nginx-test.k8s.schnrbs.work - nginx-test.{{.Env.EXTERNAL_DOMAIN}}

View File

@@ -0,0 +1,43 @@
# LoadBalancer smoke test: 3 nginx replicas in the 'test' namespace.
# The Service type is LoadBalancer, so an external IP must be provided by a
# load-balancer implementation (presumably MetalLB here — see Metallb_Setup).
apiVersion: v1
kind: Namespace
metadata:
  name: test
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: test
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:latest
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: test
spec:
  type: LoadBalancer
  selector:
    app: nginx
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP

View File

@@ -7,10 +7,10 @@ spec:
entryPoints: entryPoints:
- websecure - websecure
routes: routes:
- match: Host(`nginx-test.k8s.schnrbs.work`) - match: Host(`nginx-test.{{.Env.EXTERNAL_DOMAIN}}`)
kind: Rule kind: Rule
services: services:
- name: nginx - name: nginx
port: 80 port: 80
tls: tls:
secretName: nginx-certificate-secret secretName: nginx-certificate-secret

View File

@@ -7,7 +7,7 @@ metadata:
traefik.ingress.kubernetes.io/router.entrypoints: websecure traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec: spec:
rules: rules:
- host: nginx-test.k8s.schnrbs.work - host: nginx-test.int.schnrbs.work
http: http:
paths: paths:
- path: / - path: /
@@ -19,5 +19,5 @@ spec:
number: 80 number: 80
tls: tls:
- hosts: - hosts:
- nginx-test.k8s.schnrbs.work - nginx-test.int.schnrbs.work
secretName: nginx-certificate-secret secretName: nginx-certificate-secret

View File

@@ -4,7 +4,7 @@
helm repo add traefik https://helm.traefik.io/traefik helm repo add traefik https://helm.traefik.io/traefik
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
## Cert-Manager ## Cert-Manager
@@ -24,13 +24,15 @@ i.e. general issuer for all namespaces in cluster.
## Test Deployment ## Test Deployment
```
k create ns test k create ns test
kubectl create deploy nginx --image=nginx -n test kubectl create deploy nginx --image=nginx -n test
k create svc -n test clusterip nginx --tcp=80 k create svc -n test clusterip nginx --tcp=80
k scale --replicas=3 deployment/nginx -n test k scale --replicas=3 deployment/nginx -n test
```
## Install Traefik & Cert-Manager ## Install Traefik & Cert-Manager
```
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
@@ -40,23 +42,25 @@ helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml
k apply cert-manager-issuer-secret.yaml k apply -f cert-manager-issuer-secret.yaml
k get secret -n cert-manager k get secret -n cert-manager
k apply -f cert-manager-cluster-issuer.yaml k apply -f cert-manager-cluster-issuer.yaml
```
## Switch Test Deployment to https ## Switch Test Deployment to https
```
k apply -f test/nginx-certificate.yaml k apply -f test/nginx-certificate.yaml
k apply -f test/nginx-ingress.yaml k apply -f test/nginx-ingress.yaml
```
## Troubleshooting steps ## Troubleshooting steps
```
k get po -n test -o wide k get po -n test -o wide
k create svc -n test clusterip nginx k create svc -n test clusterip nginx
k create svc -n test clusterip nginx --tcp=80 k create svc -n test clusterip nginx --tcp=80
@@ -70,12 +74,11 @@ k apply -f traefik_lempa/nginx-ingress.yaml
k get svc -n test k get svc -n test
k get ingress k get ingress
k get ingress -n test k get ingress -n test
```
```
k get svc ingressRoute k get svc ingressRoute
k get svc ingressRoutes k get svc ingressRoutes
k get svc ingressroutes.traefik.io k get svc ingressroutes.traefik.io
@@ -89,4 +92,5 @@ k get po
k apply -f traefik_lempa/cert-manager-issuer-secret.yaml k apply -f traefik_lempa/cert-manager-issuer-secret.yaml
k get secret k get secret
k get secrets k get secrets
k get clusterissuers.cert-manager.io k get clusterissuers.cert-manager.io
```

View File

@@ -4,7 +4,7 @@ metadata:
name: cloudflare-cluster-issuer name: cloudflare-cluster-issuer
spec: spec:
acme: acme:
email: hello@schnorbus.net email: {{ .Env.ACME_EMAIL }}
server: https://acme-v02.api.letsencrypt.org/directory server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef: privateKeySecretRef:
name: cloudflare-acme-key name: cloudflare-acme-key

View File

@@ -5,4 +5,4 @@ metadata:
namespace: cert-manager namespace: cert-manager
type: Opaque type: Opaque
stringData: stringData:
api-token: DgU4SMUpQVAoS8IisGxnSQCUI7PbclhvegdqF9I1 api-token: {{ .Env.CLOUDFLARE_API_TOKEN }}

62
Traefik/justfile Normal file
View File

@@ -0,0 +1,62 @@
# Traefik — install Traefik + cert-manager via Helm and set up the ACME issuer.
set fallback := true

export CERT_MANAGER_NAMESPACE := env("CERT_MANAGER_NAMESPACE", "cert-manager")
export TRAEFIK_NAMESPACE := env("TRAEFIK_NAMESPACE", "traefik")

# Register/refresh the Helm repositories used below.
add-helm-repos:
    helm repo add traefik https://helm.traefik.io/traefik --force-update
    helm repo add jetstack https://charts.jetstack.io --force-update
    helm repo update

# Idempotent install/upgrade of Traefik and cert-manager.
install:
    #!/bin/bash
    set -euo pipefail
    just env::check
    just add-helm-repos
    helm upgrade traefik traefik/traefik \
        --install \
        --cleanup-on-fail \
        --namespace "${TRAEFIK_NAMESPACE}" \
        --create-namespace \
        --values traefik-values.yaml
    helm upgrade cert-manager jetstack/cert-manager \
        --install \
        --cleanup-on-fail \
        --namespace "${CERT_MANAGER_NAMESPACE}" \
        --create-namespace \
        --values cert-manager-values.yaml

# Remove both releases; best effort — keep going if one is already gone.
uninstall:
    #!/bin/bash
    set -euo pipefail
    just env::check
    helm uninstall traefik --namespace "${TRAEFIK_NAMESPACE}" || true
    helm uninstall cert-manager --namespace "${CERT_MANAGER_NAMESPACE}" || true

# Render and apply the Cloudflare API secret and the ClusterIssuer.
setup-cluster-issuer:
    #!/bin/bash
    set -euo pipefail
    just env::check
    gomplate -f cert-manager-issuer-secret-gomplate.yaml | kubectl apply -f -
    gomplate -f cert-manager-cluster-issuer-gomplate.yaml | kubectl apply -f -

# Get status of cert-manager components
status:
    #!/bin/bash
    set -euo pipefail
    echo "=== cert-manager Components Status ==="
    echo ""
    echo "Namespace: ${CERT_MANAGER_NAMESPACE}"
    echo ""
    echo "Pods:"
    kubectl get pods -n "${CERT_MANAGER_NAMESPACE}"
    echo ""
    echo "Services:"
    kubectl get services -n "${CERT_MANAGER_NAMESPACE}"
    echo ""
    echo "CRDs:"
    # -F: match the literal string ('.' would otherwise be a regex wildcard).
    kubectl get crd | grep -F cert-manager.io

View File

@@ -11,5 +11,5 @@ ingressRoute:
dashboard: dashboard:
enabled: true enabled: true
entryPoints: [web, websecure] entryPoints: [web, websecure]
matchRule: Host(`traefik-dashboard.k8s.schnrbs.work`) matchRule: Host(`traefik-dashboard.{{ .Env.EXTERNAL_DOMAIN }}`)

9
env/env.local.gomplate vendored Normal file
View File

@@ -0,0 +1,9 @@
# shellcheck disable=all
K8S_CONTEXT={{ .Env.K8S_CONTEXT }}
K8S_MASTER_NODE_NAME={{ .Env.K8S_MASTER_NODE_NAME }}
SERVER_IP={{ .Env.SERVER_IP }}
AGENT_IP={{ .Env.AGENT_IP }}
METALLB_ADDRESS_RANGE={{ .Env.METALLB_ADDRESS_RANGE }}
CLOUDFLARE_API_TOKEN={{ .Env.CLOUDFLARE_API_TOKEN}}
ACME_EMAIL={{ .Env.ACME_EMAIL}}
EXTERNAL_DOMAIN={{ .Env.EXTERNAL_DOMAIN }}

125
env/justfile vendored Normal file
View File

@@ -0,0 +1,125 @@
# env — collect cluster settings interactively and render ../.env.local.
set fallback := true

export ENV_FILE := ".env.local"
# Every variable rendered by env.local.gomplate must be exported here:
# (a) `set -u` in the recipes would otherwise abort on the unbound name, and
# (b) only exported variables are visible to gomplate's .Env lookup.
export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export SERVER_IP := env("SERVER_IP", "")
export AGENT_IP := env("AGENT_IP", "")
export METALLB_ADDRESS_RANGE := env("METALLB_ADDRESS_RANGE", "")
export CLOUDFLARE_API_TOKEN := env("CLOUDFLARE_API_TOKEN", "")
export ACME_EMAIL := env("ACME_EMAIL", "")
export EXTERNAL_DOMAIN := env("EXTERNAL_DOMAIN", "")

# Fail fast when the core settings are missing (called by the other modules).
check:
    #!/bin/bash
    set -euo pipefail
    if [ -z "${K8S_CONTEXT}" ]; then
        echo "K8S_CONTEXT is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi
    if [ -z "${K8S_MASTER_NODE_NAME}" ]; then
        echo "K8S_MASTER_NODE_NAME is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi
    if [ -z "${SERVER_IP}" ]; then
        echo "SERVER_IP is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi
    if [ -z "${AGENT_IP}" ]; then
        echo "AGENT_IP is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi

# Prompt for every setting (skipping ones already set in the environment)
# and render ../.env.local from env.local.gomplate.
setup:
    #!/bin/bash
    set -euo pipefail
    if [ -f ../.env.local ]; then
        echo ".env.local already exists." >&2
        if gum confirm "Do you want to overwrite it?"; then
            # Clear ALL previously loaded values so every prompt runs again.
            K8S_CONTEXT=""
            K8S_MASTER_NODE_NAME=""
            SERVER_IP=""
            AGENT_IP=""
            METALLB_ADDRESS_RANGE=""
            CLOUDFLARE_API_TOKEN=""
            ACME_EMAIL=""
            EXTERNAL_DOMAIN=""
        elif [[ $? -eq 130 ]]; then
            # gum exits 130 on Ctrl-C.
            echo "Setup cancelled by user." >&2
            exit 1
        else
            echo "Aborting setup." >&2
            exit 1
        fi
    fi
    while [ -z "${K8S_CONTEXT}" ]; do
        if ! K8S_CONTEXT=$(
            gum input --prompt="Context name: " \
                --width=100 --placeholder="context"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${K8S_MASTER_NODE_NAME}" ]; do
        if ! K8S_MASTER_NODE_NAME=$(
            gum input --prompt="Master Node Hostname: " \
                --width=100 --placeholder="Master Node Name"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${SERVER_IP}" ]; do
        if ! SERVER_IP=$(
            gum input --prompt="IP of Server/Master Node: " \
                --width=100 --placeholder="Master Node IP"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${AGENT_IP}" ]; do
        if ! AGENT_IP=$(
            gum input --prompt="IP of Agent Node: " \
                --width=100 --placeholder="Agent Node IP"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${METALLB_ADDRESS_RANGE}" ]; do
        if ! METALLB_ADDRESS_RANGE=$(
            gum input --prompt="IP Range for LoadBalancer: " \
                --width=100 --placeholder="[x.x.x.x-y.y.y.y]"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${CLOUDFLARE_API_TOKEN}" ]; do
        if ! CLOUDFLARE_API_TOKEN=$(
            gum input --prompt="Cloudflare API Token: " \
                --width=100 --placeholder="API Token" --password
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${ACME_EMAIL}" ]; do
        if ! ACME_EMAIL=$(
            gum input --prompt="ACME Email for Cert-Manager: " \
                --width=100 --placeholder="Email"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${EXTERNAL_DOMAIN}" ]; do
        if ! EXTERNAL_DOMAIN=$(
            gum input --prompt="External Domain: " \
                --width=100 --placeholder="Domain"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    echo "Generating .env.local file..."
    rm -f ../.env.local
    gomplate -f env.local.gomplate -o ../.env.local

View File

@@ -14,3 +14,4 @@ flux bootstrap gitea --repository=k3s-homelab --branch=main --personal --owner b
https://bash.ghost.io/secure-kubernetes-secrets-disaster-recovery-with-sops-gitops-fluxcd/ https://bash.ghost.io/secure-kubernetes-secrets-disaster-recovery-with-sops-gitops-fluxcd/
"Make a 4×4 grid starting with the 1880s. In each section, I should appear styled according to that decade (clothing, hairstyle, facial hair, accessories). Use colors, background, & film style accordingly."

12
justfile Normal file
View File

@@ -0,0 +1,12 @@
# Root justfile: loads .env.local and wires up the per-topic submodules.
set dotenv-filename := ".env.local"
# Prefer project-local node binaries on PATH.
export PATH := "./node_modules/.bin:" + env_var('PATH')

[private]
default:
    @just --list --unsorted --list-submodules

# Submodules (invoked as `just <module>::<recipe>`).
mod env
mod BasicSetup '01_Basic_Setup'
mod MetalLbSetup 'Metallb_Setup'
mod Traefik

7
mise.toml Normal file
View File

@@ -0,0 +1,7 @@
# Tool versions pinned via mise for this repository.
[tools]
jq = '1.8.1'        # JSON processing in the uninstall recipe
k3sup = '0.13.11'   # k3s installer over SSH
helm = '3.19.0'     # Traefik / cert-manager charts
gum = '0.16.2'      # interactive prompts in justfile recipes
gomplate = '4.3.3'  # template rendering (.env.local, manifests)
just = "1.42.4"     # task runner