Compare commits
21 commits: just...4075203b1e
Commits in this compare:

- 4075203b1e
- 92decafc3f
- 09e1bbbc52
- 48d930fedc
- 1f82ce8d02
- a551f2e4ca
- a80dce42b0
- 63243c6d2e
- 1f9f7e275c
- 09026d6812
- 24991fce90
- 65a59d2d0c
- 85fb620e39
- b56e02d2ed
- 15cb2ce903
- b47fe8f66b
- c5810661e5
- 7ddc08d622
- c5aa7f8105
- 0c6cfedcde
- 2be83a977a
@@ -6,7 +6,7 @@ export EXTERNAL_K8S_HOST := env("EXTERNAL_K8S_HOST", "")
 export KEYCLOAK_HOST := env("KEYCLOAK_HOST", "")
 export KEYCLOAK_REALM := env("KEYCLOAK_REALM", "buunstack")
 export K8S_OIDC_CLIENT_ID := env('K8S_OIDC_CLIENT_ID', "k8s")
-export K3S_ENABLE_REGISTRY := env("K3S_ENABLE_REGISTRY", "false")
+export K3S_ENABLE_REGISTRY := env("K3S_ENABLE_REGISTRY", "true")
 export SERVER_IP := env("K3S_SERVER_IP","192.168.178.45")
 export AGENT_IP := env("K3S_AGENT_IP","192.168.178.75")
 export USER := env("K3S_USER","basti")
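All of these defaults go through just's env() function, so any of them can be overridden from the calling shell without editing the justfile; a minimal illustration (the override values are only examples):

```
K3S_ENABLE_REGISTRY=false K3S_SERVER_IP=192.168.178.50 just install
```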
@@ -33,16 +33,17 @@ install:
 
     args=(
         "install"
-        "--context" "${K8S_CONTEXT}"
+        "--context" "${context}"
         "--host" "${K8S_MASTER_NODE_NAME}"
         "--user" "${username}"
+        "--no-extras" #
     )
 
     if [ -n "${kubeconfig}" ]; then
         mkdir -p "$(dirname "${kubeconfig}")"
         args+=("--local-path" "${kubeconfig}" "--merge")
     fi
-    echo "Running: k3sup ${args[*]}"
+    echo "Running: k3sup ${args[@]}"
     k3sup "${args[@]}"
 
     if [ -n "${context}" ]; then
@@ -133,3 +134,15 @@ add-agent:
     k3sup "${args[@]}"
     echo "Agent node at ${new_agent_ip} added to cluster."
 
+# Configure k3s to use local registry
+configure-registry:
+    #!/bin/bash
+    set -euo pipefail
+    echo "Configuring k3s registries.yaml..."
+
+    ssh "${K8S_MASTER_NODE_NAME}" "sudo mkdir -p /etc/rancher/k3s"
+    gomplate -f ./registry/registries.gomplate.yaml | ssh "${K8S_MASTER_NODE_NAME}" "sudo tee /etc/rancher/k3s/registries.yaml > /dev/null"
+
+    echo "Restarting k3s to apply registry configuration..."
+    ssh "${K8S_MASTER_NODE_NAME}" "sudo systemctl restart k3s"
+    echo "✓ Registry configuration applied"
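The `./registry/registries.gomplate.yaml` template itself is not part of this compare. For orientation, a minimal k3s `registries.yaml` pointing at a local registry usually looks like the following; the host name and port here are placeholders, not values from the repo:

```
mirrors:
  "registry.local:5000":
    endpoint:
      - "http://registry.local:5000"
```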
08_Vault/auth-token-secret.yaml (new file, 8 lines)
apiVersion: v1
kind: Secret
metadata:
  name: vault-auth-token
  annotations:
    kubernetes.io/service-account.name: vault-auth
type: kubernetes.io/service-account-token
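Since Kubernetes 1.24 service-account tokens are no longer auto-generated, so this Secret explicitly requests a long-lived token for the `vault-auth` ServiceAccount. To confirm the token was actually populated (plain kubectl, using the default `vault` namespace from the justfile below):

```
kubectl -n vault get secret vault-auth-token -o jsonpath='{.data.token}' | base64 -d | cut -c1-20; echo
```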
08_Vault/justfile (new file, 126 lines)
set fallback := true

export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
export VAULT_CHART_VERSION := env("VAULT_CHART_VERSION", "0.31.0")
export VAULT_HOST := env("VAULT_HOST", "")
export VAULT_ADDR := "https://" + VAULT_HOST
export VAULT_DEBUG := env("VAULT_DEBUG", "false")
SECRET_PATH := "secret"


[private]
default:
    @just --list --unsorted --list-submodules

# Add Helm repository
add-helm-repo:
    helm repo add hashicorp https://helm.releases.hashicorp.com
    helm repo update

# Remove Helm repository
remove-helm-repo:
    helm repo remove hashicorp


# Create Vault namespace
create-namespace:
    @kubectl get namespace ${K8S_VAULT_NAMESPACE} > /dev/null || kubectl create namespace ${K8S_VAULT_NAMESPACE}

# Delete Vault namespace
delete-namespace:
    @kubectl delete namespace ${K8S_VAULT_NAMESPACE} --ignore-not-found

install:
    #!/bin/bash
    set -eu
    just create-namespace
    just add-helm-repo

    gomplate -f vault-values.gomplate.yaml -o vault-values.yaml

    helm upgrade \
        --cleanup-on-fail \
        --install \
        vault \
        hashicorp/vault \
        --namespace ${K8S_VAULT_NAMESPACE} \
        --wait \
        -f vault-values.yaml

    kubectl wait pod --for=condition=PodReadyToStartContainers \
        -n ${K8S_VAULT_NAMESPACE} vault-0 --timeout=5m

    # Wait for Vault service to be ready to accept connections
    echo "Waiting for Vault service to be ready..."
    for i in {1..30}; do
        if kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
            vault status 2>&1 | grep -qE "(Initialized|Sealed)"; then
            echo "✓ Vault service is ready"
            break
        fi
        if [ $i -eq 30 ]; then
            echo "Error: Timeout waiting for Vault service to be ready"
            exit 1
        fi
        sleep 3
    done

    init_output=$(kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
        vault operator init -key-shares=1 -key-threshold=1 -format=json || true)

    root_token=""
    if echo "${init_output}" | grep -q "Vault is already initialized"; then
        echo "Vault is already initialized"
        while [ -z "${root_token}" ]; do
            root_token=$(gum input --prompt="Vault root token: " --password --width=100)
        done
    else
        unseal_key=$(echo "${init_output}" | jq -r '.unseal_keys_b64[0]')
        root_token=$(echo "${init_output}" | jq -r '.root_token')
        kubectl exec -n ${K8S_VAULT_NAMESPACE} vault-0 -- \
            vault operator unseal "${unseal_key}"
        echo "Vault initialized and unsealed successfully"
        echo "Root Token: ${root_token}"
        echo "Unseal Key: ${unseal_key}"
        echo "Please save these credentials securely!"
    fi

    # Wait for all vault instances to pass readiness checks and be ready to serve requests
    kubectl wait pod --for=condition=ready -n ${K8S_VAULT_NAMESPACE} \
        -l app.kubernetes.io/name=vault --timeout=5m

    just setup-kubernetes-auth "${root_token}"


# Uninstall Vault
uninstall delete-ns='false':
    #!/bin/bash
    set -euo pipefail
    helm uninstall vault -n ${K8S_VAULT_NAMESPACE} --ignore-not-found --wait
    just delete-namespace


# Setup Kubernetes authentication
setup-kubernetes-auth root_token='':
    #!/bin/bash
    set -euo pipefail
    export VAULT_TOKEN="{{ root_token }}"
    while [ -z "${VAULT_TOKEN}" ]; do
        VAULT_TOKEN=$(gum input --prompt="Vault root token: " --password --width=100)
    done

    gomplate -f ./serviceaccount.gomplate.yaml | kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f -
    gomplate -f ./rolebinding.gomplate.yaml | kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f -
    kubectl apply -n "${K8S_VAULT_NAMESPACE}" -f ./auth-token-secret.yaml

    SA_SECRET="vault-auth-token"
    SA_JWT=$(kubectl get secret -n ${K8S_VAULT_NAMESPACE} ${SA_SECRET} -o jsonpath='{.data.token}' | base64 --decode)
    SA_CA=$(kubectl get secret -n ${K8S_VAULT_NAMESPACE} ${SA_SECRET} -o jsonpath='{.data.ca\.crt}' | base64 --decode)

    vault auth list -format=json | jq -e '.["kubernetes/"]' >/dev/null 2>&1 || \
        vault auth enable kubernetes

    vault write auth/kubernetes/config \
        token_reviewer_jwt="${SA_JWT}" \
        kubernetes_host="https://kubernetes.default.svc" \
        kubernetes_ca_cert="${SA_CA}"
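`setup-kubernetes-auth` only configures the auth backend; before a workload can log in, a Vault role still has to bind its service account to a policy. That step is not in this compare, so the role, namespace, and policy names below are placeholders — a hedged sketch of the follow-up and of a login call:

```
vault write auth/kubernetes/role/demo-app \
    bound_service_account_names=demo-app \
    bound_service_account_namespaces=default \
    policies=demo-policy \
    ttl=1h

# inside a pod, exchange the projected service-account token for a Vault token
vault write auth/kubernetes/login role=demo-app \
    jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
```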
08_Vault/rolebinding.gomplate.yaml (new file, 12 lines)
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: vault-auth-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: vault-auth
    namespace: {{ .Env.K8S_VAULT_NAMESPACE }}
08_Vault/serviceaccount.gomplate.yaml (new file, 5 lines)
apiVersion: v1
kind: ServiceAccount
metadata:
  name: vault-auth
  namespace: {{ .Env.K8S_VAULT_NAMESPACE }}
08_Vault/vault-values.gomplate.yaml (new file, 16 lines)
server:
  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: traefik
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
    ingressClassName: traefik
    hosts:
      - host: {{ .Env.VAULT_HOST }}
        paths:
          - /
    tls:
      - hosts:
          - {{ .Env.VAULT_HOST }}
  dataStorage:
    storageClass: longhorn
08_Vault/vault-values.yaml (new file, 16 lines)
server:
  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: traefik
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
    ingressClassName: traefik
    hosts:
      - host: vault.test.k8s.schnrbs.work
        paths:
          - /
    tls:
      - hosts:
          - vault.test.k8s.schnrbs.work
  dataStorage:
    storageClass: longhorn
11_storage_tests/foo-pv.yaml (new file, 9 lines)
apiVersion: v1
kind: PersistentVolume
metadata:
  name: foo-pv
spec:
  storageClassName: "longhorn"
  claimRef:
    name: foo-pvc
    namespace: foo
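As committed, this PV only pins a claimRef; the API server will not accept a PersistentVolume without a capacity and a volume source. Purely for illustration, the missing pieces of a static Longhorn PV would look roughly like this (size and volumeHandle are placeholders, not values from the repo):

```
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  csi:
    driver: driver.longhorn.io
    volumeHandle: foo-pv   # must match an existing Longhorn volume name
```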
11_storage_tests/foo-pvc.yaml (new empty file)
@@ -9,7 +9,7 @@ spec:
   volumeMode: Filesystem
   accessModes:
     - ReadWriteOnce
-  persistentVolumeReclaimPolicy: Retain # optionally 'Delete' or 'Recycle'
+  persistentVolumeReclaimPolicy: Delete # optionally 'Delete' or 'Recycle'
   storageClassName: longhorn # use the Longhorn StorageClass name
   csi:
     driver: driver.longhorn.io # the Longhorn CSI driver
@@ -1,16 +1,42 @@
 apiVersion: v1
+kind: Namespace
+metadata:
+  name: foo
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: longhorn-nginx-pvc
+  namespace: foo
+spec:
+  storageClassName: longhorn # the same StorageClass as in the PV
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Gi # the requested size should match that of the PV
+  # volumeName: longhorn-test-pv # the name of the PV this PVC should bind to
+---
+apiVersion: v1
 kind: Pod
 metadata:
   name: longhorn-demo
-  namespace: default
+  namespace: foo
 spec:
   containers:
     - name: demo-container
       image: nginx:latest
+      resources:
+        requests:
+          memory: "64Mi"
+          cpu: "250m"
+        limits:
+          memory: "128Mi"
+          cpu: "500m"
       volumeMounts:
         - mountPath: /usr/share/nginx/html
           name: longhorn-volume
   volumes:
     - name: longhorn-volume
       persistentVolumeClaim:
-        claimName: longhorn-test-pvc
+        claimName: longhorn-nginx-pvc
@@ -167,7 +167,8 @@ With these steps you have a Persistent Volume (PV) and a Persistent Vol
 
 
 ## Disable Localpath as default
+```
 kubectl get storageclass
 
 kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
+```
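A natural companion step, not shown in this diff, is promoting Longhorn to be the default StorageClass and checking the result; plain kubectl, nothing repo-specific:

```
kubectl patch storageclass longhorn -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
kubectl get storageclass   # longhorn should now be listed as (default)
```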
Longhorn/auth (new file, 1 line)
basti:$apr1$N23gJpBe$CYlDcwTfp8YsQMq0UcADQ0
Longhorn/justfile (new file, 67 lines)
set fallback:=true

export LONGHORN_NAMESPACE := env("LONGHORN_NAMESPACE","longhorn-system")
export LONGHORN_VERSION := env("LONGHORN_VERSION","1.10.1")

add-helm-repo:
    helm repo add longhorn https://charts.longhorn.io --force-update
    helm repo update

# Delete namespace
delete-namespace:
    #!/bin/bash
    set -euo pipefail
    if kubectl get namespace ${LONGHORN_NAMESPACE} &>/dev/null; then
        kubectl delete namespace ${LONGHORN_NAMESPACE} --ignore-not-found
    else
        echo "Namespace ${LONGHORN_NAMESPACE} does not exist."
    fi


install:
    #!/bin/bash
    set -euo pipefail
    just env::check

    just add-helm-repo

    helm upgrade longhorn longhorn/longhorn \
        --install \
        --cleanup-on-fail \
        --namespace ${LONGHORN_NAMESPACE} \
        --create-namespace \
        --version ${LONGHORN_VERSION} \
        --values longhorn-values.yaml

    # remove default storage class annotation from local-path storage class
    kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'

uninstall:
    #!/bin/bash
    set -euo pipefail

    for crd in $(kubectl get crd -o name | grep longhorn); do
        kubectl patch $crd -p '{"metadata":{"finalizers":[]}}' --type=merge
    done

    kubectl -n ${LONGHORN_NAMESPACE} patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag || true

    helm uninstall longhorn --namespace ${LONGHORN_NAMESPACE} || true
    just delete-namespace


install-dashboard-ingress:
    #!/bin/bash
    set -euo pipefail
    just env::check

    echo "Deploying Longhorn Dashboard Ingress with EXTERNAL_DOMAIN=${EXTERNAL_DOMAIN}"
    gomplate -f longhorn-certificate-gomplate.yaml | kubectl apply -f -
    gomplate -f longhorn-ingressroute-gomplate.yaml | kubectl apply -f -

uninstall-dashboard-ingress:
    #!/bin/bash
    set -euo pipefail

    kubectl delete -f longhorn-ingressroute-gomplate.yaml || true
    kubectl delete -f longhorn-certificate-gomplate.yaml || true
@@ -7,7 +7,7 @@ metadata:
 spec:
   secretName: longhorn-web-ui-tls
   dnsNames:
-    - longhorn-dashboard.k8s.schnrbs.work
+    - longhorn-dashboard.{{.Env.EXTERNAL_DOMAIN}}
   issuerRef:
     name: cloudflare-cluster-issuer
     kind: ClusterIssuer
@@ -7,7 +7,7 @@ spec:
   entryPoints:
     - websecure
   routes:
-    - match: Host(`longhorn-dashboard.k8s.schnrbs.work`)
+    - match: Host(`longhorn-dashboard.{{.Env.EXTERNAL_DOMAIN}}`)
      kind: Rule
      services:
        - name: longhorn-frontend
@@ -1,18 +1,6 @@
-global:
-  nodeSelector:
-    node.longhorn.io/create-default-disk: "true"
-
-service:
-  ui:
-    type: NodePort
-    nodePort: 30050
-  manager:
-    type: ClusterIP
 
 # Replica count for the default Longhorn StorageClass.
 persistence:
-  defaultClass: false
-  defaultFsType: ext4
   defaultClassReplicaCount: 2
   reclaimPolicy: Delete
 
@@ -25,12 +13,10 @@ csi:
 
 # Default replica count and storage path
 defaultSettings:
-  upgradeChecker: false
   kubernetesClusterAutoscalerEnabled: false
   allowCollectingLonghornUsageMetrics: false
-  createDefaultDiskLabeledNodes: true
   defaultReplicaCount: 2
-  defaultDataPath: "/k8s-data"
+  # defaultDataPath: "/k8s-data"
 
 longhornUI:
   replicas: 1
Longhorn/pod_with_pvc.yaml (new file, 40 lines)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: longhorn-volv-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn
  resources:
    requests:
      storage: 2Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: volume-test
  namespace: default
spec:
  restartPolicy: Always
  containers:
    - name: volume-test
      image: nginx:stable-alpine
      imagePullPolicy: IfNotPresent
      livenessProbe:
        exec:
          command:
            - ls
            - /data/lost+found
        initialDelaySeconds: 5
        periodSeconds: 5
      volumeMounts:
        - name: volv
          mountPath: /data
      ports:
        - containerPort: 80
  volumes:
    - name: volv
      persistentVolumeClaim:
        claimName: longhorn-volv-pvc
File diff suppressed because it is too large.
@@ -5,5 +5,4 @@ metadata:
   namespace: metallb-system
 spec:
   addresses:
-    # - 192.168.178.220-192.168.178.225 #pve-82
-    - 192.168.178.160-192.168.178.180 #pve-83
+    - {{ .Env.METALLB_ADDRESS_RANGE }}
Metallb_Setup/justfile (new file, 66 lines)
set fallback := true

export K8S_CONTEXT := env("K8S_CONTEXT", "")
export SERVER_IP := env("K3S_SERVER_IP","192.168.178.45")
export USER := env("K3S_USER","basti")


[private]
default:
    @just --list --unsorted --list-submodules


install:
    #!/bin/bash
    set -euo pipefail
    just env::check

    METALLB_VERSION="v0.15.3"

    username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
    context=""
    if gum confirm "Update KUBECONFIG?"; then
        context=$(
            gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
        )
    fi

    if [ -n "${context}" ]; then
        kubectl config use-context "${context}"
    fi

    kubectl apply -f "https://raw.githubusercontent.com/metallb/metallb/${METALLB_VERSION}/config/manifests/metallb-native.yaml"
    gum spin --spinner dot --title "Waiting for MetalLB to be ready..." -- kubectl wait --namespace metallb-system --for=condition=available deployment --all --timeout=120s
    echo "MetalLB ${METALLB_VERSION} installed successfully."

    gomplate -f address-pool.gomplate.yaml | kubectl apply -f -
    echo "Address pool configured."

    kubectl apply -f advertisement.yaml
    echo "Advertisement created."

uninstall:
    #!/bin/bash
    set -euo pipefail
    just env::check

    kubectl get namespace metallb-system &>/dev/null && kubectl delete ns metallb-system

test-deployment:
    #!/bin/bash
    set -euo pipefail
    just env::check

    kubectl apply -f test-deployment.yaml

    echo "Test deployment created. You can check the service with 'kubectl get svc nginx -o wide -n test'."

    echo "To clean up, run 'just test-deployment-cleanup'."

test-deployment-cleanup:
    #!/bin/bash
    set -euo pipefail
    just env::check

    kubectl delete -f test-deployment.yaml
    echo "Test deployment and service deleted."
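The `advertisement.yaml` applied by the install recipe is not included in this compare. In a plain L2 setup it is usually just an L2Advertisement referencing the pool; the manifest below is an illustrative guess, not the repo's file, and the pool name must match whatever `address-pool.gomplate.yaml` creates:

```
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2-advertisement
  namespace: metallb-system
spec:
  ipAddressPools:
    - default-pool   # placeholder; use the IPAddressPool name from the address pool manifest
```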
@@ -9,4 +9,4 @@ spec:
     name: cloudflare-cluster-issuer
     kind: ClusterIssuer
   dnsNames:
-    - schnipo.k8s.schnrbs.work
+    - schnipo.{{.Env.EXTERNAL_DOMAIN}}
Test-Deployment/dishes-deployment.yaml (new file, 43 lines)
apiVersion: v1
kind: Namespace
metadata:
  name: dishes

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dish-schnipo
  namespace: dishes
  labels:
    app: dishes
spec:
  replicas: 3
  selector:
    matchLabels:
      app: dishes
  template:
    metadata:
      labels:
        app: dishes
    spec:
      containers:
        - name: dish-schnipo
          image: bschnorbus/dish-schnipo
          ports:
            - containerPort: 8080

---
apiVersion: v1
kind: Service
metadata:
  name: dish-schnipo
  namespace: dishes
spec:
  type: ClusterIP
  selector:
    app: dishes
  ports:
    - port: 80
      targetPort: 8080
      protocol: TCP
@@ -7,10 +7,12 @@ spec:
   entryPoints:
     - websecure
   routes:
-    - match: Host(`schnipo.k8s.schnrbs.work`)
+    - match: Host(`schnipo.{{.Env.EXTERNAL_DOMAIN}}`)
      kind: Rule
      services:
        - name: schnipo
-          port: 8080
+          port: 80
+          targetPort: 8080
   tls:
     secretName: schnipo-certificate-secret
+
Test-Deployment/justfile (new file, 37 lines)
set fallback:=true

export EXTERNAL := env("EXTERNAL_DOMAIN", "")

install-nginx:
    #!/bin/bash
    set -euo pipefail
    just env::check

    if [ -z "${EXTERNAL}" ]; then
        echo "ERROR: EXTERNAL_DOMAIN environment variable is not set."
        exit 1
    fi

    kubectl apply -f nginx-deployment.yaml
    gomplate -f nginx-certificate-gomplate.yaml | kubectl apply -f -
    gomplate -f nginx-ingress-route-gomplate.yaml | kubectl apply -f -

install-dishes:
    #!/bin/bash
    set -euo pipefail
    just env::check

    if [ -z "${EXTERNAL}" ]; then
        echo "ERROR: EXTERNAL_DOMAIN environment variable is not set."
        exit 1
    fi

    kubectl apply -f dishes-deployment.yaml
    gomplate -f dishes-certificate-gomplate.yaml | kubectl apply -f -
    gomplate -f dishes-ingress-route-gomplate.yaml | kubectl apply -f -

remove-nginx:
    kubectl delete ns test || true

remove-dishes:
    kubectl delete ns dishes || true
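`install-dishes` renders `dishes-certificate-gomplate.yaml`, which this compare does not show. Judging from the schnipo certificate hunk above it presumably follows the same cert-manager pattern; the sketch below is an assumption, not the actual file (only the secret name, issuer, and dnsName are taken from the diff):

```
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: schnipo-certificate
  namespace: dishes
spec:
  secretName: schnipo-certificate-secret
  issuerRef:
    name: cloudflare-cluster-issuer
    kind: ClusterIssuer
  dnsNames:
    - schnipo.{{.Env.EXTERNAL_DOMAIN}}
```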
@@ -9,4 +9,4 @@ spec:
     name: cloudflare-cluster-issuer
     kind: ClusterIssuer
   dnsNames:
-    - nginx-test.k8s.schnrbs.work
+    - nginx-test.{{.Env.EXTERNAL_DOMAIN}}
Test-Deployment/nginx-deployment.yaml (new file, 43 lines)
apiVersion: v1
kind: Namespace
metadata:
  name: test

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: test
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:latest
          ports:
            - containerPort: 80

---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: test
spec:
  type: LoadBalancer
  selector:
    app: nginx
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
@@ -7,7 +7,7 @@ spec:
   entryPoints:
     - websecure
   routes:
-    - match: Host(`nginx-test.k8s.schnrbs.work`)
+    - match: Host(`nginx-test.{{.Env.EXTERNAL_DOMAIN}}`)
      kind: Rule
      services:
        - name: nginx
@@ -7,7 +7,7 @@ metadata:
     traefik.ingress.kubernetes.io/router.entrypoints: websecure
 spec:
   rules:
-    - host: nginx-test.k8s.schnrbs.work
+    - host: nginx-test.int.schnrbs.work
       http:
         paths:
           - path: /
@@ -19,5 +19,5 @@ spec:
               number: 80
   tls:
     - hosts:
-        - nginx-test.k8s.schnrbs.work
+        - nginx-test.int.schnrbs.work
       secretName: nginx-certificate-secret
@@ -4,7 +4,7 @@
 helm repo add traefik https://helm.traefik.io/traefik
 
 
 helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
 
 
 ## Cert-Manager
@@ -24,13 +24,15 @@ i.e. general issuer for all namespaces in cluster.
 
 
 ## Test Deployment
+```
 k create ns test
 kubectl create deploy nginx --image=nginx -n test
 k create svc -n test clusterip nginx --tcp=80
 k scale --replicas=3 deployment/nginx -n test
+```
 
 ## Install Traefik & Cert-Manager
+```
 
 helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml
 
@@ -40,23 +42,25 @@ helm repo add jetstack https://charts.jetstack.io --force-update
 helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml
 
 
-k apply cert-manager-issuer-secret.yaml
+k apply -f cert-manager-issuer-secret.yaml
 k get secret -n cert-manager
 
 k apply -f cert-manager-cluster-issuer.yaml
+```
 
 
 ## Switch Test Deployment to https
+```
 k apply -f test/nginx-certificate.yaml
 k apply -f test/nginx-ingress.yaml
+```
 
 
 
 ## Troubleshooting steps
 
 
+```
 k get po -n test -o wide
 k create svc -n test clusterip nginx
 k create svc -n test clusterip nginx --tcp=80
@@ -70,12 +74,11 @@ k apply -f traefik_lempa/nginx-ingress.yaml
 k get svc -n test
 k get ingress
 k get ingress -n test
+```
 
 
-
-
+```
 k get svc ingressRoute
 k get svc ingressRoutes
 k get svc ingressroutes.traefik.io
@@ -90,3 +93,4 @@ k apply -f traefik_lempa/cert-manager-issuer-secret.yaml
 k get secret
 k get secrets
 k get clusterissuers.cert-manager.io
+```
@@ -4,7 +4,7 @@ metadata:
   name: cloudflare-cluster-issuer
 spec:
   acme:
-    email: hello@schnorbus.net
+    email: {{ .Env.ACME_EMAIL }}
     server: https://acme-v02.api.letsencrypt.org/directory
     privateKeySecretRef:
       name: cloudflare-acme-key
@@ -5,4 +5,4 @@ metadata:
   namespace: cert-manager
 type: Opaque
 stringData:
-  api-token: DgU4SMUpQVAoS8IisGxnSQCUI7PbclhvegdqF9I1
+  api-token: {{ .Env.CLOUDFLARE_API_TOKEN }}
Traefik/justfile (new file, 62 lines)
set fallback:=true

export CERT_MANAGER_NAMESPACE := env("CERT_MANAGER_NAMESPACE", "cert-manager")
export TRAEFIK_NAMESPACE := env("TRAEFIK_NAMESPACE", "traefik")

add-helm-repos:
    helm repo add traefik https://helm.traefik.io/traefik --force-update
    helm repo add jetstack https://charts.jetstack.io --force-update
    helm repo update

install:
    #!/bin/bash
    set -euo pipefail
    just env::check

    just add-helm-repos

    helm upgrade traefik traefik/traefik \
        --install \
        --cleanup-on-fail \
        --namespace ${TRAEFIK_NAMESPACE} \
        --create-namespace \
        --values traefik-values.yaml

    helm upgrade cert-manager jetstack/cert-manager \
        --install \
        --cleanup-on-fail \
        --namespace ${CERT_MANAGER_NAMESPACE} \
        --create-namespace \
        --values cert-manager-values.yaml

uninstall:
    #!/bin/bash
    set -euo pipefail
    just env::check

    helm uninstall traefik --namespace ${TRAEFIK_NAMESPACE} || true
    helm uninstall cert-manager --namespace ${CERT_MANAGER_NAMESPACE} || true

setup-cluster-issuer:
    #!/bin/bash
    set -euo pipefail
    just env::check
    gomplate -f cert-manager-issuer-secret-gomplate.yaml | kubectl apply -f -
    gomplate -f cert-manager-cluster-issuer-gomplate.yaml | kubectl apply -f -

# Get status of cert-manager components
status:
    #!/bin/bash
    set -euo pipefail
    echo "=== cert-manager Components Status ==="
    echo ""
    echo "Namespace: ${CERT_MANAGER_NAMESPACE}"
    echo ""
    echo "Pods:"
    kubectl get pods -n ${CERT_MANAGER_NAMESPACE}
    echo ""
    echo "Services:"
    kubectl get services -n ${CERT_MANAGER_NAMESPACE}
    echo ""
    echo "CRDs:"
    kubectl get crd | grep cert-manager.io
@@ -11,5 +11,5 @@ ingressRoute:
   dashboard:
     enabled: true
     entryPoints: [web, websecure]
-    matchRule: Host(`traefik-dashboard.k8s.schnrbs.work`)
+    matchRule: Host(`traefik-dashboard.{{ .Env.EXTERNAL_DOMAIN }}`)
env/env.local.gomplate (vendored, 5 lines changed)
@@ -3,3 +3,8 @@ K8S_CONTEXT={{ .Env.K8S_CONTEXT }}
 K8S_MASTER_NODE_NAME={{ .Env.K8S_MASTER_NODE_NAME }}
 SERVER_IP={{ .Env.SERVER_IP }}
 AGENT_IP={{ .Env.AGENT_IP }}
+METALLB_ADDRESS_RANGE={{ .Env.METALLB_ADDRESS_RANGE }}
+CLOUDFLARE_API_TOKEN={{ .Env.CLOUDFLARE_API_TOKEN}}
+ACME_EMAIL={{ .Env.ACME_EMAIL}}
+EXTERNAL_DOMAIN={{ .Env.EXTERNAL_DOMAIN }}
+VAULT_HOST={{ .Env.VAULT_HOST }}
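After the env module's setup recipe runs gomplate over this template, the generated `.env.local` is plain KEY=value lines; the values below are placeholders for illustration, not anything from the repo:

```
K8S_CONTEXT=homelab
K8S_MASTER_NODE_NAME=k3s-master
SERVER_IP=192.168.178.45
AGENT_IP=192.168.178.75
METALLB_ADDRESS_RANGE=192.168.178.160-192.168.178.180
CLOUDFLARE_API_TOKEN=<redacted>
ACME_EMAIL=admin@example.com
EXTERNAL_DOMAIN=example.com
VAULT_HOST=vault.example.com
```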
env/justfile (vendored, 52 lines changed)
@@ -79,5 +79,57 @@ setup:
             exit 1
         fi
     done
+
+    while [ -z "${METALLB_ADDRESS_RANGE}" ]; do
+        if ! METALLB_ADDRESS_RANGE=$(
+            gum input --prompt="IP Range for LoadBalancer: " \
+                --width=100 --placeholder="[x.x.x.x-y.y.y.y]"
+        ); then
+            echo "Setup cancelled." >&2
+            exit 1
+        fi
+    done
+
+    while [ -z "${CLOUDFLARE_API_TOKEN}" ]; do
+        if ! CLOUDFLARE_API_TOKEN=$(
+            gum input --prompt="Cloudflare API Token: " \
+                --width=100 --placeholder="API Token" --password
+        ); then
+            echo "Setup cancelled." >&2
+            exit 1
+        fi
+    done
+
+    while [ -z "${ACME_EMAIL}" ]; do
+        if ! ACME_EMAIL=$(
+            gum input --prompt="ACME Email for Cert-Manager: " \
+                --width=100 --placeholder="Email"
+        ); then
+            echo "Setup cancelled." >&2
+            exit 1
+        fi
+    done
+
+    while [ -z "${EXTERNAL_DOMAIN}" ]; do
+        if ! EXTERNAL_DOMAIN=$(
+            gum input --prompt="External Domain: " \
+                --width=100 --placeholder="Domain"
+        ); then
+            echo "Setup cancelled." >&2
+            exit 1
+        fi
+    done
+
+    while [ -z "${VAULT_HOST}" ]; do
+        if ! VAULT_HOST=$(
+            gum input --prompt="Vault hostname: " \
+                --width=100 --placeholder="vault"
+        ); then
+            echo "Setup cancelled." >&2
+            exit 1
+        fi
+    done
+
+    echo "Generating .env.local file..."
     rm -f ../.env.local
     gomplate -f env.local.gomplate -o ../.env.local
@@ -14,3 +14,4 @@ flux bootstrap gitea --repository=k3s-homelab --branch=main --personal --owner b
 
 https://bash.ghost.io/secure-kubernetes-secrets-disaster-recovery-with-sops-gitops-fluxcd/
 
+"Make a 4×4 grid starting with the 1880s. In each section, I should appear styled according to that decade (clothing, hairstyle, facial hair, accessories). Use colors, background, & film style accordingly."
justfile (4 lines changed)
@@ -8,3 +8,7 @@ default:
 
 mod env
 mod BasicSetup '01_Basic_Setup'
+mod MetalLbSetup 'Metallb_Setup'
+mod Traefik
+mod Longhorn
+mod Vault '08_Vault'
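With these modules registered, each component's recipes become addressable from the repository root through just's module syntax, for example (recipe names taken from the justfiles added in this compare):

```
just MetalLbSetup::install
just Traefik::install
just Traefik::setup-cluster-issuer
just Longhorn::install
just Vault::install
```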
@@ -1,25 +0,0 @@
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: longhorn-web-ui
-  namespace: longhorn-system
-  annotations:
-    traefik.ingress.kubernetes.io/router.entrypoints: websecure
-spec:
-  rules:
-    - host: longhorn.k8s.internal.schnrbs.work
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: longhorn-frontend
-                port:
-                  number: 80
-  tls:
-    - hosts:
-        - longhorn.k8s.internal.schnrbs.work
-      secretName: longhorn-web-ui-tls
-