# Compare commits

0bb0f8d6e9...just (33 commits)

| SHA1 |
|---|
| 4f5a18c84c |
| 7a54346331 |
| 5abc0de38a |
| 29674ae504 |
| 6abe5d1a8f |
| 67a6c414f2 |
| 08212c26a6 |
| e4adbfd0b2 |
| d7db562a23 |
| 7896130d05 |
| efcb4ee172 |
| f58fad216a |
| 90e0de0804 |
| 8cb83ffd9c |
| cca6f599d5 |
| 506a199c95 |
| d2a16bd55b |
| d25c9227c7 |
| 45c61d5130 |
| 82c19ff12c |
| 9695376a0a |
| 84fd560675 |
| 5708f841e7 |
| 97ef02c1da |
| 65e99a9f83 |
| 77ad59eae5 |
| a13663754d |
| 5e30b1e83d |
| 5514b5687f |
| a3404bba2b |
| 0e4ddcefdf |
| 12546a9669 |
| a6ac7b84e4 |
1  .gitignore (vendored, new file)
@@ -0,0 +1 @@
.env.local

@@ -34,4 +34,30 @@ Rancher Installation
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest

# Prevent scheduling on master (optional)

```
kubectl taint nodes master node-role.kubernetes.io/master=:NoSchedule
```

# Just Setup // K3sup

export SERVER_IP=192.168.178.45
export AGENT_IP=192.168.178.75
export USER=basti

k3sup install \
  --cluster \
  --ip 192.168.178.45 \
  --user $USER \
  --merge \
  --local-path $HOME/.kube/config \
  --context my-k3s

k3sup join \
  --ip $AGENT_IP \
  --server-ip $SERVER_IP \
  --user $USER

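After the join, a quick sanity check (using the merged kubeconfig and the `my-k3s` context from above):

```
kubectl --context my-k3s get nodes -o wide   # server and agent should both report Ready
```
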
135  01_Basic_Setup/justfile (new file)
@@ -0,0 +1,135 @@
set fallback := true

export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export EXTERNAL_K8S_HOST := env("EXTERNAL_K8S_HOST", "")
export KEYCLOAK_HOST := env("KEYCLOAK_HOST", "")
export KEYCLOAK_REALM := env("KEYCLOAK_REALM", "buunstack")
export K8S_OIDC_CLIENT_ID := env('K8S_OIDC_CLIENT_ID', "k8s")
export K3S_ENABLE_REGISTRY := env("K3S_ENABLE_REGISTRY", "false")
export SERVER_IP := env("K3S_SERVER_IP", "192.168.178.45")
export AGENT_IP := env("K3S_AGENT_IP", "192.168.178.75")
export USER := env("K3S_USER", "basti")

[private]
default:
    @just --list --unsorted --list-submodules

install:
    #!/bin/bash
    set -euo pipefail
    just env::check
    username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
    kubeconfig=""
    context=""
    if gum confirm "Update KUBECONFIG?"; then
        kubeconfig=$(
            gum input --prompt="KUBECONFIG file: " --value="${HOME}/.kube/config" --width=100
        )
        context=$(
            gum input --prompt="Context name: " --value="${K8S_CONTEXT}" --width=100
        )
    fi

    args=(
        "install"
        "--context" "${K8S_CONTEXT}"
        "--host" "${K8S_MASTER_NODE_NAME}"
        "--user" "${username}"
    )

    if [ -n "${kubeconfig}" ]; then
        mkdir -p "$(dirname "${kubeconfig}")"
        args+=("--local-path" "${kubeconfig}" "--merge")
    fi
    echo "Running: k3sup ${args[*]}"
    k3sup "${args[@]}"

    if [ -n "${context}" ]; then
        kubectl config use-context "${context}"
    fi

    if [ "${K3S_ENABLE_REGISTRY}" = "true" ]; then
        echo "Setting up local Docker registry..."

        # Deploy Docker registry to cluster
        kubectl apply -f ./registry/registry.yaml

        # Set Pod Security Standard for registry namespace
        kubectl label namespace registry pod-security.kubernetes.io/enforce=restricted --overwrite

        # Wait for registry deployment
        echo "Waiting for registry to be ready..."
        kubectl wait --for=condition=available --timeout=60s deployment/registry -n registry

        # Configure registries.yaml for k3s
        just configure-registry

        echo "✓ Local Docker registry deployed and configured"
        echo ""
        echo "Registry accessible at:"
        echo "  localhost:30500"
        echo ""
        echo "Usage:"
        echo "  export DOCKER_HOST=ssh://${K8S_MASTER_NODE_NAME}"
        echo "  docker build -t localhost:30500/myapp:latest ."
        echo "  docker push localhost:30500/myapp:latest"
        echo "  kubectl run myapp --image=localhost:30500/myapp:latest"
    fi

    echo "k3s cluster installed on ${K8S_MASTER_NODE_NAME}."

uninstall:
    #!/bin/bash
    set -euo pipefail
    if gum confirm "Uninstall k3s from ${K8S_MASTER_NODE_NAME}?"; then

        if gum confirm "Also remove Agent node at ${AGENT_IP}?"; then
            echo "Removing Agent node at ${AGENT_IP}..."
            ssh "${AGENT_IP}" "/usr/local/bin/k3s-agent-uninstall.sh"
        fi

        echo "Removing content of Server node..."
        ssh "${K8S_MASTER_NODE_NAME}" "/usr/local/bin/k3s-uninstall.sh"
        echo "Cleaning up kubeconfig entries..."
        cluster_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.cluster // empty")
        user_name=$(kubectl config view -o json | jq -r ".contexts[] | select(.name == \"${K8S_CONTEXT}\") | .context.user // empty")
        if kubectl config get-contexts "${K8S_CONTEXT}" &>/dev/null; then
            kubectl config delete-context "${K8S_CONTEXT}"
            echo "Deleted context: ${K8S_CONTEXT}"
        fi
        if [ -n "${cluster_name}" ] && kubectl config get-clusters | grep -q "^${cluster_name}$"; then
            kubectl config delete-cluster "${cluster_name}"
            echo "Deleted cluster: ${cluster_name}"
        fi
        if [ -n "${user_name}" ] && kubectl config get-users | grep -q "^${user_name}$"; then
            kubectl config delete-user "${user_name}"
            echo "Deleted user: ${user_name}"
        fi
        echo "k3s cluster uninstalled from ${K8S_CONTEXT}."
    else
        echo "Uninstallation cancelled." >&2
        exit 1
    fi

add-agent:
    #!/bin/bash
    set -euo pipefail
    just env::check

    username=$(gum input --prompt="SSH username: " --value="${USER}" --width=100)
    new_agent_ip=$(gum input --prompt="Agent IP to join cluster: " --value="${AGENT_IP}" --width=100)

    args=(
        "join"
        "--ip" "${new_agent_ip}"
        "--server-ip" "${SERVER_IP}"
        "--user" "${username}"
    )

    echo "Running: k3sup ${args[*]}"
    k3sup "${args[@]}"
    echo "Agent node at ${new_agent_ip} added to cluster."

4  01_Basic_Setup/registry/registries.gomplate.yaml (new file)
@@ -0,0 +1,4 @@
configs:
  "localhost:30500":
    tls:
      insecure_skip_verify: true

109  01_Basic_Setup/registry/registry.yaml (new file)
@@ -0,0 +1,109 @@
apiVersion: v1
kind: Namespace
metadata:
  name: registry
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry
  namespace: registry
  labels:
    app: registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
  template:
    metadata:
      labels:
        app: registry
    spec:
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
        fsGroup: 65534
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: registry
          image: registry:2
          ports:
            - containerPort: 5000
              name: http
          resources:
            requests:
              cpu: 25m
              memory: 128Mi
            limits:
              cpu: 2000m
              memory: 20Gi
          env:
            - name: REGISTRY_STORAGE_DELETE_ENABLED
              value: "true"
            - name: REGISTRY_HTTP_ADDR
              value: "0.0.0.0:5000"
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 65534
            capabilities:
              drop:
                - ALL
          volumeMounts:
            - name: registry-data
              mountPath: /var/lib/registry
            - name: tmp
              mountPath: /tmp
          livenessProbe:
            httpGet:
              path: /v2/
              port: 5000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /v2/
              port: 5000
            initialDelaySeconds: 5
            periodSeconds: 5
      volumes:
        - name: registry-data
          emptyDir: {}
        - name: tmp
          emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: registry
  namespace: registry
  labels:
    app: registry
spec:
  selector:
    app: registry
  ports:
    - port: 5000
      targetPort: 5000
      name: http
  type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
  name: registry-nodeport
  namespace: registry
  labels:
    app: registry
spec:
  selector:
    app: registry
  ports:
    - port: 5000
      targetPort: 5000
      nodePort: 30500
      name: http
  type: NodePort

131  08_Vault/README.md (new file)
@@ -0,0 +1,131 @@
# Helm

## Installation

helm repo add hashicorp https://helm.releases.hashicorp.com

helm install vault hashicorp/vault \
  --set='server.dev.enabled=true' \
  --set='ui.enabled=true' \
  --set='ui.serviceType=LoadBalancer' \
  --namespace vault \
  --create-namespace

This runs Vault in "dev" mode, which requires no further setup, no state management, and no initialization. It is useful for experimenting with Vault without needing to unseal it, store keys, etc. All data is lost on restart; do not use dev mode for anything other than experimenting. See https://developer.hashicorp.com/vault/docs/concepts/dev-server for details.

## Output
```
$ kubectl get all -n vault
NAME                                        READY   STATUS    RESTARTS   AGE
pod/vault-0                                 1/1     Running   0          2m39s
pod/vault-agent-injector-8497dd4457-8jgcm   1/1     Running   0          2m39s

NAME                               TYPE           CLUSTER-IP       EXTERNAL-IP    PORT(S)             AGE
service/vault                      ClusterIP      10.245.225.169   <none>         8200/TCP,8201/TCP   2m40s
service/vault-agent-injector-svc   ClusterIP      10.245.32.56     <none>         443/TCP             2m40s
service/vault-internal             ClusterIP      None             <none>         8200/TCP,8201/TCP   2m40s
service/vault-ui                   LoadBalancer   10.245.103.246   24.132.59.59   8200:31764/TCP      2m40s

NAME                                   READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/vault-agent-injector   1/1     1            1           2m40s

NAME                                              DESIRED   CURRENT   READY   AGE
replicaset.apps/vault-agent-injector-8497dd4457   1         1         1       2m40s

NAME                     READY   AGE
statefulset.apps/vault   1/1     2m40s
```

# Configuration

## Enter Pod

kubectl exec -it vault-0 -n vault -- /bin/sh

## Create policy

```
cat <<EOF > /home/vault/read-policy.hcl
path "secret*" {
  capabilities = ["read"]
}
EOF
```

## Apply

```
vault policy write read-policy /home/vault/read-policy.hcl
```

## Enable Kubernetes

```
vault auth enable kubernetes
```

## Configure Kubernetes Auth

Configure Vault to communicate with the Kubernetes API server:

```
vault write auth/kubernetes/config \
    token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
    kubernetes_host=https://${KUBERNETES_PORT_443_TCP_ADDR}:443 \
    kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
```

## Create a Role

Create a role (vault-role) that binds the above policy to a Kubernetes service account (vault-serviceaccount) in a specific namespace. This allows the service account to access secrets stored in Vault:

```
vault write auth/kubernetes/role/vault-role \
    bound_service_account_names=vault-serviceaccount \
    bound_service_account_namespaces=vault \
    policies=read-policy \
    ttl=1h
```
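To confirm the role was written as intended, a quick read-back (not part of the original steps):

```
vault read auth/kubernetes/role/vault-role
```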
# Create Secrets

## Via CLI

```
vault kv put secret/login pattoken=ytbuytbytbf765rb65u56rv
```
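The test deployment below also expects a second secret, my-first-secret, with username and password keys. A sketch of creating it and verifying both (the values here are placeholders):

```
vault kv put secret/my-first-secret username=admin password=changeme
vault kv get secret/login            # show what was stored
vault kv get secret/my-first-secret
```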
## Via UI

Now you can log in to Vault using the Token method; initially, use Token=`root` to log in.

# Accessing Secrets in Pods

Using the above steps, we have installed Vault and configured a Vault role (vault-role) to allow the service account (vault-serviceaccount) to access secrets stored in Vault.

Additionally, we have created two secrets, login and my-first-secret, with key-value pairs. Now, let's create a simple Kubernetes deployment and try to access those secrets.

First, let's create a service account named vault-serviceaccount in the vault namespace. This service account is granted permissions for the Vault role as defined in the "Create a Role" step above.

Apply the service account manifest with:
```
kubectl apply -f vault-sa.yaml -n vault
```

This deployment manifest creates a single replica of an Nginx pod configured to securely fetch secrets from Vault. The Vault Agent injects the secrets login and my-first-secret into the pod according to the specified templates. The secrets are stored in the pod's filesystem and can be accessed by the application running in the container. The vault-serviceaccount service account, which has the necessary permissions, is used to authenticate with Vault.

```
kubectl apply -f vault-secret-test-deploy.yaml -n vault
```
These annotations configure the Vault Agent to inject secrets into the pod volume:

- `vault.hashicorp.com/agent-inject: "true"`: Enables Vault Agent injection for this pod.
- `vault.hashicorp.com/agent-inject-status: "update"`: Ensures the status of secret injection is updated.
- `vault.hashicorp.com/agent-inject-secret-login: "secret/login"`: Specifies that the secret stored at `secret/login` in Vault should be injected.
- `vault.hashicorp.com/agent-inject-template-login`: Defines the template for the injected login secret, specifying the format in which the secret will be written.
- `vault.hashicorp.com/agent-inject-secret-my-first-secret: "secret/my-first-secret"`: Specifies that the secret stored at `secret/my-first-secret` in Vault should be injected.
- `vault.hashicorp.com/agent-inject-template-my-first-secret`: Defines the template for the injected `my-first-secret`, specifying the format in which the secret will be written.
- `vault.hashicorp.com/role: "vault-role"`: Specifies the Vault role to be used for authentication.
- `serviceAccountName`: Uses the service account `vault-serviceaccount`, which has permission to access Vault.

Use the following command to check the Vault secrets in the pod volume (the pod name will differ in your cluster):
```
kubectl exec -it vault-test-84d9dc9986-gcxfv -n vault -- sh -c "cat /vault/secrets/login && cat /vault/secrets/my-first-secret"
```

6  08_Vault/vault-sa.yaml (new file)
@@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: vault-serviceaccount
  labels:
    app: read-vault-secret

35  08_Vault/vault-secret-test-deploy.yaml (new file)
@@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: vault-test
  labels:
    app: read-vault-secret
spec:
  selector:
    matchLabels:
      app: read-vault-secret
  replicas: 1
  template:
    metadata:
      annotations:
        vault.hashicorp.com/agent-inject: "true"
        vault.hashicorp.com/agent-inject-status: "update"
        vault.hashicorp.com/agent-inject-secret-login: "secret/login"
        vault.hashicorp.com/agent-inject-template-login: |
          {{- with secret "secret/login" -}}
          pattoken={{ .Data.data.pattoken }}
          {{- end }}
        vault.hashicorp.com/agent-inject-secret-my-first-secret: "secret/my-first-secret"
        vault.hashicorp.com/agent-inject-template-my-first-secret: |
          {{- with secret "secret/my-first-secret" -}}
          username={{ .Data.data.username }}
          password={{ .Data.data.password }}
          {{- end }}
        vault.hashicorp.com/role: "vault-role"
      labels:
        app: read-vault-secret
    spec:
      serviceAccountName: vault-serviceaccount
      containers:
        - name: nginx
          image: nginx

180  09_Homepage/01_homepage-deployment.yaml (new file)
@@ -0,0 +1,180 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
secrets:
  - name: homepage

---
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
  annotations:
    kubernetes.io/service-account.name: homepage

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: homepage
  labels:
    app.kubernetes.io/name: homepage
rules:
  - apiGroups:
      - ""
    resources:
      - namespaces
      - pods
      - nodes
    verbs:
      - get
      - list
  - apiGroups:
      - extensions
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
  - apiGroups:
      - traefik.io
    resources:
      - ingressroutes
    verbs:
      - get
      - list
  - apiGroups:
      - gateway.networking.k8s.io
    resources:
      - httproutes
      - gateways
    verbs:
      - get
      - list
  - apiGroups:
      - metrics.k8s.io
    resources:
      - nodes
      - pods
    verbs:
      - get
      - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: homepage
  labels:
    app.kubernetes.io/name: homepage
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: homepage
subjects:
  - kind: ServiceAccount
    name: homepage
    namespace: homepage

---
apiVersion: v1
kind: Service
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
  annotations:
spec:
  type: ClusterIP
  ports:
    - port: 3000
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: homepage

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
  annotations:
    reloader.stakater.com/search: "true"
    secret.reloader.stakater.com/reload: "homepage"
spec:
  revisionHistoryLimit: 3
  replicas: 1
  strategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: homepage
  template:
    metadata:
      labels:
        app.kubernetes.io/name: homepage
    spec:
      serviceAccountName: homepage
      automountServiceAccountToken: true
      dnsPolicy: ClusterFirst
      enableServiceLinks: true
      containers:
        - name: homepage
          image: "ghcr.io/gethomepage/homepage:latest"
          imagePullPolicy: Always
          env:
            - name: HOMEPAGE_ALLOWED_HOSTS
              value: homepage.k8s.schnrbs.work # required, may need port. See gethomepage.dev/installation/#homepage_allowed_hosts
          ports:
            - name: http
              containerPort: 3000
              protocol: TCP
          volumeMounts:
            - mountPath: /app/config/custom.js
              name: homepage-config
              subPath: custom.js
            - mountPath: /app/config/custom.css
              name: homepage-config
              subPath: custom.css
            - mountPath: /app/config/bookmarks.yaml
              name: homepage-config
              subPath: bookmarks.yaml
            - mountPath: /app/config/docker.yaml
              name: homepage-config
              subPath: docker.yaml
            - mountPath: /app/config/kubernetes.yaml
              name: homepage-config
              subPath: kubernetes.yaml
            - mountPath: /app/config/services.yaml
              name: homepage-config
              subPath: services.yaml
            - mountPath: /app/config/settings.yaml
              name: homepage-config
              subPath: settings.yaml
            - mountPath: /app/config/widgets.yaml
              name: homepage-config
              subPath: widgets.yaml
            - mountPath: /app/config/logs
              name: logs
      volumes:
        - name: homepage-config
          configMap:
            name: homepage
        - name: logs
          emptyDir: {}

@@ -9,4 +9,4 @@ spec:
  name: cloudflare-cluster-issuer
  kind: ClusterIssuer
  dnsNames:
-    - homepage.k8s.internal.schnrbs.work
+    - homepage.k8s.schnrbs.work

24  09_Homepage/03_homepage-ingress-route.yaml (new file)
@@ -0,0 +1,24 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: homepage-ingress-route
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
  annotations:
    gethomepage.dev/description: Dynamically Detected Homepage
    gethomepage.dev/enabled: "true"
    gethomepage.dev/group: Cluster Management
    gethomepage.dev/icon: homepage.png
    gethomepage.dev/name: Homepage
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`homepage.k8s.schnrbs.work`)
      kind: Rule
      services:
        - name: homepage
          port: 3000
  tls:
    secretName: homepage-certificate-secret

150  09_Homepage/04_homepage-configmap.yaml (new file)
@@ -0,0 +1,150 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: homepage
  namespace: homepage
  labels:
    app.kubernetes.io/name: homepage
  annotations:
    reloader.stakater.com/match: "true"
data:
  kubernetes.yaml: |
    mode: cluster
  settings.yaml: |
    background: https://images.unsplash.com/photo-1502790671504-542ad42d5189?auto=format&fit=crop&w=2560&q=80
    cardBlur: xs
    providers:
      longhorn:
        url: https://longhorn-dashboard.k8s.schnrbs.work
  custom.css: ""
  custom.js: ""
  bookmarks.yaml: |
    - Developer:
        - Github:
            - abbr: GH
              href: https://github.com/
  services.yaml: |
    - Smart Home:
        - Home Assistant:
            icon: home-assistant.png
            href: https://ha.homeee.schnorbus.net
            description: Home Assistant is awesome
            widgets:
              - type: homeassistant
                url: https://ha.homeee.schnorbus.net
                key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI3MTA1ZmE1MDA5ZTA0MDQxYTc0NzUxZmUwM2NhYWMwZiIsImlhdCI6MTc0NTcxMDY3OCwiZXhwIjoyMDYxMDcwNjc4fQ.EI6-Husovb1IYpVn5RBy8pJ7bcESQHDzIbS22_5abUs
        - Zigbee2MQTT:
            icon: zigbee2mqtt.png
            href: http://muckibude.fritz.box:8383
            description: Zigbee2MQTT is awesome
        - Pihole:
            icon: pi-hole.png
            href: http://pi.hole
            description: Pi-hole
            widgets:
              - type: pihole
                url: http://pi.hole
                version: 6
                key: 5ipI9bvB
        - Paperless NGX:
            icon: paperless-ng.png
            href: https://ppl.homeee.schnorbus.net
            widgets:
              - type: paperlessngx
                url: https://ppl.homeee.schnorbus.net
                token: 0cf8eb062d0ecfc0aa70611125427692cb577d68

    - My Second Group:
        - Proxmox pve-81:
            icon: proxmox.png
            href: http://pve-81.fritz.box:8006
            description: Homepage is the best
        - Proxmox pve-82:
            icon: proxmox.png
            href: http://pve-82.fritz.box:8006
            description: Homepage is the best
        - Proxmox pve-83:
            icon: proxmox.png
            href: https://pve-83.fritz.box:8006
            description: Homepage is the best
            # widgets:
            #   - type: proxmox
            #     url: https://pve-83.fritz.box:8006
            #     username: homepage_api@pam!homepage_api
            #     password: 0cf8eb062d0ecfc0aa70611125427692cb577d68
        - Longhorn:
            icon: longhorn.png
            href: https://longhorn-dashboard.k8s.schnrbs.work
            description: Longhorn volume provisioning

    - Party Time:
        - Immich:
            icon: immich.png
            href: https://immich.homeee.schnorbus.net
            description: Immich is awesome
            widgets:
              - type: immich
                url: https://immich.homeee.schnorbus.net
                key: deOT6z7AHok30eKWgF2bOSJuOIZXK0eONo7PrR0As
                version: 2
        - Linkwarden:
            icon: linkwarden.png
            href: https://lw.homeee.schnorbus.net
            description: Homepage isssss 😎
            widgets:
              - type: linkwarden
                url: http://docker-host-02.fritz.box:9595
                key: eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..bEvs2PcR0ZTNpb8b.Lhe1-00LlVVC97arojvhh7IK4VADR82AMAzK5sd7AcUhs2WUQmu8Q-cOAKFGVlgPgdk-w1Pa8CJJHF71opWJk85aJXkTcdl7jANwN8PqgHXsSPoqtvzX.5GFRIAMo31sw5GStVlznHQ
        - Nginx Proxy Manager:
            icon: nginx-proxy-manager.png
            href: http://192.168.178.42:8181
            description: Nginx Proxy Manager is awesome
            widgets:
              - type: npm
                url: http://192.168.178.42:8181
                username: bastian@schnorbus.net
                password: abcd1234
        - Plex:
            icon: plex.png
            href: http://diskstation.fritz.box:32400/web/index.html#!/
            description: Watch movies and TV shows.
            server: http://diskstation.fritz.box:32400/web/index.html#!/
            container: plex
            widgets:
              - type: plex
                url: http://diskstation.fritz.box:32400
                key: aNcUss31qsVsea5bsDf9
  widgets.yaml: |
    - kubernetes:
        cluster:
          show: true
          cpu: true
          memory: true
          showLabel: true
          label: "cluster"
        nodes:
          show: true
          cpu: true
          memory: true
          showLabel: true
    - longhorn:
        # Show the expanded view
        expanded: true
        # Shows a node representing the aggregate values
        total: true
        # Shows the node names as labels
        labels: true
        # Show the nodes
        nodes: true
    - resources:
        backend: resources
        expanded: true
        cpu: true
        memory: true
        network: default
    - search:
        provider: duckduckgo
        target: _blank

  docker.yaml: ""

@@ -1,6 +1,35 @@
-Install via helm:
+## Installation

### Install via helm
https://gethomepage.dev/installation/k8s/#install-with-helm

```
helm upgrade --install homepage jameswynn/homepage -f homepage-values.yaml --create-namespace --namespace homepage
```

### Install via deployment

```
k create ns homepage
k apply -f 01_homepage-deployment.yaml
```

## Setup Https & Certificate

```
k apply -f 02_homepage-certificate.yaml
k apply -f 03_homepage-ingress-route.yaml
```

## Upload Content
```
k apply -f 04_homepage-configmap.yaml
```

## Test

Open a browser and navigate to:
https://homepage.k8s.schnrbs.work
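A quick check from the CLI as well (assuming DNS for the host already points at the Traefik load-balancer IP):

```
curl -kI https://homepage.k8s.schnrbs.work   # expect an HTTP 200 response from the homepage pod
```
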
@@ -1,16 +0,0 @@ (file removed)
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: homepage-ingress-route
  namespace: homepage
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`homepage.k8s.internal.schnrbs.work`)
      kind: Rule
      services:
        - name: homepage
          port: 3000
  tls:
    secretName: homepage-certificate-secret

@@ -1,72 +0,0 @@ (file removed)
config:
  bookmarks:
    - Developer:
        - Github:
            - abbr: GH
              href: https://github.com/
  services:
    - My First Group:
        - My First Service:
            href: http://localhost/
            description: Homepage is awesome

    - My Second Group:
        - My Second Service:
            href: http://localhost/
            description: Homepage is the best

    - My Third Group:
        - My Third Service:
            href: http://localhost/
            description: Homepage is 😎
  widgets:
    # show the kubernetes widget, with the cluster summary and individual nodes
    - kubernetes:
        cluster:
          show: true
          cpu: true
          memory: true
          showLabel: true
          label: "cluster"
        nodes:
          show: true
          cpu: true
          memory: true
          showLabel: true
    - pihole:
        show: true
        url: http://192.168.178.202
        key: 1eae9e87f4b4710981639ee591b7d75734811d61697092110cb748c3244e01cc
    - fritzbox:
        show: true
        url: http://192.168.178.1
    - search:
        provider: duckduckgo
        target: _blank
  kubernetes:
    mode: cluster
  settings:

# The service account is necessary to allow discovery of other services
serviceAccount:
  create: true
  name: homepage

# This enables the service account to access the necessary resources
enableRbac: true

ingress:
  main:
    enabled: false
    annotations:
      # Example annotations to add Homepage to your Homepage!
      gethomepage.dev/enabled: "true"
      gethomepage.dev/name: "Homepage"
      gethomepage.dev/description: "Dynamically Detected Homepage"
      gethomepage.dev/group: "Dynamic"
      gethomepage.dev/icon: "homepage.png"
    hosts:
      - host: homepage.k8s.internal.schnrbs.work
        paths:
          - path: /
            pathType: Prefix

@@ -2,7 +2,7 @@ apiVersion: v1
kind: PersistentVolume
metadata:
  name: longhorn-test-pv
-  namespace: default
+  namespace: test
spec:
  capacity:
    storage: 10Gi # set the desired storage size

@@ -2,7 +2,7 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: longhorn-test-pvc
-  namespace: default
+  namespace: test
spec:
  accessModes:
    - ReadWriteOnce

10  12_reloader/README.md (new file)
@@ -0,0 +1,10 @@

helm install reloader stakater/reloader --namespace reloader --create-namespace

flux create source helm stakater --url https://stakater.github.io/stakater-charts --namespace reloader

flux create helmrelease my-reloader --chart stakater/reloader \
  --source HelmRepository/stakater \
  --chart-version 2.1.3 \
  --namespace reloader
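Reloader restarts workloads whenever a watched ConfigMap or Secret changes. A minimal sketch of how a Deployment opts in (the same annotation pattern the homepage Deployment in this repo uses; the name, image, and secret here are placeholders):

```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo
  annotations:
    reloader.stakater.com/search: "true"                 # reload when referenced ConfigMaps/Secrets change
    secret.reloader.stakater.com/reload: "demo-secret"   # or pin an explicit Secret by name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: demo
  template:
    metadata:
      labels:
        app: demo
    spec:
      containers:
        - name: demo
          image: nginx
          envFrom:
            - secretRef:
                name: demo-secret   # placeholder Secret that Reloader watches
```
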
2  Database-manual-deployment/README.md (new file)
@@ -0,0 +1,2 @@
https://igeadetokunbo.medium.com/how-to-run-databases-on-kubernetes-an-8-step-guide-b75ce9117600
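The manifests in this directory can be applied in dependency order; a sketch (assuming the longhorn StorageClass from this repo is already installed):

```
kubectl apply -f pv.yaml                   # PersistentVolume (optional when Longhorn provisions dynamically)
kubectl apply -f pvc.yaml                  # PersistentVolumeClaim
kubectl apply -f svc.yaml                  # headless Service for the StatefulSet
kubectl apply -f mysql-statefulset.yaml
kubectl get pods -l app=mysql -w           # wait for mysql-0..2 to become Ready
```
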
36  Database-manual-deployment/mysql-statefulset.yaml (new file)
@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  serviceName: "mysql"
  replicas: 3
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: mysql:8.4.0-oraclelinux8
          ports:
            - containerPort: 3306
              name: mysql
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "your_password"
          volumeMounts:
            - name: mysql-storage
              mountPath: /var/lib/mysql
  volumeClaimTemplates:
    - metadata:
        name: mysql-storage
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 10Gi
        storageClassName: longhorn
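A quick connectivity check once the first replica is up (the root password comes from the StatefulSet env above):

```
kubectl exec -it mysql-0 -- mysql -uroot -p'your_password' -e 'SELECT VERSION();'
```
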
14  Database-manual-deployment/pv.yaml (new file)
@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: longhorn
  hostPath:
    path: /mnt/data # Specify a path on the host for storage

11  Database-manual-deployment/pvc.yaml (new file)
@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: longhorn

13  Database-manual-deployment/svc.yaml (new file)
@@ -0,0 +1,13 @@
# Headless service
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  clusterIP: None # required for a headless Service backing the StatefulSet
  ports:
    - name: mysql
      port: 3306
  selector:
    app: mysql

@@ -133,6 +133,151 @@ spec:
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zwavejs2mqtt-pvc
  labels:
    app: zwavejs2mqtt
  namespace: home-assistant
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 500Mi
---
apiVersion: v1
kind: Namespace
metadata:
  name: home-assistant
---
apiVersion: v1
kind: Service
metadata:
  namespace: home-assistant
  name: home-assistant
spec:
  selector:
    app: home-assistant
  type: ClusterIP
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 8123
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: home-assistant
  name: home-assistant
  labels:
    app: home-assistant
spec:
  replicas: 1
  selector:
    matchLabels:
      app: home-assistant
  template:
    metadata:
      labels:
        app: home-assistant
    spec:
      containers:
        - name: bluez
          image: ghcr.io/mysticrenji/bluez-service:v1.0.0
          securityContext:
            privileged: true
        - name: home-assistant
          image: ghcr.io/mysticrenji/homeassistant-arm64:2023.3.0
          resources:
            requests:
              memory: "256Mi"
            limits:
              memory: "512Mi"
          ports:
            - containerPort: 8123
          volumeMounts:
            - mountPath: /config
              name: config
            - mountPath: /config/configuration.yaml
              subPath: configuration.yaml
              name: configmap-file
            - mountPath: /config/automations.yaml
              subPath: automations.yaml
              name: configmap-file
            - mountPath: /media
              name: media-volume
            # - mountPath: /run/dbus
            #   name: d-bus
            #   readOnly: true
            - mountPath: /dev/ttyUSB1
              name: zigbee
            # - mountPath: /dev/video0
            #   name: cam
          securityContext:
            privileged: true
            capabilities:
              add:
                - NET_ADMIN
                - NET_RAW
                - SYS_ADMIN
      hostNetwork: true
      volumes:
        - name: config
          persistentVolumeClaim:
            claimName: home-assistant-pvc
        - name: media-volume
          hostPath:
            path: /tmp/media
        - name: configmap-file
          configMap:
            name: home-assistant-configmap
          # hostPath:
          #   path: /tmp/home-assistant
          #   type: DirectoryOrCreate
        # - name: d-bus
        #   hostPath:
        #     path: /run/dbus
        - name: zigbee
          hostPath:
            path: /dev/ttyACM0
        # - name: cam
        #   hostPath:
        #     path: /dev/video0
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: home-assistant-configmap
  namespace: home-assistant
data:
  known_devices.yaml: |
  automations.yaml: |
  configuration.yaml: |-
    default_config:
    frontend:
      themes: !include_dir_merge_named themes
    automation: !include automations.yaml
    http:
      use_x_forwarded_for: true
      trusted_proxies:
        - 10.10.0.0/16
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: home-assistant-pvc
  labels:
    app: home-assistant
  namespace: home-assistant
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 9Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zwavejs2mqtt-pvc
  labels:

@@ -5,4 +5,5 @@ metadata:
  namespace: metallb-system
spec:
  addresses:
-    - 192.168.178.220-192.168.178.250
+    # - 192.168.178.220-192.168.178.225 #pve-82
+    - 192.168.178.160-192.168.178.180 #pve-83

@@ -1,8 +1,16 @@
Metallb Installation

## Used IP Range

MetalLB will advertise IPs from the range
192.168.178.226-192.168.178.240.

The first address (x.x.x.226) is assigned to the Traefik reverse-proxy deployment.

https://canthonyscott.com/setting-up-a-k3s-kubernetes-cluster-within-proxmox/

Following https://metallb.universe.tf/installation/ (0.14.3)

-kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.3/config/manifests/metallb-native.yaml
+kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.9/config/manifests/metallb-native.yaml

@@ -27,7 +27,8 @@ kubectl expose deploy schnipo --port=80 --target-port=8080 --type=LoadBalancer -

```
#Create deploy
-kubectl create deploy nginx --image=nginx
+k create ns test
+kubectl create deploy nginx --image=nginx -n test

kubectl scale --replicas=3 deployment/nginx -n test

@@ -7,11 +7,56 @@
helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml

-## Troubleshooting steps
+## Cert-Manager

Cert-Manager will be used since it stores certs in a Secret, making them accessible to every pod.
Traefik, in contrast, stores certs on disk, so a volume in RWX mode would be needed (too much effort).

### Issuer - CA
An issuer is a CA. It comes in two different kinds.

#### Issuer
Can be used only in the namespace it is created in.

#### Cluster Issuer
Can be used throughout the whole cluster, not limited to a specific namespace,
i.e. a general issuer for all namespaces in the cluster.
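The actual issuer lives in cert-manager-cluster-issuer.yaml (not shown in this diff); a minimal sketch of what a Cloudflare DNS-01 ClusterIssuer of this shape looks like, with the email and secret names as assumptions:

```
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: cloudflare-cluster-issuer
spec:
  acme:
    email: admin@example.com                        # assumption: replace with the real ACME account mail
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: cloudflare-cluster-issuer-account-key   # ACME account key is stored here
    solvers:
      - dns01:
          cloudflare:
            apiTokenSecretRef:
              name: cloudflare-api-token            # assumption: the Secret from cert-manager-issuer-secret.yaml
              key: api-token
```
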
## Test Deployment
k create ns test
kubectl create deploy nginx --image=nginx -n test
k create svc -n test clusterip nginx --tcp=80
k scale --replicas=3 deployment/nginx -n test

## Install Traefik & Cert-Manager

helm install traefik traefik/traefik --namespace traefik --create-namespace --values traefik-values.yaml

traefik-dashboard.k8s.schnrbs.work

helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values cert-manager-values.yaml

k apply -f cert-manager-issuer-secret.yaml
k get secret -n cert-manager

k apply -f cert-manager-cluster-issuer.yaml

## Switch Test Deployment to https

k apply -f test/nginx-certificate.yaml
k apply -f test/nginx-ingress.yaml

## Troubleshooting steps

k get po -n test -o wide
k create svc -n test clusterip nginx --tcp=80

@@ -25,41 +70,23 @@ k apply -f traefik_lempa/nginx-ingress.yaml
k get svc -n test
k get ingress -n test
helm upgrade traefik traefik/traefik --namespace traefik --create-namespace --values traefik_lempa/traefik-values.yaml
k get ingressroutes.traefik.io --all-namespaces
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --values traefik_lempa/cert-manager-values.yaml
echo -n 'P96My4uiHudZtiC2ymjSGQ0174CoRBnI9ztmA0Wh' | base64
alias k=kubectl
k get po
k apply -f traefik_lempa/cert-manager-issuer-secret.yaml
k get secret -n cert-manager
k apply -f traefik_lempa/cert-manager-cluster-issuer.yaml
k get clusterissuers.cert-manager.io
k apply -f traefik_lempa/nginx-certificate.yaml
k apply -f traefik_lempa/nginx-ingress.yaml

12  Traefik/test/dishes-certificate.yaml (new file)
@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: schnipo-ingress-certificate
  namespace: dishes
spec:
  secretName: schnipo-certificate-secret
  issuerRef:
    name: cloudflare-cluster-issuer
    kind: ClusterIssuer
  dnsNames:
    - schnipo.k8s.schnrbs.work

16  Traefik/test/dishes-ingress-route.yaml (new file)
@@ -0,0 +1,16 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: schnipo-ingress-route
  namespace: dishes
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`schnipo.k8s.schnrbs.work`)
      kind: Rule
      services:
        - name: schnipo
          port: 8080
  tls:
    secretName: schnipo-certificate-secret

@@ -9,4 +9,4 @@ spec:
  name: cloudflare-cluster-issuer
  kind: ClusterIssuer
  dnsNames:
-    - nginx-test.k8s.internal.schnrbs.work
+    - nginx-test.k8s.schnrbs.work

@@ -7,7 +7,7 @@ spec:
  entryPoints:
    - websecure
  routes:
-    - match: Host(`nginx-test.k8s.internal.schnrbs.work`)
+    - match: Host(`nginx-test.k8s.schnrbs.work`)
      kind: Rule
      services:
        - name: nginx

@@ -7,7 +7,7 @@ metadata:
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
  rules:
-    - host: nginx-test.k8s.internal.schnrbs.work
+    - host: nginx-test.k8s.schnrbs.work
      http:
        paths:
          - path: /
@@ -19,5 +19,5 @@ spec:
          number: 80
  tls:
    - hosts:
-        - nginx-test.k8s.internal.schnrbs.work
+        - nginx-test.k8s.schnrbs.work
      secretName: nginx-certificate-secret

@@ -1,10 +1,15 @@
ports:
  web:
-    redirectTo:
-      port: websecure
+    redirections:
+      entryPoint:
+        to: websecure
+        scheme: https
logs:
  general:
    level: DEBUG
ingressRoute:
  dashboard:
    enabled: true
    entryPoints: [web, websecure]
-    matchRule: Host(`traefik-dashboard.k8s.redacted`)
+    matchRule: Host(`traefik-dashboard.k8s.schnrbs.work`)

12  echo-pod/echo-pod-deployment/echo-pod-cert.yaml (new file)
@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: echopod-ingress-certificate
  namespace: default
spec:
  secretName: echopod-certificate-secret
  issuerRef:
    name: cloudflare-cluster-issuer
    kind: ClusterIssuer
  dnsNames:
    - echopod.k8s.schnrbs.work

@@ -30,10 +30,30 @@ kind: Service
metadata:
  name: echopod-service
spec:
-  type: NodePort # Change to LoadBalancer if using a cloud provider
+  type: LoadBalancer
+  # type: NodePort # Change to LoadBalancer if using a cloud provider
  ports:
    - port: 80
      targetPort: 80
-      nodePort: 30080 # Port to expose on the node
+      # nodePort: 30080 # Port to expose on the node
  selector:
    app: echopod

---

apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: echopod-ingress-route
  namespace: default
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`echopod.k8s.schnrbs.work`)
      kind: Rule
      services:
        - name: echopod-service
          port: 80
  tls:
    secretName: echopod-certificate-secret

5  env/env.local.gomplate (vendored, new file)
@@ -0,0 +1,5 @@
# shellcheck disable=all
K8S_CONTEXT={{ .Env.K8S_CONTEXT }}
K8S_MASTER_NODE_NAME={{ .Env.K8S_MASTER_NODE_NAME }}
SERVER_IP={{ .Env.SERVER_IP }}
AGENT_IP={{ .Env.AGENT_IP }}

83  env/justfile (vendored, new file)
@@ -0,0 +1,83 @@
set fallback := true

export ENV_FILE := ".env.local"
export K8S_CONTEXT := env("K8S_CONTEXT", "")
export K8S_MASTER_NODE_NAME := env("K8S_MASTER_NODE_NAME", "")
export SERVER_IP := env("SERVER_IP", "")
export AGENT_IP := env("AGENT_IP", "")

check:
    #!/bin/bash
    set -euo pipefail
    if [ -z "${K8S_CONTEXT}" ]; then
        echo "K8S_CONTEXT is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi
    if [ -z "${K8S_MASTER_NODE_NAME}" ]; then
        echo "K8S_MASTER_NODE_NAME is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi
    if [ -z "${SERVER_IP}" ]; then
        echo "SERVER_IP is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi
    if [ -z "${AGENT_IP}" ]; then
        echo "AGENT_IP is not set. Please execute 'just env::setup'" >&2
        exit 1
    fi

setup:
    #!/bin/bash
    set -euo pipefail
    if [ -f ../.env.local ]; then
        echo ".env.local already exists." >&2
        if gum confirm "Do you want to overwrite it?"; then
            K8S_CONTEXT=""
            SERVER_IP=""
            AGENT_IP=""
        elif [[ $? -eq 130 ]]; then
            echo "Setup cancelled by user." >&2
            exit 1
        else
            echo "Aborting setup." >&2
            exit 1
        fi
    fi
    while [ -z "${K8S_CONTEXT}" ]; do
        if ! K8S_CONTEXT=$(
            gum input --prompt="Context name: " \
                --width=100 --placeholder="context"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${K8S_MASTER_NODE_NAME}" ]; do
        if ! K8S_MASTER_NODE_NAME=$(
            gum input --prompt="Master Node Hostname: " \
                --width=100 --placeholder="Master Node Name"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${SERVER_IP}" ]; do
        if ! SERVER_IP=$(
            gum input --prompt="IP of Server/Master Node: " \
                --width=100 --placeholder="Master Node IP"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    while [ -z "${AGENT_IP}" ]; do
        if ! AGENT_IP=$(
            gum input --prompt="IP of Agent Node: " \
                --width=100 --placeholder="Agent Node IP"
        ); then
            echo "Setup cancelled." >&2
            exit 1
        fi
    done
    rm -f ../.env.local
    gomplate -f env.local.gomplate -o ../.env.local

16  gitops/README.md (new file)
@@ -0,0 +1,16 @@
https://www.reddit.com/r/GitOps/comments/1ih3b4a/discussion_setting_up_fluxcd_on_k3s_for_home_labs/

https://bash.ghost.io/k8s-home-lab-gitops-with-fluxcd/

# Setup using internal Gitea server
## Create a Gitea personal access token and export it as an env var
```
export GITEA_TOKEN=<my-token>
```
## Bootstrap
```
flux bootstrap gitea --repository=k3s-homelab --branch=main --personal --owner baschno --hostname gitty.homeee.schnorbus.net --ssh-hostname=gitty.fritz.box:2221 --verbose --path=./clusters/homelab
```

https://bash.ghost.io/secure-kubernetes-secrets-disaster-recovery-with-sops-gitops-fluxcd/
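After bootstrapping, a couple of standard Flux commands verify that the controllers are healthy and the repo is reconciling:

```
flux check                    # controllers installed and ready?
flux get kustomizations -A    # reconciliation status of the bootstrapped path
```
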
10  justfile (new file)
@@ -0,0 +1,10 @@
set dotenv-filename := ".env.local"

export PATH := "./node_modules/.bin:" + env_var('PATH')

[private]
default:
    @just --list --unsorted --list-submodules

mod env
mod BasicSetup '01_Basic_Setup'
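With these modules in place, the typical flow from the repo root would be (recipe names taken from the module justfiles above):

```
just                       # list all recipes, including submodules
just env::setup            # write .env.local interactively
just BasicSetup::install   # run the k3sup-based cluster install
```
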
@@ -5,15 +5,66 @@ Here are the steps to create a persistent volume with Longhorn:
### 1. Make sure Longhorn is installed
First, make sure Longhorn is installed on your cluster. If it is not yet installed, you can install it with Helm or directly from the YAML manifests.

#### Node Labeling

Use this when not all nodes should provide disk, e.g. when certain nodes have special/fast disks.
In that case the StorageClass needs to be adapted with a node selector [1].
```
k label nodes k3s-prod-worker-{1..3} node.longhorn.io/create-default-disk=true
```
[1] https://longhorn.io/kb/tip-only-use-storage-on-a-set-of-nodes/

#### With Helm:
```bash
helm repo add longhorn https://charts.longhorn.io
-helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace
+helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --values longhorn-values.yaml
```

#### With kubectl:
```bash
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.2.2/deploy/install.yaml
```

#### Adding additional disks
https://medium.com/btech-engineering/longhorn-storage-solution-for-kubernetes-cluster-645bc1b98a5e

Add the disk in Proxmox, then run `lsblk` on the worker node, where it appears as `sdb`:

```
$ lsblk
NAME     MAJ:MIN RM  SIZE RO TYPE MOUNTPOINTS
sda        8:0    0   30G  0 disk
├─sda1     8:1    0   29G  0 part /
├─sda14    8:14   0    4M  0 part
├─sda15    8:15   0  106M  0 part /boot/efi
└─sda16  259:0    0  913M  0 part /boot
sdb        8:16   0  250G  0 disk
sr0       11:0    1    4M  0 rom
```

Partition, format, and mount /dev/sdb:

```
fdisk /dev/sdb

# Hit n(new), p(primary), Enter, Enter
# w(write to disk and exit)

mkfs.ext4 /dev/sdb1

mkdir /mnt/nvmedisk1

# Append to /etc/fstab:
# /dev/sdb1 /mnt/nvmedisk1 ext4 defaults 0 2

systemctl daemon-reload

mount -a
```

### Check via UI
```
k port-forward svc/longhorn-frontend 8000:80 -n longhorn-system
```

### 2. Create a PersistentVolume (PV) and a PersistentVolumeClaim (PVC)

@@ -7,7 +7,7 @@ metadata:
spec:
  secretName: longhorn-web-ui-tls
  dnsNames:
-    - longhorn.k8s.internal.schnrbs.work
+    - longhorn-dashboard.k8s.schnrbs.work
  issuerRef:
    name: cloudflare-cluster-issuer
    kind: ClusterIssuer
@@ -1,18 +1,16 @@
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
-  name: longhorn-web-ui
+  name: longhorn-ingress-route
  namespace: longhorn-system
spec:
-  properties:
-    entrypoints:
-      - websecure
+  entryPoints:
+    - websecure
  routes:
-    - match: Host(`longhorn.k8s.internal.schnrbs.work`)
+    - match: Host(`longhorn-dashboard.k8s.schnrbs.work`)
      kind: Rule
      services:
        - name: longhorn-frontend
          port: 80
-      tls:
-        secretName: longhorn-web-ui-tls
+  tls:
+    secretName: longhorn-web-ui-tls
@@ -1,2 +1,36 @@
global:
  nodeSelector:
    node.longhorn.io/create-default-disk: "true"

service:
  ui:
    type: NodePort
    nodePort: 30050
  manager:
    type: ClusterIP

# Replica count for the default Longhorn StorageClass.
persistence:
  defaultClass: false
  defaultFsType: ext4
  defaultClassReplicaCount: 2
  reclaimPolicy: Delete

# Replica counts for CSI Attacher, Provisioner, Resizer, Snapshotter
csi:
  attacherReplicaCount: 2
  provisionerReplicaCount: 2
  resizerReplicaCount: 2
  snapshotterReplicaCount: 2

# Default replica count and storage path
defaultSettings:
  upgradeChecker: false
  kubernetesClusterAutoscalerEnabled: false
  allowCollectingLonghornUsageMetrics: false
  createDefaultDiskLabeledNodes: true
  defaultReplicaCount: 2
  defaultDataPath: "/k8s-data"

longhornUI:
  replicas: 1

7  mise.toml (new file)
@@ -0,0 +1,7 @@
[tools]
jq = '1.8.1'
k3sup = '0.13.11'
helm = '3.19.0'
gum = '0.16.2'
gomplate = '4.3.3'
just = "1.42.4"
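With mise installed, the pinned tool versions above can be fetched in one step:

```
mise install   # installs jq, k3sup, helm, gum, gomplate and just at the pinned versions
```
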
@@ -11,7 +11,10 @@ Use for `helm` values:
https://github.com/cablespaghetti/k3s-monitoring/blob/master/kube-prometheus-stack-values.yaml

```
-helm upgrade --install prometheus prometheus-community/kube-prometheus-stack --create-namespace --namespace monitoring --values kube-prometheus-stack-values.yaml
+helm upgrade --install prometheus prometheus-community/kube-prometheus-stack \
+  --create-namespace \
+  --namespace monitoring \
+  --values kube-prometheus-stack-values.yaml
```

Accessing UIs via PortForwarding
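For example (service names assume the release name `prometheus` from the command above; verify with `kubectl get svc -n monitoring`):

```
kubectl port-forward svc/prometheus-grafana 3000:80 -n monitoring                    # Grafana on localhost:3000
kubectl port-forward svc/prometheus-kube-prometheus-prometheus 9090 -n monitoring    # Prometheus on localhost:9090
```
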
19  prometheus-stack/helm-install-output.txt (new file)
@@ -0,0 +1,19 @@
NAME: kube-prometheus-stack
LAST DEPLOYED: Wed Jun 11 19:32:51 2025
NAMESPACE: monitoring
STATUS: deployed
REVISION: 1
NOTES:
kube-prometheus-stack has been installed. Check its status by running:
  kubectl --namespace monitoring get pods -l "release=kube-prometheus-stack"

Get Grafana 'admin' user password by running:

  kubectl --namespace monitoring get secrets kube-prometheus-stack-grafana -o jsonpath="{.data.admin-password}" | base64 -d ; echo

Access Grafana local instance:

  export POD_NAME=$(kubectl --namespace monitoring get pod -l "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=kube-prometheus-stack" -oname)
  kubectl --namespace monitoring port-forward $POD_NAME 3000

Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.

37  statefulset/depl.yaml (new file)
@@ -0,0 +1,37 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
  namespace: test
spec:
  selector:
    matchLabels:
      app: nginx # has to match .spec.template.metadata.labels
  serviceName: "nginx"
  replicas: 3 # by default is 1
  minReadySeconds: 10 # by default is 0
  template:
    metadata:
      labels:
        app: nginx # has to match .spec.selector.matchLabels
    spec:
      terminationGracePeriodSeconds: 10
      containers:
        - name: nginx
          image: registry.k8s.io/nginx-slim:0.24
          ports:
            - containerPort: 80
              name: web
          volumeMounts:
            - name: www
              mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
    - metadata:
        name: www
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: "longhorn"
        resources:
          requests:
            storage: 1Gi

18  statefulset/svc.yaml (new file)
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: test
  labels:
    app: nginx
spec:
  ports:
    - port: 80
      name: web
  clusterIP: None
  selector:
    app: nginx
template:
  spec:
    nodeSelector:
      node.longhorn.io/create-default-disk: "true" # this is required to create a disk on the node
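Because `clusterIP: None` makes this a headless Service, each StatefulSet pod gets a stable DNS name of the form `<pod>.<service>.<namespace>.svc.cluster.local`. A quick way to verify (the busybox pod is just a throwaway test client):

```
kubectl run -n test dns-test --rm -it --restart=Never --image=busybox -- nslookup web-0.nginx.test.svc.cluster.local
```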