diff --git a/CLAUDE.md b/CLAUDE.md
index 6ca24dc..5e9ff26 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -40,6 +40,7 @@ create-user-and-db username='' db_name='' password='':
 ```
 
 **Important Notes:**
+
 - Parameters must be passed in the exact order they appear in the recipe definition
 - Named parameter syntax in the recipe definition is only for documentation
 - Always quote parameters that contain special characters or spaces
@@ -100,10 +101,12 @@ kubectl --context -oidc get nodes # Test OIDC auth
 ### Gomplate Template Pattern
 
 **Environment Variable Management:**
+
 - Justfile manages environment variables and their default values
 - Gomplate templates access variables using `{{ .Env.VAR }}`
 
 **Example justfile pattern:**
+
 ```just
 # At the top of justfile - define variables with defaults
 export PROMETHEUS_NAMESPACE := env("PROMETHEUS_NAMESPACE", "monitoring")
@@ -118,6 +121,7 @@ install:
 ```
 
 **Example gomplate template:**
+
 ```yaml
 # values.gomplate.yaml
 namespace: {{ .Env.PROMETHEUS_NAMESPACE }}
@@ -130,6 +134,53 @@ ingress:
 {{- end }}
 ```
 
+### Prometheus ServiceMonitor Pattern
+
+```just
+export MONITORING_ENABLED := env("MONITORING_ENABLED", "")
+export PROMETHEUS_NAMESPACE := env("PROMETHEUS_NAMESPACE", "monitoring")
+
+install:
+    if helm status kube-prometheus-stack -n ${PROMETHEUS_NAMESPACE} &>/dev/null; then
+        if [ -z "${MONITORING_ENABLED}" ]; then
+            if gum confirm "Enable Prometheus monitoring?"; then
+                MONITORING_ENABLED="true"
+            fi
+        fi
+    else
+        MONITORING_ENABLED="false"
+    fi
+    # ... helm install
+
+    if [ "${MONITORING_ENABLED}" = "true" ]; then
+        kubectl label namespace ${NAMESPACE} buun.channel/enable-monitoring=true --overwrite
+        gomplate -f servicemonitor.gomplate.yaml | kubectl apply -f -
+    fi
+```
+
+ServiceMonitor template (`servicemonitor.gomplate.yaml`):
+```yaml
+{{- if eq .Env.MONITORING_ENABLED "true" }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: my-service
+  namespace: {{ .Env.NAMESPACE }}
+  labels:
+    release: kube-prometheus-stack
+spec:
+  selector:
+    matchLabels:
+      app: my-service
+  endpoints:
+    - port: http
+      path: /metrics
+      interval: 30s
+{{- end }}
+```
+
+**Requirements:** (1) Namespace label `buun.channel/enable-monitoring=true`, (2) ServiceMonitor label `release=kube-prometheus-stack`, (3) Deploy after helm install.
+
 ### Authentication Flow
 
 1. Keycloak provides OIDC identity for all services
diff --git a/jupyterhub/jupyterhub-servicemonitor.gomplate.yaml b/jupyterhub/jupyterhub-servicemonitor.gomplate.yaml
new file mode 100644
index 0000000..784d8d9
--- /dev/null
+++ b/jupyterhub/jupyterhub-servicemonitor.gomplate.yaml
@@ -0,0 +1,22 @@
+{{- if eq .Env.MONITORING_ENABLED "true" }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: jupyterhub-hub
+  namespace: {{ .Env.JUPYTERHUB_NAMESPACE }}
+  labels:
+    app.kubernetes.io/name: jupyterhub
+    app.kubernetes.io/component: hub
+    release: kube-prometheus-stack
+spec:
+  jobLabel: jupyterhub-hub
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: jupyterhub
+      app.kubernetes.io/component: hub
+  endpoints:
+    - port: hub
+      path: /hub/metrics
+      interval: 30s
+      scrapeTimeout: 10s
+{{- end }}
diff --git a/jupyterhub/jupyterhub-values.gomplate.yaml b/jupyterhub/jupyterhub-values.gomplate.yaml
index 5dbcd80..b584d28 100644
--- a/jupyterhub/jupyterhub-values.gomplate.yaml
+++ b/jupyterhub/jupyterhub-values.gomplate.yaml
@@ -39,6 +39,7 @@ hub:
     JupyterHub:
       authenticator_class: generic-oauth
       admin_access: false
+      authenticate_prometheus: false
     Authenticator:
       enable_auth_state: true
 
@@ -132,6 +133,19 @@ hub:
   podSecurityContext:
     fsGroup: {{ .Env.JUPYTER_FSGID }}
 
+  networkPolicy:
+    ingress:
+      - from:
+          - podSelector:
+              matchLabels:
+                hub.jupyter.org/network-access-hub: "true"
+          - namespaceSelector:
+              matchLabels:
+                kubernetes.io/metadata.name: {{ .Env.PROMETHEUS_NAMESPACE }}
+        ports:
+          - port: http
+            protocol: TCP
+
 proxy:
   service:
     type: ClusterIP
diff --git a/jupyterhub/justfile b/jupyterhub/justfile
index 17b05af..d852d50 100644
--- a/jupyterhub/justfile
+++ b/jupyterhub/justfile
@@ -35,6 +35,8 @@ export LONGHORN_NAMESPACE := env("LONGHORN_NAMESPACE", "longhorn")
 export KEYCLOAK_REALM := env("KEYCLOAK_REALM", "buunstack")
 export VAULT_HOST := env("VAULT_HOST", "")
 export VAULT_ADDR := "https://" + VAULT_HOST
+export MONITORING_ENABLED := env("MONITORING_ENABLED", "")
+export PROMETHEUS_NAMESPACE := env("PROMETHEUS_NAMESPACE", "monitoring")
 
 [private]
 default:
@@ -78,6 +80,18 @@ install root_token='':
         echo "✓ JUPYTERHUB_CRYPT_KEY generated and saved to .env.local"
     fi
 
+    if helm status kube-prometheus-stack -n ${PROMETHEUS_NAMESPACE} &>/dev/null; then
+        if [ -z "${MONITORING_ENABLED}" ]; then
+            if gum confirm "Enable Prometheus monitoring (ServiceMonitor)?"; then
+                MONITORING_ENABLED="true"
+            else
+                MONITORING_ENABLED="false"
+            fi
+        fi
+    else
+        MONITORING_ENABLED="false"
+    fi
+
     just create-namespace
     # just k8s::copy-regcred ${JUPYTERHUB_NAMESPACE}
     just keycloak::create-client realm=${KEYCLOAK_REALM} client_id=${JUPYTERHUB_OIDC_CLIENT_ID} \
@@ -186,6 +200,14 @@ install root_token='':
     # wait deployments manually because `helm upgrade --wait` does not work for JupyterHub
     just k8s::wait-deployments-ready ${JUPYTERHUB_NAMESPACE} hub proxy
 
+    if [ "${MONITORING_ENABLED}" = "true" ]; then
+        echo "Enabling Prometheus monitoring for namespace ${JUPYTERHUB_NAMESPACE}..."
+        kubectl label namespace ${JUPYTERHUB_NAMESPACE} buun.channel/enable-monitoring=true --overwrite
+        echo "Deploying ServiceMonitor for Prometheus..."
+        gomplate -f jupyterhub-servicemonitor.gomplate.yaml | kubectl apply -f -
+        echo "✓ ServiceMonitor deployed"
+    fi
+
 # Uninstall JupyterHub
 uninstall:
     #!/bin/bash
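Not part of the diff: a minimal smoke-test sketch for the new monitoring path. The namespace values and the `prometheus-operated` service name are assumptions (kube-prometheus-stack defaults in the `monitoring` namespace); `gomplate` only needs the same environment variables the justfile exports.

```bash
#!/usr/bin/env bash
# Hypothetical end-to-end check for the ServiceMonitor pattern above; adjust names to your cluster.
set -euo pipefail

# 1. Preview the rendered manifest locally (no cluster access needed).
export MONITORING_ENABLED=true
export JUPYTERHUB_NAMESPACE=jupyterhub   # assumed namespace; the justfile exports the real one
gomplate -f jupyterhub/jupyterhub-servicemonitor.gomplate.yaml

# 2. After `just install`, confirm the labels Prometheus selects on.
kubectl get namespace "${JUPYTERHUB_NAMESPACE}" --show-labels
kubectl get servicemonitor jupyterhub-hub -n "${JUPYTERHUB_NAMESPACE}" \
  -o jsonpath='{.metadata.labels.release}{"\n"}'

# 3. Check that Prometheus scrapes the hub target via its API
#    (prometheus-operated is the headless service created by the operator).
kubectl -n monitoring port-forward svc/prometheus-operated 9090:9090 &
PF_PID=$!
sleep 3
curl -s http://localhost:9090/api/v1/targets | grep -c jupyterhub-hub || true
kill "${PF_PID}"
```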