Compare commits

..

9 Commits

| Author | SHA1 | Message | Date |
| ------ | ---------- | ------- | ---- |
| baschno | 50b0094e86 | bump b3sup | 2025-12-13 14:25:43 +01:00 |
| Masaki Yatsu | 0d45433ea9 | fix(litellm): fix langfuse integration | 2025-12-11 11:43:37 +09:00 |
| Masaki Yatsu | a8599b66f4 | fix(minio): fix OIDC and add public access recipes | 2025-12-10 13:26:41 +09:00 |
| Masaki Yatsu | 1924e56ad7 | chore(ollama): set ollama resource by env-vars | 2025-12-10 10:16:05 +09:00 |
| Masaki Yatsu | dae4e9d7ac | feat(temporal): enable worker insight | 2025-12-08 20:47:19 +09:00 |
| Masaki Yatsu | cfcb278c4d | fix(miniflux): fix login error | 2025-12-08 13:22:00 +09:00 |
| Masaki Yatsu | 022c85c0dc | feat(temporal): upgrade Temporal | 2025-12-08 09:23:17 +09:00 |
| Masaki Yatsu | 3ac8a72df6 | docs: write about Temporal | 2025-12-07 16:20:17 +09:00 |
| Masaki Yatsu | ca0a8dacba | feat(temporal): install Temporal | 2025-12-07 16:18:50 +09:00 |
18 changed files with 1369 additions and 30 deletions

View File

@@ -71,6 +71,7 @@ A remotely accessible Kubernetes home lab with OIDC authentication. Build a mode
- **[Dagster](https://dagster.io/)**: Modern data orchestration platform
- **[Apache Airflow](https://airflow.apache.org/)**: Workflow orchestration and task scheduling
- **[Temporal](https://temporal.io/)**: Durable workflow execution for distributed applications
### Security & Compliance (Optional)
@@ -379,6 +380,17 @@ Workflow orchestration platform:
[📖 See Airflow Documentation](./airflow/README.md)
### Temporal
Durable workflow execution platform:
- **Durable Execution**: Workflows survive process and infrastructure failures
- **Saga Pattern**: Implement distributed transactions with compensating actions
- **Multi-Language SDKs**: Go, Python, TypeScript, Java, .NET, PHP
- **Keycloak Authentication**: OAuth2 for Web UI access
[📖 See Temporal Documentation](./temporal/README.md)
### Fairwinds Polaris
Kubernetes configuration validation and best practices auditing:

View File

@@ -33,33 +33,55 @@ install:
)
done
if [ -z "${MINIFLUX_DB_PASSWORD}" ]; then
MINIFLUX_DB_PASSWORD=$(
gum input --prompt="Database password (empty to auto-generate): " \
--width=100 --password
)
# Check if PostgreSQL user already exists
if just postgres::user-exists ${MINIFLUX_DB_USERNAME} &>/dev/null; then
echo "PostgreSQL user '${MINIFLUX_DB_USERNAME}' already exists."
# Use existing password from Vault
if existing_db_pw=$(just vault::get miniflux/db password 2>/dev/null); then
echo "Using existing database password from Vault."
export MINIFLUX_DB_PASSWORD="${existing_db_pw}"
else
echo "Error: User exists but password not found in Vault." >&2
echo "Please delete the user first: just postgres::delete-user ${MINIFLUX_DB_USERNAME}" >&2
exit 1
fi
else
if [ -z "${MINIFLUX_DB_PASSWORD}" ]; then
MINIFLUX_DB_PASSWORD=$(just utils::random-password)
echo "Generated random password: ${MINIFLUX_DB_PASSWORD}"
MINIFLUX_DB_PASSWORD=$(
gum input --prompt="Database password (empty to auto-generate): " \
--width=100 --password
)
if [ -z "${MINIFLUX_DB_PASSWORD}" ]; then
MINIFLUX_DB_PASSWORD=$(just utils::random-password)
echo "Generated random password for database."
fi
fi
export MINIFLUX_DB_PASSWORD
just postgres::create-user-and-db \
${MINIFLUX_DB_USERNAME} ${MINIFLUX_DB_NAME} ${MINIFLUX_DB_PASSWORD}
just vault::put miniflux/db username=${MINIFLUX_DB_USERNAME} \
password=${MINIFLUX_DB_PASSWORD} database=${MINIFLUX_DB_NAME}
fi
just postgres::create-user-and-db \
${MINIFLUX_DB_USERNAME} ${MINIFLUX_DB_NAME} ${MINIFLUX_DB_PASSWORD}
just vault::put miniflux/db username=${MINIFLUX_DB_USERNAME} \
password=${MINIFLUX_DB_PASSWORD} database=${MINIFLUX_DB_NAME}
if [ -z "${MINIFLUX_ADMIN_PASSWORD}" ]; then
MINIFLUX_ADMIN_PASSWORD=$(
gum input --prompt="Admin password (empty to auto-generate): " \
--width=100 --password
)
# Check if admin password exists in Vault
if existing_admin_pw=$(just vault::get miniflux/admin password 2>/dev/null); then
echo "Using existing admin password from Vault."
export MINIFLUX_ADMIN_PASSWORD="${existing_admin_pw}"
else
if [ -z "${MINIFLUX_ADMIN_PASSWORD}" ]; then
MINIFLUX_ADMIN_PASSWORD=$(just utils::random-password)
echo "Generated random password: ${MINIFLUX_ADMIN_PASSWORD}"
MINIFLUX_ADMIN_PASSWORD=$(
gum input --prompt="Admin password (empty to auto-generate): " \
--width=100 --password
)
if [ -z "${MINIFLUX_ADMIN_PASSWORD}" ]; then
MINIFLUX_ADMIN_PASSWORD=$(just utils::random-password)
echo "Generated random password for admin."
fi
fi
export MINIFLUX_ADMIN_PASSWORD
just vault::put miniflux/admin username=${MINIFLUX_ADMIN_USERNAME} \
password=${MINIFLUX_ADMIN_PASSWORD}
fi
just vault::put miniflux/admin username=${MINIFLUX_ADMIN_USERNAME} \
password=${MINIFLUX_ADMIN_PASSWORD}
# https://github.com/gabe565/charts/tree/main/charts/miniflux
MINIFLUX_NAMESPACE=${MINIFLUX_NAMESPACE} \
@@ -80,3 +102,26 @@ admin-username:
# Print admin password
admin-password:
@just vault::get miniflux/admin password
# Reset admin password (deletes admin user from DB so it gets recreated on pod restart)
reset-admin-password:
#!/bin/bash
set -euo pipefail
echo "This will reset the admin password to the value stored in Vault."
if ! gum confirm "Continue?"; then
echo "Cancelled."
exit 0
fi
echo "Deleting admin user from database..."
kubectl exec -n postgres postgres-cluster-1 -c postgres -- \
psql -U postgres -d ${MINIFLUX_DB_NAME} -c "DELETE FROM users WHERE username = '${MINIFLUX_ADMIN_USERNAME}';"
echo "Restarting Miniflux pod..."
kubectl rollout restart deployment/miniflux -n ${MINIFLUX_NAMESPACE}
kubectl rollout status deployment/miniflux -n ${MINIFLUX_NAMESPACE} --timeout=60s
echo ""
echo "Admin password has been reset."
echo "Username: $(just vault::get miniflux/admin username)"
echo "Password: $(just vault::get miniflux/admin password)"

View File

@@ -15,6 +15,8 @@ ingress:
env:
DATABASE_URL: "postgresql://{{ .Env.MINIFLUX_DB_USERNAME }}:{{ .Env.MINIFLUX_DB_PASSWORD }}@postgres-cluster-rw.postgres:5432/{{ .Env.MINIFLUX_DB_NAME }}"
BASE_URL: "https://{{ .Env.MINIFLUX_HOST }}"
HTTPS: "1"
ADMIN_USERNAME: {{ .Env.MINIFLUX_ADMIN_USERNAME }}
ADMIN_PASSWORD: {{ .Env.MINIFLUX_ADMIN_PASSWORD }}

View File

@@ -39,6 +39,7 @@ mod qdrant
mod querybook
mod security
mod superset
mod temporal
mod trino
mod utils
mod vault

View File

@@ -1032,7 +1032,7 @@ set-team-langfuse-project team_id='' public_key='' secret_key='':
\"callback_name\": \"langfuse\",
\"callback_vars\": {
\"langfuse_public_key\": \"${public_key}\",
\"langfuse_secret\": \"${secret_key}\",
\"langfuse_secret_key\": \"${secret_key}\",
\"langfuse_host\": \"https://${LANGFUSE_HOST}\"
}
}]

View File

@@ -221,6 +221,83 @@ just minio::bucket-exists mybucket
This returns exit code 0 if the bucket exists, 1 otherwise.
## Public Access
MinIO allows you to configure anonymous (public) access to buckets or specific prefixes for serving static content like images.
### Set Public Download Access
Enable public read access for a bucket or prefix:
```bash
# Set public access for entire bucket
just minio::set-public-download mybucket
# Set public access for specific prefix only
just minio::set-public-download mybucket/public
```
After setting public access, files can be accessed without authentication:
```text
https://your-minio-host/mybucket/public/image.png
```
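As a quick check, a public object can be fetched with any plain HTTP client and no credentials. A minimal sketch (the host, bucket, and object name are placeholders):
```python
import requests  # any HTTP client works; no S3 request signing is needed

# Placeholder URL: substitute your MinIO host and the bucket/prefix you made public.
url = "https://your-minio-host/mybucket/public/image.png"

resp = requests.get(url, timeout=10)
resp.raise_for_status()  # a 403 here means the anonymous policy is not in effect
with open("image.png", "wb") as f:
    f.write(resp.content)
```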
### Check Public Access Status
View current anonymous access policy:
```bash
just minio::show-public-access mybucket
```
Possible values:
- `private`: No anonymous access (default)
- `download`: Public read access
- `upload`: Public write access
- `public`: Public read and write access
- `custom`: Custom policy applied
### Remove Public Access
Revoke anonymous access:
```bash
just minio::remove-public-access mybucket/public
```
### Using mc Commands
```bash
# Set public download (read-only)
mc anonymous set download myminio/mybucket/public
# Set public upload (write-only)
mc anonymous set upload myminio/mybucket/uploads
# Set full public access (read and write)
mc anonymous set public myminio/mybucket
# Remove public access
mc anonymous set none myminio/mybucket
# Check current policy
mc anonymous get myminio/mybucket
```
### Presigned URLs (Temporary Access)
For temporary access to private objects, use presigned URLs:
```bash
# Generate URL valid for 7 days
mc share download myminio/mybucket/private-file.pdf --expire=168h
# Generate upload URL
mc share upload myminio/mybucket/uploads/ --expire=1h
```
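Presigned URLs can also be generated from application code through MinIO's S3-compatible API. A minimal sketch using boto3 (the endpoint, bucket, key, and credentials are placeholders; this is not part of the recipes above):
```python
import boto3

# Placeholder endpoint and credentials for a MinIO user with access to the bucket.
s3 = boto3.client(
    "s3",
    endpoint_url="https://your-minio-host",
    aws_access_key_id="YOUR_ACCESS_KEY",
    aws_secret_access_key="YOUR_SECRET_KEY",
)

# URL valid for 1 hour; works against private buckets without changing their policy.
url = s3.generate_presigned_url(
    "get_object",
    Params={"Bucket": "mybucket", "Key": "private-file.pdf"},
    ExpiresIn=3600,
)
print(url)
```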
## User Management
### Create MinIO User

View File

@@ -47,6 +47,7 @@ create-root-credentials:
gomplate -f minio-root-external-secret.gomplate.yaml | kubectl apply -f -
echo "Waiting for ExternalSecret to sync..."
sleep 2
kubectl wait --for=condition=Ready externalsecret/minio \
-n ${MINIO_NAMESPACE} --timeout=60s
else
@@ -96,7 +97,12 @@ install:
--placeholder="e.g., minio-console.example.com"
)
fi
# Generate OIDC client secret for confidential client
OIDC_CLIENT_SECRET=$(just utils::random-password)
just keycloak::create-client realm=${KEYCLOAK_REALM} client_id=${MINIO_OIDC_CLIENT_ID} \
client_secret="${OIDC_CLIENT_SECRET}" \
redirect_url="https://${MINIO_HOST}/oauth_callback,https://${MINIO_CONSOLE_HOST}/oauth_callback"
just add-keycloak-minio-policy
just create-namespace
@@ -105,6 +111,28 @@ install:
pod-security.kubernetes.io/enforce=restricted --overwrite
just create-root-credentials
# Store OIDC client secret
if helm status external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} &>/dev/null; then
echo "Storing OIDC client secret in Vault..."
just vault::put minio/oidc client_id="${MINIO_OIDC_CLIENT_ID}" client_secret="${OIDC_CLIENT_SECRET}"
kubectl delete externalsecret minio-oidc -n ${MINIO_NAMESPACE} --ignore-not-found
gomplate -f minio-oidc-external-secret.gomplate.yaml | kubectl apply -f -
echo "Waiting for ExternalSecret to sync..."
sleep 2
kubectl wait --for=condition=Ready externalsecret/minio-oidc \
-n ${MINIO_NAMESPACE} --timeout=60s
else
echo "Creating OIDC client secret directly..."
kubectl delete secret minio-oidc -n ${MINIO_NAMESPACE} --ignore-not-found
kubectl create secret generic minio-oidc -n ${MINIO_NAMESPACE} \
--from-literal=clientId="${MINIO_OIDC_CLIENT_ID}" \
--from-literal=clientSecret="${OIDC_CLIENT_SECRET}"
if helm status vault -n ${K8S_VAULT_NAMESPACE} &>/dev/null; then
just vault::put minio/oidc client_id="${MINIO_OIDC_CLIENT_ID}" client_secret="${OIDC_CLIENT_SECRET}"
fi
fi
just add-helm-repo
gomplate -f minio-values.gomplate.yaml -o minio-values.yaml
helm upgrade --install minio minio/minio \
@@ -260,3 +288,70 @@ grant-policy user='' policy='':
mc admin policy attach local ${POLICY} --user=${USER}"
echo "✅ Policy ${POLICY} granted to user ${USER}"
# Set public download access for a bucket or prefix
set-public-download path='':
#!/bin/bash
set -euo pipefail
PATH_ARG="{{ path }}"
while [ -z "${PATH_ARG}" ]; do
PATH_ARG=$(
gum input --prompt="Bucket/prefix path: " --width=100 \
--placeholder="e.g., my-bucket/public"
)
done
ROOT_USER=$(just root-username)
ROOT_PASSWORD=$(just root-password)
kubectl -n ${MINIO_NAMESPACE} exec deploy/minio -- \
mc alias set local http://localhost:9000 ${ROOT_USER} ${ROOT_PASSWORD}
kubectl -n ${MINIO_NAMESPACE} exec deploy/minio -- \
mc anonymous set download local/${PATH_ARG}
echo "✅ Public download access enabled for ${PATH_ARG}"
# Remove public access from a bucket or prefix
remove-public-access path='':
#!/bin/bash
set -euo pipefail
PATH_ARG="{{ path }}"
while [ -z "${PATH_ARG}" ]; do
PATH_ARG=$(
gum input --prompt="Bucket/prefix path: " --width=100 \
--placeholder="e.g., my-bucket/public"
)
done
ROOT_USER=$(just root-username)
ROOT_PASSWORD=$(just root-password)
kubectl -n ${MINIO_NAMESPACE} exec deploy/minio -- \
mc alias set local http://localhost:9000 ${ROOT_USER} ${ROOT_PASSWORD}
kubectl -n ${MINIO_NAMESPACE} exec deploy/minio -- \
mc anonymous set none local/${PATH_ARG}
echo "✅ Public access removed from ${PATH_ARG}"
# Show anonymous access policy for a bucket or prefix
show-public-access path='':
#!/bin/bash
set -euo pipefail
PATH_ARG="{{ path }}"
while [ -z "${PATH_ARG}" ]; do
PATH_ARG=$(
gum input --prompt="Bucket/prefix path: " --width=100 \
--placeholder="e.g., my-bucket"
)
done
ROOT_USER=$(just root-username)
ROOT_PASSWORD=$(just root-password)
kubectl -n ${MINIO_NAMESPACE} exec deploy/minio -- \
mc alias set local http://localhost:9000 ${ROOT_USER} ${ROOT_PASSWORD}
kubectl -n ${MINIO_NAMESPACE} exec deploy/minio -- \
mc anonymous get local/${PATH_ARG}

View File

@@ -0,0 +1,22 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: minio-oidc
namespace: {{ .Env.MINIO_NAMESPACE }}
spec:
refreshInterval: 1h
secretStoreRef:
name: vault-secret-store
kind: ClusterSecretStore
target:
name: minio-oidc
creationPolicy: Owner
data:
- secretKey: clientId
remoteRef:
key: minio/oidc
property: client_id
- secretKey: clientSecret
remoteRef:
key: minio/oidc
property: client_secret

View File

@@ -7,8 +7,9 @@ existingSecret: "minio"
oidc:
enabled: true
configUrl: "https://{{ .Env.KEYCLOAK_HOST }}/realms/{{ .Env.KEYCLOAK_REALM }}/.well-known/openid-configuration"
clientId: "{{ .Env.MINIO_OIDC_CLIENT_ID }}"
clientSecret: ""
existingClientSecretName: "minio-oidc"
existingClientIdKey: "clientId"
existingClientSecretKey: "clientSecret"
claimName: "minioPolicy"
scopes: "openid,profile,email"
redirectUri: "https://{{ .Env.MINIO_CONSOLE_HOST }}/oauth_callback"

View File

@@ -3,11 +3,11 @@ gomplate = "4.3.3"
gum = "0.16.2"
helm = "3.19.0"
just = "1.42.4"
k3sup = "0.13.10"
k3sup = "0.13.11"
kubelogin = "1.34.0"
node = "22.18.0"
python = "3.12.11"
telepresence = "2.25.0"
telepresence = "2.25.1"
trivy = "0.67.2"
uv = "0.8.7"
vault = "1.20.2"

View File

@@ -8,7 +8,11 @@ export OLLAMA_GPU_TYPE := env("OLLAMA_GPU_TYPE", "nvidia")
export OLLAMA_GPU_COUNT := env("OLLAMA_GPU_COUNT", "1")
export OLLAMA_MODELS := env("OLLAMA_MODELS", "")
export OLLAMA_STORAGE_SIZE := env("OLLAMA_STORAGE_SIZE", "30Gi")
export OLLAMA_HELM_TIMEOUT := env("OLLAMA_HELM_TIMEOUT", "10m")
export OLLAMA_HELM_TIMEOUT := env("OLLAMA_HELM_TIMEOUT", "60m")
export OLLAMA_MEMORY_REQUEST := env("OLLAMA_MEMORY_REQUEST", "2Gi")
export OLLAMA_MEMORY_LIMIT := env("OLLAMA_MEMORY_LIMIT", "12Gi")
export OLLAMA_CPU_REQUEST := env("OLLAMA_CPU_REQUEST", "25m")
export OLLAMA_CPU_LIMIT := env("OLLAMA_CPU_LIMIT", "100m")
[private]
default:

View File

@@ -36,11 +36,11 @@ securityContext:
resources:
requests:
cpu: 25m
memory: 2Gi
cpu: {{ .Env.OLLAMA_CPU_REQUEST }}
memory: {{ .Env.OLLAMA_MEMORY_REQUEST }}
limits:
cpu: 100m
memory: 8Gi
cpu: {{ .Env.OLLAMA_CPU_LIMIT }}
memory: {{ .Env.OLLAMA_MEMORY_LIMIT }}
persistentVolume:
enabled: true

temporal/.gitignore (new file, 1 line)
View File

@@ -0,0 +1 @@
temporal-values.yaml

temporal/README.md (new file, 401 lines)
View File

@@ -0,0 +1,401 @@
# Temporal
Durable workflow execution platform for building reliable distributed applications:
- **Durable Execution**: Workflows survive process and infrastructure failures
- **Language Support**: SDKs for Go, Java, Python, TypeScript, .NET, PHP
- **Visibility**: Query and observe workflow state via Web UI and APIs
- **Scalability**: Horizontally scalable architecture
- **Multi-tenancy**: Namespace-based isolation for workflows
## Prerequisites
- Kubernetes cluster (k3s)
- PostgreSQL cluster (CloudNativePG)
- Keycloak installed and configured
- Vault for secrets management
- External Secrets Operator (optional, for Vault integration)
## Installation
```bash
just temporal::install
```
You will be prompted for:
- **Temporal host (FQDN)**: e.g., `temporal.example.com`
- **Keycloak host (FQDN)**: e.g., `auth.example.com`
- **Enable Prometheus monitoring**: If kube-prometheus-stack is installed
### What Gets Installed
- Temporal Server (frontend, history, matching, worker services)
- Temporal Web UI with Keycloak OIDC authentication
- Temporal Admin Tools for cluster management
- PostgreSQL databases (`temporal`, `temporal_visibility`)
- Keycloak OAuth client (confidential client)
- Vault secrets (if External Secrets Operator is available)
## Configuration
Environment variables (set in `.env.local`, or override in your shell):
| Variable | Default | Description |
| -------- | ------- | ----------- |
| `TEMPORAL_NAMESPACE` | `temporal` | Kubernetes namespace |
| `TEMPORAL_CHART_VERSION` | `0.72.0` | Helm chart version |
| `TEMPORAL_HOST` | (prompt) | External hostname (FQDN) |
| `TEMPORAL_OIDC_CLIENT_ID` | `temporal` | Keycloak client ID |
| `KEYCLOAK_HOST` | (prompt) | Keycloak hostname (FQDN) |
| `KEYCLOAK_REALM` | `buunstack` | Keycloak realm |
| `MONITORING_ENABLED` | (prompt) | Enable Prometheus ServiceMonitor |
## Architecture
```plain
External Users
|
Cloudflare Tunnel (HTTPS)
|
Traefik Ingress (HTTPS)
|
Temporal Web UI (HTTP inside cluster)
|-- OAuth --> Keycloak (authentication)
|
Temporal Server
|-- Frontend Service (gRPC :7233)
| |-- Client connections
| |-- Workflow/Activity APIs
|
|-- History Service
| |-- Workflow state management
| |-- Event sourcing
|
|-- Matching Service
| |-- Task queue management
| |-- Worker polling
|
|-- Worker Service
| |-- System workflows
| |-- Archival
|
PostgreSQL (temporal, temporal_visibility)
```
**Key Components**:
- **Frontend**: Entry point for all client requests (gRPC API)
- **History**: Maintains workflow execution history and state
- **Matching**: Routes tasks to appropriate workers
- **Worker**: Executes internal system workflows
- **Web UI**: Browser-based workflow monitoring and management
- **Admin Tools**: CLI tools for cluster administration
## Usage
### Access Web UI
1. Navigate to `https://your-temporal-host/`
2. Authenticate via Keycloak SSO
3. Select a namespace to view workflows
### Temporal CLI Setup (Local Development)
The Temporal gRPC endpoint is only accessible within the cluster network. Use [Telepresence](https://www.telepresence.io/) to connect from your local machine.
#### Step 1: Connect to the Cluster
```bash
telepresence connect
```
#### Step 2: Configure Temporal CLI
Set environment variables (add to `.bashrc`, `.zshrc`, or use direnv):
```bash
export TEMPORAL_ADDRESS="temporal-frontend.temporal:7233"
export TEMPORAL_NAMESPACE="default"
```
Or create a named environment for multiple clusters:
```bash
# Configure named environment
temporal env set --env buun -k address -v temporal-frontend.temporal:7233
temporal env set --env buun -k namespace -v default
# Use with commands
temporal workflow list --env buun
```
#### Step 3: Verify Connection
```bash
# Check telepresence status
telepresence status
# Test Temporal connection
temporal operator namespace list
```
#### CLI Examples
```bash
# List workflows
temporal workflow list
# Describe a workflow
temporal workflow describe --workflow-id my-workflow-id
# Query workflow state
temporal workflow query --workflow-id my-workflow-id --type my-query
# Signal a workflow
temporal workflow signal --workflow-id my-workflow-id --name my-signal
# Terminate a workflow
temporal workflow terminate --workflow-id my-workflow-id --reason "manual termination"
```
### Create a Temporal Namespace
Before running workflows, create a namespace:
```bash
just temporal::create-temporal-namespace default
```
With custom retention period:
```bash
just temporal::create-temporal-namespace myapp 7d
```
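Application code then targets the namespace when connecting. A minimal sketch with the Python SDK, reusing the in-cluster endpoint and the `myapp` namespace from the examples above:
```python
import asyncio

from temporalio.client import Client


async def main() -> None:
    # The namespace must already exist (see the `just` recipe above).
    client = await Client.connect(
        "temporal-frontend.temporal:7233",
        namespace="myapp",
    )
    # List recent workflow executions in this namespace.
    async for wf in client.list_workflows():
        print(wf.id, wf.status)


if __name__ == "__main__":
    asyncio.run(main())
```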
### List Temporal Namespaces
```bash
just temporal::list-temporal-namespaces
```
### Cluster Health Check
```bash
just temporal::cluster-info
```
### Connect Workers
Workers connect to the Temporal Frontend service. From within the cluster:
```text
temporal-frontend.temporal:7233
```
Example Python worker:
```python
import asyncio

from temporalio.client import Client
from temporalio.worker import Worker

# MyWorkflow and my_activity are your own definitions (see the sketch below).
async def main():
    client = await Client.connect("temporal-frontend.temporal:7233")
    worker = Worker(
        client,
        task_queue="my-task-queue",
        workflows=[MyWorkflow],
        activities=[my_activity],
    )
    await worker.run()

if __name__ == "__main__":
    asyncio.run(main())
```
Example Go worker:
```go
import (
	"log"

	"go.temporal.io/sdk/client"
	"go.temporal.io/sdk/worker"
)

// MyWorkflow and MyActivity are your own definitions (see the sketch below).
func main() {
	c, err := client.Dial(client.Options{
		HostPort: "temporal-frontend.temporal:7233",
	})
	if err != nil {
		log.Fatalf("unable to connect to Temporal: %v", err)
	}
	defer c.Close()

	w := worker.New(c, "my-task-queue", worker.Options{})
	w.RegisterWorkflow(MyWorkflow)
	w.RegisterActivity(MyActivity)

	if err := w.Run(worker.InterruptCh()); err != nil {
		log.Fatalf("worker exited: %v", err)
	}
}
```
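Both workers reference `MyWorkflow` and `my_activity` / `MyActivity`, which this README does not define. A minimal Python definition might look like the following (the names and logic are illustrative only):
```python
from datetime import timedelta

from temporalio import activity, workflow


@activity.defn
async def my_activity(name: str) -> str:
    return f"Hello, {name}!"


@workflow.defn
class MyWorkflow:
    @workflow.run
    async def run(self, name: str) -> str:
        # Activities are retried automatically; the timeout bounds each attempt.
        return await workflow.execute_activity(
            my_activity,
            name,
            start_to_close_timeout=timedelta(seconds=30),
        )
```
A client in the same namespace can then start it with, for example, `await client.execute_workflow(MyWorkflow.run, "Temporal", id="hello-1", task_queue="my-task-queue")`.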
## Authentication
### Web UI (OIDC)
- Users authenticate via Keycloak
- Standard OIDC flow with Authorization Code grant
- Configured via environment variables in the Web UI deployment
### gRPC API
- By default, no authentication is required for gRPC connections within the cluster
- For production, configure mTLS or JWT-based authorization (see the hedged client-side sketch below)
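For example, once the frontend is configured for mTLS (which this install does not set up), a Python SDK client could present certificates as follows; the certificate file names are assumptions:
```python
from temporalio.client import Client
from temporalio.service import TLSConfig

# Illustrative sketch: assumes you have provisioned a CA and a client cert/key
# for the Temporal frontend yourself; this repo does not generate them.
async def connect_with_mtls() -> Client:
    with open("ca.pem", "rb") as f:
        ca_cert = f.read()
    with open("client.pem", "rb") as f:
        client_cert = f.read()
    with open("client.key", "rb") as f:
        client_key = f.read()
    return await Client.connect(
        "temporal-frontend.temporal:7233",
        tls=TLSConfig(
            server_root_ca_cert=ca_cert,
            client_cert=client_cert,
            client_private_key=client_key,
        ),
    )
```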
## Management
### Upgrade Temporal
```bash
just temporal::upgrade
```
### Uninstall
```bash
just temporal::uninstall
```
This removes:
- Helm release and all Kubernetes resources
- Namespace
- Keycloak client
**Note**: The following resources are NOT deleted:
- PostgreSQL databases (`temporal`, `temporal_visibility`)
- Vault secrets
### Full Cleanup
To remove everything including databases and Vault secrets:
```bash
just temporal::uninstall true
```
Or manually:
```bash
just temporal::delete-postgres-user-and-db
```
## Troubleshooting
### Check Pod Status
```bash
kubectl get pods -n temporal
```
Expected pods:
- `temporal-frontend-*` - Frontend service
- `temporal-history-*` - History service
- `temporal-matching-*` - Matching service
- `temporal-worker-*` - Worker service
- `temporal-web-*` - Web UI
- `temporal-admintools-*` - Admin tools
### View Logs
```bash
# Frontend logs
kubectl logs -n temporal deployment/temporal-frontend --tail=100
# History logs
kubectl logs -n temporal deployment/temporal-history --tail=100
# Web UI logs
kubectl logs -n temporal deployment/temporal-web --tail=100
```
### Database Connection Issues
Check PostgreSQL connectivity:
```bash
kubectl exec -n temporal deployment/temporal-admintools -- \
psql -h postgres-cluster-rw.postgres -U temporal -d temporal -c "SELECT 1"
```
### Schema Issues
If schema initialization fails, check the schema job:
```bash
kubectl logs -n temporal -l app.kubernetes.io/component=schema --all-containers
```
### Service Discovery Issues
Verify services are running:
```bash
kubectl get svc -n temporal
```
Test frontend connectivity from admin tools:
```bash
kubectl exec -n temporal deployment/temporal-admintools -- \
tctl cluster health
```
### Web UI Login Issues
Verify Keycloak client configuration:
```bash
just keycloak::get-client buunstack temporal
```
Check Web UI environment variables:
```bash
kubectl get deployment temporal-web -n temporal -o jsonpath='{.spec.template.spec.containers[0].env}' | jq
```
## Configuration Files
| File | Description |
| ---- | ----------- |
| `temporal-values.gomplate.yaml` | Helm values template |
| `postgres-external-secret.gomplate.yaml` | PostgreSQL credentials ExternalSecret |
| `keycloak-auth-external-secret.gomplate.yaml` | Keycloak OIDC credentials ExternalSecret |
## Security Considerations
- **Pod Security Standards**: Namespace configured with **baseline** enforcement
- **Server Security**: Temporal server components run with restricted-compliant security contexts
### Why Not Restricted?
The namespace cannot use `restricted` Pod Security Standards due to the Temporal Web UI image (`temporalio/ui`):
- The image writes configuration files to `./config/docker.yaml` at startup
- The container's filesystem is owned by root (UID 0)
- When running as non-root user (UID 1000), the container cannot write to these paths
- Error: `unable to create open ./config/docker.yaml: permission denied`
The Temporal server components (frontend, history, matching, worker) **do** meet `restricted` requirements and run with full security hardening. Only the Web UI component requires `baseline`.
### Server Security Context
Temporal server components (frontend, history, matching, worker) run with:
- `runAsNonRoot: true`
- `runAsUser: 1000`
- `allowPrivilegeEscalation: false`
- `seccompProfile.type: RuntimeDefault`
- `capabilities.drop: [ALL]`
## References
- [Temporal Documentation](https://docs.temporal.io/)
- [Temporal GitHub](https://github.com/temporalio/temporal)
- [Temporal Helm Charts](https://github.com/temporalio/helm-charts)

temporal/justfile (new file, 434 lines)
View File

@@ -0,0 +1,434 @@
set fallback := true
export TEMPORAL_NAMESPACE := env("TEMPORAL_NAMESPACE", "temporal")
export TEMPORAL_CHART_VERSION := env("TEMPORAL_CHART_VERSION", "0.72.0")
export TEMPORAL_HOST := env("TEMPORAL_HOST", "")
export TEMPORAL_OIDC_CLIENT_ID := env("TEMPORAL_OIDC_CLIENT_ID", "temporal")
export EXTERNAL_SECRETS_NAMESPACE := env("EXTERNAL_SECRETS_NAMESPACE", "external-secrets")
export PROMETHEUS_NAMESPACE := env("PROMETHEUS_NAMESPACE", "monitoring")
export MONITORING_ENABLED := env("MONITORING_ENABLED", "")
export KEYCLOAK_REALM := env("KEYCLOAK_REALM", "buunstack")
export KEYCLOAK_HOST := env("KEYCLOAK_HOST", "")
export K8S_VAULT_NAMESPACE := env("K8S_VAULT_NAMESPACE", "vault")
[private]
default:
@just --list --unsorted --list-submodules
# Add Helm repository
add-helm-repo:
helm repo add temporal https://go.temporal.io/helm-charts
helm repo update temporal
# Remove Helm repository
remove-helm-repo:
helm repo remove temporal
# Create Temporal namespace
create-namespace:
kubectl get namespace ${TEMPORAL_NAMESPACE} &>/dev/null || \
kubectl create namespace ${TEMPORAL_NAMESPACE}
# Delete Temporal namespace
delete-namespace:
kubectl delete namespace ${TEMPORAL_NAMESPACE} --ignore-not-found
# Create PostgreSQL user and databases for Temporal
create-postgres-user-and-db:
#!/bin/bash
set -euo pipefail
if just postgres::user-exists temporal &>/dev/null; then
echo "PostgreSQL user 'temporal' already exists"
else
echo "Creating PostgreSQL user and databases..."
PG_PASSWORD=$(just utils::random-password)
just postgres::create-user-and-db temporal temporal "${PG_PASSWORD}"
just postgres::create-db temporal_visibility
just postgres::grant temporal_visibility temporal
just vault::put temporal/db username=temporal password="${PG_PASSWORD}"
echo "PostgreSQL user and databases created."
fi
# Delete PostgreSQL user and databases
delete-postgres-user-and-db:
#!/bin/bash
set -euo pipefail
if gum confirm "Delete PostgreSQL user and databases for Temporal?"; then
just postgres::delete-db temporal || true
just postgres::delete-db temporal_visibility || true
just postgres::delete-user temporal || true
just vault::delete temporal/db || true
echo "PostgreSQL user and databases deleted."
else
echo "Cancelled."
fi
# Create Postgres secret
create-postgres-secret:
#!/bin/bash
set -euo pipefail
if kubectl get secret temporal-postgres-auth -n ${TEMPORAL_NAMESPACE} &>/dev/null; then
echo "Postgres auth secret already exists"
exit 0
fi
if helm status external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} &>/dev/null; then
echo "External Secrets Operator detected. Creating ExternalSecret..."
kubectl delete externalsecret temporal-postgres-auth -n ${TEMPORAL_NAMESPACE} --ignore-not-found
gomplate -f postgres-external-secret.gomplate.yaml | kubectl apply -f -
echo "Waiting for ExternalSecret to sync..."
kubectl wait --for=condition=Ready externalsecret/temporal-postgres-auth \
-n ${TEMPORAL_NAMESPACE} --timeout=60s
else
echo "Creating Kubernetes Secret directly..."
PG_USERNAME=$(just vault::get temporal/db username)
PG_PASSWORD=$(just vault::get temporal/db password)
kubectl create secret generic temporal-postgres-auth \
--from-literal=password="${PG_PASSWORD}" \
-n ${TEMPORAL_NAMESPACE}
fi
# Delete Postgres secret
delete-postgres-secret:
kubectl delete externalsecret temporal-postgres-auth -n ${TEMPORAL_NAMESPACE} --ignore-not-found
kubectl delete secret temporal-postgres-auth -n ${TEMPORAL_NAMESPACE} --ignore-not-found
# Create Keycloak client for Temporal Web UI
create-keycloak-client:
#!/bin/bash
set -euo pipefail
while [ -z "${TEMPORAL_HOST}" ]; do
TEMPORAL_HOST=$(
gum input --prompt="Temporal host (FQDN): " --width=100 \
--placeholder="e.g., temporal.example.com"
)
done
echo "Creating Keycloak client for Temporal..."
just keycloak::delete-client ${KEYCLOAK_REALM} ${TEMPORAL_OIDC_CLIENT_ID} || true
CLIENT_SECRET=$(just utils::random-password)
just keycloak::create-client \
realm=${KEYCLOAK_REALM} \
client_id=${TEMPORAL_OIDC_CLIENT_ID} \
redirect_url="https://${TEMPORAL_HOST}/*" \
client_secret="${CLIENT_SECRET}"
kubectl delete secret temporal-oauth-temp -n ${TEMPORAL_NAMESPACE} --ignore-not-found
kubectl create secret generic temporal-oauth-temp -n ${TEMPORAL_NAMESPACE} \
--from-literal=client_id="${TEMPORAL_OIDC_CLIENT_ID}" \
--from-literal=client_secret="${CLIENT_SECRET}"
echo "Keycloak client created successfully"
echo "Client ID: ${TEMPORAL_OIDC_CLIENT_ID}"
echo "Redirect URI: https://${TEMPORAL_HOST}/*"
# Delete Keycloak client
delete-keycloak-client:
#!/bin/bash
set -euo pipefail
echo "Deleting Keycloak client for Temporal..."
just keycloak::delete-client ${KEYCLOAK_REALM} ${TEMPORAL_OIDC_CLIENT_ID} || true
kubectl delete secret temporal-oauth-temp -n ${TEMPORAL_NAMESPACE} --ignore-not-found
if just vault::exist keycloak/client/temporal &>/dev/null; then
just vault::delete keycloak/client/temporal
fi
# Create Keycloak auth secret
create-keycloak-auth-secret:
#!/bin/bash
set -euo pipefail
if kubectl get secret temporal-oauth-temp -n ${TEMPORAL_NAMESPACE} &>/dev/null; then
oauth_client_id=$(kubectl get secret temporal-oauth-temp -n ${TEMPORAL_NAMESPACE} \
-o jsonpath='{.data.client_id}' | base64 -d)
oauth_client_secret=$(kubectl get secret temporal-oauth-temp -n ${TEMPORAL_NAMESPACE} \
-o jsonpath='{.data.client_secret}' | base64 -d)
elif helm status vault -n ${K8S_VAULT_NAMESPACE} &>/dev/null && \
just vault::get keycloak/client/temporal client_secret &>/dev/null; then
oauth_client_id=$(just vault::get keycloak/client/temporal client_id)
oauth_client_secret=$(just vault::get keycloak/client/temporal client_secret)
else
echo "Error: Cannot retrieve OAuth client secret. Please run 'just temporal::create-keycloak-client' first."
exit 1
fi
if helm status external-secrets -n ${EXTERNAL_SECRETS_NAMESPACE} &>/dev/null; then
echo "External Secrets Operator detected. Storing secrets in Vault..."
just vault::put keycloak/client/temporal \
client_id="${oauth_client_id}" \
client_secret="${oauth_client_secret}"
kubectl delete secret temporal-web-auth -n ${TEMPORAL_NAMESPACE} --ignore-not-found
kubectl delete externalsecret temporal-web-auth -n ${TEMPORAL_NAMESPACE} --ignore-not-found
gomplate -f keycloak-auth-external-secret.gomplate.yaml | kubectl apply -f -
echo "Waiting for ExternalSecret to sync..."
kubectl wait --for=condition=Ready externalsecret/temporal-web-auth \
-n ${TEMPORAL_NAMESPACE} --timeout=60s
echo "ExternalSecret created successfully"
else
echo "External Secrets Operator not found. Creating Kubernetes Secret directly..."
kubectl delete secret temporal-web-auth -n ${TEMPORAL_NAMESPACE} --ignore-not-found
kubectl create secret generic temporal-web-auth -n ${TEMPORAL_NAMESPACE} \
--from-literal=TEMPORAL_AUTH_CLIENT_ID="${oauth_client_id}" \
--from-literal=TEMPORAL_AUTH_CLIENT_SECRET="${oauth_client_secret}"
if helm status vault -n ${K8S_VAULT_NAMESPACE} &>/dev/null; then
just vault::put keycloak/client/temporal \
client_id="${oauth_client_id}" \
client_secret="${oauth_client_secret}"
fi
echo "Kubernetes Secret created successfully"
fi
kubectl delete secret temporal-oauth-temp -n ${TEMPORAL_NAMESPACE} --ignore-not-found
# Delete Keycloak auth secret
delete-keycloak-auth-secret:
kubectl delete externalsecret temporal-web-auth -n ${TEMPORAL_NAMESPACE} --ignore-not-found
kubectl delete secret temporal-web-auth -n ${TEMPORAL_NAMESPACE} --ignore-not-found
# Initialize Temporal database schema
init-schema:
#!/bin/bash
set -euo pipefail
echo "Initializing Temporal database schema..."
PG_HOST="postgres-cluster-rw.postgres"
PG_PORT="5432"
PG_USER=$(just vault::get temporal/db username)
PG_PASSWORD=$(just vault::get temporal/db password)
POD_NAME=$(kubectl get pods -n ${TEMPORAL_NAMESPACE} -l app.kubernetes.io/name=temporal-admintools \
-o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
if [ -z "${POD_NAME}" ]; then
echo "Admin tools pod not found. Running schema setup job..."
exit 0
fi
echo "Setting up main database schema..."
kubectl exec -n ${TEMPORAL_NAMESPACE} ${POD_NAME} -- \
temporal-sql-tool --plugin postgres12 \
--endpoint ${PG_HOST} --port ${PG_PORT} \
--user ${PG_USER} --password ${PG_PASSWORD} \
--database temporal \
setup-schema -v 0.0
kubectl exec -n ${TEMPORAL_NAMESPACE} ${POD_NAME} -- \
temporal-sql-tool --plugin postgres12 \
--endpoint ${PG_HOST} --port ${PG_PORT} \
--user ${PG_USER} --password ${PG_PASSWORD} \
--database temporal \
update-schema -d /etc/temporal/schema/postgresql/v12/temporal/versioned
echo "Setting up visibility database schema..."
kubectl exec -n ${TEMPORAL_NAMESPACE} ${POD_NAME} -- \
temporal-sql-tool --plugin postgres12 \
--endpoint ${PG_HOST} --port ${PG_PORT} \
--user ${PG_USER} --password ${PG_PASSWORD} \
--database temporal_visibility \
setup-schema -v 0.0
kubectl exec -n ${TEMPORAL_NAMESPACE} ${POD_NAME} -- \
temporal-sql-tool --plugin postgres12 \
--endpoint ${PG_HOST} --port ${PG_PORT} \
--user ${PG_USER} --password ${PG_PASSWORD} \
--database temporal_visibility \
update-schema -d /etc/temporal/schema/postgresql/v12/visibility/versioned
echo "Schema initialization complete."
# Install Temporal
install:
#!/bin/bash
set -euo pipefail
while [ -z "${TEMPORAL_HOST}" ]; do
TEMPORAL_HOST=$(gum input --prompt="Temporal host (FQDN): " --width=80 \
--placeholder="e.g., temporal.example.com")
done
while [ -z "${KEYCLOAK_HOST}" ]; do
KEYCLOAK_HOST=$(gum input --prompt="Keycloak host (FQDN): " --width=80 \
--placeholder="e.g., auth.example.com")
done
if helm status kube-prometheus-stack -n ${PROMETHEUS_NAMESPACE} &>/dev/null; then
if [ -z "${MONITORING_ENABLED}" ]; then
if gum confirm "Enable Prometheus monitoring?"; then
MONITORING_ENABLED="true"
fi
fi
fi
echo "Installing Temporal..."
just create-namespace
kubectl label namespace ${TEMPORAL_NAMESPACE} \
pod-security.kubernetes.io/enforce=baseline --overwrite
if [ "${MONITORING_ENABLED}" = "true" ]; then
kubectl label namespace ${TEMPORAL_NAMESPACE} \
buun.channel/enable-monitoring=true --overwrite
fi
echo "Setting up PostgreSQL database..."
just create-postgres-user-and-db
just create-postgres-secret
echo "Setting up Keycloak OIDC authentication..."
just create-keycloak-client
just create-keycloak-auth-secret
echo "Generating Helm values..."
just add-helm-repo
gomplate -f temporal-values.gomplate.yaml -o temporal-values.yaml
echo "Installing Temporal Helm chart..."
helm upgrade --cleanup-on-fail --install temporal temporal/temporal \
--version ${TEMPORAL_CHART_VERSION} -n ${TEMPORAL_NAMESPACE} --wait \
-f temporal-values.yaml --timeout 15m
echo "Configuring dynamic config for Worker Insights..."
kubectl patch configmap temporal-dynamic-config -n ${TEMPORAL_NAMESPACE} --type merge -p '{
"data": {
"dynamic_config.yaml": "frontend.WorkerHeartbeatsEnabled:\n - value: true\nfrontend.ListWorkersEnabled:\n - value: true\n"
}
}'
echo "Restarting frontend to apply dynamic config..."
kubectl rollout restart deployment temporal-frontend -n ${TEMPORAL_NAMESPACE}
kubectl rollout status deployment temporal-frontend -n ${TEMPORAL_NAMESPACE} --timeout=120s
echo ""
echo "Temporal installed successfully!"
echo "Access Temporal Web UI at: https://${TEMPORAL_HOST}"
echo ""
echo "OIDC authentication is configured with Keycloak."
echo "Users can login with their Keycloak credentials."
# Upgrade Temporal
upgrade:
#!/bin/bash
set -euo pipefail
while [ -z "${TEMPORAL_HOST}" ]; do
TEMPORAL_HOST=$(gum input --prompt="Temporal host (FQDN): " --width=80)
done
while [ -z "${KEYCLOAK_HOST}" ]; do
KEYCLOAK_HOST=$(gum input --prompt="Keycloak host (FQDN): " --width=80)
done
if helm status kube-prometheus-stack -n ${PROMETHEUS_NAMESPACE} &>/dev/null; then
if [ -z "${MONITORING_ENABLED}" ]; then
if gum confirm "Enable Prometheus monitoring?"; then
MONITORING_ENABLED="true"
fi
fi
fi
if [ "${MONITORING_ENABLED}" = "true" ]; then
kubectl label namespace ${TEMPORAL_NAMESPACE} \
buun.channel/enable-monitoring=true --overwrite
fi
echo "Upgrading Temporal..."
gomplate -f temporal-values.gomplate.yaml -o temporal-values.yaml
helm upgrade temporal temporal/temporal \
--version ${TEMPORAL_CHART_VERSION} -n ${TEMPORAL_NAMESPACE} --wait \
-f temporal-values.yaml --timeout 15m
echo "Configuring dynamic config for Worker Insights..."
kubectl patch configmap temporal-dynamic-config -n ${TEMPORAL_NAMESPACE} --type merge -p '{
"data": {
"dynamic_config.yaml": "frontend.WorkerHeartbeatsEnabled:\n - value: true\nfrontend.ListWorkersEnabled:\n - value: true\n"
}
}'
echo "Restarting frontend to apply dynamic config..."
kubectl rollout restart deployment temporal-frontend -n ${TEMPORAL_NAMESPACE}
kubectl rollout status deployment temporal-frontend -n ${TEMPORAL_NAMESPACE} --timeout=120s
echo ""
echo "Temporal upgraded successfully!"
echo "Access Temporal Web UI at: https://${TEMPORAL_HOST}"
# Uninstall Temporal (delete-data: true to delete database and Vault secrets)
uninstall delete-data='false':
#!/bin/bash
set -euo pipefail
if ! gum confirm "Uninstall Temporal?"; then
echo "Cancelled."
exit 0
fi
echo "Uninstalling Temporal..."
helm uninstall temporal -n ${TEMPORAL_NAMESPACE} --ignore-not-found --wait
just delete-keycloak-auth-secret || true
just delete-keycloak-client || true
just delete-postgres-secret
just delete-namespace
if [ "{{ delete-data }}" = "true" ]; then
echo "Deleting database and Vault secrets..."
just postgres::delete-db temporal || true
just postgres::delete-db temporal_visibility || true
just postgres::delete-user temporal || true
just vault::delete temporal/db || true
just vault::delete keycloak/client/temporal || true
echo "Temporal uninstalled with all data deleted."
else
echo "Temporal uninstalled."
echo ""
echo "Note: The following resources were NOT deleted:"
echo " - PostgreSQL user and databases (temporal, temporal_visibility)"
echo " - Vault secrets (temporal/db, keycloak/client/temporal)"
echo ""
echo "To delete all data, run:"
echo " just temporal::uninstall true"
fi
# Create a Temporal namespace (workflow namespace, not Kubernetes)
create-temporal-namespace name='' retention='3d':
#!/bin/bash
set -euo pipefail
name="{{ name }}"
retention="{{ retention }}"
while [ -z "${name}" ]; do
name=$(gum input --prompt="Namespace name: " --width=80 --placeholder="e.g., default")
done
POD_NAME=$(kubectl get pods -n ${TEMPORAL_NAMESPACE} -l app.kubernetes.io/name=temporal-admintools \
-o jsonpath='{.items[0].metadata.name}')
kubectl exec -n ${TEMPORAL_NAMESPACE} ${POD_NAME} -- \
tctl --namespace "${name}" namespace register --retention "${retention}"
echo "Namespace '${name}' created with retention ${retention}."
# List Temporal namespaces
list-temporal-namespaces:
#!/bin/bash
set -euo pipefail
POD_NAME=$(kubectl get pods -n ${TEMPORAL_NAMESPACE} -l app.kubernetes.io/name=temporal-admintools \
-o jsonpath='{.items[0].metadata.name}')
kubectl exec -n ${TEMPORAL_NAMESPACE} ${POD_NAME} -- tctl namespace list
# Get Temporal cluster info
cluster-info:
#!/bin/bash
set -euo pipefail
POD_NAME=$(kubectl get pods -n ${TEMPORAL_NAMESPACE} -l app.kubernetes.io/name=temporal-admintools \
-o jsonpath='{.items[0].metadata.name}')
kubectl exec -n ${TEMPORAL_NAMESPACE} ${POD_NAME} -- tctl cluster health

View File

@@ -0,0 +1,22 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: temporal-web-auth
namespace: {{ .Env.TEMPORAL_NAMESPACE }}
spec:
refreshInterval: 1h
secretStoreRef:
name: vault-secret-store
kind: ClusterSecretStore
target:
name: temporal-web-auth
creationPolicy: Owner
data:
- secretKey: TEMPORAL_AUTH_CLIENT_ID
remoteRef:
key: keycloak/client/temporal
property: client_id
- secretKey: TEMPORAL_AUTH_CLIENT_SECRET
remoteRef:
key: keycloak/client/temporal
property: client_secret

View File

@@ -0,0 +1,18 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: temporal-postgres-auth
namespace: {{ .Env.TEMPORAL_NAMESPACE }}
spec:
refreshInterval: 1h
secretStoreRef:
name: vault-secret-store
kind: ClusterSecretStore
target:
name: temporal-postgres-auth
creationPolicy: Owner
data:
- secretKey: password
remoteRef:
key: temporal/db
property: password

View File

@@ -0,0 +1,204 @@
server:
replicaCount: 1
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
config:
persistence:
default:
driver: "sql"
sql:
driver: "postgres12"
host: "postgres-cluster-rw.postgres"
port: 5432
database: temporal
user: temporal
existingSecret: temporal-postgres-auth
maxConns: 20
maxIdleConns: 20
maxConnLifetime: "1h"
visibility:
driver: "sql"
sql:
driver: "postgres12"
host: "postgres-cluster-rw.postgres"
port: 5432
database: temporal_visibility
user: temporal
existingSecret: temporal-postgres-auth
maxConns: 20
maxIdleConns: 20
maxConnLifetime: "1h"
{{- if .Env.MONITORING_ENABLED }}
metrics:
serviceMonitor:
enabled: true
additionalLabels:
release: kube-prometheus-stack
{{- end }}
frontend:
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
history:
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
matching:
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
worker:
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
admintools:
enabled: true
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 200m
memory: 256Mi
web:
enabled: true
replicaCount: 1
service:
type: ClusterIP
port: 8080
ingress:
enabled: true
className: traefik
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
hosts:
- {{ .Env.TEMPORAL_HOST }}
tls:
- secretName: temporal-web-tls
hosts:
- {{ .Env.TEMPORAL_HOST }}
additionalEnv:
- name: TEMPORAL_AUTH_ENABLED
value: "true"
- name: TEMPORAL_AUTH_PROVIDER_URL
value: "https://{{ .Env.KEYCLOAK_HOST }}/realms/{{ .Env.KEYCLOAK_REALM }}"
- name: TEMPORAL_AUTH_SCOPES
value: "openid,profile,email"
- name: TEMPORAL_AUTH_CALLBACK_URL
value: "https://{{ .Env.TEMPORAL_HOST }}/auth/sso/callback"
additionalEnvSecretName: temporal-web-auth
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 200m
memory: 256Mi
cassandra:
enabled: false
mysql:
enabled: false
postgresql:
enabled: false
elasticsearch:
enabled: false
prometheus:
enabled: false
grafana:
enabled: false
schema:
createDatabase:
enabled: false
setup:
enabled: true
backoffLimit: 100
update:
enabled: true
backoffLimit: 100
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false