From 6c6f0e5aa1911c7b3a58fa50a4c1eb73ce8a0018 Mon Sep 17 00:00:00 2001
From: Masaki Yatsu
Date: Wed, 3 Dec 2025 14:08:39 +0900
Subject: [PATCH] feat(ollama): install Ollama

---
 justfile                    |   1 +
 ollama/.gitignore           |   1 +
 ollama/justfile             | 125 ++++++++++++++++++++++++++++++++++++
 ollama/values.gomplate.yaml |  20 ++++++
 4 files changed, 147 insertions(+)
 create mode 100644 ollama/.gitignore
 create mode 100644 ollama/justfile
 create mode 100644 ollama/values.gomplate.yaml

diff --git a/justfile b/justfile
index 3ffc0ed..a23add4 100644
--- a/justfile
+++ b/justfile
@@ -29,6 +29,7 @@ mod minio
 mod nvidia-device-plugin
 mod fairwinds-polaris
 mod oauth2-proxy
+mod ollama
 mod postgres
 mod prometheus
 mod qdrant
diff --git a/ollama/.gitignore b/ollama/.gitignore
new file mode 100644
index 0000000..7f47975
--- /dev/null
+++ b/ollama/.gitignore
@@ -0,0 +1 @@
+values.yaml
diff --git a/ollama/justfile b/ollama/justfile
new file mode 100644
index 0000000..534924c
--- /dev/null
+++ b/ollama/justfile
@@ -0,0 +1,125 @@
+set fallback := true
+
+export OLLAMA_NAMESPACE := env("OLLAMA_NAMESPACE", "ollama")
+export OLLAMA_CHART_VERSION := env("OLLAMA_CHART_VERSION", "1.35.0")
+export OLLAMA_HOST := env("OLLAMA_HOST", "")
+export OLLAMA_GPU_ENABLED := env("OLLAMA_GPU_ENABLED", "")
+export OLLAMA_GPU_TYPE := env("OLLAMA_GPU_TYPE", "nvidia")
+export OLLAMA_GPU_COUNT := env("OLLAMA_GPU_COUNT", "1")
+export OLLAMA_MODELS := env("OLLAMA_MODELS", "")
+export OLLAMA_STORAGE_SIZE := env("OLLAMA_STORAGE_SIZE", "30Gi")
+
+[private]
+default:
+    @just --list --unsorted --list-submodules
+
+# Add Helm repository
+add-helm-repo:
+    helm repo add ollama https://otwld.github.io/ollama-helm
+    helm repo update ollama
+
+# Remove Helm repository
+remove-helm-repo:
+    helm repo remove ollama
+
+# Create Ollama namespace
+create-namespace:
+    #!/bin/bash
+    set -euo pipefail
+    if ! kubectl get namespace ${OLLAMA_NAMESPACE} &>/dev/null; then
+        kubectl create namespace ${OLLAMA_NAMESPACE}
+    fi
+
+# Delete Ollama namespace
+delete-namespace:
+    kubectl delete namespace ${OLLAMA_NAMESPACE} --ignore-not-found
+
+# Install Ollama
+install:
+    #!/bin/bash
+    set -euo pipefail
+
+    just create-namespace
+    just add-helm-repo
+
+    if [ -z "${OLLAMA_GPU_ENABLED}" ]; then
+        if gum confirm "Enable GPU support?"; then
+            OLLAMA_GPU_ENABLED="true"
+        else
+            OLLAMA_GPU_ENABLED="false"
+        fi
+    fi
+
+    if [ -z "${OLLAMA_MODELS}" ]; then
+        OLLAMA_MODELS=$(
+            gum input --prompt="Models to pull (comma-separated): " --width=100 \
+                --placeholder="e.g., llama3.2:1b,deepseek-r1:7b" \
+                --value="llama3.2:1b"
+        )
+    fi
+
+    gomplate -f values.gomplate.yaml -o values.yaml
+    helm upgrade --install ollama ollama/ollama \
+        --version ${OLLAMA_CHART_VERSION} -n ${OLLAMA_NAMESPACE} --wait \
+        -f values.yaml
+
+    echo ""
+    echo "Ollama installed successfully"
+    echo "GPU enabled: ${OLLAMA_GPU_ENABLED}"
+    echo "Models: ${OLLAMA_MODELS}"
+
+# Upgrade Ollama
+upgrade:
+    #!/bin/bash
+    set -euo pipefail
+
+    if [ -z "${OLLAMA_GPU_ENABLED}" ]; then
+        if gum confirm "Enable GPU support?"; then
+            OLLAMA_GPU_ENABLED="true"
+        else
+            OLLAMA_GPU_ENABLED="false"
+        fi
+    fi
+
+    if [ -z "${OLLAMA_MODELS}" ]; then
+        OLLAMA_MODELS=$(
+            gum input --prompt="Models to pull (comma-separated): " --width=100 \
+                --placeholder="e.g., llama3.2:1b,deepseek-r1:7b" \
+                --value="llama3.2:1b"
+        )
+    fi
+
+    gomplate -f values.gomplate.yaml -o values.yaml
+    helm upgrade ollama ollama/ollama \
+        --version ${OLLAMA_CHART_VERSION} -n ${OLLAMA_NAMESPACE} --wait \
+        -f values.yaml
+
+    echo "Ollama upgraded successfully"
+
+# Uninstall Ollama
+uninstall:
+    #!/bin/bash
+    set -euo pipefail
+    helm uninstall ollama -n ${OLLAMA_NAMESPACE} --wait --ignore-not-found
+    just delete-namespace
+    echo "Ollama uninstalled"
+
+# Pull a model
+pull model:
+    kubectl exec -it -n ${OLLAMA_NAMESPACE} deploy/ollama -- ollama pull {{ model }}
+
+# Run a model
+run model:
+    kubectl exec -it -n ${OLLAMA_NAMESPACE} deploy/ollama -- ollama run {{ model }}
+
+# List models
+list:
+    kubectl exec -it -n ${OLLAMA_NAMESPACE} deploy/ollama -- ollama list
+
+# Show Ollama logs
+logs:
+    kubectl logs -n ${OLLAMA_NAMESPACE} deploy/ollama -f
+
+# Get pod status
+status:
+    kubectl get pods -n ${OLLAMA_NAMESPACE}
diff --git a/ollama/values.gomplate.yaml b/ollama/values.gomplate.yaml
new file mode 100644
index 0000000..99e0ff8
--- /dev/null
+++ b/ollama/values.gomplate.yaml
@@ -0,0 +1,20 @@
+ollama:
+  gpu:
+    enabled: {{ if eq .Env.OLLAMA_GPU_ENABLED "true" }}true{{ else }}false{{ end }}
+    type: {{ .Env.OLLAMA_GPU_TYPE }}
+    number: {{ .Env.OLLAMA_GPU_COUNT }}
+
+  models:
+    # https://ollama.com/search
+    pull:
+{{- $models := .Env.OLLAMA_MODELS | strings.Split "," }}
+{{- range $models }}
+{{- $model := . | strings.TrimSpace }}
+{{- if ne $model "" }}
+      - {{ $model }}
+{{- end }}
+{{- end }}
+
+persistentVolume:
+  enabled: true
+  size: {{ .Env.OLLAMA_STORAGE_SIZE }}
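
For reference: with the module registered through "mod ollama" in the root justfile, the recipes above are invoked from the repository root, e.g. "just ollama::install" or "just ollama::pull llama3.2:1b". Below is a rough sketch of the values.yaml that gomplate would render from values.gomplate.yaml when OLLAMA_GPU_ENABLED=true, the nvidia GPU defaults are kept, and OLLAMA_MODELS is set to the placeholder "llama3.2:1b,deepseek-r1:7b" (illustrative only; exact whitespace in the rendered file may differ):

    # Illustrative render of values.gomplate.yaml; model list taken from the
    # prompt placeholder, all other values are the justfile defaults.
    ollama:
      gpu:
        enabled: true
        type: nvidia
        number: 1

      models:
        # https://ollama.com/search
        pull:
          - llama3.2:1b
          - deepseek-r1:7b

    persistentVolume:
      enabled: true
      size: 30Gi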