adding longhorn and loki

This commit is contained in:
baschno
2025-01-03 00:02:11 +01:00
parent fc3af3dee4
commit 85a5b94dd8
7 changed files with 2853 additions and 0 deletions

View File

@@ -0,0 +1,648 @@
# -- Overrides the chart's name
nameOverride: null
# -- Overrides the chart's computed fullname
fullnameOverride: null
global:
# -- Allow parent charts to override registry hostname
imageRegistry: ""
# -- Allow parent charts to override registry credentials
imagePullSecrets: []
daemonset:
# -- Deploys Promtail as a DaemonSet
enabled: true
autoscaling:
# -- Creates a VerticalPodAutoscaler for the daemonset
enabled: false
# Recommender responsible for generating recommendations for the object.
# The list should be empty (the default recommender will then generate the recommendation)
# or contain exactly one recommender.
# recommenders:
# - name: custom-recommender-performance
# -- List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
controlledResources: []
# Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
# controlledValues: RequestsAndLimits
# -- Defines the max allowed resources for the pod
maxAllowed: {}
# cpu: 200m
# memory: 100Mi
# -- Defines the min allowed resources for the pod
minAllowed: {}
# cpu: 200m
# memory: 100Mi
# updatePolicy:
# Specifies the minimal number of replicas which need to be alive for the VPA Updater to attempt pod eviction
# minReplicas: 1
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
# updateMode: Auto
deployment:
# -- Deploys Promtail as a Deployment
enabled: false
replicaCount: 1
autoscaling:
# -- Creates a HorizontalPodAutoscaler for the deployment
enabled: false
minReplicas: 1
maxReplicas: 10
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage:
# behavior: {}
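# A minimal, illustrative sketch of an HPA `behavior` block (standard
# autoscaling/v2 fields; the values below are assumptions, not chart defaults):
# behavior:
#   scaleDown:
#     stabilizationWindowSeconds: 300
#     policies:
#       - type: Pods
#         value: 1
#         periodSeconds: 60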
# -- Set deployment object update strategy
strategy:
type: RollingUpdate
service:
enabled: false
# -- Labels for the service
labels: {}
# -- Annotations for the service
annotations: {}
secret:
# -- Labels for the Secret
labels: {}
# -- Annotations for the Secret
annotations: {}
configmap:
# -- If enabled, the Promtail config will be created as a ConfigMap instead of a Secret
enabled: false
initContainer: []
# # -- Specifies whether the init container for setting inotify max user instances is to be enabled
# - name: init
# # -- Docker registry, image and tag for the init container image
# image: docker.io/busybox:1.33
# # -- Docker image pull policy for the init container image
# imagePullPolicy: IfNotPresent
# # -- The inotify max user instances to configure
# command:
# - sh
# - -c
# - sysctl -w fs.inotify.max_user_instances=128
# securityContext:
# privileged: true
image:
# -- The Docker registry
registry: docker.io
# -- Docker image repository
repository: grafana/promtail
# -- Overrides the image tag whose default is the chart's appVersion
tag: ""
# -- Docker image pull policy
pullPolicy: IfNotPresent
# -- Image pull secrets for Docker images
imagePullSecrets: []
# -- hostAliases to add
hostAliases: []
# - ip: 1.2.3.4
# hostnames:
# - domain.tld
# -- Controls whether the pod has the `hostNetwork` flag set.
hostNetwork: null
# -- Annotations for the DaemonSet
annotations: {}
# -- Number of old revisions to retain to allow rollback (if not set, the default Kubernetes value of 10 is used)
# revisionHistoryLimit: 1
# -- The update strategy for the DaemonSet
updateStrategy: {}
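# For example, a rolling update that replaces one pod per node at a time
# (standard DaemonSet updateStrategy fields; the value is only an illustration):
# updateStrategy:
#   type: RollingUpdate
#   rollingUpdate:
#     maxUnavailable: 1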
# -- Pod labels
podLabels: {}
# -- Pod annotations
podAnnotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "http-metrics"
# -- The name of the PriorityClass
priorityClassName: null
# -- Liveness probe
livenessProbe: {}
# -- Readiness probe
# @default -- See `values.yaml`
readinessProbe:
failureThreshold: 5
httpGet:
path: "{{ printf `%s/ready` .Values.httpPathPrefix }}"
port: http-metrics
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
# -- Resource requests and limits
resources: {}
# limits:
# cpu: 200m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# -- The security context for pods
podSecurityContext:
runAsUser: 0
runAsGroup: 0
# -- The security context for containers
containerSecurityContext:
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
allowPrivilegeEscalation: false
rbac:
# -- Specifies whether RBAC resources are to be created
create: true
# -- Specifies whether a PodSecurityPolicy is to be created
pspEnabled: false
# -- The name of the Namespace to deploy into
# If not set, `.Release.Namespace` is used
namespace: null
serviceAccount:
# -- Specifies whether a ServiceAccount should be created
create: true
# -- The name of the ServiceAccount to use.
# If not set and `create` is true, a name is generated using the fullname template
name: null
# -- Image pull secrets for the service account
imagePullSecrets: []
# -- Annotations for the service account
annotations: {}
# -- Automatically mount a ServiceAccount's API credentials
automountServiceAccountToken: true
# -- Automatically mount API credentials for a particular Pod
automountServiceAccountToken: true
# -- Node selector for pods
nodeSelector: {}
# -- Affinity configuration for pods
affinity: {}
# -- Tolerations for pods. By default, pods tolerate the master/control-plane taints so they can also be scheduled on those nodes.
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
# -- Default volumes that are mounted into pods. In most cases, these should not be changed.
# Use `extraVolumes`/`extraVolumeMounts` for additional custom volumes.
# @default -- See `values.yaml`
defaultVolumes:
- name: run
hostPath:
path: /run/promtail
- name: containers
hostPath:
path: /var/lib/docker/containers
- name: pods
hostPath:
path: /var/log/pods
# -- Default volume mounts. Corresponds to `defaultVolumes`.
# @default -- See `values.yaml`
defaultVolumeMounts:
- name: run
mountPath: /run/promtail
- name: containers
mountPath: /var/lib/docker/containers
readOnly: true
- name: pods
mountPath: /var/log/pods
readOnly: true
# -- Extra volumes to be added in addition to those specified under `defaultVolumes`.
extraVolumes: []
# -- Extra volume mounts, in addition to those specified under `defaultVolumeMounts`. Corresponds to `extraVolumes`.
extraVolumeMounts: []
# -- Extra args for the Promtail container.
extraArgs: []
# -- Example:
# -- extraArgs:
# -- - -client.external-labels=hostname=$(HOSTNAME)
# -- Extra environment variables. Set up tracing environment variables here if .Values.config.enableTracing is true.
# Tracing can currently only be configured via environment variables. See:
# https://grafana.com/docs/loki/latest/clients/promtail/configuration/#tracing_config
# https://www.jaegertracing.io/docs/1.16/client-features/
extraEnv: []
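# For example, tracing could be wired up via the Jaeger client environment
# variables referenced above (the agent hostname below is an assumption):
# extraEnv:
#   - name: JAEGER_AGENT_HOST
#     value: jaeger-agent.tracing.svc.cluster.local
#   - name: JAEGER_SAMPLER_TYPE
#     value: const
#   - name: JAEGER_SAMPLER_PARAM
#     value: "1"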
# -- Extra environment variables from secrets or configmaps
extraEnvFrom: []
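# For example, to load all keys of an existing Secret as environment variables
# (the Secret name is hypothetical):
# extraEnvFrom:
#   - secretRef:
#       name: promtail-client-credentials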
# -- Configure enableServiceLinks in pod
enableServiceLinks: true
# ServiceMonitor configuration
serviceMonitor:
# -- If enabled, ServiceMonitor resources for Prometheus Operator are created
enabled: false
# -- Alternative namespace for ServiceMonitor resources
namespace: null
# -- Namespace selector for ServiceMonitor resources
namespaceSelector: {}
# -- ServiceMonitor annotations
annotations: {}
# -- Additional ServiceMonitor labels
labels: {}
# -- ServiceMonitor scrape interval
interval: null
# -- ServiceMonitor scrape timeout in Go duration format (e.g. 15s)
scrapeTimeout: null
# -- ServiceMonitor relabel configs to apply to samples before scraping
# https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
# (defines `relabel_configs`)
relabelings: []
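# A minimal, illustrative relabeling (Prometheus Operator RelabelConfig fields):
# relabelings:
#   - sourceLabels: [__meta_kubernetes_pod_node_name]
#     targetLabel: node_name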
# -- ServiceMonitor relabel configs to apply to samples as the last
# step before ingestion
# https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
# (defines `metric_relabel_configs`)
metricRelabelings: []
# -- ServiceMonitor will add labels from the service to the Prometheus metric
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitorspec
targetLabels: []
# -- ServiceMonitor will use http by default, but you can pick https as well
scheme: http
# -- ServiceMonitor will use these tlsConfig settings to make the health check requests
tlsConfig: null
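# A hedged example of tlsConfig when scraping over https (Prometheus Operator
# TLSConfig fields; the server name is an assumption):
# tlsConfig:
#   serverName: promtail.example.local
#   insecureSkipVerify: true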
# -- Prometheus rules will be deployed for alerting purposes
prometheusRule:
enabled: false
additionalLabels: {}
# namespace:
rules: []
# - alert: PromtailRequestErrors
# expr: 100 * sum(rate(promtail_request_duration_seconds_count{status_code=~"5..|failed"}[1m])) by (namespace, job, route, instance) / sum(rate(promtail_request_duration_seconds_count[1m])) by (namespace, job, route, instance) > 10
# for: 5m
# labels:
# severity: critical
# annotations:
# description: |
# The {{ $labels.job }} {{ $labels.route }} is experiencing
# {{ printf \"%.2f\" $value }} errors.
# VALUE = {{ $value }}
# LABELS = {{ $labels }}
# summary: Promtail request errors (instance {{ $labels.instance }})
# - alert: PromtailRequestLatency
# expr: histogram_quantile(0.99, sum(rate(promtail_request_duration_seconds_bucket[5m])) by (le)) > 1
# for: 5m
# labels:
# severity: critical
# annotations:
# summary: Promtail request latency (instance {{ $labels.instance }})
# description: |
# The {{ $labels.job }} {{ $labels.route }} is experiencing
# {{ printf \"%.2f\" $value }}s 99th percentile latency.
# VALUE = {{ $value }}
# LABELS = {{ $labels }}
# Extra containers created as part of a Promtail Deployment resource
# - spec for Container:
# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core
#
# Note that the key is used as the `name` field, i.e. below will create a
# container named `promtail-proxy`.
extraContainers: {}
# promtail-proxy:
# image: nginx
# ...
# -- Configure additional ports and services. For each configured port, a corresponding service is created.
# See values.yaml for details
extraPorts: {}
# syslog:
# name: tcp-syslog
# annotations: {}
# labels: {}
# containerPort: 1514
# protocol: TCP
# service:
# type: ClusterIP
# clusterIP: null
# port: 1514
# externalIPs: []
# nodePort: null
# loadBalancerIP: null
# loadBalancerSourceRanges: []
# externalTrafficPolicy: null
# ingress:
# # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
# # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# # ingressClassName: nginx
# # Values can be templated
# annotations: {}
# # kubernetes.io/ingress.class: nginx
# # kubernetes.io/tls-acme: "true"
# paths: "/"
# hosts:
# - chart-example.local
#
# tls: []
# # - secretName: chart-example-tls
# # hosts:
# # - chart-example.local
# -- PodSecurityPolicy configuration.
# @default -- See `values.yaml`
podSecurityPolicy:
privileged: true
allowPrivilegeEscalation: true
volumes:
- 'secret'
- 'hostPath'
- 'downwardAPI'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
# -- Section for crafting Promtail's config file. The only directly relevant value is `config.file`
# which is a templated string that references the other values and snippets below this key.
# @default -- See `values.yaml`
config:
# -- Enable Promtail config from Helm chart
# Set `configmap.enabled: true` and this to `false` to manage your own Promtail config
# See default config in `values.yaml`
enabled: true
# -- The log level of the Promtail server
# Must be referenced in `config.file` to configure `server.log_level`
# See default config in `values.yaml`
logLevel: info
# -- The log format of the Promtail server
# Must be referenced in `config.file` to configure `server.log_format`
# Valid formats: `logfmt, json`
# See default config in `values.yaml`
logFormat: logfmt
# -- The port of the Promtail server
# Must be referenced in `config.file` to configure `server.http_listen_port`
# See default config in `values.yaml`
serverPort: 3101
# -- The config of clients of the Promtail server
# Must be referenced in `config.file` to configure `clients`
# @default -- See `values.yaml`
clients:
- url: http://loki-loki-distributed-gateway.grafana-loki.svc.cluster.local/loki/api/v1/push
# -- Configures where Promtail will save its positions file, to resume reading after restarts.
# Must be referenced in `config.file` to configure `positions`
positions:
filename: /run/promtail/positions.yaml
# -- The config to enable tracing
enableTracing: false
# -- A section of reusable snippets that can be referenced in `config.file`.
# Custom snippets may be added in order to reduce redundancy.
# This is especially helpful when multiple `kubernetes_sd_configs` are used, which usually have large parts in common.
# @default -- See `values.yaml`
snippets:
pipelineStages:
- cri: {}
common:
- action: replace
source_labels:
- __meta_kubernetes_pod_node_name
target_label: node_name
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: namespace
- action: replace
replacement: $1
separator: /
source_labels:
- namespace
- app
target_label: job
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: pod
- action: replace
source_labels:
- __meta_kubernetes_pod_container_name
target_label: container
- action: replace
replacement: /var/log/pods/*$1/*.log
separator: /
source_labels:
- __meta_kubernetes_pod_uid
- __meta_kubernetes_pod_container_name
target_label: __path__
- action: replace
replacement: /var/log/pods/*$1/*.log
regex: true/(.*)
separator: /
source_labels:
- __meta_kubernetes_pod_annotationpresent_kubernetes_io_config_hash
- __meta_kubernetes_pod_annotation_kubernetes_io_config_hash
- __meta_kubernetes_pod_container_name
target_label: __path__
# If set to true, adds an additional label for the scrape job.
# This helps debug the Promtail config.
addScrapeJobLabel: false
# -- You can put here any keys that will be directly added to the config file's 'limits_config' block.
# @default -- empty
extraLimitsConfig: ""
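# For example, to rate-limit how fast log lines are read (keys assumed from the
# Promtail limits_config documentation; values are illustrative):
# extraLimitsConfig: |
#   readline_rate_enabled: true
#   readline_rate: 10000
#   readline_burst: 20000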
# -- You can put here any keys that will be directly added to the config file's 'server' block.
# @default -- empty
extraServerConfigs: ""
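# For example (server block keys assumed from the Promtail server documentation;
# values are illustrative):
# extraServerConfigs: |
#   graceful_shutdown_timeout: 30s
#   http_server_read_timeout: 60s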
# -- You can put here any additional scrape configs you want to add to the config file.
# @default -- empty
extraScrapeConfigs: ""
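# For example, a sketch of an additional systemd-journal scrape job (assumes the
# journal path is mounted via extraVolumes/extraVolumeMounts; labels are illustrative):
# extraScrapeConfigs: |
#   - job_name: journal
#     journal:
#       path: /var/log/journal
#       max_age: 12h
#       labels:
#         job: systemd-journal
#     relabel_configs:
#       - source_labels: ['__journal__systemd_unit']
#         target_label: unit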
# -- You can put here any additional relabel_configs to "kubernetes-pods" job
extraRelabelConfigs: []
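# For example, to stamp every scraped pod with a static cluster label
# (the label value is an assumption):
# extraRelabelConfigs:
#   - action: replace
#     replacement: my-cluster
#     target_label: cluster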
scrapeConfigs: |
# See also https://github.com/grafana/loki/blob/master/production/ksonnet/promtail/scrape_config.libsonnet for reference
- job_name: kubernetes-pods
pipeline_stages:
{{- toYaml .Values.config.snippets.pipelineStages | nindent 4 }}
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_controller_name
regex: ([0-9a-z-.]+?)(-[0-9a-f]{8,10})?
action: replace
target_label: __tmp_controller_name
- source_labels:
- __meta_kubernetes_pod_label_app_kubernetes_io_name
- __meta_kubernetes_pod_label_app
- __tmp_controller_name
- __meta_kubernetes_pod_name
regex: ^;*([^;]+)(;.*)?$
action: replace
target_label: app
- source_labels:
- __meta_kubernetes_pod_label_app_kubernetes_io_instance
- __meta_kubernetes_pod_label_instance
regex: ^;*([^;]+)(;.*)?$
action: replace
target_label: instance
- source_labels:
- __meta_kubernetes_pod_label_app_kubernetes_io_component
- __meta_kubernetes_pod_label_component
regex: ^;*([^;]+)(;.*)?$
action: replace
target_label: component
{{- if .Values.config.snippets.addScrapeJobLabel }}
- replacement: kubernetes-pods
target_label: scrape_job
{{- end }}
{{- toYaml .Values.config.snippets.common | nindent 4 }}
{{- with .Values.config.snippets.extraRelabelConfigs }}
{{- toYaml . | nindent 4 }}
{{- end }}
# -- Config file contents for Promtail.
# Must be configured as string.
# It is templated so it can be assembled from reusable snippets in order to avoid redundancy.
# @default -- See `values.yaml`
file: |
server:
log_level: {{ .Values.config.logLevel }}
log_format: {{ .Values.config.logFormat }}
http_listen_port: {{ .Values.config.serverPort }}
{{- with .Values.httpPathPrefix }}
http_path_prefix: {{ . }}
{{- end }}
{{- tpl .Values.config.snippets.extraServerConfigs . | nindent 2 }}
clients:
{{- tpl (toYaml .Values.config.clients) . | nindent 2 }}
positions:
{{- tpl (toYaml .Values.config.positions) . | nindent 2 }}
scrape_configs:
{{- tpl .Values.config.snippets.scrapeConfigs . | nindent 2 }}
{{- tpl .Values.config.snippets.extraScrapeConfigs . | nindent 2 }}
limits_config:
{{- tpl .Values.config.snippets.extraLimitsConfig . | nindent 2 }}
tracing:
enabled: {{ .Values.config.enableTracing }}
networkPolicy:
# -- Specifies whether Network Policies should be created
enabled: false
metrics:
# -- Specifies the Pods which are allowed to access the metrics port.
# As this is cross-namespace communication, you also need the namespaceSelector.
podSelector: {}
# -- Specifies the namespaces which are allowed to access the metrics port
namespaceSelector: {}
# -- Specifies specific network CIDRs which are allowed to access the metrics port.
# In case you use namespaceSelector, you also have to specify your kubelet networks here.
# The metrics ports are also used for probes.
cidrs: []
k8sApi:
# -- Specify the k8s API endpoint port
port: 8443
# -- Specifies specific network CIDRs you want to limit access to
cidrs: []
# -- Base path to serve all API routes from
httpPathPrefix: ""
sidecar:
configReloader:
enabled: false
image:
# -- The Docker registry for sidecar config-reloader
registry: ghcr.io
# -- Docker image repository for sidecar config-reloader
repository: jimmidyson/configmap-reload
# -- Docker image tag for sidecar config-reloader
tag: v0.12.0
# -- Docker image pull policy for sidecar config-reloader
pullPolicy: IfNotPresent
# -- Extra args for the config-reloader container.
extraArgs: []
# -- Extra environment variables for sidecar config-reloader
extraEnv: []
# -- Extra environment variables from secrets or configmaps for sidecar config-reloader
extraEnvFrom: []
# -- The security context for containers for sidecar config-reloader
containerSecurityContext:
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
allowPrivilegeEscalation: false
# -- Readiness probe for sidecar config-reloader
readinessProbe: {}
# -- Liveness probe for sidecar config-reloader
livenessProbe: {}
# -- Resource requests and limits for sidecar config-reloader
resources: {}
# limits:
# cpu: 200m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
config:
# -- The port of the config-reloader server
serverPort: 9533
serviceMonitor:
enabled: true
# -- Extra K8s manifests to deploy
extraObjects: []
# - apiVersion: "kubernetes-client.io/v1"
# kind: ExternalSecret
# metadata:
# name: promtail-secrets
# spec:
# backendType: gcpSecretsManager
# data:
# - key: promtail-oauth2-creds
# name: client_secret