# scale-catalog/incubator/mimir/0.2.0/ix_values.yaml
# Commit: Julian Haseleu 5d1ab94342 "cleanup" — 2024-07-14 21:54:53 +00:00
# (376 lines, 7.8 KiB, YAML)
# Container image for the parent chart's (disabled) main workload.
# `scratch` is intentional: this chart only wraps the bundled sub-charts below.
image:
  repository: tccr.io/truecharts/scratch
  pullPolicy: IfNotPresent
  tag: 18.0.1@sha256:49df5708d7fc085acf76c7868f10f25fa7ba947c8a7d3354a97207ba69c85cc3
# Disable the common chart's main service; the Mimir sub-chart manages its own.
service:
  main:
    enabled: false
    ports:
      main:
        enabled: false
# Disable the common chart's main workload/container; only sub-charts deploy pods.
workload:
  main:
    enabled: false
    podSpec:
      containers:
        main:
          enabled: false
# No web portal button for this chart.
portal:
  open:
    enabled: false
# Optional Grafana datasource provisioning ConfigMap.
# The `grafana_datasources: "1"` label lets a Grafana sidecar discover it.
# Values inside the block scalar are Helm template expressions rendered at install time.
configmap:
  grafana-datasource:
    enabled: "{{ if .Values.grafana.datasource.enabled }}true{{ else }}false{{ end }}"
    labels:
      grafana_datasources: "1"
    data:
      datasource.yaml: |-
        apiVersion: 1
        datasources:
          - name: Mimir
            type: prometheus
            uid: {{ .Values.grafana.datasource.uid | default "prometheus" }}
            url: http://{{ if ne .Release.Name "mimir" }}{{ .Release.Name }}-{{ end }}mimir-nginx.{{ .Release.Namespace }}/prometheus
            access: proxy
            isDefault: {{ .Values.grafana.datasource.default | default true }}
            jsonData:
              httpMethod: {{ .Values.grafana.datasource.httpMethod | default "POST" }}
              timeInterval: {{ .Values.grafana.datasource.scrapeInterval | default "30s" }}
              {{- if .Values.grafana.datasource.timeout }}
              timeout: {{ .Values.grafana.datasource.timeout }}
              {{- end }}
# Defaults consumed by the grafana-datasource ConfigMap template above.
grafana:
  datasource:
    enabled: true
    default: true
    httpMethod: "POST"
    scrapeInterval: "30s"
    uid: "prometheus"
# -- Mimir chart values. Resources are set to a minimum by default.
# NOTE(review): nesting reconstructed to match the grafana/mimir-distributed
# values schema — verify against that chart's values.yaml.
mimir:
  metaMonitoring:
    # Dashboard configuration for deploying Grafana dashboards for Mimir
    dashboards:
      enabled: true
      annotations: {}
    serviceMonitor:
      enabled: true
    grafanaAgent:
      enabled: true
      installOperator: false
      metrics:
        remote:
          # Self-monitoring metrics are remote-written back into this Mimir instance.
          url: http://{{ if ne .Release.Name "mimir" }}{{ .Release.Name }}-{{ end }}mimir-nginx.{{ .Release.Namespace }}/prometheus
          headers: {}
        scrapeK8s:
          enabled: false
  alertmanager:
    persistentVolume:
      enabled: true
    replicas: 2
    resources:
      limits:
        cpu: 1.4
        memory: 1.4Gi
      requests:
        cpu: 20m
        memory: 10Mi
    statefulSet:
      enabled: true
  compactor:
    persistentVolume:
      size: 20Gi
    resources:
      limits:
        cpu: 1.4
        memory: 2.1Gi
      requests:
        cpu: 20m
        memory: 10Mi
  distributor:
    replicas: 2
    resources:
      limits:
        cpu: 3.5
        memory: 5.7Gi
      requests:
        cpu: 20m
        memory: 10Mi
  ingester:
    persistentVolume:
      size: 50Gi
    replicas: 3
    resources:
      limits:
        cpu: 5
        memory: 12Gi
      requests:
        cpu: 20m
        memory: 10Mi
    topologySpreadConstraints: {}
    # Spread ingester replicas across nodes (hard anti-affinity on both the
    # legacy `target` label and the standard component label).
    affinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
                - key: target
                  operator: In
                  values:
                    - ingester
            topologyKey: 'kubernetes.io/hostname'
          - labelSelector:
              matchExpressions:
                - key: app.kubernetes.io/component
                  operator: In
                  values:
                    - ingester
            topologyKey: 'kubernetes.io/hostname'
    zoneAwareReplication:
      topologyKey: 'kubernetes.io/hostname'
  admin-cache:
    enabled: true
    replicas: 2
  chunks-cache:
    enabled: true
    replicas: 2
    resources:
      requests:
        cpu: 20m
        memory: 10Mi
  index-cache:
    enabled: true
    replicas: 3
  metadata-cache:
    enabled: true
  results-cache:
    enabled: true
  # Bundled MinIO provides the object-storage backend.
  minio:
    enabled: true
  overrides_exporter:
    replicas: 1
    resources:
      limits:
        cpu: 100m
        memory: 128Mi
      requests:
        cpu: 20m
        memory: 10Mi
  querier:
    replicas: 1
    resources:
      limits:
        cpu: 2.8
        memory: 5.6Gi
      requests:
        cpu: 20m
        memory: 10Mi
  query_frontend:
    replicas: 1
    resources:
      limits:
        cpu: 2.8
        memory: 2.8Gi
      requests:
        cpu: 20m
        memory: 10Mi
  ruler:
    replicas: 1
    resources:
      limits:
        cpu: 1.4
        memory: 2.8Gi
      requests:
        cpu: 20m
        memory: 10Mi
  store_gateway:
    persistentVolume:
      size: 10Gi
    replicas: 3
    resources:
      limits:
        cpu: 1.4
        memory: 2.1Gi
      requests:
        cpu: 20m
        memory: 10Mi
    topologySpreadConstraints: {}
    # Spread store-gateway replicas across nodes (same dual-label scheme as ingester).
    affinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
                - key: target
                  operator: In
                  values:
                    - store-gateway
            topologyKey: 'kubernetes.io/hostname'
          - labelSelector:
              matchExpressions:
                - key: app.kubernetes.io/component
                  operator: In
                  values:
                    - store-gateway
            topologyKey: 'kubernetes.io/hostname'
    zoneAwareReplication:
      topologyKey: 'kubernetes.io/hostname'
  nginx:
    replicas: 1
    resources:
      limits:
        cpu: 1.4
        memory: 731Mi
      requests:
        cpu: 20m
        memory: 10Mi
# kube-prometheus-stack sub-chart values.
# NOTE(review): nesting reconstructed to match the kube-prometheus-stack
# values schema — verify against that chart's values.yaml.
kps:
  ## Install Prometheus Operator CRDs
  ##
  crds:
    enabled: false
  ## Manages Prometheus and Alertmanager components
  ##
  prometheusOperator:
    enabled: false
  ####
  ##
  ## Everything down here, explicitly disables everything BUT the operator itself
  ##
  ####
  ##
  global:
    rbac:
      create: true
  ## Create default rules for monitoring the cluster
  ##
  defaultRules:
    create: true
  windowsMonitoring:
    ## Deploys the windows-exporter and Windows-specific dashboards and rules (job name must be 'windows-exporter')
    enabled: false
  ## Configuration for prometheus-windows-exporter
  ## ref: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-windows-exporter
  ##
  prometheus-windows-exporter:
    ## Enable ServiceMonitor and set Kubernetes label to use as a job label
    ##
    prometheus:
      monitor:
        enabled: false
  ## Configuration for alertmanager
  ## ref: https://prometheus.io/docs/alerting/alertmanager/
  ##
  alertmanager:
    ## Deploy alertmanager
    ##
    enabled: false
  ## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml
  ##
  grafana:
    enabled: false
    forceDeployDashboards: true
    defaultDashboardsEnabled: true
  ## Flag to disable all the kubernetes component scrapers
  ##
  kubernetesServiceMonitors:
    enabled: true
  ## Component scraping the kube api server
  ##
  kubeApiServer:
    enabled: true
  ## Component scraping the kubelet and kubelet-hosted cAdvisor
  ##
  kubelet:
    enabled: true
  ## Component scraping the kube controller manager
  ##
  kubeControllerManager:
    enabled: true
  ## Component scraping coreDns. Use either this or kubeDns
  ##
  coreDns:
    enabled: true
  ## Component scraping kubeDns. Use either this or coreDns
  ##
  kubeDns:
    enabled: false
  ## Component scraping etcd
  ##
  kubeEtcd:
    enabled: true
  ## Component scraping kube scheduler
  ##
  kubeScheduler:
    enabled: true
  ## Component scraping kube proxy
  ##
  kubeProxy:
    enabled: false
  ## Component scraping kube state metrics
  ##
  kubeStateMetrics:
    enabled: true
  ## Deploy node exporter as a daemonset to all nodes
  ## NOTE(review): original comment read "dontDeploy", but the value is `true` — confirm intent.
  ##
  nodeExporter:
    enabled: true
  ## Don't deploy a Prometheus instance
  ##
  prometheus:
    enabled: false
  ## Configuration for thanosRuler
  ## ref: https://thanos.io/tip/components/rule.md/
  ##
  thanosRuler:
    ## Don't deploy thanosRuler
    ##
    enabled: false