Skip to main content

Complete Configurations

Below you'll find all the available configurations for AHD Commons, together with their respective default values.

# Default values for commons.
# This is a YAML-formatted file.

# -- ecrCredentials
# Automation to enable pulling AWS Elastic Container Registry private images
# @default -- See `values.yaml`
ecrCredentials:
  # -- Install ECR Credentials
  enabled: true
  # -- Access Key ID of the AWS IAM User
  accessKey: ""
  # -- Secret Access Key ID of the AWS IAM User
  secretKey: ""
  # -- Kubernetes Secret Name for Docker Registry Credentials
  ecrSecretName: aws-ecr-regcred
  # @ignored AWS account ID for smeup LAB
  awsAccount: "AWS_ACCOUNT_ID"
  # @ignored AWS region for smeup LAB private images
  awsRegion: "AWS_REGION"
  # @ignored Pod Security Context
  podSecurityContext:
    fsGroup: 1000
    runAsUser: 1000
    runAsGroup: 1000

# -- Telemetry
# @default -- See `values.yaml`
telemetry:
  enabled: true
  image:
    repository: "AWS_ECR_REPOSITORY"
    tag: "v1.1.0"
  # -- Telemetry Agent config
  # @default -- See `values.yaml`
  cfg:
    collector:
      configuration:
        enabled: true
        scrape_interval_minutes: 180
      system:
        enabled: true
        scrape_interval_minutes: 180
      workz:
        enabled: true
        scrape_interval_minutes: 180
    exporter:
      http:
        enabled: true
        iam_sign: true
    logger:
      enabled: true
      log:
        encoding: json
        level: info
        stacktrace: true
    middleware:
      cache:
        enabled: true
      metadata:
        enabled: true
  # -- Telemetry Agent Environment Variables
  envs: []

# -- CSI Driver SMB
# Required to enable samba mounts
# @default -- See `values.yaml`
csiDriverSmb:
  # -- Enable CSI Driver SMB
  enabled: false
  # -- SMB protocol version
  defaultSmbVersion: "2.0"
  # -- Define custom shares (network mounts)
  # NOTE: webup3 shares should not be defined here, but in the webup3 chart instead.
  shares: []
  # ALWAYS use different names for your shares!
  # - name: share1
  #   device: 172.16.2.19/smeup
  #   user: user1
  #   password: pwd1
  #   smbVersion: "3.0" # optional, if not provided defaultSmbVersion will be used
  #   domain: domain1
  # - name: share2
  #   device: 10.250.0.10/test
  #   user: user2
  #   password: pwd2
  #   domain: domain2

# @ignored csi driver smb chart overrides
csi-driver-smb:
  feature:
    enableInlineVolume: false

# -- Traefik TLS ingress
# @default -- See `values.yaml`
traefikConfig:
  # -- Install Traefik chart
  # Note: Only required if Traefik is not already installed
  install: false
  # -- Install Traefik middlewares and TLSOption
  installMiddleware: true
  # -- Install Traefik CRDs
  installCRDs: true
  # -- Traefik TLS configurations
  tls:
    # -- base64 of certificate
    crt: ""
    # -- base64 of key
    key: ""

# @ignored Traefik Helm Chart configurations
traefik:
  ingressClass:
    enabled: true
    isDefaultClass: true
    name: traefik
  ingressRoute:
    dashboard:
      enabled: false
  # @ignored Traefik service configurations
  service:
    annotations: {}
    # -- EXAMPLE AWS SPECIFIC ANNOTATIONS --
    # -- IF YOU CHANGE THESE CONFIGURATIONS AFTER THE FIRST APPLY YOU WILL NEED TO DELETE THE LOADBALANCER SVC AND RECREATE IT --
    # service.beta.kubernetes.io/aws-load-balancer-type: nlb
    # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: <Replace this with your certificate ARN>
    # service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
    # service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443"
    # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
    # --------------------------------------

# -- Local path provisioner - enable only in k3s environments
localPathProvisioner:
  # -- Enable retain to create the "local-path-retain" storageClass
  # set to "false" in non-k3s environments
  enableRetain: true

# -- Mandatory, AWS ElasticFileSystem (EFS) - enable only in EKS environments
# @default -- See `values.yaml`
efs:
  # -- Enable EFS to create a custom storage class
  enabled: false
  # -- The name of the storage class
  storageClassName: efs-sc
  # -- (string) the filesystem ID
  fileSystemId:

# -- Grafana Google OAuth Credentials
grafanaConfig:
  client_id: ""
  client_secret: ""

# -- Grafana configurations
# @default -- See `values.yaml`
grafana:
  # -- Install the Grafana helm chart
  enabled: false
  # -- Username for Web Console login
  adminUser: smeup
  # -- Password for Web Console login **CHANGE IF EXPOSED**
  adminPassword: smeup
  # -- Grafana ingress configurations
  # @default -- Traefik compatible configurations, see `values.yaml`
  ingress:
    enabled: true
    hosts: []
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
    path: /grafana
  envFromSecret: grafana-oauth-credentials

  # @ignored Grafana Pod Security Context
  initChownData:
    enabled: false
  podSecurityContext:
    runAsUser: 472
    runAsGroup: 472
    fsGroup: 472
    fsGroupChangePolicy: "OnRootMismatch"
  containerSecurityContext:
    runAsUser: 472
    runAsGroup: 472

  # @ignored Grafana configurations
  grafana.ini:
    security:
      allow_embedding: true
    server:
      root_url: http://localhost:3000/grafana
      serve_from_sub_path: true

    # -- Enable OAuth with Google as Identity Provider and assign roles based on email address
    auth.google:
      enabled: false
      allow_sign_up: true
      auto_login: false
      client_id: $__env{CLIENT_ID}
      client_secret: $__env{CLIENT_SECRET}
      scopes: openid email profile
      auth_url: https://accounts.google.com/o/oauth2/v2/auth
      token_url: https://oauth2.googleapis.com/token
      api_url: https://openidconnect.googleapis.com/v1/userinfo
      # allowed_domains: smeup.com
      # hosted_domain: smeup.com
      use_pkce: true
      # role_attribute_path: >
      #   contains(['user1@smeup.com', 'user2@smeup.com'], email) && 'Admin' ||
      #   contains(['user3@smeup.com', 'user4@smeup.com'], email) && 'Editor' || 'Viewer'
      skip_org_role_sync: false
      role_attribute_strict: true
    users:
      auto_assign_org_role: Viewer

  # @ignored Enable grafana persistence, do not modify
  persistence:
    enabled: true
  # @ignored Grafana default data sources, do not modify
  datasources:
    datasources.yaml:
      apiVersion: 1
      datasources:
        - name: Prometheus
          type: prometheus
          uid: prometheus
          url: http://{{ .Release.Name }}-prometheus-server:80
          isDefault: true
        - name: Loki
          type: loki
          uid: loki
          url: http://{{ .Release.Name }}-loki:3100
        - name: Jaeger
          type: jaeger
          uid: jaeger
          url: http://{{ ternary .Release.Name (printf "%s-jaeger" .Release.Name) (contains "jaeger" .Release.Name) }}:16686/jaeger
          jsonData:
            nodeGraph:
              enabled: true
            tracesToLogsV2:
              datasourceUid: loki
              tags:
                - key: service.name
                  value: service_name
                - key: service.namespace
                  value: service_namespace
              spanStartTimeShift: '-5m'
              spanEndTimeShift: '5m'

  # @ignored Grafana dashboards configuration
  dashboards:
    default:
      nodeexporter:
        gnetId: 1860
        revision: 37
        datasource: Prometheus

  # @ignored Grafana dashboards configuration
  dashboardProviders:
    dashboardproviders.yaml:
      apiVersion: 1
      providers:
        - name: 'default'
          orgId: 1
          folder: ''
          disableDeletion: false
          allowUiUpdates: true
          options:
            path: /var/lib/grafana/dashboards
        - name: 'custom-dashboards'
          type: file
          disableDeletion: false
          allowUiUpdates: true
          options:
            path: /tmp/dashboards

  # @ignored Grafana dashboard sidecar configuration
  sidecar:
    skipTlsVerify: true
    dashboards:
      enabled: true
      label: grafana-dashboard
      labelValue: "1"
      searchNamespace: ALL
      watchMethod: WATCH
      resource: configmap

# -- Loki configuration
# @default -- See `values.yaml`
loki:
  # -- Install the Loki Helm Chart
  enabled: false
  # @ignored Loki Chart values, do not modify
  deploymentMode: SingleBinary
  singleBinary:
    replicas: 1
  gateway:
    enabled: false
  lokiCanary:
    enabled: false
  test:
    enabled: false
  loki:
    podAnnotations:
      prometheus.io/scrape: "true"
      prometheus.io/port: "3100"
      prometheus.io/path: /metrics
    ingester:
      chunk_encoding: snappy
    querier:
      max_concurrent: 2
    server:
      log_level: warn
    compactor:
      delete_request_store: filesystem
      working_directory: /var/loki/compactor
      retention_enabled: true
      retention_delete_delay: 2h
    limits_config:
      retention_period: 14d
      # Alloy tails pod log files directly from the node, so the first sync can
      # burst significantly while positions are still empty.
      ingestion_rate_mb: 16
      ingestion_burst_size_mb: 32
      per_stream_rate_limit: 16MB
      per_stream_rate_limit_burst: 32MB
    commonConfig:
      replication_factor: 1
    schemaConfig:
      configs:
        # quoted: an unquoted ISO date would be parsed as a timestamp by
        # YAML 1.1 loaders rather than the string Loki expects
        - from: "2024-04-01"
          store: tsdb
          object_store: filesystem
          schema: v13
          index:
            prefix: loki_index_
            period: 24h
    storage:
      type: 'filesystem'
    auth_enabled: false
  chunksCache:
    enabled: false
  resultsCache:
    enabled: false
  # Zero out replica counts of other deployment modes
  backend:
    replicas: 0
  read:
    replicas: 0
  write:
    replicas: 0
  ingester:
    replicas: 0
  querier:
    replicas: 0
  queryFrontend:
    replicas: 0
  queryScheduler:
    replicas: 0
  distributor:
    replicas: 0
  compactor:
    replicas: 0
  indexGateway:
    replicas: 0
  bloomCompactor:
    replicas: 0
  bloomGateway:
    replicas: 0

# @ignored Alloy (log collector) configurations — replaces Promtail
# The River config is defined as a named template in templates/grafana/alloy-configmap.yaml
# and rendered here via tpl — edit that file to customise log collection.
alloy:
  alloy:
    configMap:
      create: true
      content: '{{ include "commons.alloyConfig" . }}'
    extraEnv:
      - name: NODE_NAME
        valueFrom:
          fieldRef:
            fieldPath: spec.nodeName
    mounts:
      varlog: true
  controller:
    type: daemonset
  # Grant Alloy access to Kubernetes pod/namespace metadata
  rbac:
    create: true
  serviceAccount:
    create: true

# -- Prometheus Configurations
# @default -- See 'values.yaml'
prometheus:
  # -- Install Prometheus
  enabled: false
  # @ignored disable alertmanager
  alertmanager:
    enabled: false
  # @ignored disable pushgateway
  # may need to enable to monitor ECR token refresh jobs
  prometheus-pushgateway:
    enabled: false

  server:
    persistentVolume:
      size: 10Gi
    # retention period
    retention: "15d"
    ## Prometheus' data retention size. Supported units: B, KB, MB, GB, TB, PB, EB.
    retentionSize: "10GB"

# -- @ignored Prometheus exporter for MongoDB
prometheus-mongodb-exporter:
  # -- Enable MongoDB exporter
  enabled: false
  existingSecret:
    name: "mongodb-creds"
    key: "uri"
  podAnnotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "9216"
    prometheus.io/path: /metrics

# -- @ignored Prometheus exporter for PostgreSQL
prometheus-postgres-exporter:
  # -- Enable PostgreSQL exporter
  enabled: false
  config:
    datasource:
      host: postgres-svc
      database: postgres
      userSecret:
        name: postgres-creds
        key: username
      passwordSecret:
        name: postgres-creds
        key: password
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "9187"
    prometheus.io/path: /metrics

# -- ArgoCD Configurations
# @default -- See 'values.yaml'
argo-cd:
  # -- Install ArgoCD
  enabled: false
  # @ignored Disable DEX Server
  dex:
    enabled: false
  # @ignored Disable Notifications server
  notifications:
    enabled: false
  # @ignored enable controller metrics
  controller:
    metrics:
      enabled: true
  # @ignored enable repoServer metrics
  repoServer:
    metrics:
      enabled: true
  # @ignored enable applicationSet metrics
  applicationSet:
    metrics:
      enabled: true
  # @ignored enable redis metrics
  redis:
    metrics:
      enabled: true
  # -- ArgoCD Chart configs
  # @default -- See 'values.yaml'
  configs:
    params:
      server.rootpath: "/argocd"
      server.insecure: true
    repositories:
      smartkit:
        url: "https://repo.smeup.cloud/nexus/repository/smartkit"
        name: "smartkit"
        type: "helm"
    # -- Create new argocd user
    cm:
      # accounts.<name>: <capabilities>
      # There are 2 capabilities apiKey and login
      accounts.readonly: login,apiKey
    # -- RBAC policy configuration
    rbac:
      # Policy rules are in the form:
      #   p, subject, resource, action, object, effect
      # Role definitions and bindings are in the form:
      #   g, subject, inherited-subject
      policy.csv: ''
      # E.g. define a readonly account
      # policy.csv: |
      #   p, role:readonly, applications, get, */*, allow
      #   p, role:readonly, repositories, get, *, allow
      #   p, role:readonly, logs, get, */*, allow
      #   p, role:readonly, exec, get, */*, allow
      #   p, role:readonly, projects, get, *, allow
      #   p, role:readonly, clusters, get, *, allow
      #   g, ReadOnly, role:readonly
    # -- Provide configurations to connect argocd with one or more external cluster
    clusterCredentials: {}
    # my-cluster-1:
    #   server: https://<ip>:6443
    #   labels: {}
    #   annotations: {}
    #   config:
    #     bearerToken: "<bearer token service account>"
    #     tlsClientConfig:
    #       insecure: false
    #       caData: "<base64 encoded certificate>"
  # -- ArgoCD Server configs
  # @default -- See 'values.yaml'
  server:
    metrics:
      enabled: true
    ingress:
      enabled: true
      annotations:
        # default annotations for HTTPS ingress
        traefik.ingress.kubernetes.io/router.entrypoints: websecure
        traefik.ingress.kubernetes.io/router.tls: "true"
        # comment the above annotations and uncomment the one below for HTTP ingress
        # traefik.ingress.kubernetes.io/router.entrypoints: web
      path: /argo-cd # Dummy method to avoid conflicts with the extraRules path
      pathType: Prefix
      extraRules:
        - http:
            paths:
              - path: /argocd
                pathType: Prefix
                backend:
                  service:
                    name: '{{ include "argo-cd.server.fullname" . }}'
                    port:
                      name: '{{ .Values.server.service.servicePortHttpName }}'

# -- Jaeger configuration
# @default -- See `values.yaml`
jaeger:
  # -- Install Jaeger
  enabled: false
  # -- OpenTelemetry Instrumentation CR configuration
  instrumentation:
    sampler:
      # -- Sampler type: always_on, always_off, parentbased_traceidratio, traceidratio
      type: always_on
      # -- Sampler argument (e.g. ratio for traceidratio: "0.25")
      argument: ""

  # @ignored Jaeger OTEL configuration
  userconfig:
    service:
      extensions: [jaeger_storage, jaeger_query, healthcheckv2]
      pipelines:
        traces:
          receivers: [otlp, jaeger]
          processors: [batch]
          exporters: [jaeger_storage_exporter]
      telemetry:
        resource:
          service.name: jaeger
        metrics:
          level: detailed
          readers:
            - pull:
                exporter:
                  prometheus:
                    host: 0.0.0.0
                    port: 8888

    extensions:
      healthcheckv2:
        use_v2: true
        http:
          endpoint: 0.0.0.0:13133
      jaeger_query:
        storage:
          traces: primary
        base_path: /jaeger
      jaeger_storage:
        backends:
          primary:
            memory:
              max_traces: 30000

    receivers:
      otlp:
        protocols:
          grpc:
            endpoint: 0.0.0.0:4317
          http:
            endpoint: 0.0.0.0:4318
      jaeger:
        protocols:
          grpc:
            endpoint: 0.0.0.0:14250
          thrift_http:
            endpoint: 0.0.0.0:14268
          thrift_compact:
            endpoint: 0.0.0.0:6831
          thrift_binary:
            endpoint: 0.0.0.0:6832

    processors:
      batch:
        timeout: 5s
        send_batch_size: 1024

    exporters:
      jaeger_storage_exporter:
        trace_storage: primary

  # @ignored Jaeger service annotations for Prometheus scraping
  service:
    annotations:
      prometheus.io/scrape: "true"
      prometheus.io/port: "8888"
      prometheus.io/path: /metrics

  resources:
    limits:
      memory: 2Gi
    requests:
      cpu: 500m
      memory: 1Gi
  # NOTE(review): a scalar here looks wrong — podSecurityContext is normally a
  # map (e.g. fsGroup: 1000); verify the intended shape against the template
  podSecurityContext: 1000

# @ignored OpenTelemetry Operator chart
opentelemetry-operator:
  manager:
    collectorImage:
      repository: "otel/opentelemetry-collector-k8s"
  admissionWebhooks:
    # Use Helm auto-generated self-signed certs
    certManager:
      enabled: false
    autoGenerateCert:
      enabled: true
      recreate: true

# -- Create a service account for remote management by an external ArgoCD instance
# Note: this is a top-level key, outside of the `argo-cd` field
createArgoCDServiceAccount: true

# -- Cluster Autoscaler
# At the moment only AWS is supported
# @default -- See `values.yaml`
autoscaler:
  # -- Enable Cluster Autoscaler, set to `true` only if you have an EKS cluster
  enabled: false
  # -- Mandatory, must match the EKS cluster name
  clusterName: ""
  # -- Mandatory, must match the version provided by the release here: https://github.com/kubernetes/autoscaler/releases
  imageTag: ""
  # -- Mandatory, AWS Region where the EKS cluster is running
  awsRegion: ""
  # -- Mandatory, AWS IAM User Access Key ID that has the required permissions to manage the EKS cluster
  accessKey: ""
  # -- Mandatory, AWS IAM User Secret Access Key that has the required permissions to manage the EKS cluster
  secretKey: ""
  serviceAccount:
    name: cluster-autoscaler

# -- Keycloak Identity Provider
# @default -- See `values.yaml`
keycloak:
  # -- Enables Keycloak IDP
  enabled: false
  # -- Keycloak startup mode
  startupMode: start
  # -- Keycloak service name
  serviceName: keycloak
  # -- The number of Keycloak replicas (set >1 and cache.mode=ispn for clustered setup)
  replicas: 1
  # -- Keycloak Docker image configuration
  image:
    ## -- Keycloak Docker image repository
    repository: keycloak/keycloak
    ## -- Keycloak Docker image tag
    # quoted so the tag stays a string under any YAML loader
    tag: "26.6.1"
  # -- Keycloak Pod resource requests and limits
  resources:
    limits:
      cpu: 2000m
      memory: 2000Mi
    requests:
      cpu: 500m
      memory: 1700Mi
  # -- Pod-level security context for Keycloak StatefulSet
  podSecurityContext:
    runAsNonRoot: true
    fsGroup: 1000
  # -- Container-level security context for Keycloak
  containerSecurityContext:
    runAsNonRoot: true
    runAsUser: 1000
    allowPrivilegeEscalation: false
    capabilities:
      drop: ["ALL"]
  cfg:
    # -- Initial admin user credentials
    # WARNING: must be changed from defaults before deploying
    admin:
      bootstrapUsr: admin
      bootstrapPwd: admin
    # -- Keycloak server configurations
    network:
      # -- Public hostname used by Keycloak for redirects and frontend endpoints.
      # -- If empty, the chart falls back to keycloak.ingress.hostname when set.
      hostname: ""
      # -- Enables Keycloak to trust X-Forwarded-* headers from reverse proxy
      proxyHeadersMode: xforwarded
      # -- Enable HTTP listener on port 8080 (TLS terminated at ingress/proxy)
      httpEnabled: false
      # -- Use a strict hostname for Keycloak
      strictHostname: false
      # -- Enable health probes
      healthProbesEnabled: true
  # NOTE(review): nesting reconstructed — verify cache/customEnv/theme sit at
  # this level (not under cfg) against the chart templates
  # -- Keycloak caching configuration
  cache:
    # -- 'local' for single instance setups (default), 'ispn' for clustered multi-node setups
    mode: local
  # -- Keycloak extra configurations
  customEnv: []
  # -- Keycloak theme configuration
  theme:
    # -- Enable custom Keycloak themes
    volume:
      # -- The size of the persistent volume for themes
      size: 1Gi
      # -- The storage class name for the persistent volume for themes
      storageClassName: ""

  # -- Liveness, readiness and startup probes configuration can be provided here
  # -- If not provided, default probes will be used
  livenessProbe: {}
  readinessProbe: {}
  startupProbe: {}

  # -- Database StatefulSet configurations
  db:
    # -- Enable the PostgreSQL database StatefulSet for Keycloak
    enabled: true
    # -- Name of the StatefulSet and related resources
    name: kc-postgres
    # -- Number of database replicas
    replicas: 1
    # -- Image repository for the PostgreSQL database
    image:
      repository: mirror.gcr.io/postgres
      # quoted so the tag stays a string (bare 17 parses as an integer)
      tag: "17"
    # -- Database schema/name
    schema: keycloak
    # -- Database host (must match db.name when using the built-in StatefulSet)
    host: kc-postgres
    # -- Database username
    # WARNING: must be changed from defaults before deploying
    username: keycloak
    # -- Database password
    # WARNING: must be changed from defaults before deploying
    password: keycloak
    # -- Controls which SQL statements are logged. Values: "none", "ddl", "mod", "all"
    logStatement: all
    # -- Database port
    databasePort: 5432
    # -- Pod resource requests and limits for the database
    resources:
      limits:
        cpu: 1000m
        memory: 1Gi
      requests:
        cpu: 250m
        memory: 512Mi
    # -- Pod-level security context for the database StatefulSet
    podSecurityContext:
      runAsNonRoot: true
      runAsUser: 999
      runAsGroup: 999
      fsGroup: 999
    # -- Container-level security context for the database
    containerSecurityContext:
      runAsNonRoot: true
      allowPrivilegeEscalation: false
      capabilities:
        drop: ["ALL"]
    # -- Database Volume configurations
    volume:
      # -- Mount path inside the PostgreSQL container
      mountPath: /var/lib/postgresql/data
      # -- Data directory name override inside the mountPath
      dataDirNameOverride: "pgdata"
      # -- Access mode for the persistent volume claim
      accessMode: ReadWriteOnce
      # -- Size of the persistent volume claim
      size: 10Gi
      # -- Storage class name for the persistent volume claim, leave empty for default
      storageClassName: ""

  ingress:
    # -- Enable Keycloak Ingress
    enabled: false
    className: ""
    annotations: {}
    hostname: ""
    applicationContext: ""
    # -- TLS configuration for the Ingress
    # Example:
    # tls:
    #   - secretName: keycloak-tls
    #     hosts:
    #       - keycloak.example.com
    tls: []