Configuration Settings
The configuration is validated against a schema definition. You can validate your custom configuration with `helm lint`.
Available services
A list of all services and their default state based on the enabled flag.
Global parameters
Common parameters
Ibexa parameters
Iframely parameters
Solr parameters
See more parameters…
Tika parameters
See more parameters…
SMTP parameters
See more parameters…
RabbitMQ parameters
See more parameters…
Redis Cluster (sessions) parameters
See more parameters…
Redis Cluster (persistent cache) parameters
See more parameters…
Varnish parameters
Monitoring parameters
Grafana and Prometheus are subcharts of the monitoring chart.
Default configuration of the values.yaml
---
# NOTE(review): the indentation of this values file was lost in extraction; the
# nesting below is reconstructed — verify against the chart's values.schema.json
# (e.g. with `helm lint`).
global:
  # Default host used by the commented route examples below.
  default_host: &default_host null
  routes:
    admin: []
    frontend: []
    # - *default_host
    # - "example.com"
  ibexa:
    user_hash: "ecaea5a638cb64ce41e9266e550963228d0bb58ed86ca7278f1b3e135c155669"
  imageCredentials: []
  # Reference to one or more secrets to be used when pulling images
  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  imagePullSecrets: []
  # $CI_* placeholders are substituted by the CI pipeline before deployment.
  release:
    registry: $CI_RELEASE_REGISTRY
    repository: $CI_RELEASE_REPOSITORY
    tag: $CI_CONTAINER_RELEASE_TAG
nameOverride: ""
fullnameOverride: ""
debug: false
# Amount of minimum replicas
# replicas: 1
ports:
  http: 8080
  https: 8443
  # ssh: 2222
  # dev-server: 4300
# This will set the environment var foo with the value bar
env: []
# - name: "foo"
#   value: "bar"
kubernetes:
  optimize: false
image:
  registry: $CI_RELEASE_REGISTRY
  repository: $CI_RELEASE_REPOSITORY/ibexa-$IBEXA_PACKAGE
  tag: $CI_CONTAINER_RELEASE_TAG
  pullPolicy: IfNotPresent
# NOTE(review): registry credentials are committed in plain text here — prefer
# injecting them from CI variables or an existing pull secret.
imageCredentials:
  registry: "registry.gitlab.com"
  username: "read-token"
  password: "3gzMqr9wywNyVrXCqFy1"
serviceAccount:
  create: false
  name: null
installation:
  package: ibexa-$IBEXA_PACKAGE
  # Shell snippet run during installation; |- strips the trailing newline.
  script: |-
    echo "No custom install instructions"
timezone: "Europe/Berlin"
# Overrides for the dedicated admin deployment.
admin:
  hpa:
    enabled: false
    maxReplicas: 5
    minReplicas: 2
  replicas: 1
  resources:
    requests:
      cpu: 500m
      memory: 500Mi
    limits:
      cpu: 1500m
      memory: 3000Mi
  nodeSelector: {}
  tolerations: []
  affinity: {}
# NOTE(review): presumably the defaults for the main (frontend) deployment,
# mirroring the keys under `admin:` above — confirm against the templates.
hpa:
  enabled: false
  maxReplicas: 10
  minReplicas: 2
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
resources:
  requests:
    cpu: 500m
    memory: 500Mi
  limits:
    cpu: 1500m
    memory: 3000Mi
annotations: {}
nodeSelector: {}
tolerations: []
affinity: {}
php:
  # @TODO per default an automatic settings based on max mem
  memory_limit: "2048M"
  error_reporting: "E_ALL & ~E_NOTICE & ~E_USER_WARNING & ~E_USER_NOTICE & ~E_DEPRECATED & ~E_USER_DEPRECATED"
symfony:
  env: prod
  debug: false
  trusted_proxies: 127.0.0.1,192.168.0.0/16,172.16.0.0/12,10.0.0.0/8
  runtime:
    name: null
    # name: "Runtime\Swoole\Runtime"
    options: null
session:
  # Uncomment to store sessions in the Redis cluster (see `sessionservice:`).
  # handler_id: 'app.session.handler.native_rediscluster'
  handler_id: null
ingress:
  enabled: true
  # Optional source-IP allowlist:
  # whitelist:
  #   - "180.5.61.153"
  #   - "192.168.1.0/24"
  #   - "10.0.0.0/8"
  RateLimitConnections: false
  RateLimitConnectionsRateHttp: "100"
  tls: true
  annotations:
    kubernetes.io/tls-acme: "true"
    haproxy.router.openshift.io/timeout: "60s"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-body-size: "100m"
storage:
  enabled: true
  ## Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner. (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack)
  class: null
  existingClaim: false
  mount: "/opt/app-root/src/public/var"
  size: 5Gi
  accessMode: ReadWriteMany
  # "keep" prevents the PVC from being deleted on `helm uninstall`.
  annotations:
    helm.sh/resource-policy: "keep"
  # Sample set of annotations if you want to destroy on uninstall
  # annotations: {}
cron:
  resources:
    requests:
      cpu: 50m
      memory: 200Mi
    limits:
      cpu: 1000m
      memory: 2000Mi
# NOTE(review): when enabling, quote the schedule values — a plain leading `*`
# is parsed as a YAML alias and breaks the file (e.g. schedule: "* * * * *").
# crons:
#   - name: check-urls
#     command: ibexa:check-urls
#     schedule: "0 0 * * 0"
#   - name: ibexa-cron-run
#     command: ibexa:cron:run
#     schedule: "* * * * *"
#   - name: check-urls
#     type: bash
#     command: bin/console ibexa:check-urls
#     schedule: "5 4 1-7,15-21 * 6"
# Optionally use the default cert manager.
certManager:
  enabled: false
  issuer: false
  # email: "mail@example.com"
  # route53:
  #   id: "AAAAAAAAAAAAAAAAAAAAAA"
  #   secret: "AA/BBB"
  #   region: "eu-central-1"
tika:
  enabled: false
  image:
    # Quoted so the tag is a string, not the float 1.28.
    tag: "1.28"
vendor:
  # NOTE(review): plain-text registry credentials committed to the values file —
  # prefer an existing pull secret or a CI-injected value.
  imageCredentials:
    registry: "registry.gitlab.com"
    username: "read-token"
    password: "3gzMqr9wywNyVrXCqFy1"
redischeck:
  enabled: true
  image:
    registry: $CI_RELEASE_REGISTRY
    repository: $CI_RELEASE_REPOSITORY/redischeck
    tag: $CI_CONTAINER_RELEASE_TAG
    pullPolicy: IfNotPresent
    # NOTE(review): pullSecrets placement inside `image:` follows the Bitnami
    # convention — confirm against the consuming template.
    pullSecrets:
      - vendor-chart-registry
# If you enable the session service, make sure that Ibexa has the following service definition:
#   app.session.handler.native_rediscluster:
#     class: Ibexa\Bundle\Core\Session\Handler\NativeSessionHandler
#     arguments:
#       - '%session.save_path%'
#       - rediscluster
# Redis Cluster subchart used for PHP sessions (Bitnami redis-cluster schema).
sessionservice:
  enabled: false
  podSecurityContext:
    enabled: false
  containerSecurityContext:
    enabled: false
  # @TODO
  # sysctls:
  #   - name: net.core.somaxconn
  #     value: "10000"
  # @TODO PVC autodelete not possible yet
  persistence:
    enabled: false
    annotations: {}
  resources:
    requests:
      memory: 100Mi
    limits:
      memory: 350Mi
  usePassword: false
  redis:
    startupProbe:
      enabled: true
      initialDelaySeconds: 120
    # Quoted: a bare `no` would be parsed as boolean false.
    useAOFPersistence: "no"
    configmap: |-
      # Enable AOF https://redis.io/topics/persistence#append-only-file
      appendonly no
      # Disable RDB persistence, AOF persistence already enabled.
      save ""
      maxmemory-policy volatile-lru
      maxmemory 300M
      io-threads 2
      io-threads-do-reads yes
      repl-diskless-sync yes
  cluster:
    nodes: 3
    replicas: 0
varnish:
  enabled: true
# NOTE(review): reconstructed as a top-level key; confirm it does not belong
# under `varnish:`.
fastly:
  enabled: false
  key: null
  service_id: null
  purge_server: "https://api.fastly.com"
# See https://github.com/bitnami/charts/blob/solr/8.10.0/bitnami/solr/README.md for parameters
solr:
  enabled: true
  image:
    registry: $CI_RELEASE_REGISTRY
    repository: $CI_RELEASE_REPOSITORY/solr
    tag: $CI_CONTAINER_RELEASE_TAG
    pullSecrets:
      - vendor-chart-registry
  # NOTE(review): default admin credentials — override per environment.
  auth:
    adminUsername: ibexa
    adminPassword: ibexa
  coreNames: []
  collection: "ibexa"
  collectionReplicas: 1
  extraEnvVars:
    - name: SOLR_LOG_LEVEL
      value: WARN
  podSecurityContext:
    enabled: true
    # fsGroup should match that of the 'hcloud-csi' storage
    fsGroup: 1001
  containerSecurityContext:
    enabled: false
  startupProbe:
    enabled: true
  javaMem: "-Xms2g -Xmx3g"
  replicaCount: 1
  debug: false
  metrics:
    enabled: false
    # @TODO Remove with solr 9
    configFile: "/opt/bitnami/solr/contrib/prometheus-exporter/conf/solr-exporter-config.xml"
    podSecurityContext:
      enabled: false
    containerSecurityContext:
      enabled: false
    podAnnotations:
      # Quoted: annotation values must be strings, not booleans.
      prometheus.io/scrape: 'true'
  zookeeper:
    enabled: true
    startupProbe:
      enabled: true
    podSecurityContext:
      enabled: true
      # fsGroup should match that of the 'hcloud-csi' storage
      fsGroup: 1001
    containerSecurityContext:
      enabled: false
    # See https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#permission-errors-when-enabling-persistence
    volumePermissions:
      enabled: true
    replicaCount: 1
    resources:
      requests:
        cpu: 50m
        memory: 800Mi
iframely:
  enabled: false
smtp:
  enabled: true
  domain: mail.example.com
  persistence:
    enabled: false
  # Configure sending email through another mailserver
  relay: {}
  #   ip: 192.168.0.246
  #   port: 558
# Redis subchart used as persistent HTTP/content cache (Bitnami redis schema).
cache:
  enabled: true
  auth:
    enabled: false
  master:
    podSecurityContext:
      enabled: false
    # sysctls:
    #   - name: net.core.somaxconn
    #     value: "10000"
    containerSecurityContext:
      enabled: false
    startupProbe:
      enabled: true
    persistence:
      enabled: false
      annotations: {}
    resources:
      requests:
        memory: 100Mi
      limits:
        memory: 1Gi
    disableCommands: []
  commonConfiguration: |-
    # Enable AOF https://redis.io/topics/persistence#append-only-file
    appendonly no
    # Disable RDB persistence, AOF persistence already enabled.
    save ""
    maxmemory-policy volatile-lru
    maxmemory 800M
    io-threads 8
    io-threads-do-reads yes
    repl-diskless-sync yes
  replica:
    replicaCount: 0
rabbitmq:
  enabled: true
  # NOTE(review): default credentials committed in plain text — override per
  # environment and keep real secrets out of VCS.
  auth:
    username: ibexa
    password: TnAV7NP0HeKYXc0F
    erlangCookie: TnAV7NP0HeKYXc0F
  persistence:
    enabled: false
  podSecurityContext:
    enabled: false
  containerSecurityContext: {}
  serviceAccount:
    create: false
  rbac:
    create: false
messagequeue:
  enabled: true
  command: ['/scripts/messagequeue-start.sh']
  args: []
mysql:
  enabled: true
  serviceAccount:
    create: false
  # NOTE(review): default credentials — override per environment.
  auth:
    rootPassword: ibexa
    username: ibexa
    password: ibexa
    database: ibexa
  image:
    registry: docker.io
    repository: mysql
    # Quoted so the tag stays a string.
    tag: "8.0.41"
    # @TODO OracleOS crash loops for unknown reasons
    # tag: "8-oracle"
  primary:
    podAnnotations:
      # Refers to volumeMount name
      # This annotation may be ignored depending on the Velero backup configuration (such as namespace filter).
      backup.velero.io/backup-volumes: data
    podSecurityContext:
      enabled: false
    containerSecurityContext:
      enabled: false
    startupProbe:
      initialDelaySeconds: 60
      failureThreshold: 20
    args:
      - "--defaults-file=/opt/bitnami/mysql/conf/my.cnf"
    configuration: |-
      [mysqld]
      skip-name-resolve
      performance_schema=ON
      character_set_server = utf8mb4
      collation_server = utf8mb4_unicode_ci
      max_connections = 200
      secure-file-priv = ""
      innodb_lock_wait_timeout=120
      socket=/var/run/mysqld/mysqld.sock
    extraVolumeMounts:
      - name: data
        mountPath: /var/lib/mysql
# @TODO move to subchart monitoring. Doesn't work with import-values
metrics:
  enabled: false
monitoring:
  enabled: false