# shoebill-test/charts/postgresql-postgresql-server/values.yaml

# Copyright Broadcom, Inc. All Rights Reserved.
# SPDX-License-Identifier: APACHE-2.0
## @section Global parameters
## Please note that these values will override the corresponding parameters, including those of dependencies, configured to use the global value
##
global:
## @param global.imageRegistry Global Docker image registry
##
imageRegistry: ""
## @param global.imagePullSecrets Global Docker registry secret names as an array
## e.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead
##
defaultStorageClass: ""
storageClass: ""
postgresql:
## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`)
## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`)
## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`)
## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`)
## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`).
## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
##
auth:
postgresPassword: ""
username: ""
password: ""
database: ""
existingSecret: ""
secretKeys:
adminPasswordKey: ""
userPasswordKey: ""
replicationPasswordKey: ""
## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`)
##
service:
ports:
postgresql: ""
## Compatibility adaptations for Kubernetes platforms
##
compatibility:
## Compatibility adaptations for OpenShift
##
openshift:
## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with OpenShift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is OpenShift), force (perform the adaptation always), disabled (do not perform adaptation)
##
adaptSecurityContext: auto
## @section Common parameters
##
## @param kubeVersion Override Kubernetes version
##
kubeVersion: ""
## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname template
##
fullnameOverride: ""
## @param clusterDomain Kubernetes Cluster Domain
##
clusterDomain: cluster.local
## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template)
##
extraDeploy: []
## @param commonLabels Add labels to all the deployed resources
##
commonLabels: {}
## @param commonAnnotations Add annotations to all the deployed resources
##
commonAnnotations: {}
## Enable diagnostic mode in the statefulset
##
diagnosticMode:
## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
##
enabled: false
## @param diagnosticMode.command Command to override all containers in the statefulset
##
command:
- sleep
## @param diagnosticMode.args Args to override all containers in the statefulset
##
args:
- infinity
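## Illustrative usage (release and pod names are assumptions): setting
## `diagnosticMode.enabled=true` at install/upgrade time, e.g.
## `helm upgrade <release> <chart> --set diagnosticMode.enabled=true`, makes every
## container run `sleep infinity`, so the pod stays up and can be inspected with
## `kubectl exec -it <release>-postgresql-0 -- bash`.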
## @section PostgreSQL common parameters
##
## Bitnami PostgreSQL image version
## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
## @param image.registry [default: REGISTRY_NAME] PostgreSQL image registry
## @param image.repository [default: REPOSITORY_NAME/postgresql] PostgreSQL image repository
## @skip image.tag PostgreSQL image tag (immutable tags are recommended)
## @param image.digest PostgreSQL image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
## @param image.pullPolicy PostgreSQL image pull policy
## @param image.pullSecrets Specify image pull secrets
## @param image.debug Specify if debug values should be set
##
image:
registry: docker.io
repository: bitnami/postgresql
tag: 16.4.0-debian-12-r7
digest: ""
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Set to true if you would like to see extra information in the logs
##
debug: false
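## Illustrative example (registry, digest and secret name are placeholders): pull the
## image from a private registry, pinned by digest, with a pre-created pull secret:
## image:
##   registry: registry.example.com
##   repository: bitnami/postgresql
##   digest: "sha256:<image-digest>"
##   pullSecrets:
##     - my-registry-secret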
## Authentication parameters
## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#setting-the-root-password-on-first-run
## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-on-first-run
## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-user-on-first-run
##
auth:
## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user
##
enablePostgresUser: true
## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided
##
postgresPassword: ""
## @param auth.username Name for a custom user to create
##
username: ""
## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` is provided
##
password: ""
## @param auth.database Name for a custom database to create
##
database: ""
## @param auth.replicationUsername Name of the replication user
##
replicationUsername: repl_user
## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` is provided
##
replicationPassword: ""
## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contain the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked up from this secret in this case.
##
existingSecret: ""
## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
##
secretKeys:
adminPasswordKey: postgres-password
userPasswordKey: password
replicationPasswordKey: replication-password
## @param auth.usePasswordFiles Mount credentials as files instead of using environment variables
##
usePasswordFiles: false
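## Illustrative example (secret name and passwords are placeholders): reference a
## pre-created secret instead of setting passwords inline:
## kubectl create secret generic my-postgresql-credentials \
##   --from-literal=postgres-password=<admin-password> \
##   --from-literal=password=<user-password> \
##   --from-literal=replication-password=<replication-password>
## auth:
##   existingSecret: "my-postgresql-credentials"
##   secretKeys:
##     adminPasswordKey: postgres-password
##     userPasswordKey: password
##     replicationPasswordKey: replication-password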
## @param architecture PostgreSQL architecture (`standalone` or `replication`)
##
architecture: standalone
## Replication configuration
## Ignored if `architecture` is `standalone`
##
replication:
## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off`
## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`.
## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT
##
synchronousCommit: "off"
numSynchronousReplicas: 0
## @param replication.applicationName Cluster application name. Useful for advanced replication settings
##
applicationName: my_application
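## Illustrative example (replica counts are placeholders, not recommendations): run a
## primary with two read replicas and one synchronous standby:
## architecture: replication
## replication:
##   synchronousCommit: "on"
##   numSynchronousReplicas: 1
## readReplicas:
##   replicaCount: 2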
## @param containerPorts.postgresql PostgreSQL container port
##
containerPorts:
postgresql: 5432
## Audit settings
## https://github.com/bitnami/containers/tree/main/bitnami/postgresql#auditing
## @param audit.logHostname Log client hostnames
## @param audit.logConnections Add client log-in operations to the log file
## @param audit.logDisconnections Add client log-out operations to the log file
## @param audit.pgAuditLog Add operations to the log using the pgAudit extension
## @param audit.pgAuditLogCatalog Log catalog using pgAudit
## @param audit.clientMinMessages Message log level to share with the user
## @param audit.logLinePrefix Template for log line prefix (default if not set)
## @param audit.logTimezone Timezone for the log timestamps
##
audit:
logHostname: false
logConnections: false
logDisconnections: false
pgAuditLog: ""
pgAuditLogCatalog: "off"
clientMinMessages: error
logLinePrefix: ""
logTimezone: ""
## LDAP configuration
## @param ldap.enabled Enable LDAP support
## DEPRECATED ldap.url It will be removed in a future release, please use 'ldap.uri' instead
## @param ldap.server IP address or name of the LDAP server.
## @param ldap.port Port number on the LDAP server to connect to
## @param ldap.prefix String to prepend to the user name when forming the DN to bind
## @param ldap.suffix String to append to the user name when forming the DN to bind
## DEPRECATED ldap.baseDN It will be removed in a future release, please use 'ldap.basedn' instead
## DEPRECATED ldap.bindDN It will be removed in a future release, please use 'ldap.binddn' instead
## DEPRECATED ldap.bind_password It will be removed in a future release, please use 'ldap.bindpw' instead
## @param ldap.basedn Root DN to begin the search for the user in
## @param ldap.binddn DN of user to bind to LDAP
## @param ldap.bindpw Password for the user to bind to LDAP
## DEPRECATED ldap.search_attr It will be removed in a future release, please use 'ldap.searchAttribute' instead
## DEPRECATED ldap.search_filter It will be removed in a future release, please use 'ldap.searchFilter' instead
## @param ldap.searchAttribute Attribute to match against the user name in the search
## @param ldap.searchFilter The search filter to use when doing search+bind authentication
## @param ldap.scheme Set to `ldaps` to use LDAPS
## DEPRECATED ldap.tls as string is deprecated, please use 'ldap.tls.enabled' instead
## @param ldap.tls.enabled Set to true to enable TLS encryption
##
ldap:
enabled: false
server: ""
port: ""
prefix: ""
suffix: ""
basedn: ""
binddn: ""
bindpw: ""
searchAttribute: ""
searchFilter: ""
scheme: ""
tls:
enabled: false
## @param ldap.uri LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored.
## Ref: https://www.postgresql.org/docs/current/auth-ldap.html
##
uri: ""
## @param postgresqlDataDir PostgreSQL data dir folder
##
postgresqlDataDir: /bitnami/postgresql/data
## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list)
##
postgresqlSharedPreloadLibraries: "pgaudit"
## Start PostgreSQL pod(s) without limitations on shm memory.
## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M`
## ref: https://github.com/docker-library/postgres/issues/416
## ref: https://github.com/containerd/containerd/issues/3654
##
shmVolume:
## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s)
##
enabled: true
## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs
## Note: the size of the tmpfs counts against container's memory limit
## e.g:
## sizeLimit: 1Gi
##
sizeLimit: ""
## TLS configuration
##
tls:
## @param tls.enabled Enable TLS traffic support
##
enabled: false
## @param tls.autoGenerated Automatically generate self-signed TLS certificates
##
autoGenerated: false
## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's
##
preferServerCiphers: true
## @param tls.certificatesSecret Name of an existing secret that contains the certificates
##
certificatesSecret: ""
## @param tls.certFilename Certificate filename
##
certFilename: ""
## @param tls.certKeyFilename Certificate key filename
##
certKeyFilename: ""
## @param tls.certCAFilename CA Certificate filename
## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting a certificate from them
## ref: https://www.postgresql.org/docs/9.6/auth-methods.html
##
certCAFilename: ""
## @param tls.crlFilename File containing a Certificate Revocation List
##
crlFilename: ""
## @section PostgreSQL Primary parameters
##
primary:
## @param primary.name Name of the primary database (e.g. primary, master, leader, ...)
##
name: primary
## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap
## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
##
configuration: ""
## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration
## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html
## e.g:
## pgHbaConfiguration: |-
## local all all trust
## host all all localhost trust
## host mydatabase mysuser 192.168.0.0/24 md5
##
pgHbaConfiguration: ""
## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration
## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored
##
existingConfigmap: ""
## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration)
## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
##
extendedConfiguration: ""
## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration
## NOTE: `primary.extendedConfiguration` will be ignored
##
existingExtendedConfigmap: ""
## Initdb configuration
## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#specifying-initdb-arguments
##
initdb:
## @param primary.initdb.args PostgreSQL initdb extra arguments
##
args: ""
## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log
##
postgresqlWalDir: ""
## @param primary.initdb.scripts Dictionary of initdb scripts
## Specify dictionary of scripts to be run at first boot
## e.g:
## scripts:
## my_init_script.sh: |
## #!/bin/sh
## echo "Do something."
##
scripts: {}
## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot
## NOTE: This will override `primary.initdb.scripts`
##
scriptsConfigMap: ""
## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information)
## NOTE: This can work along with `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap`
##
scriptsSecret: ""
## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts
##
user: ""
## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts
##
password: ""
## Pre-init configuration
## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql/#on-container-start
preInitDb:
## @param primary.preInitDb.scripts Dictionary of pre-init scripts
## Specify dictionary of shell scripts to be run before db boot
## e.g:
## scripts:
## my_pre_init_script.sh: |
## #!/bin/sh
## echo "Do something."
scripts: {}
## @param primary.preInitDb.scriptsConfigMap ConfigMap with pre-init scripts to be run
## NOTE: This will override `primary.preInitDb.scripts`
scriptsConfigMap: ""
## @param primary.preInitDb.scriptsSecret Secret with pre-init scripts to be run
## NOTE: This can work along with `primary.preInitDb.scripts` or `primary.preInitDb.scriptsConfigMap`
scriptsSecret: ""
## Configure the current cluster's primary server to be a standby server for another cluster.
## This allows cross-cluster replication and provides cross-cluster high availability.
## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled.
## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not
## @param primary.standby.primaryHost The Host of replication primary in the other cluster
## @param primary.standby.primaryPort The Port of replication primary in the other cluster
##
standby:
enabled: false
primaryHost: ""
primaryPort: ""
## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes
## e.g:
## extraEnvVars:
## - name: FOO
## value: "bar"
##
extraEnvVars: []
## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes
##
extraEnvVarsCM: ""
## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes
##
extraEnvVarsSecret: ""
## @param primary.command Override default container command (useful when using custom images)
##
command: []
## @param primary.args Override default container args (useful when using custom images)
##
args: []
## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers
## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe
## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers
## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe
## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers
## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param primary.startupProbe.periodSeconds Period seconds for startupProbe
## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe
## @param primary.startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
enabled: false
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 15
successThreshold: 1
## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one
##
customLivenessProbe: {}
## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one
##
customReadinessProbe: {}
## @param primary.customStartupProbe Custom startupProbe that overrides the default one
##
customStartupProbe: {}
## @param primary.lifecycleHooks Lifecycle hooks for the PostgreSQL Primary container to automate configuration before or after startup
##
lifecycleHooks: {}
## PostgreSQL Primary resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## @param primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "nano"
## @param primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
## @param primary.podSecurityContext.enabled Enable security context
## @param primary.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param primary.podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param primary.podSecurityContext.supplementalGroups Set filesystem extra groups
## @param primary.podSecurityContext.fsGroup Group ID for the pod
##
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1001
## Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
## @param primary.containerSecurityContext.enabled Enable containers' Security Context
## @param primary.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param primary.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param primary.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param primary.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param primary.containerSecurityContext.privileged Set container's Security Context privileged
## @param primary.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param primary.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
## @param primary.containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param primary.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
##
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
## @param primary.automountServiceAccountToken Mount Service Account token in pod
##
automountServiceAccountToken: false
## @param primary.hostAliases PostgreSQL primary pods host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary)
##
hostNetwork: false
## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary)
##
hostIPC: false
## @param primary.labels Map of labels to add to the statefulset (postgresql primary)
##
labels: {}
## @param primary.annotations Annotations for PostgreSQL primary pods
##
annotations: {}
## @param primary.podLabels Map of labels to add to the pods (postgresql primary)
##
podLabels: {}
## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary)
##
podAnnotations: {}
## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## PostgreSQL Primary node affinity preset
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match. Ignored if `primary.affinity` is set.
## E.g.
## key: "kubernetes.io/e2e-az-name"
##
key: ""
## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set.
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param primary.affinity Affinity for PostgreSQL primary pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
##
topologySpreadConstraints: []
## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary)
##
priorityClassName: ""
## @param primary.schedulerName Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
##
terminationGracePeriodSeconds: ""
## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type
## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
##
updateStrategy:
type: RollingUpdate
rollingUpdate: {}
## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s)
##
extraVolumeMounts: []
## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s)
##
extraVolumes: []
## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s)
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
sidecars: []
## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s)
## Example
##
## initContainers:
## - name: do-something
## image: busybox
## command: ['do', 'something']
##
initContainers: []
## Pod Disruption Budget configuration
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
## @param primary.pdb.create Enable/disable a Pod Disruption Budget creation
## @param primary.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
## @param primary.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `primary.pdb.minAvailable` and `primary.pdb.maxUnavailable` are empty.
##
pdb:
create: true
minAvailable: ""
maxUnavailable: ""
## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s)
##
extraPodSpec: {}
## Network Policies
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
## @param primary.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
##
enabled: true
## @param primary.networkPolicy.allowExternal Don't require server label for connections
## The Policy model to apply. When set to false, only pods with the correct
## server label will have network access to the ports the server is listening
## on. When true, the server will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## @param primary.networkPolicy.allowExternalEgress Allow the pod to access any range of ports and all destinations.
##
allowExternalEgress: true
## @param primary.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
## e.g:
## extraIngress:
## - ports:
## - port: 1234
## from:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
extraIngress: []
## @param primary.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
## extraEgress:
## - ports:
## - port: 1234
## to:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
extraEgress: []
## @param primary.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
## @param primary.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
##
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## PostgreSQL Primary service configuration
##
service:
## @param primary.service.type Kubernetes Service type
##
type: ClusterIP
## @param primary.service.ports.postgresql PostgreSQL service port
##
ports:
postgresql: 5432
## Node ports to expose
## NOTE: choose port between <30000-32767>
## @param primary.service.nodePorts.postgresql Node port for PostgreSQL
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePorts:
postgresql: ""
## @param primary.service.clusterIP Static clusterIP or None for headless services
## e.g:
## clusterIP: None
##
clusterIP: ""
## @param primary.service.annotations Annotations for PostgreSQL primary service
##
annotations: {}
## @param primary.service.loadBalancerClass Load balancer class if service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class
##
loadBalancerClass: ""
## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer`
## Set the LoadBalancer service type to internal only
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param primary.service.externalTrafficPolicy Enable client source IP preservation
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service
##
extraPorts: []
## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
## If "ClientIP", consecutive client requests will be directed to the same Pod
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
##
sessionAffinity: None
## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
## Headless service properties
##
headless:
## @param primary.service.headless.annotations Additional custom annotations for headless PostgreSQL primary service
##
annotations: {}
## PostgreSQL Primary persistence configuration
##
persistence:
## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC
##
enabled: true
## @param primary.persistence.volumeName Name to assign the volume
##
volumeName: "data"
## @param primary.persistence.existingClaim Name of an existing PVC to use
##
existingClaim: ""
## @param primary.persistence.mountPath The path the volume will be mounted at
## Note: useful when using custom PostgreSQL images
##
mountPath: /bitnami/postgresql
## @param primary.persistence.subPath The subdirectory of the volume to mount to
## Useful in dev environments or when using one PV for multiple services
##
subPath: ""
## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ""
## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume
##
accessModes:
- ReadWriteOnce
## @param primary.persistence.size PVC Storage Request for PostgreSQL volume
##
size: 8Gi
## @param primary.persistence.annotations Annotations for the PVC
##
annotations: {}
## @param primary.persistence.labels Labels for the PVC
##
labels: {}
## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template)
## selector:
## matchLabels:
## app: my-app
##
selector: {}
## @param primary.persistence.dataSource Custom PVC data source
##
dataSource: {}
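## Illustrative example (storage class name is a placeholder): request a larger volume
## from a specific StorageClass instead of the cluster default:
## persistence:
##   storageClass: "fast-ssd"
##   size: 20Gi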
## PostgreSQL Primary Persistent Volume Claim Retention Policy
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
##
persistentVolumeClaimRetentionPolicy:
## @param primary.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for Primary Statefulset
##
enabled: false
## @param primary.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced
##
whenScaled: Retain
## @param primary.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted
##
whenDeleted: Retain
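## Illustrative example: delete the PVCs of scaled-down replicas while keeping data
## when the StatefulSet itself is deleted:
## persistentVolumeClaimRetentionPolicy:
##   enabled: true
##   whenScaled: Delete
##   whenDeleted: Retain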
## @section PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`)
##
readReplicas:
## @param readReplicas.name Name of the read replicas database (e.g. secondary, slave, ...)
##
name: read
## @param readReplicas.replicaCount Number of PostgreSQL read only replicas
##
replicaCount: 1
## @param readReplicas.extendedConfiguration Extended PostgreSQL read only replicas configuration (appended to main or default configuration)
## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
##
extendedConfiguration: ""
## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes
## e.g:
## extraEnvVars:
## - name: FOO
## value: "bar"
##
extraEnvVars: []
## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes
##
extraEnvVarsCM: ""
## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes
##
extraEnvVarsSecret: ""
## @param readReplicas.command Override default container command (useful when using custom images)
##
command: []
## @param readReplicas.args Override default container args (useful when using custom images)
##
args: []
## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers
## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe
## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers
## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe
## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers
## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe
## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe
## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
enabled: false
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 15
successThreshold: 1
## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one
##
customLivenessProbe: {}
## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one
##
customReadinessProbe: {}
## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one
##
customStartupProbe: {}
## @param readReplicas.lifecycleHooks Lifecycle hooks for the PostgreSQL read only container to automate configuration before or after startup
##
lifecycleHooks: {}
## PostgreSQL read only resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## @param readReplicas.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if readReplicas.resources is set (readReplicas.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "nano"
## @param readReplicas.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
## @param readReplicas.podSecurityContext.enabled Enable security context
## @param readReplicas.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param readReplicas.podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param readReplicas.podSecurityContext.supplementalGroups Set filesystem extra groups
## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod
##
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1001
## Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
## @param readReplicas.containerSecurityContext.enabled Enable containers' Security Context
## @param readReplicas.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param readReplicas.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param readReplicas.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param readReplicas.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param readReplicas.containerSecurityContext.privileged Set container's Security Context privileged
## @param readReplicas.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param readReplicas.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
## @param readReplicas.containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param readReplicas.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
##
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
## @param readReplicas.automountServiceAccountToken Mount Service Account token in pod
##
automountServiceAccountToken: false
## @param readReplicas.hostAliases PostgreSQL read only pods host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only)
##
hostNetwork: false
## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only)
##
hostIPC: false
## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only)
##
labels: {}
## @param readReplicas.annotations Annotations for PostgreSQL read only pods
##
annotations: {}
## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only)
##
podLabels: {}
## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only)
##
podAnnotations: {}
## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## PostgreSQL read only node affinity preset
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match. Ignored if `readReplicas.affinity` is set.
## E.g.
## key: "kubernetes.io/e2e-az-name"
##
key: ""
## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `readReplicas.affinity` is set.
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: readReplicas.podAffinityPreset, readReplicas.podAntiAffinityPreset, and readReplicas.nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
##
topologySpreadConstraints: []
## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only)
##
priorityClassName: ""
## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
##
terminationGracePeriodSeconds: ""
## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type
## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
##
updateStrategy:
type: RollingUpdate
rollingUpdate: {}
## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s)
##
extraVolumeMounts: []
## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s)
##
extraVolumes: []
## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s)
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
sidecars: []
## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s)
## Example
##
## initContainers:
## - name: do-something
## image: busybox
## command: ['do', 'something']
##
initContainers: []
## Pod Disruption Budget configuration
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
## @param readReplicas.pdb.create Enable/disable a Pod Disruption Budget creation
## @param readReplicas.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
## @param readReplicas.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `readReplicas.pdb.minAvailable` and `readReplicas.pdb.maxUnavailable` are empty.
##
pdb:
create: true
minAvailable: ""
maxUnavailable: ""
## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s)
##
extraPodSpec: {}
## Network Policies
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
## @param readReplicas.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
##
enabled: true
## @param readReplicas.networkPolicy.allowExternal Don't require server label for connections
## The Policy model to apply. When set to false, only pods with the correct
## server label will have network access to the ports the server is listening
## on. When true, the server will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## @param readReplicas.networkPolicy.allowExternalEgress Allow the pod to access any range of ports and all destinations.
##
allowExternalEgress: true
## @param readReplicas.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
## e.g:
## extraIngress:
## - ports:
## - port: 1234
## from:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
extraIngress: []
## @param readReplicas.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
## extraEgress:
## - ports:
## - port: 1234
## to:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
extraEgress: []
## @param readReplicas.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
## @param readReplicas.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
##
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## PostgreSQL read only service configuration
##
service:
## @param readReplicas.service.type Kubernetes Service type
##
type: ClusterIP
## @param readReplicas.service.ports.postgresql PostgreSQL service port
##
ports:
postgresql: 5432
## Node ports to expose
## NOTE: choose port between <30000-32767>
## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePorts:
postgresql: ""
## @param readReplicas.service.clusterIP Static clusterIP or None for headless services
## e.g:
## clusterIP: None
##
clusterIP: ""
## @param readReplicas.service.annotations Annotations for PostgreSQL read only service
##
annotations: {}
## @param readReplicas.service.loadBalancerClass Load balancer class if service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class
##
loadBalancerClass: ""
## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer`
## Set the LoadBalancer service type to internal only
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service
##
extraPorts: []
## @param readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
## If "ClientIP", consecutive client requests will be directed to the same Pod
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
##
sessionAffinity: None
## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
## Headless service properties
##
headless:
## @param readReplicas.service.headless.annotations Additional custom annotations for headless PostgreSQL read only service
##
annotations: {}
## PostgreSQL read only persistence configuration
##
persistence:
## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC
##
enabled: true
## @param readReplicas.persistence.existingClaim Name of an existing PVC to use
##
existingClaim: ""
## @param readReplicas.persistence.mountPath The path the volume will be mounted at
## Note: useful when using custom PostgreSQL images
##
mountPath: /bitnami/postgresql
## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to
## Useful in dev environments or when using one PV for multiple services
##
subPath: ""
## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ""
## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume
##
accessModes:
- ReadWriteOnce
## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume
##
size: 8Gi
## @param readReplicas.persistence.annotations Annotations for the PVC
##
annotations: {}
## @param readReplicas.persistence.labels Labels for the PVC
##
labels: {}
## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template)
## selector:
## matchLabels:
## app: my-app
##
selector: {}
## @param readReplicas.persistence.dataSource Custom PVC data source
##
dataSource: {}
## PostgreSQL Read only Persistent Volume Claim Retention Policy
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
##
persistentVolumeClaimRetentionPolicy:
## @param readReplicas.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for read only Statefulset
##
enabled: false
## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced
##
whenScaled: Retain
## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted
##
whenDeleted: Retain
## @section Backup parameters
## This section implements a simple logical-dump CronJob for the database.
## It only comes with the consistency guarantees of the dump program.
## It is not a snapshot-based roll-forward/roll-backward recovery backup.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/
backup:
## @param backup.enabled Enable a regularly scheduled logical dump of the database
enabled: false
cronjob:
## @param backup.cronjob.schedule Set the cronjob parameter schedule
schedule: "@daily"
## @param backup.cronjob.timeZone Set the cronjob parameter timeZone
timeZone: ""
## @param backup.cronjob.concurrencyPolicy Set the cronjob parameter concurrencyPolicy
concurrencyPolicy: Allow
## @param backup.cronjob.failedJobsHistoryLimit Set the cronjob parameter failedJobsHistoryLimit
failedJobsHistoryLimit: 1
## @param backup.cronjob.successfulJobsHistoryLimit Set the cronjob parameter successfulJobsHistoryLimit
successfulJobsHistoryLimit: 3
## @param backup.cronjob.startingDeadlineSeconds Set the cronjob parameter startingDeadlineSeconds
startingDeadlineSeconds: ""
## @param backup.cronjob.ttlSecondsAfterFinished Set the cronjob parameter ttlSecondsAfterFinished
ttlSecondsAfterFinished: ""
## @param backup.cronjob.restartPolicy Set the cronjob parameter restartPolicy
restartPolicy: OnFailure
## @param backup.cronjob.podSecurityContext.enabled Enable PodSecurityContext for CronJob/Backup
## @param backup.cronjob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param backup.cronjob.podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param backup.cronjob.podSecurityContext.supplementalGroups Set filesystem extra groups
## @param backup.cronjob.podSecurityContext.fsGroup Group ID for the CronJob
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1001
## backup container's Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
    ## @param backup.cronjob.containerSecurityContext.enabled Enable containers' Security Context
## @param backup.cronjob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param backup.cronjob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param backup.cronjob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param backup.cronjob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param backup.cronjob.containerSecurityContext.privileged Set container's Security Context privileged
## @param backup.cronjob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param backup.cronjob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
## @param backup.cronjob.containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param backup.cronjob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
## @param backup.cronjob.command Set backup container's command to run
command:
- /bin/sh
- -c
- "pg_dumpall --clean --if-exists --load-via-partition-root --quote-all-identifiers --no-password --file=${PGDUMP_DIR}/pg_dumpall-$(date '+%Y-%m-%d-%H-%M').pgdump"
## @param backup.cronjob.labels Set the cronjob labels
labels: {}
## @param backup.cronjob.annotations Set the cronjob annotations
annotations: {}
## @param backup.cronjob.nodeSelector Node labels for PostgreSQL backup CronJob pod assignment
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/
##
nodeSelector: {}
## @param backup.cronjob.tolerations Tolerations for PostgreSQL backup CronJob pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## backup cronjob container resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## @param backup.cronjob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if backup.cronjob.resources is set (backup.cronjob.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "nano"
    ## @param backup.cronjob.resources Set container requests and limits for different resources like CPU or memory
    ## Example:
    ## resources:
    ##   requests:
    ##     cpu: 1
    ##     memory: 512Mi
    ##   limits:
    ##     cpu: 2
    ##     memory: 1024Mi
    ##
    resources: {}
networkPolicy:
## @param backup.cronjob.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
##
enabled: true
storage:
## @param backup.cronjob.storage.enabled Enable using a `PersistentVolumeClaim` as backup data volume
##
enabled: true
## @param backup.cronjob.storage.existingClaim Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`)
      ## If defined, the PVC must be created manually before the volume will be bound
##
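      ## Example (illustrative; "backup-pvc" is a placeholder for a PVC that already exists in the release namespace):
      ## existingClaim: "backup-pvc"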
existingClaim: ""
      ## @param backup.cronjob.storage.resourcePolicy Set it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart is deleted
##
resourcePolicy: ""
## @param backup.cronjob.storage.storageClass PVC Storage Class for the backup data volume
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner.
##
storageClass: ""
## @param backup.cronjob.storage.accessModes PV Access Mode
##
accessModes:
- ReadWriteOnce
## @param backup.cronjob.storage.size PVC Storage Request for the backup data volume
##
size: 8Gi
## @param backup.cronjob.storage.annotations PVC annotations
##
annotations: {}
## @param backup.cronjob.storage.mountPath Path to mount the volume at
##
mountPath: /backup/pgdump
## @param backup.cronjob.storage.subPath Subdirectory of the volume to mount at
      ## Useful in dev environments or when one PV is shared by multiple services
##
subPath: ""
## Fine tuning for volumeClaimTemplates
##
volumeClaimTemplates:
## @param backup.cronjob.storage.volumeClaimTemplates.selector A label query over volumes to consider for binding (e.g. when using local volumes)
## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta for more details
##
selector: {}
## @param backup.cronjob.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the backup container
##
extraVolumeMounts: []
## @param backup.cronjob.extraVolumes Optionally specify extra list of additional volumes for the backup container
##
extraVolumes: []
## @section Volume Permissions parameters
##
## Init container parameters:
## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
##
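## Example (a minimal sketch; typically only needed when the storage provisioner does not apply fsGroup/ownership itself):
## volumePermissions:
##   enabled: true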
volumePermissions:
## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
##
enabled: false
## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry
## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository
## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
  ## @param volumePermissions.image.digest Init container volume-permissions image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
##
image:
registry: docker.io
repository: bitnami/os-shell
tag: 12-debian-12-r29
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Init container resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "nano"
## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}
  ## Init container's Security Context
  ## Note: the data folder is chown'ed to containerSecurityContext.runAsUser,
  ## not to the volumePermissions.containerSecurityContext.runAsUser set below
## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
## @param volumePermissions.containerSecurityContext.runAsGroup Group ID for the init container
## @param volumePermissions.containerSecurityContext.runAsNonRoot runAsNonRoot for the init container
## @param volumePermissions.containerSecurityContext.seccompProfile.type seccompProfile.type for the init container
##
containerSecurityContext:
seLinuxOptions: {}
runAsUser: 0
runAsGroup: 0
runAsNonRoot: false
seccompProfile:
type: RuntimeDefault
## @section Other Parameters
##
## @param serviceBindings.enabled Create secret for service binding (Experimental)
## Ref: https://servicebinding.io/service-provider/
##
serviceBindings:
enabled: false
## Service account for PostgreSQL to use.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
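## Example (illustrative only; the annotation assumes EKS IRSA and the role ARN is a placeholder):
## serviceAccount:
##   create: true
##   annotations:
##     eks.amazonaws.com/role-arn: "arn:aws:iam::123456789012:role/my-postgresql-role"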
serviceAccount:
## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod
##
create: true
## @param serviceAccount.name The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the common.names.fullname template
##
name: ""
## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
  ## Can be set to false if pods using this serviceAccount do not need to use the K8s API
##
automountServiceAccountToken: false
## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
##
annotations: {}
## Creates a Role for the ServiceAccount
## @param rbac.create Create Role and RoleBinding (required for PSP to work)
##
rbac:
create: false
## @param rbac.rules Custom RBAC rules to set
## e.g:
## rules:
## - apiGroups:
## - ""
## resources:
## - pods
## verbs:
## - get
## - list
##
rules: []
## Pod Security Policy
## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later
##
psp:
create: false
## @section Metrics Parameters
##
metrics:
  ## @param metrics.enabled Start a Prometheus exporter
##
enabled: false
## @param metrics.image.registry [default: REGISTRY_NAME] PostgreSQL Prometheus Exporter image registry
## @param metrics.image.repository [default: REPOSITORY_NAME/postgres-exporter] PostgreSQL Prometheus Exporter image repository
## @skip metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended)
  ## @param metrics.image.digest PostgreSQL Prometheus Exporter image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy
## @param metrics.image.pullSecrets Specify image pull secrets
##
image:
registry: docker.io
repository: bitnami/postgres-exporter
tag: 0.15.0-debian-12-r41
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param metrics.collectors Control enabled collectors
## ref: https://github.com/prometheus-community/postgres_exporter#flags
## Example:
## collectors:
## wal: false
collectors: {}
## @param metrics.customMetrics Define additional custom metrics
## ref: https://github.com/prometheus-community/postgres_exporter#adding-new-metrics-via-a-config-file-deprecated
## customMetrics:
## pg_database:
## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
## metrics:
## - name:
## usage: "LABEL"
## description: "Name of the database"
## - size_bytes:
## usage: "GAUGE"
## description: "Size of the database in bytes"
##
customMetrics: {}
## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter
## see: https://github.com/prometheus-community/postgres_exporter#environment-variables
## For example:
## extraEnvVars:
## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS
## value: "true"
##
extraEnvVars: []
## PostgreSQL Prometheus exporter containers' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
  ## @param metrics.containerSecurityContext.enabled Enable containers' Security Context
## @param metrics.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param metrics.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param metrics.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param metrics.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param metrics.containerSecurityContext.privileged Set container's Security Context privileged
## @param metrics.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
## @param metrics.containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param metrics.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
##
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers
## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe
## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers
## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe
## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers
## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe
## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe
## @param metrics.startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
enabled: false
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 15
successThreshold: 1
## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one
##
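  ## Example (illustrative only; assumes the default metrics container port 9187 and the exporter's landing page at "/"):
  ## customLivenessProbe:
  ##   httpGet:
  ##     path: /
  ##     port: 9187
  ##   initialDelaySeconds: 10
  ##   periodSeconds: 10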
customLivenessProbe: {}
## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one
##
customReadinessProbe: {}
## @param metrics.customStartupProbe Custom startupProbe that overrides the default one
##
customStartupProbe: {}
## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port
##
containerPorts:
metrics: 9187
## PostgreSQL Prometheus exporter resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "nano"
## @param metrics.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}
## Service configuration
##
service:
## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port
##
ports:
metrics: 9187
## @param metrics.service.clusterIP Static clusterIP or None for headless services
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
##
clusterIP: ""
## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin
## Values: ClientIP or None
## ref: https://kubernetes.io/docs/concepts/services-networking/service/
##
sessionAffinity: None
## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}"
## Prometheus Operator ServiceMonitor configuration
##
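  ## Example (a minimal sketch; "my-prometheus" is a placeholder and the "release" label must match your Prometheus Operator's serviceMonitorSelector):
  ## serviceMonitor:
  ##   enabled: true
  ##   labels:
  ##     release: my-prometheus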
serviceMonitor:
## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator
##
enabled: false
## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
##
namespace: ""
## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
interval: ""
## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
scrapeTimeout: ""
## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
##
labels: {}
## @param metrics.serviceMonitor.selector Prometheus instance selector labels
## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
##
selector: {}
## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
##
relabelings: []
## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
##
metricRelabelings: []
    ## @param metrics.serviceMonitor.honorLabels Specify the honorLabels parameter for the scrape endpoint
##
honorLabels: false
    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus.
##
jobLabel: ""
## Custom PrometheusRule to be defined
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
##
prometheusRule:
## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator
##
enabled: false
## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
##
namespace: ""
## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus
##
labels: {}
## @param metrics.prometheusRule.rules PrometheusRule definitions
    ## Make sure to constrain the rules to the current PostgreSQL service.
## rules:
## - alert: HugeReplicationLag
## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "common.names.fullname" .) }}"} / 3600 > 1
## for: 1m
## labels:
## severity: critical
## annotations:
## description: replication for {{ include "common.names.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
##
rules: []