## Atlassian Confluence Data Center Helm values # # HEADS UP! # # Data loss will occur if sections declared as 'REQUIRED' are not configured appropriately! # These sections are: # - database # - volumes # # Additional details on pre-provisioning these required resources can be found here: # https://atlassian.github.io/data-center-helm-charts/userguide/INSTALLATION/#3-configure-database # https://atlassian.github.io/data-center-helm-charts/userguide/INSTALLATION/#5-configure-persistent-storage # # To manage external access to the Confluence instance, an ingress resource can also be configured # under the 'ingress' stanza. This requires a pre-provisioned ingress controller to be present. # # Additional details on pre-provisioning an ingress controller can be found here: # https://atlassian.github.io/data-center-helm-charts/userguide/INSTALLATION/#4-configure-ingress # ## # -- The initial number of Confluence pods that should be started at deployment time. # Note that Confluence requires manual configuration via the browser post deployment # after the first pod is deployed. This configuration must be completed before # scaling up additional pods. As such this value should always be kept as 1, # but can be altered once manual configuration is complete. # replicaCount: 1 # Image configuration # image: # -- The Confluence Docker image to use # https://hub.docker.com/r/atlassian/confluence-server # repository: atlassian/confluence # -- Image pull policy # pullPolicy: IfNotPresent # -- The docker image tag to be used - defaults to the Chart appVersion # tag: "" # K8s ServiceAccount configuration. Give fine-grained identity and authorization # to Pods # serviceAccount: # -- Set to 'true' if a ServiceAccount should be created, or 'false' if it # already exists. # create: true # -- The name of the ServiceAccount to be used by the pods. 
If not specified, but # the "serviceAccount.create" flag is set to 'true', then the ServiceAccount name # will be auto-generated, otherwise the 'default' ServiceAccount will be used. # https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server # name: # -- For Docker images hosted in private registries, define the list of image pull # secrets that should be utilized by the created ServiceAccount # https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod # imagePullSecrets: [] # - name: secretName # -- Annotations to add to the ServiceAccount (if created) # annotations: {} role: # -- Create a role for Hazelcast client with privileges to get and list pods and endpoints in the namespace. # Set to false if you need to create a Role and RoleBinding manually # create: true # Define permissions # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole # clusterRole: # -- Set to 'true' if a ClusterRole should be created, or 'false' if it # already exists. # create: true # -- The name of the ClusterRole to be used. If not specified, but # the "serviceAccount.clusterRole.create" flag is set to 'true', # then the ClusterRole name will be auto-generated. # name: confluence # -- Grant permissions defined in Role (list and get pods and endpoints) to a service account. # roleBinding: # Set to false if you need to create a Role and RoleBinding manually # create: true # Grant permissions defined in ClusterRole # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding # clusterRoleBinding: # -- Set to 'true' if a ClusterRoleBinding should be created, or 'false' if it # already exists. # create: true # -- The name of the ClusterRoleBinding to be created. 
If not specified, but # the "serviceAccount.clusterRoleBinding.create" flag is set to 'true', # then the ClusterRoleBinding name will be auto-generated. # name: confluence # Use EKS IRSA. See: https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html # eksIrsa: # Defining a full arn of AWS role to assume will automatically annotate confluence, # e.g. `eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/S3Access`. # "-Daws.webIdentityTokenFile=/var/run/secrets/eks.amazonaws.com/serviceaccount/token" will be automatically added to Confluence JVM arguments roleArn: # REQUIRED - Database configuration # # Confluence requires a backend database. The configuration below can be used to define the # database to use and its connection details. # https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#database-connectivity # database: # -- The database type that should be used. If not specified, then it will need to be # provided via the browser during manual configuration post deployment. Valid values # include: # - 'postgresql' # - 'mysql' # - 'oracle' # - 'mssql' # https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databasetype # type: postgresql # -- The jdbc URL of the database. If not specified, then it will need to be provided # via the browser during manual configuration post deployment. Example URLs include: # - 'jdbc:postgresql://:5432/' # - 'jdbc:mysql:///' # - 'jdbc:sqlserver://:1433;databaseName=' # - 'jdbc:oracle:thin:@:1521:' # https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databaseurl # url: # JDBC connection credentials # credentials: # -- The name of the K8s Secret that contains the database login credentials. # If the secret is specified, then the credentials will be automatically utilised on # Confluence startup. 
If the secret is not provided, then the credentials will need to be # provided via the browser during manual configuration post deployment. # # Example of creating a database credentials K8s secret below: # 'kubectl create secret generic --from-literal=username= \ # --from-literal=password=' # https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets # secretName: credentials-db # -- The key ('username') in the Secret used to store the database login username # usernameSecretKey: username # -- The key ('password') in the Secret used to store the database login password # passwordSecretKey: password # REQUIRED - Volume configuration # # By default, the charts will configure the local-home, synchrony-home and shared-home as ephemeral # volumes i.e. 'emptyDir: {}'. This is fine for evaluation purposes but for production # deployments this is not ideal and so local-home, synchrony-home and shared-home should all be configured # appropriately. # https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#volumes # volumes: # Each pod requires its own volume for 'local-home'. This is needed for key data # that help define how Confluence works. # https://confluence.atlassian.com/doc/confluence-home-and-other-important-directories-590259707.html # localHome: # Dynamic provisioning of local-home using the K8s Storage Classes # # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#dynamic # https://atlassian.github.io/data-center-helm-charts/examples/storage/aws/LOCAL_STORAGE/ # persistentVolumeClaim: # -- If 'true', then a 'PersistentVolume' and 'PersistentVolumeClaim' will be dynamically # created for each pod based on the 'StorageClassName' supplied below. # create: false # -- Specify the name of the 'StorageClass' that should be used for the local-home # volume claim. # storageClassName: # -- Specifies the standard K8s resource requests for the local-home # volume claims. 
# resources: requests: storage: 10Gi # -- Static provisioning of local-home using K8s PVs and PVCs # # NOTE: Due to the ephemeral nature of pods this approach to provisioning volumes for # pods is not recommended. Dynamic provisioning described above is the prescribed # approach. # # When 'persistentVolumeClaim.create' is 'false', then this value can be used to define # a standard K8s volume that will be used for the local-home volume(s). If not defined, # then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify # the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static # customVolume: {} # persistentVolumeClaim: # claimName: "" # -- Specifies the path in the Confluence container to which the local-home volume will be # mounted. # mountPath: "/var/atlassian/application-data/confluence" # A volume for 'shared-home' is required by Confluence to effectively operate in multi-node # environment # https://confluence.atlassian.com/doc/set-up-a-confluence-data-center-cluster-982322030.html#SetupaConfluenceDataCentercluster-Setupandconfigureyourcluster # sharedHome: # Dynamic provisioning of shared-home using the K8s Storage Class # # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#dynamic # persistentVolumeClaim: # -- If 'true', then a 'PersistentVolumeClaim' and 'PersistentVolume' will be dynamically # created for shared-home based on the 'StorageClassName' supplied below. # create: false # -- Specify the name of the 'StorageClass' that should be used for the 'shared-home' # volume claim. # storageClassName: vsan-default-storage-policy # -- Specifies the standard K8s resource requests limits for the shared-home # volume claims. 
# resources: requests: storage: 10Gi # -- Static provisioning of shared-home using K8s PVs and PVCs # # When 'persistentVolumeClaim.create' is 'false', then this value can be used to define # a standard K8s volume that will be used for the shared-home volume. If not defined, # then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify # the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static # https://atlassian.github.io/data-center-helm-charts/examples/storage/aws/SHARED_STORAGE/ # customVolume: {} # persistentVolumeClaim: # claimName: "" # -- Specifies the path in the Confluence container to which the shared-home volume will be # mounted. # mountPath: "/var/atlassian/application-data/shared-home" # -- Specifies the sub-directory of the shared-home volume that will be mounted in to the # Confluence container. # subPath: # Modify permissions on shared-home # nfsPermissionFixer: # -- If 'true', this will alter the shared-home volume's root directory so that Confluence # can write to it. This is a workaround for a K8s bug affecting NFS volumes: # https://github.com/kubernetes/examples/issues/260 # enabled: true # -- The path in the K8s initContainer where the shared-home volume will be mounted # mountPath: "/shared-home" # -- Image repository for the permission fixer init container. Defaults to alpine # imageRepo: alpine # -- Image tag for the permission fixer init container. Defaults to latest # imageTag: latest # -- By default, the fixer will change the group ownership of the volume's root directory # to match the Confluence container's GID (2002), and then ensures the directory is # group-writeable. If this is not the desired behaviour, command used can be specified # here. # command: # Each synchrony pod needs its own volume for 'synchrony-home'. 
The Synchrony process will write logs to that location # and any configuration files can be placed there. # synchronyHome: # Dynamic provisioning of synchrony-home using the K8s Storage Classes # # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#dynamic # https://atlassian.github.io/data-center-helm-charts/examples/storage/aws/LOCAL_STORAGE/ # persistentVolumeClaim: # -- If 'true', then a 'PersistentVolume' and 'PersistentVolumeClaim' will be dynamically # created for each pod based on the 'StorageClassName' supplied below. # create: true # -- Specify the name of the 'StorageClass' that should be used for the synchrony-home # volume claim. # storageClassName: vsan-default-storage-policy # -- Specifies the standard K8s resource requests for the synchrony-home # volume claims. # resources: requests: storage: 10Gi # -- Static provisioning of synchrony-home using K8s PVs and PVCs # # NOTE: Due to the ephemeral nature of pods this approach to provisioning volumes for # pods is not recommended. Dynamic provisioning described above is the prescribed # approach. # # When 'persistentVolumeClaim.create' is 'false', then this value can be used to define # a standard K8s volume that will be used for the synchrony-home volume(s). If not defined, # then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify # the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static # customVolume: {} # persistentVolumeClaim: # claimName: "" # -- Specifies the path in the Synchrony container to which the synchrony-home volume will be # mounted. # mountPath: "/var/atlassian/application-data/confluence" # -- Defines additional volumes that should be applied to all Confluence pods. 
# Note that this will not create any corresponding volume mounts; # those needs to be defined in confluence.additionalVolumeMounts # additional: [] # -- Defines additional volumes that should be applied to all Synchrony pods. # Note that this will not create any corresponding volume mounts; # those needs to be defined in synchrony.additionalVolumeMounts # additionalSynchrony: [] # -- Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511 # Typically overridden in volumes from Secrets and ConfigMaps to make mounted files executable # defaultPermissionsMode: 484 # Ingress configuration # # To make the Atlassian product available from outside the K8s cluster an Ingress # Controller should be pre-provisioned. With this in place the configuration below # can be used to configure an appropriate Ingress Resource. # https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#ingress # ingress: # -- Set to 'true' if an Ingress Resource should be created. This depends on a # pre-provisioned Ingress Controller being available. # create: true # -- The class name used by the ingress controller if it's being used. # # Please follow documentation of your ingress controller. If the cluster # contains multiple ingress controllers, this setting allows you to control # which of them is used for Atlassian application traffic. # className: "contour" # -- Set to 'true' if the Ingress Resource is to use the K8s 'ingress-nginx' # controller. # https://kubernetes.github.io/ingress-nginx/ # # This will populate the Ingress Resource with annotations that are specific to # the K8s ingress-nginx controller. Set to 'false' if a different controller is # to be used, in which case the appropriate annotations for that controller must # be specified below under 'ingress.annotations'. # nginx: false # -- The max body size to allow. 
Requests exceeding this size will result # in an HTTP 413 error being returned to the client. # maxBodySize: 250m # -- Defines a timeout for establishing a connection with a proxied server. It should # be noted that this timeout cannot usually exceed 75 seconds. # proxyConnectTimeout: 60 # -- Defines a timeout for reading a response from the proxied server. The timeout is # set only between two successive read operations, not for the transmission of the # whole response. If the proxied server does not transmit anything within this time, # the connection is closed. # proxyReadTimeout: 60 # -- Sets a timeout for transmitting a request to the proxied server. The timeout is set # only between two successive write operations, not for the transmission of the whole # request. If the proxied server does not receive anything within this time, the # connection is closed. # proxySendTimeout: 60 # -- The fully-qualified hostname (FQDN) of the Ingress Resource. Traffic coming in on # this hostname will be routed by the Ingress Resource to the appropriate backend # Service. # # Example 1 host: atlassian-test.saltlabs.cloud # Example 2 #host: confluence.apps.saltlabs.cloud # -- The base path for the Ingress Resource. For example '/confluence'. Based on a # 'ingress.host' value of 'company.k8s.com' this would result in a URL of # 'company.k8s.com/confluence'. Default value is 'confluence.service.contextPath' # Example 1 #path: "" # Example 2 path: "/confluence" # -- The custom annotations that should be applied to the Ingress Resource # when NOT using the K8s ingress-nginx controller. # annotations: cert-manager.io/issuer: ca-selfsigned-issuer # -- Set to 'true' if browser communication with the application should be TLS # (HTTPS) enforced. # https: false # -- The name of the K8s Secret that contains the TLS private key and corresponding # certificate. When utilised, TLS termination occurs at the ingress point where # traffic to the Service, and it's Pods is in plaintext. 
# # Usage is optional and depends on your use case. The Ingress Controller itself # can also be configured with a TLS secret for all Ingress Resources. # https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets # https://kubernetes.io/docs/concepts/services-networking/ingress/#tls # tlsSecretName: # Confluence configuration # confluence: # K8s Confluence Service configuration # service: # -- The port on which the Confluence K8s Service will listen # port: 80 # -- The type of K8s service to use for Confluence # type: ClusterIP # -- Use specific loadBalancerIP. Only applies to service type LoadBalancer. # loadBalancerIP: # -- The Tomcat context path that Confluence will use. The ATL_TOMCAT_CONTEXTPATH # will be set automatically. # contextPath: # -- Additional annotations to apply to the Service # annotations: {} # Standard K8s field that holds pod-level security attributes and common container settings. # https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ # Do not populate when deploying to OpenShift, unless anyuid policy is attached to a service account. # # -- Whether to apply security context to pod. # securityContextEnabled: true securityContext: # -- The GID used by the Confluence docker image # GID will default to 2002 if not supplied and securityContextEnabled is set to true. # This is intended to ensure that the shared-home volume is group-writeable by the GID used by the Confluence container. # However, this doesn't appear to work for NFS volumes due to a K8s bug: https://github.com/kubernetes/examples/issues/260 fsGroup: 2002 # -- Standard K8s field that holds security configurations that will be applied to a container. # https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ # containerSecurityContext: {} # -- The umask used by the Confluence process when it creates new files. # The default is 0022. 
This gives the new files: # - read/write permissions for the Confluence user # - read permissions for everyone else. # umask: "0022" # -- Boolean to define whether to set local home directory permissions on startup # of Confluence container. Set to 'false' to disable this behaviour. # setPermissions: true # Port definitions # ports: # -- The port on which the Confluence container listens for HTTP traffic # http: 8090 # -- The port on which the Confluence container listens for Hazelcast traffic # hazelcast: 5701 # Confluence licensing details # license: # -- The name of the K8s Secret that contains the Confluence license key. If specified, then # the license will be automatically populated during Confluence setup. Otherwise, it will # need to be provided via the browser after initial startup. An Example of creating # a K8s secret for the license below: # 'kubectl create secret generic --from-literal=license-key= # https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets # secretName: # -- The key in the K8s Secret that contains the Confluence license key # secretKey: license-key # Confirm that Confluence is up and running with a ReadinessProbe # https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes # readinessProbe: # -- The initial delay (in seconds) for the Confluence container readiness probe, # after which the probe will start running. # initialDelaySeconds: 10 # -- How often (in seconds) the Confluence container readiness probe will run # periodSeconds: 5 # -- The number of consecutive failures of the Confluence container readiness probe # before the pod fails readiness checks. # failureThreshold: 6 # Confluence log configuration # accessLog: # -- Set to 'true' if access logging should be enabled. # enabled: true # -- The path within the Confluence container where the local-home volume should be # mounted in order to capture access logs. 
# mountPath: "/opt/atlassian/confluence/logs" # -- The subdirectory within the local-home volume where access logs should be # stored. # localHomeSubPath: "logs" # Data Center clustering # clustering: # -- Set to 'true' if Data Center clustering should be enabled # This will automatically configure cluster peer discovery between cluster nodes. # enabled: false # -- Set to 'true' if the K8s pod name should be used as the end-user-visible # name of the Data Center cluster node. # usePodNameAsClusterNodeName: true # Use AWS S3 to store attachments. From Confluence 8.1 onwards. # s3AttachmentsStorage: # Bucket name to store attachments. If a bucket name and region (see below) are defined, Confluence will automatically # use AWS S3 to store attachments. Only bucket name is required, not the full arn. # bucketName: # AWS region where the S3 bucket is located. # bucketRegion: # -- EXPERIMENTAL Feature! Override the default AWS API endpoint with a custom one, for example to use # Minio as object storage https://min.io/ # endpointOverride: # Confluence Pod resource requests # resources: # JVM Memory / Heap Size definitions. The values below are based on the # defaults defined for the Confluence docker container. # https://bitbucket.org/atlassian-docker/docker-atlassian-confluence-server/src/master/#markdown-header-memory-heap-size # jvm: # -- The maximum amount of heap memory that will be used by the Confluence JVM # maxHeap: "1g" # -- The minimum amount of heap memory that will be used by the Confluence JVM # minHeap: "1g" # -- The memory reserved for the Confluence JVM code cache # reservedCodeCache: "256m" # Specifies the standard K8s resource requests and/or limits for the Confluence # container. It is important that if the memory resources are specified here, # they must allow for the size of the Confluence JVM. That means the maximum heap # size, the reserved code cache size, plus other JVM overheads, must be # accommodated. 
Allowing for (maxHeap+codeCache)*1.5 would be an example. # container: requests: # -- Initial CPU request by Confluence pod. # cpu: "2" # -- Initial Memory request by Confluence pod # memory: "2G" # limits: # cpu: "2" # memory: "2G" shutdown: # -- The termination grace period for pods during shutdown. This # should be set to the Confluence internal grace period (default 20 # seconds), plus a small buffer to allow the JVM to fully terminate. # terminationGracePeriodSeconds: 25 # -- By default pods will be stopped via a [preStop hook](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/), # using a script supplied by the Docker image. If any other # shutdown behaviour is needed it can be achieved by overriding # this value. Note that the shutdown command needs to wait for the # application shutdown completely before exiting; see [the default # command](https://bitbucket.org/atlassian-docker/docker-atlassian-confluence-server/src/master/shutdown-wait.sh) # for details. # command: "/shutdown-wait.sh" # -- The Docker entrypoint.py generates application configuration on # first start; not all of these files are regenerated on subsequent starts. # By default, confluence.cfg.xml is generated only once. Set `forceConfigUpdate` to true # to change this behaviour. # forceConfigUpdate: false # -- Specifies a list of additional arguments that can be passed to the Confluence JVM, e.g. # system properties. # additionalJvmArgs: [] # -- Specifies a list of additional Java libraries that should be added to the # Confluence container. Each item in the list should specify the name of the volume # that contains the library, as well as the name of the library file within that # volume's root directory. Optionally, a subDirectory field can be included to # specify which directory in the volume contains the library file. 
Additional details: # https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ # additionalLibraries: [] # - volumeName: # subDirectory: # fileName: # -- Specifies a list of additional Confluence plugins that should be added to the # Confluence container. Note plugins installed via this method will appear as # bundled plugins rather than user plugins. These should be specified in the same # manner as the 'additionalLibraries' property. Additional details: # https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ # # NOTE: only .jar files can be loaded using this approach. OBR's can be extracted # (unzipped) to access the associated .jar # # An alternative to this method is to install the plugins via "Manage Apps" in the # product system administration UI. # additionalBundledPlugins: [] # - volumeName: # subDirectory: # fileName: # -- Defines any additional volumes mounts for the Confluence container. These # can refer to existing volumes, or new volumes can be defined via # 'volumes.additional'. # additionalVolumeMounts: [] # -- Defines any additional environment variables to be passed to the Confluence # container. See https://hub.docker.com/r/atlassian/confluence-server for # supported variables. # additionalEnvironmentVariables: [] # -- Defines any additional ports for the Confluence container. # additionalPorts: [] # - name: jmx # containerPort: 5555 # protocol: TCP # -- Defines additional volumeClaimTemplates that should be applied to the Confluence pod. # Note that this will not create any corresponding volume mounts; # those needs to be defined in confluence.additionalVolumeMounts # additionalVolumeClaimTemplates: [] # - name: myadditionalvolumeclaim # storageClassName: # resources: # requests: # storage: 1Gi # -- Defines topology spread constraints for Confluence pods. 
See details: # https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ # topologySpreadConstraints: [] # - maxSkew: 1 # topologyKey: kubernetes.io/hostname # whenUnsatisfiable: ScheduleAnyway # labelSelector: # matchLabels: # app.kubernetes.io/name: confluence # Debugging # jvmDebug: # -- Set to 'true' for remote debugging. Confluence JVM will be started with debugging # port 5005 open. enabled: false # Confluence Synchrony configuration # https://confluence.atlassian.com/doc/configuring-synchrony-858772125.html synchrony: # -- Set to 'true' if Synchrony (i.e. collaborative editing) should be enabled. # This will result in a separate StatefulSet and Service to be created for Synchrony. # If disabled, then collaborative editing will be disabled in Confluence. enabled: false # -- Number of Synchrony pods # replicaCount: 1 # -- Custom annotations that will be applied to all Synchrony pods. # When undefined, default to '.Values.podAnnotations' which are Confluence pod annotations (if defined) podAnnotations: {} # name: # K8s Synchrony Service configuration # service: # -- The port on which the Synchrony K8s Service will listen # port: 80 # -- The type of K8s service to use for Synchrony # type: ClusterIP # -- Use specific loadBalancerIP. Only applies to service type LoadBalancer. # loadBalancerIP: # -- Annotations to apply to Synchrony Service # annotations: {} securityContextEnabled: true securityContext: # -- The GID used by the Confluence docker image # GID will default to 2002 if not supplied and securityContextEnabled is set to true. # This is intended to ensure that the shared-home volume is group-writeable by the GID used by the Confluence container. # However, this doesn't appear to work for NFS volumes due to a K8s bug: https://github.com/kubernetes/examples/issues/260 fsGroup: 2002 # -- Standard K8s field that holds security configurations that will be applied to a container. 
# https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ # containerSecurityContext: {} # -- Boolean to define whether to set synchrony home directory permissions on startup # of Synchrony container. Set to 'false' to disable this behaviour. # setPermissions: true # Port definitions # ports: # -- The port on which the Synchrony container listens for HTTP traffic # http: 8091 # -- The port on which the Synchrony container listens for Hazelcast traffic # hazelcast: 5701 # Confirm that Synchrony is up and running with a ReadinessProbe # https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes # readinessProbe: # -- The healthcheck path to check against for the Synchrony container # useful when configuring behind a reverse-proxy or loadbalancer # https://confluence.atlassian.com/confkb/cannot-enable-collaborative-editing-on-synchrony-cluster-962962742.html # healthcheckPath: "/synchrony/heartbeat" # -- The initial delay (in seconds) for the Synchrony container readiness probe, # after which the probe will start running. # initialDelaySeconds: 5 # -- How often (in seconds) the Synchrony container readiness probe will run # periodSeconds: 1 # -- The number of consecutive failures of the Synchrony container readiness probe # before the pod fails readiness checks. # failureThreshold: 10 # Synchrony Pod resource requests # resources: # JVM Memory / Heap Size definitions. The values below are based on the # defaults defined for the Synchrony docker container. # jvm: # -- The maximum amount of heap memory that will be used by the Synchrony JVM # minHeap: "1g" # -- The minimum amount of heap memory that will be used by the Synchrony JVM # maxHeap: "2g" # -- The memory allocated for the Synchrony stack # stackSize: "2048k" # Specifies the standard K8s resource requests and/or limits for the Synchrony # container. 
It is important that if the memory resources are specified here, # they must allow for the size of the Synchrony JVM. That means the maximum heap # size, the reserved code cache size, plus other JVM overheads, must be # accommodated. Allowing for (maxHeap+codeCache)*1.5 would be an example. # container: requests: # -- Initial CPU request by Synchrony pod # cpu: "2" # -- Initial Memory request Synchrony pod # memory: "2.5G" # -- Specifies a list of additional arguments that can be passed to the Synchrony JVM, e.g. # system properties. # additionalJvmArgs: [] #- -Dsynchrony.example.system.property=46 shutdown: # -- The termination grace period for pods during shutdown. This # should be set to the Synchrony internal grace period (default 20 # seconds), plus a small buffer to allow the JVM to fully terminate. terminationGracePeriodSeconds: 25 # -- Specifies a list of additional Java libraries that should be added to the # Synchrony container. Each item in the list should specify the name of the volume # that contains the library, as well as the name of the library file within that # volume's root directory. Optionally, a subDirectory field can be included to # specify which directory in the volume contains the library file. Additional details: # https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ # additionalLibraries: [] # - volumeName: # subDirectory: # fileName: # -- Defines any additional volumes mounts for the Synchrony container. These # can refer to existing volumes, or new volumes can be defined via # 'volumes.additionalSynchrony'. # additionalVolumeMounts: [] # -- Defines any additional ports for the Synchrony container. # additionalPorts: [] # - name: jmx # containerPort: 5555 # protocol: TCP # -- Defines topology spread constraints for Synchrony pods. 
  # See details:
  # https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  #
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: kubernetes.io/hostname
  #   whenUnsatisfiable: ScheduleAnyway
  #   labelSelector:
  #     matchLabels:
  #       app.kubernetes.io/name: confluence-synchrony

# Fluentd configuration
#
# Confluence log collection and aggregation can be enabled using Fluentd. This config
# assumes an existing ELK stack has been stood up and is available.
# https://www.fluentd.org/
#
fluentd:
  # -- Set to 'true' if the Fluentd sidecar (DaemonSet) should be added to each pod
  #
  enabled: false

  # -- The Fluentd sidecar image repository
  #
  imageRepo: harbor.saltlabs.cloud/docker.io/fluent/fluentd-kubernetes-daemonset

  # -- The Fluentd sidecar image tag
  #
  imageTag: v1.11.5-debian-elasticsearch7-1.2

  # -- The command used to start Fluentd. If not supplied the default command
  # will be used: "fluentd -c /fluentd/etc/fluent.conf -v"
  #
  # Note: The custom command can be free-form, however pay particular attention to
  # the process that should ultimately be left running in the container. This process
  # should be invoked with 'exec' so that signals are appropriately propagated to it,
  # for instance SIGTERM. An example of how such a command may look is:
  # "<command 1> && <command 2> && exec <primary command>"
  command:

  # -- Set to 'true' if a custom config (see 'configmap-fluentd.yaml' for default)
  # should be used for Fluentd. If enabled this config must be supplied via the
  # 'fluentdCustomConfig' property below.
#
  customConfigFile: false

  # -- Custom fluent.conf file
  #
  fluentdCustomConfig: {}
  # NOTE(review): example below restored from the chart's stock fluent.conf sample —
  # the <source>/<parse> tags had been stripped by a text-extraction step; verify
  # against the upstream chart if this example is relied upon.
  # fluent.conf: |
  #   <source>
  #     @type tail
  #     <parse>
  #     @type multiline
  #     format_firstline /\d{4}-\d{1,2}-\d{1,2}/
  #     </parse>
  #     path /opt/atlassian/confluence/logs/access_log.*
  #     pos_file /tmp/confluencelog.pos
  #     tag confluence-access-logs
  #   </source>

  # -- The port on which the Fluentd sidecar will listen
  #
  httpPort: 9880

  # Elasticsearch config based on your ELK stack
  #
  elasticsearch:
    # -- Set to 'true' if Fluentd should send all log events to an Elasticsearch service.
    #
    enabled: true

    # -- The hostname of the Elasticsearch service that Fluentd should send logs to.
    #
    hostname: elasticsearch

    # -- The prefix of the Elasticsearch index name that will be used
    #
    indexNamePrefix: confluence

  # -- Specify custom volumes to be added to Fluentd container (e.g. more log sources)
  #
  extraVolumes: []
  # - name: local-home
  #   mountPath: /opt/atlassian/confluence/logs
  #   subPath: log
  #   readOnly: true

# -- Custom annotations that will be applied to all Confluence pods
#
podAnnotations: {}
# name: <value>

# -- Custom labels that will be applied to all Confluence pods
#
podLabels: {}
# name: <value>

# -- Standard K8s node-selectors that will be applied to all Confluence pods
#
nodeSelector: {}
# name: <value>

# -- Standard K8s tolerations that will be applied to all Confluence pods
#
tolerations: []
# - effect: <value>
#   operator: <value>
#   key: <value>

# -- Standard K8s affinities that will be applied to all Confluence pods
#
affinity: {}
# name: <value>

# -- Standard K8s schedulerName that will be applied to all Confluence pods.
# Check Kubernetes documentation on how to configure multiple schedulers:
# https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#specify-schedulers-for-pods
#
schedulerName:

# -- Additional container definitions that will be added to all Confluence pods
#
additionalContainers: []
# - name: <name>
#   image: <image>:<tag>

# -- Additional initContainer definitions that will be added to all Confluence pods
#
additionalInitContainers: []
# - name: <name>
#   image: <image>:<tag>

# -- Additional labels that should be applied to all resources
#
additionalLabels: {}
# name: <value>

# -- Additional existing ConfigMaps and Secrets not managed by Helm that should be
# mounted into service container. Configuration details below (camelCase is important!):
# 'name'      - References existing ConfigMap or secret name.
# 'type'      - 'configMap' or 'secret'
# 'key'       - The file name.
# 'mountPath' - The destination directory in a container.
# VolumeMount and Volumes are added with this name and index position, for example;
# custom-config-0, keystore-2
#
additionalFiles: []
# - name: custom-config
#   type: configMap
#   key: log4j.properties
#   mountPath: /var/atlassian
# - name: custom-config
#   type: configMap
#   key: web.xml
#   mountPath: /var/atlassian
# - name: keystore
#   type: secret
#   key: keystore.jks
#   mountPath: /var/ssl

# -- Additional host aliases for each pod, equivalent to adding them to the /etc/hosts file.
# https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
additionalHosts: []
# - ip: "127.0.0.1"
#   hostnames:
#   - "foo.local"
#   - "bar.local"