-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathsub_schema_values.yaml.j2
355 lines (299 loc) · 14.5 KB
/
sub_schema_values.yaml.j2
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
{#
Copyright 2024 New Vector Ltd
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
#}
{% macro generatedFileWarning() %}
# This file is generated. Do not edit directly. Edit source/values.yaml.j2 instead to make changes
{%- endmacro %}
{% macro ess() %}
## Common configuration that impacts all components in the chart
## The matrix-tools image, used in multiple components
matrixTools:
{{ image(registry="ghcr.io", repository="element-hq/ess-helm/matrix-tools", tag="0.3.0") | indent(2) }}
## CertManager Issuer to configure by default automatically on all ingresses
## If configured, the chart will automatically generate the tlsSecret name for all ingresses
certManager: {}
## Choose one of clusterIssuer or issuer
# clusterIssuer:
# issuer:
## The server name of the Matrix Stack. This gets embedded in user IDs & room IDs
## It can not change after the initial deployment.
# serverName: ess.localhost
{{ labels(global=true) }}
{{ ingress(global=true) }}
## A list of Secrets in this namespace to use as pull Secrets.
## Ignored if a given component specifies its own pull Secrets.
## e.g.
## imagePullSecrets:
## - name: ess-pull-secret
imagePullSecrets: []
{{ tolerations(global=true) }}
{{ topologySpreadConstraints(global=true) }}
{%- endmacro %}
{% macro containersSecurityContext(key='containersSecurityContext') %}
## A subset of SecurityContext. ContainersSecurityContext holds pod-level security attributes and common container settings
{{ key }}:
## Controls whether a process can gain more privileges than its parent process.
## This bool directly controls whether the no_new_privs flag gets set on the container process.
## allowPrivilegeEscalation is always true when the container is run as privileged, or has CAP_SYS_ADMIN
allowPrivilegeEscalation: false
## Give a process some privileges, but not all the privileges of the root user.
capabilities:
## Privileges to add.
# add: []
## Privileges to drop.
drop:
- ALL
## Mounts the container's root filesystem as read-only.
readOnlyRootFilesystem: true
## To set the Seccomp profile for a Container, include the seccompProfile field in the securityContext section of your Pod or Container manifest.
## The seccompProfile field is a SeccompProfile object consisting of type and localhostProfile. Valid options for type include RuntimeDefault, Unconfined, and Localhost.
## localhostProfile must only be set if type is Localhost. It indicates the path of the pre-configured profile on the node, relative to the kubelet's configured Seccomp profile location (configured with the --root-dir flag).
# seccompProfile:
# type: RuntimeDefault
{%- endmacro %}
{% macro credential(credential_description, key, initIfAbsent=False, commented=False) %}
## {{ credential_description }}.
{%- if initIfAbsent %}
## This secret is optional, and will be generated by the `initSecrets` job
## if it is empty.
{%- endif %}
## It can either be provided inline in the Helm chart e.g.:
## {{ key }}:
## value: SecretValue
##
## Or it can be provided via an existing Secret e.g.:
## {{ key }}:
## secret: existing-secret
## secretKey: key-in-secret
{% if commented %}# {% endif %}{{ key }}: {}
{%- endmacro %}
{% macro extraEnv(key='extraEnv') %}
## Defines additional environment variables to be injected onto this workload
## e.g.
## {{ key }}:
## - name: FOO
## value: "bar"
{{ key }}: []
{%- endmacro %}
{% macro hostAliases(key="hostAliases") %}
## The list of host aliases to configure on the pod spec.
## Using this feature should be avoided as much as possible.
## Please prefer using a DNS entry to resolve your hostnames.
## This can be used as a workaround when entries cannot be resolved using DNS, for example for our automated testing.
## e.g.
## {{ key }}:
## - ip: 192.0.2.1 # An IP resolution to add to /etc/hosts
## # A list of hostnames to be associated with the above IP
## hostnames:
## - ess.localhost
## - synapse.ess.localhost
{{ key }}: []
{%- endmacro %}
{% macro image(registry, repository, tag='', key='image') %}
## Details of the image to be used
{{ key }}:
## The host and (optional) port of the container image registry for this component.
## If not specified Docker Hub is implied
registry: {{ registry }}
## The path in the registry where the container image is located
repository: {{ repository }}
## The tag of the container image to use.
## Defaults to the Chart's appVersion if not set
{%- if tag | length > 0 %}
tag: "{{ tag }}"
{%- else %}
# tag:
{%- endif %}
## Container digest to use. Used to pull the image instead of the image tag / Chart appVersion if set
# digest:
## Whether the image should be pulled on container startup. Valid values are Always, IfNotPresent and Never
## If this isn't provided it defaults to Always when using the image tag / Chart appVersion or
## IfNotPresent if using a digest
# pullPolicy:
## A list of pull secrets to use for this image
## e.g.
## pullSecrets:
## - name: dockerhub
pullSecrets: []
{%- endmacro %}
{% macro ingress(global=false, key='ingress') %}
{%- if global %}
## How all ingresses should be constructed by default, unless overridden
{%- else %}
## How this ingress should be constructed
{%- endif %}
{{ key }}:
{%- if not global %}
## What hostname should be used for this Ingress
# host:
{% endif %}
## Annotations to be added to {{ 'all Ingresses. Will be merged with component specific Ingress annotations' if global else 'this Ingress' }}
annotations: {}
## What Ingress Class Name that should be used for {{ 'all Ingresses by default' if global else 'this Ingress' }}
# className:
## The name of the Secret containing the TLS certificate and the key that should be used for {{ 'all Ingresses by default' if global else 'this Ingress' }}
# tlsSecret:
## How the {{ 'Services' if global else 'Service' }} behind {{ 'all Ingresses' if global else 'this Ingress' }} is constructed{{ ' by default' if global else '' }}
{%- if global %}
service:
type: ClusterIP
{%- else %}
service: {}
{%- endif %}
{%- endmacro %}
{% macro labels(global=false, key='labels') %}
## Labels to add to all manifests {{ 'for all components in this chart' if global else 'for this component' }}
{{ key }}: {}
{%- endmacro %}
{% macro nodeSelector(key='nodeSelector') %}
## NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
# {{ key }}: {}
{%- endmacro %}
{% macro persistentVolumeClaim(key) %}
## Configures the PersistentVolumeClaim to be used for storage
{{ key }}:
## Name of an existing PersistentVolumeClaim in this namespace that should be used
# existingClaim:
## The size of a PersistentVolumeClaim to be constructed
## Ignored if existingClaim is provided
size: 10Gi
## The StorageClass to be used by the constructed PersistentVolumeClaim.
## Will use the cluster default if not provided
## Ignored if existingClaim is provided
# storageClass:
{%- endmacro %}
{% macro podSecurityContext(user_id, group_id, filesystem_group_id=-1, key='podSecurityContext') %}
## A subset of PodSecurityContext. PodSecurityContext holds pod-level security attributes and common container settings
{{ key }}:
## A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to
## change the ownership of that volume to be owned by the pod:
##
## 1. The owning GID will be the FSGroup
## 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
## 3. The permission bits are OR'd with rw-rw----
##
## If unset, the Kubelet will not modify the ownership and permissions of any volume.
fsGroup: {{ group_id if (filesystem_group_id == -1) else filesystem_group_id }}
## fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod.
## This field will only apply to volume types which support fsGroup based ownership(and permissions).
## It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
# fsGroupChangePolicy:
## The GID to run the entrypoint of the container process. Uses runtime default if unset.
runAsGroup: {{ group_id }}
## Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed.
runAsNonRoot: true
## The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified.
runAsUser: {{ user_id }}
## SELinuxOptions are the labels to be applied to all the pod containers
# seLinuxOptions:
## Level is SELinux level label that applies to the container.
# level:
## Role is a SELinux role label that applies to the container.
# role:
## Type is a SELinux type label that applies to the container.
# type:
## User is a SELinux user label that applies to the container.
# user:
## To set the Seccomp profile for a Container, include the seccompProfile field in the securityContext section of your Pod or Container manifest.
## The seccompProfile field is a SeccompProfile object consisting of type and localhostProfile.
## Valid options for type include RuntimeDefault, Unconfined, and Localhost. localhostProfile must only be set if type is Localhost.
## It indicates the path of the pre-configured profile on the node, relative to the kubelet's configured Seccomp profile location (configured with the --root-dir flag).
seccompProfile:
# localhostProfile:
type: RuntimeDefault
## A list of groups applied to the first process run in each container, in addition to the container's primary GID.
## If unspecified, no groups will be added to any container.
supplementalGroups: []
{%- endmacro %}
{% macro postgresLibPQ(key="postgres") %}
## Details of the Postgres Database to use
{{ key }}: {}
## PostgreSQL database host
# host:
## PostgreSQL port
# port: 5432
## PostgreSQL username
# user:
## PostgreSQL database name
# database:
## TLS settings to use for the PostgreSQL connection
# sslMode: prefer
{{ (credential("PostgreSQL password", "password", commented=True) | indent(2)) }}
{%- endmacro %}
{% macro resources(requests_memory, requests_cpu, limits_memory, key='resources') %}
## Kubernetes resources to allocate to each instance.
{{ key }}:
## Requests describes the minimum amount of compute resources required. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
requests:
memory: {{ requests_memory }}
cpu: {{ requests_cpu }}
## Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
limits:
memory: {{ limits_memory }}
{%- endmacro %}
{% macro serviceAccount(key='serviceAccount') %}
## Controls configuration of the ServiceAccount for this component
{{ key }}:
## Whether a ServiceAccount should be created by the chart or not
create: true
## What name to give the ServiceAccount. If not provided the chart will provide the name automatically
name: ""
## Annotations to add to the service account
annotations: {}
{%- endmacro %}
{% macro serviceMonitors(key='serviceMonitors') %}
## Whether to deploy ServiceMonitors into the cluster for this component
## Requires the ServiceMonitor CRDs to be in the cluster
{{ key }}:
enabled: true
{%- endmacro %}
{% macro tolerations(global=false, key='tolerations') %}
## Workload tolerations allows Pods that are part of {{ 'a' if global else 'this' }} (sub)component to 'tolerate' any taint that matches the triple <key,value,effect> using the matching operator <operator>.
##
## * effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
## * key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
## * operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
## * value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
##
## * tolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
## e.g.
## {{ key }}:
## - effect:
## key:
## operator:
## value:
{{ key }}: []
{%- endmacro %}
{% macro topologySpreadConstraints(global=false, key='topologySpreadConstraints') %}
## TopologySpreadConstraints describes how Pods for {{ 'a' if global else 'this' }} component should be spread between nodes.
## https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ for in-depth details
## labelSelector can be omitted and the chart will populate a sensible value for {{ 'each' if global else 'this' }} component.
## Similarly `pod-template-hash` will be added to `matchLabelKeys` if appropriate for {{ 'each' if global else 'this' }} component.
## If any TopologySpreadConstraints are provided for a component any global TopologySpreadConstraints are ignored for that component.
## e.g.
## {{ key }}:
## - maxSkew: 1
## topologyKey: topology.kubernetes.io/zone
## # nodeAffinityPolicy: Honor/Ignore
## # nodeTaintsPolicy: Honor/Ignore
## # whenUnsatisfiable: DoNotSchedule/ScheduleAnyway
{{ key }}: []
{%- endmacro %}
{% macro workloadAnnotations(key='annotations') %}
## Defines the annotations to add to the workload
# {{ key }}: {}
{%- endmacro %}
{% macro additionalConfig(key='additional') %}
## This can be provided in-line in the Helm Chart and/or via an existing Secret
## e.g.
## additional:
## 0-customConfig:
## config: |
## <any valid configuration>
## 1-customConfig:
## configSecret: custom-config
## configSecretKey: shared.yaml
##
## Most settings are configurable but some settings are owned by the chart and can't overwritten
{{ key }}: {}
{% endmacro %}