# We may need to label nodes with clickhouse=allow label for this example to run
# See ./label_nodes.sh for this purpose
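# As a minimal sketch (the node name `node1` below is hypothetical), such a label can be applied with:
#   kubectl label nodes node1 clickhouse=allow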
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallation"
metadata:
name: "clickhouse-installation-max"
labels:
label1: label1_value
label2: label2_value
annotations:
annotation1: annotation1_value
annotation2: annotation2_value
spec:
# Allows defining a custom taskID for a CHI update and watching the status of this update's execution.
# Displayed in all .status.taskID* fields.
# By default (if not filled in) every update of the CHI manifest will generate a random taskID
taskID: "qweqwe"
# Allows stopping all ClickHouse clusters defined in a CHI.
# Works as follows:
# - When `stop` is `1` the operator sets `Replicas: 0` in each StatefulSet. This leads to all `Pod`s and `Service`s being deleted. All PVCs are kept intact.
# - When `stop` is `0` the operator sets `Replicas: 1`, `Pod`s and `Service`s are created again, and all retained PVCs are attached to `Pod`s.
stop: "no"
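# As an illustrative sketch (assuming the resource name above and the `chi` short name registered by the operator's CRD),
# the installation could be stopped with a patch like:
#   kubectl patch chi clickhouse-installation-max --type=merge -p '{"spec":{"stop":"yes"}}'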
# When 'RollingUpdate' is specified, the operator will always restart ClickHouse pods during reconcile.
# This option is used in rare cases when a forced restart is required and is typically removed after use in order to avoid unneeded restarts.
restart: "RollingUpdate"
# Allows troubleshooting Pods in CrashLoopBackOff state.
# This may happen when a wrong configuration is applied; in this case `clickhouse-server` wouldn't start.
# The command within the ClickHouse container is modified with `sleep` in order to avoid quick restarts
# and give time to troubleshoot via CLI.
# Liveness and Readiness probes are disabled as well.
troubleshoot: "no"
# Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
# Typical use scenario - custom cluster domain in Kubernetes cluster
namespaceDomainPattern: "%s.svc.my.test"
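# For example, with the pattern above, resources in namespace `test` (hypothetical) would get
# DNS names under `test.svc.my.test` instead of the default `test.svc.cluster.local`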
# Optional, applicable inside ClickHouseInstallationTemplate only.
# Defines the application policy of the current ClickHouseInstallationTemplate towards target ClickHouseInstallation(s).
templating:
# When defined as `auto` inside a ClickHouseInstallationTemplate, this ClickHouseInstallationTemplate
# will be auto-added into ClickHouseInstallation(s) selected by `chiSelector`.
# Default value is `manual`, meaning a ClickHouseInstallation should request this ClickHouseInstallationTemplate explicitly.
# Possible values:
# - "auto"
# - "manual"
policy: "manual"
# Optional, defines a selector for ClickHouseInstallation(s) to be templated with this ClickHouseInstallationTemplate
chiSelector:
name1: value1
name2: value2
# TODO introduce one-time actions
# chiTaskID: "qweqwe"
# autoPurge: "yes"
# Optional, allows tuning the reconcile cycle for a ClickHouseInstallation on the clickhouse-operator side
reconciling:
# DISCUSSED TO BE DEPRECATED
# Syntactic sugar
# Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
# Possible values:
# - wait - should wait to exclude host, complete queries and include host back into the cluster
# - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
policy: "nowait"
# Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
# More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
configMapPropagationTimeout: 90
# Optional, defines behavior for cleaning up Kubernetes resources during the reconcile cycle
cleanup:
# Describes what clickhouse-operator should do with Kubernetes resources it finds which should be managed by clickhouse-operator,
# but do not have an `ownerReference` to any currently managed `ClickHouseInstallation` resource.
# Default behavior is `Delete`
unknownObjects:
# Behavior policy for unknown StatefulSet, `Delete` by default
statefulSet: Delete
# Behavior policy for unknown PVC, `Delete` by default
pvc: Delete
# Behavior policy for unknown ConfigMap, `Delete` by default
configMap: Delete
# Behavior policy for unknown Service, `Delete` by default
service: Delete
# Describes what clickhouse-operator should do with Kubernetes resources which failed during reconcile.
# Default behavior is `Retain`
reconcileFailedObjects:
# Behavior policy for failed StatefulSet, `Retain` by default
statefulSet: Retain
# Behavior policy for failed PVC, `Retain` by default
pvc: Retain
# Behavior policy for failed ConfigMap, `Retain` by default
configMap: Retain
# Behavior policy for failed Service, `Retain` by default
service: Retain
# List of templates used by a CHI
useTemplates:
- name: template1
namespace: ns1
useType: merge
- name: template2
# No namespace specified - use CHI namespace
defaults:
replicasUseFQDN: "no"
distributedDDL:
profile: default
storageManagement:
# Specify PVC provisioner.
# 1. StatefulSet. PVC would be provisioned by the StatefulSet
# 2. Operator. PVC would be provisioned by the operator
provisioner: StatefulSet
# Specify PVC reclaim policy.
# 1. Retain. Keep PVC from being deleted
# Retaining PVC will also keep backing PV from deletion. This is useful in case we need to keep data intact.
# 2. Delete
reclaimPolicy: Retain
templates:
hostTemplate: host-template-custom-ports
podTemplate: clickhouse-v23.8
dataVolumeClaimTemplate: default-volume-claim
logVolumeClaimTemplate: default-volume-claim
serviceTemplate: chi-service-template
clusterServiceTemplate: cluster-service-template
shardServiceTemplate: shard-service-template
replicaServiceTemplate: replica-service-template
configuration:
zookeeper:
nodes:
- host: zookeeper-0.zookeepers.zoo3ns.svc.cluster.local
port: 2181
- host: zookeeper-1.zookeepers.zoo3ns.svc.cluster.local
port: 2181
- host: zookeeper-2.zookeepers.zoo3ns.svc.cluster.local
port: 2181
session_timeout_ms: 30000
operation_timeout_ms: 10000
root: "/path/to/zookeeper/root/node"
identity: "user:password"
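# A rough sketch of the server configuration this section is rendered into
# (element layout assumed from the standard ClickHouse <zookeeper> configuration format):
# <yandex>
#   <zookeeper>
#     <node><host>zookeeper-0.zookeepers.zoo3ns.svc.cluster.local</host><port>2181</port></node>
#     <node><host>zookeeper-1.zookeepers.zoo3ns.svc.cluster.local</host><port>2181</port></node>
#     <node><host>zookeeper-2.zookeepers.zoo3ns.svc.cluster.local</host><port>2181</port></node>
#     <session_timeout_ms>30000</session_timeout_ms>
#     <operation_timeout_ms>10000</operation_timeout_ms>
#     <root>/path/to/zookeeper/root/node</root>
#     <identity>user:password</identity>
#   </zookeeper>
# </yandex>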
users:
readonly/profile: readonly
# <users>
# <readonly>
# <profile>readonly</profile>
# </readonly>
# </users>
test/networks/ip:
- "127.0.0.1"
- "::/0"
# <users>
# <test>
# <networks>
# <ip>127.0.0.1</ip>
# <ip>::/0</ip>
# </networks>
# </test>
# </users>
test/profile: default
test/quotas: default
profiles:
readonly/readonly: "1"
# <profiles>
# <readonly>
# <readonly>1</readonly>
# </readonly>
# </profiles>
default/max_memory_usage: "1000000000"
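# By the same path-to-XML expansion illustrated above, this setting corresponds to:
# <profiles>
# <default>
# <max_memory_usage>1000000000</max_memory_usage>
# </default>
# </profiles>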
quotas:
default/interval/duration: "3600"
# <quotas>
# <default>
# <interval>
# <duration>3600</duration>
# </interval>
# </default>
# </quotas>
settings:
compression/case/method: zstd
# <compression>
# <case>
# <method>zstd</method>
# </case>
# </compression>
disable_internal_dns_cache: 1
# <disable_internal_dns_cache>1</disable_internal_dns_cache>
files:
dict1.xml: |
<yandex>
<!-- ref to file /etc/clickhouse-data/config.d/source1.csv -->
</yandex>
source1.csv: |
a1,b1,c1,d1
a2,b2,c2,d2
clusters:
- name: all-counts
templates:
podTemplate: clickhouse-v23.8
dataVolumeClaimTemplate: default-volume-claim
logVolumeClaimTemplate: default-volume-claim
schemaPolicy:
replica: All
shard: All
layout:
shardsCount: 3
replicasCount: 2
- name: shards-only
templates:
podTemplate: clickhouse-v23.8
dataVolumeClaimTemplate: default-volume-claim
logVolumeClaimTemplate: default-volume-claim
layout:
shardsCount: 3
# replicasCount not specified, assumed = 1, by default
- name: replicas-only
templates:
podTemplate: clickhouse-v23.8
dataVolumeClaimTemplate: default-volume-claim
logVolumeClaimTemplate: default-volume-claim
layout:
# shardsCount not specified, assumed = 1, by default
replicasCount: 3
- name: customized
templates:
podTemplate: clickhouse-v23.8
dataVolumeClaimTemplate: default-volume-claim
logVolumeClaimTemplate: default-volume-claim
schemaPolicy:
replica: None
shard: None
layout:
shards:
- name: shard0
replicasCount: 3
weight: 1
internalReplication: Disabled
templates:
podTemplate: clickhouse-v23.8
dataVolumeClaimTemplate: default-volume-claim
logVolumeClaimTemplate: default-volume-claim
- name: shard1
templates:
podTemplate: clickhouse-v23.8
dataVolumeClaimTemplate: default-volume-claim
logVolumeClaimTemplate: default-volume-claim
replicas:
- name: replica0
- name: replica1
- name: replica2
- name: shard2
replicasCount: 3
templates:
podTemplate: clickhouse-v23.8
dataVolumeClaimTemplate: default-volume-claim
logVolumeClaimTemplate: default-volume-claim
replicaServiceTemplate: replica-service-template
replicas:
- name: replica0
tcpPort: 9000
httpPort: 8123
interserverHTTPPort: 9009
templates:
podTemplate: clickhouse-v23.8
dataVolumeClaimTemplate: default-volume-claim
logVolumeClaimTemplate: default-volume-claim
replicaServiceTemplate: replica-service-template
- name: with-secret
# Insecure communication.
# Opens/Closes insecure ports
insecure: "yes"
# Secure communication.
# Opens/Closes secure ports
# Translates into <secure>1</secure> ClickHouse setting for remote replicas
secure: "yes"
# Shared secret value to secure cluster communications
secret:
# Auto-generate shared secret value to secure cluster communications
auto: "True"
# Cluster shared secret value in plain text
value: "plaintext secret"
# Cluster shared secret source
valueFrom:
secretKeyRef:
name: "SecretName"
key: "Key"
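# A rough sketch (assumed, based on the standard ClickHouse cluster configuration format) of what
# `secure` and `secret` translate into in the generated <remote_servers> entry for this cluster:
# <remote_servers>
#   <with-secret>
#     <secret>plaintext secret</secret>
#     <shard>
#       <replica>
#         <host>(replica host)</host>
#         <secure>1</secure>
#       </replica>
#     </shard>
#   </with-secret>
# </remote_servers>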
layout:
shardsCount: 2
templates:
hostTemplates:
- name: host-template-custom-ports
portDistribution:
- type: "ClusterScopeIndex"
spec:
# Insecure communication.
# Opens/Closes insecure ports
insecure: "yes"
# Secure communication.
# Opens/Closes secure ports
# Translates into <secure>1</secure> ClickHouse setting for remote replicas
secure: "no"
tcpPort: 7000
httpPort: 7001
interserverHTTPPort: 7002
#settings:
#files:
#templates:
serviceTemplates:
- name: chi-service-template
# generateName understands different sets of macros,
# depending on the level of the object for which the Service is being created:
#
# For CHI-level Service:
# 1. {chi} - ClickHouseInstallation name
# 2. {chiID} - short hashed ClickHouseInstallation name (BEWARE, this is an experimental feature)
#
# For Cluster-level Service:
# 1. {chi} - ClickHouseInstallation name
# 2. {chiID} - short hashed ClickHouseInstallation name (BEWARE, this is an experimental feature)
# 3. {cluster} - cluster name
# 4. {clusterID} - short hashed cluster name (BEWARE, this is an experimental feature)
# 5. {clusterIndex} - 0-based index of the cluster in the CHI (BEWARE, this is an experimental feature)
#
# For Shard-level Service:
# 1. {chi} - ClickHouseInstallation name
# 2. {chiID} - short hashed ClickHouseInstallation name (BEWARE, this is an experimental feature)
# 3. {cluster} - cluster name
# 4. {clusterID} - short hashed cluster name (BEWARE, this is an experimental feature)
# 5. {clusterIndex} - 0-based index of the cluster in the CHI (BEWARE, this is an experimental feature)
# 6. {shard} - shard name
# 7. {shardID} - short hashed shard name (BEWARE, this is an experimental feature)
# 8. {shardIndex} - 0-based index of the shard in the cluster (BEWARE, this is an experimental feature)
#
# For Replica-level Service:
# 1. {chi} - ClickHouseInstallation name
# 2. {chiID} - short hashed ClickHouseInstallation name (BEWARE, this is an experimental feature)
# 3. {cluster} - cluster name
# 4. {clusterID} - short hashed cluster name (BEWARE, this is an experimental feature)
# 5. {clusterIndex} - 0-based index of the cluster in the CHI (BEWARE, this is an experimental feature)
# 6. {shard} - shard name
# 7. {shardID} - short hashed shard name (BEWARE, this is an experimental feature)
# 8. {shardIndex} - 0-based index of the shard in the cluster (BEWARE, this is an experimental feature)
# 9. {replica} - replica name
# 10. {replicaID} - short hashed replica name (BEWARE, this is an experimental feature)
# 11. {replicaIndex} - 0-based index of the replica in the shard (BEWARE, this is an experimental feature)
# 12. {chiScopeIndex} - 0-based index of the host in the chi (BEWARE, this is an experimental feature)
# 13. {chiScopeCycleIndex} - 0-based index of the host's cycle in the chi-scope (BEWARE, this is an experimental feature)
# 14. {chiScopeCycleOffset} - 0-based offset of the host in the chi-scope cycle (BEWARE, this is an experimental feature)
# 15. {clusterScopeIndex} - 0-based index of the host in the cluster (BEWARE, this is an experimental feature)
# 16. {clusterScopeCycleIndex} - 0-based index of the host's cycle in the cluster-scope (BEWARE, this is an experimental feature)
# 17. {clusterScopeCycleOffset} - 0-based offset of the host in the cluster-scope cycle (BEWARE, this is an experimental feature)
# 18. {shardScopeIndex} - 0-based index of the host in the shard (BEWARE, this is an experimental feature)
# 19. {replicaScopeIndex} - 0-based index of the host in the replica (BEWARE, this is an experimental feature)
# 20. {clusterScopeCycleHeadPointsToPreviousCycleTail} - 0-based cluster-scope index of previous cycle tail
generateName: "service-{chi}"
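# For example, with the macro above and the CHI named `clickhouse-installation-max`,
# the resulting Service name would be `service-clickhouse-installation-max`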
# type ObjectMeta struct from k8s.io/meta/v1
metadata:
labels:
custom.label: "custom.value"
annotations:
# For more details on Internal Load Balancer check
# https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
cloud.google.com/load-balancer-type: "Internal"
service.beta.kubernetes.io/aws-load-balancer-internal: "true"
service.beta.kubernetes.io/azure-load-balancer-internal: "true"
service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true"
# NLB Load Balancer
service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
# type ServiceSpec struct from k8s.io/core/v1
spec:
ports:
- name: http
port: 8123
- name: tcp
port: 9000
type: LoadBalancer
- name: replica-service-template
# type ServiceSpec struct from k8s.io/core/v1
spec:
ports:
- name: http
port: 8123
- name: tcp
port: 9000
- name: interserver
port: 9009
type: ClusterIP
clusterIP: None
- name: preserve-client-source-ip
# For more details on Preserving Client Source IP check
# https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
spec:
selector:
app: example
ports:
- name: http
port: 8123
- name: tcp
port: 9000
- name: interserver
port: 9009
externalTrafficPolicy: Local
type: LoadBalancer
volumeClaimTemplates:
- name: default-volume-claim
# Specify PVC provisioner.
# 1. StatefulSet. PVC would be provisioned by the StatefulSet
# 2. Operator. PVC would be provisioned by the operator
provisioner: StatefulSet
# Specify PVC reclaim policy.
# 1. Retain. Keep PVC from being deleted
# Retaining PVC will also keep backing PV from deletion. This is useful in case we need to keep data intact.
# 2. Delete
reclaimPolicy: Retain
# type ObjectMeta struct {} from k8s.io/meta/v1
metadata:
labels:
a: "b"
# type PersistentVolumeClaimSpec struct from k8s.io/core/v1
spec:
# 1. If storageClassName is not specified, the default StorageClass
# (must be specified by the cluster administrator) will be used for provisioning
# 2. If storageClassName is set to an empty string (''), no storage class will be used,
# dynamic provisioning is disabled for this PVC. Existing "Available" PVs
# (that do not have a specified storageClassName) will be considered for binding to the PVC
#storageClassName: gold
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
- name: volume-claim-retain-pvc
# Specify PVC provisioner.
# 1. StatefulSet. PVC would be provisioned by the StatefulSet
# 2. Operator. PVC would be provisioned by the operator
provisioner: StatefulSet
# Specify PVC reclaim policy.
# 1. Retain. Keep PVC from being deleted
# Retaining PVC will also keep backing PV from deletion. This is useful in case we need to keep data intact.
# 2. Delete
reclaimPolicy: Retain
# type ObjectMeta struct {} from k8s.io/meta/v1
metadata:
labels:
a: "b"
# type PersistentVolumeClaimSpec struct from k8s.io/core/v1
spec:
# 1. If storageClassName is not specified, the default StorageClass
# (must be specified by the cluster administrator) will be used for provisioning
# 2. If storageClassName is set to an empty string (''), no storage class will be used,
# dynamic provisioning is disabled for this PVC. Existing "Available" PVs
# (that do not have a specified storageClassName) will be considered for binding to the PVC
#storageClassName: gold
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
podTemplates:
# multiple pod templates make it possible to update versions smoothly
# pod template for ClickHouse v23.8
- name: clickhouse-v23.8
# We may need to label nodes with clickhouse=allow label for this example to run
# See ./label_nodes.sh for this purpose
zone:
key: "clickhouse"
values:
- "allow"
# Shortcut version for AWS installations
#zone:
# values:
# - "us-east-1a"
# Possible values for podDistribution are:
# Unspecified - empty value
# ClickHouseAntiAffinity - AntiAffinity by ClickHouse instances.
# Pod pushes away other ClickHouse pods, which allows one ClickHouse instance per topologyKey-specified unit
# CH - (push away) - CH - (push away) - CH
# ShardAntiAffinity - AntiAffinity by shard name.
# Pod pushes away other pods of the same shard (replicas of this shard),
# which allows one replica of a shard instance per topologyKey-specified unit.
# Other shards are allowed - it does not push all shards away, but CH-instances of the same shard only.
# Used for data loss avoidance - keeps all copies of the shard on different topologyKey-specified units.
# shard1,replica1 - (push away) - shard1,replica2 - (push away) - shard1,replica3
# ReplicaAntiAffinity - AntiAffinity by replica name.
# Pod pushes away other pods of the same replica (shards of this replica),
# which allows one shard of a replica per topologyKey-specified unit.
# Other replicas are allowed - it does not push all replicas away, but CH-instances of the same replica only.
# Used to evenly distribute load from "full cluster scan" queries.
# shard1,replica1 - (push away) - shard2,replica1 - (push away) - shard3,replica1
# AnotherNamespaceAntiAffinity - AntiAffinity by "another" namespace.
# Pod pushes away pods from another namespace, which allows same-namespace pods per topologyKey-specified unit.
# ns1 - (push away) - ns2 - (push away) - ns3
# AnotherClickHouseInstallationAntiAffinity - AntiAffinity by "another" ClickHouseInstallation name.
# Pod pushes away pods from another ClickHouseInstallation,
# which allows same-ClickHouseInstallation pods per topologyKey-specified unit.
# CHI1 - (push away) - CHI2 - (push away) - CHI3
# AnotherClusterAntiAffinity - AntiAffinity by "another" cluster name.
# Pod pushes away pods from another Cluster,
# which allows same-cluster pods per topologyKey-specified unit.
# cluster1 - (push away) - cluster2 - (push away) - cluster3
# MaxNumberPerNode - AntiAffinity by cycle index.
# Pod pushes away pods from the same cycle,
# which allows specifying the maximum number of ClickHouse instances per topologyKey-specified unit.
# Used to set up circular replication.
# NamespaceAffinity - Affinity by namespace.
# Pod attracts pods from the same namespace,
# which allows pods from same namespace per topologyKey-specified unit.
# ns1 + (attracts) + ns1
# ClickHouseInstallationAffinity - Affinity by ClickHouseInstallation name.
# Pod attracts pods from the same ClickHouseInstallation,
# which allows pods from the same CHI per topologyKey-specified unit.
# CHI1 + (attracts) + CHI1
# ClusterAffinity - Affinity by cluster name.
# Pod attracts pods from the same cluster,
# which allows pods from the same Cluster per topologyKey-specified unit.
# cluster1 + (attracts) + cluster1
# ShardAffinity - Affinity by shard name.
# Pod attracts pods from the same shard,
# which allows pods from the same Shard per topologyKey-specified unit.
# shard1 + (attracts) + shard1
# ReplicaAffinity - Affinity by replica name.
# Pod attracts pods from the same replica,
# which allows pods from the same Replica per topologyKey-specified unit.
# replica1 + (attracts) + replica1
# PreviousTailAffinity - Affinity to overlap cycles. Used to shape cyclic pod distribution
# cycle head + (attracts to) + previous cycle tail
podDistribution:
- type: ShardAntiAffinity
- type: MaxNumberPerNode
number: 2
# Apply podDistribution on per-host basis
topologyKey: "kubernetes.io/hostname"
# Apply podDistribution on per-zone basis
#topologyKey: "kubernetes.io/zone"
# type ObjectMeta struct {} from k8s.io/meta/v1
metadata:
labels:
a: "b"
# type PodSpec struct {} from k8s.io/core/v1
spec:
containers:
- name: clickhouse
image: clickhouse/clickhouse-server:23.8
volumeMounts:
- name: default-volume-claim
mountPath: /var/lib/clickhouse
resources:
requests:
memory: "64Mi"
cpu: "100m"
limits:
memory: "64Mi"
cpu: "100m"
- name: clickhouse-log
image: clickhouse/clickhouse-server:23.8
command:
- "/bin/sh"
- "-c"
- "--"
args:
- "while true; do sleep 30; done;"
# pod template for ClickHouse v23.8
- name: clickhouse-v23.8
# type ObjectMeta struct {} from k8s.io/meta/v1
metadata:
labels:
a: "b"
# type PodSpec struct {} from k8s.io/core/v1
spec:
containers:
- name: clickhouse
image: clickhouse/clickhouse-server:23.8
volumeMounts:
- name: default-volume-claim
mountPath: /var/lib/clickhouse
resources:
requests:
memory: "64Mi"
cpu: "100m"
limits:
memory: "64Mi"
cpu: "100m"
- name: clickhouse-log
image: clickhouse/clickhouse-server:23.8
command:
- "/bin/sh"
- "-c"
- "--"
args:
- "while true; do sleep 30; done;"