From c110a6f7ad04d9e598113d9d90a8d5e38e99962d Mon Sep 17 00:00:00 2001 From: Kilian Ries Date: Mon, 24 Nov 2025 15:48:07 +0100 Subject: [PATCH 001/233] fix chk grafana dashboard --- .../ClickHouseKeeper_dashboard.json | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/grafana-dashboard/ClickHouseKeeper_dashboard.json b/grafana-dashboard/ClickHouseKeeper_dashboard.json index 2b47c9419..832e1f052 100644 --- a/grafana-dashboard/ClickHouseKeeper_dashboard.json +++ b/grafana-dashboard/ClickHouseKeeper_dashboard.json @@ -154,9 +154,9 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "ClickHouseAsyncMetrics_KeeperAvgLatency{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}", + "expr": "ClickHouseAsyncMetrics_KeeperAvgLatency{namespace=~\"$namespace\", pod=~\"$pod\"}", "interval": "", - "legendFormat": "avg {{namespace}}.{{pod_name}}", + "legendFormat": "avg {{namespace}}.{{pod}}", "refId": "A" }, { @@ -164,9 +164,9 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "ClickHouseAsyncMetrics_KeeperMaxLatency{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}", + "expr": "ClickHouseAsyncMetrics_KeeperMaxLatency{namespace=~\"$namespace\", pod=~\"$pod\"}", "interval": "", - "legendFormat": "max {{namespace}}.{{pod_name}}", + "legendFormat": "max {{namespace}}.{{pod}}", "refId": "B" } ], @@ -261,10 +261,10 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "ClickHouseMetrics_KeeperAliveConnections{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}", + "expr": "ClickHouseMetrics_KeeperAliveConnections{namespace=~\"$namespace\", pod=~\"$pod\"}", "hide": false, "interval": "", - "legendFormat": "{{namespace}}.{{pod_name}}", + "legendFormat": "{{namespace}}.{{pod}}", "refId": "A" } ], @@ -358,10 +358,10 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": 
"irate(ClickHouseAsyncMetrics_KeeperPacketsSent{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}[1m])", + "expr": "irate(ClickHouseAsyncMetrics_KeeperPacketsSent{namespace=~\"$namespace\", pod=~\"$pod\"}[1m])", "hide": false, "interval": "", - "legendFormat": "OUT {{namespace}}.{{pod_name}}", + "legendFormat": "OUT {{namespace}}.{{pod}}", "refId": "A" }, { @@ -369,9 +369,9 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "-irate(ClickHouseAsyncMetrics_KeeperPacketsReceived{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}[1m])", + "expr": "-irate(ClickHouseAsyncMetrics_KeeperPacketsReceived{namespace=~\"$namespace\", pod=~\"$pod\"}[1m])", "interval": "", - "legendFormat": "IN {{namespace}}.{{pod_name}}", + "legendFormat": "IN {{namespace}}.{{pod}}", "refId": "B" } ], @@ -465,9 +465,9 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "ClickHouseAsyncMetrics_KeeperZnodeCount{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}", + "expr": "ClickHouseAsyncMetrics_KeeperZnodeCount{namespace=~\"$namespace\", pod=~\"$pod\"}", "interval": "", - "legendFormat": "{{namespace}}.{{pod_name}}", + "legendFormat": "{{namespace}}.{{pod}}", "refId": "A" } ], @@ -562,9 +562,9 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "ClickHouseAsyncMetrics_KeeperWatchCount{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}", + "expr": "ClickHouseAsyncMetrics_KeeperWatchCount{namespace=~\"$namespace\", pod=~\"$pod\"}", "interval": "", - "legendFormat": "{{namespace}}.{{pod_name}}", + "legendFormat": "{{namespace}}.{{pod}}", "refId": "A" } ], @@ -659,9 +659,9 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "ClickHouseAsyncMetrics_KeeperEphemeralsCount{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}", + "expr": 
"ClickHouseAsyncMetrics_KeeperEphemeralsCount{namespace=~\"$namespace\", pod=~\"$pod\"}", "interval": "", - "legendFormat": "{{namespace}}.{{pod_name}}", + "legendFormat": "{{namespace}}.{{pod}}", "refId": "A" } ], @@ -755,9 +755,9 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "ClickHouseAsyncMetrics_KeeperApproximateDataSize{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}", + "expr": "ClickHouseAsyncMetrics_KeeperApproximateDataSize{namespace=~\"$namespace\", pod=~\"$pod\"}", "interval": "", - "legendFormat": "{{namespace}}.{{pod_name}}", + "legendFormat": "{{namespace}}.{{pod}}", "refId": "A" } ], @@ -868,9 +868,9 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "irate(ClickHouseMetrics_KeeperOutstandingRequests{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}[1m])", + "expr": "irate(ClickHouseMetrics_KeeperOutstandingRequests{namespace=~\"$namespace\", pod=~\"$pod\"}[1m])", "interval": "", - "legendFormat": "{{namespace}}.{{pod_name}}", + "legendFormat": "{{namespace}}.{{pod}}", "refId": "A" } ], @@ -965,9 +965,9 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "ClickHouseAsyncMetrics_KeeperOpenFileDescriptorCount{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}", + "expr": "ClickHouseAsyncMetrics_KeeperOpenFileDescriptorCount{namespace=~\"$namespace\", pod=~\"$pod\"}", "interval": "", - "legendFormat": "{{namespace}}.{{pod_name}}", + "legendFormat": "{{namespace}}.{{pod}}", "refId": "A" } ], @@ -990,14 +990,14 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "definition": "label_values(up{container_name=\"clickhouse-keeper\"},namespace)", + "definition": "label_values({__name__=~\"ClickHouse.*Keeper.*\",pod=~\"chk-clickhouse-keeper.*\"},namespace)", "includeAll": true, "multi": true, "name": "namespace", "options": [], "query": { "qryType": 1, - "query": 
"label_values(up{container_name=\"clickhouse-keeper\"},namespace)", + "query": "label_values({__name__=~\"ClickHouse.*Keeper.*\",pod=~\"chk-clickhouse-keeper.*\"},namespace)", "refId": "PrometheusVariableQueryEditor-VariableQuery" }, "refresh": 2, @@ -1011,14 +1011,14 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "definition": "label_values(up{container_name=\"clickhouse-keeper\", namespace=~\"$namespace\"},pod_name)", + "definition": "label_values({__name__=~\"ClickHouse.*Keeper.*\",pod=~\"chk-clickhouse-keeper.*\",namespace=~\"$namespace\"},pod)", "includeAll": true, "multi": true, - "name": "pod_name", + "name": "pod", "options": [], "query": { "qryType": 1, - "query": "label_values(up{container_name=\"clickhouse-keeper\", namespace=~\"$namespace\"},pod_name)", + "query": "label_values({__name__=~\"ClickHouse.*Keeper.*\",pod=~\"chk-clickhouse-keeper.*\",namespace=~\"$namespace\"},pod)", "refId": "PrometheusVariableQueryEditor-VariableQuery" }, "refresh": 2, From bb83a098856a959ca71bed48c194c88afdea987c Mon Sep 17 00:00:00 2001 From: Kilian Ries Date: Thu, 27 Nov 2025 17:54:58 +0100 Subject: [PATCH 002/233] fix macos setup --- ...q_transform_clickhouse-operator-install.sh | 7 +++++- .../install-grafana-operator.sh | 7 +++++- deploy/minio/install-minio-operator.sh | 22 +++++++++++++------ 3 files changed, 27 insertions(+), 9 deletions(-) diff --git a/deploy/devspace/yq_transform_clickhouse-operator-install.sh b/deploy/devspace/yq_transform_clickhouse-operator-install.sh index 8b1c7bba5..db5ec1eed 100755 --- a/deploy/devspace/yq_transform_clickhouse-operator-install.sh +++ b/deploy/devspace/yq_transform_clickhouse-operator-install.sh @@ -14,4 +14,9 @@ yq eval -e --inplace "(select(.kind == \"Deployment\" and .metadata.name == \"cl yq eval -e --inplace '(select(.kind == "Deployment" and .metadata.name == "clickhouse-operator") | .spec.template.spec.containers[] | select(.name=="metrics-exporter") | .imagePullPolicy) = "IfNotPresent"' 
"${CUR_DIR}/clickhouse-operator-install.yaml" yq eval -e --inplace '(select(.kind == "Deployment" and .metadata.name == "clickhouse-operator") | .spec.template.spec.containers[] | select(.name=="metrics-exporter") | .securityContext.capabilities.add) = ["SYS_PTRACE"]' "${CUR_DIR}/clickhouse-operator-install.yaml" -sed -i "s/namespace: kube-system/namespace: ${OPERATOR_NAMESPACE}/" "${CUR_DIR}/clickhouse-operator-install.yaml" +# Use sed with compatibility for both macOS and Linux +if [[ "$(uname)" == "Darwin" ]]; then + sed -i '' "s/namespace: kube-system/namespace: ${OPERATOR_NAMESPACE}/" "${CUR_DIR}/clickhouse-operator-install.yaml" +else + sed -i "s/namespace: kube-system/namespace: ${OPERATOR_NAMESPACE}/" "${CUR_DIR}/clickhouse-operator-install.yaml" +fi diff --git a/deploy/grafana/grafana-with-grafana-operator/install-grafana-operator.sh b/deploy/grafana/grafana-with-grafana-operator/install-grafana-operator.sh index dce405fbd..814f9b76e 100755 --- a/deploy/grafana/grafana-with-grafana-operator/install-grafana-operator.sh +++ b/deploy/grafana/grafana-with-grafana-operator/install-grafana-operator.sh @@ -65,7 +65,12 @@ echo "Setup Grafana operator into ${GRAFANA_NAMESPACE} namespace" kubectl create namespace "${GRAFANA_NAMESPACE}" || true # Setup grafana-operator into dedicated namespace -sed -i "s/namespace: system/namespace: ${GRAFANA_NAMESPACE}/g" "${GRAFANA_OPERATOR_DIR}/deploy/kustomize/overlays/namespace_scoped/kustomization.yaml" +# Use sed with compatibility for both macOS and Linux +if [[ "$(uname)" == "Darwin" ]]; then + sed -i '' "s/namespace: system/namespace: ${GRAFANA_NAMESPACE}/g" "${GRAFANA_OPERATOR_DIR}/deploy/kustomize/overlays/namespace_scoped/kustomization.yaml" +else + sed -i "s/namespace: system/namespace: ${GRAFANA_NAMESPACE}/g" "${GRAFANA_OPERATOR_DIR}/deploy/kustomize/overlays/namespace_scoped/kustomization.yaml" +fi kubectl kustomize "${GRAFANA_OPERATOR_DIR}/deploy/kustomize/overlays/namespace_scoped" --load-restrictor 
LoadRestrictionsNone | kubectl apply --server-side -f - kubectl wait deployment/grafana-operator-controller-manager -n "${GRAFANA_NAMESPACE}" --for=condition=available --timeout=300s diff --git a/deploy/minio/install-minio-operator.sh b/deploy/minio/install-minio-operator.sh index 0f6158fbc..9aee125b4 100755 --- a/deploy/minio/install-minio-operator.sh +++ b/deploy/minio/install-minio-operator.sh @@ -63,13 +63,21 @@ echo "Setup minio.io operator ${MINIO_OPERATOR_VERSION} into ${MINIO_NAMESPACE} ## TODO: need to refactor after next minio-operator release MINIO_KUSTOMIZE_DIR="${MINIO_OPERATOR_DIR}/resources" -sed -i -e "s/replicas: 2/replicas: 1/" $MINIO_KUSTOMIZE_DIR/base/deployment.yaml -sed -i -e "s/name: minio-operator/name: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/namespace.yaml -sed -i -e "s/: restricted/: baseline/" $MINIO_KUSTOMIZE_DIR/base/namespace.yaml -sed -i -e "s/namespace: default/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/*.yaml -sed -i -e "s/namespace: minio-operator/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/*.yaml -sed -i -e "s/namespace: minio-operator/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/kustomization.yaml -sed -i -e "s/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/" $MINIO_KUSTOMIZE_DIR/base/*.yaml + +# Use sed with compatibility for both macOS and Linux +if [[ "$(uname)" == "Darwin" ]]; then + SED_INPLACE="sed -i ''" +else + SED_INPLACE="sed -i" +fi + +$SED_INPLACE -e "s/replicas: 2/replicas: 1/" $MINIO_KUSTOMIZE_DIR/base/deployment.yaml +$SED_INPLACE -e "s/name: minio-operator/name: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/namespace.yaml +$SED_INPLACE -e "s/: restricted/: baseline/" $MINIO_KUSTOMIZE_DIR/base/namespace.yaml +$SED_INPLACE -e "s/namespace: default/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/*.yaml +$SED_INPLACE -e "s/namespace: minio-operator/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/*.yaml +$SED_INPLACE -e "s/namespace: 
minio-operator/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/kustomization.yaml +$SED_INPLACE -e "s/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/" $MINIO_KUSTOMIZE_DIR/base/*.yaml # Setup minio-operator into dedicated namespace via kustomize kubectl --namespace="${MINIO_NAMESPACE}" apply -k "${MINIO_KUSTOMIZE_DIR}" From c33f83fa922d3fcf8458985da300206b113bb7ae Mon Sep 17 00:00:00 2001 From: Kilian Ries Date: Thu, 27 Nov 2025 17:55:59 +0100 Subject: [PATCH 003/233] fix grafana operator setup missing serviceAccount --- .../grafana-with-grafana-operator/grafana-cr-template.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/deploy/grafana/grafana-with-grafana-operator/grafana-cr-template.yaml b/deploy/grafana/grafana-with-grafana-operator/grafana-cr-template.yaml index 1b9b41e0b..989edcf7f 100644 --- a/deploy/grafana/grafana-with-grafana-operator/grafana-cr-template.yaml +++ b/deploy/grafana/grafana-with-grafana-operator/grafana-cr-template.yaml @@ -6,6 +6,12 @@ metadata: labels: app: grafana spec: + # Service account for the Grafana Operator to manage datasources and dashboards + serviceAccount: + metadata: + annotations: {} + labels: {} + deployment: metadata: annotations: From e2c2e6f40dc96458931b20486834f72aabcc6ebb Mon Sep 17 00:00:00 2001 From: Kilian Ries Date: Thu, 27 Nov 2025 17:56:55 +0100 Subject: [PATCH 004/233] change label names --- deploy/prometheus/clickhouse.test.yaml | 4 +- .../prometheus-alert-rules-backup.yaml | 220 +++++++++--------- .../prometheus-alert-rules-chkeeper.yaml | 46 ++-- .../prometheus-alert-rules-clickhouse.yaml | 4 +- .../prometheus-alert-rules-zookeeper.yaml | 156 ++++++------- deploy/prometheus/prometheus-template.yaml | 12 +- 6 files changed, 221 insertions(+), 221 deletions(-) diff --git a/deploy/prometheus/clickhouse.test.yaml b/deploy/prometheus/clickhouse.test.yaml index 52ae6abe1..95dfaa947 100644 --- a/deploy/prometheus/clickhouse.test.yaml +++ b/deploy/prometheus/clickhouse.test.yaml @@ -23,7 +23,7 
@@ tests: - name: ClickHouseMetricsExporterDown interval: 30s input_series: - - series: 'up{app="clickhouse-operator", pod_name="clickhouse-operator-XXX"}' + - series: 'up{app="clickhouse-operator", pod="clickhouse-operator-XXX"}' values: "0+0x10" alert_rule_test: - eval_time: 30s @@ -31,7 +31,7 @@ tests: exp_alerts: - exp_labels: severity: critical - pod_name: clickhouse-operator-XXX + pod: clickhouse-operator-XXX app: clickhouse-operator exp_annotations: description: "`metrics-exporter` not sent data more than 1 minutes.\nPlease check instance status\n```kubectl logs -n clickhouse-operator-XXX -c metrics-exporter -f```" diff --git a/deploy/prometheus/prometheus-alert-rules-backup.yaml b/deploy/prometheus/prometheus-alert-rules-backup.yaml index 8dd123fc5..257e01e9f 100644 --- a/deploy/prometheus/prometheus-alert-rules-backup.yaml +++ b/deploy/prometheus/prometheus-alert-rules-backup.yaml @@ -11,51 +11,51 @@ spec: - name: ClickHouseBackupRules rules: - alert: ClickHouseBackupDown - expr: up{container_name='clickhouse-backup'} == 0 + expr: up{container='clickhouse-backup'} == 0 labels: severity: critical annotations: - identifier: "{{ $labels.pod_name }}" + identifier: "{{ $labels.pod }}" summary: "clickhouse-backup possible down" description: |- `clickhouse-backup` can't be scraped via prometheus. 
Check clickhouse-backup status - ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod_name }} -c {{ $labels.container_name }} --since=24h``` + ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod }} -c {{ $labels.container }} --since=24h``` - alert: ClickHouseBackupRecentlyRestart - expr: (clickhouse_backup_last_backup_success{container_name='clickhouse-backup'} == 2 or clickhouse_backup_last_create_status{container_name='clickhouse-backup'} == 2) and time() - process_start_time_seconds{container_name='clickhouse-backup'} < 180 + expr: (clickhouse_backup_last_backup_success{container='clickhouse-backup'} == 2 or clickhouse_backup_last_create_status{container='clickhouse-backup'} == 2) and time() - process_start_time_seconds{container='clickhouse-backup'} < 180 labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}" + identifier: "{{ $labels.pod }}" summary: "clickhouse-backup restart less than 3 minutes ago" description: |- `clickhouse-backup` possible was restarted Check clickhouse-backup status - ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod_name }} -c {{ $labels.container_name }} --since=24h``` + ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod }} -c {{ $labels.container }} --since=24h``` - alert: ClickHouseBackupFailed expr: |- - (increase(clickhouse_backup_failed_backups{container_name='clickhouse-backup'}[24h]) > 0 and clickhouse_backup_last_backup_success != 1) - or (clickhouse_backup_last_backup_success{container_name='clickhouse-backup'} == 0) - or (increase(clickhouse_backup_failed_creates{container_name='clickhouse-backup'}[24h]) > 0 and clickhouse_backup_last_create_status != 1) - or (clickhouse_backup_last_create_status{container_name='clickhouse-backup'} == 0) - or (increase(clickhouse_backup_failed_downloads{container_name='clickhouse-backup'}[24h]) > 0 and clickhouse_backup_last_download_status != 1) - or 
(clickhouse_backup_last_download_status{container_name='clickhouse-backup'} == 0) - or (increase(clickhouse_backup_failed_restores{container_name='clickhouse-backup'}[24h]) > 0 and clickhouse_backup_last_restore_status != 1) - or (clickhouse_backup_last_restore_status{container_name='clickhouse-backup'} == 0) - or (increase(clickhouse_backup_failed_uploads{container_name='clickhouse-backup'}[24h]) > 0 and clickhouse_backup_last_upload_status != 1) - or (clickhouse_backup_last_upload_status{container_name='clickhouse-backup'} == 0) - or (increase(clickhouse_backup_failed_create_remotes{container_name='clickhouse-backup'}[24h]) > 0 and clickhouse_backup_last_create_remote_status != 1) - or (clickhouse_backup_last_create_remote_status{container_name='clickhouse-backup'} == 0) - or (increase(clickhouse_backup_failed_restore_remotes{container_name='clickhouse-backup'}[24h]) > 0 and clickhouse_backup_last_restore_remote_status != 1) - or (clickhouse_backup_last_restore_remote_status{container_name='clickhouse-backup'} == 0) + (increase(clickhouse_backup_failed_backups{container='clickhouse-backup'}[24h]) > 0 and clickhouse_backup_last_backup_success != 1) + or (clickhouse_backup_last_backup_success{container='clickhouse-backup'} == 0) + or (increase(clickhouse_backup_failed_creates{container='clickhouse-backup'}[24h]) > 0 and clickhouse_backup_last_create_status != 1) + or (clickhouse_backup_last_create_status{container='clickhouse-backup'} == 0) + or (increase(clickhouse_backup_failed_downloads{container='clickhouse-backup'}[24h]) > 0 and clickhouse_backup_last_download_status != 1) + or (clickhouse_backup_last_download_status{container='clickhouse-backup'} == 0) + or (increase(clickhouse_backup_failed_restores{container='clickhouse-backup'}[24h]) > 0 and clickhouse_backup_last_restore_status != 1) + or (clickhouse_backup_last_restore_status{container='clickhouse-backup'} == 0) + or (increase(clickhouse_backup_failed_uploads{container='clickhouse-backup'}[24h]) > 0 and 
clickhouse_backup_last_upload_status != 1) + or (clickhouse_backup_last_upload_status{container='clickhouse-backup'} == 0) + or (increase(clickhouse_backup_failed_create_remotes{container='clickhouse-backup'}[24h]) > 0 and clickhouse_backup_last_create_remote_status != 1) + or (clickhouse_backup_last_create_remote_status{container='clickhouse-backup'} == 0) + or (increase(clickhouse_backup_failed_restore_remotes{container='clickhouse-backup'}[24h]) > 0 and clickhouse_backup_last_restore_remote_status != 1) + or (clickhouse_backup_last_restore_remote_status{container='clickhouse-backup'} == 0) labels: severity: critical annotations: - identifier: "{{ $labels.pod_name }}" + identifier: "{{ $labels.pod }}" summary: "clickhouse-backup last backup possible failed" description: |- status legend @@ -63,175 +63,175 @@ spec: - 1 - success - 2 - unknown - `increase(clickhouse_backup_failed_create_remotes{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[24h])` = {{ with printf "increase(clickhouse_backup_failed_create_remotes{pod_name='%s',namespace='%s'}[24h])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} - `clickhouse_backup_last_create_remote_status{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_create_remote_status{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} + `increase(clickhouse_backup_failed_create_remotes{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[24h])` = {{ with printf "increase(clickhouse_backup_failed_create_remotes{pod='%s',namespace='%s'}[24h])" .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }}{{ end }} + `clickhouse_backup_last_create_remote_status{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_create_remote_status{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} - `increase(clickhouse_backup_failed_restore_remotes{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[24h])` = {{ with printf "increase(clickhouse_backup_failed_restore_remotes{pod_name='%s',namespace='%s'}[24h])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} - `clickhouse_backup_last_restore_remote_status{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_restore_remote_status{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} + `increase(clickhouse_backup_failed_restore_remotes{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[24h])` = {{ with printf "increase(clickhouse_backup_failed_restore_remotes{pod='%s',namespace='%s'}[24h])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} + `clickhouse_backup_last_restore_remote_status{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_restore_remote_status{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} - `increase(clickhouse_backup_failed_backups{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[24h])` = {{ with printf "increase(clickhouse_backup_failed_backups{pod_name='%s',namespace='%s'}[24h])" .Labels.pod_name .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }}{{ end }} - `clickhouse_backup_last_backup_success{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_backup_success{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} + `increase(clickhouse_backup_failed_backups{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[24h])` = {{ with printf "increase(clickhouse_backup_failed_backups{pod='%s',namespace='%s'}[24h])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} + `clickhouse_backup_last_backup_success{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_backup_success{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} - `increase(clickhouse_backup_failed_creates{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[24h])` = {{ with printf "increase(clickhouse_backup_failed_creates{pod_name='%s',namespace='%s'}[24h])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} - `clickhouse_backup_last_create_status{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_create_status{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} + `increase(clickhouse_backup_failed_creates{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[24h])` = {{ with printf "increase(clickhouse_backup_failed_creates{pod='%s',namespace='%s'}[24h])" .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }}{{ end }} + `clickhouse_backup_last_create_status{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_create_status{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} - `increase(clickhouse_backup_failed_downloads{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[24h])` = {{ with printf "increase(clickhouse_backup_failed_downloads{pod_name='%s',namespace='%s'}[24h])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} - `clickhouse_backup_last_download_status{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_download_status{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} + `increase(clickhouse_backup_failed_downloads{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[24h])` = {{ with printf "increase(clickhouse_backup_failed_downloads{pod='%s',namespace='%s'}[24h])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} + `clickhouse_backup_last_download_status{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_download_status{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} - `increase(clickhouse_backup_failed_restores{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[24h])` = {{ with printf "increase(clickhouse_backup_failed_restores{pod_name='%s',namespace='%s'}[24h])" .Labels.pod_name .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }}{{ end }} - `clickhouse_backup_last_restore_status{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_restore_status{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} + `increase(clickhouse_backup_failed_restores{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[24h])` = {{ with printf "increase(clickhouse_backup_failed_restores{pod='%s',namespace='%s'}[24h])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} + `clickhouse_backup_last_restore_status{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_restore_status{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} - `increase(clickhouse_backup_failed_uploads{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[24h])` = {{ with printf "increase(clickhouse_backup_failed_uploads{pod_name='%s',namespace='%s'}[24h])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} - `clickhouse_backup_last_upload_status{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_upload_status{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} + `increase(clickhouse_backup_failed_uploads{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[24h])` = {{ with printf "increase(clickhouse_backup_failed_uploads{pod='%s',namespace='%s'}[24h])" .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }}{{ end }} + `clickhouse_backup_last_upload_status{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_upload_status{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} Check clickhouse-backup logs - ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod_name }} -c {{ $labels.container_name }} --since=24h``` + ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod }} -c {{ $labels.container }} --since=24h``` Check backup list - ```kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -c {{ $labels.container_name }} -- wget -qO- http://127.0.0.1:7171/backup/list``` + ```kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -c {{ $labels.container }} -- wget -qO- http://127.0.0.1:7171/backup/list``` Check backup commands status - ```kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -c {{ $labels.container_name }} -- wget -qO- http://127.0.0.1:7171/backup/status``` + ```kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -c {{ $labels.container }} -- wget -qO- http://127.0.0.1:7171/backup/status``` Run backup manually - ```kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -c {{ $labels.container_name }} -- clickhouse-backup create [-t, --tables=.] [--diff-from=] [--delete] ``` + ```kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -c {{ $labels.container }} -- clickhouse-backup create [-t, --tables=.
] [--diff-from=] [--delete] ``` - alert: ClickHouseBackupTooLong # duration in nanoseconds so we expect 3600 * 4 * 10^9 expr: |- - clickhouse_backup_last_backup_duration{container_name='clickhouse-backup'} > 14400000000000 - or clickhouse_backup_last_create_duration{container_name='clickhouse-backup'} > 14400000000000 - or clickhouse_backup_last_download_duration{container_name='clickhouse-backup'} > 14400000000000 - or clickhouse_backup_last_restore_duration{container_name='clickhouse-backup'} > 14400000000000 - or clickhouse_backup_last_upload_duration{container_name='clickhouse-backup'} > 14400000000000 - or clickhouse_backup_last_create_remote_duration{container_name='clickhouse-backup'} > 14400000000000 - or clickhouse_backup_last_restore_remote_duration{container_name='clickhouse-backup'} > 14400000000000 + clickhouse_backup_last_backup_duration{container='clickhouse-backup'} > 14400000000000 + or clickhouse_backup_last_create_duration{container='clickhouse-backup'} > 14400000000000 + or clickhouse_backup_last_download_duration{container='clickhouse-backup'} > 14400000000000 + or clickhouse_backup_last_restore_duration{container='clickhouse-backup'} > 14400000000000 + or clickhouse_backup_last_upload_duration{container='clickhouse-backup'} > 14400000000000 + or clickhouse_backup_last_create_remote_duration{container='clickhouse-backup'} > 14400000000000 + or clickhouse_backup_last_restore_remote_duration{container='clickhouse-backup'} > 14400000000000 labels: severity: critical annotations: - identifier: "{{ $labels.pod_name }}" + identifier: "{{ $labels.pod }}" summary: "clickhouse-backup last backup duration was more 4 hours" description: |- - `clickhouse_backup_last_create_remote_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_create_remote_duration{pod_name='%s',namespace='%s'} / 1000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . 
| first | value | humanizeDuration }}{{ end }} - `clickhouse_backup_last_restore_remote_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_create_remote_duration{pod_name='%s',namespace='%s'} / 1000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} - `clickhouse_backup_last_create_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_create_duration{pod_name='%s',namespace='%s'} / 1000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} - `clickhouse_backup_last_upload_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_upload_duration{pod_name='%s',namespace='%s'} / 1000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} - `clickhouse_backup_last_download_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_download_duration{pod_name='%s',namespace='%s'} / 1000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} - `clickhouse_backup_last_restore_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_restore_duration{pod_name='%s',namespace='%s'} / 1000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} - `clickhouse_backup_last_backup_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_backup_duration{pod_name='%s',namespace='%s'} / 1000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . 
| first | value | humanizeDuration }}{{ end }} + `clickhouse_backup_last_create_remote_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_create_remote_duration{pod='%s',namespace='%s'} / 1000000000 " .Labels.pod .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} + `clickhouse_backup_last_restore_remote_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_restore_remote_duration{pod='%s',namespace='%s'} / 1000000000 " .Labels.pod .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} + `clickhouse_backup_last_create_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_create_duration{pod='%s',namespace='%s'} / 1000000000 " .Labels.pod .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} + `clickhouse_backup_last_upload_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_upload_duration{pod='%s',namespace='%s'} / 1000000000 " .Labels.pod .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} + `clickhouse_backup_last_download_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_download_duration{pod='%s',namespace='%s'} / 1000000000 " .Labels.pod .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} + `clickhouse_backup_last_restore_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_restore_duration{pod='%s',namespace='%s'} / 1000000000 " .Labels.pod .Labels.namespace | query }}{{ .
| first | value | humanizeDuration }}{{ end }} + `clickhouse_backup_last_backup_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_backup_duration{pod='%s',namespace='%s'} / 1000000000 " .Labels.pod .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} Check clickhouse-backup logs - ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod_name }} -c {{ $labels.container_name }} --since=24h``` + ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod }} -c {{ $labels.container }} --since=24h``` - alert: ClickHouseBackupTooShort expr: |- - (clickhouse_backup_last_backup_success{container_name='clickhouse-backup'} == 1 and clickhouse_backup_last_backup_duration{container_name='clickhouse-backup'} < clickhouse_backup_last_backup_duration{container_name='clickhouse-backup'} offset 1m * 0.70) - or (clickhouse_backup_last_create_status{container_name='clickhouse-backup'} == 1 and clickhouse_backup_last_create_duration{container_name='clickhouse-backup'} < clickhouse_backup_last_create_duration{container_name='clickhouse-backup'} offset 1m * 0.70) - or (clickhouse_backup_last_download_status{container_name='clickhouse-backup'} == 1 and clickhouse_backup_last_download_duration{container_name='clickhouse-backup'} < clickhouse_backup_last_download_duration{container_name='clickhouse-backup'} offset 1m * 0.70) - or (clickhouse_backup_last_restore_status{container_name='clickhouse-backup'} == 1 and clickhouse_backup_last_restore_duration{container_name='clickhouse-backup'} < clickhouse_backup_last_restore_duration{container_name='clickhouse-backup'} offset 1m * 0.70) - or (clickhouse_backup_last_upload_status{container_name='clickhouse-backup'} == 1 and clickhouse_backup_last_upload_duration{container_name='clickhouse-backup'} < clickhouse_backup_last_upload_duration{container_name='clickhouse-backup'} offset 1m * 0.70) + 
(clickhouse_backup_last_backup_success{container='clickhouse-backup'} == 1 and clickhouse_backup_last_backup_duration{container='clickhouse-backup'} < clickhouse_backup_last_backup_duration{container='clickhouse-backup'} offset 1m * 0.70) + or (clickhouse_backup_last_create_status{container='clickhouse-backup'} == 1 and clickhouse_backup_last_create_duration{container='clickhouse-backup'} < clickhouse_backup_last_create_duration{container='clickhouse-backup'} offset 1m * 0.70) + or (clickhouse_backup_last_download_status{container='clickhouse-backup'} == 1 and clickhouse_backup_last_download_duration{container='clickhouse-backup'} < clickhouse_backup_last_download_duration{container='clickhouse-backup'} offset 1m * 0.70) + or (clickhouse_backup_last_restore_status{container='clickhouse-backup'} == 1 and clickhouse_backup_last_restore_duration{container='clickhouse-backup'} < clickhouse_backup_last_restore_duration{container='clickhouse-backup'} offset 1m * 0.70) + or (clickhouse_backup_last_upload_status{container='clickhouse-backup'} == 1 and clickhouse_backup_last_upload_duration{container='clickhouse-backup'} < clickhouse_backup_last_upload_duration{container='clickhouse-backup'} offset 1m * 0.70) labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}" + identifier: "{{ $labels.pod }}" summary: "clickhouse-backup last backup duration time is 30% less than the time of the previous one" description: |- - `clickhouse_backup_last_backup_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_backup_duration{pod_name='%s',namespace='%s'} / 60000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }}{{ end }} minutes - `clickhouse_backup_last_backup_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"} offset 1m` = {{ with printf "clickhouse_backup_last_backup_duration{pod_name='%s',namespace='%s'} offset 1m / 60000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes + `clickhouse_backup_last_backup_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_backup_duration{pod='%s',namespace='%s'} / 60000000000 " .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes + `clickhouse_backup_last_backup_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"} offset 1m` = {{ with printf "clickhouse_backup_last_backup_duration{pod='%s',namespace='%s'} offset 1m / 60000000000 " .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes - `clickhouse_backup_last_create_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_create_duration{pod_name='%s',namespace='%s'} / 60000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes - `clickhouse_backup_last_create_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"} offset 1m` = {{ with printf "clickhouse_backup_last_create_duration{pod_name='%s',namespace='%s'} offset 1m / 60000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes + `clickhouse_backup_last_create_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_create_duration{pod='%s',namespace='%s'} / 60000000000 " .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }}{{ end }} minutes + `clickhouse_backup_last_create_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"} offset 1m` = {{ with printf "clickhouse_backup_last_create_duration{pod='%s',namespace='%s'} offset 1m / 60000000000 " .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes - `clickhouse_backup_last_download_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_download_duration{pod_name='%s',namespace='%s'} / 60000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes - `clickhouse_backup_last_download_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"} offset 1m` = {{ with printf "clickhouse_backup_last_download_duration{pod_name='%s',namespace='%s'} offset 1m / 60000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes + `clickhouse_backup_last_download_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_download_duration{pod='%s',namespace='%s'} / 60000000000 " .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes + `clickhouse_backup_last_download_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"} offset 1m` = {{ with printf "clickhouse_backup_last_download_duration{pod='%s',namespace='%s'} offset 1m / 60000000000 " .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes - `clickhouse_backup_last_restore_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_restore_duration{pod_name='%s',namespace='%s'} / 60000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }}{{ end }} minutes - `clickhouse_backup_last_restore_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"} offset 1m` = {{ with printf "clickhouse_backup_last_restore_duration{pod_name='%s',namespace='%s'} offset 1m / 60000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes + `clickhouse_backup_last_restore_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_restore_duration{pod='%s',namespace='%s'} / 60000000000 " .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes + `clickhouse_backup_last_restore_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"} offset 1m` = {{ with printf "clickhouse_backup_last_restore_duration{pod='%s',namespace='%s'} offset 1m / 60000000000 " .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes - `clickhouse_backup_last_upload_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_upload_duration{pod_name='%s',namespace='%s'} / 60000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes - `clickhouse_backup_last_upload_duration{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"} offset 1m` = {{ with printf "clickhouse_backup_last_upload_duration{pod_name='%s',namespace='%s'} offset 1m / 60000000000 " .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes + `clickhouse_backup_last_upload_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_upload_duration{pod='%s',namespace='%s'} / 60000000000 " .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }}{{ end }} minutes + `clickhouse_backup_last_upload_duration{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"} offset 1m` = {{ with printf "clickhouse_backup_last_upload_duration{pod='%s',namespace='%s'} offset 1m / 60000000000 " .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} minutes Check clickhouse-backup logs - ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod_name }} -c {{ $labels.container_name }} --since=24h``` + ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod }} -c {{ $labels.container }} --since=24h``` ClickHouse data size - ```kubectl exec -n {{ $labels.namespace }} pods/{{ $labels.pod_name }} -c {{ $labels.container_name }}-- wget -qO- "http://127.0.0.1:8123/?query=SELECT+formatReadableSize(sum(total_bytes))+FROM+system.tables+WHERE+database!='system'"``` + ```kubectl exec -n {{ $labels.namespace }} pods/{{ $labels.pod }} -c {{ $labels.container }} -- wget -qO- "http://127.0.0.1:8123/?query=SELECT+formatReadableSize(sum(total_bytes))+FROM+system.tables+WHERE+database!='system'"``` - alert: ClickHouseBackupSizeChanged expr: |- ( - clickhouse_backup_last_create_status{container_name='clickhouse-backup'} == 1 - and clickhouse_backup_last_backup_size_local{container_name='clickhouse-backup'} offset 1m > 0 - and clickhouse_backup_last_backup_size_local{container_name='clickhouse-backup'} < clickhouse_backup_last_backup_size_local{container_name='clickhouse-backup'} offset 1m * 0.60 + clickhouse_backup_last_create_status{container='clickhouse-backup'} == 1 + and clickhouse_backup_last_backup_size_local{container='clickhouse-backup'} offset 1m > 0 + and clickhouse_backup_last_backup_size_local{container='clickhouse-backup'} < clickhouse_backup_last_backup_size_local{container='clickhouse-backup'} offset 1m * 0.60 ) or ( - clickhouse_backup_last_create_status{container_name='clickhouse-backup'} == 1 - and
clickhouse_backup_last_backup_size_local{container_name='clickhouse-backup'} offset 1m > 0 - and clickhouse_backup_last_backup_size_local{container_name='clickhouse-backup'} > clickhouse_backup_last_backup_size_local{container_name='clickhouse-backup'} offset 1m * 1.40) + clickhouse_backup_last_create_status{container='clickhouse-backup'} == 1 + and clickhouse_backup_last_backup_size_local{container='clickhouse-backup'} offset 1m > 0 + and clickhouse_backup_last_backup_size_local{container='clickhouse-backup'} > clickhouse_backup_last_backup_size_local{container='clickhouse-backup'} offset 1m * 1.40) or ( - clickhouse_backup_last_upload_status{container_name='clickhouse-backup'} == 1 - and clickhouse_backup_last_backup_size_remote{container_name='clickhouse-backup'} offset 1m > 0 - and clickhouse_backup_last_backup_size_remote{container_name='clickhouse-backup'} < clickhouse_backup_last_backup_size_remote{container_name='clickhouse-backup'} offset 1m * 0.60 + clickhouse_backup_last_upload_status{container='clickhouse-backup'} == 1 + and clickhouse_backup_last_backup_size_remote{container='clickhouse-backup'} offset 1m > 0 + and clickhouse_backup_last_backup_size_remote{container='clickhouse-backup'} < clickhouse_backup_last_backup_size_remote{container='clickhouse-backup'} offset 1m * 0.60 ) or ( - clickhouse_backup_last_upload_status{container_name='clickhouse-backup'} == 1 - and clickhouse_backup_last_backup_size_remote{container_name='clickhouse-backup'} offset 1m > 0 - and clickhouse_backup_last_backup_size_remote{container_name='clickhouse-backup'} > clickhouse_backup_last_backup_size_remote{container_name='clickhouse-backup'} offset 1m * 1.40 + clickhouse_backup_last_upload_status{container='clickhouse-backup'} == 1 + and clickhouse_backup_last_backup_size_remote{container='clickhouse-backup'} offset 1m > 0 + and clickhouse_backup_last_backup_size_remote{container='clickhouse-backup'} > clickhouse_backup_last_backup_size_remote{container='clickhouse-backup'} 
offset 1m * 1.40 ) labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}" + identifier: "{{ $labels.pod }}" summary: "clickhouse-backup last backup size is changed more than 40% than the size of the previous one" description: |- - `clickhouse_backup_last_backup_size_local{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_backup_size_local{pod_name='%s',namespace='%s',container_name='clickhouse-backup'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | humanize1024 }}B{{ end }} - `clickhouse_backup_last_backup_size_local{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"} offset 1m` = {{ with printf "clickhouse_backup_last_backup_size_local{pod_name='%s',namespace='%s',container_name='clickhouse-backup'} offset 1m" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | humanize1024 }}B{{ end }} + `clickhouse_backup_last_backup_size_local{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_backup_size_local{pod='%s',namespace='%s',container='clickhouse-backup'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | humanize1024 }}B{{ end }} + `clickhouse_backup_last_backup_size_local{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"} offset 1m` = {{ with printf "clickhouse_backup_last_backup_size_local{pod='%s',namespace='%s',container='clickhouse-backup'} offset 1m" .Labels.pod .Labels.namespace | query }}{{ . | first | value | humanize1024 }}B{{ end }} - `clickhouse_backup_last_backup_size_remote{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_backup_size_local{pod_name='%s',namespace='%s',container_name='clickhouse-backup'}" .Labels.pod_name .Labels.namespace | query }}{{ . 
| first | value | humanize1024 }}B{{ end }} - `clickhouse_backup_last_backup_size_remote{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"} offset 1m` = {{ with printf "clickhouse_backup_last_backup_size_local{pod_name='%s',namespace='%s',container_name='clickhouse-backup'} offset 1m" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | humanize1024 }}B{{ end }} + `clickhouse_backup_last_backup_size_remote{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_backup_size_remote{pod='%s',namespace='%s',container='clickhouse-backup'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | humanize1024 }}B{{ end }} + `clickhouse_backup_last_backup_size_remote{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"} offset 1m` = {{ with printf "clickhouse_backup_last_backup_size_remote{pod='%s',namespace='%s',container='clickhouse-backup'} offset 1m" .Labels.pod .Labels.namespace | query }}{{ . | first | value | humanize1024 }}B{{ end }} Check clickhouse-backup logs - ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod_name }} -c {{ $labels.container_name }} --since=24h``` + ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod }} -c {{ $labels.container }} --since=24h``` ClickHouse data size - ```kubectl exec -n {{ $labels.namespace }} pods/{{ $labels.pod_name }} -c {{ $labels.container_name }}-- wget -qO- "http://127.0.0.1:8123/?query=SELECT+formatReadableSize(sum(total_bytes))+FROM+system.tables+WHERE+database!='system'"``` + ```kubectl exec -n {{ $labels.namespace }} pods/{{ $labels.pod }} -c {{ $labels.container }} -- wget -qO- "http://127.0.0.1:8123/?query=SELECT+formatReadableSize(sum(total_bytes))+FROM+system.tables+WHERE+database!='system'"``` - alert: ClickHouseRemoteBackupSizeZero for: "36h" - expr: clickhouse_backup_last_backup_size_remote{container_name='clickhouse-backup'} == 0 + expr:
clickhouse_backup_last_backup_size_remote{container='clickhouse-backup'} == 0 labels: severity: warning team: ClickHouse annotations: - identifier: "{{ $labels.pod_name }}" + identifier: "{{ $labels.pod }}" summary: "clickhouse-backup last backup size is zero last 36 hours" description: |- - `clickhouse_backup_last_backup_size_remote{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_backup_size_remote{pod_name='%s',namespace='%s',container_name='clickhouse-backup'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | humanize1024 }}B{{ end }} - `clickhouse_backup_last_backup_size_local{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_backup_size_local{pod_name='%s',namespace='%s',container_name='clickhouse-backup'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | humanize1024 }}B{{ end }} + `clickhouse_backup_last_backup_size_remote{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_backup_size_remote{pod='%s',namespace='%s',container='clickhouse-backup'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | humanize1024 }}B{{ end }} + `clickhouse_backup_last_backup_size_local{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_last_backup_size_local{pod='%s',namespace='%s',container='clickhouse-backup'}" .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | humanize1024 }}B{{ end }} Check clickhouse-backup logs - ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod_name }} -c {{ $labels.container_name }} --since=24h``` + ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod }} -c {{ $labels.container }} --since=24h``` ClickHouse data size - ```kubectl exec -n {{ $labels.namespace }} pods/{{ $labels.pod_name }} -c {{ $labels.container_name }}-- wget -qO- "http://127.0.0.1:8123/?query=SELECT+formatReadableSize(sum(total_bytes))+FROM+system.tables+WHERE+database!='system'"``` + ```kubectl exec -n {{ $labels.namespace }} pods/{{ $labels.pod }} -c {{ $labels.container }} -- wget -qO- "http://127.0.0.1:8123/?query=SELECT+formatReadableSize(sum(total_bytes))+FROM+system.tables+WHERE+database!='system'"``` - alert: ClickhouseBackupDoesntRunTooLong expr: |- - (clickhouse_backup_last_backup_end{container_name='clickhouse-backup'} > 0 and time() - clickhouse_backup_last_backup_end{container_name='clickhouse-backup'} > 129600) - or (clickhouse_backup_last_create_finish{container_name='clickhouse-backup'} > 0 and time() - clickhouse_backup_last_create_finish{container_name='clickhouse-backup'} > 129600) - or (clickhouse_backup_last_upload_finish{container_name='clickhouse-backup'} > 0 and time() - clickhouse_backup_last_upload_finish{container_name='clickhouse-backup'} > 129600) + (clickhouse_backup_last_backup_end{container='clickhouse-backup'} > 0 and time() - clickhouse_backup_last_backup_end{container='clickhouse-backup'} > 129600) + or (clickhouse_backup_last_create_finish{container='clickhouse-backup'} > 0 and time() - clickhouse_backup_last_create_finish{container='clickhouse-backup'} > 129600) + or (clickhouse_backup_last_upload_finish{container='clickhouse-backup'} > 0 and time() - clickhouse_backup_last_upload_finish{container='clickhouse-backup'} > 129600) labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}" + identifier: "{{ $labels.pod }}" summary:
"clickhouse-backup didn't run last 24h" description: |- - `time() - clickhouse_backup_last_backup_end{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "time() - clickhouse_backup_last_backup_end{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} - `time() - clickhouse_backup_last_create_finish{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "time() - clickhouse_backup_last_create_finish{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} - `time() - clickhouse_backup_last_backup_finish{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "time() - clickhouse_backup_last_backup_finish{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} + `time() - clickhouse_backup_last_backup_end{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "time() - clickhouse_backup_last_backup_end{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} + `time() - clickhouse_backup_last_create_finish{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "time() - clickhouse_backup_last_create_finish{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} + `time() - clickhouse_backup_last_upload_finish{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "time() - clickhouse_backup_last_upload_finish{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ .
| first | value | humanizeDuration }}{{ end }} Check clickhouse-backup logs - ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod_name }} -c {{ $labels.container_name }} --since=48h``` + ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod }} -c {{ $labels.container }} --since=48h``` # https://github.com/Altinity/clickhouse-backup/issues/836 - alert: ClickHouseBackupLocalBackupUnexpectedPresent expr: |- @@ -242,11 +242,11 @@ spec: ) for: "4h" annotations: - identifier: "{{ $labels.pod_name }}" + identifier: "{{ $labels.pod }}" summary: "clickhouse-backup have unexpected local backup" description: |- unexpected local backups could allocate additional disk space - `clickhouse_backup_number_backups_local{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_number_backups_local{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value }}{{ end }} - `clickhouse_backup_number_backups_local_expected{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_number_backups_local_expected{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value }}{{ end }} + `clickhouse_backup_number_backups_local{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_number_backups_local{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value }}{{ end }} + `clickhouse_backup_number_backups_local_expected{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "clickhouse_backup_number_backups_local_expected{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . 
| first | value }}{{ end }} Check clickhouse-backup logs and remove local backup if necessary - ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod_name }} -c {{ $labels.container_name }} --since=24h``` + ```kubectl logs -n {{ $labels.namespace }} pods/{{ $labels.pod }} -c {{ $labels.container }} --since=24h``` diff --git a/deploy/prometheus/prometheus-alert-rules-chkeeper.yaml b/deploy/prometheus/prometheus-alert-rules-chkeeper.yaml index 67d47608d..3ad3b4fad 100644 --- a/deploy/prometheus/prometheus-alert-rules-chkeeper.yaml +++ b/deploy/prometheus/prometheus-alert-rules-chkeeper.yaml @@ -15,12 +15,12 @@ spec: labels: severity: critical annotations: - identifier: "{{ $labels.pod_name }}" + identifier: "{{ $labels.pod }}" summary: "zookeeper possible down" description: |- `zookeeper` can't be scraped via prometheus. Please check instance status - ```kubectl logs -n {{ $labels.namespace }} {{ $labels.pod_name }} -f``` + ```kubectl logs -n {{ $labels.namespace }} {{ $labels.pod }} -f``` - alert: ClickHouseKeeperHighLatency expr: zk_max_latency{app=~'clickhouse-keeper.*'} > 500 @@ -28,34 +28,34 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Average amount of time it takes for the server to respond to each client request (since the server was started)." description: |- - `avg_latency{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "avg_latency{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} ticks{{ end }} + `avg_latency{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "avg_latency{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }} ticks{{ end }} reset server statistics ``` - kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- bash -c 'exec 3<>/dev/tcp/127.0.0.1/2181 && printf \"stats_reset\" >&3 && timeout 5 cat <&3' + kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- bash -c 'exec 3<>/dev/tcp/127.0.0.1/2181 && printf \"stats_reset\" >&3 && timeout 5 cat <&3' ``` Look to CPU/Memory node/pod utilization ``` - kubectl top -n {{ $labels.namespace }} pod {{ $labels.pod_name }} + kubectl top -n {{ $labels.namespace }} pod {{ $labels.pod }} kubectl top node {{ $labels.node }} ``` Look to ClickHouseKeeper Disk free space ``` - kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- df -h + kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- df -h ``` Look to clickhouse-keeper read\write ``` - readBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) - writeBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) + readBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) + writeBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) sleep 5 - readEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) - writeEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) + readEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) + writeEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) echo "ClickHouseKeeper Read $((($readEnd - $readBegin) / 5)) b/s" echo "ClickHouseKeeper Write 
$((($writeEnd - $writeBegin) / 5)) b/s" ``` @@ -66,29 +66,29 @@ spec: labels: severity: high annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "ClickHouseKeeper receives more requests than it can process." description: |- - `outstanding_requests{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "outstanding_requests{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} + `outstanding_requests{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "outstanding_requests{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} Look to CPU/Memory node/pod utilization ``` - kubectl top -n {{ $labels.namespace }} pod {{ $labels.pod_name }} + kubectl top -n {{ $labels.namespace }} pod {{ $labels.pod }} kubectl top node {{ $labels.node }} ``` Look to ClickHouseKeeper Disk free space ``` - kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- df -h + kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- df -h ``` Look to zookeeper read\write ``` - readBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) - writeBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) + readBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) + writeBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) sleep 5 - readEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) - writeEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- 
cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) + readEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) + writeEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) echo "ClickHouseKeeper Read $((($readEnd - $readBegin) / 5)) b/s" echo "ClickHouseKeeper Write $((($writeEnd - $writeBegin) / 5)) b/s" ``` @@ -99,10 +99,10 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Number of file descriptors used over the limit." description: |- - `zk_open_file_descriptor_count{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "zk_open_file_descriptor_count{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} descriptors{{ end }} + `zk_open_file_descriptor_count{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "zk_open_file_descriptor_count{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} descriptors{{ end }} - alert: ClickHouseKeeperHighEphemeralNodes @@ -111,9 +111,9 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "ClickHouseKeeper have too high ephemeral znodes count." description: |- - `zk_ephemerals_count{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "ephemerals_count{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }} nodes{{ end }} + `zk_ephemerals_count{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "ephemerals_count{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} nodes{{ end }} Look to documentation: https://zookeeper.apache.org/doc/current/zookeeperOver.html#Nodes+and+ephemeral+nodes diff --git a/deploy/prometheus/prometheus-alert-rules-clickhouse.yaml b/deploy/prometheus/prometheus-alert-rules-clickhouse.yaml index d78349e30..ceafe77a5 100644 --- a/deploy/prometheus/prometheus-alert-rules-clickhouse.yaml +++ b/deploy/prometheus/prometheus-alert-rules-clickhouse.yaml @@ -15,12 +15,12 @@ spec: labels: severity: critical annotations: - identifier: "{{ $labels.pod_name }}" + identifier: "{{ $labels.pod }}" summary: "metrics-exporter possible down" description: |- `metrics-exporter` not sent data more than 1 minutes. Please check instance status - ```kubectl logs -n {{ $labels.namespace }} {{ $labels.pod_name }} -c metrics-exporter -f``` + ```kubectl logs -n {{ $labels.namespace }} {{ $labels.pod }} -c metrics-exporter -f``` - alert: ClickHouseServerDown expr: chi_clickhouse_metric_fetch_errors{fetch_type='system.metrics'} > 0 diff --git a/deploy/prometheus/prometheus-alert-rules-zookeeper.yaml b/deploy/prometheus/prometheus-alert-rules-zookeeper.yaml index 038fc207f..76d72d08a 100644 --- a/deploy/prometheus/prometheus-alert-rules-zookeeper.yaml +++ b/deploy/prometheus/prometheus-alert-rules-zookeeper.yaml @@ -17,24 +17,24 @@ spec: labels: severity: critical annotations: - identifier: "{{ $labels.pod_name }}" + identifier: "{{ $labels.pod }}" summary: "zookeeper possible down" description: |- `zookeeper` can't be scraped via prometheus. 
Please check instance status - ```kubectl logs -n {{ $labels.namespace }} {{ $labels.pod_name }} -f``` + ```kubectl logs -n {{ $labels.namespace }} {{ $labels.pod }} -f``` - alert: ZookeeperRestartRecently expr: uptime{app=~'zookeeper.*'} > 1 < 180000 annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Amount of time since the server was started." description: |- - `uptime{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "uptime{pod_name='%s',namespace='%s'} / 1000" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} + `uptime{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "uptime{pod='%s',namespace='%s'} / 1000" .Labels.pod .Labels.namespace | query }}{{ . | first | value | humanizeDuration }}{{ end }} Look to previous Zookeeper pod log to investigate restart reason ``` - kubectl logs -n {{ $labels.namespace }} pod/{{ $labels.pod_name }} --previous + kubectl logs -n {{ $labels.namespace }} pod/{{ $labels.pod }} --previous ``` - alert: ZookeeperHighLatency @@ -43,34 +43,34 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Average amount of time it takes for the server to respond to each client request (since the server was started)." description: |- - `avg_latency{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "avg_latency{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} ticks{{ end }} + `avg_latency{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "avg_latency{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }} ticks{{ end }} reset server statistics ``` - kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- bash -xc "echo stats_reset | nc localhost 2181" + kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- bash -xc "echo stats_reset | nc localhost 2181" ``` Look to CPU/Memory node/pod utilization ``` - kubectl top -n {{ $labels.namespace }} pod {{ $labels.pod_name }} + kubectl top -n {{ $labels.namespace }} pod {{ $labels.pod }} kubectl top node {{ $labels.node }} ``` Look to Zookeeper Disk free space ``` - kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- df -h + kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- df -h ``` Look to zookeeper read\write ``` - readBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) - writeBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) + readBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) + writeBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) sleep 5 - readEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) - writeEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) + readEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) + writeEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) echo "Zookeeper Read $((($readEnd - $readBegin) / 5)) b/s" echo "Zookeeper Write $((($writeEnd - $writeBegin) / 5)) b/s" ``` @@ -81,29 +81,29 @@ spec: labels: severity: high annotations: - identifier: "{{ 
$labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper receives more requests than it can process." description: |- - `outstanding_requests{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "outstanding_requests{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} + `outstanding_requests{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "outstanding_requests{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} Look to CPU/Memory node/pod utilization ``` - kubectl top -n {{ $labels.namespace }} pod {{ $labels.pod_name }} + kubectl top -n {{ $labels.namespace }} pod {{ $labels.pod }} kubectl top node {{ $labels.node }} ``` Look to Zookeeper Disk free space ``` - kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- df -h + kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- df -h ``` Look to zookeeper read\write ``` - readBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) - writeBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) + readBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) + writeBegin=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) sleep 5 - readEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^rchar" | cut -d " " -f 2) - writeEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) + readEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io 
| grep -E "^rchar" | cut -d " " -f 2) + writeEnd=$(kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/1/io | grep -E "^wchar" | cut -d " " -f 2) echo "Zookeeper Read $((($readEnd - $readBegin) / 5)) b/s" echo "Zookeeper Write $((($writeEnd - $writeBegin) / 5)) b/s" ``` @@ -114,11 +114,11 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Number of file descriptors used over the limit." description: |- - `open_file_descriptor_count{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "open_file_descriptor_count{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} descriptors{{ end }} - `process_open_fds{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "process_open_fds{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} descriptors{{ end }} + `open_file_descriptor_count{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "open_file_descriptor_count{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} descriptors{{ end }} + `process_open_fds{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "process_open_fds{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} descriptors{{ end }} - alert: ZookeeperPendingSyncs expr: pending_syncs{app=~'zookeeper.*'} > 10 @@ -126,10 +126,10 @@ spec: labels: severity: high annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Possible Zookeeper master pending syncs with followers." 
description: |- - `pending_session_queue_size{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "pending_session_queue_size{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} sessions{{ end }} + `pending_session_queue_size{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "pending_session_queue_size{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} sessions{{ end }} - alert: ZookeeperPendingSessions expr: pending_session_queue_size{app=~'zookeeper.*'} > 10 @@ -137,10 +137,10 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Possible Zookeeper pending sessions." description: |- - `pending_session_queue_size{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "pending_session_queue_size{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} sessions{{ end }} + `pending_session_queue_size{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "pending_session_queue_size{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }} sessions{{ end }} - alert: ZookeeperThrottleRequests expr: increase(request_throttle_wait_count{app=~'zookeeper.*'}[1m]) > 0 @@ -148,10 +148,10 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper throttle requests" description: |- - `increase(request_throttle_wait_count{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(request_throttle_wait_count{pod_name='%s',namespace='%s'}[1m])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} requests{{ end }} + `increase(request_throttle_wait_count{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(request_throttle_wait_count{pod='%s',namespace='%s'}[1m])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} requests{{ end }} Look `requestThrottleLimit`, `requestThrottleStallTime` in documentation: https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_advancedConfiguration @@ -161,10 +161,10 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper receives more TLS handshake than it can process." description: |- - `outstanding_tls_handshake{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "outstanding_tls_handshake{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} requests{{ end }} + `outstanding_tls_handshake{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "outstanding_tls_handshake{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }} requests{{ end }} - alert: ZookeeperConnectionRejected expr: increase(connection_rejected{app=~'zookeeper.*'}[1m]) > 0 @@ -172,14 +172,14 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper reject connection." description: |- - `increase(connection_rejected{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(connection_rejected{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} requests{{ end }} + `increase(connection_rejected{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(connection_rejected{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} requests{{ end }} Check connections count on Zookeeper ``` - kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/net/sockstat - kubectl exec -n {{ $labels.namespace }} {{ $labels.pod_name }} -- cat /proc/net/sockstat6 + kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/net/sockstat + kubectl exec -n {{ $labels.namespace }} {{ $labels.pod }} -- cat /proc/net/sockstat6 ``` - alert: ZookeeperHighEphemeralNodes @@ -187,10 +187,10 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper have too high ephemeral znodes count." description: |- - `ephemerals_count{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "ephemerals_count{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }} nodes{{ end }} + `ephemerals_count{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "ephemerals_count{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} nodes{{ end }} Look to documentation: https://zookeeper.apache.org/doc/current/zookeeperOver.html#Nodes+and+ephemeral+nodes @@ -199,16 +199,16 @@ spec: labels: severity: high annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper have unhandled Exception" description: |- - `increase(unrecoverable_error_count{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "rate(unrecoverable_error_count{pod_name='%s',namespace='%s'}[1m])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} exceptions{{ end }} + `increase(unrecoverable_error_count{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "rate(unrecoverable_error_count{pod='%s',namespace='%s'}[1m])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} exceptions{{ end }} It mean Zookeeper catch some unknown Exception and will close listen socket Look to current and previous Zookeeper pod log to investigate restart reason ``` - kubectl logs -n {{ $labels.namespace }} pod/{{ $labels.pod_name }} - kubectl logs -n {{ $labels.namespace }} pod/{{ $labels.pod_name }} --previous + kubectl logs -n {{ $labels.namespace }} pod/{{ $labels.pod }} + kubectl logs -n {{ $labels.namespace }} pod/{{ $labels.pod }} --previous ``` @@ -218,12 +218,12 @@ spec: labels: severity: info annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper have inefficient get data znodes response cache." 
description: |- - `increase(response_packet_cache_hits{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(response_packet_cache_hits{pod_name='%s',namespace='%s'}[1m])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} hits{{ end }} - `increase(response_packet_cache_misses{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(response_packet_cache_misses{pod_name='%s',namespace='%s'}[1m])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} misses{{ end }} - `Get Data Cache Hit Rate` = {{ with printf "increase(response_packet_cache_hits{pod_name='%s',namespace='%s'}[1m]) / (increase(response_packet_cache_misses{pod_name='%s',namespace='%s'}[1m]) + increase(response_packet_cache_hits{pod_name='%s',namespace='%s'}[1m]))" .Labels.pod_name .Labels.namespace .Labels.pod_name .Labels.namespace .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} + `increase(response_packet_cache_hits{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(response_packet_cache_hits{pod='%s',namespace='%s'}[1m])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} hits{{ end }} + `increase(response_packet_cache_misses{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(response_packet_cache_misses{pod='%s',namespace='%s'}[1m])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} misses{{ end }} + `Get Data Cache Hit Rate` = {{ with printf "increase(response_packet_cache_hits{pod='%s',namespace='%s'}[1m]) / (increase(response_packet_cache_misses{pod='%s',namespace='%s'}[1m]) + increase(response_packet_cache_hits{pod='%s',namespace='%s'}[1m]))" .Labels.pod .Labels.namespace .Labels.pod .Labels.namespace .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }}{{ end }} For heavy read workloads Zookeeper try to cache response for `get data` API method for save the serialization cost on popular znodes. Try to tune `maxResponseCacheSize` Look to documentation: @@ -235,12 +235,12 @@ spec: labels: severity: info annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper have inefficient get data znodes response cache." description: |- - `increase(response_packet_get_children_cache_hits{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(response_packet_get_children_cache_hits{pod_name='%s',namespace='%s'}[1m])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} hits{{ end }} - `increase(response_packet_get_children_cache_misses{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(response_packet_get_children_cache_misses{pod_name='%s',namespace='%s'}[1m])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} misses{{ end }} - `Get Children Cache Hit Rate` = {{ with printf "increase(response_packet_get_children_cache_hits{pod_name='%s',namespace='%s'}[1m]) / (increase(response_packet_get_children_cache_misses{pod_name='%s',namespace='%s'}[1m]) + increase(response_packet_get_children_cache_hits{pod_name='%s',namespace='%s'}[1m]))" .Labels.pod_name .Labels.namespace .Labels.pod_name .Labels.namespace .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} + `increase(response_packet_get_children_cache_hits{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(response_packet_get_children_cache_hits{pod='%s',namespace='%s'}[1m])" .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }} hits{{ end }} + `increase(response_packet_get_children_cache_misses{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(response_packet_get_children_cache_misses{pod='%s',namespace='%s'}[1m])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} misses{{ end }} + `Get Children Cache Hit Rate` = {{ with printf "increase(response_packet_get_children_cache_hits{pod='%s',namespace='%s'}[1m]) / (increase(response_packet_get_children_cache_misses{pod='%s',namespace='%s'}[1m]) + increase(response_packet_get_children_cache_hits{pod='%s',namespace='%s'}[1m]))" .Labels.pod .Labels.namespace .Labels.pod .Labels.namespace .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }}{{ end }} For heavy read workloads Zookeeper try to cache response for `get children` API method for save the serialization cost on popular znodes. Try to tune `maxGetChildrenResponseCacheSize` Look to documentation: @@ -252,10 +252,10 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper authentication failures with `ensemble` scheme." description: |- - `increase(ensemble_auth_fail{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(response_packet_get_children_cache_hits{pod_name='%s',namespace='%s'}[1m])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} failures{{ end }} + `increase(ensemble_auth_fail{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(response_packet_get_children_cache_hits{pod='%s',namespace='%s'}[1m])" .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }} failures{{ end }} Look to `ensembleAuthName` in documentation: https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_authOptions @@ -264,12 +264,12 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper have slow fsync to disk." description: |- - `increase(fsynctime_sum{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(fsynctime_sum{pod_name='%s',namespace='%s'}[1m])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} ms{{ end }} - `increase(fsynctime_count{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(fsynctime_count{pod_name='%s',namespace='%s'}[1m])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} count{{ end }} - `fsynctime{quantile="0.5",pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "fsynctime{quantile='0.5',pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} ms{{ end }} + `increase(fsynctime_sum{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(fsynctime_sum{pod='%s',namespace='%s'}[1m])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} ms{{ end }} + `increase(fsynctime_count{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(fsynctime_count{pod='%s',namespace='%s'}[1m])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} count{{ end }} + `fsynctime{quantile="0.5",pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "fsynctime{quantile='0.5',pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }} ms{{ end }} It mean writes to Transactional Log (WAL) takes more time than expected. If a Zookkeper crashes it can replay the WAL to recover its previous state after restart. - alert: ZookeeperLargeRequestsRejected @@ -277,10 +277,10 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper reject some large requests." description: |- - `increase(large_requests_rejected{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(large_requests_rejected{pod_name='%s',namespace='%s'}[1m])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} requests{{ end }} + `increase(large_requests_rejected{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(large_requests_rejected{pod='%s',namespace='%s'}[1m])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} requests{{ end }} It mean Zookeeper avoid JVM allocate too much memory and runs out of usable heap and ultimately crashes. Look to `largeRequestMaxBytes` and `largeRequestThreshold` options in documentation: https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_advancedConfiguration @@ -290,10 +290,10 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper dropped stale requests." description: |- - `increase(stale_requests_dropped{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(stale_requests_dropped{pod_name='%s',namespace='%s'}[1m])" .Labels.pod_name .Labels.namespace | query }}{{ . 
| first | value | printf "%.2f" }} requests{{ end }} + `increase(stale_requests_dropped{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(stale_requests_dropped{pod='%s',namespace='%s'}[1m])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} requests{{ end }} `Stale request` is a request sent by a connection that is now closed, and/or a request that will have a request latency higher than the `sessionTimeout`. Look to `requestThrottleDropStale`, `requestStaleLatencyCheck`, `requestStaleConnectionCheck` options in documentation: https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_advancedConfiguration @@ -303,10 +303,10 @@ spec: labels: severity: critical annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper have inconsistent data in memory." description: |- - `increase(digest_mismatches_count{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(digest_mismatches_count{pod_name='%s',namespace='%s'}[1m])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} mismatches{{ end }} + `increase(digest_mismatches_count{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(digest_mismatches_count{pod='%s',namespace='%s'}[1m])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} mismatches{{ end }} The digest feature is added to detect the data inconsistency inside ZooKeeper when loading database from disk, catching up and following leader, its doing incrementally hash check for the DataTree based on the adHash. 
Look to `digest.enabled` options in documentation: https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_advancedConfiguration @@ -316,10 +316,10 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper close connection without session." description: |- - `increase(sessionless_connections_expired{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(sessionless_connections_expired{pod_name='%s',namespace='%s'}[1m])" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} sessionless connections{{ end }} + `increase(sessionless_connections_expired{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}[1m])` = {{ with printf "increase(sessionless_connections_expired{pod='%s',namespace='%s'}[1m])" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.2f" }} sessionless connections{{ end }} Look to `minSessionTimeout`,`maxSessionTimeout` options in documentation: https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_advancedConfiguration @@ -328,7 +328,7 @@ spec: labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper JVM threads Deadlock occurred." description: |- JVM Thread Deadlock means a situation where two or more JVM threads are blocked forever, waiting for each other. 
@@ -336,32 +336,32 @@ spec: As a quick workaround - pod restart ``` - kubectl exec -n {{ $labels.namespace }} pod/{{ $labels.pod_name }} -- kill 1 + kubectl exec -n {{ $labels.namespace }} pod/{{ $labels.pod }} -- kill 1 ``` Look to current Zookeeper pod log to investigate Deadlock reason ``` - kubectl logs -n {{ $labels.namespace }} pod/{{ $labels.pod_name }} -f | grep -i -E "deadlock|exception" + kubectl logs -n {{ $labels.namespace }} pod/{{ $labels.pod }} -f | grep -i -E "deadlock|exception" ``` Also look to JVM documentation about threads state: https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/lang/Thread.State.html - `jvm_threads_deadlocked{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_deadlocked{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} - `jvm_threads_current{pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_current{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} - `jvm_threads_state{state="NEW",pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_state{state='NEW',pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} - `jvm_threads_state{state="RUNNABLE",pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_state{state='RUNNABLE',pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} - `jvm_threads_state{state="BLOCKED",pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_state{state='BLOCKED',pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . 
| first | value | printf "%.0f" }}{{ end }} - `jvm_threads_state{state="WAITING",pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_state{state='WAITING',pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} - `jvm_threads_state{state="TIMED_WAITING",pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_state{state='TIMED_WAITING',pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} - `jvm_threads_state{state="TERMINATED",pod_name="{{ $labels.pod_name }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_state{state='TERMINATED',pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} + `jvm_threads_deadlocked{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_deadlocked{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} + `jvm_threads_current{pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_current{pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} + `jvm_threads_state{state="NEW",pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_state{state='NEW',pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} + `jvm_threads_state{state="RUNNABLE",pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_state{state='RUNNABLE',pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . 
| first | value | printf "%.0f" }}{{ end }} + `jvm_threads_state{state="BLOCKED",pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_state{state='BLOCKED',pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} + `jvm_threads_state{state="WAITING",pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_state{state='WAITING',pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} + `jvm_threads_state{state="TIMED_WAITING",pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_state{state='TIMED_WAITING',pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} + `jvm_threads_state{state="TERMINATED",pod="{{ $labels.pod }}",namespace="{{ $labels.namespace }}"}` = {{ with printf "jvm_threads_state{state='TERMINATED',pod='%s',namespace='%s'}" .Labels.pod .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }} - alert: ZookeeperUnsuccessfulSSLHandshakes expr: increase(unsuccessful_handshake{app=~'zookeeper.*'}[1m]) > 0 labels: severity: warning annotations: - identifier: "{{ $labels.pod_name }}.{{ $labels.namespace }}" + identifier: "{{ $labels.pod }}.{{ $labels.namespace }}" summary: "Zookeeper Unsuccessful Handshakes occurred." 
description: |- Look to `ssl.*` options in documentation: @@ -369,5 +369,5 @@ spec: Look to current Zookeeper pod log to investigate unsucessfull handshake reason ``` - kubectl logs -n {{ $labels.namespace }} pod/{{ $labels.pod_name }} -f | grep -i -E "tls|ssl|auth|cert|handshake" + kubectl logs -n {{ $labels.namespace }} pod/{{ $labels.pod }} -f | grep -i -E "tls|ssl|auth|cert|handshake" ``` diff --git a/deploy/prometheus/prometheus-template.yaml b/deploy/prometheus/prometheus-template.yaml index fbb8b4bfa..4fa89b722 100644 --- a/deploy/prometheus/prometheus-template.yaml +++ b/deploy/prometheus/prometheus-template.yaml @@ -51,11 +51,11 @@ stringData: - source_labels: [__meta_kubernetes_pod_name] action: replace - target_label: pod_name + target_label: pod - source_labels: [__meta_kubernetes_pod_container_name] action: replace - target_label: container_name + target_label: container - action: labelmap regex: __meta_kubernetes_pod_label_(.+) @@ -110,11 +110,11 @@ stringData: - source_labels: [__meta_kubernetes_pod_name] action: replace - target_label: pod_name + target_label: pod - source_labels: [__meta_kubernetes_pod_container_name] action: replace - target_label: container_name + target_label: container - action: labelmap regex: __meta_kubernetes_pod_label_(.+) @@ -163,11 +163,11 @@ stringData: - source_labels: [__meta_kubernetes_pod_name] action: replace - target_label: pod_name + target_label: pod - source_labels: [__meta_kubernetes_pod_container_name] action: replace - target_label: container_name + target_label: container - action: labelmap regex: __meta_kubernetes_pod_label_(.+) From c45a4661078531b0abff18edb37644c352d39a9c Mon Sep 17 00:00:00 2001 From: Kilian Ries Date: Thu, 27 Nov 2025 17:57:20 +0100 Subject: [PATCH 005/233] make regex more relaxed --- grafana-dashboard/ClickHouseKeeper_dashboard.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/grafana-dashboard/ClickHouseKeeper_dashboard.json 
b/grafana-dashboard/ClickHouseKeeper_dashboard.json index 832e1f052..e2f1cb363 100644 --- a/grafana-dashboard/ClickHouseKeeper_dashboard.json +++ b/grafana-dashboard/ClickHouseKeeper_dashboard.json @@ -990,14 +990,14 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "definition": "label_values({__name__=~\"ClickHouse.*Keeper.*\",pod=~\"chk-clickhouse-keeper.*\"},namespace)", + "definition": "label_values({__name__=~\"ClickHouse.*Keeper.*\",pod=~\"chk-.*\"},namespace)", "includeAll": true, "multi": true, "name": "namespace", "options": [], "query": { "qryType": 1, - "query": "label_values({__name__=~\"ClickHouse.*Keeper.*\",pod=~\"chk-clickhouse-keeper.*\"},namespace)", + "query": "label_values({__name__=~\"ClickHouse.*Keeper.*\",pod=~\"chk-.*\"},namespace)", "refId": "PrometheusVariableQueryEditor-VariableQuery" }, "refresh": 2, @@ -1011,14 +1011,14 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "definition": "label_values({__name__=~\"ClickHouse.*Keeper.*\",pod=~\"chk-clickhouse-keeper.*\",namespace=~\"$namespace\"},pod)", + "definition": "label_values({__name__=~\"ClickHouse.*Keeper.*\",pod=~\"chk-.*\",namespace=~\"$namespace\"},pod)", "includeAll": true, "multi": true, "name": "pod", "options": [], "query": { "qryType": 1, - "query": "label_values({__name__=~\"ClickHouse.*Keeper.*\",pod=~\"chk-clickhouse-keeper.*\",namespace=~\"$namespace\"},pod)", + "query": "label_values({__name__=~\"ClickHouse.*Keeper.*\",pod=~\"chk-.*\",namespace=~\"$namespace\"},pod)", "refId": "PrometheusVariableQueryEditor-VariableQuery" }, "refresh": 2, From bf9eac101bd96294cae71f0bff160b683c203f35 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 12 Dec 2025 02:10:31 +0500 Subject: [PATCH 006/233] env: operatorhub manifests --- ...perator.v0.25.6.clusterserviceversion.yaml | 1660 +++++++++++++++++ ...allations.clickhouse.altinity.com.crd.yaml | 1453 +++++++++++++++ ...templates.clickhouse.altinity.com.crd.yaml | 1453 +++++++++++++++ 
...ns.clickhouse-keeper.altinity.com.crd.yaml | 883 +++++++++ ...gurations.clickhouse.altinity.com.crd.yaml | 539 ++++++ 5 files changed, 5988 insertions(+) create mode 100644 deploy/operatorhub/0.25.6/clickhouse-operator.v0.25.6.clusterserviceversion.yaml create mode 100644 deploy/operatorhub/0.25.6/clickhouseinstallations.clickhouse.altinity.com.crd.yaml create mode 100644 deploy/operatorhub/0.25.6/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml create mode 100644 deploy/operatorhub/0.25.6/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml create mode 100644 deploy/operatorhub/0.25.6/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml diff --git a/deploy/operatorhub/0.25.6/clickhouse-operator.v0.25.6.clusterserviceversion.yaml b/deploy/operatorhub/0.25.6/clickhouse-operator.v0.25.6.clusterserviceversion.yaml new file mode 100644 index 000000000..26c6644f6 --- /dev/null +++ b/deploy/operatorhub/0.25.6/clickhouse-operator.v0.25.6.clusterserviceversion.yaml @@ -0,0 +1,1660 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + name: clickhouse-operator.v0.25.6 + namespace: placeholder + annotations: + capabilities: Full Lifecycle + categories: Database + containerImage: docker.io/altinity/clickhouse-operator:0.25.6 + createdAt: '2025-12-12T02:07:15Z' + support: Altinity Ltd. https://altinity.com + description: The Altinity® Kubernetes Operator for ClickHouse® manages the full lifecycle of ClickHouse clusters. 
+ repository: https://github.com/altinity/clickhouse-operator + certified: 'false' + alm-examples: | + [ + { + "apiVersion": "clickhouse.altinity.com/v1", + "kind": "ClickHouseInstallation", + "metadata": { + "name": "simple-01" + }, + "spec": { + "configuration": { + "users": { + "test_user/password_sha256_hex": "10a6e6cc8311a3e2bcc09bf6c199adecd5dd59408c343e926b129c4914f3cb01", + "test_user/password": "test_password", + "test_user/networks/ip": [ + "0.0.0.0/0" + ] + }, + "clusters": [ + { + "name": "simple" + } + ] + } + } + }, + { + "apiVersion": "clickhouse.altinity.com/v1", + "kind": "ClickHouseInstallation", + "metadata": { + "name": "use-templates-all", + "labels": { + "target-chi-label-manual": "target-chi-label-manual-value", + "target-chi-label-auto": "target-chi-label-auto-value" + } + }, + "spec": { + "useTemplates": [ + { + "name": "chit-01" + }, + { + "name": "chit-02" + } + ], + "configuration": { + "clusters": [ + { + "name": "c1" + } + ] + } + } + }, + { + "apiVersion": "clickhouse.altinity.com/v1", + "kind": "ClickHouseOperatorConfiguration", + "metadata": { + "name": "chop-config-01" + }, + "spec": { + "watch": { + "namespaces": { + "include": [], + "exclude": [] + } + }, + "clickhouse": { + "configuration": { + "file": { + "path": { + "common": "config.d", + "host": "conf.d", + "user": "users.d" + } + }, + "user": { + "default": { + "profile": "default", + "quota": "default", + "networksIP": [ + "::1", + "127.0.0.1" + ], + "password": "default" + } + }, + "network": { + "hostRegexpTemplate": "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$" + } + }, + "access": { + "username": "clickhouse_operator", + "password": "clickhouse_operator_password", + "secret": { + "namespace": "", + "name": "" + }, + "port": 8123 + } + }, + "template": { + "chi": { + "path": "templates.d" + } + }, + "reconcile": { + "runtime": { + "reconcileCHIsThreadsNumber": 10, + "reconcileShardsThreadsNumber": 5, + 
"reconcileShardsMaxConcurrencyPercent": 50 + }, + "statefulSet": { + "create": { + "onFailure": "ignore" + }, + "update": { + "timeout": 300, + "pollInterval": 5, + "onFailure": "abort" + } + }, + "host": { + "wait": { + "exclude": true, + "queries": true, + "include": false, + "replicas": { + "all": "no", + "new": "yes", + "delay": 10 + }, + "probes": { + "startup": "no", + "readiness": "yes" + } + } + } + }, + "annotation": { + "include": [], + "exclude": [] + }, + "label": { + "include": [], + "exclude": [], + "appendScope": "no" + }, + "statefulSet": { + "revisionHistoryLimit": 0 + }, + "pod": { + "terminationGracePeriod": 30 + }, + "logger": { + "logtostderr": "true", + "alsologtostderr": "false", + "v": "1", + "stderrthreshold": "", + "vmodule": "", + "log_backtrace_at": "" + } + } + } + ] +spec: + version: 0.25.6 + minKubeVersion: 1.12.6 + maturity: alpha + replaces: clickhouse-operator.v0.25.5 + maintainers: + - email: support@altinity.com + name: Altinity + provider: + name: Altinity + displayName: Altinity® Kubernetes Operator for ClickHouse® + keywords: + - "clickhouse" + - "database" + - "oltp" + - "timeseries" + - "time series" + - "altinity" + customresourcedefinitions: + owned: + - description: ClickHouse Installation - set of ClickHouse Clusters + displayName: ClickHouseInstallation + group: clickhouse.altinity.com + kind: ClickHouseInstallation + name: clickhouseinstallations.clickhouse.altinity.com + version: v1 + resources: + - kind: Service + name: '' + version: v1 + - kind: Endpoint + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: StatefulSet + name: '' + version: v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Event + name: '' + version: v1 + - kind: PersistentVolumeClaim + name: '' + version: v1 + - description: ClickHouse Installation Template - template for ClickHouse Installation + displayName: ClickHouseInstallationTemplate + group: clickhouse.altinity.com + kind: ClickHouseInstallationTemplate + name: 
clickhouseinstallationtemplates.clickhouse.altinity.com + version: v1 + resources: + - kind: Service + name: '' + version: v1 + - kind: Endpoint + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: StatefulSet + name: '' + version: v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Event + name: '' + version: v1 + - kind: PersistentVolumeClaim + name: '' + version: v1 + - description: ClickHouse Operator Configuration - configuration of ClickHouse operator + displayName: ClickHouseOperatorConfiguration + group: clickhouse.altinity.com + kind: ClickHouseOperatorConfiguration + name: clickhouseoperatorconfigurations.clickhouse.altinity.com + version: v1 + resources: + - kind: Service + name: '' + version: v1 + - kind: Endpoint + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: StatefulSet + name: '' + version: v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Event + name: '' + version: v1 + - kind: PersistentVolumeClaim + name: '' + version: v1 + - description: ClickHouse Keeper Installation - ClickHouse Keeper cluster instance + displayName: ClickHouseKeeperInstallation + group: clickhouse-keeper.altinity.com + kind: ClickHouseKeeperInstallation + name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com + version: v1 + resources: + - kind: Service + name: '' + version: v1 + - kind: Endpoint + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: StatefulSet + name: '' + version: v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Event + name: '' + version: v1 + - kind: PersistentVolumeClaim + name: '' + version: v1 + description: |- + ## ClickHouse + [ClickHouse](https://clickhouse.yandex) is an open source column-oriented database management system capable of real time generation of analytical data reports. + Check [ClickHouse documentation](https://clickhouse.yandex/docs/en) for more complete details. 
+ ## The Altinity Operator for ClickHouse + The [Altinity Operator for ClickHouse](https://github.com/altinity/clickhouse-operator) automates the creation, alteration, or deletion of nodes in your ClickHouse cluster environment. + Check [operator documentation](https://github.com/Altinity/clickhouse-operator/tree/master/docs) for complete details and examples. + links: + - name: Altinity + url: https://altinity.com/ + - name: Operator homepage + url: https://www.altinity.com/kubernetes-operator + - name: Github + url: https://github.com/altinity/clickhouse-operator + - name: Documentation + url: https://github.com/Altinity/clickhouse-operator/tree/master/docs + icon: + - mediatype: image/png + base64data: |- + iVBORw0KGgoAAAANSUhEUgAAASwAAAEsCAYAAAB5fY51AAAAAXNSR0IArs4c6QAAQABJREFUeAHs + vQmgZ2lVH3j/r6p676abpSNLE2TrRlwSQBoVtVFHiUGFLKOEKCQTTUzGmTExhmhiSJBoTMZEs2KQ + BsWNREWdyCTOGOMEQZbI0i0NyCaIxAB203tXvfef33LO+c697/+qqqu3V1Xvvq773fOd3/md5Tvf + 9+57/erVajq4DiqwoQLnvf4PH3Pe4ekx29P0oNVqfTEgl6xWE8eLV6uVxgnjCjLHab0TI+ZW00PW + 6zWgq0+up/XNkG+edjiubt6Zppu31tOnd1brm7fwvL1e3by1Bf165+b1+vDN02r7Jhh+6MZnXfYh + jAfXQQVmFUC/HVxnbQV+aX3Bg875xJOOracrD21NV00705U4XK6aVtMTVqut81WX6BAcNpOOIIye + x4iJFMfDhmry8CIwRh5mZBHfppH61XT7znp672pavQeH3g07W1s3rLbX77npQQ+6YXra6rYNXg6m + zoIKRPedBZmerSmu16vzfvnGRx9a3XXlar2+CscEDqXVVTgSrtzaWj2SZRmHURRJh0uf9/EycNE2 + cVpxPg+jLLMOJczPRiiPd0j1Q634eMgtr/X693CIvWe9OnQD+G/goXbXzs4Nt3/FZR8BxwaDJcGB + fLpW4ODAOl1Xbq+4cUBd+Mv/4/Om9bFrcF5cs1qvvgSvM5fpMIDN8tCxzDcaHjoCiFkyn+psur/f + sOaHneLfdHgxRs7zcNxZfwrjryOPX5u2pl+78VmXvgsyvgo9uM6UChwcWKf7Sq7XWxf+8u997s72 + 1jWHpvU169X6S/Dl3GVKi4cQrjp8JO11aGnPGxGHlw+ztPeh5jOtTjHhfdj50AgX8zcrHib8Mg9K + 2W8a49DJw2c2Jmkf85Aib/LnGPxw9ik8/joyODjAeu1O4+eDA+t0WzwcUBf84keeslof4uF0DRbw + mTgJ8I1xHArIRYdHjD4c4piAntdm3JnxhjU75BaHF7PH98Q+hfHgAFMnnJ43d/HpGfvZEXUcUOtt + fHm3tb4Gp9Izp62tBzF5HU4+pVQLnkn90AIg5ufLvPnQIp/gfgDRHHf6vWExHR/abWxvcsgIB9hO + HGBbB19CxvLv5yFbdD/HeFbGdu7PffSJ07T9l7ZWqxegAI/YdJioMFLsPkzqsMkvxNrh1Q81486O + 
N6xNh5fyzy8rd3Y+tl5NP74znfPKm7/ikveelY23z5M+OLD20wL9Xx++7Nw7tl+wNW29EBvnadxM + vOrwydVq8/FKFbiDN6xT+l6Zqje/gectWINXr849/JM3ffGlfzjXHkgPVAVyCzxQ/g/8vnZ9zjmr + D/0JLMQ34bh5ztbW1jkqSjuU/EYUpeI8JvIw89dxB29YqkP7co/yyRxeszcs2vcLMip7Fwr+Szs7 + 61fffM5DXz89a3WsQw6e798KHBxY92+9y9uRf//Bq7d2dl6IDfH1+HmoB0uhw+g4h0+uVjvMDt6w + ol44XOrwQTF3ffmHOZaPh9iuw03FX9wC1w89PP8BiH9ie3341bc++7J3LCwOxPuhArkF7gdXBy6m + 137wM86Zdl6EHfNN+N7Uk+JVaVaY+ZuT36S0+XKlDt6wZvWSsOkQupfesDa+qcEfjsd3Y0lefezI + zqtvfdblH98d1MHMfVGB3Ab3BfcBZ1bgtR958Dnru/43NP//ii9UHqI3AehYfB9GsQwHb1h8Bbr7 + b0B5OOWYdd00ngp/8GBwfHrwbWe988nVtPXPp/U5//zTz34Qf+7r4LoPK3BwYN2HxZ1+9n2POnJs + 62+gyN+MQ+pCHk/jsIrjiodUuw7esHgmtC/v4hCqL+Narepx0yF0koeX1qP5K04+BG//slCrxvn6 + dBO4abp1Z1r/yLHtwz94+1c/5KM0P7ju/QrMd8u9z39WMp772g9cubOz/bfwd9z+PPr6SB1C0eTj + 0OIR5i/7VCgeXrl52nzhc7XikBOvCYZ5s9Mm77JQ9tf9buQHYMxrmy5kEYvRccggPGw+dMwytvpM + jsMhD4nZWKztoR8meTjlCJjy2zRu8tNo67HzB490nG+XDzP+0C4OWczvrNdHkeGPT+vVD9z8Jx72 + ngY9eLwXKsAaH1z3UgWO/NT78aMI67+Nln4uvkeF357SN7G3Zx0C+Rk6Dp8MQZufQjuUtPlypTgv + 2pgQrr25Le0Wfsr/DGd78na/iqnczH+SXrhZehmgrOa3xSGx642FbvFH7jkCrzjbaH9EbLgW/HnY + nZKfTh+8u3g4XxHjMeQ8tCjiz860Wv/89tbW99/2lQ97a6c9eD71Chyny06d9GyzPPxT7//y1Wr7 + b+OM+vI8o9zS8Zk3Dods8jo0UCjhUs8RnV762aFSZ0k9EDc/ZFKMZW32fU1Oih+BzXG749IhAmLH + IYNys+nQYVSuy4aRuzzy3zUWa3sI/L3ip9HWY+fHJOPWxfl2+TAbb1iUkQj+GBc0/w9+LOL7bvnq + z/jVZnrweAoViM4+Bcuz3eQl661DV33guVvrHRxU09O8yWLPoTb4chB3NG0cGtnEdQjs0rug2vx8 + bIeNtkCulDY11TGhcfdhspefmp/x296niXkH/4jLcTS/s/QyQONn99i19+jNR3kzgg3Xgv8e+en0 + wetDqR2ynM/1Iz7k/oZlmog34Ov1zlumaev7bn725b+ABTz4LRIu0t26H6fL7hbPWQU+8tPv+Tz8 + FpeX48u+q3OvqADaVD5r3KMHb1izNyAUqW91Nl/JWchN46buCtyMH/XfdbjA9oR+TsQfcQpGv+2y + vxO+YUVcjg8B/cb26tC33fbsh/23RnXweBIVODiwTqJIBcGPJxzZvvNl+Ez5Lfhhz63abQRsOKy0 + HTmvy9um3nByG5U+UCnHWPiiwQP2zHgDWvAu7RZ+Bp8JLR+8YakOi8Nozzc14Vx3rVrIJ37DQp3x + wUMOF355xPrlq3Mu/Lv4e4uf9Oof3E9UAXftiVBnux5f/h258n3fggZ7GX4h3oN5JujNAA/svTgj + Nh5aauIBQCXbl2+SFocPCDcfKgs/sCUuAtEKDTGWNfwKJ4RvJ8Ufh2LmOYs78+n8s0IAnXn0Ee7F + 
t2lM+01ji70eA3ev+CnS9tD5Mc24dXG+Xaf4hsUCgQXrtLPzyenQ9F03P/vhr8CCHnyZ2Gq76TE6 + e5PqYI4VOPJT770azfVyNBN+i6cPiTqEoudUqTgtYtBnUrV5bm42JwjqsAgZEzLPWx0uMV/4hIWD + Oa7xLu0WfgafCS3b3qfJmHdejmxpp7hVj4h8kUfmozE2f57u3uQ+BOgty1gj8PLXRvsjYsO14L9H + fjp98O6Kl/NZV+JDVl+kyHllFgMSrcMNeC1j8mDE7zb7b9s7h77t9uf8kd+Q6cFtYwXcnRtVZ/nk + j/3O5UcOb/9jvLd/I75XxXbjadWH2FSeVrWWejR1HW4G4N4OF0k+BId905MP1zgsJJbDDEtxCafw + hBey2YdlTDOu4Xcjv9LtuN1xDb+sS9QnHGlzwv9shE5+N41pv2kMztkQuBl/+tvEjzlWk3jF3ccZ + cQidn3aJ4Xy75D/XGfPib4fZcIP6EacJAXHLutFOpFCvf2xaHX7xrX/y4K/7qCKLm3fEYvKsFv/z + +vDWx977bfghqpegOJd4U2aTe5PXIcQmywrycBgwTFMREyqo5TocdulddR1CfGyHjdzs8hMTwu0+ + TPbyU/Mzftv7NDHviGPE5Tia31l6GaDxs/vYtcqLm5Zo8W0aqUd8wsVYh8yMOIQFv3Z/2m/ix5z8 + b/LT+YN3V7ycrwzowPI9ecMindyRblp/Gs9/79YLH/4vpoPfDtFXRFWfTZzNwuGffN+XTjvb/wab + Bf+qTG4a7rHYXhxjk6pFtSm0B122pR7lTZ4AYAhePAVr8HPCXavNKpEI+7c/ieVQcTVFuJ/zhX1Y + ajgpfuXJ+O1/Fjcd8YrRccjA87j3w0b+sAMrX+pp3kftVsxsGoHbdQXuXvGzixwTnZ9iYjjfLh9m + sc6YpzwyKxrXg/0gXgGNC7mm05Pd/Pb2tP7m25/zyIMvE1EtXtF5Fs7a+2vwd/5W2z+Ipvmz3sw+ + VGK3oizonjgNduujaqV3cx+8YbVu0m4ch5E3edZpwwh8HXKoqzd52Dfaelzww0DrdUp+ihQPe/Fw + vo7bwPEwwgc3lNQYnVkMCp9656N2SR75CXeCDx54orODLxNRBF6s79l9/eT1Tz60fegX0ECPc7ex + 16P5tFksq2/UZZTdRd5UllXEpZ7NySbmvAG4W+4tX3rZN33YOZ6FHzDJTkTmD/fDX7O3f98HX9ox + zgU/Jua43XEBIELHIYNyo8MC+tkIrfxsGrVpwbdpLNb2ELgZf/rbxI85Rku84u5jo63Hzk+7VHC+ + XfKf64x58WcjSU53qB9x4g0FcSHXdHoqN3yA3c76evxWiK+/5Wsffj0mztoL36o5e69DP/7ubzm0 + c+gt6PPHae+hN7xJvTnZO9qMflDXzjax9FE/EoSMQc3JCdsTo+0S/KlP/uBA1ya+j+KjOa/0o2YP + WX7kfre9cGTwNYsfU5bpF4JgmYcdaj7ycBwRSMVBO2gMtJPgpaA8FnmJB7rZGPYzfLNb8qe8F955 + wf/J+Mk4Mdal/LweGSd18idQ1scedBht1GNS5eEnhVjfkEPRB8SbvHJCRhGstlZPXk/bb7nglz72 + zak5G0dX52zL/LXXX3ToztWPo/me610TZXBvqCmzubwXSzFvLjT1bK+qydnUgqn5ksclNs+uzUQD + XjJsmyTmCx8w4QRPR1aU386XOPLHNfjSjpvJ7gUJojlud1zzOLQL04XeJGQfh47fRLIuG8Ys5Kax + WNtD4MSLeGcjYMpn03gq/MEj77Rvl/OKwwjzlPOQIWy4Q/3wIT3LnjgBAsdpRa4H3HiZz36sB8/r + brnoyDdOz7r8FmPOnrs79uzJd5p+4t1P3dpe/wx+Uv1x7oXctNE0bH58cLMNPZpmtom7PopX+mwx + 
dWU/BQBsmz4+c+amzyWQXwrk08B4SpzFEQAMjXdpt/AzP4RItylfz5tf98D1edcn3DkuQ3ffx64V + bmw+iED3LS4ZeMXVRtWDPJuuBX+eEqfkp/MH7y4exZGRwyBk9U2K4ol4I0Hz+NBi3SirAvJjGrMi + /108MghW6d+/s9r6+tue84i3afIsuZ09XxLiL/9t/fi7//rWsZ034ueqHpe94marnlMT9c+Eu5sq + mwnNNnqoui16D5vQzWh7dtOQsyk1q0Ci23h4hNzHWfOGA/GXgnax+Zf29FunCsNs8TMqybbXLhHe + 846P99hkgedMOmRWmw6THj/1XXb+ES/NRScm4xKfI31EXnzU1fNMXI4AVJ54nvnbZBd48eaNuOBL + e6oyDzwJaRn54aPnMfSAQeF4og4hh6IP4rEf0eNGP61+8kMZ33Pd3n7j+b/40W9P5NkwssZn/vUT + H77s0M5tP41V/srxhoG01QsuAXqTTYAejebTBDAcCQx5tz7KV/pssWYniOXyn9tI/MGBgXrDPRY+ + pnscA4fNAjuns7Bb+NmMs/28HhlHhjPnH3FYLzkedw2x+aPAGGJzA0gviruP1DOfNtofkRuuBf89 + 8tPpg3dXvJyvyPEY8ji0kiTiDXg/tLjMlMUjP6ZxPaI+YV4VEp4S9a4PPUF+3W1bF//F6Tln/j9H + Fl2ZBT4Dxx+7/plb6+knsQGuyB4bm1arzVPCibtbvFnYFNo0VJdCvaNmg8XQR91CUXo2VfB0B+Uf + k2pZ8YsQE+FXouMqfIQJx6JTXCYIeQNf4xEo5O53Iz8AY975z2URidJxyCBdaDMKn/lwhFZ+N43c + jCrchrFY20Pg+6FW/jbxY07+N/lptPXY+YNPOs63qw4hrjPm6xBSplrOSIv6OGQMBBoP8lMDrIK/ + 3PAhDm/y46N4IOeF37f1kdXhQ887079EPHO/JOSXgK+6/nu2ptWvoZGv8KbinuAhwhG3ehiy9WgK + bR7jBWyyzdhsYceuKX3QshnZXHIkQMmylz75qceF5k18H1uYww/tS0G7FOl38LK5tSk063mbDZwP + VQCoCN7hn3OOq9ulQ7HE5iYyr2Fv/10WD4CzMeokXNYhR5JWHcPDCfDkmfGnv012onc9gt3+wn/y + UJd5qE4loz74EK7izPoCBIXjifUNORR9EI/98M6LPGEniX6GTAQvfE/2iunYMX+JiN737Jl3PzMT + ++X3nbv1B3e+arW19Q3+DBZpeu2jedhEWFB3mVd2pndTeC+WYt5c3BRqvmgMEoYsWjVxby7z7NpM + 2eSyD7+gzM1ReJrzCrz0Lf5wX3YznC3DfL65NvIrj47bHdeMf1YIlCE3ex/h3XXZMKb9prHFXo+B + u1f8FGl76PyYZty6ON+u/fKG5Tqw7jzi1j9965ErXjR99erOFuoZ8Zhb4IxIRkn82DsuXK0P/RJ+ + XdWz+mmiTYnVnG1O7R6XIM6K0MchU3p2AXAauIm7PkpX+tyM5A07QSyX//jMmZs+WOTf8IwrD41A + lB/rbd/zWtgt/JR/8uCy3PMZ8wboHjjjPQ/cLD3bGb24x+bP9fAmzzptGHOd2qg8ybPpWvDfIz+d + P3h3xas4mG/EE7K/XMNshZl6QFkuKPLLOS1j8nCUXgNYiVvyBEBeBw/Ecc15/vOt5x35mumrPuPW + ATj9n86sLwnxGxZWO4d+FV8GPovN0ZvccjZNNEPritE7rRnUI2y6aJaQbTaaTG0wbxY1He3k1wDx + uGnhHx+8hp7qgc/5whvuwBkH7R0IDbVJKq7Gaxw1vgbf8O9NBL1g5h3+aee4On86FIt3nx3EfdiT + b56XeMTKPOx2I77ZyV/3oAWL+iUuR+Aqz+TPcZNd4DGMS+vZ8g5NxunIwz/rg49ZfZQZ9TCEwvFE + 
HUIORR/EQ1cRJp9EkH7tZ9STWF7Si7fq+azzbz/6q9PPffxyI86MO2t8ZlyvfNcVq0MrHlaP1xKj + ebXqHHHpTQJdMN4wMGlg6BMezSd7GRoY8u43EpnTgfjKzO3reXsQT/nfpTcP9Ya3uDGR08NPx/W8 + FnYLP+U/CHfnM+wdR4bDuriOngcOE7O4DN19j82f6+HNG+UHWrx9zHVqo/MmcsO14L9Hfjp98O6K + l/NZV+JDHodWkkS8ATePDxvWTYcMeeSnBtQjD7/OEzi6k373obWLx/H/znq99WW3P/eRH0m203k8 + M96wrn3356AB3oA3hcdXc3mx3AVc5GqK1gzoAS1yDtFE+druXqpuU4/O+cvcD6P31FQ0cFOyRcxT + 9pJpFk1NCJJIuY8tzOGH9qWgXYrmK3vhCPTFeZsN3MEb1mIdWCoV1OuherX6RSU1uM7A4aPXFQSh + xwCF6x7rG3Io+iAeGo62IE/YcV5+hkwsL8UhXtGFrInHT9P2G8553Uc+x8jT+46MTvPr1dc/fbXe + +Y/4ntWltZhISW8q0XTMUG8S3Kw8FCTjVg/Uu0k2v3EAKEDwsGlCVpfagfjGNEsbdtS7nYf/kMUr + vW/iFdxLM4s350UbS0eHEY/TWdgt/Ay+juv5jHlFZHHELX/QRKIpLvOQbd5yHWKsQ5M0+KO4+5jr + 1Eb7I3LDteDXbkdgp+Sn0+8VL+crcjyGPA6tJIl4A94PLZUveeTHNK5HHn6dBxrhWS/qdx9azrvx + AK/1lt36xqPrra86+rwr3pysp+N4er9hvfpdz8Fh9atYvEv1GSYWUYuuJmiLLJmL2ZrBQK2b1lTw + 1gwhqzl32aMLB311ScDUVAQ4Lrpw15Z/yY6nGic2mdAKaBGvFfZL+4qfflKkn8HL5u6HSfmf8dte + fMGbcRvmPOzO/OlQknef/OZt2Nt/l8UD4Gxs8TBe4XMkaeiTP+UZruE5P+NPf5v8iD7ySgfEBV/y + UJV54ElIy4gXH8JVnKkHDArHk3kljxShz3o0O3sQQfrd67CSftA5zlk9pksPTzu/et7PfvRZoj1N + b6fvgXXtdd+wWk8/j8PqQtbebyZoGi5SytF0pYdi6DFrINXsTcnWm4f65BVgZs/uCLskCBkDetSE + thdADsq/9MlPPS7Em/g+io90vBgo/dC+FLRL0cCyF45AX+VfCdu/NgHNBDNR2YtuUVdSVRy0Y0D2 + SxWvYU++eV4KG5jZ2OIpfLNb8qcsP4nLMfzP+NPfJj8tXjz6Un4t75rOPD06T+DwIX9Vh9TDEAri + XGfWI+ujh5CzHs1OPim3+snPkCMs13vQhTxwsR4XrraOvf78n/vgN6Td6TaengfWq971HVj+n0ST + HM6C12c6Ni0m/RmHzYFVLNnz1mOyHgImOO3BTruQ46Fkm6Ve9CYQPmjZlckjyJBlL73jCQbAwy8m + nI9H+wtUxqWuFxA32oXfni/VwpHBF3nNx3iS3/ZSBO/wT5Tj6nbpUCyxuYnMa9gzsHle4gFwNva4 + E58jSUOf/CnLT+JyFDzyxPMJ/QS+uPlAf8GX9p523VjZISM/fAhXcaYeKChc96hDyKHog3jMK3rc + yNPqJz9DLpTijbBp1eInxv75sDoXP1L9k+f93IdPy19Tc9odWFuvetc/xv8J/Mf4jMH+qMsimoZN + hlnJsWgEWfZoPSbrgfOWbW8e6mXnB+Gt73xkx0UC4YM2tontBcCtxSd98lOPC/Emvo8tzOFHuwA2 + EXi4320vHBl8zeLHlGX6hSBY5sGJoO9xCxgK5ktQqzNFXj3+ZV7KB5jZGLyyyzrkaELex3UCfOUJ + ixP6ASbjLQfkD/9pT93AZX04on74EC7r48pUXR1PrC+A5tGDApQ78nc7OpTc+gJyvalJ75v4Bp35 + 
W/3sn37xBz8Jj+FHzvvZD31XozgtHk+rA2vr2uu+H2vwHaosHvqlzyhcTC4SFPkZhk3Hy7JH6zFZ + D5y3bHvzUC87PwhvfecjOy4SCB+0sU1sLwBuLT7pk596XGijxPexhTn80L4UtEuRiQzegzesqAvL + 2+uigu2uN2unq+llV9OuLyocMI7gwUfnH3qpta7Hf3PPOJM3HMpPi1N+hlyoZf+1+IkZfQuBxxV5 + VtPLzvvZD34/9afLxchPj+va6/4P/FPL/9TF3h1yfQZT83BNkFosGtHSQ+6fadwLLsGAR/NpQoa4 + oYlCLj+lJzuu0guNKNwUmjcA9+Z/l14gxWd4xsV4TF/zCsd68zdexiF82C38jPw7zvbdEXEmigGy + 48h5jFEW42Le8Pk916FtIqLFt2nMdWqj60uLDdeCP9e9NilMNvpr/KoLefoVvLt4hEtGGIQ8Dq0k + Cb5I1DyoI/Bql+ThCLoYUBfok1ZURRAS9eZJT1YseORn4CqP9KsV4D9Bvf72O//0Z/6zGdc+FViX + /X+96rrno6qv4T8P78XO1V2E7y7QptcSh+xNjTTVBdz8ufkwZ6BrMNN7E6uJyEOg+GowT0yLYKln + U+GjNn8EUP7LfeQh+/ALXdoVPmCKI/Ut/nBfdjOcAvRt8JlwyAGqPLJOzn/gWrwVRy8EN1/Wr43A + KtxNowtNwyhwGyOs2RC4e8XPjDiEzo8pxq2L8+3yIRDrjHnKSIBPQgWN68F+0ISAxoVc0+mp3JjP + fsiKeiaPPPgmPdwWjx5iQu5oZ/14oAz+Nf796Wn9wrv+zGNf0yj35SNz2N/Xte+6Bl9z/wr+IvNh + r8bmkL2ZYjGREeWOlx6LMzYdQOoF8w14O2RKj4fZJjaP7aN8pQ9ad4XtBKGf5n+X3jyKm4/k05CH + hsRZHAHA0HiXdgs/I//O3/MZ8+a336XdqIf1GW9I86FvHsQ3Nh/SAVJl7mOuUxvtj8gN14I/1/2U + /HT64N3Fw/mKHI8hb34zot5w80T+SJyyeOTHNK4H1iNpMTaCkKg3jybytuSBrHWTH/sz7zx+x7E+ + Nq22vuqOP/WYX026/Tju7+9hXXvd52Oxf5GHlYoaza5CxiJkUXPxOdaicPO2xaJi6GFp4KArfTRD + yPOm6vxF7wfhs5XdFI6LLoYst5IdjwLgreWXdrN4iWE+9EN7ESVvipQHr3EE+hp8A+fmh14w8w7/ + tGuHTNa94qCaAZmPaF7Dnnxjc5V/YBR+jmEvu8TnaELex3UC/N3yA9aMtxwov5Z3KAZu1E/1wXoo + n6pD6mGo8lB/vL7KejQ7+aTc6ic/Q46wHH8sQ8XR6lf1EJ0QZke8lEB7eL2z87ojr/3A05NzP46K + dT8GNr3ynY9Fo78Zv874IdoMKH6NGwL2G0A0BfSUOz4/0xhHPUC1eJbdo2gGNoXsRWRgyLbveqh5 + lT5o1QZwIEcC4GY7t2AEUHpiCOe8HmJgPJ2GeS1xjTfsK89FHDU/w/V87H/EoTAUl+No8SkO60ee + IfchNn+uR20eYMgm3j5yEyE+4WIUP3k2XQv+e+Sn8wfvrngVR0YOg5BP5zcsZaM8pk+uDx2++s7n + Pfr9vRT75Xl/vmH96A2PQIF+BcE9hM3CZp2NrJ6LW3WUHu1fzRV2ibM9zbAZ0rwequdCn/6M17YS + X7dnXGGX8YQs2tiGjksA3Fp80ic/9bgyTzym3SxeYjIOnVohi1fhzOwEF44R+Rp83vyWGRf0gmVe + qaddi1tATFUcVEfihMbV41/m5fpEvMBLDl7ZZR1yJGf6Df6U98I7L5glf46b/Ije+Sa982t5hyLz + MjPDsodxWCWPx1ALt5/fsCqP9fSQ6ejR/zj9wu9yD+67Sy26r6L60Rsunrbuwj8UsfVkNV1uhj5u + 
CNhvDjyM0GTQ6w2B3UK7kJPPekzWg2GG0375xhE8GpZ60QdBDYpDDsI/NHLoOClZzviCxXFT6HEP + sRwov8LFG4lwLV/KCz/lf8a/zJduzCPz4HW5ch5jlIVhZLx6Xt5yHWLUOgAjvk0jcIqzjeKn/aYr + ePOQyfGU/HT+4N3FozhYh4gn5HFoJUnqIbNcwOWhxfJSDkUfwEoc9Z1HBpqw3odpIqyAQcBsP+pI + feWRfiP++TyBwl5357mXfOH0dQ+7Wdz75La/3rCu/eB509bRX8QPhvqwiiKzWbW4ObJ4YzVVylz8 + WfHdFUOvxWzN4FUNPYbSpz8vcu+COX8Lg/HIXoOajk+Oiy7YfZbltjULtbpafmk3/AUm/dC+4idv + ivSTcdMrFQT6GnwD500EvWAmGv5p1+LOulccVMMw5wnHNeztv8sKG5jZGPbCZR1yNCHv4zoBnjwz + flhK3mQHXcZXDogL/8lD3cCN+pF5HDKeB1JUcgcCx+M6MhDz6EGByR35u50ZcA+70OehJ3XcxDfo + zN/qZ//0SzoAy0/uBymsJ+dq+uxz7/z06ybuyX107Z8D6yVr/JjVLf8OAV3jmrIJWFuvwmxkAVX0 + UUm/EcRnlGaXONubb7ZmwZNrSJyajxMAJq94Qh5xtTAK71bINxvbK2DcWnzsWlxDT7X1fd7xbPCj + rhcBDTO84ktexaEuJav9Vfwl2575AqEEyl5htrhVKBkaRxPtNudDkdewJ988L/kHZjYGr+wSn6MJ + eR/XCfDkmfHDUvImO+gy3nJAXPhPHuoGzvlaRn746Pyqo/C4QeF4og4hh6IP4rEf3nnRT6uf/AxZ + EKIUL0fRhTxw9m+91ku8thtxE29G9+/0ZedcuP0zE/fmPrn2TSDTFe/8l1ur6TkqHoozPiNodSWz + mv7MJMCshJ63XkuMZvPqkdF8XKPBi0kDQ2/Z+vATeAHF1+3JT1nmfghZtG7fEW+Th958waB4M78+ + Cr/0o10ASwXAeDMdA8teODL4cn7Dr2Xbu4dNVPaiW9SVVFkPPmuX2C9FXsOefFHPmHf+ES/mnJ/t + ZZf4HE3I+7ii8HvhOX/SfsCa8ZYD5dfyDsXAtXjh6Ux7w9KbHgqI/7723Cd94IerLg/wA9f0gb+u + fddfwcn5r9kC1WTRrP7M4ab3Jtkccn0GU/OAB/YdLz2bmLz0Mxwp/wGP5tNEA4ZcfkovcxNmmDTL + TIjTZYflf5c+UImPsfBFgwf5iQnhel6eL7uFn5qf8dt+FCbqx5DKTdatJiIOx+2CxvNyiM2f67Hn + YRLupEd8fRQ/eTZdC/575KfzB++ueBUH6xDxhDwOrSRJPWTAzZN5WQ5FH8Aah22lywcRiNh686Qn + K4ALmKJDXFpvxZf+m9+Iv/IL3EgrA8BPlq5X33r06x/3b2b+HgCBeT2w1yuv/yz8RsS3IJALajMh + olgij4vmJS6bchZ8zBfPEqdVxJqSD4ZUjwdOWLY+F9t4AcVnO9uzaYIHpt5UNYCOfuIwoD4clH/M + JI/Vzqv0CpB8LV7RMFDP9/gjPOEDMHB68m3wDZ6qByGVZ/p1YZZ2wgnuuFMmRT9sZMfNw3niN41Z + yE0j8LuuwN0rfnaRY6LzU0wM59tVhxAQykv6zLBoXA8giA8gWIALuabTU7nhg+3MSj/Bs4xj0IWf + mADOcdod3crvbH63H9ErH/71nem21aH10+/6M0+4XvMP0O2B/ZLw5R+7YDXt/Ds09O7Dqjapi86m + Z5E1sliU2+V561X6wCfO9jTLTQhjA8UiOsjWh5+QBRRf6j3SXnZkKH3QuivEJwdNllvJwWPALD/n + Y73wmW76oX0pGG+KBpa9cAT6GvkPnA9V6AUzUdkLtqgrqSoO2sGQcruGPfmintCXfzwr/BzDXnaJ + 
z5G8C/6U98LfLT+in8cv/vCfcTqMxHmUf2TiT07Ojzg8+c5B5WHfRR1CDkUfxCPrdCOeVj/IxSMP + vrkOLpPi1fo0O8ieZzx6kmHVSfUlPvjolzD5x797uJouwM/C/8z0Sx+7wIgH5v7AHlhHPvGvUBW8 + YbGGKBZH/NGYMovLYvfRBrzXJT0siyfwXhzzk3joYWpH4iB86NNf2FnR9J0vQpA/zgdtZOK4iLGD + 8u9uUDzBAMPwS7QC8tjCLAds2hE/7VKkn2YvHBl8lf8Zv+3FF7zDP+0cl+MwfzqUxC4PvnAzi3+Z + l3jECrMcWzyFb/VY8qesOBOXIzgrz+TPcZOfwGMYF3HBl/FSmXVx5CmjPviY1UeZUQ8jlYd61zHl + UPRBPPbDOy8TpF/7CR4DjFK89ldxbKqH6EZGVacIVIO8Mn9S08C8+CHuJx+55Rbs2QfueuAOrGvf + +SLU44X+DIXasLioQ5ayZMxz1YXLkfXifLvMY5zXxHaJs33zMxyJRXRcI8WR/ixr0RRH6mNe+Aii + 9F5iNiefHBcxQ3aelJNfjyPPNu94iAtM+iGfE4WCflKc8yqO7MLgtdnA+TO2aIgQUcZtv4u6ApUO + xUL+CpDKaTrv0Gr6zsecO7356RdPz3vYYai7P3nZtd60Ey7XOUcreB9X49vUH1U3WLjeG+rT+DO+ + ckD+0Kc9dQM38iHzmf6GpXIgz62trRce+an3vqjqdD8/uOr3s9OJ37daH3sL/o7gBfoUtKE5skk0 + Qq/PBG0sux77kifk2kzMFoTVzCFv1scmDXwY9sE80kcQ8kd+waKJzWOEHZZ/hyMe6TfFD0Xhac6L + OA3pKOXwG/oZTha+Db60i08WFiuBOW6PQ6viGIX42ocdmf7BY8+drjhvfD78zZuOTX/zvbdP1926 + wyNxfohQxuGgeDeNDnt+D9yyL+qNAeiT9jNnttT5MYPoxnw+Y7Q/1o+HVuRRnilnWtS7jwMINCwE + qEE8oh8OhRt5NR4BfXMdGs9wbDrIjk90BuKxeImP+Fy3CEDzc15obju6Pvzk6fmf+SEY3a8XY7t/ + r5e/9ch05Jy34/Xys1zk2CyIwiWLEZsu9W3VvfobIvbmisWEnnLZhZx88uNVKb4Bj+bThAxxg0XI + 5af0UPMqfeYRDjivy3IdAm6f8h8gx00h7ApfNHhQODEhHPLG6Lw8X/LCT83P+G0fBI429DJXOHP+ + nm8YTE84f2v6oSvPm65+UP3maqnyhm/cTq/5+F3TSz9wx/Spo9ziSBN/do3cXMynjfZH5IaLm4rx + LkbZA76LH3Py2/jT34w9+HbxcL4ipwPL49BKFs7jCrh5Mi+alUJ0cic4+zhoaT8IQqLePJrIWxAU + Dx56XpUHAS3++TxUGbZwJPdEjx/Pbz566OgXT//zk+8i4v66xqfA+8vjkSP89cY6rNhkLAKvKhqe + q5livnAN7yahpS/zRDNgSnI2ccnNT61Z+gdIPd+aIeTeTRUn45be/hVPyBiwxHbguIgZ8tBHnEFR + eRK9rIvDpMJ+ySei5E3RwLIXjkBfs/gxZZl1gyCYectedIu6kqrimKZLjmxNP/D4c6c3fP5Fex5W + NME3bqdvevg509uuvnj61ivOnY5ATrfDH/1HX+SY/jjm1eqzCV95As+0juuH+uBLeufX8g7FwKkw + YQccPuSneFIPQ+aJ+TpkQg5FH8RDV0UTkadf+4n6REzGs262qzha/ew/eLkvxEu5x01ek6p/CSsc + 6Wd+n37k2JEfMPr+uyuk+83dK9/5dXizep2KFMXUZwAEkCWsselVRRaZ1VSxd0dsnmgK8i3w0nNx + yCs9bvVgWtNTb9zQh18NS33EIn/BQ/7MqOJ1ZuV/l948ipuPYVd4mvMqPzEhXM/L82W38FPzM/5l + 
vnST/Ok265bzq4mf7f7CI8+d/ja+V/Vgnj5383r/bdvTd+DLxP9y43ZG6THXqY3Omwu24fLCjf4I + uTYjTBidlruPjV91oV2/9uIRLhlJaOZxaCVJ8IVjx4M6Aq9lTB6OoIvB/Ze0oiqCkGCPD/LMriUP + 5J5X1UN2I/75PBiDdvB7wrjmFzzb0/pPbT//yp+fxXEfCoz6/rmuffdjpvVdv4VCX1rN51WLTRjF + RTRZyhqBU7Ha6NVdhB98s02pRQxcEA49nKkXNulzsRleGQovN4xT8Th8FTEUpWdT4cP2RJin/GPG + 7tM/xmwyokkUYw8Tiprv8Zff0M9wsvCt/DeeXfyYmOPa+sDu6Zccmv7ZVRdMV114qDGf2uOvfPLo + 9F2/c/v0/tvhNNerj5toQ7/sC2+qqCvslFcfO68KRsSGq/NDXSjOt6s2MRBcFcq5zoQNd9THZjfQ + OAECR3x6KjfmG3k1HjqIS3rwBp3jaPmVfYVnB2N+tx9RK58FLxRph1+vfOOx1Tl//P76fpY7P7O+ + r0Z+3+qcc9+A3xz6+doEKMJshF+tYR9RbC9CrkIbN8TpzRWLSZ5cLI4hJ5+WphymPhaFftl8spch + brAIufyUnuy4Si+0mrPsDMA98iY8M6Zdu+SXcszbX4nNT9gJ13iXdgs/gy/z3pQv3SS/g0u7R553 + aPrex583Pffyc1rU9/wR39KafuSjd04/8KE7p5uP4Rvz8F/rpV2IeGLz7PLW9Yw75NxU3opajayG + R+B2+enke/FwvpjwGPLZ+IbFcu3srN987H1P/ILpJasdyvfldf98D4vft8JhxUTYRNyMszHmuUWq + FYCTvAfeTUJGX+KDRTVp2CXO/uzfvLCrh+q5sM/4Il4CxdftmUfYMYTSZys7E8clgBxWfMrUfNTq + yrpASLvCk45X+tEhGrLyznQMLHvhmKivwTdw/swPvWCYV16pp91qOhdzL/7M86e3PuPie/2wogd+ + RfnX8H2tt+L7W9/4iHMjz1gHAph3v0JWnlm3HAWP/sEz06K1xk12gccwLtW59VNosq5mZFhmHodV + xukx1ML1N6y0q/UE3HE2O/mkPOpgP0POgF0Hl6ny3FQP0WVFHH/h5ceM9KOAVLnkHX6zDopna/X0 + w0947/dkLPflyFjv2+tH3/W0abXzptVq6xAXZ/kZLT9zZwlrjGJT70Vt44aIzcMmRVGhn9mFzCKX + v+FIbMMN7Y0LIui5ePRv3rle5k0vmOIoO3sQT/lXNwRvUHBQ3H7QbOEZL68WR8kRb+UtWNRh4Wfw + mdDyMt8exzR9Hd6mXvqEC2Y/piDf9+Htulu2p2+/4bbpv92Cfx6Buz4XaJPPrk8cRq038KrLpjH7 + oY0z+uDdxcP5rCsNQlZfpMh5eY6B7UM/+NAYsnjkxzRclc08MiBp6M2jibwteegv6iA7+XccPf7K + j/YCxpBy5NHjN854zuNp+9g0ffb05668wdb3zf1+eMNa/2sskg4rpsDk2HyzMea9WFFK4CTvgQfB + rCLiy2ZofhJnf/ZvXoDqgfOWicumoj55BQjZZswj7BiJ4qxBTUWA7QWQA/MD15qAWl1ZFwhpV/hM + N/3Q3oEATT8pGlj2whHoa/ANnPOFXjATEcfvT73+qRdPr/qci+7Xw4qRfvZFh6ZfedrF07990vnT + I85FmzLBfoWsPLNuOQJXeeJZZcpxk13gMYyLuOBLeyrlT6hRP3oYh0zGmXqAQeB4su+TR4rQaxCP + /cgJbuQZfWQ/Qy6U4iVv8LT4ial6iG5kNObTjxnVn4TJf/IOv1kH49hB06HD6+lf2Pq+uyuk+4z+ + R/HT7KvpWvHjgdXME382AsBAXLIYs1nCjs3j1dgccr0pcHHJt8Dv8lcOzTfgtHecQeTIBDDvXA81 + 
r9JnHuGA87osO07mu9QHKvExFr5o8IAElR9NhIu6SjSw7BZ+an7GP8/3ksOr6Xsef8H0Fx513oQf + WH/Ar9u319MPfeSu6Yc+fMd0J3+Ya3nF5qz+aJuV4c/6qssb+nFGvRcP57OuNAhZfZEi5+U5Bi4b + /eFDY8jikR/TOF7igpY0mYH8UBo8UudtyUN/WOc6XCiTTTz2RFPHlfOc4GyTY8K4wQfDBY5/SXr1 + F7Zf8MRXSXEf3Bj1fXPxVx1Pd34ABXvoLgfcLEx2MdZmgkFviSx6H8u+kwdf8XR+4mKNhj4dRRlm + +lxshlkKBSZa0uEh0yC986kB7qDHh+0FwC14CZdkHmpt3/Tya73qEWEKJzgmmqLHVXyJ04RvI38T + pnwILzJ/8VHnT9/9uAumy07hxxSai/vk8aN37Ezf8/47ptf9AX5WMQqvTaR1yPXCCO+z/ulyLlgf + N0Xb+cNeMM63qzYx1xXzlMfKVpiaz8MqgMbJT+Bor8j1gBsv89mP9cVjgFHg0foDXnHkBFmo14hb + PfT53X6CuIbiFU/WOfJV3vpHWT+xfdH02OnrrrpPfrXyffcl4erOl2IjjMOKxcOlzaviQs4x5iN1 + LxGb8Dj43LTk5CVeWOTmSz+Js2yceWFUD5y3bHvzUC87PwhvfcxLT++41Bw1oCdMaHsBcGvxSZ/8 + 1ONa1INTwx8lXOmH9hU/eVOk38GrOKKZct5mA8dN8oxLD09veMal0z+56sJ9eVgx9kfhr/q88skX + TK9/ykXTlfHjFKpv1i3HyF954nk2aqGjPgs8fdSlgvb1sma+nlln4PDR6wqNDOQOCq+j+Qg0jx4U + YK1ftzMD7mGHJ/sZsiCcD4Li0cPA2T9xpFOkMh3zUlhPPsZBWIunf/J1/IkTUHz4OcuHHr5leqmE + ++Dmqt7bxPxG+7Ttb7Rv4o5iqjosXitulnI2Qq/PEG1U0WnXryVPyG0VojmyuWAsR8HDQeFQ78V2 + eKUIfZoxLoevMOQv0sHE7uYyTzVJuU//GEFY+shvyPJSDtU0Lf5wL/uKh3EET1jP+B+BQ+D7rrxo + eu5n4P/MnUYXvzL88d+/c3rp+2+fPoXv9s76A3moLJvGXLA+bso79PVmkhjOt8v66BfMU+bKOwLK + Xq7CaUJA40KuaUVe5vHAPkNfSOJoGWJd0rt9jBNhTABV9hWe8xjzVpRccSQusiJv58tKt3mot4+t + VvfJN+DvozcsfKOd/1ewX23zcRW92WIErjYlnlU64HPchIdBZzcfLIqH+ly04Cfh0Jcj8Yiu9Bmf + 8YpIfN2e/JQjjNJH/NFeytMe5LD8S5/8wZF1gZh2hV/6ob0LFLwpGlj2whHoK/nOx9d/L378hdNv + ffGDT7vDipnwr/m8ED/+8LYveND0rY/CX/PhxKJ+Kg+wszEWTPVZ4F2huGs9Wz/VtOuLFdKM6wwc + PrwcSz1gULjux+urjDN5w6H8hB2mdn8SNM750E/wtPiJsH/rtS/E2+fpl36Cj3omVDiKLY4AMh7z + hZ3nDx3eWd8n34CP8OzsXrnnN9rbYbGLN4rp6mo1lXQVFQZa/ByB92eQMapIUbTiD97i6X4IYrbV + PLl4nI8yzPReHKdRirCP+GA3S1P+TOf42cRjkTOAis/hqJnwGIbhV6LjKnyEmfFy3oVKXIoBjLyE + kwPfnvvw86eXXXnhdMX5888pDXLaPfKv+fwN/jWfTx3NZd485oL1cVO2oR9vHAHifLusj3XGPOVc + Z8KGG6wrPqTn8iROgMBxWguqB9x4mW/E0XgMMAo8agfATc8HPMkPB9qFWA99frefIK6heMUTfBHf + bj/IZDXhG/BPepUI7qUbY7j3rpe/56HT4TvejQ0yvneV7FE8bT4V14snGRgVo4/Aq8hZ9D4mZxvN + 
E4tJngV+5ld63LRGLsGAR/NpogFDLj+lB4ZX6YM2M+K8Lmdoe+YbGZc+UCnHWPiiwYN6MSaEY7My + 7sg7Rrfg8HMVflTghz/7kukZl927P6XuyPfH/T/hr/m8+L23TR+6AxsmNynrgT+uRxuXfQh5dlFm + XZc8wiUjic2sT04piij4rA6e7GualUIBUjRrO1yKBxrhGX/0eciC8BYExYMH9UXgKg/JI/75PHnM + 6PhErAnjHH9M1Lwe4pZ8+Gs7n9i+c/XY6X+5974Bf+9+SXjkzu9FzD6sokiVSCtaNkGNAGWSsYRD + pl00TY0kXfC7uK25wi5x0mvNWzNozbw6oit9LErIvZsqTvG3MJos2tgejksB49bii64YeqpHM+T8 + 8EcOXOmH9hU/7VLMfGIEjr9N4Z981sXTG5/50DP6sGJ5vvIhR6Y3Xf2g6e8/7rzpYvxMBqvg9dhQ + nw31JocuFbSvV067rliIgNnDOKyWesCij/obltdXitBnnMlrf/bT+gJ+iychGMU36EJudshHdVC4 + epL16K/II8JnPjKoPCnO+UhgHPl8dT7MPnTrnJ179Rvww1N6PNXxJL/Rnif+bITPLGGN+ZkNo3dj + GzfE6DeMWEzydbuQVUzySsatHohPN9SDRxMNGHL5KT0wvEoftJkR53VxNK/c7tIHKvEx2p/pTcNA + Iz9OCNd4mx0f/9KjL5z+zhP5f/7u3c9NjnZ/3//grp3pe/G7t17zsTsVqOue64ORmxhF6uMsow2H + llaR87l+NAh5HFrJQhyugHszpz/7F4/8mMZdgrhoFuaNIOioN48m8rbkgZz5EWL/Hnv883kCTcj5 + eNLQ44+JmjfO984Hhu3tra177Rvw92IX7+gb7ZVkJRuphCx9NAk3W+IrScCzKXIsXMO31ZQD80Qz + YCb9JM5yX7RyFPaWHUfEhQCSVzwhj7iol7kfpM9WoGLkB0DJso+uMH9wtPxy3vFs8EN7BxK8KTqg + Z1x2RG9U/+eTLz4rDytW9PJztqYfxm+U+LXPv2S6Gj+2cR6+Md/rWn0FbM7TThcXNtbD65XTrq/X + M+3Aiw8vx1IPOyi8jsfrq1i/6gv7s58Wt/wMuVCKl35aHyz6abTLyGj0F+Mmrxn95sRnT5h3+M16 + GUc+X3M+zK2nQ4e2t++1b8APT+nxVMZXvPO5q63p512tPShRPOrzxJ+N8EkrlyzGbJawY/Mcj198 + uZjkW+B3+SuHfCA+6bEo5NFEKBhZyOWn9DJv+swjHBCny7LtAc+MSx+olGMsfNHgQeHEhHBRV1D8 + 0QsOTy97Ev6CMr6xfnDNK/CxO3emv48fg3gtfuupVmNDP84stEu9SYWHMu3iyXDioBmHVrJwHpfV + aF8easEHIm96PYhO7gQnjnoa8yqCkAaPJvIWBMWDB/VPEM0PE3kwH3F4cjx8MGHJMWF7xx+GMVSg + Jc/5WJlDz9v+xitfZ+ZTv9/zN6w1/l/A1vrvKjlsnkoyilShtaJx8y/xLkYtjfSV9AY8AEXNB/uN + ZkiZm3nm1zjzAlQPARO8NUPIvZsqTvJKT++4mizaaDLHJQBuLb7WBNTqyjwhpN3wF5j0Q3s5Yh1W + 0/l4e/h7V148ve2ahx0cVlGq5cC/k/jyz7oQP3h68XTlBWj9DfUuG9W5r5c1uS5YIU1YBg4fXg7P + Dz1g6pMT9VUsZ/FmJF7f9Gs/jisRHKWXn+Bp8ad+tIueZE47z6cfTSsfKVo8edgmn0bquc/imvMx + LiqgX2+/ODH3ZByeTpXl2nc8G78F5/UKOoq0kSoOjzzxZyMMVLQ+RjMR58OgjRsciA/FU1HJ0+1C + VjHJKxm3enDNHX40n+wbMOTyU3pgeJU+aDMjzutyhrZnvpb7YhOmuP1gK9i3MJsf8xL/9fjrNC99 + 
0iXTw/G7qg6uk6sA/nri9GO/d8f0vR/E75fH97q0Lt5dgyD6uTYhNFo14WL9iA55HFpJwZXDpQUk + bBxaapfk4Qi6GADPQ8TmjSDoBk8irFjw0B/7R37Sv8fIJMzSX4sXmrSz/7QffAp4hhOd7EadBMBt + NeHne581feOTfs2oU7vf8zesnenF3HRKLkfGEkWqsFrRNuG9mC5NJpvjJvyS38V1HN0ucY4vix7h + GagQFR5kx5H5WO7dVHHSQPjIsMmijS51XMSwGVp8kpM/OFr90m74C0z6gf0fu/TI9Otf/NDpFX/8 + soPDKspzsgP/Ujf/cvfbnvGg6ZuvOA+/7jk2axKozn29rMh18Xrm+gGHD6077XR5lKg+aYdMyNVA + kiWJh+ZFo7iiHzkvP0OWK+Hhr/O0+ImZ9RH6jEzzecrk1bT8KKHCUTv8Zh0YD/dnXsNP8lMDO3wc + Wq9fkrhTHYenU2H4t++6Bv989X9Wlgw6irSRKvR54s9GGGQJawReyXfe4/CLD0U5G96wHoZvJr/s + yZdOf+6KC1S3jfU+mLxbFXjvrfz9W7dOv3Ej3gPyapu++hI6b9ac0QRu3pSajU2fhwL3NBfKmzn7 + uvHID+XcB3n4wU5XEYTUDr9AWGGC4sFD7jPq54fJiH8+T6DYIk9ZaqLHHxM1r4e47eZz/JzfXq3u + 0VvWPXvD2tp5MYM4mTcgrQYS2gtfSQLjRc9FOzl+8bJpuEjNz9xvX7RyhAfOW7a9ebLJotti6Pxh + lwRqyog/utRxCSAHFV90xdBTHX7xmPOFR3z86yd/4wkXT9d9xcOnFxwcVizqvXY9EX+Z+j889ZLp + 1fm7v1pfj6091gVP8u11wrrho/rOmtBjUF9Qf7y+6n3T+kp+Wl/Iz5DlBDfF0fuvxU9M7yO/EY34 + R9zkNaPfnGSpCdFl/MFnLQz4UhHX8JP8VICXeQB26B5+L2t4So8nO77yHU9BDG/FCe4s+5vQJo7Q + 54k/G5WSW0DFoyxaLHLnjUXYTO+inKlvWF/7iPOnf4i3qsdeuPnf/NtUk4O5U6sAf+fWD+N3b/3g + B2+f8APzY7ODTv2pXZ2dismQx6GVfr1puVdp6M3M7cK+thyKPmhz20/nkYEm7Mc8ibDCh4K2CSbs + Z+Dmh4k8hNnisM2wlRchnujxh2HZ6yFucz+YzPpgxP+iW++st54+fdNVb+02J/t86m9Y+NVESBm/ + qp1FiqLkSO+VbIQS8l74ShJwL1YrYvLmuIFfvLAsnogr47DfWMQMz44UoMKDbPvMx3Lvpjl/S1P+ + LIs2ulR+7QH3Fl9rAql5a/ml3RPwmf8/PvNh008//aEHh1UV6r59OBdvsn+Tv7/+Cy+dnodfD+31 + tM9cF6y0JixjXfHhdordXnrATqqvBBMPiWO78EkE6Xevw0p6+Qme6P+yg+z4SKcnPNBPzqcfTTsO + wloe9YYYdtbCTnyUlnyUOev6MAAcGPj1w9t/h7Oncimku23It6tp9VY6V7BRHEXXgp/xcp7Fwagi + 9RFABuKSxdj0xZt+ZsQWxAuGM+UN65JzDk0veRK+IfzYi/fFb/3cUPKzZupNNx6dvv3dt07vxve5 + 1KfahdmxKEPI49DK0mi3VmP7cMj+p1l0vPraNGbNQ6TzQCM86aLPQ06U90njgT73GzHLw4lMu+cJ + 1HTEJ4QmevwxUfN6iNvcDyYVp/NmAXiYnWQAAEAASURBVKHnW9ZV0wuf9N5udzLPp/aGtbP+Tnhl + bZ1UHC48vBisrhwtRdB74ytJ4LVoLDaexZe8OZJzwW+/0QzNLnHmMZ95AaqHoIPsOGbFNZD+Sm+c + Zah5lT4PX9ah1UNd0OJrzSJ73pAf/2/VX37cxdO7v/IR01/BuB9+RXHFd5Y+PAP/N5a/3PCf4pcb + 
4gfm43KfV99h3dxO0f+1voBX3xyvr3rfRD/K07yP9jqsRn8Hj/px9J/7OnjRZ3gyO3AjbuI1DS0e + qCgcxTmftcQJKMPhJ/k5Dbvksz/8nPn2d8ngbt6Gp5M1vPa3nzBt3/Xbq62tw96koIji1LiJi0kx + WIxKqo/AMxCmWGPTF2/62cB/JrxhPfOh507/4ikPna68+MiGDA+m9kMFbsQ/oPh9H7htesVH7pi2 + 2Y+8NHpTqn9j2h1NPf6o/Xk4ZP/TrBShz/7PQ4TkvBpO0uCROm/aH+QNHjzkfiNkfpjkTlvOE2hC + xydLTdje8cdEzeshbnM/mIw8xRcFwgF2bGd96Ml39y3r7r9hbR/9bhThcCbjICKJOGQii55DBO3i + 8ESe2SmnWCQ8O6dcNFZ/N95FGC4cj3FpT7vE2Z/9R83SkUiipoormyqbTMDoAvIM/qL3Q7iTPprM + cdGFm67soysoPxp/nea1X3D59Ctf+vCDw2os6b58uhS/6/4f4XeJvRG/OPCL8Lrl9UXf4aP6QpF7 + 1598X0U7Vl9k+qNvOGM/sR8Swvnqz2zrtm9C7/ggcF+Unx43eaGSlvZ+0l3i8Ou8HY/5iHIcvQ7m + c32iQAGcDm9NO3/dwsnfFdJJw1/+2w+fDt31uwjwME9uZbdp3EQYuDzxZyPwSrKPwLMoG/1s4Bef + mobF4Zq0+EJOPusxWQ9eQ605/ZJH9jI0MOTyU3qoeZU+aDMjzutyhrZnvquJ/8PvxU+6bPq2J1wy + 8Ru9B9fpV4Ff+oM7p+96z63TR27Hv5/IvkEKuendCZzAHyjUf3hwH1oORR/24BGBCrTXoSXHAXMc + 7mP642X/za8CW84TKLjw8aShxx8TNW+c73M/mJN/590LtLPeuX197Mjj8fuyPtbtj/d8996wDh39 + W9iY+EegMukYsSlVlBzpcaya/beicXMv8ZUk0Mmf4yb8kl982Qxyj6rzsJj5dbzmLUeKL2qquO7r + NywW/QWPuWi67tlXTN9x5YMODiutwOl5+5rLz53e8kWXTd/9+POnC/j7t6rv85BAXmpDHmbZ97Fv + rNh1WLESRROnXfLudVhJH+3u/nb/lx0IR9/rSQWnvvCKT9M6NKWoQ41hjvyKl3rus7jmfJkH7IQD + KBIjbmvaOn9ra/s70/ZkxuHpROiXv/XIdPjIR2FwOZdi9gbDIPJwaMHPKEOvNwwWCbKS40g+/JmN + Ta8kw74Xp/P7zSWagnwL/C5/5dAlGHAX1/YicmQCmFdNE7IXNXBZBoqZEXG67PApl+H7VE996MTx + 4DqzKvAx/DNkf/e9t07//vfviMTY0biisb2Zs++5d0uhDaBtJHgeIrLuBJrY69DyPvGZoG5r+4yG + 9t/8KrDlfLgLPO2A8J18+HDcnB7zAsRt7idxYefAhBTPev3fd+6884rpLz/taOfY6/nk37AOHfqT + 8OXDSjG0YLEp5TxHeotkynFPLnE5Bp+3tNZOfFl0HlIn4pc+itntMg7b98WBUwMVosKDTFwtSsi9 + m6w3jvaVJh9CxoAlZn3G4n4G/nWaa59++fSGr3jkwWGlip95N/4LRD/6uRdP/+/Vl+JfzfbWOvm+ + inaswyHrM++jvQ6r0d/Bo34c/Tf6lm3pDqWHMZ9+7Ff9S1iLp/ZF2FnLvheQ4oKPMmcRB3nkVhOB + 48Tqj0znXPAniTqZ6+QPrGn1DXQlnxwjyHyTmY303JKgmPIMh2w6z5JfMnkSl2PjE7foHRn5ut3c + r+O2Hkb1EOFBtn3EFbKAiiP1HrUGdMur9EEblToP/zrN38L3qa7/E4+env9HL9KsDQ7uZ2oFnoYf + g3jjFz1k+iH8WuoH4+99uk+8ad3/0T/ZQOqz3jduJ9dn9DVlHRqwy32TNRy8wdP3De1qX0DQKeLG + 
HfPpx4z0w/DoUXeJw2/6N07AwLX9R2uZwy75PBHxkA//AtK0/Q0yPombozkR8F9ef9F0ztHfX22t + LmJoNJqNLEYcJjmqKBHcLnrOs2iLsYp3qvzBVzydn0FE4EOfjqIMM70Xx2GWQomLlnR4yDRI73xq + mJ73qIum7/+8B0+PufDgxxRUn7PwdhN+DOIfvv/W6Uc+fCv/GXftE/dh7CA1UGwH1Mdv5nqIahlX + b0I8rPBBuV/ed40nGzNwZR9u1bh0A726W7jYx5zXDhdAbkQXeE6UnXBg2OXH8ZXdLr6RBzA3r3cO + P/Jk/rGKk3vDOm/7z+w6rLhrcXHTMtjZaIX0dTsBvg4RGKimPAzwPONNPyQNPj7yEg4WxRNxJc48 + xpkXRvUQdJBtn/kMXvGUPuYly70JQr7qkiPTr1zziOmnv/CPHBxWUZ6zdXgQfwziqoum38Qb1xde + dlj9xb7Lfh19Fe2o48Pt5JrpOAk85nlYRZ/3mo7+zrZu+xJA93Xw8hQpP22fiTe90p7P9G+77tfx + U2s/AgnX+WxHouO9YWGrXjxtbf/p5DjeeHIH1rR+0ThRnQJlXhp5uFDO0Qrp63YC/CZ+lmrGexx+ + 4VCW4ol4MKEQzGM+82K6Hjhv2faZT/jPRdHasPgZV9jRAwgefO6h6Yfwg59v+6pHT19y+cGvKGZZ + Di5X4IkXHZ5ef/WDp9f88UumKy44pD5VA6pP3UfqK58S2bYwdmO6vyl586ec9R39nW0934/ua/vR + IVl+Wj9r/5hRb1gMqHDkjX3B2dxX1GNf5jX8MO7wRzvhakL25hMMf8dw56S+LByebLf7/mPvuHw6 + On0cwJVL5xR8okewKjoQOe5mcVLQ66RfjsAzkCW/kmcxkjfHDfz+DBKLSb5uF3LyyU855EOF5/gQ + ie1DoWIzDvPWZ5oMOAg+9DV/dHr4+fV3Nzh7Vl//7HfvnF7z8aPT33z0OdPXf8aZ+28h3t1F/oWP + 3zF909tvik2f/R1tDjIfSpbNHY3G/i/9ODyMoYINWoP4c7/JDnp2uw8bPcl0eciwz3kZp6eS+6El + fzOcYPbb5xX3eAnodo5Hvo6uj01XTN/8Of/dLJvvJ37DunPnz4FUv5UhU7STKJ6KlMG0IirI5jRk + FSHflHKM5Jb85SdxOZJ2wZ+LMCs+8ImzXy+CecFRDwETPA495WV874I5f9Hr4ZIjB7+imEvznz55 + bHrKb948vfSDd04fwL/I/Fffc8f0rLfePL395m2qz/prHCLRn2PQYcUCjfbmPhv7yofZkLOYo7+z + rXl4DdzoW9Kp8WU65tOPGeuNKE4vb4c5H5HGkc/XnC/zgB155JZ+OJ/7zHZQ4cemVs+3tPf9xAfW + 1upFSoXJgydT1RsI5TgUZiP9qSh8iCvkGY5Bt/klf/rl6s3sSLngN49x3S5xtne81oOjHoIOMnFq + CvKHHA8l2yz18/xCOiuHD+GnvZ/3jlun51932/Rh/iIpXLlu77x1PX3F226ZvvWG26eP41+vOduv + 7Ff1p/os2lE7LPpRRfKOMx7z+Kj+bEV0nW03+nPD/hKdEGZXv8c6gTm3Ff2w/+lRd4lzPmupEDBw + cU4EkQfYJV/NZx4yU+B4K3pRSHsOxz+wrr3+jyHoz2PIdXLiOWWycp5ZzkYreB9XJDXDpR1Qm/jL + T+JyJGsrkkUvQvFEXImz3+bH8OIRnWrv4na8MhZft2feZd4eGM3Zdd2Cf9HhJfgHS5/xllumX7/R + b1GqH8rgOro/WPKfwT+x9bQ33zL9U/yCvLP53HJ9ooHGoMOI3TPae/S15nmo4CPryzlerrPtQDer + e+o9Dwn7CAhOC1d48WracVBROLb78Jv+7603LATCsD5vuvadVzmCzffjH1jbR7+BJ7dKlmOkUCc+ + 
k5ezNtIX5/sVsj8TMLg5vvzAZuav49IPeRf8jse8ac+4Emd/FDOfckQ2wwSnPuMzXhEpjm7P+MOu + CER11txY55/GAfRUfPn3zz9y14T/g1+X1yPq19aN87fhgPtefLn49DffPP2H/3FSP+BcvGfKg+sT + DTQGHQrMcbQ391H0I+d5aDSZWF7i6zzq12YHmcsjXu4L8PCinefTj6YdBxWFYxRzPmu5DwSU4Zwv + /NGOPHbUcOSTmED8TNbquN983/vA8i/n+7M8SZVKjuBPma58siNpBs3kM/gcI55MaoZreM6Lt/GX + n8TlaMfJrNF+7b/bzf06XuvLUdhbdhyZT+SnYjO/bp+yzKmIh7NjePvNx6Yvw/el/iq+xPsEz5xF + /l6PqF9bt77OH8VfZfnG62+bvua3bpn4j0CcTVf2q+qmvop9pV3dy8m+GvtKh0aTs2bi6zxcj011 + F512gExrPbR+9GNG+mG/I5LAUWxxBNA4AQM3zgtZyxx2yZd2GM0ns0p4Z71+AYIYhKHOYe8D6xXX + fSkMH9tPTPrOVHWiU0ZRmOVsJDvn+xXyDJd2gjOpOb/kk+R3PI6j22Uc9ut4rYezeohwIRPH4na8 + gIoj9R5pH2m1h570mff8P/Bv+H3bDbdNX/62W6d33Ix/z48psgmrEM7Z65F1inpC5fpipBll/HnD + TdvTM996y/Sd77tj+sP+mgbdmXq5PlG3MaAe3jejnK5U1RP66s9WHOk7j/p1Q91Fl5Vv6yGHxJu0 + 3ohaPN1vj0frH7FwXuxB5AG85LFCSOOGv3S8tVo9fnrVO69uqc0e9z6wVseeS5J+Art00WxsUlw+ + 2Y1LfCg01O0E+PIDA9eUSZ48v+KARfHkJpr5NZ95y5FCFAwK22c+4V/FRr6lz7g4yrw9hHyGDTxH + /tXv3jE95Y2fnn7i9++KdUfazJNdWYVw4l4PTrNuUU+oan1oRjlG/Da36RW/d+f0tN/8tEb+Q6dn + 8uX6RN3GgHo48VFOV6jqCb3fTOYFcp29DKrrXnUXXVa+rYcccp1cdcVBWIun++3xaP1jsThf/mkt + PvCSxwohjRv+0nHkv+eXhcc5sFbXkKSfmMo1UqgTNppVcuAVEef7FfIM1/DlBzbywybH8174XiS6 + EQ4WxRNxJc48xpm3HNHcMChsbx7VWHFLocDm/GFXBKI6427/5VNHpy94003Td//O7dOt+T/4VF/k + z2zZlarTSF31hui6Rz1DVv1pRjnHsOc/C/id7719+qK33Dy9of8bgcCdSZfr0/sq6+F9M8rpClU9 + UTFu6pSzJq6zl0F11foMHPWehwXXyysX/R7rJF4z1htR4RjfnI9I48jna/jpecCOPBWA/ZmvDPUg + 3A7Onj2uzQfWtb91Kcg/l03YT0yXzqnWCRvNKjnw8qWiNK8hz3ANX35gIj/A55hx1EjaBb/jmcdL + fOLsl2LyliOyGSa4i9vxioj+Sm8eyzIvPyGdEcMHb9+env+OW6bn4ntM78fPU9V6MLusB59bnSny + 8npknbwuOS8eCLMx1tN1X+l7Wl/z9lunb7ru1unD+HGJM+1yfdRQvb10KLhOmbEPl6onDw18pFyo + XA+3qfWb9lcdGnzw+ox1Ja8Z9aZDhU4Z4hjm8Jv+jRNQhpwffLajZfGFA+OGv3QcuM+ZXnH9gx3J + /L75wNqersFJKbZ+YirXSIHzvDQyyGjanFcTCxG3E+Bpt+SX3HnTjx13dvvnYiZP2GUcjs/xmhfm + 9cB5y7bPfCI/AsXX7WEQ0wpEBLOQTluB/xfvH7z/9unq37hp+r8/cZcKM6srM8t68JlNuMhf9RaM + dYp6hqyy04xyjmHvdcr6r6f/8Ilj0zPe/Gn9X0XGdaZcrk/UbQyoByvSy+kKVT1j86ec9XDdalmw + 
HHvUXXRZeeJj36n+rLsZFYfK7QnRYcXSb42Ml+sf15wv84CdcACFA+OGv5o3cms6tP0lydnHzQfW + tHWNTlCQ9xNTudIn/kifY+DoNOfxAG27Qu68HV9+YJL8ORbuOPz2O49XRZj5ddzmLUcKUjAoHEfk + EbIiIqD0kb/kyDH8hHTaDq/9/Tunp/zGjdMP4h9a4PetnNairswu68FnrMtyvb0eWafRF7vWmeai + c7/ILtc5Rv681g9+6PbpqW+6WT/HtegsRnDaXa5P1G0MOBKyDpkS5VY/HhpNLlSuB+BVz6wjQFV3 + 0Qkh0zGffsxYb0Qtnu7X8YOXeq5/XHO+bAvELxxAsU+MY15lqIeGuyY0s2GPA2vnGp2gYOsnplNi + kIzRQXYcvec8HmaOUt4Lz/klf/lJ3hzJvOC333m8qsYsTsdtXnDUQ9BBdhyRR8gCkqf0xlmONBfx + xOxpM7wLP6bw5b954/Qt190yfRw/buB6Rl2QaK1P5pn1YIZYl83rkXUafVE8NMMfdonG4JXfXOcc + gSH/f8f/ofzWd982fRm+v8UfqzidL9c36jYGHUaRbqTnChnPennzp5w1cN1UplHPVj/qXWdYcL1U + eeJzPv2Ysd6ICsd1mq8jkcaRz9ecz/HQsvjaOpuvDPVQuGnz97F2H1j8/tVq63N1gjIZJg2qGvGc + Mj10HJtKshW8j0tF2hu/ib/8JG+OZA2+dGC/83i1iWZ+Wx65ZqUHExw6jsgj5FA0febRwljEk3Ht + 9/ETOAT+99++ZfriN944ve3TPARQmMo781vUlUkxX+L4jHXZvB6cJm70xa51prnoxDTHN7vO/45b + tqcvx6H113B4/QHiPx2v7FflFeVTHVzRli7r0uoXm9/2I3PXuZZlXkfAqu6isydaj/n0Y85602nx + 8DBJvzVSz/WPa87neBS/cADVfvMhVtsm5ws3bfw+1u4Da/vQNSDFQYkgQNJPTKeEafiVPsfAJV6x + tyS63Hk7vvwAnPw5Fi79kHDB73jm8dIucfbruM1bjshmmOBYFHx0vCJSPbo961P07UF0+/52DLH/ + 6w/fNj3lv/7h9KqP3o74ETILw4eogyTORz2odp31YJzUMuBTXYlzHaOe0FIWD55nY6znDH+c9UaH + Tj+Fn7J/Cr5M/CH8NZ/T7dxyfaJuY0D1VXD3o6rphTGeq9P6s6od69J51K8b6i66rHxbD9WfeJO2 + Nx1HgfnaF5jp8WifRSycF3sQeQAv87Ii+DKPMvR84PAvreJs2v19rN0H1rR9DaPWCRqjc6TT1mQ8 + DCjHoTAbrZC+bifA18kMg5m/k+SXf1gWT9hlMR2f4zV/OVKICg8K22f+kZ+KiHxLn3lzjAzrIeR9 + PPzXT901Xf1fPzm9+IZbp5uO8oc/mS8CZmHYVZGnJOW3qGsqiOMzu3KRv9eD0+SLegLq+tqO7miv + Mexn+Ga35E/51mM709/H32O8+jdvml6vH7kH4WlwuT5RtzGgHqpopodMXCHjKXnzp5ypum7Qs9y0 + 2qvuohNCpsQVHk+xDPIjhfwn73wdSaB4uf5xzflsx4iMo0Hml3mUoR4GDuLW+prQ1rD7wFqNn78i + eT8xXToGyR50kBoDl3ixtyS6vBe+/ACc/DkWb/oh4YLf8czjVXFmcTpu85YjspkOCsdhHgaSvAKU + PuYlyzwI4nmfDr+LH1P487910/TVb75x+p3b/KXUyBdBIx8kotF5Z1qLugoWOD6zCTeuB6eJi3qS + HTLdsHtm42ydsv7DbsmfcvLzt0O84J34EYy3nx5/zcf1jbqNAXVhZXo5XSnjWTdv/pQFFp51tp3q + ulfdRSeETMlTeDzFMjgOKlo8Oiz7OklLvwJu4Ms8wEseO2q44S8dDxxg6xMdWIufvyJJPzGVq4Ik + 
l4PUGLjEK6KWRJf3wpefxu/aws9J8DueebwqwixOx23ecjTCY02BV1PQLuR4KHnERbzM20PI+2i4 + HT8O8LL33To99dc/Of0ifnlcxY8YR74QqFDXe16S8lvUNRWsD5/ZxFUITnR71tH2OS//EGZj2Gsd + E5+jDXkf1x54/pDrF+HHIF78vtunG/l17z69sl9Vtyif6+GYRzkpt/rxUGlypue6eRnEQ4JWP69z + LBPm8STTMZ9+zFhvOoVjFC2OrD/14gs7zJd/epEb2AlXE/O+o+mMjzInt3Z9H2v+hrX4+SuS9BPY + KZkrT3iNgUs8XfUkurwXvvwALD8stmiQ/knwO555vCpCFNN+GVbylqMRHhxabx7VWPZSKLBhz7ha + muFHZPvo9rP4N/Ke8l8+Mf2j37kFv85lUR/EOfKFwIKz3aIOkth9mJjlnQripO6F4IR5a8z1i3m6 + od1snK1T1j9GE/E+ruPg+eNaP/JR/HjGG2+arsVf99mPP77Fekah+4C6qKKod6ZqnPGsmzd/yoWi + QSyD6ip51G+sH91m5b1Ohdc6m1FxUNHiub/fsFar9a7vY80PrGm6xic1isKkkHQ/gV06pyC9ch+4 + xCtlFUVPvoXceTu+/AAtP8DnWLiIZxO/45nHSzsvDgfG6dG85WjQlT7zDzsbLuyTT+blJ6QHfHj3 + zUen/+mNn5xe9Fs3Tr/XfvFU1bmthz9zImQWRl2feWdai7oKFvnzudWZIi+vR4xt3co/MFqHHFs8 + J7Pe83XN9Yox/N+IHyT76++5bXrmm2+a3rTP/pqP66OGi77KeqCuil8Dn/Cn5cVDpcmFqv4OHsnN + DrLqLbqsvNfH8+nHjPVGJP/EkXfOR6RxZPBV60sDXB5gR54KIP2Sz3b5MHCcl8E1gdCw68DSyQ2W + PpKzTmg8p0yGjqNTyVbwPi42Na698Jv4y0/y5mgi8eXNfu2/28GhIPbb8lAtoCo9nmHoODL/iNeK + po954SMC8dDzA3+95Q/xTfX/75PTm/7wqJosm4GRVZ0rbzfTCB+FiToYz/uirqkgTupeCE5k3bJO + oy/KPzCs1lgGMSm+6qPjrPdYN8ab67XZzw34C5DPx/e39sulTZ0F5xjlcz2yDhkt5ZYXKsZNrX5O + CEbJnUe8A1d1F11W3naSIh4N5OPKUKEVIo7inM9aKgSkqDgGn+0Uf/KFA8dDPpklcPjVPJj4PfV2 + jQOL37+aVrOfvyJbPzHJoWA4RpAaA5d48bckurwXvvwALD9swvRzEvyOZx6vqjGL03GbtxyN8KBw + HOZRjWUvhQKrODkf0zMCCQ/s7SZ878b14Hpp0SugWfyYHflCYGGE97wkLjgUSztMOH+peyE40e3t + v+KBHd2QdjaqzmF3Eust/+lnA77ibX7wuC8ubVblG3Ubg9cLUUY5+IQ/0Y+SWL8hY0qX6tt5SJB1 + AaLqIbqsfJ9PP8EXfWD/xDGK4bfWUzjyhR2AYo8EPMAu+Wo+8yhDPQwcRSU0+z7WOLCOHf5jRLCY + jK6PzpFOReGROBvswodCQ91OgNcikg9/Zv42xCPO4Et+xQvL4gk7Lhov5+PR/JisB85btn3mH3YE + iq/bwyCmyZ9+9LwPbq4HwvKiV0Sz+ihsN1PmTwvnlXWj6aKunMp6SN0LwQmq0558Uc+YV9nxPBs3 + 4ZvdrvqeAF95hh8M++pyfaJuY/B6IdJIj0/40+oHmZs665tJSe48JGj1q3qILivvdZIkh+Q14355 + w8K/h7o1rY89MfMcB9bWscdw0ic1ioJkGf04mZEM9fjjnPm0GR8KDXUjH67Om/w5v+QvPxFHx4NI + fHkTLxeTi9T8JM5+7d96gOqB85Ztn/lHvAQSoKHzhx2DWMTDqQfycj0QFlcsuxABzepTMvOFwHoI + 
b5wkzkOxtJMB6yF11IfPcZV/1S3qCV3x4Jnu0u1GfK47OZf1DVl2ictR8Hm/kmI/Xc436jYG1EMV + bem6QlUf6PubTubkOrhMqutedRddVr6th+oZfQDSetNp8XS/PZ7j9xcjBC95HJhCpr35JNb6DlzY + Ma6tVf2e93FgrTzpkxpkbHKSsgnoMkc8pyzKhks853sSXe68Hb+Jv/xEHB2/5BcvIiueiCtx9tvy + yDUjLsOFQ9tn/paVsfhSH/PCy5wT8bA/BtcDYbFLuOhxzeqDuZEvBMGIj/ykp+GirpzKekjdC8GJ + bm//FQ/s6IbVmo1RP+FOYr2z3nvhndfww5j20+V6RN3G4PVCoKOdXCnjmY83f8qZk+TOo/WJPhZf + 7mMI6gfy0k/Opx9NOw4uEJ50x8DDJP3WSP1x+4vWsBMOj22dzUc9rpwvHCfD32pnw4G1vdakT2ok + wSCYDJuHpjmSO2QMM1ziOQ+FhrqF3Hk7fhN/+Yk4On7JL15EVjz0xyLM/LY8GJ4dKETBBHdxHWfk + R6D4uj35PT0IKtsH/MH1QHz4UB0ioll9MGeZdYPAegjveUmch2JpV/WQuheCE93e/iseOKKbdFej + Agi7k1hvB7w3vuKFL6XFoPbR5XpE3cbg9UKcUQ4+4Y/rz/C1yZvMOV7i6zwkyDqGnnUQL/eFeG3n + +fQDlbS095PuElscuV7kER9RS77wx3iFE6DhyCcxgQ3H+fC3s+kNa2v1GEHoHCw+sT2Ss05iPKe8 + F57zPYkud97yIziTYoiDv/ws4ul8eqZdLELFGXlkHPbb8hiOBh0c2j7zH7ziKX3MS44I2qJlTA/k + 6HogTla0uiLzy7xSZr6IlgUXvus5N+8DzlQ9+Ez+Rf7lP9ahy3ST7moMe+FOYr3T3154znc/DHM/ + Xa5H1G0MXi8EOsrpChnPujGv6M+WkOtgO+W9V91Fl5UhPuokh9EH9I8PFZAjZYnDb4/n+P1Fa9gl + n/yk3+EvEx64sCN+tX4MJV7+kvC160PTzvQ4TvikRhLRhBwZco14TnkvPOd7El3uvAxSsuDhB8/J + n2PhGn7Jbx7zdbvE2W/LI9eMecq/HROnplD+UQ9G1OTB39IMHpHtg1vVlV0XTcKwnF/mlTLrBiUT + E77rObeoq2DkFdr8i/zLv+p2nHUWe/fneGV/nPWer2vwN3zlGfwMeT9drk/vK5dfbyIIdJTTC1P1 + jM2fcuYkedBpnWvfiC/3MQT1A3lddy57xpN+642I/SA945uvo+ZP2F9EwU64INKQ+4x6XOF44DhZ + /j5z4hmFywfWrW+/At/YukgQJgNjn9geVTI2gyig5qikY1zgyeOi6Mm3E+DrpAc6+XNcxiPC4EsP + jmcer4ow8+t4zVuOBh0UjiPzj/xUbNYl9TEvOSJYxJNxPVBjrQ8WPZuBsVSdW13YJBJZGOEjP+E5 + t6grp2jA/KXuheBEt7f/igd2dEO72djiOZn1dsDhZ0P/VZ7hB8O+ulyPqNsYUBdVNNNDzK5U1Q+y + 18u4TEr6zqP1iT4mS9ZddFn5Pp9+zFhvOi2e7rfHc/z+Ih/iII/cOm7HE31HSK5/4cJO86tLJp5R + uHxgbW9dlU59UsfJxiTZDADWiOeUSbAJz/nk03OT98Jv4i8/EQeTkn3jS37Pz+NVERC/4VzNlkeu + WemBKn3mH/lZ0fSZd0szeDKeB3rMOvkzFivpq+pcebuZJAo26kQLwxZ1TQXrxWc21SL/8s/5tm7l + n2b4M5ZBTF7fxOdIHwv+lOUncTkKHn2LZ6VFjn10uT5RtzGgHlmHDNYVqnpC3990CqU6u0yq6151 + F50QMq31UH25zmZ03/B5xNP99ni0/jbT+pV/WsscvOSxQkj7Hf7S8cARNvpuOjY9hjM+sNb4LjwW + 
m5dPahSFMrz5JPQ8fctn4nIMXOIxTQMNdTsOv+FMas4vucVxPH7FC4Yer4ow89vyGIkoRMFYUzyw + aOILWYumOFLvkQEHfXuojB/QB9cDYXHFsgtZX+XnkQGOfCGw4MJ3PecWdRWMvEKbvwpBZbe3/4on + /QNDd2MZ+BR290E/iXwf3VyPaKAxeL1UhwzWFar6AVH9mRDhWWfWL+qqh+jj0KveotOTrL3+uV7E + m7TeiLRCyTvnI/LE/UUU7MhTAdif86AeVzgeOE7aH83yRxt8YK2mx2RT6+SEcR+dI52GT46xCTqO + TnM++ehL1wnwtFvyp9/iPQ6//dp/t8s4HKfjth5R1QPnLTuOzD/zBJAADRFnyU4v/YT0gA+uB8Jm + l6BueTm/zMsjmyTzp0XmSRvNS9/yTgXrwWfyG0hJV/lXnUZflH+gGBXtNYa97HKdcyTjgj/lvfBL + P6TYT5fidqGj3lkPVTTTQ8iukPGUuA6jnpmT6+AyVT1b/aoeosvKE9/XNfpAXgEkTP6Td/jt8Ry/ + v8gBO/I4ME6E3+EvEx44ouzPYfinGOINC//bkE1HCEcm0UbnSKfhM3E5LvCYJpGGuh2H3/Dd/Ol3 + GY84F/z+DOS4u13G4Xycn/VgqYcIFzJxagryhxwPJdss9ZHhIp6YfcAG1wP5sEvaZnd+zpPBjXwh + MDHhu55zi7oKFvlLrULxqa7yrzpGP0Fb/vGsOuYY9ZPdfdBPFdg+eXB9om5j8HohxtFOqHPUn6Hr + sGoy53i5brZTXfequ+iy8rYrvHiDL/qAHs3PKObrqHnhyOCr1rfWk/OwS76az31WhnoYuLADXuwr + fNsKV7xh4fSKpvZJDRBlgtk8ANaI55RJ0HGJ5zwUGup2HH7Dww+E5M+xeCMecS74FQcsK07qWZyZ + 35YHw7ODQSe4i+u8Ij8Cxdftye/pQaCnfXFzPRAfl5t1iGtWH8xZZt0gCJZ5cSLzW9Q1Fcyfz63O + FHmV/1iHLtNNuqtxtk72V+tuQt7HdQJ85QmLkf0wf6CfXA8WEBUYg9cLwUV6fMKfqIek1p+Q8xJf + 5xFvs4PMOohX/UBeyjmffjTtOGgg/7bTYSkC21nL+AWU4ZzPdoqfPBWA7c0nswQOvwpv9N3Eb1vh + 2pr+5fX8v4OPSqebPsMpFQSlEeCUSbAJz/nk03OT98LXyQxs8ufIKs/sGl/ySw/L4slNFMW0veM1 + bzkShWBQ2D79RX6MSHypj/mYHgQZzQM/uh6Ik10STcaoZvUpmflCYGGEj/yk59yirpzKekjdC8GJ + bm//FQ/s6Cbd1agAwu4k1tsB742vPOFLaTGofXS5HlG3MXi9EGeUg0/4E/0oifUbMqZ0ia/zaH0G + ruohOlaED66fJDkkXtOOg4rCMYo5n7UwOG5/EQU78tgRJ+An85DICc8XjqL9OYzVo/ijDVvTkbse + D/ShdOqTGmQMgqRsHprmiOeURdlwied88um5yZ234zfxl5+Io+OX/OJFZMUTcSXOflseKt6Ik3DV + Snlm/saHouljHgHKruXHx/1wuR6Ij10SzcC4ZvUpmflCYMGFN04S55d1TQXzl7oXghPd3v4rHtXX + dnSXbru+1jnX3YS8jysKL7vE5QhU5YlnpTUs98WT8426jQH1UEVHX0WFqj6Q+5tOJuM6MG/nazn6 + GCDKqrcKrieZjnmvRJTVcRDW4ul+xS8tHQq4gc/xqH/IUwFkPNF3ckP/9JY4So7//2fvXYB2za6y + wPc/nU5CEjAoXoay8IIZCAm5QBDRGhFrlKmxykJriDrKCASJiKMU4zhT5eCoU17GKRwZLEdrnBKm + RDOiI1rlBS1jB+SWEGIIBJAAIUAg5Nb3TtLd55/nsp611/ue7z//6aS7z59071Pn23vt9axnrfXs + 
/b3f13+fPl1l3LG9/y0vwP+b4tqvoitJ/aT2k43dryczgkXhFoRTGOgK1/Pg41KjmjrFT3/nwVp5 + ePlqv3mTxwF87eF69vUybt/XyCPxEN51YS0486Z/48sx/LW/6JunC7rNiz4f3hLqUKN17r7TLwCC + EZ++09ZBV3Ixnjiuh840OTq/cKVn7TMN43bzqOdWznudG+vIeV2cB+mu1Mh9bR0tJ3SRomkPNVup + 1hN238/Rkfw6t9L1It1FF+V9TrKkP/UzaX8jGvXMvLOem98v8oGXPE6kBIw3n/Ml8cJVnHB1n+44 + /7X4Gdb1ZyukLvXuyUwwLwMAPWMdW5R1WXdxdvB1jZvwE3SKv/NUHWxKeRywuCuelTVP1YUN4Vzf + yJMzaz9g1BQ2RZv4cgy/ccZXGcVT1m2fVD+q4KXIZWBR7q/qb5v9wqDgwk8/9w66CkZeoc1/6L/z + c3+cW+cXq+P7nLGnuOAzJx/njMp3Ef6YJ2FXZVbduUC6d5LT54Uil5w+GOOp17ifoxnr4LjWc+jX + eohOCEWv/eQxqe8N19wPb70vZNc+/ciTsedzHDtrvnFufp9VZPbFxwTcP96768/GA+uaH1iV1E9q + iEIbJH4SsiYmJYW55Kc9cMFjmw5N/VL2RfhT/Mp3i/yuZ1+vTn2Xd/SxGlGJgkl7i+s6qz+JSD1m + fOzqsPJ0v7d5YT1QL09s3f51jkMXvQmih/DpmzMbOejKLTqoh9wShqsenV+4uk/wch9oxe1mJ5K/ + 71HdP5GWfySobdZR/JnJf8jTcVdkYX1KtzVBFykqeV0q7aEf/D4v49KO+CYP9Tqlh+ii/NBJ+jJP + sjKea2+IbtZRwMvvFznASx6lDV/6oB9jx0ebm+7bYdy4hgfW9XP8TaP0qTpMmBE8Z0L7yYl1bIfd + iOd++LQe9uTtPIKzKZa4+DvPoR7SHfnFC4aus/oIznlHHyvRokNCx6d/26pIfPHXvvAK50YtrsZk + PVAWFa3LwMp2+rTNfmFQcOGNk6W2DrrGwf65Jv+h/84v3UpPQDs/w2hnrnjF3cJ5J99F+GMepLlS + Q3VTAenTE/SQomkPNVsh42n5zR87TcledNL55PtLdFGeeet9J/3rHigrgISNevjQSd6e6b/p/SIH + 4oQjHQtI3pWv9xtHlPOpWsbhWXUNe35gVVI/qf1kI0l/UvESicIt+BOCtVql3axc6pYrj5vwE9B5 + sFae5LtFftezr1ci7PKOPFJBiZnemiOx60j/tlWR6oi/9oVXODdqcTUm64Gy8CuXgZW1zkMXvQmi + h/DVn/CMOujKregh9xSCGzPe+bsexOl8gdnNo57cu55NyNc1LsF3n4hgnqs2rEfptiafF4qt9rjC + 77qPsqjfsrGlIb7JQ4J6/xLQeoiOinAx95NH266DsMaxipXX9dPrPI468tGmB3HCkW7mJV9FZr9x + FYd9VUs/nlXrgTWefGTxE9szOftJzJxli5JxBzz3KdZu3ITfcDbF1hZ/57kFfj/x9/VKjV3e0cdK + pDIFQ0L3mf5tqyIC2l/7shXOjVpcjcl6oCwqysOu0ec4dPEnJwCCrT4ZYthB1zjYP9fkP/Tf+bmf + 8xPf4ZwZXvuYpH/jR9yRP7byBJe5eMSLNeerNqxP6bYmnxeKXXJS4bqP3Mcvn5eU77asg+PU90W6 + i04IxTKu8cpjSt8brp1HdLOOKvDy+0UO1E8eJ+KGzrnvnTe83ziah3unB9b29DcsSVNn6E8iHCIu + v7Tj7EXbPvP4GY0hnJdX4fXWPwF9mdKmbhXvVvXjyXp03+m37qDeXYf+Vzx1Kj0lE/Mpy37ufAM/ + 4m7Q9xI88888V+FMZg3WhwKue+R6YWNUe1zh99Cv3vzRl1gO2Yuu7BEXPUQXZRwnSwmJLz7mpUP5 + 
iaO557OXDgEV2LoXkSfEha/3x70TkRMvHDedb9THH7qfP/1vCSmNVOFscdcnVjt0drtPpHlW49Ao + 9e0eqh9FXP4JmH4B1p1Bv+qLfZcu2Nj1HQdxXOs2G0+To/NTF/inzTRE7+bST7jgM5uQr2tcgu96 + EcE8V21YDwpIfXryeaHYao8r/B76wX7KfsPazvDAOj97+t8S8krUu8efEH6y8674k0wL3Z3dJ0ht + I7wItLoSL66bV73eDVXVrn6VPT7p+N4QPn2nLetBd3glGPtniN50WtHSCE4z/NMWD1C7WQdQ/MFn + JmP5RT7sHf/Ac3/yd9wVWVgPCsjz6cnnhRpXu9R16AfE/KaTdqzD4BHviIseoosyxJdOSki8Gfub + jk+4ytzzEXn5/SIKceRRWidw3pUviReu4mZ95/wZVv6REIctCGeCxuwembRycr4Jvog09csl+OMn + Ytujjq6LpMUXftfjulOvVZ59uW77EdmLooPtvOnftoCqI/7aF74qONSTum7XbD1QJ29JbiGK2ena + NvuFQT2Er/7k595BV25FD7mnENyY8eM+1T7TJF3PpZ/qPtw/8rlArfxyCb77BFptjdCrsPT5lG5r + 8nmhwGqPK/yu+yjLb37HY6OGdXOc9NX5jDjY3keA7gN5ic9+8mjbdTBA+cO757MXcTe9X0Qhjjxd + QPKSj36MWiwcN52v+8Gzav3QvZL6SY0maLMZzGolM7lJdRM8U80mpj15w2945YER/syNq3omn9Z4 + cT37eiXCrk7jzNuJFh0c7jf9L1710/7al10VVJ6ybvvU58Nb0rci/aWv2OwXJVMY4aefewddBSOv + 0OY/9N/5uT/Ozfo6jumS9iR+xLlAJq5R+RQXXGZAjnkSdlVm91sXaE3Qg4qw/lRqhVof+J+y37B2 + P3THYXP4Se0nG1WbT+C+XMFlLlzw5Jlvkmmf4jccbwqG4bdmXj7arOsW+IXjYR7iUod5zGfeToRF + lQuH49N/5WclqiP+2q/tRaDVlXixHqhTTxV27LHTB1urXxiCUe/qT37GHXTlVvSQewrBjRk/zq/2 + mabPGWvZ5OO+eKN/zXbI3y+X4LtPBJD/qg31aaFL79JBylhe12yljKdufF8MXaox6+a41jPvG2Ba + D9HlBOZ+8iQrbAnH/fCuvLOe8XQdeVYcidY3p+ynD+fTfWIe/FJewZyv+9E3LPxzoULQHMepTyzG + Hj+x+onPOFyeXZyJ+LrGTfgJOsWfvLfC73pcx4wDsWpwfSOPVFDi8mNCoOtIP7bLMfy1L7zCuVGL + qzFZD5TF0683Nytzf1V/2+wXBoUTfvq5d9BVMPIKbf5D/52f+7kfZIfNNEnXc8UrLvjMycc54xL8 + MU/CrspsfeoCrcnnhSKrPa7wu+6jLL/5HY+NGtbNcdKXBEO/1kN0OQHi6zyUsO4BOPtnScof3lFH + FXj5/WKBiCOPC1PFzrvypeGFq7h9fU//W8K8mXG2PCWYFnd9YrVj+I0zXvpzoxZXY7r1T8D06/51 + q0oHduK2bviks6PuoC7bof/Oz31euvJzRtiND63hb/yIu0HfS/DHPOzlKg3rIaHrXmmCLr5H1R5K + 9v1r/WDzTR07PcledPYP/eiX7qLLCfAYs588ZuxvOqOemTf5jSNfxe34fE3YWfNVY87LPjpQi4Wj + 6T5dN4D4F4T4ofvT/5ZQ0tQZric/NqAR7Vq0raPlvvyMxhDOy6vw6rp55KyTFXu4v/TlmZckbTJi + 9U0/4+jnpVtxcrB/uacQ3Fg41cFLXPo0DzDiyzz8rPcYp3wkzrgEf8yTsKsyW4/SbU0+LxRZ7XGF + 30M/2D4vKd/tWC/HSVcSREfx5fxIF+V9To1XHlP2Nx2fsOqZeV0/q3OeFNK69/nQg/qFw7L300dF + 
Zr9xFYf9VZ//LaEj1AR7gZugMUsy2JqZE7/7CTtwHadcTDMGcRiTd+K5f+TvPId6ikhTXlyP655x + SFhw9uX89mO7F9y37TrSf/pU4CE+fFVB5Snrtk/WA/XzuOsysKjWeejCy5T+GRGdjOfrQdc4KIvc + pQ/XNTo/iXN+8HV+rCV/5lFP40ecCyxyTpfgj3lG5JVYWp95r6KHFE17bBS/6z7K8ps/+qYZ2YtO + OreOjINe0lt0UX7uJ48ZfW+4XvX4nsSuWfeFfB4rT3DcR/3CYTnOre8dIdlvHDdvvHf8Yw1305VL + 7Sc1muMlZ5O8NHLXjHVshy1c8NwPn9bDnrwT33mADX/mxlU94mR9Y4gXkc1T9acO52VZ6QfBTiAW + 0cG2P/3bFlB88de+8FWECEZBt3mpflGDP9nYqMfq3/qtfuEXjOeZvjkz7qArt6KH3FMIbsx48pWe + tc80pN3NpZ/qDj4zsFUIVx6X4LtPoJnnqg31aaFVYMnp80Kx1R5X+D30gz2/6aQv6+Y46SrCEQfb + +6SL8sRnP3nM6HvDNffDu+fTPv3iE+zA5zjVL1wRaUofjkvDnVdpna/7wbMKf6zh/AMKqaR+UoOM + NpvBrFYyMxl+y5+5cMFPPq35chN+uysPjPBnbt7kGXxccriefb0SYZfXOPMiqBdVHmz3m/4Xr+pv + f+3LVvoiqPUVmPp88qboMtd5cmv1C4N6CF/9yc+9g67coq7sX+4pBDdm/LpH2Wcaxu3m3TlF/5od + yNc1LsG7r5VnBV6Nlc+ndFsTdJGikteVWql5nv5mYly6kX/y6HyWfq2H6KK8z0mW9CQ+WbGgY9Qz + 88569D6rQlYeE5kPvORxIiGNW/mSeOEIc/1dH55V/BmWH1h4GAjCGVn8xPbM1P0kxjr2RXjuzyam + PXk7j+BsiiUu/s5zqGfyac041b+vVyLs+hp9rESLDgndZ/pfvOqn/bUvuyqoPKnnds/WA3VS0dxC + FOX+0lds9gsnBRd++rl30FUw8gpt/kP/nZ/7OT+yw2aapOu54hUXfObk45xxCf6YJ2FXZbY+dYHW + 5PNCkdUeV/hd91EW9Vs2tjSsm+OkLwmGfq2H6HICxNd5KGHdAzD2Nx3lD+/K6/qDI5/Hns9xqp88 + LkxA41a+NNx5WWf12f3gWbX+kRDNCcKZTYzZPYJcFCySWlyMLyJN/XIJvp/MCAh/5mM9p/hdj+ue + cTw0Dvfj2X5s9oL7tl1H+k+fABKgqXRom+wYlcfG7X+1HiiLt0SX0TW5v/Tl2Z+c8FMP4aefewdd + BSs95C59uK7R+aVT6Qlf58ea6Up27TNUcYf7x/0b9C29L8If84jjCr2obl8oCVHXyee1a9cKGU+9 + eP+WnmnJOlgm6XqR7qKL8ta78eI1Y3/T0QmFd+Wd9dz8fpEPceRxIiVgfN877uQ8G8dN51v18R8J + z5/+GZakqTNcT35sUOP65NGhlO0zj5/RGOOh4I3b++q6eeSskxV7uL/05VlvguqfEatv+hmHy8WH + CFbhlYN6yM2FVrQ0gtPMy1l+zuIBajcPP+s9xh35Y+9wiQP3MY+rujqvqpsKsO81+bxUf2qlrkM/ + IHxeJ/SePOIdcdFddFF+6CT9iXde3xuuvSG6WUcBL79f5AAveZQ2fOmDfowdH21uun6HKR4PrO3p + n2FJmjrD9eTHBjXSm12Ltqml9mub8fOhIPs2v7hulMVbkluoMnl50pdnXqa0yYjVN/1shP59nBzs + X+4pBDfM2zPydz3hgVN1ZHYi44LPbCK+rnEJvutFBPNctWE9Src1+bxQbLXHFX4P/WD7vKR8tyW+ + yUOCoV/rIToq4vi1L0fn7W9EjWMVo47oTz/yZOz50gfihGPamZd8FZn9xnHf+VSt/XxgPf0zLElT + 
Z+hPZj/ZfRbt0Bn3JzcPaZ7VOLQ6gts6+RPcl2zdCt4tXh7P7tuXSeXr8qQv9k0cX63HjJOD/cvN + hfE0OTo/93HZpi0eYHZzxQsXfGYT8nWNS/DdJyKY56oN61G6rUkPBda65KSuQz++iYedvqyb46Tr + RbqLTgiFtk5KyDxm7G9EPmHtz7yuH/no98PkBF/6AK9wgFQC5135er9xpHPf3Y9+6H5W/0hYSf2k + 9pONJPOJaelYJGskTc2FC74cmvrlEnznQUD4Mzdv8pB0iGTTh9A89FOcXV7Xa95OxHDDBLe46q9s + VSS+GU/+ph8L0d32lz4fHLp0qIp2+qjv9AuDwgjvPmVRVjiOcRKM/cs9heDGjHf+rgc6Mg3jdvPu + nPb3j3w5R62HLd7ci8yC7/N03BVZWI/SbU3QRYqOdq1U6wf//KaTdqyDZZKuuq+l49RDdFHe59R4 + nbMZ+xvRqGfmnfXc/H6RD3WQx4mUgPHmc740vHAVl/ui+zF/6M43N4af1CCrN7ufhN63dHXZboIv + Ik39cgm+8yDAmrLJ0/WIs/jCr3oR0TxVf8SUn5ph37ydaNG1P/1XfkaIb8ZDr9reEaSg2zi/9BPu + 3H7nr6i/5oy3RIftglb/67x5SSQnhRE+fbttNnqMaz0YQn4R0PCQ3lhq5qUrf/PAx3SsQvPwk+8Y + d+SPvcMlrvLu+LF3VYberBGcc8nnenMuqdYKtX5QzOdlXKOOPLJP6C66KOPzkVX1aAJpfyPSCRHH + Mvd8zG0cGTz6fIvIE+LI40QCGke+DvR+42g636pv/tCdl44QzmCZs3tk0soZXOYDHtsk0tQvN+E3 + /Eb+5D3WI84Dv+pFhboMAKT+1GHb++YFqBdVLmzHp//ikYjUJf7al61quFGEZd/G6Zc/69r2jz/n + E7dvfcUnbr/mOfynfjbqsdMHW6tfGIKtPhlhmQ+6xsH+uSb/yfMwf5+f+A7nzPDax6R6Gp97ZQdf + 16h8PtecV81AdZ9Yi39F3vaV3qy5L5xLPtcpRYectEdffBMPO81YBx9D6zn0az1EtxRZ+8ljxv6m + 4xNWPTOv8gFqHPk89nyuR/WTx4UJaBz76kDvN46m++5+8C8I1w/d61LrSQ6WObtHBIuCRYLqJnim + 4qXbjUvwOkSG4Xf4M7OrWY94D/yux7gZlzoc77rt70SLDg7XkXy2VRHztb/2ZSucG7W4OtN/8Sue + tb3xt37S9uf+0+duH3eH63N/6cuzPzlRN4XhCVSfshR20DUO4rjWm878NDl8HjXn/GqfaYjezZfc + jxv0vQTffVYeTFdqWB8JXXpHD+u4rpOVaj2h3Pymk6bkX3TW/5TuoovyPh9ZSshzNmN/I/IJa3/m + nfWsp86Rjzb5wEseJ1ICxve9404lXjhuHu6dHljn9UN3XjpC6vLN2T0yaeUMLjOTzzjszyZoxt7h + Eif3jfzJy2Z2cYOPSw75USHnGbfPa5z9COpFlQfb8cm3eMXT/tqXrfRFUOsrND3z2tn2tZ/6vO1N + v/WXbV/8yc/e64M6V78wqAdvVfUpS9fioGscxHHNy1b3hyaHz6PmnF/tMw3jdnPFKy74zMAe+WNf + hHdfKw8prtKwPqXbmqCLFE17KNlKGU+L97vu52jIOlgm6Xp4P7YeoovyxI/3i3hNqjoIG/XMvLOe + PGyE3vG5Hp50841zNp/zpeGF4/7h3t3BP+l+7ezp/5aQ0tQZric/NnC4/iTRom2fefwluAhqfQWn + T372Hdv//dJfsr32837p9mnPu6P6cn96E1T/vCSr79IFG9YlepRglIW91ptjtm3dCs9LXPo0D8Pw + O2mnn3yyM5P4qO/gO4U/5iHFVRrud96r6CFFR7tWqPXhm7jOY/ZjvSzTup8ndBddlPf5NF68Zu1v + 
Oj5h1TPzznrysGEk9xdfjg11kMcOJTCO9TlfFgvHfdfffOfXnv6T7hGb7zlpyjcJfq1PrHYMP6Rk + AM+Abo5e2Lyqr694/p3b9/6WX7Z9/Quft33infhzw90vKtblSV9uzG0dPunYHB11B32bI4Q7709g + 4UpPhVFfhOP3bnYi1UM+67vilM/Ufr0E775Wnhl6FdbWhwKWjp6gi3Ws9lCqlWo9Yff9HI1Yr6a7 + Qb/WQ3RRnvg6DyWk3iZVHYSNembeWU8eNkLv+FwPT7r5KoHzrnxJvHBk8/mrWsfhZ1jXr/8CXUnq + JzWa4KOPyccT09K5BfkVtnDBTz6t+VKP0sk78Z0HUGvKJhl2a/yuZ1+vRNjlNZ95OxEWVR4criP9 + V35Wojrir/3aXgRaXfkX/FPi9hWf8nHbm/6zT8T8HLTGflE2heFjpHSQxX1s9PmUnq2H3FMIbpQ+ + meseZZ9pkq7n3TlF/5odyNc1LsF3vYhQWyvySqxyX1tHyw5dJLjPQ5VaIeOpm9/8sdOM7DoG6Ut9 + TukuupyAz6nxOmczqg46Rj18mCRvz/T7YaJA7i8+8nMbccKRThviMZ/C1n7jKm7y4Vl1bXvO834S + hI8mqZ6cBLGImt0jkzI1tjlXkRMXPFOFT+thX4Tn/pG/8xzqEecQyfSurHmq/tThvK7bvIjqRZUL + 2/Hp37aA4ou/9oVXNdyoxUfPxG9YX//C527f/XmfuH3WJzzDevCESwd24rash+WqPqMHQTifY//S + W/HforHqAABAAElEQVTkKz3LFg/DaGcu/XxO0X/FHfljX4T3OS5+pLlSw/qUbmuCHtZ3XScrZDz7 + 8Zs/dpqSPXl0Pks/+qW36LRS6NpPHjOqDsJGPXpYznOSF3E8/xp7PkSTFpmbb8SbrwO1WLiK67qv + P7o9/My3Xdv+0Avuhetnk9RPajRXl3A+Md1SXYIqcuJYnWzlWk2okkvwnQdg5QE+c/PehN95nX/G + 7fuituHtRKs8BNqf/m2rItbf/tqXrXBu1OKjb3rh856xvfZzn7/9nRd//ParnnVH98lO3NZB1zjY + P9e8hIf+fR7cpm7rXrT+DMNvxmuu+B1+xB35Y1+EP+ZBmis1VLcvlASQTKhQ30Q493WyQsbbr4fW + Aqgv6+C41nPo13qILsoTn/dD8lim/kbkE1Y9M++sR+fvsANf+sD5k8eFdb3m60DvN47mundg+tnt + q190P/5Yg/bfnqR+UqOJuoTziemWEMoQ+jOz6YEvh6Z+uQTfeRAQ/sxU6zJ+12PcjENglYMZjs4j + 8eBqvxPbn3zpU4GH+PCJvnnK+qic/qv/5FnbD/yW529f++s/bnuGblfaOujK7nTemLjmm6d0pMnh + 86g551f7Oh+sd3OfA3WN/jWbkK9rXILvc0YE81y1YX1KtzVBTyk65KS9dNCbfNjpS3yTR+cz4mBT + B8nG8+o82U8eM6oOwhrHKvZ89vq8HOXzdp7ZB+LI0wUER76KrMXCcd/5XMb527njB9bZ2Y/q0hFS + l2/O5OwnMdaxsTyJ5374tB725GW1sgVnUwjD7/BnbtzAH/nNs57IyROc7dHHSsTqDENC4nQppINt + VTTsVVfFNYGoPqpfnoM/r/VnP/U52/f95udvX/hJzyz5DrqmX+rFNS8b9RnD51H6jXOzvo6TjgzH + 75P4EXfkj6244DIX3+QfpV2Jpfst3dYEPa3jktMXtfWBv+/n6MQ6+Bhaz1N6iC7KWPfGgzl5+xvR + qGfmnfWsp86Rz/XwhJuvEjDefNVE9pmvy1v3DoX9KJH1Dev8R5PUT2qQoVlWz9k91oyg2CSYuOC5 + Hz6th30RvvMAG/7MzVv1iJP1jSFeRDZP1Z86nNf1mhfBvahyYTs+/dsWUHzx177wVcShnlHaR+Xy + 
1z/nju01L3ve9m0v//jt1+CPROx0ZUfRg2tetkP/0luwdY8IbR6G0c5c8YrLOWcG5sgf+yL8MQ8p + rtJQ3VQgOlImFMg3MceS0woZb//8piOw8CYoOsTTrntcfvGLLsozz3p/s4LkVR2EjXpm3lmPzp9Q + jD0fbe6ClzxdQHArXxIvXMVVffCPB9bZtae/YdUZric/NnTm7WjbZx4/hcXgm/ZjcHz+L71ze/3n + /ZLtz/+G5+hPy+eT1Q8p30FdtkP/wWnmpSs/Z+kHrXbz8JPvGHeDvpfgj3mu2tFYD12wulfRw/eo + 2kPZvn/G0/KbP3b6kr3obtCPfuktuijPa5v95DFjfyNSfuJY3/4ciTSOfB57PscxsvmqMePI14Fa + LBxN5xM7n1EY/oZ1x/Wnv2HVGfoTAofIjwYeEmcv2tbRtp8yYgjn5cfa6zNxS/7Er3n29sbf9Anb + F//KO91e+qel26xr1a1bt9IP/mlLP4bhd6m785PP+q+4G/QtvXe4xJEX/snfhV2RhfVAhdHR1w16 + WMdqrxUynnr5zR877chedDfoR7/0kOBRZuikhNTbjP1NZ9TDh0ny9kw/z7/GyjP7QJxwAFUC41a+ + 3m8cCZ1P7HxGYfiB9dyX/Qx8jwrC5GxuzO6RScclqyInruNMxNc1LsH3kxkRu3yjjpvxqw5ENk/F + RUzXSW2rj5xZ1+XE9qd/41WR+GY8daJdLfZitfyxtvpV+I+q//ZnPHf715/9vO1F+NPy6p9N8hIe + +pfecFn30rPs3T1ieHCZD/cP2zfwJ9+OP3HFM/OI4wq9WJ/SbU14G/pCLTl9UVtPvonxK3Zasg6W + qfU8pYfooozPp/HiNWN/0xn1zLzJbxwZKg6FL74cG+oljx0CMt58Hej9xtF0n2jj/o3PKAw/sF55 + hj+Htb1NEHh5GfQErdk9Mmnl5Exc5gO+HJr65RJ8P5kRsMt3op5T/K7HdSdel3qX13Xb34kWHRyu + I/2nTzlUWNepuuhX+FiU/TE8vQJ/Zut1r/j47a992sdtz78T2uD8lxBu3OdR+uV+wNX6YY0o3+Ha + xyT/8f5x/8gfW3nCn7l4Jr84rtCL9Snd1qQ3Mcvse1UKtZ6w+eaPnZasg+PUt+7nwtHvfUTwvMRL + fPb1NOm8/Y2ocTyvPR9zG0c+jz2f62Fk81VjxpGvA7VYOJqV7+z8JzY+ozD8wOJq80/h/aRGE2yK + zfAS0JsZ69iKGrjguW9RtPILcRiTd+JP8XeeqmPij/ziRWXNU3UF57yjj5xZ14XikNDx6b/qtWP4 + 0wdntTUWZX+MT/zT8l/6yc/c3vi5H7+9CvM1bozh84hOpSf8fT5Y63wz9zmA5xbOO8L7XHNeF+cZ + pV2JpfXRhat7FT2sY98rvGnpaT3rzR87zVgHX8Mb3jcA0e990kX5uZ88ZuxvRMof3lFHzot+8VVc + 55l9IE44FSKg6yGf47JYOO5XvvOztxdqPrD8U3g++Rg8Z7XCSyQKuDlXkRPXcWQfTdCMfRGe+0f+ + znOoZ/JpLXofQvMwP9XY1em6zYugXhRMcNaR/o0XUHwznvwVxyIqD5dPpfH8Z5xtf/UFz96+87Of + u/3GT8A/JtbQOWO9O++yJTvWu3l3TtG/ZnIe9b0E3/eg8pDiKg3rUxdoTXiL8h7Pdte91j7fxLmf + QvrFOjtOulKfvG/EV+8v0UV5n0/jxVt8rIOOUc/M6/rpdR5HHflo04NzFI502tC9MF9FZr9xFcf9 + a342cWd9w6qfwvtJjebqzb6ezEw6Lhn9tAeOxci2Q/5+uQTfeRCgPMBnbt6b8Duv8884HhqH6/Rs + fycqv23Xkf4rTiKCB4Fdp/qmrfCxKPspNn36c+/Y/uXLn7v9nRd+3PYrn4mruNN93YvWz3LrPdHn + 
hT3F5ZwzU8sWmgbGKf6BP+Zx0NV5tT66UHWv6t77KZH22Ch+D/3qze/41Y91syyt5yk9RCeEglsn + 6ck85uTDRG/AUQ8fOsnbs3DkqzgQdH5smQ9x4asEzrvyJfHCka/ynfvfEHJnPbCuP/p2bvhJjaRo + liSc3WPNwMS+CM99PmR2o+zJG37Db+TvPFXHxB/5xYvKZr0SYZfX/ZkXWXtR5cJ2fPq3LSB52l/7 + sqvLY7+75p86xu/5FXfiHxOft33tpzxrw8/opec8tz6fyJ95d07Rv2bKd9T3EvwxDymu0mB9+3tV + 19FPidGuccZDBr6J657PfuSv+4jpYt1FJ4TCW6eqJzL3N6JRz8w769H7rIrZ8+XYcI7kcWEjL8+3 + A73fOJp1/tdPfcN6xjP1rw315ATLnN0jk0ZUzONJyawTr8zl15ovl+AZf+RP3lvhdz2uY8bt87pu + +1FTL6o82K4j/dgWkPW3v/ZlV4fw11/qWRtP3enj8POsP/PrnrV962fyb4KgbqUnJLnhnLHnY+Bt + jq7Rf8XlHAUyUMsd/yV5OvY2L/Smpi6+UHPSw4jlya06jVOf3OebGL9iCyI8dXZc63lKD9EJYXYk + arx4zdjfdJCPQ8c48ia/cWTw4P7iSx+olzx2CGgc++hA7zeOZvX5DP+RBu6sb1hf9iL8NTPnb9aT + k0nRLNnmE1O9Ikgz/RgTF3w5NPXLJfjOg4DwZ27e1EXS4gu/6kBk81T9wblO12veTiQK0cHh+PRf + /UlE6hF/7cuuCkDwqCVJSU/Z+QEI8b/81Ae3L37Lg9Kzzw+K9PlgrXPIXOfpc4r+NVPJ8nOpcQn+ + VJ6E3u5Zb1bVXxdoTXiL5n2VKmkvHfSwGnajyDd5ZI842NJbdFoptHWqejTB09+IRj16WBZA59Q4 + 8nns+XJsqIM8XUDuAevrQC0Wjib919+8fdnL/ZeMYmc9sATf7tKTEyxzdo9MWjk516Nx4pg9+1iY + Ma+X4Bl35E/e5r0Jv/M6/4xLHa7TdduPwnrBfduuI/2nTwAJ0FR1tl0NiiDNPjVnyvkP3/Xw9jmv + v3/76+/44PbB66XfOLcbzhkxPgbfF59T9H/87tNVOxH16QtV96p00Lu67qOK9sU0Hvt8E+NX7PRl + 3fqa2n9Kd9FJcbPj3i79yWvG/kY06pl5k984MlTcji99gJc8TiQg483Xgd5vHE0EnJ3dJUe93PDA + 0pOTZGiW1c8npqXDNqnoz3zAl0NTv1yC7zwICH/m1NEzSYsv/K5nXy/xwckvk+LVdi+W7TrSP/el + sgG7eOpTcSfqSV1PlfmH7n90+50/8MD2VT/60PauD+Eqnjpv6V76Yy35M5/C515RxMN5x/a55rxq + FnyfhxRXadx4r6JH3lep1vev9cSbWA+tgx7Woa+p9R/60d/Xne8LPT2Iz37yOC8fJgpoHM29vkQa + Rz6PPZ/rYWTzVd3Gka8DtVg4muC9frMH1p3P/k6ArpPFT2zPaoXNm8Kzmgaa8wGvzOXXmi+X4P3E + VYnNn7y3wq86ENk8Vdc+r+s1L2rqRZUH2/Hp37aA4ou/9oWvDo/91vbH+vQePJz+5I89tP22Nz6w + /cB9+rN9atnnEZ1KT3j6fLCW/JkvuR85R5Hz5RL8MU/HXZGF9akLtCY9FEZ7bBS/h3715o++aUf2 + 5NF9HXGwpbfotFJo6yQ9iTdjfyNSfsvNh0ny9kx/P3XG+RaRJ8QJB+7e90Ms+Xq/cewaxHc8epcr + 8uv+G9YffuF78Uh7C4P5BMzsHpl0XLIqcuKCF/VoYtoX4f3E3fMnb/OmLhIe+MWLCpun6g/OeRlW + feTMikeTtLe4Ew8hnK/95sn27E/rp8DLw9Dvb/7MB7fP+r77tr/3Cw9L19m29MOGdaz7VLbOFevd + 
3Oew7l2fO4nLz6XGJfg+Z4CZ56oN66MLNa8X36QqdbXri9p68qGBX7HTl+xFd7HuoovyPh9ZSkhe + M/Y3nVHPzJv8xpGh4kCw+HJs4CWPHQIy3nwd6P3GwTw/+8H58ysC9g8shVy7i1X7ie3ZPTLpuGTj + SXnEi6YfnbIQ6KYmb8eRlw+jA3/yNi51kfLA7yf+vl7GBee8I4/EWzyiE9ziTjyUM0/7zZNtlpM8 + Wn+Mv7zufQ9vn/f6+7b/6W0f2PgD9qlzWvd5RKe6T5LpcM7YA4POn7HWfX//uH+Dvo/xPonjCr1Y + n3mvSge9q2e7vqitJ/x888dOS9bNca3neL/Q731E8H3RebKfPGbsb0SNY30rb/IbR76K6zx5v3Mf + ceTpAlhn+uhALRYO5tn5XeXt6cQD6/pdvBx+YntWK2weYWm1n7D1UJh4sUuUzoNAN7XDJQ95T/An + 77GeU/yuZ18v4/Z5R57VyKIT3OK6TuN1uOpzxoNg0Xee0fHH3PInH3p0+wM/eP/2e978wPaTD+If + /9g/uxw6p2mfR+l3s3NmOH6fxI+4nGP4Y/uc9veVGO6LF2vOV224Xwq47pHrlaJpD2XTrv5kjfs5 + mrIOTaf++33DuOghuigz95PHpP2NyCdcZY46WDd56ef511h5yq8JccIxIPvpowO1WDiYh59fEXDj + A4s/x7p+ft1PbJDy0gDYM9axSTBxLEa2HXxdo5q6CH+Kv/OENzNZh0g2fQjNQz/F2eUdfeTM2g8W + wS2u66z+7Bj+2l/0nYe1fKwNfov68z/x0Pabvvfe7dvf+4jbk75om9bQOb1LPxjWcd2LPh+G0Z+5 + z8HndozLOQLucQn+mCdhV2VWf7t7FT2k6GjXChlPvcb9HM1YL+pdPFqc0F10Ud7nI0t6El/y8mTo + 0AmFd89nL3ECKrB1LyJPiAtf76cPhTmBsjnvOd2Hn18ReeMDiz/Hura9xU9sRPEhAWDPIrVNgolj + t7Lt4Osa1dRF+FP8ydu8N+F33n29Pj2L6byjD50Syuu6sJb2FnfiyzH86bvDx2K1/NG+okSv+fkP + bi//7nu3v/72h7ZHsGGd4aBuvluYJdyu3eCs47oXtHWuQO/mPgfyFj4zmcvfSS7BH/N03BVZWJ/S + bU14l1L12S7toR/8emgd9LDOfSw+p6Ff6yG6KE/8en87j9K7DsJGPTOv66fX5+WoIx9telC/cKTT + RuVlXxWZ/eCubzf8/IrIGx9Y3D0/v8tPbDTDprHVM91lY9I+s04897GhqV/K3uESJ3jlwTr8mW+F + X7yI7DqZjyLs8o4+cmbtd2LHp5/qjxWJb8aT39vqsXi0/hh4+Q/3PrJ9wffds33VWx/Y3v2hR/e6 + sr/owfXQmSaHz6Pmm50zsH3OiQs+swn5ukafm89Z+Qa+7wEiyH/VhvWZ96p00GN83CvZdR/RxFPl + Gxb+g+e7Tp3Z6QfWdsddfmLzCQixENmzRLNNwonjJZZtB1/XAA/HRfhT/MnbvDfhd959vX5Tzbyj + D247QdVl23VUH3ovFJD1l73qIp/Cx6Lsj9LpFz90fftqPKT4sHoz/piC+zvoyt6iB9d6eEUIbljn + nse53XDOALWeiQs+s4n4ukYJr3MPLjNQxzwr8GqsVLcvVN2r0qEer32v/DhXP6yc31TmN510Yx36 + WIw/pYeusxRXaOukhDxnM/Y3olHPzOv6XY/OvwrZ87kedtZ849zM14Guh/lY3qP7P39VqAu+Yd15 + 5+7PY7lHJo2omCEGx+6TDcVkfzZRwJviGXfkT16quMvjxOLLi/MaN+NSh+Ndr/2I7AX3bbuO5Euf + ABKgqepsuyoQQar56Jv5xxS+8e0Pbi/793dv3/JzHyhBMh10ZXvpn2tewkP/Pg9uU7fSU2GHc2Z4 + 
7WPa40fckT/2jn/gfY7gK35yX6VhfUq3NelhxDqXnO7AePZD/Zae6ck6OK71PKWH6IRQaOukhOQ1 + I/PoYDhj6BhH3llPP+WEG++PiiNR81UC5135krgeYid/fsU6Tn/DOvx5LPfIpHW5OEMMDs0oYjfb + IX+/XILvJzMCdvkYdwv8rsd1JN4qzzpdr/2dSCWqPDhcR/pJn3KosK5TddFfHfai7I+i6XXv/dD2 + G7/rfdvX/fiD24OP8r+nSd/p76Are0v/XOs2RwhuzPhxfrUv/bHezaWfzvEWzjvCX4Tvc6o8mK7U + UN0WuvSOHrmvKZd23UesnhLfsLbTP7+iIqcfWPRs689jSTJeIuz2JeMlpV2XdTfbIX+/XIL3E3fP + n7y8nJfxy1+fADNOb6au0/Xaj81ecN+260i+6o9A9TnjEVDb6rH60/qj5OUn8EcTft+b7t1+9xvv + 2X7qITyoqIfeROnbbdPR55M+owdD9PDyfaDJ4fOoOedX+5Id691cvIoLPrMJ+brGJfiuFxFqa0Ve + iZX1QWXR0dcJ6lvHag+10q77KIvvw2WnGevWdNZ/6Nd6iC7K+3xkKSF5zdjfiEY9M6/rZ3UIQJ6M + lcdE5gOvcEBVAuNWvt4nbjv98yvmuPiBdX7920jSn1RsnlT4rbmKvOgTbjbBRLEvwneewd95qo7U + M/m0Fr0rax7WR3F2ddKsPlYji05wi+s6jVfH4pvx5G/6sUhFV3e+H/+67+t+7IHtc//9+7Z/9Ysf + RBs8Z9RLwXnCpYMs7ssf3bThfomTewrBDbqNs46+R9lnGnp38yl8zt2BfF3jEnyfMyKY56oN61O6 + rQm6RLdUbKVaT/h9XsY1inpMHtkndBcdFXF86yQ96x7Ia76FI/2ej7lVL99nNfZ88CsN4phPaWfe + la+Ahbv2beE7zhc/sL7iZa/bzq+/bT4xmSqt9hO2HgqyUV329bCY2aqpHW7guX/kl32L/M7r/DMu + dTgv6k+e1YiqVHkItL/6KFuHpjri96wzyFmJYDZ89dbU5e//3EPbS7/jvdv/gZ9X8edWq184CeAJ + d9/0c++gq2CFk1sBXPWQ3rA03+ycgWHak/gRV4U0f+wd/8BzX7zFvwKvxsr9lm5rgvq+UOs60V7v + Kz00hp1urIPPq/U8pYfooox1b7x4zag66Bj1PBnfsJDwp7YvffHrXMWNrxc/sM7OzrdrZ/94PjEt + nVvoJz5E4eWRnZl5uD9H2TvcwHcexFhTHBLWF+GP/MIhonmqruDMYz7zdiJVqfLgcHz6qfysRHzx + 135tLwKtruTLm+55ePv873rv9kd/8N7tPfg3gdYr/bBflE1h+DYvHWTpGA+6xkEc13x3iYCGx+Tv + +wGX9XUc0yXtSfy4H0f+2IoLLvOJPK7q6ry639JtTdBDiqa9Vqj1gX9+00lH1sHHIF11X+sekwW2 + 92HwvDpP9n0SOcb+RtQ4Xos9H3MbRz6PlWf2gTjydAGph3wdqAVw/w/qy24513TxA4uYR85f059U + vAzYSqv+hGDv2KEYc2asROGiRtk7XOIA4f6RX/bkHfgjv3jB0DwVF5zzjjyrERWo8pDQ8enHtg5X + fPHXvvD7/sq6MtO78JdS/dEfvGf7bd/9vo1/tmqnD6pc/cKg4Dzh0kEWdTrqGgdxck8huFH6ZB7n + 1vnhY7p1DGJSPU/EfUKqKzWoQwk9J+gRHVKuccZTL7/5Yzeq72fpKrvuMUCtu+ii/NxPHjOqDsJG + PXzoJG/P9ON8M1Ye8pGfr4gTrjeqHvLRj7EWr/HG6debP7C+8uX/gX/jHzn7yUnusknpJzuflCga + STXbwdc1qqkdbuC5L15EhD9z8w48Ei1urJzX+WdccM47+mC4geIRHWzXkX4Wr3jaX/uyq4xDPbV7 + 
26YPXT/f/vpP3L+95K534x8DP6CHA4tpnave1S+c1IPI6lOWZD7oGgdxXPOyHfqX3nBpHufW+RlG + f+ZRz62cd/Lt+G+SB2mu1LA+pduaoIcUTXuo2QoZT8tv/thpyjpQ79JVi/V+pF96iy7KE5/95DGj + 6iBs1KOHJXm5m5l+nn+NPZ/rYUXNlzjlZX0dyDX+dtGX6K9qD99xvvkDi+iza9+kVngZaOJ3bLlZ + LJPP2Q6+rlFN7XCJA6qfzFiHP/Ot8IsXkc1T9URM5x15ViOqUeUhoePTj21VJL74a1/4ajEEZd6u + 6UH8d39/9x0Pbq/4jvfgB+v3b/jvlXU+qWenDzZXv8QRxfOs/uTn3kFXbkUPuacQ3Jjx437UPtO0 + /FjLln4Vl3uRGZicI5cal+C7T4DJfy/+RcOrfviB7R0fwL8Nvc1Db+rcl+ho2aELlZntWin2o334 + 9dAqW5vCm6DodK79vim/dYYBXX0C1nvpz3OGS17zLRx1rPcF/aMe8ylM+4uPOO4jjn11Acm78hF4 + fnb2TWa5+PXyB9b1a/+AGfvJCYM10ObQzGS0M9shf79cgj/F33nCm/kEv+txHTMOhakE1+d67cd2 + L7hv23Wkn/QJoPqb8ey34roeEt6ewfK/5Wce3F702ndtf+It+GMKD/g/UvYn26qrdR66+JMTBIKl + LzKmv4OucbB/rnEu0Zkmh8+j5nFunR8YpmO85lFP36MRd+SPrTzBZa78O37s/ZN34c+bfc8921/4 + yfprcbB3O4YepuvCSYC6XtBj6s7qrFDrCXt+00n91oF6Dz1P6SG6KOPzWfrznM3oe5P84a33Basq + 4OX3ixyIYx9OxA3F972jff06/lK1Z32LnDd5ufyB9Uc+812I/47jJ1Y/YSEKu5SdmQm5P0fZO9zA + n+K3trfG73pcx4xLHc7LsiheldeLZduffoxHgwZoSjzrqjj2eex39v4Er99094e23/qd795e/ea7 + 8QN1NpW6MfOW1OXKPhHWyzMvk8pXaPrCLBxfD7rGwf7l5sJ4mhyTv+9H7Ss/1ru54hWXe5HZhHxd + 4xK8z9H1zTyU53/Hf8j9Wd9z7/YP8B9203c7hvVBdvaxJp8XClpyUte6j9zHL5/XCb0nj3hHHGz2 + Kl7dB8e3TnZ0Xt8bBPiEq8w9n72sn8wee770gTjydAHcTx8JvPba7ctf8O7wXDRf/sBS5Nk38YnK + FpWTcxXpJzuS02YRKT5zMl+CZ9yRXzbjwpuZnAd+53X+GRec63Pd9oOjF0UH23WkH9sCqo74a1/4 + avBQT+0+odMvfODR7Svf9H48rN6zvenuh6sdnlDqxswTg24ZrXPVu/oFQjDiR7zoDrqSLHpwTf5D + /9JbMOefNtOQdjePem7lvJNPvLkXmSvvjj/5Ks8vfvDR7Y/9yIPbF7z+nu3776m/MgeYJ2tYj9Jt + TT4vFLHktFKtHxB6aC2ASrYOfSyIv0B30UUZ4ut9Jz6esxXob0Q6qfDW+0L1GXj5/SIf4sijtBWn + vDPf9Zv+sN1V3ewPjgah+Zn/GJfoPqZKq/6EgF2XdTczhvtzlL3Dseixf+SXfYv85jHfjEsdzut6 + 7UdxvahyYRNHcSdeQNURv2edQdqsPmbLT9SaP1D/az9+3/bSf/uu7e//7ENVb9pxQdYDdfLEcgtR + kPur+ttmvzCoh/DTz72DroKRV2jzH/rv/NKt9CQ7bMkuVsfLrnjF5V5kTj7OGZfgbzXPD95/ffsd + 33/v9pX4D77fyf/Nz5M0rA8FLB09+bxQQ7XHFX4P/WD3/Ry1Wremk848d+fhfukuupzA3E8ek/Y3 + IuUP756PyMvvF1GII48PmhtVD/m4ff7Adsf5t8pxycutfcN61affh7/U71+4JRaJJPUm2D3JKUrt + 
zzeJargE3096gMOfmV3t8pAweUSeeoybccE53jj7O5EYRAeH60i+xSue9te+7C6gFk/s9C9+Af84 + g59Tfd2P3LM9gAeX+2O9SzdWoH4585bwVtRwf8OPQF6m9M8IHkDHw+TGMU4BxMk9hXCiFe/802Y1 + jNvNKqDy3sJ5u+CL8V3vreQB5lvxj4evwM+3/upPPbR9gLo+wcN6lG5rgi7OXXKgCivV+sH2ee1r + lH/ykCA6kgW29BadVupw7SePG/e94dp5RFf3QLtV4OX3i2jcH/J0Aamn7u352T/a/puXPkDkZePW + HlhkObv2GrdUEtabwE92PimtVp7o802iIi7B9ycAwMpDsZl28lL04jnye9/+GRececxnfyda5cFB + HMWdeFWkOuJPXZwVPhZlP87T2+5/ePtd3/WL2xe//r36gfr+E4v1Lt2Y2npg5i2py5X91qdw7hcG + HcKPePV30FUwJ7RbwnG3R+dXYaUnvNZXWXy+2DvWw3qt/4pTg80uIlk7XOLkrvuDtfgz14Ht4gr/ + EP4N61/+yYe2z/7ue7b/Dz+gfyKH8rOyOrg+P72rve38VHjpoIfVsFOj+2m6G/Sj3zqTLooQn/3k + SVafLxDacH2jDm5gXH6/iEIcebqA5CUf/ibk7eyW/nGQTLf+wHr0Q/8cCd+VVvuJz+bZ9JxVI5Fj + SCRqdRrfT3qEUIq2L8Bb9MUvXkQe44KTn5qBz/ydSCQqr/3pp+qV2Kx7xseuGkSw6nm8Vvc+fH37 + H95y9/ZZ//YXtrve80EW4Dp4+qOR2lZ/zG09MAtHoMfqH/EYttkvDMGKv/rxdNDVgVUH4yQMd3t0 + fhVWesLb+bFW+Zk7H/NH/xXnApueRDKU5wT+MeUBU+olKf/R8FVvuW/7z99wz/aW+56Yn285X+m2 + Jp+X6mElHD6Y1KcPFygX25iqf/JcpLvoorzjZElP8prR9yb5ieN5rbzJf/n9IgfidA+xrASMJ9/1 + 6+fv3j700L8l6lbGrT+wXv2Kh0H4mrTqTwjeLau0m5mZ+3OUvcOx6LHvo0FPCmeTt85vHvPNuNTh + vOazH+S9qHJhE6dLwbrKrkXbDou/mqw+yvqIJ/5Tyd99+/3bi//Nz2/f+BP3bf1//EtdOOxVP/uO + iQVG6yocK/Zwf8OPQPcLv2DpKzyMO+jKra6DbgTSHqPzC3eTc2Y4fp/Ej/tx5I+tuOAyF594w5+5 + 6tzFjfxYegD3Rvww/vNff+/2x/HzLf7Fho/ncL+l25r0JmaeJSd1HfoB0fdzFOR++lis5yk9RBdl + rLssJfQ9Uv7cL84YdM+8rh/7wpHBg/uLz3GqP3zVmHHId+3a39/8bAnFTedbf2CR5vr538ST8RH3 + 7CL1pGWREIddaSaW9hxl73ADz33xIib8mZt34I/8zuv8My4452VZlUeqIlnX5cT2px/jVRFxIF7x + savJ4inrI5re8L4Pbr/pte/cvvpN79veg3+b5X5GHtbBa9EO1hsTCwz1y1k4Aj129WNr9QtDsPQV + HsYddOVW9JCbBRlPk6PzC1d61r7Kxno3V7zics6ZTcjXNS7Bd5+IuDQPMKm3E1Td1zF/yzs/sL3s + u+7evgF/HOLxem45X+m2Jp+X6kkl1HXoB8T8ptMo1dvH4n6Gfq2H6KKI+5YlPX2PyNn3i/eHNqaZ + N3pdfr8YDV7yOBE3ovcj16+d/a/auMWXx/bA+sqX/Udk/Rb3zOywIAq72c12yN8vxGHscImrffFi + Hf7Mt8IvXkRynnHYYNrK69n+TlR+245PPxUnsdnnjI+tcDpq8eFPP/fQI9uXvuE92+ff9QvbD+G/ + ++NY/RQv87AOnv5opLbdZ8UpXjgCPRaf6139wi9Y8bdujDvoyq2ug24WZD66OMjb883OGSC3cQI/ + 
4o78sZUnuMyVX7zhz3yTugBZQ/2tvvnzrf/5bQ9un/s9d2//4hc/8p9vWZ/SbU0+V1Sx5KQudR+5 + j19880ffFGwdHNd6ntJDdFHG59R48Zqx7xfvD4bkGHmT3zgyeHB/8TlO9ZPHDgEr/lu2L/mMn0/s + rcyP7YFFxuvnfwmveMC6SM0sknZm4srPpcYleMb7aECDgLYn7034lR+Rx7jUIT81Sx6Jp0SrvPan + H+NVkeqY8eyXtsLHouzHMH2AP+z9kbu3l/zrd27/7zvur3pMvKuXntTB07dQ2GS9MVec4MIR6LH4 + Fk5vApqCmUh6Ycv9HXQlVdfBuCkEnXQv/nkvOj8wKj/zKfxNzrsKc57gMlf+Hf/N8hQe0xrqb/Rd + np968JHtv37zvdvvesPd23+s/5pgBd36yvqUbmvCaUW3cPlgWk/4fV7GNSrngW31PeonpnUXXZSZ + +8ljxv5GNOqZeWc9Ov8qZOVxfT5W6EgeFyYkvrniD7df+4sVdsvTY39g8VvW+fbP+glbl1U2qsv+ + bELVEIexww089y0ZRCxcZl7OXZyJ+NrDeY2bcanD8c5vP0J7wX3briP5ql4C1eeMR0Btq4jqrwu6 + xcW3/dwD20v/9c9tf+Gtd28PPoJ/X1I8cx5lrjp4+u1gvTHZSOrGLByBHu5v+BHoT2z4BUtf4WHc + QVduVUKheCurbro4Zv19frWvsrHezRWvuFs47+S7CN993kqeUS+WHupv9N3b1uW77n50+zx82/pT + P/rA9j78i5HHOqxP6bYmnxfIlpzMV/eR+/jl83IdyWsdHCddR/3EtB6ii/JzP3nM6HujSG2IbtZR + BV5+vxiO+vELkwvEdO3s7J9tX/YZP07vYxmP/YFF9vPtr/QTti6rbDSRfV7S3Sh7hxt47lsy9MQU + sRkXXGYSF19yOK/zJ16nXjjnnbyINFAUgsF23uqjbAFVR/yedQZp81BP6rpo/rF7P7T9jrt+fvv9 + 3/OL2zvwqa0x+nM/M18xpQ6eftfPemO6oI4XjkAP91f1Y2v1C0MwE3W86A66kqrrYJyE4m6PFU++ + 0hPezo+1ys9c+iku+MxkLT+XGpfgH1MeEKbeYq/+Rt/lWLjzDV+Mt/8L/6H5y77z/dvf+mn/fxs7 + /pKFeUq3NeG0JPhol/bQD349tA56iG/y0D/0o196i04rVbj2k8eF9zeiUc/M6/ohE/3Ik7Hny7Gh + fuGAqrofffTsryTmscwf3gPrj7z0e1Hk65jIT3aIwaIpSorPnGrK3uEGnvuWjCKYN3PzDjwShVmz + 8zr/jAvOeScvwgyseNuuI/1UfxKb/c342AqnoxY3n96Pn9p+zX947/bZ/+ad279/zweWXgwb/bmf + ma94mYd18Pp1/aw3puvoeOEI9HB/5uXO6heGYMVf/Xg66OrAqoNxLMh56eLo/Cqs9Kx9lY31bu58 + zB/9V9yRP7bynMB3n7eSZ9SLpceoO3XSkb6wEo72vfhm/D/ir5z+HPyPPPg/9LiVYZ7SbU0+V+UJ + C/MsHfTQGHajVK+PQfWO+olpPUS3Olr7yWPGvl/dJ6sYdZCfvPTz/Gvs+VyP6heOAYp7Hb5dfW9i + Hsv84T2wmOH6uZ6QetKiiN1M/2iCZuwdLnFyQwzO+K2Zl7D22eQuDvvh45JDfkRwnnHBOd44+xHU + i6KD7fjkW7ziaX/ty1b6Iqj1iYmfxn/rbfduL/qX79j+NuZHSh/XXQHpE2b2Xc+gZxzzUinM7o/1 + xsRixgtHoMfiWzi9CWgKZqKVn3EHXbnVddDNgsxHF8eKJ1/pWfsqG+vdXPGKCz6zCfm6xiX47hMR + l+YBJvV2AvU3+i7Hwrlf28Dh10/hf+rxu7//7u2VP3DP9rYH+g+iNOVcJK51tOziIW7JyTxDPyB8 + 
Xs4fTvHVMajfUb/58r4gXRRx341XHjP2/UI+x7OKUUf0p198FYf9xZc+rE858L8cvPbnjH7srx/+ + A+tVL/l2lPr9+qRgkSw6M+sYTaissne4ge8nM8CUqO3JO/BHfvHyMIFPvC8D5TOftY0fmwaW37bj + 00/FOVD4Pf9o89ivWP3yHe9+aHv5t//s9jVves/2fvxF6kqr0wt/gUd/7sf+UaYTYsOfbIhTXtab + dka/dDMPeDN29Svcl6loFBGdGOO2DrrGoTpgkN9AejRm/fNedH6gWBWr1VzxiosOmcl44I99Ef4x + 5RG9dWMqDear/KmT+8pnQMHcgd7M5f/2d39o+1x82/oz+PvI+PdwnRrmgU95evJ5iSdRxR99gOAN + WnUYJ3vR2T/0o199iE4rBa795Ck+3RuuuZ/6Vt7kv/x+MRpxi+/7ti954V3c/XDGh//A4t+7fL59 + vZ/sEAPiUHx/cqAU2nOUvcMNPPctmSVqe/IO/JHfeZ3fZ+J6gnNellV5cmZdF4pFoP3px3Y5hr/2 + ha8mi2e2/NP4t0i//7vftf1O/KzqP97Hf1QY9en6hL+iRn/uJ/VwHnmYl/FutHhjGtjxwhHosfpf + OL3ZaApm3o4XbNSdQjirDsZxIWBlcd00xHPoi2mI3s0Vv8OPuCN/7Ivw3N/xJ9+pPKkTcw/iKn94 + 6FM+gdyvbeiDX8IVP79B/w38jz74P/z4pp95CP9A0syOFg4RytOTeAgoGq7wu+6jLOZZNrY0rMPg + Ee/CtR6iWx2t/eQpPuYlTPnDu+ezlzgBFbjnc5zqL75Hz7YP62dXIsfLh//AIsOnvuQf4RX/1hAi + sujM9I0maMbe4Qa+n/SAWlOIo7DBO/DhIzWHn/iuY8YF57zG2Y+gXlR5sF1H+lm84ml/7ctW+iLw + mn/r51/4ofdvL8E//vHfAnrwUEd9vg1Vd0FGf+4n9Qx66Qyb8V0/eWMyj+M0C0egh/sbfgT6Ext+ + wUy08jNu1J1z7TronkIQv+ef96LzA6PyMxev8kaHzCbk6xqX4B9THrCm306g/kbf5Vi4qTNw+KV+ + qi4wKoL/BvFP/vB92+fhfwDyve9fP98yT+m2JvEwsGnE4zq0rzzLVhLhc26l66hfcbBdHyyeV9XX + OikheYmm13wLR96VNzoYRz6PPV/6QBx+nV/ffnz7Qy/8p8F+OPNH9sD6gjP8662zv8wu/YSvmZVI + lFFS2Ttc4gRnUyX2tBkXXGbSHvjFS1GwL56KC855GRY/OAwkm2E8I/nTj20BxRd/7QuvcG6QZfuH + +HNUn/HP37H9xbe+b+PfVqK8gviSdH5dn+kHaPSXuMbnTqQOxnf9rDemgR0vHIEei2/hdJloCmai + jhfsoCupug7GTSHoXH2J59CXygZmN0u/igs+swn5usYl+O4TEZfmASb9dgL1N/oux8It/Zjh+A0L + jIpQmSjgR+5/ZPvC77t7+5I33bP9NP6tsHngUJ6exMPAao8r/HYd2oft8zI/9zjEt+jKHnEglA6i + 06rjvJ882nYddCi/65l5owP71vk7THkXn+Nan7OzvwTsvvCKu9VJJd0q+CTu350/Y/vJN//02dm1 + T5bKvLynBvcpWl3C3Qy8mpzzwDWvDvc0v5/sdZjkqXwR84Z8ndB8C16XTxsiwksdiqajH24O4D/n + lz57+/73+X/4wMPtOANku072e/QL5Lq5ZH5NzNemF1VHATCVroXn/kV5en/H7/iZiDiNTLBdR28c + 2qt9R+1fc24187ITLb5TM/3MN2bpwfhTg/us9zB/WHkmf/HdwKM60gECyl4PrZBUvdWoedIXw9qB + +qt80uGX2CucOw1ov3mSSbPqHTywoyP93Ufyive4T6DYqj5FamPWXxu9r0W97PNgE/lQyc8/+qs/ + 
/VM2fcmZ6Me2/si+YTGXv2X9bxK/LplKkCijmLJ3uIHvJhHiw8qhQb3gMpP2wC9eRDYP/cAH57w0 + w9uJyGaY4PTXZSi7L0v7zcNCuwws3sCHFdOST6c+LxVvwaivboXrZgUYo7/s7+olphKIX4nCW3mr + oI5nHupQY/Exzn24XxiCEe99+/k66i7+VQfdCiCwR+dXvUuHzg8k07n6mc/1Kn7osYSuFLPP4DKT + F/4df/Kdiit8MXsadYeHjvTlymOjP/wSrviXH0FwuJ7SoexyzEk8zsNXDiuUvM6z9DSm6hBv6Trq + Fwts1weL5yXe1FXxQKT8vl+NI+/KO+u5+f1idsX91Y/0YUWmj/yBRZYHnvN/oqgfY7frk5mijCGR + qJVV3c2A6ZOAM37riHj5ar95b8LvvM4/4yKm8408KxGycB8vCHQd1UfZ5Rh+44xXuAmEr/qrE9el + DEpgfsDlL56i6D5hJ67xrI9D+lW8G8Um6628amTEM09uocKHrm07XoILP+KV96Ar4pLQ7mqc+zVm + /ce+VDZwu3nWnXPOTM7yF33byhNcZsGrT6wvzVP45uZCgo6+y5m+ANCObeDwS3m6zvgBg4M4PWzE + a7sccxIPiZtGeVyH9pVn2dzjsA6O6zpO6cGysA8kw6quikeFyav7SVjjWObKGx2ME5DgAx9t7p7/ + 2PVPuhPPiI98PD4PrD/xAvxt/udfw+ry5LUoo0CJRK3QXHCZAeO+tMTampZ9Af7I77zOn3iptcs7 + 8uTM2u/ErqP6AFF4la/sxU9/9ag6bcuPw2Unjidm2csf/uI46KEo8DpfYZKHfO1gnpjMs3j9SUmg + h/sbfvE7XnzFm7rJmz6czvxJaDc8BlaWPX+fN7ydX6ywM1e88kaHzGQ98Me+CP+Y8oi++mIuDuar + /Orbu6rfS+OVn+eMXzt91FmVLXnor/tQtk5MeSod0844JbJCzmN/81RNnKzD4Bn1x+/6YKEvMu33 + k0fbroOwxrHaqp+7dR6X3y9yXPua7b/EM+JxGI/PA4uFfMVL/9X59fN/mievRRkVSiRqpdPaz4Bx + 35JZorYvwB/5nReXIjwVF5z8SLD8SKqEOhXD2m8e+sMrQPvTB+fqUflsixaHywSOJ2bZy188RcE3 + SfBzFv6Yh3ztYFxMAzteOAI9uO+whdObjaZgJup4wVzXjEtCu+FpIVYersRz6Es8lS5pVz7mLx0y + m4iva1S+Hf/Ac/+W84A1+TsB+YsvPPQtnDovG/Xil3CtQ/wIgsP1pK/wyFF+TeJxHr5ykKfiZDHP + songUF2LruyFc37iAEZf5nWcLDvslxdAOpTfcTNvdGDf5iP2yMeN83/6yJd8+r+S83F4efweWCzm + 2rX/Fk/e+1SXRBkVlu1PAohOGyL1kxq2jwbbCON+5sYN/BSJWcxjvhkXnPNNXgQZyHDDYDtv6lu8 + ArS/9mUr3ARlY9KlYgLXpQx4GfX5Ngw/3QufONdjerJ0HYxXIl23bDdfxwtHoMfi020U3p+c8Atm + 3o4XbNRd55iEdiMw+yMPl+I59KWyKx3jZVf8Dj/ijvyxL8Jz/5bzpE7MPVhP5Q8PfconkDovG/rg + 1+wDSKOqQddT5wugebSQAEpH/hnXeca9UJ5lC8K4ImieUT8xzs8ZBvpiJo6170Lllxc2YY2jufK6 + fnqJE5DgHR989z+63fnH5XicXh7fB9aXf+bPoK4/q9pGE9P2JwEOl36o009q2JbMEnFf9sQN/BSJ + /OYx34wLzvmMsx9BvSjNYTtv6lu84ml/7ctWdyYoG5MOlwlcFzG+JOaPn2HrsI96KKp1oIUhPSpe + icKbdswX3ss+AV0P6wQ3+Vin+ggP9w66ClY4uRXAVY/Or3qXDs6nLEqXtCfxNzlvF0w5WEfxZ0YV + 
jylP4bt4LgavZC5n6nQHlZ/64JdwElIEipBZeuqhJt4V5zyVjmmpP2dP2on+2leepSf3OKzD4FGe + hWs9JPjqaO3L0Xl9b8Rc/Kxiz2cv4qB7xuTD7tdtf+gFPxvf4zE/vg8sVvTxL/lG1K8fwO8KrKb8 + SVBPaojaT2peNgREyt2TP7jMJB4i2XTkMS4452VY5VmJGG6YtKc/9RnPayRA+2u/tptgwXS4jFNe + A/BqG7Dyh18A3bbg5yw86+Vg38xDpdpB3pgGdrxwBHpw32EL537hF8xEHS/YqJuJOLoOrOvNof16 + WfHkWzp0fobhN9mO9TR+xClfcWuqOpQnuMzkTZ/hz3wqrvCY1lB/o+/ypC9X7jzsQG9mYG70YxMN + up7SoexyzEk8TFVlciWC8DrP0pNYDvnFK7qyF875ixc6mddxspSQeNG5DjqU33H9vuBuAX0PBVTg + ynP+xuvPfOE3avNxfHn8H1ivPMN/9Xntq/pRnWIlEsTkjGZ3MzD9ZMZaR8TLV/un8Ed+8SGieSpP + cM438uiUlAAv3McLEjo+9dkux/DXvvAKN0HZqrvejq5LGZSg65M//MURXWAmrvG5E+oLfr0LBCze + pRvZOl44VuSx+Exom/3CLxgW6iN+xh105VbXQbcCuNuj8wtXesLb+bFmuqQ9iR96uMCmd/7iO3U/ + HlOe8Ax69zf6Ll/qdOXuR/qgE/UjIQke+pWeethEN+FKtzUhasQppxVKXj006jyqJKOat3SVfUJ3 + 0anSjpNV9WiCp+/XqKfrp7+AxpHBo3U/21696VkQz+MzP/4PLNb1qhf/O/wA/pt3JfJSY/iToJ78 + aLqf1Lyc9OO3NS273gy7OBPxtYd5zJf4XDqCHO/Zfmz2gvu2idOlUN6KI3DYi7/inEB8BUMfJnRd + AiiB+REnf/jpxzjowa3GWz4nZDmM7/rZd0wDk9c4Aj0W38K5X/gFM1HHC3bQlVTRg2teXtpjrHjy + 3eScGY7fJ/Ej7sgfW3HBZS4+8YY/c9W5ixv5sfRQf6Pv3k6fnl03cPg1++AJcSid5LnsXpUOM84M + eB36Kc+yBVEe6ux8XccpPVgW9ld9s27ymrHv16in3xfJJxbmJZ8H9Tg7P//mh//gC9+YvcdzfmIe + WKzw0Wf/KRT/ni62mtKTmU3RzgxQP5mxtqa8BN5v3MBPkZhDfDxM8M644Jxv5MmZdV1O7PjUt3jF + o7OZ/PQzO4b66Ql3xwlclwB4GfXJH376MUZ/iXM9J/LodiJGBZB36UaqjheOingsPhdu2/ESrng7 + XrBRdxpOv6RF3a6jkmBa8SzM8fR2fqx1TpmLV3HBZ3YgX9e4BP+Y8oA19XYC9Tf6LsfCLf3Yid7M + O574sdn3pnQouxxzEg9TVXtciSB5nWfpSSyH/HUM0nXUH7/3YfG8xOu4xrMPl+066Ggcy1x5Zz3m + I1bjvQ8/uv13MR7v+Yl7YL360/CwuvZ1XbBEolZQAarsZoD0ZOaM3zoiXtbaP4U/iGQ+RDRP5QnO + +UaelQhZuI8XJHR86rNdjuGvfeEVboKyVXd1oryCOEHXJ3/4iyO6wExc4xnOob4wMV6JwhvTwI4X + jkCPxbdw/uSEXzDzdrxgB11J1XUwDoESkA6PFU++0hOuzo+1ys9c8YoLPjMpD/yxL8I/pjyitx5M + paH+Rt+9HZxn5UcnT3/DKtm28z+1/eEXvrfketyndZMfd2oQ/sPzO7b73vK9uHevyKXmk1mXac6A + 7i4v7eHX5YTd84lahce12X3S1aUjfPLxqpGO72UvPBlel0/54gew7M7TfmA42l+06Yj7Gk7oeMBv + 8Bcq+Job3zRYqJzaEM6ffG7H+x13yNP7O/765NzpEf5V1+Sf/bq9whu+f805nHgIMEq8cwZOdY7Z + 
+Yg8MQ78uScXPrRAobyDP/l27MV7Aw/3oysDyl4PrbBUvQU3T+4/w9ohOqUjHX65vsmDHeHjN08Q + mougebCYfXUf4onyriN6mMesro9r9zHrN27tl/3Gh//gp79C6yfo5Yn7hsWC9UO3s6/CATzaYkvN + EhtvmojSYiIs4mXmZRUuM7klOhce5jFuxgXn+Hk4iDNQBDlD15F8xs/bZH/tI77LUF+2RVtvQ9fF + FL6cHT8ugQrgy+gvcY333egEvNSrftYb08COF44VeSy+hXv6GxZltR7RyYKO+zT0KyU1OQ44/PJ1 + Ck/0BUz3hP6b3as6P54XxiqHdsVxX3mWTSyH6lCecQ8O98n1AYx9MnHs7wN5ta08rHvhyLvyRi/f + Q+zjh0Db9oxXO/qJe31iH1is+1Wf+f14/RsWiVpZ1d0MgD4JOOO3johi1z5VPOLDB4iG/IhonsoT + nONHnpWo4jEhoeOTz3Y5hr/2hXd+5SlbdVcnrosYJ+z6ZIe/ONIn0bpUnsXHcA71hYnx7WC9MQ3s + eOEI9Oj8O37Hi694O150B11J1XVgzVtefM5Cc9Rx6EtlM4w0mU/hR9yRP7byBJeZvODb8d8sT+Ex + rcF6ii88dKYvVx4b+uCXcNXH8iMIDtdjHWOXY07icR6+clih5HWe4jHAKNXLPKJznaf0EN3qqHVS + 3eQ1ad8v5Q/vyjvr8VPu/G88/Ad/wxPyg/bR5uP0Hz9PxlPr5z3jT+N/QvZ6uvxJgMOFmFSnn9QU + l378tqZlT9zAWyQyepjHfIn36ZExeT3bj81ecN82eXQplLfiCBz24q84JxBfwdCHCdMfkEpgflqj + LsZzjP4S13jDVx2M7/rZd8w9bz4BnYDhQ1ds2na8+Ip35VdhN8QlobLxlrOAMVY8C7vJOSOm9cRa + ccFnJu+BP/ZFeO6LN/yZi2cXJ/p9/eKv/OEBzPVxsTs/9Idfs4/lBxQO11M6lF2OOYlH7F0OF0M/ + 5Vk2sRzuh7Poyl641kN0q6O1nzzFhzxqiLP4ae75tM+Kz7Y3PHLfp/33Aj7BL0/8Nyw28MoXfWi7 + 9szfh9bu1pOZlwmXger2k5qXA9BIyX3ZEzfwfBPMYR7zzbjgnA/8zYtoA0UjOtj2pz7bAqqO+Gtf + +Kqi/UVbnbguYtxZ55cd/uIY/SWu8Wk3eRjf9bPemAZ2vHAEeiy+hdObjaZgJup4wQ66kqrrYNwU + gs7Vl3gOfalsYHYz+RIXfGY75O+XS/DdJwIuzQNM+t3xV/7E07dwo15k8DefU34EgcD13Oxepc7w + phLaFcf8+OXzMq5ROQ9sq17ZIw6290m3OmqdpCfxZmQeBXDGEB020n9mPK3ufmS7/srt1WcPO/KJ + fX1yHljs4cte+Pbt/NqX+5MA4lE0iijxqCHEACxStj1xA2/RlzjmMZ94Ki44+XkGybMSiYRwnZH8 + qc/4cgx/7YuvalA+7guGPkzouohZtvukHX4tEbjXI373U5jkIV87GBdzz6s6eNs6vHRmAIb1cLzv + polSt2Guy+kcl4SyyF98lUa84T/2JR44d/Oop/FDjyN/bNUZXObuK6qP+VSewrPeHsQVX+qkL7qY + MTb0wS/hWgfrJBOOpTN5V5z6kB09RpyKoW39lV95li0IXqwD5+IZ9SsOtuuDhb6A5HbVVfHKo231 + o4DGkXfljQ7Xr13/0u0P4L39JI0n74HFhl71mf8ET+ZvoKp6QmeGnLO7BAAAKHpJREFUi7aPxlK2 + TXGDy0wuic6Fh/jAcIwLTn4eZvLkzIpHU/tTn/GqiID2177sLmDCdLjccF3EOGHnlx3+4hj9Ja7x + DOdIHYxHfvfHPDEN7HjhCPRYfAvnT2z4BTNRxwt20JVUXQfjphB0rr7Ec+hLZQOzm8mXuOAz2yF/ + 
v1yC7z4RcGkeYNLvjr/yJ56+hRv1IsNT8RsW5PmGR3//p39Ef0d7632Liyf3gYWirj/32p8+Pzt7 + vT8R9k9sXoFcDn8i8b2AHVzO3czmuD+G/Lg2x7jgHG8+5VmJxCI6OByffJU/b/r2177sKkJ1ch/l + Yuvpb1g+H+t+k3OGVtKrznOHz7lT4vJzqXEJ3ufIc8h53CQPMMprZr/qIMd9Kt/Cjf547/Br9uHM + VbbuST3UxJt8cqiw2q57U3HK6Q6S13mWnilZ/kXnfoZ+9Ls+ROhhv+r3fvKYUfeXDinoeuY3rOvn + 19/w8H0veFJ+buWK/KqS5saTsv67P/Jrz7aH3wQtnn+zT0KJTHHr8vR8okjzHD7pEge8/Dw0HqJs + vPTCZ2h4XT7lHcCyO0/7geFof9H6etTlEAAvI/8NfmJcZy3aHmWOPHV0zDt5ZZvHV9B+X9Kxv8O5 + rr0e4VcZQ7fsY0aCohkL43evOYea+80DUFW3n3NOY7a+7OjEOPDnnnxYeSb9RfVyvyvGsuz10ApJ + 1Vtw1+OHDXWjLR7lMY314D0uWlENHPd53vjleAH8cuSBnXtPQOuRvDrw4z6BoavFDneG/2XZ+d0P + nz/j5dsf+HVvN/LJe33Sv2GpNfw86/xR/wfSEb3FBMCHlUODaDhd4TKTRKKLTS/mqcsgt+OCc/w8 + HIDGrcgZuo7kM35/qWZdowwSgK8mHDEPe16qZSvtuARqgC+jP/dzqJeY5GF81888MZnHcZqFI9DD + /Q0/An354RfMRCs/4w66cqvroBuBtMdY8eRbOnR+huE3ozRXvOKCz0zeA3/si/CPKY/o9/W7v9E3 + a8BQPq+GDRx+zT7cWZUteaIz9QiPFmVHD9ex2rVCyXvRw8o6OF/XMfRrPUQnRNffeBSWvLq/dOiE + wms98D+Dws+tnvyHFau5PQ8sZv6KF78GIn4zPwE4/M0lhzZs+qGicJkdoLi8mMc4n4nj9GYqfmrf + eZi2D4/7tu1PPuMFVB0znvwVxyLaX7S+vspHNwD4PeqTbT65+TL6cz/2jzJXHsa3g7wxmWfx8mHU + t7D2HbZwehPQpKN4V37ujbolFGHkFdr82SccY8U7/7SVH5jdXPHCRYfMJuTrGpfgybPjR6TsU3Hw + pb5OQFzlDw99C0fBYkMf/Jr88JQfExyuxzrGLsecxGNeheOFPBUni3mW3SjVyzyic51Dv9ZDdKuj + tZ88ZvS94Zr74VVm/NzqBU/qz61UQL3cvgcWC3j4k/4Y/ln4h7nsTwCsrSkvgfd5Cv4EqdkBfO0h + PyKaB4fl0+PhhGfkyZkRJz9ekNDxyVdxdgz/5FM4N8qvSZeKK9elDErQ9QGh3covI33CSFzjDV95 + GG+BgGaemHtef1IS6LH4Fu7pb1hL7+hkQcd9GvqVkpp8TsDhV99Xe8qP6ZbuVZ1f3wuF44XntO6R + 8yy7Ucf7J3vh1rmTTpUqdO0njxl9b7jOPdH+Wx++51Of9J9buSK/3t4H1qs/+cHt2jN+H0R7sJ/0 + JVHb+sioTxQcAvc1MtuqffslfcX5cHhGvDWe7e9Ei679yVdxDjzEh68LKL+PWJ9QiOt6fZ1lK7/s + 8BfH6C9xnLtewtKH3gVlK0/lpV/bNQtHBo/Ft3B6E9AUDAvMKz/jDrpyq+ugWwHc7bHiybd06PxA + Ml3SnsSPOOVrdgYysuoMLnPt7/ixJ/tUXHgw91B/o+9ypE5XXvmpD35N/uVHIBzuu3QouxxzEg9T + VZlciSB5nWfpSSyH/OIdfZ7SQ3SqtONkKSF5te066FB+TQ9i+cVP1p+3chU3vt7eBxbr+dIX/TCE + +Op+0mPLmvISQHyIThV3M+O4P4b8iGieigvO8ebbnVnxaILD8clX+VmJ+OKv/dpWGe2v+n19xecy + 
dS2KH/HyF0/6SJ+w3Y/nrpe45NG7oGz1vXQzzPr4k5IMHjt9sLX6hSEY9Z75GXfQlVtdB90K4G6P + WX+fH7ydH2umsyozH/NH/5rJynxzlK08J/CPKY/oT/AXb+pk+vTlymOjTvwSrus0n0zJQ3/6WnGt + I9smvxShn9k4uFg6OM+yBSGKAcpTPLIXrvUQnTMxdu0njxl9b4TQBrxf/aFXvuCt9t6+19v/wGLv + X/aSb8InxDdYsjoiXha4/MnBJ79PQzNjaI/hfeNmXHCON5/9CO5F0cEmTpdC+WwLOOzFX3Gso/1F + W9ev6x224mWHvxrBJQt+zqPMlUe3s/Ki4kp/Y7xwZPBwfyuvbcdLj+Jd+Rl30JVb6Vdu8NMeY8Xv + z63zA2sdaq54xUWHzOQ98Me+CP+Y8oh+X7/7G32zBgzl82rYwOGXzyk8nlW25LnsXkWPEdd5XAfN + 2/ENCz9k/4aHX/kbvknl3OaXq/HAggjXf/rFX3t2vv29vsS8rNj3JwcOGzYvkWaKRnsM79s/44Jz + vPnsR3Avig42cTf/JJx1jTJUn23R1tux6x328jtftzH6S5zrOZGHfF0/+45pXTpeOAI9Ft/CuV/4 + BTNRxwt20JVU6Zdrvitpj7HiybfOrfMzDL8ZpbniFRd8ZvIe+GNfhH9MeUS/r9/9jb5ZA4byeTVs + 4PBr9uHOqmzJc9m9ih6uY7VrhZL3yf6GdX5+/e89/NZP/Vo1ewVeqPHVGf/u/Blnb3/Lt+Pofrs+ + IXFq+QTtNwXfHCeGP1HrUsC/iytbl5hvAtl46QXxvlzNo40BLPtGPzAc7S/avB25r8HZ/fgK2lZc + ITipbi+063ymrw0l2OMGb+XruEMdvb/DRWcV0Hm90KvqslzVz+i3Agw89cp3n/CeL3yYIFaq1Ln3 + ec34W+A/9bAptaPGxXkmf+W9oV7uNxOWZa+HVkiIwyi4eeohiIJoi0d5TOM6eY+LlvGLoCz6zaON + vBx5YOu8lcf5zLuvv/srnNIx6/n5az/4y3/9Fz4e/4v5lPiRzlfmG5Ya+YKzR86vP/OLsNafhG9x + cdl1uJkJjrgK9GHw8Hfi500iOA4JhMuPzXErRNf+5Fu8ytf+2pfdBcy7hzP3pVDdgixbaetWLD/r + WZcw+7t6ycNCmZfxIgpvTNqpOzgCPRbfwj39DWvpFZ2s87hPQ79SUpPPCTj88nFYVyrPcev3qs5v + xpkBr+NeKM+yBVEe5OO98ISZi4Vb5046VarQtc96idc2/3Do6z/4rE/4oqv0sGJl6ya7zqvx+s0/ + 8svOzh/9Pqj3qfomUeL7NE6X7G8O9cnDxvKw0uHY1uHwEOXHSy/or8OW359Myw+gAMXDS1N2K9j+ + opWj4qSqL4nrBM8NfoHMyyX5NI16vaEEyh+76nE7h7hDns6/45/9rHjSK5wT8JNf9e3ac5xiji+H + 8+s3SdGLF+uegVe+MTsfESfGgT/35MPKM+mL9wYe7kdX4steD62QVL0FNw8fCuyPYe0QHU2qeJpH + ASK23zzJZIcJmkd5Fq77SF4p7jqUV/uwr5//xAef8azP3X7vr37C/qrjXd2Pwbha37BSOP5OaBzK + F+JE36lD5ZtFp7DEx0bQmnP4u0PxrVj+uiQ+HGz3godm2/HJ58Oct2nPX3HMoPp60qVjnOsSQAk6 + flwWejXSJ4zENT7tJg/ju37miWlgxwtHoMfiW7inv2EtvaOTBfX5SeahXympyToDh18+DusKxvJj + gsO6130ouxxzEg8DdR/NIIJ5nqf+cVB+8Y57cLhPro90WpkdibzPelHf9e2d2513fuFVfFixYNZ6 + dcffeeuLz+64/t2Q8eP7E1Zi31iyvwFQfF4engla07vYLe4+seXHi4EiW3DG8xOQ8QNYdudpv8J9 + 
CYBf28xbG84g2/EU/ug3j/JySSJN6UdmJ9jjXK/LPcQd8nT+Hf+xX6Yxj8J3dWQf86692q8yd1PO + oWa/eUteAFX3nPkmQn7harawRJ4YB/6c+4eVZ9JfVC/3oyvxZa+HVkiq3oK7nvTFsHaITulIh19U + U25RDVz7zSN3XoqgebCIjoS0HiJWBkVmH3+I+77za3f+5g/93k/5oVBetflqfsOKSl/xGT90fn72 + RfgbDR/R4eby0i/RA/Rh8NQjfvDB2Z6HhthxK3KGjq/LAL/iCCSgbIfFrhraX7T1NnQ8Mb505qdF + O/xaInBdwsQ13vBVB+NdCIIZF3PPqzzgzVh8C/f0N6zDOVAsCTruUwmYcwFAO7aBw6++F/aUHxMc + 1r3Ot+xyzEk8DGR6Dy4qDivnWXaBxO88oit74da5k06VKjT716+ff+Ds2h1fdJUfViz4aj+wWOGX + v+i1+Jr6ZVD5Ok9xffJT9DW8X58o/397VxtraVWdzzn3zgxQgQEKSP1ATSWhlaaJ/dNUrYxNTEYL + CRoxVRtpy4/GtJo2TfnRxFT7o/5ppLRNjKlaCtoMatKG4h/LxJg2/eE/EezwVQlWKylcYGBm7syc + t896nrX2Xvs97713mLngPTPvvtyz9seznrX22muv+56Zwx1Mc+xJZyiNJZkCcWZ+meMMDceksAkA + gxedMpZ+jN2Hsk4YckcGpE8P8JL843rwO0faX+jJH8P17DA7MccF43W7Diz6xJnHapVPhBpL3/YL + wrJPjghLfocjblDLDJQM+GuxT5z0xWfxpZVWZr8jDiGl6MwutsCXfQLe2BvSAyb8LUaS36FvaxVX + 48dzRdyIc37t0PBQwoL88Tj42BeyAEvwhiciCLv84QLCGBcU/ZW94keKn+yHP0RQ1ebn3WSOSnDz + 0ZvecH/w7VS58wuWRe53rrPPZ/2xnb5+omEOh5Gb5rXOI7Z1HqJwXMdC/EShuoCkibHW3Y7jQSR7 + jb7xJzdor8CQUmY3+ZvGUPN1w8s/OpH2F/ONvwYKO8ZHorATQ/EVfeIMqFb5Km58wuqdg4WKcU75 + lOLnkaRQnIHDl45DcQWBr0NgQXHfLK/8/LKeGEiQz1PnFXYIUh7RTsqDXj7JP6NjT+zMv+kfHnvf + m/5FTDv7dTkKlsXwlrd8Fn8i+Jnyk4VBr8HVvH7y2FFy7ElnKI0ltY7J0qlnaLjxCcvjxbhZ9Hpx + tSmLP+JnqR+X27rRdB7Og3PIY4YdwEb6eeqcHJ/0aC/ITW6B1znKvy3tkI47qRa4v7RvX4l9+M59 + X8Dhi3bcr7oORSzIn9iXxr6QBXnMVKFhhF3P5mmnjg1rTXGTXvEjxa/Eg7UqIkLVPz/2/jf8LXtL + 8NI7pSXw+AsP/NlsNvn00CXhKfsh5UMrp8/D0uFqHfstHfSbdSUF6JQMyro2uZBVWve4MclT0iwk + lwyU5Cnm/Rio73bNHc/agndY7IfryX83X/QanLtoovKJsI4dVPaRL+GiXw1/EwiEKc4hS9DT3SEZ + +kPS3WqE47bFTkPsg8yPKfObzeZTK09OQPB0uc5egetYbd3yBfoCYh0dH5fpsFTMWEd6Yk08fT8q + ndvxCeDkp8yFfTB98uhNV38q0ez4rsVg+doXH7gVSfA5OF/816Xzw8SOdJktOQThOrKiuZzMhVj3 + 3AE+P2FJ33ko+usePmalzJFWWVHsowNgsr+wLh76bd3sdx0WAy0u8fb1enbq/mPf/f3UeXqkYYpb + mbDthJupQ632hbcRei7L5bF94VvxSjLOKUkaMv2h5rzBH/K07GR+513goR/hORR8XN8WBon76xsU + jxcfhqMsMBA0Z3SWJ0FLqoQr6+Lhcrw4QeFBh+dtE2ixD3woFBOz3zt60+u/EKrLIi0uy9m++MAH + 
4fyXcD/3lA2waPQuL0/Pt8kssLtll9TvWOnYBL4x1noctsa+kIUng/OYE7RfBOhSMbB1N1DsY0bm + wz/ISDJDG5/L7CbtxHpacPNFr8GRSS/F/mb8JQ7hRy+uRpX0o0iEGV4OrDcSi3R3SMY5DckgzdJx + DX/YG+LHHI/3dPidj+ZNPzXaj3PGvI3dElHVHOJnOE4QKJyPyzQjZOtU947HkaPEExCbB4EdR+Fh + xydiHRLF6hj8+MiRm66+J6kvTVc3Ymnc7Tn6Dw9cP+26r+MA/HfD+2ECxsseh+ZjHaoVEb9rpaOx + 4LZuh2+HnYA+1mXP68BYK+uupuvhhgjAi/RIu7BuGIP7kbiUvUwjv1pc4u3r9exUPtnROO+nzssh + vtIvhSP5h4lwt3aEb17jHFzyHABQHAYkcPQrSfKb/lBzXt1Wi48cOy07mX8jHvphcXB/fDz8ZOQw + uoV94asWF48A7bjbhBtOYwjr4dv3xVHlsdXS+jwYRxwNg3/M+NlusnLj0Zte962is2Qdi8tyty8+ + dN10euKbSIQrIqmbS8lD9G2aYC4PFy1liYpGFC2pF0XX9xTCbc30sq9LrBRLxY9RFk/xT+4wqbSM + 9UgyTBjOWsFrKAOxTkOBq365ootQ5DDxhV6Kh0HMbhMnw3nyc9n5kn9tIGwb9bLQfxuTBfRDMgI5 + JIFfaI7bFjsL5JjI/DYMjM2npqLo54x5G9cdFhrFAzvnOgPgONpxnOmHpWJGONnRehS95Ib4wet0 + bscnjHc+f+rk6vT69Ruu5m/4zbrL1F+evyXcKKq3XPvdbmX2a7hkj9hx1ssdp2fZYXfQxpJMAZsu + HZvXWPriCbwr9vSDz9jRCr/T+rWkXQHwmvzjuvvFdVt2u+iGnvxx/wwXdky/+G96MbSNJH3iDKhW + +SpOxRnrhImo2je95DcDhanihy1DMeYNjlb1ja/dF90GppGuT73AhxShvda2Bb7sExpb2gEm/C0G + uL+0b1+ouBo/s8Ai0vDEOiYZnvTDy8e+kAV5zFQNp/Gk+AERP0wNF01xkx73m/3vJo/Op7NfXfZi + ZXtVVGPXyyzvfOSK6fwYnrQ6PHHZoXoS5suks2dyah2g0jEFjaXvyYp1JSk7bXKZHZ+GpgHb9YXk + koHin8w5f+inS2J8nI79cCg7Pp/9d/MtX+Bc1USxvxm/7xvC+Rb9wgJZGZ8mEAiDF5tGAk2+IRn6 + Q5JWei+Oa/jhT30Sgd+naqdHzWHmd54yn/CyZ+eD+BjO9IplG1v8PB6WD5wgUDgfl2lGyNbxzSa+ + ui+z4zwBgeQ67bh1N4z57xzZvWv/ZP9VTyX40naX/wkrQv/bP/+TrrvgHUiOgzxiZYmyBRhdKkmt + Y7J0HIaxLrMup61TTx3ite7zXHcHaE88pPWklb5hdH2KPsfB7xxIssBnST5TtxZ2TL8smF4MBSz6 + xBlQrdg3BTSNpU8+5y36hMkvmZNeGNQyVpxPVuq+yNPbF3kAbGTyx255X6/PH+MGF3plX/DjVOw4 + HqI2BjTt21ciLmKOfQKHryY+tGzrUGR4vKiRt+oRwPWIh+Lr4YCyCMKu7Hh8qregAS7zYIzfuvCt + Iyurv362FCvbrqKTNr703fse3jP7ybEv4QQ/GMmsrPHdluSxpMEcxsPrnqyOz9nguQE1+0lX1cnj + Y9IyiXNy1eTTepj3YyCx26Vbmqed5Gb4G0laxwN8iQddtsq3CT/3kS/hol/VLniaQNgw4pckrDf7 + zuPQH5Jyu3113LbYaZk1yvyYMb/ZbD412o9zxryNkRHWI8ppFA/DcYJA4XxcpsNSMSM+2TFWxDN4 + aEEvXI9jwBT+R+Z7jux+/Ucm+6fHEmzpu8rYpd9GbwP4P6Zn//jQ5/A3iLfmS6VDjUsIHeaCQuC1 + 
AnBb1yWr6wASYKK/7rbLutNG0to8m0m/vOhZ0hFZ1gkiP3s+X4uL1rMfFZd4+3o9O5Uv9t3fT50X + v+z29aofyS/vLoh6axnHevksDhGvJIGnvSRlr9zi1kSPH1Xh9O1k5o14bL54bhvQuL4tDBL31+Ha + N+INvB2TSfKY5DhYsR60pEo4m7c8wpf0CdCL8+CjC589csPr/ghGTPGsahaXs7at3Png7+JQ70Dy + n89NMguQFMgWpoCPmT0GaNbj0gjP68QsszFH4rFcMz1r/XVLKkuuCgDIeQ3Okak5AfXTus83/kIn + DFKvbKSYb/kIDwdNWfakpvlBfu4r4iRPKy7563y8tLEPzOlS6lJRzy6pzRt+SPKyYWVIAr/QHLct + dhbIMZH5bRgYm0+N9uOcMW/jusNCo3gYjrwECufjMh2WihnxyY75YXF0HjBEk93JEfyDER8/euPr + Ph/zZ5v0zDvbtpX2c9dD181Odl+bzaZv5qHjUjEFyq1RCLxW8LKXIpOBBMRlV1ERkdsq60ruxSco + GSyXHklHpOmlZutsLgs+YMVOxm1S5Hp2Kp/0Nc77qfPyI9yJuCW7iE+4WzvCN6+8jdBzWS9fvdrG + qqtpMN9PkuQ3/aHmvMEf8rTsZH7nXeChH+ExFHysYuJD8ri/vjHxqNjwGIOHdqQnVuw/aAsPZoiH + wFe/aIH70RPT1RvXb7hqqT+2wO1u8nL2/KH7Rpv88LXfnZ+YvRWfQ/mqZUFJvno7lBIDSRV4XquS + VJFMUKg5VLLNYUwqA5g9NRko9nk95U9xHVkc+Cyb5A0/TL8smF4MZa/oE2dAtWLf/dJY+uRz3qJP + Ovklc+IPg1rGStlntWM98vT2RR6sNTL5Y8Wpr9fnj3GDCz232/CHvSE74SdkaYZzvuCxtYgLeoRq + DH/xRVyJQ6wDhgXDlSLjY1/IgjyyQ3q8GI/Hg6PEE5DJ5K4Xj+355bO9WNl2LcbnTLO3iEjCO7Dh + 85GLngsKgY2Vo558nAAmgD5efCLx8JX1SDE3QH3DaFyecHwsfucwVOBdFrzclD+8Sz5BHJIYkqnd + 1+vZGcZJ3wnoTPVDvvX16Df9cN/dro9a4ZffA+yXN+I0IO1y236SlD3b4UDr8Z+RnUzvvCo2yU+b + j7ga3se1aAWJ++vwXLQsXDYmD+2IRlliRclpSZVwNo8vK374g/Wj+F1Wt+It4F2EnQMvnvXnwE5j + i3iLuNp1X8ORv5l3jLngYVC2+OWPSxNFBEBmWRF+qTQmfX8dScXkoiFDyEC5/JiR+bAPieQt665X + x7RSDNq8E3ChDhOfWS32pV/5hKvjxM/t5iK46BfjEfy8dG4Xc7nYkN/2ZfOGH5KhPySBX2iO2xY7 + C+SYyPw2DIzNp1aKEBDcF9djh4VG8QCCRUpAsKBDO0WAxfmLGet48UavFKv5/Jx4C5hCze7Z/5aw + v2O8RTyxPnsrcuBLzI1SFJgNXix0OW1dl50dyxZf93mfpgmvFqVoWDJCQfqGqGOoYWRj52EPL0je + wGdJvOCmID9MvyyYXgwFLPrEGVDN5qVWcSqqWCdMREWfMPmV9cKglrFiDqRW9Y2v3Rd53JxpZV7q + BT6k8fb4Y7wR3uZP2Q7pW/+1v7Rv8wEt9oVeGgOHr7yPug4YFuSPx8HHvpAFeWSH9HhRhMKuv628 + 68WLzo23gBGFkL1TiulzQ67c/dBH8cth/w7JMPC3iJ6smyWXXQque7zsUvkYwpPYk5SQmnxaJ7xe + Auq7XdO3sUvi47TSvN8Sxw3wuT4B/qLLswU/95Ev4aJfcNDtQjaBsGHEL0mgm33ncegPSfe7EY7b + FjsNsQ8yP6bMbzabT+2VfMKCF0dPTrtbj7733HkLmELNblyB/vy5M7a3iJPJ13CJ8beIusTlbRxv + 
l4WICy7sEusSMosjgiw2pq/k1hOU65VQp0vvPznj0kfAo0jFfC0ujih23DCLRuLlOPYB2bNT+aSv + cd5PnS9uo9PXi426OW08NtGXfvmjqOmSR5wGJPC0l6Ts2YEMtB7/GdnJ9M674K/NR1wN72O9XfMh + edxfh5fiBjyPMXhMWt5IKL+C1nnw0cLvTbvpzYfP8r8F5HY3efGs3wRxLiz9/fcvXNkz/Wt8zu6j + VoTyJWYSeVFQluluMgcxr3UPErMwrSMLS3EjxMKdigtHsqdlZW2/ONQxUTKArs2bv/TLRfhFJP1O + /JzUOOMG+QGo8z2/nbfald8xNjO8nMA10ubxLbaejEAOSSPsN8c1/GEP2Jdkp89t48zvfITZfGql + CNm5Yt7GdWeFRnGwfCAvgcL5uEzTc1vHNxsz6K4XfuaqW/GvMB+N2XNVWozH5hFYvfP7+6az6R3d + tPuFthgge+ySUqQiFLlp+mVdudZ/sokkLkUgrq3ppUa7Nvb5gg9YseMTxKUi2Nfr2al80tdY+rwk + SZ9uFTO2b3OrTHg8iCr++qgVvI3Qc6lLHnEakMDRryQVX/NgoPX4z8hOpt/IX5uPuBrexy/DE9aD + k9nsDw7vf/WO/9dscthezr5n38tpYsm4D3aru/7nvz6Gvy7+FD5sepFy1sKEJLXLWoVfqnRX++v2 + ExVf5ZJ7kqtI1JQv69TfpPjEaXnRoB7vjhbcfLWXcekYiv20nmjSPnORWvSL8QCv/PDAuB0WJfA3 + UlEsV92jqrEXhyg2jUy+l67jG/6w91LtFNLUyfzOx1WbT03F188Z8zauJ6taZmEuOPISKJyPy7Ql + 2GTyHMafxFPV3+Cp6oRNjE0RiCswxqMfgQOPv3rX+vrtk9n0A7lK6bJ7EYrcNF1mZRG4hH4dbZ5N + 41IsFtYdFXiXBV9o0IFdFglTIW6xmBS9np0y3/Dn/chQ5a9+cbvZP/qhdfnh/b7gbTS/paDLy22E + d60Ejn4mSX7TH2rOG/whT8tO5nfeBR76YXFyf3y8HU9YID2A3wr68Rfec8WPsytjXxFQdo7R2DAC + q1/G28RudscEbxN1aVJRwuWNu0ICu8x+iS2VrWiNT1ip+CAmisuAjEAOyaHTcRyLCc/hDOxsxe9+ + E2Z2U1Mx25YnrAeRX+PbvxTboe5YsIai0p+zt4n/+/DHupP+NhHXjk8gcfsMP1CsSvUin34ilyec + eKYwvdTqk43mCz5gxY5PUN8vK3hCv+j17JR5t6tx3k+1S7eKGbuUlT/vV7hwkKP2pVeEFp5YjBff + Ec6hIiR7bbEoRnr88VPktOwUUnMI9hCnBR4WrfDYcfHDKYbkcX99Y+JxPqjbGAaem8/n49s/xmvr + l02ybGvlcw5hbxNPrN+OS/4BphqTmTmtUFgRwIILpLD/5LUJNpOpuHBkeF+nYlr3+VpkxEID6FJP + jnCB6jFvM0mfAH+pfLJbxwXg+8hFatGvhj+KhlPwcsJ+I7FGd4dk6A9J52yE4xr+sDfEjzlG/3T4 + nY/2TT812o9zxryN3RJR1RziZzhOENfBnzvn3cpt49u/FNAtun5TtkCNy00EVr/8yL7ZrLsDtw9v + E33Jq4ULJqff+gBApktPRSS3KaTGImRjn9+smFCNuMTb1+vZqXyyq7H0VU3qvPj5Cndy8XL/svtu + V+jea7213JcuOaMT3rUSeNpLkvEwnqHW40dVOH07mX8jHvphcXJ/fHyqf4aF/wfwwenK7Nbn333l + f2RzY3/rCLS3ZWv8iIgIHOhWdp98+Gbk6p/ioxC/xGm7tH6JLZXHJ6zxCctrnp6suul38fuqPvPC + 4Sv+afKB6clIpVGeegTGgnXqsdoQufKVR9+DJ67b8Psd3+Y1i8WqVC9q6idyeVKJZ4rek4mtCy5Z + 
8HFSpShm3PiEVZ7AGJ+d9YTVzSf/PlmZ/eXz77783g2TaFw4pQjENTgl8AjaPAKrBx59G35Z4G3T + 2eQ94xPW4ts6SzbFZUDWRxG9Hc7jobD7Ot9eokg18qXa2Yrf+Qgzu6np7a29XcZ+MW9je7a2HaJ/ + 34lu8ukj+1/9n0ll7J5BBMaCdQbB20h194FHr8P/VH0bPgpxM56QVoRTEpcnpvEJa9PixTjh8ocs + T1BDQffihQrRFDsVk4HiCI4oLsEfsqF3vgUem4/zMwUfW9HCf3ir1x2Yz1f/4vD+yx5s+MbBGUdg + LFhnHMJNCA789xt3zY//yXQ6uwUPAedFkvNyQE0lDNLexljztzNl3efrWDDiCIce7470qR7zBk36 + NoxW+UIv/kDdEU7U4hbfdjb8USycgpccPI3EWr7qZr2MQ39IOmcjHNfwh73Mi/6WdhpiH2R+5+OK + zaemYmbxm78AS59fX9312aPvuuQHCTJ2tzECythtJBypBiLw5ceu3L3afQIrvz+bTi/WJfVr5EUl + tKxIsLmsRcMRNg+CFrdYTIpeXNcFPtkRTvqqHnVefoQ7UdSSf/Qj+eXdBdErQrrkqVhBwVgVF0jg + 6VeSLI69YlHs9PjjSeu07BRSc0gbXOChH/IY3afxj2rdMb1gz+3Pvn3vM1l97G9/BDz7tp94ZByI + wIFHL95tRWsy/QQ+hHNluaS5SOEGlGKzUGScM823RcYvfeaDivHldkr8XhRZRKi/6BeLSPD75Q47 + Q0Wn7BegUpxM38ahPySDNEvHbYudzBv9zI8585fN5q11kyfR+6vn1n/2c5PfnL6oyfH15Y5Am8kv + t7WRv0Rg99efeMvsxPEP4dL/Fr5fHwuluKSixMsdJ2XzmGhxi8WkFKV4dlngE6Fw0lcVqfP0ye0W + PudhsaIf7nnMx0ay7BWhhScWYM0M92kSeNpLUvYMMdB6/Bs+GUF1SzuZ3nmTv0+A4CsnT87uOvzu + Sx/I0LH/ykTA0/GVMTZaGYgAfjPbeV99Yt98Mv8Q7vz7cCAX9YtDHbt+Kj5tkfFLH8Uj45Lpyqfj + r+PE70WRRYQ8i0WRRQQqph9FIswMFR2zVoqS6eXxBkWHvEGaZS4msN/Yy7zob2kn80Y/+OfdM/jL + k3tOzlbvPrxv77ex2Q2qZiiO8uWMgJ3l2HZKBA4+ft7up2fvXem6D+Et434Ugt2bFRO6vUExKXpx + XYlTcalFKMbjExbjZUUKDWIdJfAb+Fly97OvufSfJ784XefC+PJTj8BYsH7qR7CBA/f+4JLz1ifv + xyPJh/FJ+rcDZXdKLRWf8Qlr8e1j80RlgcN3ebLzJ6fyRBhFChDgvg22uyd7ZveMf4DuubbDRFyB + HebW6E4Tgft+dPn568fxa266fd1sdj3elOCfKPOjo1x8uzY+YfnbRASyFCv0c/HCb0l4ZDad3Y8P + Th2c7971b4ffceFTTdzHwY6LwFiwdtyRbO3Q+fc9+dru+ORd+Dfa9uE22q91fq1pWe3Kb/c4wfn2 + mEsxS09q0nPbTtTiFotiwx9PLk7R/JkS+NIfXDdFoxST0B+SztkIx700O92T3by7fzJbOXhsNv/m + kesve7LhHAc7PgJtJu94d0cHhyKw51+fvGblJJ68Jp0Vr+uBubwUmygPC8VJRy+cipFXO5qweXVc + WNFBt85jHRMBqx3hm9deETqV4kW/oBeS/MYz1Hr88Xavm89/BP2D80l3cNKt3P/sb+x9bEh9nFue + CHhWLo/Do6dbRAB/Uvyqb/z4WvwPItdPZngCm3TvxCftL81aLAKYMGmtjjlUFWIxykVq5z9hwfGn + 8Ynzb+Fzbvcfn3YHD++77Hu+o1GcJREYC9ZZcpAbbgMF7Px7/+/nVqbHr5lMZ9fgf8y+Bk8g7OPh + 
60345P0q/8ddK156hCJVFDM+oGGmFjVPGce/0k9YcPEE/Hwcny44hBJ6CNs7BPcOrc92HzryzvN/ + CEc3eAzbMELjwhJFYCxYS3RY2+4qfvXzRcefeuNkPkUBQyHDNxLiGpSna3DrX4Nixtuv4vXKPWF1 + MDvt5j/sJrNDkIcwftgK1ImTq4eeX7nwsfFfktn2TFgawrFgLc1RvcKOfqfbdeHzz188Pd5dMpkd + 3zudrO6dTE/unUxWICeXwJu9+EN/jPGNMYqa+jEnd9cg1vBEt4aiszaxD2HOpmvzebcG3TX8Mru1 + ldnkGfwt3RrK4Rp41ubT2drhC161NvmV6XFRjK9jBGoE/h//xb5CiJqhJQAAAABJRU5ErkJggg== + installModes: + - supported: true + type: OwnNamespace + - supported: true + type: SingleNamespace + - supported: true + type: MultiNamespace + - supported: true + type: AllNamespaces + install: + strategy: deployment + spec: + deployments: + - name: clickhouse-operator + spec: + replicas: 1 + selector: + matchLabels: + app: clickhouse-operator + template: + metadata: + labels: + app: clickhouse-operator + spec: + containers: + - env: + - name: OPERATOR_POD_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: OPERATOR_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: OPERATOR_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: OPERATOR_POD_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: OPERATOR_CONTAINER_CPU_REQUEST + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: requests.cpu + - name: OPERATOR_CONTAINER_CPU_LIMIT + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: limits.cpu + - name: OPERATOR_CONTAINER_MEM_REQUEST + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: requests.memory + - name: OPERATOR_CONTAINER_MEM_LIMIT + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: limits.memory + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: docker.io/altinity/clickhouse-operator:0.25.6 + imagePullPolicy: Always + name: 
clickhouse-operator + - image: docker.io/altinity/metrics-exporter:0.25.6 + imagePullPolicy: Always + name: metrics-exporter + serviceAccountName: clickhouse-operator + permissions: + - serviceAccountName: clickhouse-operator + rules: + # + # Core API group + # + - apiGroups: + - "" + resources: + - configmaps + - services + - persistentvolumeclaims + - secrets + verbs: + - get + - list + - patch + - update + - watch + - create + - delete + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - patch + - update + - watch + - delete + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + # + # apps.* resources + # + - apiGroups: + - apps + resources: + - statefulsets + verbs: + - get + - list + - patch + - update + - watch + - create + - delete + - apiGroups: + - apps + resources: + - replicasets + verbs: + - get + - patch + - update + - delete + # The operator deployment personally, identified by name + - apiGroups: + - apps + resources: + - deployments + resourceNames: + - clickhouse-operator + verbs: + - get + - patch + - update + - delete + # + # policy.* resources + # + - apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - get + - list + - patch + - update + - watch + - create + - delete + # + # discovery.* resources + # + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch + # + # apiextensions + # + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + # clickhouse - related resources + - apiGroups: + - clickhouse.altinity.com + # + # The operator's specific Custom Resources + # + + resources: + - clickhouseinstallations + verbs: + - get + 
- list + - watch + - patch + - update + - delete + - apiGroups: + - clickhouse.altinity.com + resources: + - clickhouseinstallationtemplates + - clickhouseoperatorconfigurations + verbs: + - get + - list + - watch + - apiGroups: + - clickhouse.altinity.com + resources: + - clickhouseinstallations/finalizers + - clickhouseinstallationtemplates/finalizers + - clickhouseoperatorconfigurations/finalizers + verbs: + - update + - apiGroups: + - clickhouse.altinity.com + resources: + - clickhouseinstallations/status + - clickhouseinstallationtemplates/status + - clickhouseoperatorconfigurations/status + verbs: + - get + - update + - patch + - create + - delete + # clickhouse-keeper - related resources + - apiGroups: + - clickhouse-keeper.altinity.com + resources: + - clickhousekeeperinstallations + verbs: + - get + - list + - watch + - patch + - update + - delete + - apiGroups: + - clickhouse-keeper.altinity.com + resources: + - clickhousekeeperinstallations/finalizers + verbs: + - update + - apiGroups: + - clickhouse-keeper.altinity.com + resources: + - clickhousekeeperinstallations/status + verbs: + - get + - update + - patch + - create + - delete diff --git a/deploy/operatorhub/0.25.6/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.6/clickhouseinstallations.clickhouse.altinity.com.crd.yaml new file mode 100644 index 000000000..1a42a88be --- /dev/null +++ b/deploy/operatorhub/0.25.6/clickhouseinstallations.clickhouse.altinity.com.crd.yaml @@ -0,0 +1,1453 @@ +# Template Parameters: +# +# KIND=ClickHouseInstallation +# SINGULAR=clickhouseinstallation +# PLURAL=clickhouseinstallations +# SHORT=chi +# OPERATOR_VERSION=0.25.6 +# +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clickhouseinstallations.clickhouse.altinity.com + labels: + clickhouse.altinity.com/chop: 0.25.6 +spec: + group: clickhouse.altinity.com + scope: Namespaced + names: + kind: ClickHouseInstallation + singular: 
clickhouseinstallation + plural: clickhouseinstallations + shortNames: + - chi + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: version + type: string + description: Operator version + priority: 1 # show in wide view + jsonPath: .status.chop-version + - name: clusters + type: integer + description: Clusters count + jsonPath: .status.clusters + - name: shards + type: integer + description: Shards count + priority: 1 # show in wide view + jsonPath: .status.shards + - name: hosts + type: integer + description: Hosts count + jsonPath: .status.hosts + - name: taskID + type: string + description: TaskID + priority: 1 # show in wide view + jsonPath: .status.taskID + - name: status + type: string + description: Resource status + jsonPath: .status.status + - name: hosts-completed + type: integer + description: Completed hosts count + jsonPath: .status.hostsCompleted + - name: hosts-updated + type: integer + description: Updated hosts count + priority: 1 # show in wide view + jsonPath: .status.hostsUpdated + - name: hosts-added + type: integer + description: Added hosts count + priority: 1 # show in wide view + jsonPath: .status.hostsAdded + - name: hosts-deleted + type: integer + description: Hosts deleted count + priority: 1 # show in wide view + jsonPath: .status.hostsDeleted + - name: endpoint + type: string + description: Client access endpoint + priority: 1 # show in wide view + jsonPath: .status.endpoint + - name: age + type: date + description: Age of the resource + # Displayed in all priorities + jsonPath: .metadata.creationTimestamp + - name: suspend + type: string + description: Suspend reconciliation + # Displayed in all priorities + jsonPath: .spec.suspend + subresources: + status: {} + schema: + openAPIV3Schema: + description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters" + type: object + required: + - spec + properties: + apiVersion: + 
description: | + APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: | + Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + type: object + description: | + Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other + properties: + chop-version: + type: string + description: "Operator version" + chop-commit: + type: string + description: "Operator git commit SHA" + chop-date: + type: string + description: "Operator build date" + chop-ip: + type: string + description: "IP address of the operator's pod which managed this resource" + clusters: + type: integer + minimum: 0 + description: "Clusters count" + shards: + type: integer + minimum: 0 + description: "Shards count" + replicas: + type: integer + minimum: 0 + description: "Replicas count" + hosts: + type: integer + minimum: 0 + description: "Hosts count" + status: + type: string + description: "Status" + taskID: + type: string + description: "Current task id" + taskIDsStarted: + type: array + description: "Started task ids" + nullable: true + items: + type: string + taskIDsCompleted: + type: array + description: "Completed task ids" + nullable: true + items: + type: string + action: + type: string + description: "Action" + actions: + type: array + description: "Actions" + nullable: true + items: + type: string + error: + type: 
string + description: "Last error" + errors: + type: array + description: "Errors" + nullable: true + items: + type: string + hostsUnchanged: + type: integer + minimum: 0 + description: "Unchanged Hosts count" + hostsUpdated: + type: integer + minimum: 0 + description: "Updated Hosts count" + hostsAdded: + type: integer + minimum: 0 + description: "Added Hosts count" + hostsCompleted: + type: integer + minimum: 0 + description: "Completed Hosts count" + hostsDeleted: + type: integer + minimum: 0 + description: "Deleted Hosts count" + hostsDelete: + type: integer + minimum: 0 + description: "About to delete Hosts count" + pods: + type: array + description: "Pods" + nullable: true + items: + type: string + pod-ips: + type: array + description: "Pod IPs" + nullable: true + items: + type: string + fqdns: + type: array + description: "Pods FQDNs" + nullable: true + items: + type: string + endpoint: + type: string + description: "Endpoint" + endpoints: + type: array + description: "All endpoints" + nullable: true + items: + type: string + generation: + type: integer + minimum: 0 + description: "Generation" + normalized: + type: object + description: "Normalized resource requested" + nullable: true + x-kubernetes-preserve-unknown-fields: true + normalizedCompleted: + type: object + description: "Normalized resource completed" + nullable: true + x-kubernetes-preserve-unknown-fields: true + actionPlan: + type: object + description: "Action Plan" + nullable: true + x-kubernetes-preserve-unknown-fields: true + hostsWithTablesCreated: + type: array + description: "List of hosts with tables created by the operator" + nullable: true + items: + type: string + hostsWithReplicaCaughtUp: + type: array + description: "List of hosts with replica caught up" + nullable: true + items: + type: string + usedTemplates: + type: array + description: "List of templates used to build this CHI" + nullable: true + x-kubernetes-preserve-unknown-fields: true + items: + type: object + 
x-kubernetes-preserve-unknown-fields: true + spec: + type: object + # x-kubernetes-preserve-unknown-fields: true + description: | + Specification of the desired behavior of one or more ClickHouse clusters + More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md + properties: + taskID: + type: string + description: | + Allows to define custom taskID for CHI update and watch status of this update execution. + Displayed in all .status.taskID* fields. + By default (if not filled) every update of CHI manifest will generate random taskID + stop: &TypeStringBool + type: string + description: | + Allows to stop all ClickHouse clusters defined in a CHI. + Works as the following: + - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. + - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s. + enum: + # List StringBoolXXX constants from model + - "" + - "0" + - "1" + - "False" + - "false" + - "True" + - "true" + - "No" + - "no" + - "Yes" + - "yes" + - "Off" + - "off" + - "On" + - "on" + - "Disable" + - "disable" + - "Enable" + - "enable" + - "Disabled" + - "disabled" + - "Enabled" + - "enabled" + restart: + type: string + description: | + In case 'RollingUpdate' specified, the operator will always restart ClickHouse pods during reconcile. + This options is used in rare cases when force restart is required and is typically removed after the use in order to avoid unneeded restarts. + enum: + - "" + - "RollingUpdate" + suspend: + !!merge <<: *TypeStringBool + description: | + Suspend reconciliation of resources managed by a ClickHouse Installation. + Works as the following: + - When `suspend` is `true` operator stops reconciling all resources. + - When `suspend` is `false` or not set, operator reconciles all resources. 
+ troubleshoot: + !!merge <<: *TypeStringBool + description: | + Allows to troubleshoot Pods during CrashLoopBack state. + This may happen when wrong configuration applied, in this case `clickhouse-server` wouldn't start. + Command within ClickHouse container is modified with `sleep` in order to avoid quick restarts + and give time to troubleshoot via CLI. + Liveness and Readiness probes are disabled as well. + namespaceDomainPattern: + type: string + description: | + Custom domain pattern which will be used for DNS names of `Service` or `Pod`. + Typical use scenario - custom cluster domain in Kubernetes cluster + Example: %s.svc.my.test + templating: + type: object + # nullable: true + description: | + Optional, applicable inside ClickHouseInstallationTemplate only. + Defines current ClickHouseInstallationTemplate application options to target ClickHouseInstallation(s)." + properties: + policy: + type: string + description: | + When defined as `auto` inside ClickhouseInstallationTemplate, this ClickhouseInstallationTemplate + will be auto-added into ClickHouseInstallation, selectable by `chiSelector`. + Default value is `manual`, meaning ClickHouseInstallation should request this ClickhouseInstallationTemplate explicitly. 
+ enum: + - "" + - "auto" + - "manual" + chiSelector: + type: object + description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate" + # nullable: true + x-kubernetes-preserve-unknown-fields: true + reconciling: &TypeReconcile + type: object + description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side" + # nullable: true + properties: + policy: + type: string + description: | + DISCUSSED TO BE DEPRECATED + Syntax sugar + Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config + Possible values: + - wait - should wait to exclude host, complete queries and include host back into the cluster + - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster + enum: + - "" + - "wait" + - "nowait" + configMapPropagationTimeout: + type: integer + description: | + Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod` + More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically + minimum: 0 + maximum: 3600 + cleanup: + type: object + description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle" + # nullable: true + properties: + unknownObjects: + type: object + description: | + Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator, + but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource. 
+ Default behavior is `Delete`" + # nullable: true + properties: + statefulSet: &TypeObjectsCleanup + type: string + description: "Behavior policy for unknown StatefulSet, `Delete` by default" + enum: + # List ObjectsCleanupXXX constants from model + - "" + - "Retain" + - "Delete" + pvc: + type: string + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown PVC, `Delete` by default" + configMap: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown ConfigMap, `Delete` by default" + service: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown Service, `Delete` by default" + reconcileFailedObjects: + type: object + description: | + Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile. + Default behavior is `Retain`" + # nullable: true + properties: + statefulSet: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed StatefulSet, `Retain` by default" + pvc: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed PVC, `Retain` by default" + configMap: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed ConfigMap, `Retain` by default" + service: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed Service, `Retain` by default" + macros: + type: object + description: "macros parameters" + properties: + sections: + type: object + description: "sections behaviour for macros" + properties: + users: + type: object + description: "sections behaviour for macros on users" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + profiles: + type: object + description: "sections behaviour for macros on profiles" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + quotas: + type: object + description: "sections behaviour for macros on quotas" + properties: + enabled: + !!merge <<: *TypeStringBool + 
description: "enabled or not" + settings: + type: object + description: "sections behaviour for macros on settings" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + files: + type: object + description: "sections behaviour for macros on files" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + runtime: &TypeReconcileRuntime + type: object + description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle" + properties: + reconcileShardsThreadsNumber: + type: integer + minimum: 1 + maximum: 65535 + description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default" + reconcileShardsMaxConcurrencyPercent: + type: integer + minimum: 0 + maximum: 100 + description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + host: &TypeReconcileHost + type: object + description: | + Whether the operator during reconcile procedure should wait for a ClickHouse host: + - to be excluded from a ClickHouse cluster + - to complete all running queries + - to be included into a ClickHouse cluster + respectfully before moving forward + properties: + wait: + type: object + properties: + exclude: + !!merge <<: *TypeStringBool + queries: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries" + include: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster" + replicas: + type: object + description: "Whether the operator during reconcile procedure should wait for replicas to catch-up" + properties: + all: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up" + new: + !!merge <<: *TypeStringBool + 
description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up" + delay: + type: integer + description: "replication max absolute delay to consider replica is not delayed" + probes: + type: object + description: "What probes the operator should wait during host launch procedure" + properties: + startup: + !!merge <<: *TypeStringBool + description: | + Whether the operator during host launch procedure should wait for startup probe to succeed. + In case probe is unspecified wait is assumed to be completed successfully. + Default option value is to do not wait. + readiness: + !!merge <<: *TypeStringBool + description: | + Whether the operator during host launch procedure should wait for ready probe to succeed. + In case probe is unspecified wait is assumed to be completed successfully. + Default option value is to wait. + drop: + type: object + properties: + replicas: + type: object + description: | + Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated + properties: + onDelete: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop replicas when replica is deleted + onLostVolume: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop replicas when replica volume is lost + active: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated + reconcile: + !!merge <<: *TypeReconcile + description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side" + defaults: + type: object + description: | + define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level + More info: 
https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults + # nullable: true + properties: + replicasUseFQDN: + !!merge <<: *TypeStringBool + description: | + define should replicas be specified by FQDN in ``. + In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup + "no" by default + distributedDDL: + type: object + description: | + allows change `` settings + More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl + # nullable: true + properties: + profile: + type: string + description: "Settings from this profile will be used to execute DDL queries" + storageManagement: + type: object + description: default storage management options + properties: + provisioner: &TypePVCProvisioner + type: string + description: "defines `PVC` provisioner - be it StatefulSet or the Operator" + enum: + - "" + - "StatefulSet" + - "Operator" + reclaimPolicy: &TypePVCReclaimPolicy + type: string + description: | + defines behavior of `PVC` deletion. 
+ `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet + enum: + - "" + - "Retain" + - "Delete" + templates: &TypeTemplateNames + type: object + description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource" + # nullable: true + properties: + hostTemplate: + type: string + description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`" + podTemplate: + type: string + description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + dataVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + logVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + serviceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" + serviceTemplates: + type: array + description: "optional, template names from chi.spec.templates.serviceTemplates. 
used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" + nullable: true + items: + type: string + clusterServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`" + shardServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`" + replicaServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`" + volumeClaimTemplate: + type: string + description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + configuration: + type: object + description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource" + # nullable: true + properties: + zookeeper: &TypeZookeeperConfig + type: object + description: | + allows configure .. 
section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/` + `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separatelly look examples on https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/ + currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl` + More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper + # nullable: true + properties: + nodes: + type: array + description: "describe every available zookeeper cluster node for interaction" + # nullable: true + items: + type: object + #required: + # - host + properties: + host: + type: string + description: "dns name or ip address for Zookeeper node" + port: + type: integer + description: "TCP port which used to connect to Zookeeper node" + minimum: 0 + maximum: 65535 + secure: + !!merge <<: *TypeStringBool + description: "if a secure connection to Zookeeper is required" + availabilityZone: + type: string + description: "availability zone for Zookeeper node" + session_timeout_ms: + type: integer + description: "session timeout during connect to Zookeeper" + operation_timeout_ms: + type: integer + description: "one operation timeout during Zookeeper transactions" + root: + type: string + description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)" + identity: + type: string + description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper" + use_compression: + !!merge <<: *TypeStringBool + description: "Enables compression in Keeper protocol if set to true" + users: + type: object + description: | + allows configure .. 
section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure password hashed, authorization restrictions, database level security row filters etc. + More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers + + any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets + secret value will pass in `pod.spec.containers.evn`, and generate with from_env=XXX in XML in /etc/clickhouse-server/users.d/chop-generated-users.xml + it not allow automatically updates when updates `secret`, change spec.taskID for manually trigger reconcile cycle + + look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + + any key with prefix `k8s_secret_` shall has value with format namespace/secret/key or secret/key + in this case value from secret will write directly into XML tag during render *-usersd ConfigMap + + any key with prefix `k8s_secret_env` shall has value with format namespace/secret/key or secret/key + in this case value from secret will write into environment variable and write to XML tag via from_env=XXX + + look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + # nullable: true + x-kubernetes-preserve-unknown-fields: true + profiles: + type: object + description: | + allows configure .. 
section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure any aspect of settings profile + More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles + # nullable: true + x-kubernetes-preserve-unknown-fields: true + quotas: + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure any aspect of resource quotas + More details: https://clickhouse.tech/docs/en/operations/quotas/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas + # nullable: true + x-kubernetes-preserve-unknown-fields: true + settings: &TypeSettings + type: object + description: | + allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings + + any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets + look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + + secret value will pass in `pod.spec.env`, and generate with from_env=XXX in XML in /etc/clickhouse-server/config.d/chop-generated-settings.xml + it not allow automatically updates when updates `secret`, change spec.taskID for manually trigger reconcile cycle + # nullable: true + x-kubernetes-preserve-unknown-fields: true + files: &TypeFiles + type: object + description: | + allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + every key in this object is the file name + every value in this object is the file content + you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html + each key could contains prefix like {common}, {users}, {hosts} or config.d, users.d, conf.d, wrong prefixes will be ignored, subfolders also will be ignored + More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml + + any key could contains `valueFrom` with `secretKeyRef` which allow pass values from kubernetes secrets + secrets will mounted into pod as separate volume in /etc/clickhouse-server/secrets.d/ + and will automatically update when update secret + it useful for pass SSL certificates from cert-manager or similar tool + look 
into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + # nullable: true + x-kubernetes-preserve-unknown-fields: true + clusters: + type: array + description: | + describes clusters layout and allows change settings on cluster-level, shard-level and replica-level + every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server` + all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml` + Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + If `cluster` contains zookeeper settings (could be inherited from top `chi` level), when you can create *ReplicatedMergeTree tables + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources" + minLength: 1 + # See namePartClusterMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + zookeeper: + !!merge <<: *TypeZookeeperConfig + description: | + optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.zookeeper` settings + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` + schemaPolicy: + type: object + description: | + describes how schema is propagated within replicas and shards + properties: + replica: + type: string + description: "how schema is propagated within a replica" + enum: + # List SchemaPolicyReplicaXXX constants from model + - "" + - "None" + - "All" + shard: + type: string + description: "how schema is propagated between shards" + enum: + # List SchemaPolicyShardXXX constants from model + - "" + - "None" + - "All" + - "DistributedTablesOnly" + insecure: + !!merge <<: *TypeStringBool + description: optional, open insecure ports for cluster, defaults to "yes" + secure: + !!merge <<: *TypeStringBool + description: optional, open secure ports for cluster + secret: + type: object + description: "optional, shared secret value to secure cluster communications" + properties: + auto: + !!merge <<: *TypeStringBool + description: "Auto-generate shared secret value to secure cluster communications" + value: + description: "Cluster shared secret value in plain text" + type: string + valueFrom: + description: "Cluster shared secret source" + type: object + properties: + secretKeyRef: + 
description: | + Selects a key of a secret in the clickhouse installation namespace. + Should not be used if value is not empty. + type: object + properties: + name: + description: | + Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - name + - key + pdbManaged: + !!merge <<: *TypeStringBool + description: | + Specifies whether the Pod Disruption Budget (PDB) should be managed. + During the next installation, if PDB management is enabled, the operator will + attempt to retrieve any existing PDB. If none is found, it will create a new one + and initiate a reconciliation loop. If PDB management is disabled, the existing PDB + will remain intact, and the reconciliation loop will not be executed. By default, + PDB management is enabled. + pdbMaxUnavailable: + type: integer + description: | + Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction, + i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions + by specifying 0. This is a mutually exclusive setting with "minAvailable". 
+ minimum: 0 + maximum: 65535 + reconcile: + type: object + description: "allow tuning reconciling process" + properties: + runtime: + !!merge <<: *TypeReconcileRuntime + host: + !!merge <<: *TypeReconcileHost + layout: + type: object + description: | + describe current cluster layout, how much shards in cluster, how much replica in shard + allows override settings on each shard and replica separatelly + # nullable: true + properties: + shardsCount: + type: integer + description: | + how much shards for current ClickHouse cluster will run in Kubernetes, + each shard contains shared-nothing part of data and contains set of replicas, + cluster contains 1 shard by default" + replicasCount: + type: integer + description: | + how much replicas in each shards for current cluster will run in Kubernetes, + each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, + every shard contains 1 replica by default" + shards: + type: array + description: | + optional, allows override top-level `chi.spec.configuration`, cluster-level + `chi.spec.configuration.clusters` settings for each shard separately, + use it only if you fully understand what you do" + # nullable: true + items: + type: object + properties: + name: + type: string + description: "optional, by default shard name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartShardMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + definitionType: + type: string + description: "DEPRECATED - to be removed soon" + weight: + type: integer + description: | + optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine, + will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml + More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + internalReplication: + !!merge <<: *TypeStringBool 
+ description: | + optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise + allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication, + will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml + More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard + override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates` + replicasCount: + type: integer + description: | + optional, how much replicas in selected shard for selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, + shard contains 1 replica by default + override 
cluster-level `chi.spec.configuration.clusters.layout.replicasCount` + minimum: 1 + replicas: + type: array + description: | + optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards` + # nullable: true + items: + # Host + type: object + properties: + name: + type: string + description: "optional, by default replica name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + insecure: + !!merge <<: *TypeStringBool + description: | + optional, open insecure ports for cluster, defaults to "yes" + secure: + !!merge <<: *TypeStringBool + description: | + optional, open secure ports + tcpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort` + allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + tlsPort: + type: integer + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort` + allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + httpsPort: + type: integer + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort` + allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol + minimum: 1 + maximum: 65535 + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings 
inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates` + replicas: + type: array + description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do" + # nullable: true + items: + type: object + properties: + name: + type: string + description: "optional, by default replica name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartShardMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` + shardsCount: + type: integer + description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`" + minimum: 1 + shards: + type: array + description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents" + # nullable: true + items: + # Host + type: object + properties: + name: + type: string + description: "optional, by default shard name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + insecure: + !!merge <<: *TypeStringBool + description: | + optional, open insecure ports for cluster, defaults to "yes" + secure: 
+ !!merge <<: *TypeStringBool + description: | + optional, open secure ports + tcpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort` + allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + tlsPort: + type: integer + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort` + allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + httpsPort: + type: integer + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort` + allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol + minimum: 1 + maximum: 65535 + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates` + templates: + type: object + description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it" + # nullable: true + properties: + hostTemplates: + type: array + description: "hostTemplate will use during apply to generate `clickhose-server` config files" + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level 
`chi.spec.configuration.clusters.layout.shards.temlates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`" + type: string + portDistribution: + type: array + description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs" + # nullable: true + items: + type: object + #required: + # - type + properties: + type: + type: string + description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network" + enum: + # List PortDistributionXXX constants + - "" + - "Unspecified" + - "ClusterScopeIndex" + spec: + # Host + type: object + properties: + name: + type: string + description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + insecure: + !!merge <<: *TypeStringBool + description: | + optional, open insecure ports for cluster, defaults to "yes" + secure: + !!merge <<: *TypeStringBool + description: | + optional, open secure ports + tcpPort: + type: integer + description: | + optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]` + More info: https://clickhouse.tech/docs/en/interfaces/tcp/ + minimum: 1 + maximum: 65535 + tlsPort: + type: integer + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + 
optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]` + More info: https://clickhouse.tech/docs/en/interfaces/http/ + minimum: 1 + maximum: 65535 + httpsPort: + type: integer + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]` + More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port + minimum: 1 + maximum: 65535 + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + templates: + !!merge <<: *TypeTemplateNames + description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do" + podTemplates: + type: array + description: | + podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone + More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates + # 
nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`" + generateName: + type: string + description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables" + zone: + type: object + description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" + #required: + # - values + properties: + key: + type: string + description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`" + values: + type: array + description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`" + # nullable: true + items: + type: string + distribution: + type: string + description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" + enum: + - "" + - "Unspecified" + - "OnePerHost" + podDistribution: + type: array + description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster" + # nullable: true + items: + type: object + #required: + # - type + properties: + type: + type: string + description: "you can define multiple affinity policy types" + enum: + # List PodDistributionXXX constants + - "" + - "Unspecified" + - "ClickHouseAntiAffinity" + - "ShardAntiAffinity" + - "ReplicaAntiAffinity" + - "AnotherNamespaceAntiAffinity" + - 
"AnotherClickHouseInstallationAntiAffinity" + - "AnotherClusterAntiAffinity" + - "MaxNumberPerNode" + - "NamespaceAffinity" + - "ClickHouseInstallationAffinity" + - "ClusterAffinity" + - "ShardAffinity" + - "ReplicaAffinity" + - "PreviousTailAffinity" + - "CircularReplication" + scope: + type: string + description: "scope for apply each podDistribution" + enum: + # list PodDistributionScopeXXX constants + - "" + - "Unspecified" + - "Shard" + - "Replica" + - "Cluster" + - "ClickHouseInstallation" + - "Namespace" + number: + type: integer + description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type" + minimum: 0 + maximum: 65535 + topologyKey: + type: string + description: | + use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`, + more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity" + metadata: + type: object + description: | + allows pass standard object's metadata from template to Pod + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + # TODO specify PodSpec + type: object + description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details" + # nullable: true + x-kubernetes-preserve-unknown-fields: true + volumeClaimTemplates: + type: array + description: | + allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else + # nullable: true + items: + type: object + #required: + # - name + # - spec + properties: + name: + type: string + description: | + template name, could use to link inside + top-level 
`chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`, + cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`, + shard-level `chi.spec.configuration.clusters.layout.shards.temlates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.temlates.logVolumeClaimTemplate` + replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate` + provisioner: *TypePVCProvisioner + reclaimPolicy: *TypePVCReclaimPolicy + metadata: + type: object + description: | + allows to pass standard object's metadata from template to PVC + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + type: object + description: | + allows define all aspects of `PVC` resource + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + # nullable: true + x-kubernetes-preserve-unknown-fields: true + serviceTemplates: + type: array + description: | + allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level + # nullable: true + items: + type: object + #required: + # - name + # - spec + properties: + name: + type: string + description: | + template name, could use to link inside + chi-level `chi.spec.defaults.templates.serviceTemplate` + cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate` + shard-level `chi.spec.configuration.clusters.layout.shards.temlates.shardServiceTemplate` + replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or 
`chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate` + generateName: + type: string + description: | + allows define format for generated `Service` name, + look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates + for details about available template variables" + metadata: + # TODO specify ObjectMeta + type: object + description: | + allows pass standard object's metadata from template to Service + Could be use for define specificly for Cloud Provider metadata which impact to behavior of service + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + # TODO specify ServiceSpec + type: object + description: | + describe behavior of generated Service + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + # nullable: true + x-kubernetes-preserve-unknown-fields: true + useTemplates: + type: array + description: | + list of `ClickHouseInstallationTemplate` (chit) resource names which will merge with current `CHI` + manifest during render Kubernetes resources to create related ClickHouse clusters" + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "name of `ClickHouseInstallationTemplate` (chit) resource" + namespace: + type: string + description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`" + useType: + type: string + description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`" + enum: + # List useTypeXXX constants from model + - "" + - "merge" diff --git a/deploy/operatorhub/0.25.6/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml 
b/deploy/operatorhub/0.25.6/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml new file mode 100644 index 000000000..0779a3051 --- /dev/null +++ b/deploy/operatorhub/0.25.6/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml @@ -0,0 +1,1453 @@ +# Template Parameters: +# +# KIND=ClickHouseInstallationTemplate +# SINGULAR=clickhouseinstallationtemplate +# PLURAL=clickhouseinstallationtemplates +# SHORT=chit +# OPERATOR_VERSION=0.25.6 +# +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clickhouseinstallationtemplates.clickhouse.altinity.com + labels: + clickhouse.altinity.com/chop: 0.25.6 +spec: + group: clickhouse.altinity.com + scope: Namespaced + names: + kind: ClickHouseInstallationTemplate + singular: clickhouseinstallationtemplate + plural: clickhouseinstallationtemplates + shortNames: + - chit + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: version + type: string + description: Operator version + priority: 1 # show in wide view + jsonPath: .status.chop-version + - name: clusters + type: integer + description: Clusters count + jsonPath: .status.clusters + - name: shards + type: integer + description: Shards count + priority: 1 # show in wide view + jsonPath: .status.shards + - name: hosts + type: integer + description: Hosts count + jsonPath: .status.hosts + - name: taskID + type: string + description: TaskID + priority: 1 # show in wide view + jsonPath: .status.taskID + - name: status + type: string + description: Resource status + jsonPath: .status.status + - name: hosts-completed + type: integer + description: Completed hosts count + jsonPath: .status.hostsCompleted + - name: hosts-updated + type: integer + description: Updated hosts count + priority: 1 # show in wide view + jsonPath: .status.hostsUpdated + - name: hosts-added + type: integer + description: Added hosts count + priority: 1 # show in wide view + jsonPath: .status.hostsAdded + - name: 
hosts-deleted + type: integer + description: Hosts deleted count + priority: 1 # show in wide view + jsonPath: .status.hostsDeleted + - name: endpoint + type: string + description: Client access endpoint + priority: 1 # show in wide view + jsonPath: .status.endpoint + - name: age + type: date + description: Age of the resource + # Displayed in all priorities + jsonPath: .metadata.creationTimestamp + - name: suspend + type: string + description: Suspend reconciliation + # Displayed in all priorities + jsonPath: .spec.suspend + subresources: + status: {} + schema: + openAPIV3Schema: + description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters" + type: object + required: + - spec + properties: + apiVersion: + description: | + APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: | + Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + type: object + description: | + Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other + properties: + chop-version: + type: string + description: "Operator version" + chop-commit: + type: string + description: "Operator git commit SHA" + chop-date: + type: string + description: "Operator build date" + chop-ip: + type: string + description: "IP address of the operator's pod which managed this resource" + clusters: + type: integer + minimum: 0 + description: "Clusters count" + shards: + type: integer + minimum: 0 + description: "Shards count" + replicas: + type: integer + minimum: 0 + description: "Replicas count" + hosts: + type: integer + minimum: 0 + description: "Hosts count" + status: + type: string + description: "Status" + taskID: + type: string + description: "Current task id" + taskIDsStarted: + type: array + description: "Started task ids" + nullable: true + items: + type: string + taskIDsCompleted: + type: array + description: "Completed task ids" + nullable: true + items: + type: string + action: + type: string + description: "Action" + actions: + type: array + description: "Actions" + nullable: true + items: + type: string + error: + type: string + description: "Last error" + errors: + type: array + description: "Errors" + nullable: true + items: + type: string + hostsUnchanged: + type: integer + minimum: 0 + description: "Unchanged Hosts count" + hostsUpdated: + type: integer + minimum: 0 + description: "Updated Hosts count" + hostsAdded: + type: integer + minimum: 0 + description: "Added Hosts count" + hostsCompleted: + type: integer + minimum: 0 + description: "Completed Hosts count" + hostsDeleted: + type: integer + minimum: 0 + description: "Deleted Hosts count" + 
hostsDelete: + type: integer + minimum: 0 + description: "About to delete Hosts count" + pods: + type: array + description: "Pods" + nullable: true + items: + type: string + pod-ips: + type: array + description: "Pod IPs" + nullable: true + items: + type: string + fqdns: + type: array + description: "Pods FQDNs" + nullable: true + items: + type: string + endpoint: + type: string + description: "Endpoint" + endpoints: + type: array + description: "All endpoints" + nullable: true + items: + type: string + generation: + type: integer + minimum: 0 + description: "Generation" + normalized: + type: object + description: "Normalized resource requested" + nullable: true + x-kubernetes-preserve-unknown-fields: true + normalizedCompleted: + type: object + description: "Normalized resource completed" + nullable: true + x-kubernetes-preserve-unknown-fields: true + actionPlan: + type: object + description: "Action Plan" + nullable: true + x-kubernetes-preserve-unknown-fields: true + hostsWithTablesCreated: + type: array + description: "List of hosts with tables created by the operator" + nullable: true + items: + type: string + hostsWithReplicaCaughtUp: + type: array + description: "List of hosts with replica caught up" + nullable: true + items: + type: string + usedTemplates: + type: array + description: "List of templates used to build this CHI" + nullable: true + x-kubernetes-preserve-unknown-fields: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + spec: + type: object + # x-kubernetes-preserve-unknown-fields: true + description: | + Specification of the desired behavior of one or more ClickHouse clusters + More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md + properties: + taskID: + type: string + description: | + Allows to define custom taskID for CHI update and watch status of this update execution. + Displayed in all .status.taskID* fields. 
+ By default (if not filled) every update of CHI manifest will generate random taskID + stop: &TypeStringBool + type: string + description: | + Allows to stop all ClickHouse clusters defined in a CHI. + Works as the following: + - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. + - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s. + enum: + # List StringBoolXXX constants from model + - "" + - "0" + - "1" + - "False" + - "false" + - "True" + - "true" + - "No" + - "no" + - "Yes" + - "yes" + - "Off" + - "off" + - "On" + - "on" + - "Disable" + - "disable" + - "Enable" + - "enable" + - "Disabled" + - "disabled" + - "Enabled" + - "enabled" + restart: + type: string + description: | + In case 'RollingUpdate' specified, the operator will always restart ClickHouse pods during reconcile. + This option is used in rare cases when force restart is required and is typically removed after the use in order to avoid unneeded restarts. + enum: + - "" + - "RollingUpdate" + suspend: + !!merge <<: *TypeStringBool + description: | + Suspend reconciliation of resources managed by a ClickHouse Installation. + Works as the following: + - When `suspend` is `true` operator stops reconciling all resources. + - When `suspend` is `false` or not set, operator reconciles all resources. + troubleshoot: + !!merge <<: *TypeStringBool + description: | + Allows to troubleshoot Pods during CrashLoopBackOff state. + This may happen when a wrong configuration is applied, in this case `clickhouse-server` wouldn't start. + Command within ClickHouse container is modified with `sleep` in order to avoid quick restarts + and give time to troubleshoot via CLI. + Liveness and Readiness probes are disabled as well. 
+ namespaceDomainPattern: + type: string + description: | + Custom domain pattern which will be used for DNS names of `Service` or `Pod`. + Typical use scenario - custom cluster domain in Kubernetes cluster + Example: %s.svc.my.test + templating: + type: object + # nullable: true + description: | + Optional, applicable inside ClickHouseInstallationTemplate only. + Defines current ClickHouseInstallationTemplate application options to target ClickHouseInstallation(s)." + properties: + policy: + type: string + description: | + When defined as `auto` inside ClickhouseInstallationTemplate, this ClickhouseInstallationTemplate + will be auto-added into ClickHouseInstallation, selectable by `chiSelector`. + Default value is `manual`, meaning ClickHouseInstallation should request this ClickhouseInstallationTemplate explicitly. + enum: + - "" + - "auto" + - "manual" + chiSelector: + type: object + description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate" + # nullable: true + x-kubernetes-preserve-unknown-fields: true + reconciling: &TypeReconcile + type: object + description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side" + # nullable: true + properties: + policy: + type: string + description: | + DISCUSSED TO BE DEPRECATED + Syntax sugar + Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config + Possible values: + - wait - should wait to exclude host, complete queries and include host back into the cluster + - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster + enum: + - "" + - "wait" + - "nowait" + configMapPropagationTimeout: + type: integer + description: | + Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod` + More details: 
https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically + minimum: 0 + maximum: 3600 + cleanup: + type: object + description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle" + # nullable: true + properties: + unknownObjects: + type: object + description: | + Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator, + but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource. + Default behavior is `Delete`" + # nullable: true + properties: + statefulSet: &TypeObjectsCleanup + type: string + description: "Behavior policy for unknown StatefulSet, `Delete` by default" + enum: + # List ObjectsCleanupXXX constants from model + - "" + - "Retain" + - "Delete" + pvc: + type: string + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown PVC, `Delete` by default" + configMap: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown ConfigMap, `Delete` by default" + service: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown Service, `Delete` by default" + reconcileFailedObjects: + type: object + description: | + Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile. 
+ Default behavior is `Retain`" + # nullable: true + properties: + statefulSet: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed StatefulSet, `Retain` by default" + pvc: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed PVC, `Retain` by default" + configMap: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed ConfigMap, `Retain` by default" + service: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed Service, `Retain` by default" + macros: + type: object + description: "macros parameters" + properties: + sections: + type: object + description: "sections behaviour for macros" + properties: + users: + type: object + description: "sections behaviour for macros on users" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + profiles: + type: object + description: "sections behaviour for macros on profiles" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + quotas: + type: object + description: "sections behaviour for macros on quotas" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + settings: + type: object + description: "sections behaviour for macros on settings" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + files: + type: object + description: "sections behaviour for macros on files" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + runtime: &TypeReconcileRuntime + type: object + description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle" + properties: + reconcileShardsThreadsNumber: + type: integer + minimum: 1 + maximum: 65535 + description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default" + reconcileShardsMaxConcurrencyPercent: + type: integer + minimum: 0 + maximum: 100 + description: 
"The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + host: &TypeReconcileHost + type: object + description: | + Whether the operator during reconcile procedure should wait for a ClickHouse host: + - to be excluded from a ClickHouse cluster + - to complete all running queries + - to be included into a ClickHouse cluster + respectfully before moving forward + properties: + wait: + type: object + properties: + exclude: + !!merge <<: *TypeStringBool + queries: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries" + include: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster" + replicas: + type: object + description: "Whether the operator during reconcile procedure should wait for replicas to catch-up" + properties: + all: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up" + new: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up" + delay: + type: integer + description: "replication max absolute delay to consider replica is not delayed" + probes: + type: object + description: "What probes the operator should wait during host launch procedure" + properties: + startup: + !!merge <<: *TypeStringBool + description: | + Whether the operator during host launch procedure should wait for startup probe to succeed. + In case probe is unspecified wait is assumed to be completed successfully. + Default option value is to do not wait. + readiness: + !!merge <<: *TypeStringBool + description: | + Whether the operator during host launch procedure should wait for ready probe to succeed. 
+ In case probe is unspecified wait is assumed to be completed successfully. + Default option value is to wait. + drop: + type: object + properties: + replicas: + type: object + description: | + Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated + properties: + onDelete: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop replicas when replica is deleted + onLostVolume: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop replicas when replica volume is lost + active: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated + reconcile: + !!merge <<: *TypeReconcile + description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side" + defaults: + type: object + description: | + define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level + More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults + # nullable: true + properties: + replicasUseFQDN: + !!merge <<: *TypeStringBool + description: | + define should replicas be specified by FQDN in ``. 
+ In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup + "no" by default + distributedDDL: + type: object + description: | + allows change `` settings + More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl + # nullable: true + properties: + profile: + type: string + description: "Settings from this profile will be used to execute DDL queries" + storageManagement: + type: object + description: default storage management options + properties: + provisioner: &TypePVCProvisioner + type: string + description: "defines `PVC` provisioner - be it StatefulSet or the Operator" + enum: + - "" + - "StatefulSet" + - "Operator" + reclaimPolicy: &TypePVCReclaimPolicy + type: string + description: | + defines behavior of `PVC` deletion. + `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet + enum: + - "" + - "Retain" + - "Delete" + templates: &TypeTemplateNames + type: object + description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource" + # nullable: true + properties: + hostTemplate: + type: string + description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`" + podTemplate: + type: string + description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + dataVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data 
directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + logVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + serviceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" + serviceTemplates: + type: array + description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" + nullable: true + items: + type: string + clusterServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`" + shardServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`" + replicaServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`" + volumeClaimTemplate: + type: string + description: 
"optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + configuration: + type: object + description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource" + # nullable: true + properties: + zookeeper: &TypeZookeeperConfig + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/` + `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separatelly look examples on https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/ + currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl` + More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper + # nullable: true + properties: + nodes: + type: array + description: "describe every available zookeeper cluster node for interaction" + # nullable: true + items: + type: object + #required: + # - host + properties: + host: + type: string + description: "dns name or ip address for Zookeeper node" + port: + type: integer + description: "TCP port which used to connect to Zookeeper node" + minimum: 0 + maximum: 65535 + secure: + !!merge <<: *TypeStringBool + description: "if a secure connection to Zookeeper is required" + availabilityZone: + type: string + description: "availability zone for Zookeeper node" + session_timeout_ms: + type: integer + description: "session timeout during connect to Zookeeper" + operation_timeout_ms: + type: integer + description: "one operation timeout during 
Zookeeper transactions" + root: + type: string + description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)" + identity: + type: string + description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper" + use_compression: + !!merge <<: *TypeStringBool + description: "Enables compression in Keeper protocol if set to true" + users: + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure password hashed, authorization restrictions, database level security row filters etc. + More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers + + any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets + secret value will pass in `pod.spec.containers.evn`, and generate with from_env=XXX in XML in /etc/clickhouse-server/users.d/chop-generated-users.xml + it not allow automatically updates when updates `secret`, change spec.taskID for manually trigger reconcile cycle + + look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + + any key with prefix `k8s_secret_` shall has value with format namespace/secret/key or secret/key + in this case value from secret will write directly into XML tag during render *-usersd ConfigMap + + any key with prefix `k8s_secret_env` shall has value with format namespace/secret/key or secret/key + in this case value from secret will write into environment variable and write to XML tag via from_env=XXX + + look into 
https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + # nullable: true + x-kubernetes-preserve-unknown-fields: true + profiles: + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure any aspect of settings profile + More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles + # nullable: true + x-kubernetes-preserve-unknown-fields: true + quotas: + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure any aspect of resource quotas + More details: https://clickhouse.tech/docs/en/operations/quotas/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas + # nullable: true + x-kubernetes-preserve-unknown-fields: true + settings: &TypeSettings + type: object + description: | + allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings + + any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets + look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + + secret value will pass in `pod.spec.env`, and generate with from_env=XXX in XML in /etc/clickhouse-server/config.d/chop-generated-settings.xml + it not allow automatically updates when updates `secret`, change spec.taskID for manually trigger reconcile cycle + # nullable: true + x-kubernetes-preserve-unknown-fields: true + files: &TypeFiles + type: object + description: | + allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + every key in this object is the file name + every value in this object is the file content + you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html + each key could contains prefix like {common}, {users}, {hosts} or config.d, users.d, conf.d, wrong prefixes will be ignored, subfolders also will be ignored + More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml + + any key could contains `valueFrom` with `secretKeyRef` which allow pass values from kubernetes secrets + secrets will mounted into pod as separate volume in /etc/clickhouse-server/secrets.d/ + and will automatically update when update secret + it useful for pass SSL certificates from cert-manager or similar tool + look 
into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + # nullable: true + x-kubernetes-preserve-unknown-fields: true + clusters: + type: array + description: | + describes clusters layout and allows change settings on cluster-level, shard-level and replica-level + every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server` + all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml` + Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + If `cluster` contains zookeeper settings (could be inherited from top `chi` level), when you can create *ReplicatedMergeTree tables + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources" + minLength: 1 + # See namePartClusterMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + zookeeper: + !!merge <<: *TypeZookeeperConfig + description: | + optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.zookeeper` settings + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` + schemaPolicy: + type: object + description: | + describes how schema is propagated within replicas and shards + properties: + replica: + type: string + description: "how schema is propagated within a replica" + enum: + # List SchemaPolicyReplicaXXX constants from model + - "" + - "None" + - "All" + shard: + type: string + description: "how schema is propagated between shards" + enum: + # List SchemaPolicyShardXXX constants from model + - "" + - "None" + - "All" + - "DistributedTablesOnly" + insecure: + !!merge <<: *TypeStringBool + description: optional, open insecure ports for cluster, defaults to "yes" + secure: + !!merge <<: *TypeStringBool + description: optional, open secure ports for cluster + secret: + type: object + description: "optional, shared secret value to secure cluster communications" + properties: + auto: + !!merge <<: *TypeStringBool + description: "Auto-generate shared secret value to secure cluster communications" + value: + description: "Cluster shared secret value in plain text" + type: string + valueFrom: + description: "Cluster shared secret source" + type: object + properties: + secretKeyRef: + 
description: | + Selects a key of a secret in the clickhouse installation namespace. + Should not be used if value is not empty. + type: object + properties: + name: + description: | + Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - name + - key + pdbManaged: + !!merge <<: *TypeStringBool + description: | + Specifies whether the Pod Disruption Budget (PDB) should be managed. + During the next installation, if PDB management is enabled, the operator will + attempt to retrieve any existing PDB. If none is found, it will create a new one + and initiate a reconciliation loop. If PDB management is disabled, the existing PDB + will remain intact, and the reconciliation loop will not be executed. By default, + PDB management is enabled. + pdbMaxUnavailable: + type: integer + description: | + Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction, + i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions + by specifying 0. This is a mutually exclusive setting with "minAvailable". 
+ minimum: 0 + maximum: 65535 + reconcile: + type: object + description: "allow tuning reconciling process" + properties: + runtime: + !!merge <<: *TypeReconcileRuntime + host: + !!merge <<: *TypeReconcileHost + layout: + type: object + description: | + describe current cluster layout, how much shards in cluster, how much replica in shard + allows override settings on each shard and replica separatelly + # nullable: true + properties: + shardsCount: + type: integer + description: | + how much shards for current ClickHouse cluster will run in Kubernetes, + each shard contains shared-nothing part of data and contains set of replicas, + cluster contains 1 shard by default" + replicasCount: + type: integer + description: | + how much replicas in each shards for current cluster will run in Kubernetes, + each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, + every shard contains 1 replica by default" + shards: + type: array + description: | + optional, allows override top-level `chi.spec.configuration`, cluster-level + `chi.spec.configuration.clusters` settings for each shard separately, + use it only if you fully understand what you do" + # nullable: true + items: + type: object + properties: + name: + type: string + description: "optional, by default shard name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartShardMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + definitionType: + type: string + description: "DEPRECATED - to be removed soon" + weight: + type: integer + description: | + optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine, + will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml + More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + internalReplication: + !!merge <<: *TypeStringBool 
+ description: | + optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise + allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication, + will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml + More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard + override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates` + replicasCount: + type: integer + description: | + optional, how much replicas in selected shard for selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, + shard contains 1 replica by default + override 
cluster-level `chi.spec.configuration.clusters.layout.replicasCount` + minimum: 1 + replicas: + type: array + description: | + optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards` + # nullable: true + items: + # Host + type: object + properties: + name: + type: string + description: "optional, by default replica name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + insecure: + !!merge <<: *TypeStringBool + description: | + optional, open insecure ports for cluster, defaults to "yes" + secure: + !!merge <<: *TypeStringBool + description: | + optional, open secure ports + tcpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort` + allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + tlsPort: + type: integer + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort` + allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + httpsPort: + type: integer + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort` + allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol + minimum: 1 + maximum: 65535 + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings 
inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates` + replicas: + type: array + description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do" + # nullable: true + items: + type: object + properties: + name: + type: string + description: "optional, by default replica name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartShardMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` + shardsCount: + type: integer + description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`" + minimum: 1 + shards: + type: array + description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents" + # nullable: true + items: + # Host + type: object + properties: + name: + type: string + description: "optional, by default shard name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + insecure: + !!merge <<: *TypeStringBool + description: | + optional, open insecure ports for cluster, defaults to "yes" + secure: 
+ !!merge <<: *TypeStringBool + description: | + optional, open secure ports + tcpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort` + allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + tlsPort: + type: integer + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort` + allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + httpsPort: + type: integer + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort` + allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol + minimum: 1 + maximum: 65535 + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates` + templates: + type: object + description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it" + # nullable: true + properties: + hostTemplates: + type: array + description: "hostTemplate will use during apply to generate `clickhose-server` config files" + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level 
`chi.spec.configuration.clusters.layout.shards.temlates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`" + type: string + portDistribution: + type: array + description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs" + # nullable: true + items: + type: object + #required: + # - type + properties: + type: + type: string + description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network" + enum: + # List PortDistributionXXX constants + - "" + - "Unspecified" + - "ClusterScopeIndex" + spec: + # Host + type: object + properties: + name: + type: string + description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + insecure: + !!merge <<: *TypeStringBool + description: | + optional, open insecure ports for cluster, defaults to "yes" + secure: + !!merge <<: *TypeStringBool + description: | + optional, open secure ports + tcpPort: + type: integer + description: | + optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]` + More info: https://clickhouse.tech/docs/en/interfaces/tcp/ + minimum: 1 + maximum: 65535 + tlsPort: + type: integer + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + 
optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]` + More info: https://clickhouse.tech/docs/en/interfaces/http/ + minimum: 1 + maximum: 65535 + httpsPort: + type: integer + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]` + More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port + minimum: 1 + maximum: 65535 + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + templates: + !!merge <<: *TypeTemplateNames + description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do" + podTemplates: + type: array + description: | + podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone + More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates + # 
nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`" + generateName: + type: string + description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables" + zone: + type: object + description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" + #required: + # - values + properties: + key: + type: string + description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`" + values: + type: array + description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`" + # nullable: true + items: + type: string + distribution: + type: string + description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" + enum: + - "" + - "Unspecified" + - "OnePerHost" + podDistribution: + type: array + description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster" + # nullable: true + items: + type: object + #required: + # - type + properties: + type: + type: string + description: "you can define multiple affinity policy types" + enum: + # List PodDistributionXXX constants + - "" + - "Unspecified" + - "ClickHouseAntiAffinity" + - "ShardAntiAffinity" + - "ReplicaAntiAffinity" + - "AnotherNamespaceAntiAffinity" + - 
"AnotherClickHouseInstallationAntiAffinity" + - "AnotherClusterAntiAffinity" + - "MaxNumberPerNode" + - "NamespaceAffinity" + - "ClickHouseInstallationAffinity" + - "ClusterAffinity" + - "ShardAffinity" + - "ReplicaAffinity" + - "PreviousTailAffinity" + - "CircularReplication" + scope: + type: string + description: "scope for apply each podDistribution" + enum: + # list PodDistributionScopeXXX constants + - "" + - "Unspecified" + - "Shard" + - "Replica" + - "Cluster" + - "ClickHouseInstallation" + - "Namespace" + number: + type: integer + description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type" + minimum: 0 + maximum: 65535 + topologyKey: + type: string + description: | + use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`, + more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity" + metadata: + type: object + description: | + allows pass standard object's metadata from template to Pod + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + # TODO specify PodSpec + type: object + description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details" + # nullable: true + x-kubernetes-preserve-unknown-fields: true + volumeClaimTemplates: + type: array + description: | + allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else + # nullable: true + items: + type: object + #required: + # - name + # - spec + properties: + name: + type: string + description: | + template name, could use to link inside + top-level 
`chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`, + cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`, + shard-level `chi.spec.configuration.clusters.layout.shards.temlates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.temlates.logVolumeClaimTemplate` + replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate` + provisioner: *TypePVCProvisioner + reclaimPolicy: *TypePVCReclaimPolicy + metadata: + type: object + description: | + allows to pass standard object's metadata from template to PVC + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + type: object + description: | + allows define all aspects of `PVC` resource + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + # nullable: true + x-kubernetes-preserve-unknown-fields: true + serviceTemplates: + type: array + description: | + allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level + # nullable: true + items: + type: object + #required: + # - name + # - spec + properties: + name: + type: string + description: | + template name, could use to link inside + chi-level `chi.spec.defaults.templates.serviceTemplate` + cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate` + shard-level `chi.spec.configuration.clusters.layout.shards.temlates.shardServiceTemplate` + replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or 
`chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate` + generateName: + type: string + description: | + allows define format for generated `Service` name, + look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates + for details about available template variables" + metadata: + # TODO specify ObjectMeta + type: object + description: | + allows pass standard object's metadata from template to Service + Could be use for define specificly for Cloud Provider metadata which impact to behavior of service + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + # TODO specify ServiceSpec + type: object + description: | + describe behavior of generated Service + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + # nullable: true + x-kubernetes-preserve-unknown-fields: true + useTemplates: + type: array + description: | + list of `ClickHouseInstallationTemplate` (chit) resource names which will merge with current `CHI` + manifest during render Kubernetes resources to create related ClickHouse clusters" + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "name of `ClickHouseInstallationTemplate` (chit) resource" + namespace: + type: string + description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`" + useType: + type: string + description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`" + enum: + # List useTypeXXX constants from model + - "" + - "merge" diff --git a/deploy/operatorhub/0.25.6/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml 
b/deploy/operatorhub/0.25.6/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml new file mode 100644 index 000000000..c2604dee9 --- /dev/null +++ b/deploy/operatorhub/0.25.6/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml @@ -0,0 +1,883 @@ +# Template Parameters: +# +# OPERATOR_VERSION=0.25.6 +# +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com + labels: + clickhouse-keeper.altinity.com/chop: 0.25.6 +spec: + group: clickhouse-keeper.altinity.com + scope: Namespaced + names: + kind: ClickHouseKeeperInstallation + singular: clickhousekeeperinstallation + plural: clickhousekeeperinstallations + shortNames: + - chk + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: version + type: string + description: Operator version + priority: 1 # show in wide view + jsonPath: .status.chop-version + - name: clusters + type: integer + description: Clusters count + jsonPath: .status.clusters + - name: shards + type: integer + description: Shards count + priority: 1 # show in wide view + jsonPath: .status.shards + - name: hosts + type: integer + description: Hosts count + jsonPath: .status.hosts + - name: taskID + type: string + description: TaskID + priority: 1 # show in wide view + jsonPath: .status.taskID + - name: status + type: string + description: Resource status + jsonPath: .status.status + - name: hosts-unchanged + type: integer + description: Unchanged hosts count + priority: 1 # show in wide view + jsonPath: .status.hostsUnchanged + - name: hosts-updated + type: integer + description: Updated hosts count + priority: 1 # show in wide view + jsonPath: .status.hostsUpdated + - name: hosts-added + type: integer + description: Added hosts count + priority: 1 # show in wide view + jsonPath: .status.hostsAdded + - name: hosts-completed + type: integer + description: Completed hosts count + jsonPath: 
.status.hostsCompleted + - name: hosts-deleted + type: integer + description: Hosts deleted count + priority: 1 # show in wide view + jsonPath: .status.hostsDeleted + - name: hosts-delete + type: integer + description: Hosts to be deleted count + priority: 1 # show in wide view + jsonPath: .status.hostsDelete + - name: endpoint + type: string + description: Client access endpoint + priority: 1 # show in wide view + jsonPath: .status.endpoint + - name: age + type: date + description: Age of the resource + # Displayed in all priorities + jsonPath: .metadata.creationTimestamp + - name: suspend + type: string + description: Suspend reconciliation + # Displayed in all priorities + jsonPath: .spec.suspend + subresources: + status: {} + schema: + openAPIV3Schema: + description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters" + type: object + required: + - spec + properties: + apiVersion: + description: | + APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: | + Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + type: object + description: | + Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other + properties: + chop-version: + type: string + description: "Operator version" + chop-commit: + type: string + description: "Operator git commit SHA" + chop-date: + type: string + description: "Operator build date" + chop-ip: + type: string + description: "IP address of the operator's pod which managed this resource" + clusters: + type: integer + minimum: 0 + description: "Clusters count" + shards: + type: integer + minimum: 0 + description: "Shards count" + replicas: + type: integer + minimum: 0 + description: "Replicas count" + hosts: + type: integer + minimum: 0 + description: "Hosts count" + status: + type: string + description: "Status" + taskID: + type: string + description: "Current task id" + taskIDsStarted: + type: array + description: "Started task ids" + nullable: true + items: + type: string + taskIDsCompleted: + type: array + description: "Completed task ids" + nullable: true + items: + type: string + action: + type: string + description: "Action" + actions: + type: array + description: "Actions" + nullable: true + items: + type: string + error: + type: string + description: "Last error" + errors: + type: array + description: "Errors" + nullable: true + items: + type: string + hostsUnchanged: + type: integer + minimum: 0 + description: "Unchanged Hosts count" + hostsUpdated: + type: integer + minimum: 0 + description: "Updated Hosts count" + hostsAdded: + type: integer + minimum: 0 + description: "Added Hosts count" + hostsCompleted: + type: integer + minimum: 0 + description: "Completed Hosts count" + hostsDeleted: + type: integer + minimum: 0 + description: "Deleted Hosts count" + 
hostsDelete: + type: integer + minimum: 0 + description: "About to delete Hosts count" + pods: + type: array + description: "Pods" + nullable: true + items: + type: string + pod-ips: + type: array + description: "Pod IPs" + nullable: true + items: + type: string + fqdns: + type: array + description: "Pods FQDNs" + nullable: true + items: + type: string + endpoint: + type: string + description: "Endpoint" + endpoints: + type: array + description: "All endpoints" + nullable: true + items: + type: string + generation: + type: integer + minimum: 0 + description: "Generation" + normalized: + type: object + description: "Normalized resource requested" + x-kubernetes-preserve-unknown-fields: true + normalizedCompleted: + type: object + description: "Normalized resource completed" + x-kubernetes-preserve-unknown-fields: true + hostsWithTablesCreated: + type: array + description: "List of hosts with tables created by the operator" + nullable: true + items: + type: string + hostsWithReplicaCaughtUp: + type: array + description: "List of hosts with replica caught up" + nullable: true + items: + type: string + usedTemplates: + type: array + description: "List of templates used to build this CHI" + nullable: true + x-kubernetes-preserve-unknown-fields: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + spec: + type: object + # x-kubernetes-preserve-unknown-fields: true + description: | + Specification of the desired behavior of one or more ClickHouse clusters + More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md + properties: + taskID: + type: string + description: | + Allows to define custom taskID for CHI update and watch status of this update execution. + Displayed in all .status.taskID* fields. + By default (if not filled) every update of CHI manifest will generate random taskID + stop: &TypeStringBool + type: string + description: | + Allows to stop all ClickHouse clusters defined in a CHI. 
+ Works as the following: + - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. + - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s. + enum: + # List StringBoolXXX constants from model + - "" + - "0" + - "1" + - "False" + - "false" + - "True" + - "true" + - "No" + - "no" + - "Yes" + - "yes" + - "Off" + - "off" + - "On" + - "on" + - "Disable" + - "disable" + - "Enable" + - "enable" + - "Disabled" + - "disabled" + - "Enabled" + - "enabled" + suspend: + !!merge <<: *TypeStringBool + description: | + Suspend reconciliation of resources managed by a ClickHouse Keeper. + Works as the following: + - When `suspend` is `true` operator stops reconciling all resources. + - When `suspend` is `false` or not set, operator reconciles all resources. + namespaceDomainPattern: + type: string + description: | + Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster + Example: %s.svc.my.test + reconciling: + type: object + description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side" + # nullable: true + properties: + policy: + type: string + description: | + DISCUSSED TO BE DEPRECATED + Syntax sugar + Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config + Possible values: + - wait - should wait to exclude host, complete queries and include host back into the cluster + - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster + enum: + - "" + - "wait" + - "nowait" + configMapPropagationTimeout: + type: integer + description: | + Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod` + More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically + minimum: 0 + maximum: 3600 + cleanup: + type: object + description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle" + # nullable: true + properties: + unknownObjects: + type: object + description: | + Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator, + but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource. 
Default behavior is `Delete` + # nullable: true + properties: + statefulSet: &TypeObjectsCleanup + type: string + description: "Behavior policy for unknown StatefulSet, `Delete` by default" + enum: + # List ObjectsCleanupXXX constants from model + - "" + - "Retain" + - "Delete" + pvc: + type: string + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown PVC, `Delete` by default" + configMap: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown ConfigMap, `Delete` by default" + service: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown Service, `Delete` by default" + reconcileFailedObjects: + type: object + description: | + Describes what clickhouse-operator should do with Kubernetes resources which failed during reconcile. + Default behavior is `Retain` + # nullable: true + properties: + statefulSet: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed StatefulSet, `Retain` by default" + pvc: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed PVC, `Retain` by default" + configMap: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed ConfigMap, `Retain` by default" + service: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed Service, `Retain` by default" + defaults: + type: object + description: | + define default behavior for whole ClickHouseInstallation, some behavior can be re-defined on cluster, shard and replica level + More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults + # nullable: true + properties: + replicasUseFQDN: + !!merge <<: *TypeStringBool + description: | + define should replicas be specified by FQDN in ``.
+ In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup + "no" by default + distributedDDL: + type: object + description: | + allows change `` settings + More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl + # nullable: true + properties: + profile: + type: string + description: "Settings from this profile will be used to execute DDL queries" + storageManagement: + type: object + description: default storage management options + properties: + provisioner: &TypePVCProvisioner + type: string + description: "defines `PVC` provisioner - be it StatefulSet or the Operator" + enum: + - "" + - "StatefulSet" + - "Operator" + reclaimPolicy: &TypePVCReclaimPolicy + type: string + description: | + defines behavior of `PVC` deletion. + `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet + enum: + - "" + - "Retain" + - "Delete" + templates: &TypeTemplateNames + type: object + description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource" + # nullable: true + properties: + hostTemplate: + type: string + description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`" + podTemplate: + type: string + description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + dataVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data 
directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + logVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + serviceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" + serviceTemplates: + type: array + description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" + nullable: true + items: + type: string + clusterServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`" + shardServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`" + replicaServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`" + volumeClaimTemplate: + type: string + description: 
"optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + configuration: + type: object + description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource" + # nullable: true + properties: + settings: &TypeSettings + type: object + description: | + allows configure multiple aspects and behavior for `clickhouse-keeper` instance + # nullable: true + x-kubernetes-preserve-unknown-fields: true + files: &TypeFiles + type: object + description: | + allows define content of any setting + # nullable: true + x-kubernetes-preserve-unknown-fields: true + clusters: + type: array + description: | + describes clusters layout and allows change settings on cluster-level and replica-level + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources" + minLength: 1 + # See namePartClusterMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` + pdbManaged: + !!merge <<: *TypeStringBool + description: | + Specifies whether the Pod Disruption Budget (PDB) should be managed. + During the next installation, if PDB management is enabled, the operator will + attempt to retrieve any existing PDB. If none is found, it will create a new one + and initiate a reconciliation loop. If PDB management is disabled, the existing PDB + will remain intact, and the reconciliation loop will not be executed. By default, + PDB management is enabled. + pdbMaxUnavailable: + type: integer + description: | + Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction, + i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions + by specifying 0. This is a mutually exclusive setting with "minAvailable". 
+ minimum: 0 + maximum: 65535 + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` + layout: + type: object + description: | + describe current cluster layout, how much shards in cluster, how much replica in shard + allows override settings on each shard and replica separatelly + # nullable: true + properties: + replicasCount: + type: integer + description: | + how much replicas in each shards for current cluster will run in Kubernetes, + each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, + every shard contains 1 replica by default" + replicas: + type: array + description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do" + # nullable: true + items: + type: object + properties: + name: + type: string + description: "optional, by default replica name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartShardMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` + shardsCount: + type: integer + description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`" + minimum: 1 + shards: + type: array + description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents" + # nullable: true + items: + # Host + type: object + properties: + name: + type: string + description: "optional, by default shard name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + zkPort: + type: integer + minimum: 1 + maximum: 65535 + raftPort: + type: integer + minimum: 1 + maximum: 65535 + settings: + 
!!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates` + templates: + type: object + description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it" + # nullable: true + properties: + hostTemplates: + type: array + description: "hostTemplate will use during apply to generate `clickhouse-server` config files" + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + description: "template name, could use to link inside top-level
`chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`" + type: string + portDistribution: + type: array + description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs" + # nullable: true + items: + type: object + #required: + # - type + properties: + type: + type: string + description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network" + enum: + # List PortDistributionXXX constants + - "" + - "Unspecified" + - "ClusterScopeIndex" + spec: + # Host + type: object + properties: + name: + type: string + description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + zkPort: + type: integer + minimum: 1 + maximum: 65535 + raftPort: + type: integer + minimum: 1 + maximum: 65535 + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + templates: + !!merge <<: *TypeTemplateNames + description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do" + podTemplates: + type: array + description: | + podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone + More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`" + generateName: + type: string + description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables" + zone: + type: object + description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" + #required: + # - values + 
properties: + key: + type: string + description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`" + values: + type: array + description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`" + # nullable: true + items: + type: string + distribution: + type: string + description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" + enum: + - "" + - "Unspecified" + - "OnePerHost" + podDistribution: + type: array + description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster" + # nullable: true + items: + type: object + #required: + # - type + properties: + type: + type: string + description: "you can define multiple affinity policy types" + enum: + # List PodDistributionXXX constants + - "" + - "Unspecified" + - "ClickHouseAntiAffinity" + - "ShardAntiAffinity" + - "ReplicaAntiAffinity" + - "AnotherNamespaceAntiAffinity" + - "AnotherClickHouseInstallationAntiAffinity" + - "AnotherClusterAntiAffinity" + - "MaxNumberPerNode" + - "NamespaceAffinity" + - "ClickHouseInstallationAffinity" + - "ClusterAffinity" + - "ShardAffinity" + - "ReplicaAffinity" + - "PreviousTailAffinity" + - "CircularReplication" + scope: + type: string + description: "scope for apply each podDistribution" + enum: + # list PodDistributionScopeXXX constants + - "" + - "Unspecified" + - "Shard" + - "Replica" + - "Cluster" + - "ClickHouseInstallation" + - "Namespace" + number: + type: integer + description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type" + minimum: 0 + maximum: 65535 + topologyKey: + type: string + description: | + use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`, + more info: 
https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity" + metadata: + type: object + description: | + allows pass standard object's metadata from template to Pod + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + # TODO specify PodSpec + type: object + description: "allows define whole Pod.spec inside StatefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details" + # nullable: true + x-kubernetes-preserve-unknown-fields: true + volumeClaimTemplates: + type: array + description: | + allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else + # nullable: true + items: + type: object + #required: + # - name + # - spec + properties: + name: + type: string + description: | + template name, could use to link inside + top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`, + cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`, + shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate` + replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate` + provisioner: *TypePVCProvisioner + reclaimPolicy: *TypePVCReclaimPolicy + metadata: + type: object + description: | + allows to pass standard object's metadata from template to PVC + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true +
x-kubernetes-preserve-unknown-fields: true + spec: + type: object + description: | + allows define all aspects of `PVC` resource + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + # nullable: true + x-kubernetes-preserve-unknown-fields: true + serviceTemplates: + type: array + description: | + allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level + # nullable: true + items: + type: object + #required: + # - name + # - spec + properties: + name: + type: string + description: | + template name, could use to link inside + chi-level `chi.spec.defaults.templates.serviceTemplate` + cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate` + shard-level `chi.spec.configuration.clusters.layout.shards.temlates.shardServiceTemplate` + replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate` + generateName: + type: string + description: | + allows define format for generated `Service` name, + look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates + for details about available template variables" + metadata: + # TODO specify ObjectMeta + type: object + description: | + allows pass standard object's metadata from template to Service + Could be use for define specificly for Cloud Provider metadata which impact to behavior of service + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + # TODO specify ServiceSpec + type: object + description: | + describe behavior of generated Service + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + # nullable: true + 
x-kubernetes-preserve-unknown-fields: true diff --git a/deploy/operatorhub/0.25.6/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.6/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml new file mode 100644 index 000000000..19434d6d5 --- /dev/null +++ b/deploy/operatorhub/0.25.6/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml @@ -0,0 +1,539 @@ +# Template Parameters: +# +# NONE +# +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clickhouseoperatorconfigurations.clickhouse.altinity.com + labels: + clickhouse.altinity.com/chop: 0.25.6 +spec: + group: clickhouse.altinity.com + scope: Namespaced + names: + kind: ClickHouseOperatorConfiguration + singular: clickhouseoperatorconfiguration + plural: clickhouseoperatorconfigurations + shortNames: + - chopconf + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: namespaces + type: string + description: Watch namespaces + jsonPath: .status + - name: age + type: date + description: Age of the resource + # Displayed in all priorities + jsonPath: .metadata.creationTimestamp + schema: + openAPIV3Schema: + type: object + description: "allows customize `clickhouse-operator` settings, need restart clickhouse-operator pod after adding, more details https://github.com/Altinity/clickhouse-operator/blob/master/docs/operator_configuration.md" + x-kubernetes-preserve-unknown-fields: true + properties: + status: + type: object + x-kubernetes-preserve-unknown-fields: true + spec: + type: object + description: | + Allows to define settings of the clickhouse-operator. 
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/config/config.yaml + Check into etc-clickhouse-operator* ConfigMaps if you need more control + x-kubernetes-preserve-unknown-fields: true + properties: + watch: + type: object + description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment" + properties: + namespaces: + type: object + description: "List of namespaces where clickhouse-operator watches for events." + x-kubernetes-preserve-unknown-fields: true + clickhouse: + type: object + description: "Clickhouse related parameters used by clickhouse-operator" + properties: + configuration: + type: object + properties: + file: + type: object + properties: + path: + type: object + description: | + Each 'path' can be either absolute or relative. + In case path is absolute - it is used as is. + In case path is relative - it is relative to the folder where configuration file you are reading right now is located. + properties: + common: + type: string + description: | + Path to the folder where ClickHouse configuration files common for all instances within a CHI are located. + Default value - config.d + host: + type: string + description: | + Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located. + Default value - conf.d + user: + type: string + description: | + Path to the folder where ClickHouse configuration files with users settings are located. + Files are common for all instances within a CHI. 
+ Default value - users.d + user: + type: object + description: "Default parameters for any user which will create" + properties: + default: + type: object + properties: + profile: + type: string + description: "ClickHouse server configuration `...` for any " + quota: + type: string + description: "ClickHouse server configuration `...` for any " + networksIP: + type: array + description: "ClickHouse server configuration `...` for any " + items: + type: string + password: + type: string + description: "ClickHouse server configuration `...` for any " + network: + type: object + description: "Default network parameters for any user which will create" + properties: + hostRegexpTemplate: + type: string + description: "ClickHouse server configuration `...` for any " + configurationRestartPolicy: + type: object + description: "Configuration restart policy describes what configuration changes require ClickHouse restart" + properties: + rules: + type: array + description: "Array of set of rules per specified ClickHouse versions" + items: + type: object + properties: + version: + type: string + description: "ClickHouse version expression" + rules: + type: array + description: "Set of configuration rules for specified ClickHouse version" + items: + type: object + description: "setting: value pairs for configuration restart policy" + x-kubernetes-preserve-unknown-fields: true + access: + type: object + description: "parameters which use for connect to clickhouse from clickhouse-operator deployment" + properties: + scheme: + type: string + description: "The scheme to user for connecting to ClickHouse. 
Possible values: http, https, auto" + username: + type: string + description: "ClickHouse username to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName" + password: + type: string + description: "ClickHouse password to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName" + rootCA: + type: string + description: "Root certificate authority that clients use when verifying server certificates. Used for https connection to ClickHouse" + secret: + type: object + properties: + namespace: + type: string + description: "Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances" + name: + type: string + description: "Name of k8s Secret with username and password to be used by operator to connect to ClickHouse instances" + port: + type: integer + minimum: 1 + maximum: 65535 + description: "Port to be used by operator to connect to ClickHouse instances" + timeouts: + type: object + description: "Timeouts used to limit connection and queries from the operator to ClickHouse instances. In seconds" + properties: + connect: + type: integer + minimum: 1 + maximum: 10 + description: "Timeout to setup connection from the operator to ClickHouse instances. In seconds." + query: + type: integer + minimum: 1 + maximum: 600 + description: "Timeout to perform SQL query from the operator to ClickHouse instances. In seconds."
+ addons: + type: object + description: "Configuration addons specifies additional settings" + properties: + rules: + type: array + description: "Array of set of rules per specified ClickHouse versions" + items: + type: object + properties: + version: + type: string + description: "ClickHouse version expression" + spec: + type: object + description: "spec" + properties: + configuration: + type: object + description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource" + properties: + users: + type: object + description: "see same section from CR spec" + x-kubernetes-preserve-unknown-fields: true + profiles: + type: object + description: "see same section from CR spec" + x-kubernetes-preserve-unknown-fields: true + quotas: + type: object + description: "see same section from CR spec" + x-kubernetes-preserve-unknown-fields: true + settings: + type: object + description: "see same section from CR spec" + x-kubernetes-preserve-unknown-fields: true + files: + type: object + description: "see same section from CR spec" + x-kubernetes-preserve-unknown-fields: true + metrics: + type: object + description: "parameters which use for connect to fetch metrics from clickhouse by clickhouse-operator" + properties: + timeouts: + type: object + description: | + Timeouts used to limit connection and queries from the metrics exporter to ClickHouse instances + Specified in seconds. + properties: + collect: + type: integer + minimum: 1 + maximum: 600 + description: | + Timeout used to limit metrics collection request. In seconds. + Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. + All collected metrics are returned. 
+ template: + type: object + description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment" + properties: + chi: + type: object + properties: + policy: + type: string + description: | + CHI template updates handling policy + Possible policy values: + - ReadOnStart. Accept CHIT updates on the operators start only. + - ApplyOnNextReconcile. Accept CHIT updates at all time. Apply news CHITs on next regular reconcile of the CHI + enum: + - "" + - "ReadOnStart" + - "ApplyOnNextReconcile" + path: + type: string + description: "Path to folder where ClickHouseInstallationTemplate .yaml manifests are located." + reconcile: + type: object + description: "allow tuning reconciling process" + properties: + runtime: + type: object + description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle" + properties: + reconcileCHIsThreadsNumber: + type: integer + minimum: 1 + maximum: 65535 + description: "How many goroutines will be used to reconcile CHIs in parallel, 10 by default" + reconcileShardsThreadsNumber: + type: integer + minimum: 1 + maximum: 65535 + description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default" + reconcileShardsMaxConcurrencyPercent: + type: integer + minimum: 0 + maximum: 100 + description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: + type: object + description: "Allow change default behavior for reconciling StatefulSet which generated by clickhouse-operator" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds + Possible options: + 1. 
abort - do nothing, just break the process and wait for admin. + 2. delete - delete newly created problematic StatefulSet. + 3. ignore (default) - ignore error, pretend nothing happened and move on to the next StatefulSet. + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for created/updated StatefulSet to be Ready" + pollInterval: + type: integer + description: "How many seconds to wait between checks for created/updated StatefulSet status" + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds + Possible options: + 1. abort - do nothing, just break the process and wait for admin. + 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration. + 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet. 
+ host: + type: object + description: | + Whether the operator during reconcile procedure should wait for a ClickHouse host: + - to be excluded from a ClickHouse cluster + - to complete all running queries + - to be included into a ClickHouse cluster + respectfully before moving forward + properties: + wait: + type: object + properties: + exclude: &TypeStringBool + type: string + description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be excluded from a ClickHouse cluster" + enum: + # List StringBoolXXX constants from model + - "" + - "0" + - "1" + - "False" + - "false" + - "True" + - "true" + - "No" + - "no" + - "Yes" + - "yes" + - "Off" + - "off" + - "On" + - "on" + - "Disable" + - "disable" + - "Enable" + - "enable" + - "Disabled" + - "disabled" + - "Enabled" + - "enabled" + queries: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries" + include: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster" + replicas: + type: object + description: "Whether the operator during reconcile procedure should wait for replicas to catch-up" + properties: + all: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up" + new: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up" + delay: + type: integer + description: "replication max absolute delay to consider replica is not delayed" + probes: + type: object + description: "What probes the operator should wait during host launch procedure" + properties: + startup: + !!merge <<: *TypeStringBool + description: | + Whether the operator during host launch procedure should wait for startup probe to succeed. 
+ In case probe is unspecified wait is assumed to be completed successfully. + Default option value is to do not wait. + readiness: + !!merge <<: *TypeStringBool + description: | + Whether the operator during host launch procedure should wait for readiness probe to succeed. + In case probe is unspecified wait is assumed to be completed successfully. + Default option value is to wait. + drop: + type: object + properties: + replicas: + type: object + description: | + Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated + properties: + onDelete: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop replicas when replica is deleted + onLostVolume: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop replicas when replica volume is lost + active: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated + annotation: + type: object + description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources" + properties: + include: + type: array + description: | + When propagating labels from the chi's `metadata.annotations` section to child objects' `metadata.annotations`, + include annotations with names from the following list + items: + type: string + exclude: + type: array + description: | + When propagating labels from the chi's `metadata.annotations` section to child objects' `metadata.annotations`, + exclude annotations with names from the following list + items: + type: string + label: + type: object + description: "defines which metadata.labels will include or exclude during render StatefulSet, Pod, PVC resources" + properties: + include: + type: array + description: | + When propagating labels from the chi's `metadata.labels` section to child 
objects' `metadata.labels`, + include labels from the following list + items: + type: string + exclude: + type: array + items: + type: string + description: | + When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`, + exclude labels from the following list + appendScope: + !!merge <<: *TypeStringBool + description: | + Whether to append *Scope* labels to StatefulSet and Pod + - "LabelShardScopeIndex" + - "LabelReplicaScopeIndex" + - "LabelCHIScopeIndex" + - "LabelCHIScopeCycleSize" + - "LabelCHIScopeCycleIndex" + - "LabelCHIScopeCycleOffset" + - "LabelClusterScopeIndex" + - "LabelClusterScopeCycleSize" + - "LabelClusterScopeCycleIndex" + - "LabelClusterScopeCycleOffset" + metrics: + type: object + description: "defines metrics exporter options" + properties: + labels: + type: object + description: "defines metric labels options" + properties: + exclude: + type: array + description: | + When adding labels to a metric exclude labels with names from the following list + items: + type: string + status: + type: object + description: "defines status options" + properties: + fields: + type: object + description: "defines status fields options" + properties: + action: + !!merge <<: *TypeStringBool + description: "Whether the operator should fill status field 'action'" + actions: + !!merge <<: *TypeStringBool + description: "Whether the operator should fill status field 'actions'" + error: + !!merge <<: *TypeStringBool + description: "Whether the operator should fill status field 'error'" + errors: + !!merge <<: *TypeStringBool + description: "Whether the operator should fill status field 'errors'" + statefulSet: + type: object + description: "define StatefulSet-specific parameters" + properties: + revisionHistoryLimit: + type: integer + description: "revisionHistoryLimit is the maximum number of revisions that will be\nmaintained in the StatefulSet's revision history. 
\nLook details in `statefulset.spec.revisionHistoryLimit`\n" + pod: + type: object + description: "define pod specific parameters" + properties: + terminationGracePeriod: + type: integer + description: "Optional duration in seconds the pod needs to terminate gracefully. \nLook details in `pod.spec.terminationGracePeriodSeconds`\n" + logger: + type: object + description: "allow setup clickhouse-operator logger behavior" + properties: + logtostderr: + type: string + description: "boolean, allows logs to stderr" + alsologtostderr: + type: string + description: "boolean allows logs to stderr and files both" + v: + type: string + description: "verbosity level of clickhouse-operator log, default - 1 max - 9" + stderrthreshold: + type: string + vmodule: + type: string + description: | + Comma-separated list of filename=N, where filename (can be a pattern) must have no .go ext, and N is a V level. + Ex.: file*=2 sets the 'V' to 2 in all files with names like file*. + log_backtrace_at: + type: string + description: | + It can be set to a file and line number with a logging line. + Ex.: file.go:123 + Each time when this line is being executed, a stack trace will be written to the Info log. 
From 4948d6d5464a63ab8f667ed8d9d1f7baede21ce6 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 12 Dec 2025 15:01:49 +0500 Subject: [PATCH 007/233] 0.26.0 --- release | 2 +- releases | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/release b/release index 3f44db947..4e8f395fa 100644 --- a/release +++ b/release @@ -1 +1 @@ -0.25.6 +0.26.0 diff --git a/releases b/releases index 76765ebc9..6c88ef957 100644 --- a/releases +++ b/releases @@ -1,3 +1,4 @@ +0.25.6 0.25.5 0.25.4 0.25.3 From 89f629bbd87d817c4f305eb4ae5e57de7642bc78 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 12 Dec 2025 15:16:24 +0500 Subject: [PATCH 008/233] env: manifests --- .../clickhouse-operator-install-ansible.yaml | 54 ++++++++--------- ...house-operator-install-bundle-v1beta1.yaml | 58 +++++++++---------- .../clickhouse-operator-install-bundle.yaml | 58 +++++++++---------- ...use-operator-install-template-v1beta1.yaml | 46 +++++++-------- .../clickhouse-operator-install-template.yaml | 46 +++++++-------- .../clickhouse-operator-install-tf.yaml | 54 ++++++++--------- deploy/operator/parts/crd.yaml | 14 ++--- 7 files changed, 165 insertions(+), 165 deletions(-) diff --git a/deploy/operator/clickhouse-operator-install-ansible.yaml b/deploy/operator/clickhouse-operator-install-ansible.yaml index c781de370..196533b00 100644 --- a/deploy/operator/clickhouse-operator-install-ansible.yaml +++ b/deploy/operator/clickhouse-operator-install-ansible.yaml @@ -11,14 +11,14 @@ # SINGULAR=clickhouseinstallation # PLURAL=clickhouseinstallations # SHORT=chi -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhouseinstallations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -1470,14 +1470,14 @@ spec: # SINGULAR=clickhouseinstallationtemplate # 
PLURAL=clickhouseinstallationtemplates # SHORT=chit -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhouseinstallationtemplates.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -2932,7 +2932,7 @@ kind: CustomResourceDefinition metadata: name: clickhouseoperatorconfigurations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -3470,14 +3470,14 @@ spec: --- # Template Parameters: # -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com labels: - clickhouse-keeper.altinity.com/chop: 0.25.6 + clickhouse-keeper.altinity.com/chop: 0.26.0 spec: group: clickhouse-keeper.altinity.com scope: Namespaced @@ -4368,7 +4368,7 @@ metadata: name: clickhouse-operator namespace: {{ namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 --- # Template Parameters: # @@ -4394,7 +4394,7 @@ metadata: name: clickhouse-operator namespace: {{ namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 rules: # @@ -4626,7 +4626,7 @@ metadata: name: clickhouse-operator namespace: {{ namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -4648,7 +4648,7 @@ metadata: name: etc-clickhouse-operator-files namespace: {{ namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: config.yaml: | @@ -5156,7 +5156,7 @@ metadata: name: etc-clickhouse-operator-confd-files namespace: {{ 
namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5172,7 +5172,7 @@ metadata: name: etc-clickhouse-operator-configd-files namespace: {{ namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-clickhouse-01-listen.xml: | @@ -5271,7 +5271,7 @@ metadata: name: etc-clickhouse-operator-templatesd-files namespace: {{ namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 001-templates.json.example: | @@ -5371,7 +5371,7 @@ metadata: name: etc-clickhouse-operator-usersd-files namespace: {{ namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-clickhouse-operator-profile.xml: | @@ -5434,7 +5434,7 @@ metadata: name: etc-keeper-operator-confd-files namespace: {{ namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5450,7 +5450,7 @@ metadata: name: etc-keeper-operator-configd-files namespace: {{ namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-keeper-01-default-config.xml: | @@ -5528,7 +5528,7 @@ metadata: name: etc-keeper-operator-templatesd-files namespace: {{ namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: readme: | @@ -5546,7 +5546,7 @@ metadata: name: etc-keeper-operator-usersd-files namespace: {{ namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5554,7 +5554,7 @@ data: # Template parameters available: # NAMESPACE={{ namespace }} # COMMENT= -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # 
CH_USERNAME_SECRET_PLAIN=clickhouse_operator # CH_PASSWORD_SECRET_PLAIN={{ password }} # @@ -5564,7 +5564,7 @@ metadata: name: clickhouse-operator namespace: {{ namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator type: Opaque stringData: @@ -5575,9 +5575,9 @@ stringData: # # NAMESPACE={{ namespace }} # COMMENT= -# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.6 +# OPERATOR_IMAGE=altinity/clickhouse-operator:0.26.0 # OPERATOR_IMAGE_PULL_POLICY=Always -# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.6 +# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.26.0 # METRICS_EXPORTER_IMAGE_PULL_POLICY=Always # # Setup Deployment for clickhouse-operator @@ -5588,7 +5588,7 @@ metadata: name: clickhouse-operator namespace: {{ namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator spec: replicas: 1 @@ -5636,7 +5636,7 @@ spec: name: etc-keeper-operator-usersd-files containers: - name: clickhouse-operator - image: altinity/clickhouse-operator:0.25.6 + image: altinity/clickhouse-operator:0.26.0 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder @@ -5712,7 +5712,7 @@ spec: name: op-metrics - name: metrics-exporter - image: altinity/metrics-exporter:0.25.6 + image: altinity/metrics-exporter:0.26.0 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder @@ -5803,7 +5803,7 @@ metadata: name: clickhouse-operator-metrics namespace: {{ namespace }} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator spec: ports: diff --git a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml index bc9215dc4..6a7842498 100644 --- a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml +++ b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml @@ -4,14 
+4,14 @@ # SINGULAR=clickhouseinstallation # PLURAL=clickhouseinstallations # SHORT=chi -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: clickhouseinstallations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -1453,14 +1453,14 @@ spec: # SINGULAR=clickhouseinstallationtemplate # PLURAL=clickhouseinstallationtemplates # SHORT=chit -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: clickhouseinstallationtemplates.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -2903,7 +2903,7 @@ kind: CustomResourceDefinition metadata: name: clickhouseoperatorconfigurations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -3432,14 +3432,14 @@ spec: --- # Template Parameters: # -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com labels: - clickhouse-keeper.altinity.com/chop: 0.25.6 + clickhouse-keeper.altinity.com/chop: 0.26.0 spec: group: clickhouse-keeper.altinity.com scope: Namespaced @@ -4327,7 +4327,7 @@ metadata: name: clickhouse-operator namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 # Template Parameters: # @@ -4352,7 +4352,7 @@ metadata: name: clickhouse-operator-kube-system #namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 rules: # # Core API group @@ -4572,7 +4572,7 @@ 
metadata: name: clickhouse-operator-kube-system #namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -4605,7 +4605,7 @@ metadata: name: clickhouse-operator namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 rules: # # Core API group @@ -4825,7 +4825,7 @@ metadata: name: clickhouse-operator namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -4847,7 +4847,7 @@ metadata: name: etc-clickhouse-operator-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: config.yaml: | @@ -5354,7 +5354,7 @@ metadata: name: etc-clickhouse-operator-confd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5370,7 +5370,7 @@ metadata: name: etc-clickhouse-operator-configd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-clickhouse-01-listen.xml: | @@ -5464,7 +5464,7 @@ metadata: name: etc-clickhouse-operator-templatesd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 001-templates.json.example: | @@ -5562,7 +5562,7 @@ metadata: name: etc-clickhouse-operator-usersd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-clickhouse-operator-profile.xml: | @@ -5624,7 +5624,7 @@ metadata: name: etc-keeper-operator-confd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 
0.26.0 app: clickhouse-operator data: --- @@ -5640,7 +5640,7 @@ metadata: name: etc-keeper-operator-configd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-keeper-01-default-config.xml: | @@ -5715,7 +5715,7 @@ metadata: name: etc-keeper-operator-templatesd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: readme: | @@ -5733,7 +5733,7 @@ metadata: name: etc-keeper-operator-usersd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5741,7 +5741,7 @@ data: # Template parameters available: # NAMESPACE=kube-system # COMMENT= -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # CH_USERNAME_SECRET_PLAIN=clickhouse_operator # CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password # @@ -5751,7 +5751,7 @@ metadata: name: clickhouse-operator namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator type: Opaque stringData: @@ -5762,9 +5762,9 @@ stringData: # # NAMESPACE=kube-system # COMMENT= -# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.6 +# OPERATOR_IMAGE=altinity/clickhouse-operator:0.26.0 # OPERATOR_IMAGE_PULL_POLICY=Always -# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.6 +# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.26.0 # METRICS_EXPORTER_IMAGE_PULL_POLICY=Always # # Setup Deployment for clickhouse-operator @@ -5775,7 +5775,7 @@ metadata: name: clickhouse-operator namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator spec: replicas: 1 @@ -5823,7 +5823,7 @@ spec: name: etc-keeper-operator-usersd-files containers: - name: clickhouse-operator - image: altinity/clickhouse-operator:0.25.6 + image: 
altinity/clickhouse-operator:0.26.0 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder @@ -5897,7 +5897,7 @@ spec: - containerPort: 9999 name: op-metrics - name: metrics-exporter - image: altinity/metrics-exporter:0.25.6 + image: altinity/metrics-exporter:0.26.0 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder @@ -5987,7 +5987,7 @@ metadata: name: clickhouse-operator-metrics namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator spec: ports: diff --git a/deploy/operator/clickhouse-operator-install-bundle.yaml b/deploy/operator/clickhouse-operator-install-bundle.yaml index 63f75781a..2ef6c61e5 100644 --- a/deploy/operator/clickhouse-operator-install-bundle.yaml +++ b/deploy/operator/clickhouse-operator-install-bundle.yaml @@ -4,14 +4,14 @@ # SINGULAR=clickhouseinstallation # PLURAL=clickhouseinstallations # SHORT=chi -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhouseinstallations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -1463,14 +1463,14 @@ spec: # SINGULAR=clickhouseinstallationtemplate # PLURAL=clickhouseinstallationtemplates # SHORT=chit -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhouseinstallationtemplates.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -2925,7 +2925,7 @@ kind: CustomResourceDefinition metadata: name: clickhouseoperatorconfigurations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com 
scope: Namespaced @@ -3463,14 +3463,14 @@ spec: --- # Template Parameters: # -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com labels: - clickhouse-keeper.altinity.com/chop: 0.25.6 + clickhouse-keeper.altinity.com/chop: 0.26.0 spec: group: clickhouse-keeper.altinity.com scope: Namespaced @@ -4361,7 +4361,7 @@ metadata: name: clickhouse-operator namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 --- # Template Parameters: # @@ -4387,7 +4387,7 @@ metadata: name: clickhouse-operator-kube-system #namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 rules: # @@ -4619,7 +4619,7 @@ metadata: name: clickhouse-operator-kube-system #namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -4653,7 +4653,7 @@ metadata: name: clickhouse-operator namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 rules: # @@ -4885,7 +4885,7 @@ metadata: name: clickhouse-operator namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -4907,7 +4907,7 @@ metadata: name: etc-clickhouse-operator-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: config.yaml: | @@ -5415,7 +5415,7 @@ metadata: name: etc-clickhouse-operator-confd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5431,7 +5431,7 @@ metadata: name: etc-clickhouse-operator-configd-files namespace: 
kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-clickhouse-01-listen.xml: | @@ -5530,7 +5530,7 @@ metadata: name: etc-clickhouse-operator-templatesd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 001-templates.json.example: | @@ -5630,7 +5630,7 @@ metadata: name: etc-clickhouse-operator-usersd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-clickhouse-operator-profile.xml: | @@ -5693,7 +5693,7 @@ metadata: name: etc-keeper-operator-confd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5709,7 +5709,7 @@ metadata: name: etc-keeper-operator-configd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-keeper-01-default-config.xml: | @@ -5787,7 +5787,7 @@ metadata: name: etc-keeper-operator-templatesd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: readme: | @@ -5805,7 +5805,7 @@ metadata: name: etc-keeper-operator-usersd-files namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5813,7 +5813,7 @@ data: # Template parameters available: # NAMESPACE=kube-system # COMMENT= -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # CH_USERNAME_SECRET_PLAIN=clickhouse_operator # CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password # @@ -5823,7 +5823,7 @@ metadata: name: clickhouse-operator namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: 
clickhouse-operator type: Opaque stringData: @@ -5834,9 +5834,9 @@ stringData: # # NAMESPACE=kube-system # COMMENT= -# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.6 +# OPERATOR_IMAGE=altinity/clickhouse-operator:0.26.0 # OPERATOR_IMAGE_PULL_POLICY=Always -# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.6 +# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.26.0 # METRICS_EXPORTER_IMAGE_PULL_POLICY=Always # # Setup Deployment for clickhouse-operator @@ -5847,7 +5847,7 @@ metadata: name: clickhouse-operator namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator spec: replicas: 1 @@ -5895,7 +5895,7 @@ spec: name: etc-keeper-operator-usersd-files containers: - name: clickhouse-operator - image: altinity/clickhouse-operator:0.25.6 + image: altinity/clickhouse-operator:0.26.0 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder @@ -5971,7 +5971,7 @@ spec: name: op-metrics - name: metrics-exporter - image: altinity/metrics-exporter:0.25.6 + image: altinity/metrics-exporter:0.26.0 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder @@ -6062,7 +6062,7 @@ metadata: name: clickhouse-operator-metrics namespace: kube-system labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator spec: ports: diff --git a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml index 456623492..bbc1adfa3 100644 --- a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml +++ b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml @@ -4,14 +4,14 @@ # SINGULAR=clickhouseinstallation # PLURAL=clickhouseinstallations # SHORT=chi -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: 
clickhouseinstallations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -1453,14 +1453,14 @@ spec: # SINGULAR=clickhouseinstallationtemplate # PLURAL=clickhouseinstallationtemplates # SHORT=chit -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: clickhouseinstallationtemplates.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -2903,7 +2903,7 @@ kind: CustomResourceDefinition metadata: name: clickhouseoperatorconfigurations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -3432,14 +3432,14 @@ spec: --- # Template Parameters: # -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com labels: - clickhouse-keeper.altinity.com/chop: 0.25.6 + clickhouse-keeper.altinity.com/chop: 0.26.0 spec: group: clickhouse-keeper.altinity.com scope: Namespaced @@ -4327,7 +4327,7 @@ metadata: name: clickhouse-operator namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 # Template Parameters: # @@ -4352,7 +4352,7 @@ metadata: name: clickhouse-operator-${OPERATOR_NAMESPACE} #namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 rules: # # Core API group @@ -4572,7 +4572,7 @@ metadata: name: clickhouse-operator-${OPERATOR_NAMESPACE} #namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 roleRef: apiGroup: 
rbac.authorization.k8s.io kind: ClusterRole @@ -4594,7 +4594,7 @@ metadata: name: etc-clickhouse-operator-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: config.yaml: | @@ -5101,7 +5101,7 @@ metadata: name: etc-clickhouse-operator-confd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5117,7 +5117,7 @@ metadata: name: etc-clickhouse-operator-configd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-clickhouse-01-listen.xml: | @@ -5211,7 +5211,7 @@ metadata: name: etc-clickhouse-operator-templatesd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 001-templates.json.example: | @@ -5309,7 +5309,7 @@ metadata: name: etc-clickhouse-operator-usersd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-clickhouse-operator-profile.xml: | @@ -5371,7 +5371,7 @@ metadata: name: etc-keeper-operator-confd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5387,7 +5387,7 @@ metadata: name: etc-keeper-operator-configd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-keeper-01-default-config.xml: | @@ -5462,7 +5462,7 @@ metadata: name: etc-keeper-operator-templatesd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: readme: | 
@@ -5480,7 +5480,7 @@ metadata: name: etc-keeper-operator-usersd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5488,7 +5488,7 @@ data: # Template parameters available: # NAMESPACE=${OPERATOR_NAMESPACE} # COMMENT= -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # CH_USERNAME_SECRET_PLAIN=clickhouse_operator # CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password # @@ -5498,7 +5498,7 @@ metadata: name: clickhouse-operator namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator type: Opaque stringData: @@ -5522,7 +5522,7 @@ metadata: name: clickhouse-operator namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator spec: replicas: 1 @@ -5734,7 +5734,7 @@ metadata: name: clickhouse-operator-metrics namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator spec: ports: diff --git a/deploy/operator/clickhouse-operator-install-template.yaml b/deploy/operator/clickhouse-operator-install-template.yaml index ea1238ee5..6f5aea129 100644 --- a/deploy/operator/clickhouse-operator-install-template.yaml +++ b/deploy/operator/clickhouse-operator-install-template.yaml @@ -4,14 +4,14 @@ # SINGULAR=clickhouseinstallation # PLURAL=clickhouseinstallations # SHORT=chi -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhouseinstallations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -1463,14 +1463,14 @@ spec: # SINGULAR=clickhouseinstallationtemplate # PLURAL=clickhouseinstallationtemplates # 
SHORT=chit -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhouseinstallationtemplates.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -2925,7 +2925,7 @@ kind: CustomResourceDefinition metadata: name: clickhouseoperatorconfigurations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -3463,14 +3463,14 @@ spec: --- # Template Parameters: # -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com labels: - clickhouse-keeper.altinity.com/chop: 0.25.6 + clickhouse-keeper.altinity.com/chop: 0.26.0 spec: group: clickhouse-keeper.altinity.com scope: Namespaced @@ -4361,7 +4361,7 @@ metadata: name: clickhouse-operator namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 --- # Template Parameters: # @@ -4387,7 +4387,7 @@ metadata: name: clickhouse-operator-${OPERATOR_NAMESPACE} #namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 rules: # @@ -4619,7 +4619,7 @@ metadata: name: clickhouse-operator-${OPERATOR_NAMESPACE} #namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -4641,7 +4641,7 @@ metadata: name: etc-clickhouse-operator-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: config.yaml: | @@ -5149,7 +5149,7 @@ metadata: name: 
etc-clickhouse-operator-confd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5165,7 +5165,7 @@ metadata: name: etc-clickhouse-operator-configd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-clickhouse-01-listen.xml: | @@ -5264,7 +5264,7 @@ metadata: name: etc-clickhouse-operator-templatesd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 001-templates.json.example: | @@ -5364,7 +5364,7 @@ metadata: name: etc-clickhouse-operator-usersd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-clickhouse-operator-profile.xml: | @@ -5427,7 +5427,7 @@ metadata: name: etc-keeper-operator-confd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5443,7 +5443,7 @@ metadata: name: etc-keeper-operator-configd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-keeper-01-default-config.xml: | @@ -5521,7 +5521,7 @@ metadata: name: etc-keeper-operator-templatesd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: readme: | @@ -5539,7 +5539,7 @@ metadata: name: etc-keeper-operator-usersd-files namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5547,7 +5547,7 @@ data: # Template parameters available: # 
NAMESPACE=${OPERATOR_NAMESPACE} # COMMENT= -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # CH_USERNAME_SECRET_PLAIN=clickhouse_operator # CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password # @@ -5557,7 +5557,7 @@ metadata: name: clickhouse-operator namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator type: Opaque stringData: @@ -5581,7 +5581,7 @@ metadata: name: clickhouse-operator namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator spec: replicas: 1 @@ -5796,7 +5796,7 @@ metadata: name: clickhouse-operator-metrics namespace: ${OPERATOR_NAMESPACE} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator spec: ports: diff --git a/deploy/operator/clickhouse-operator-install-tf.yaml b/deploy/operator/clickhouse-operator-install-tf.yaml index a62e6a2a0..707bf9190 100644 --- a/deploy/operator/clickhouse-operator-install-tf.yaml +++ b/deploy/operator/clickhouse-operator-install-tf.yaml @@ -11,14 +11,14 @@ # SINGULAR=clickhouseinstallation # PLURAL=clickhouseinstallations # SHORT=chi -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhouseinstallations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -1470,14 +1470,14 @@ spec: # SINGULAR=clickhouseinstallationtemplate # PLURAL=clickhouseinstallationtemplates # SHORT=chit -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhouseinstallationtemplates.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: 
clickhouse.altinity.com scope: Namespaced @@ -2932,7 +2932,7 @@ kind: CustomResourceDefinition metadata: name: clickhouseoperatorconfigurations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -3470,14 +3470,14 @@ spec: --- # Template Parameters: # -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com labels: - clickhouse-keeper.altinity.com/chop: 0.25.6 + clickhouse-keeper.altinity.com/chop: 0.26.0 spec: group: clickhouse-keeper.altinity.com scope: Namespaced @@ -4368,7 +4368,7 @@ metadata: name: clickhouse-operator namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 --- # Template Parameters: # @@ -4394,7 +4394,7 @@ metadata: name: clickhouse-operator namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 rules: # @@ -4626,7 +4626,7 @@ metadata: name: clickhouse-operator namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -4648,7 +4648,7 @@ metadata: name: etc-clickhouse-operator-files namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: config.yaml: | @@ -5156,7 +5156,7 @@ metadata: name: etc-clickhouse-operator-confd-files namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5172,7 +5172,7 @@ metadata: name: etc-clickhouse-operator-configd-files namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 
01-clickhouse-01-listen.xml: | @@ -5271,7 +5271,7 @@ metadata: name: etc-clickhouse-operator-templatesd-files namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 001-templates.json.example: | @@ -5371,7 +5371,7 @@ metadata: name: etc-clickhouse-operator-usersd-files namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-clickhouse-operator-profile.xml: | @@ -5434,7 +5434,7 @@ metadata: name: etc-keeper-operator-confd-files namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5450,7 +5450,7 @@ metadata: name: etc-keeper-operator-configd-files namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: 01-keeper-01-default-config.xml: | @@ -5528,7 +5528,7 @@ metadata: name: etc-keeper-operator-templatesd-files namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: readme: | @@ -5546,7 +5546,7 @@ metadata: name: etc-keeper-operator-usersd-files namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator data: --- @@ -5554,7 +5554,7 @@ data: # Template parameters available: # NAMESPACE=${namespace} # COMMENT= -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # CH_USERNAME_SECRET_PLAIN=clickhouse_operator # CH_PASSWORD_SECRET_PLAIN=${password} # @@ -5564,7 +5564,7 @@ metadata: name: clickhouse-operator namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator type: Opaque stringData: @@ -5575,9 +5575,9 @@ stringData: # # NAMESPACE=${namespace} # COMMENT= -# 
OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.6 +# OPERATOR_IMAGE=altinity/clickhouse-operator:0.26.0 # OPERATOR_IMAGE_PULL_POLICY=Always -# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.6 +# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.26.0 # METRICS_EXPORTER_IMAGE_PULL_POLICY=Always # # Setup Deployment for clickhouse-operator @@ -5588,7 +5588,7 @@ metadata: name: clickhouse-operator namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator spec: replicas: 1 @@ -5636,7 +5636,7 @@ spec: name: etc-keeper-operator-usersd-files containers: - name: clickhouse-operator - image: altinity/clickhouse-operator:0.25.6 + image: altinity/clickhouse-operator:0.26.0 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder @@ -5712,7 +5712,7 @@ spec: name: op-metrics - name: metrics-exporter - image: altinity/metrics-exporter:0.25.6 + image: altinity/metrics-exporter:0.26.0 imagePullPolicy: Always volumeMounts: - name: etc-clickhouse-operator-folder @@ -5803,7 +5803,7 @@ metadata: name: clickhouse-operator-metrics namespace: ${namespace} labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 app: clickhouse-operator spec: ports: diff --git a/deploy/operator/parts/crd.yaml b/deploy/operator/parts/crd.yaml index b666e6e13..beb4a811e 100644 --- a/deploy/operator/parts/crd.yaml +++ b/deploy/operator/parts/crd.yaml @@ -4,14 +4,14 @@ # SINGULAR=clickhouseinstallation # PLURAL=clickhouseinstallations # SHORT=chi -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhouseinstallations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -3556,14 +3556,14 @@ spec: # SINGULAR=clickhouseinstallationtemplate # 
PLURAL=clickhouseinstallationtemplates # SHORT=chit -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhouseinstallationtemplates.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -7111,7 +7111,7 @@ kind: CustomResourceDefinition metadata: name: clickhouseoperatorconfigurations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced @@ -7994,14 +7994,14 @@ spec: --- # Template Parameters: # -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com labels: - clickhouse-keeper.altinity.com/chop: 0.25.6 + clickhouse-keeper.altinity.com/chop: 0.26.0 spec: group: clickhouse-keeper.altinity.com scope: Namespaced From 06cb9f455a416246410e6ec092ee7ca869b1d1f5 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 12 Dec 2025 15:16:39 +0500 Subject: [PATCH 009/233] env: helm chart --- deploy/helm/clickhouse-operator/Chart.yaml | 4 ++-- deploy/helm/clickhouse-operator/README.md | 2 +- ...ition-clickhouseinstallations.clickhouse.altinity.com.yaml | 4 ++-- ...ickhouseinstallationtemplates.clickhouse.altinity.com.yaml | 4 ++-- ...usekeeperinstallations.clickhouse-keeper.altinity.com.yaml | 4 ++-- ...ckhouseoperatorconfigurations.clickhouse.altinity.com.yaml | 2 +- .../templates/generated/Deployment-clickhouse-operator.yaml | 4 ++-- .../templates/generated/Secret-clickhouse-operator.yaml | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) diff --git a/deploy/helm/clickhouse-operator/Chart.yaml b/deploy/helm/clickhouse-operator/Chart.yaml index d5d319523..ad9a7830a 100644 --- 
a/deploy/helm/clickhouse-operator/Chart.yaml +++ b/deploy/helm/clickhouse-operator/Chart.yaml @@ -17,8 +17,8 @@ description: |- kubectl apply -f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml ``` type: application -version: 0.25.6 -appVersion: 0.25.6 +version: 0.26.0 +appVersion: 0.26.0 home: https://github.com/Altinity/clickhouse-operator icon: https://logosandtypes.com/wp-content/uploads/2020/12/altinity.svg maintainers: diff --git a/deploy/helm/clickhouse-operator/README.md b/deploy/helm/clickhouse-operator/README.md index abf5601a4..62f288c2b 100644 --- a/deploy/helm/clickhouse-operator/README.md +++ b/deploy/helm/clickhouse-operator/README.md @@ -1,6 +1,6 @@ # altinity-clickhouse-operator -![Version: 0.25.6](https://img.shields.io/badge/Version-0.25.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.25.6](https://img.shields.io/badge/AppVersion-0.25.6-informational?style=flat-square) +![Version: 0.26.0](https://img.shields.io/badge/Version-0.26.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.26.0](https://img.shields.io/badge/AppVersion-0.26.0-informational?style=flat-square) Helm chart to deploy [altinity-clickhouse-operator](https://github.com/Altinity/clickhouse-operator). 
diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml index 1a42a88be..8869bd1f5 100644 --- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml +++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml @@ -4,14 +4,14 @@ # SINGULAR=clickhouseinstallation # PLURAL=clickhouseinstallations # SHORT=chi -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhouseinstallations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml index 0779a3051..90ec27602 100644 --- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml +++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml @@ -4,14 +4,14 @@ # SINGULAR=clickhouseinstallationtemplate # PLURAL=clickhouseinstallationtemplates # SHORT=chit -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhouseinstallationtemplates.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced diff --git 
a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml index c2604dee9..201f1ffcb 100644 --- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml +++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml @@ -1,13 +1,13 @@ # Template Parameters: # -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com labels: - clickhouse-keeper.altinity.com/chop: 0.25.6 + clickhouse-keeper.altinity.com/chop: 0.26.0 spec: group: clickhouse-keeper.altinity.com scope: Namespaced diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml index 19434d6d5..7a7ca67e7 100644 --- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml +++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml @@ -7,7 +7,7 @@ kind: CustomResourceDefinition metadata: name: clickhouseoperatorconfigurations.clickhouse.altinity.com labels: - clickhouse.altinity.com/chop: 0.25.6 + clickhouse.altinity.com/chop: 0.26.0 spec: group: clickhouse.altinity.com scope: Namespaced diff --git a/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml b/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml index 
1b0ff45ad..342933cec 100644 --- a/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml +++ b/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml @@ -2,9 +2,9 @@ # # NAMESPACE=kube-system # COMMENT= -# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.6 +# OPERATOR_IMAGE=altinity/clickhouse-operator:0.26.0 # OPERATOR_IMAGE_PULL_POLICY=Always -# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.6 +# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.26.0 # METRICS_EXPORTER_IMAGE_PULL_POLICY=Always # # Setup Deployment for clickhouse-operator diff --git a/deploy/helm/clickhouse-operator/templates/generated/Secret-clickhouse-operator.yaml b/deploy/helm/clickhouse-operator/templates/generated/Secret-clickhouse-operator.yaml index faf5922dc..aed644b44 100644 --- a/deploy/helm/clickhouse-operator/templates/generated/Secret-clickhouse-operator.yaml +++ b/deploy/helm/clickhouse-operator/templates/generated/Secret-clickhouse-operator.yaml @@ -3,7 +3,7 @@ # Template parameters available: # NAMESPACE=kube-system # COMMENT= -# OPERATOR_VERSION=0.25.6 +# OPERATOR_VERSION=0.26.0 # CH_USERNAME_SECRET_PLAIN=clickhouse_operator # CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password # From 9f5ca941973088bc33f91fd6981d1a1230352d3d Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 24 Dec 2025 16:57:46 +0500 Subject: [PATCH 010/233] dev: unify chk --- .../v1/type_chk.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go index 9f1c01e86..1341f2b66 100644 --- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go +++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go @@ -41,10 +41,10 @@ func (cr *ClickHouseKeeperInstallation) GetSpecA() any { } func (cr *ClickHouseKeeperInstallation) GetRuntime() apiChi.ICustomResourceRuntime { - return 
cr.ensureRuntime() + return cr.EnsureRuntime() } -func (cr *ClickHouseKeeperInstallation) ensureRuntime() *ClickHouseKeeperInstallationRuntime { +func (cr *ClickHouseKeeperInstallation) EnsureRuntime() *ClickHouseKeeperInstallationRuntime { if cr == nil { return nil } @@ -172,7 +172,7 @@ func (cr *ClickHouseKeeperInstallation) FillStatus(endpoints util.Slice[string], ClustersCount: cr.ClustersCount(), ShardsCount: cr.ShardsCount(), HostsCount: cr.HostsCount(), - TaskID: "", + TaskID: cr.GetSpecT().GetTaskID().Value(), HostsUpdatedCount: 0, HostsAddedCount: 0, HostsUnchangedCount: 0, @@ -454,20 +454,20 @@ func (cr *ClickHouseKeeperInstallation) Copy(opts types.CopyCROptions) *ClickHou return nil } - var chi2 *ClickHouseKeeperInstallation - if err := json.Unmarshal(jsonBytes, &chi2); err != nil { + var cr2 *ClickHouseKeeperInstallation + if err := json.Unmarshal(jsonBytes, &cr2); err != nil { return nil } if opts.SkipStatus { - chi2.Status = nil + cr2.Status = nil } if opts.SkipManagedFields { - chi2.SetManagedFields(nil) + cr2.SetManagedFields(nil) } - return chi2 + return cr2 } // JSON returns JSON string @@ -499,7 +499,7 @@ func (cr *ClickHouseKeeperInstallation) YAML(opts types.CopyCROptions) string { return string(yamlBytes) } -// FirstHost returns first host of the CHI +// FirstHost returns first host of the CR func (cr *ClickHouseKeeperInstallation) FirstHost() *apiChi.Host { var result *apiChi.Host cr.WalkHosts(func(host *apiChi.Host) error { From fef9211504aa681840a5843ea8966a974be6a909 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 24 Dec 2025 16:58:16 +0500 Subject: [PATCH 011/233] dev: introduce action plan to chk status --- .../v1/type_status.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go index 4ce19b13c..5fa70cec3 100644 --- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go 
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go @@ -75,6 +75,7 @@ type Status struct { Endpoints []string `json:"endpoints,omitempty" yaml:"endpoints,omitempty"` NormalizedCR *ClickHouseKeeperInstallation `json:"normalized,omitempty" yaml:"normalized,omitempty"` NormalizedCRCompleted *ClickHouseKeeperInstallation `json:"normalizedCompleted,omitempty" yaml:"normalizedCompleted,omitempty"` + ActionPlan *chi.ActionPlan `json:"actionPlan,omitempty" yaml:"actionPlan,omitempty"` HostsWithTablesCreated []string `json:"hostsWithTablesCreated,omitempty" yaml:"hostsWithTablesCreated,omitempty"` HostsWithReplicaCaughtUp []string `json:"hostsWithReplicaCaughtUp,omitempty" yaml:"hostsWithReplicaCaughtUp,omitempty"` UsedTemplates []*chi.TemplateRef `json:"usedTemplates,omitempty" yaml:"usedTemplates,omitempty"` @@ -281,7 +282,7 @@ func (s *Status) HostCompleted() { } // ReconcileStart marks reconcile start -func (s *Status) ReconcileStart(deleteHostsCount int) { +func (s *Status) ReconcileStart(ap chi.IActionPlan) { doWithWriteLock(s, func(s *Status) { if s == nil { return @@ -292,7 +293,8 @@ func (s *Status) ReconcileStart(deleteHostsCount int) { s.HostsUnchangedCount = 0 s.HostsCompletedCount = 0 s.HostsDeletedCount = 0 - s.HostsDeleteCount = deleteHostsCount + s.HostsDeleteCount = ap.GetRemovedHostsNum() + s.ActionPlan = ap.(*chi.ActionPlan) pushTaskIDStartedNoSync(s) }) } @@ -338,6 +340,13 @@ func (s *Status) DeleteStart() { }) } +// SetActionPlan sets action plan +func (s *Status) SetActionPlan(ap chi.IActionPlan) { + doWithWriteLock(s, func(s *Status) { + s.ActionPlan = ap.(*chi.ActionPlan) + }) +} + func prepareOptions(opts types.CopyStatusOptions) types.CopyStatusOptions { if opts.FieldGroupInheritable { opts.Copy.TaskIDsStarted = true @@ -531,6 +540,9 @@ func (s *Status) CopyFrom(f *Status, opts types.CopyStatusOptions) { if opts.Copy.NormalizedCRCompleted { s.NormalizedCRCompleted = from.NormalizedCRCompleted } + if opts.Copy.ActionPlan { + s.ActionPlan 
= from.ActionPlan + } if opts.Copy.HostsWithTablesCreated { s.HostsWithTablesCreated = nil if len(from.HostsWithTablesCreated) > 0 { From e3e54958e6be5ec8711788280c779ca77dc1320f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 24 Dec 2025 16:58:54 +0500 Subject: [PATCH 012/233] dev: introduce action plan as a part of chk --- pkg/apis/clickhouse-keeper.altinity.com/v1/types.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go index 69c676eea..3474ad439 100644 --- a/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go +++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go @@ -15,20 +15,20 @@ package v1 import ( - "github.com/altinity/clickhouse-operator/pkg/apis/swversion" "sync" meta "k8s.io/apimachinery/pkg/apis/meta/v1" apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + "github.com/altinity/clickhouse-operator/pkg/apis/swversion" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClickHouseKeeperInstallation defines a ClickHouse Keeper ChkCluster type ClickHouseKeeperInstallation struct { - meta.TypeMeta `json:",inline" yaml:",inline"` - meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` + meta.TypeMeta `json:",inline" yaml:",inline"` + meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` Spec ChkSpec `json:"spec" yaml:"spec"` Status *Status `json:"status,omitempty" yaml:"status,omitempty"` @@ -43,6 +43,7 @@ type ClickHouseKeeperInstallationRuntime struct { commonConfigMutex sync.Mutex `json:"-" yaml:"-"` MinVersion *swversion.SoftWareVersion `json:"-" yaml:"-"` MaxVersion *swversion.SoftWareVersion `json:"-" yaml:"-"` + ActionPlan apiChi.IActionPlan `json:"-" yaml:"-"` } func newClickHouseKeeperInstallationRuntime() *ClickHouseKeeperInstallationRuntime { From 63c67429bd77610e551dd40de31774d672cfb995 Mon Sep 17 00:00:00 2001 
From: Vladislav Klimenko Date: Wed, 24 Dec 2025 16:59:18 +0500 Subject: [PATCH 013/233] dev: unify naming --- pkg/apis/clickhouse.altinity.com/v1/type_chi.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go index 0edef3cd0..2d0b31257 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go @@ -482,20 +482,20 @@ func (cr *ClickHouseInstallation) Copy(opts types.CopyCROptions) *ClickHouseInst return nil } - var chi2 *ClickHouseInstallation - if err := json.Unmarshal(jsonBytes, &chi2); err != nil { + var cr2 *ClickHouseInstallation + if err := json.Unmarshal(jsonBytes, &cr2); err != nil { return nil } if opts.SkipStatus { - chi2.Status = nil + cr2.Status = nil } if opts.SkipManagedFields { - chi2.SetManagedFields(nil) + cr2.SetManagedFields(nil) } - return chi2 + return cr2 } // JSON returns JSON string @@ -527,7 +527,7 @@ func (cr *ClickHouseInstallation) YAML(opts types.CopyCROptions) string { return string(yamlBytes) } -// FirstHost returns first host of the CHI +// FirstHost returns first host of the CR func (cr *ClickHouseInstallation) FirstHost() *Host { var result *Host cr.WalkHosts(func(host *Host) error { From 8ad813f5b851910b9a8f2b99762e33a723dc962a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 24 Dec 2025 17:17:59 +0500 Subject: [PATCH 014/233] dev: remove unused --- pkg/apis/clickhouse.altinity.com/v1/types.go | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go index 9206ef9ce..d25fbc157 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/types.go +++ b/pkg/apis/clickhouse.altinity.com/v1/types.go @@ -39,8 +39,8 @@ type ClickHouseInstallation struct { meta.TypeMeta `json:",inline" yaml:",inline"` meta.ObjectMeta `json:"metadata,omitempty" 
yaml:"metadata,omitempty"` - Spec ChiSpec `json:"spec" yaml:"spec"` - Status *Status `json:"status,omitempty" yaml:"status,omitempty"` + Spec ChiSpec `json:"spec" yaml:"spec"` + Status *Status `json:"status,omitempty" yaml:"status,omitempty"` runtime *ClickHouseInstallationRuntime `json:"-" yaml:"-"` statusCreatorMutex sync.Mutex `json:"-" yaml:"-"` @@ -73,20 +73,6 @@ func (runtime *ClickHouseInstallationRuntime) UnlockCommonConfig() { runtime.commonConfigMutex.Unlock() } -func (runtime *ClickHouseInstallationRuntime) HasReferenceSoftwareVersion() bool { - if runtime == nil { - return false - } - return runtime.MinVersion != nil -} - -func (runtime *ClickHouseInstallationRuntime) GetReferenceSoftwareVersion() *swversion.SoftWareVersion { - if runtime.HasReferenceSoftwareVersion() { - return runtime.MinVersion - } - return nil -} - // +genclient // +genclient:noStatus // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object From c8a9b6c7009b55515f46814796424abdb18199fc Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 24 Dec 2025 17:18:26 +0500 Subject: [PATCH 015/233] dev: clarify logger --- pkg/controller/chi/worker-reconciler-chi.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/controller/chi/worker-reconciler-chi.go b/pkg/controller/chi/worker-reconciler-chi.go index c08f621d3..e7ca5f6bb 100644 --- a/pkg/controller/chi/worker-reconciler-chi.go +++ b/pkg/controller/chi/worker-reconciler-chi.go @@ -182,7 +182,7 @@ func (w *worker) logSWVersion(ctx context.Context, cr *api.ClickHouseInstallatio l.M(host).Info("Host software version: %s %s", host.GetName(), host.Runtime.Version.Render()) return nil }) - l.M(cr).Info("CR software versions [min, max]: %s %s", cr.GetMinVersion().Render(), cr.GetMaxVersion().Render()) + l.M(cr).Info("CR software versions min=%s max=%s", cr.GetMinVersion().Render(), cr.GetMaxVersion().Render()) } // reconcile reconciles Custom Resource @@ -405,6 +405,7 @@ func (w *worker) 
hostForceRestart(ctx context.Context, host *api.Host, opts *sta } metrics.HostReconcilesRestart(ctx, host.GetCR()) + return nil } From a025f68f3212aa9a6f8acd4bb3fd4c66991dde10 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 24 Dec 2025 17:18:48 +0500 Subject: [PATCH 016/233] dev: unify naming --- pkg/controller/chi/worker.go | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go index e13a17864..f53562e6e 100644 --- a/pkg/controller/chi/worker.go +++ b/pkg/controller/chi/worker.go @@ -22,6 +22,8 @@ import ( core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/altinity/queue" + log "github.com/altinity/clickhouse-operator/pkg/announcer" api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/apis/common/types" @@ -43,7 +45,6 @@ import ( commonNormalizer "github.com/altinity/clickhouse-operator/pkg/model/common/normalizer" "github.com/altinity/clickhouse-operator/pkg/model/managers" "github.com/altinity/clickhouse-operator/pkg/util" - "github.com/altinity/queue" ) // FinalizerName specifies name of the finalizer to be used with CHI @@ -528,19 +529,19 @@ func (w *worker) logHosts(cr api.ICustomResource) { }) } -func (w *worker) createTemplatedCR(_chi *api.ClickHouseInstallation, _opts ...*commonNormalizer.Options[api.ClickHouseInstallation]) *api.ClickHouseInstallation { - l := w.a.V(1).M(_chi).F() +func (w *worker) createTemplatedCR(_cr *api.ClickHouseInstallation, _opts ...*commonNormalizer.Options[api.ClickHouseInstallation]) *api.ClickHouseInstallation { + l := w.a.V(1).M(_cr).F() - if _chi.HasAncestor() { - l.Info("CR has an ancestor, use it as a base for reconcile. CR: %s", util.NamespaceNameString(_chi)) + if _cr.HasAncestor() { + l.Info("CR has an ancestor, use it as a base for reconcile. 
CR: %s", util.NamespaceNameString(_cr)) } else { - l.Info("CR has NO ancestor, use empty base for reconcile. CR: %s", util.NamespaceNameString(_chi)) + l.Info("CR has NO ancestor, use empty base for reconcile. CR: %s", util.NamespaceNameString(_cr)) } - chi := w.createTemplated(_chi, _opts...) - chi.SetAncestor(w.createTemplated(_chi.GetAncestorT())) + cr := w.createTemplated(_cr, _opts...) + cr.SetAncestor(w.createTemplated(_cr.GetAncestorT())) - return chi + return cr } func (w *worker) createTemplated(c *api.ClickHouseInstallation, _opts ...*commonNormalizer.Options[api.ClickHouseInstallation]) *api.ClickHouseInstallation { @@ -548,6 +549,6 @@ func (w *worker) createTemplated(c *api.ClickHouseInstallation, _opts ...*common if len(_opts) > 0 { opts = _opts[0] } - chi, _ := w.normalizer.CreateTemplated(c, opts) - return chi + cr, _ := w.normalizer.CreateTemplated(c, opts) + return cr } From d39f86bb2b24bd533451c31f5706a6a5af32b379 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 24 Dec 2025 17:19:25 +0500 Subject: [PATCH 017/233] dev: introduce force restart and action plan --- pkg/controller/chk/worker-reconciler-chk.go | 73 ++++++++++++++++----- 1 file changed, 56 insertions(+), 17 deletions(-) diff --git a/pkg/controller/chk/worker-reconciler-chk.go b/pkg/controller/chk/worker-reconciler-chk.go index c440dd23c..71bb55bde 100644 --- a/pkg/controller/chk/worker-reconciler-chk.go +++ b/pkg/controller/chk/worker-reconciler-chk.go @@ -51,19 +51,16 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *apiChk.ClickHouseKee new = w.buildCR(ctx, new) - actionPlan := api.MakeActionPlan(new.GetAncestorT(), new) - w.a.M(new).V(1).Info(actionPlan.Log("buildCR")) - switch { - case actionPlan.HasActionsToDo(): + case new.EnsureRuntime().ActionPlan.HasActionsToDo(): w.a.M(new).F().Info("ActionPlan has actions - continue reconcile") default: w.a.M(new).F().Info("ActionPlan has no actions - abort reconcile") return nil } - w.markReconcileStart(ctx, new, 
actionPlan) - w.setHostStatusesPreliminary(ctx, new, actionPlan) + w.markReconcileStart(ctx, new) + w.setHostStatusesPreliminary(ctx, new) if err := w.reconcile(ctx, new); err != nil { // Something went wrong @@ -114,6 +111,11 @@ func (w *worker) buildCR(ctx context.Context, _cr *apiChk.ClickHouseKeeperInstal w.fillCurSTS(ctx, cr) w.logSWVersion(ctx, cr) + actionPlan := api.MakeActionPlan(cr.GetAncestorT(), cr) + cr.EnsureRuntime().ActionPlan = actionPlan + cr.EnsureStatus().SetActionPlan(actionPlan) + w.a.V(1).M(cr).Info(actionPlan.Log("buildCR")) + return cr } @@ -154,7 +156,7 @@ func (w *worker) logSWVersion(ctx context.Context, cr *apiChk.ClickHouseKeeperIn l.M(host).Info("Host software version: %s %s", host.GetName(), host.Runtime.Version.Render()) return nil }) - l.M(cr).Info("CR software versions [min, max]: %s %s", cr.GetMinVersion().Render(), cr.GetMaxVersion().Render()) + l.M(cr).Info("CR software versions min=%s max=%s", cr.GetMinVersion().Render(), cr.GetMaxVersion().Render()) } // reconcile reconciles Custom Resource @@ -345,18 +347,12 @@ func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.Host, o host.Runtime.CurStatefulSet, _ = w.c.kube.STS().Get(ctx, host) w.a.V(1).M(host).F().Info("Reconcile host: %s. App version: %s", host.GetName(), version) - // In case we have to force-restart host - // We'll do it via replicas: 0 in StatefulSet. if w.shouldForceRestartHost(ctx, host) { - w.a.V(1).M(host).F().Info("Reconcile host: %s. Shutting host down due to force restart", host.GetName()) - w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, true) - _ = w.stsReconciler.ReconcileStatefulSet(ctx, host, false, opts) - metrics.HostReconcilesRestart(ctx, host.GetCR()) - // At this moment StatefulSet has 0 replicas. - // First stage of RollingUpdate completed. 
+ w.a.V(1).M(host).F().Info("Reconcile host STS force restart: %s", host.GetName()) + _ = w.hostForceRestart(ctx, host, opts) } - w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, false) + w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, host.IsStopped()) // We are in place, where we can reconcile StatefulSet to desired configuration. w.a.V(1).M(host).F().Info("Reconcile host STS: %s. Reconcile StatefulSet", host.GetName()) @@ -381,6 +377,20 @@ func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.Host, o return err } +func (w *worker) hostForceRestart(ctx context.Context, host *api.Host, opts *statefulset.ReconcileOptions) error { + w.a.V(1).M(host).F().Info("Reconcile host. Force restart: %s", host.GetName()) + + // In case we have to force-restart host + // We'll do it via replicas: 0 in StatefulSet. + w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, true) + _ = w.stsReconciler.ReconcileStatefulSet(ctx, host, false, opts) + metrics.HostReconcilesRestart(ctx, host.GetCR()) + // At this moment StatefulSet has 0 replicas. + // First stage of RollingUpdate completed. + + return nil +} + // reconcileHostService reconciles host's Service func (w *worker) reconcileHostService(ctx context.Context, host *api.Host) error { service := w.task.Creator().CreateService(interfaces.ServiceHost, host).First() @@ -489,7 +499,7 @@ func (w *worker) reconcileShardsAndHosts(ctx context.Context, shards []*apiChk.C // Process shards using specified concurrency level while maintaining specified max concurrency percentage. // Loop over shards. workersNum := w.getReconcileShardsWorkersNum(shards, opts) - w.a.V(1).Info("Starting rest of shards on workers: %d", workersNum) + w.a.V(1).Info("Starting rest of shards on workers. 
Workers num: %d", workersNum) for startShardIndex := startShard; startShardIndex < len(shards); startShardIndex += workersNum { endShardIndex := startShardIndex + workersNum if endShardIndex > len(shards) { @@ -572,6 +582,8 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.Host) error { // Create artifacts w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, false) + w.a.V(1).M(host).F().Info("Reconcile host: %s. App version: %s", host.GetName(), host.Runtime.Version.Render()) + if err := w.reconcileHostPrepare(ctx, host); err != nil { return err } @@ -718,6 +730,14 @@ func (w *worker) reconcileHostMainDomain(ctx context.Context, host *api.Host) er // reconcileHostIncludeIntoAllActivities includes specified ClickHouse host into all activities func (w *worker) reconcileHostIncludeIntoAllActivities(ctx context.Context, host *api.Host) error { + if !w.shouldIncludeHost(host) { + w.a.V(1). + M(host).F(). + Info("No need to include host into cluster. Host/shard/cluster: %d/%d/%s", + host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName) + return nil + } + // Include host back into all activities - such as cluster, service, etc err := w.includeHost(ctx, host) if err != nil { @@ -728,5 +748,24 @@ func (w *worker) reconcileHostIncludeIntoAllActivities(ctx context.Context, host return err } + l := w.a.V(1). + WithEvent(host.GetCR(), a.EventActionReconcile, a.EventReasonReconcileCompleted). + WithAction(host.GetCR()). + M(host).F() + + // In case host is unable to report its version we are done with inclusion + switch { + case host.IsStopped(): + l.Info("Reconcile Host completed. Host is stopped: %s", host.GetName()) + return nil + case host.IsTroubleshoot(): + l.Info("Reconcile Host completed. Host is in troubleshoot mode: %s", host.GetName()) + return nil + } + + // Report host software version + version := w.getHostSoftwareVersion(ctx, host) + l.Info("Reconcile Host completed. 
Host: %s ClickHouse version running: %s", host.GetName(), version.Render()) + return nil } From 2aca322e403e460324129e45b12bdc837847eb75 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 24 Dec 2025 17:19:54 +0500 Subject: [PATCH 018/233] dev: rework ap handling --- pkg/controller/chk/worker.go | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/pkg/controller/chk/worker.go b/pkg/controller/chk/worker.go index dbe402caa..209aa7ae6 100644 --- a/pkg/controller/chk/worker.go +++ b/pkg/controller/chk/worker.go @@ -166,14 +166,14 @@ func (w *worker) shouldForceRestartHost(ctx context.Context, host *api.Host) boo } } -func (w *worker) markReconcileStart(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation, ap api.IActionPlan) { +func (w *worker) markReconcileStart(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation) { if util.IsContextDone(ctx) { log.V(1).Info("Reconcile is aborted. cr: %s ", cr.GetName()) return } // Write desired normalized CHI with initialized .Status, so it would be possible to monitor progress - cr.EnsureStatus().ReconcileStart(ap.GetRemovedHostsNum()) + cr.EnsureStatus().ReconcileStart(cr.EnsureRuntime().ActionPlan) _ = w.c.updateCRObjectStatus(ctx, cr, types.UpdateStatusOptions{ CopyStatusOptions: types.CopyStatusOptions{ CopyStatusFieldGroup: types.CopyStatusFieldGroup{ @@ -188,7 +188,7 @@ func (w *worker) markReconcileStart(ctx context.Context, cr *apiChk.ClickHouseKe WithActions(cr). M(cr).F(). 
Info("reconcile started, task id: %s", cr.GetSpecT().GetTaskID()) - w.a.V(2).M(cr).F().Info("action plan\n%s\n", ap.String()) + w.a.V(2).M(cr).F().Info("action plan\n%s\n", cr.EnsureRuntime().ActionPlan.String()) } func (w *worker) finalizeReconcileAndMarkCompleted(ctx context.Context, _cr *apiChk.ClickHouseKeeperInstallation) { @@ -264,14 +264,14 @@ func (w *worker) markReconcileCompletedUnsuccessfully(ctx context.Context, cr *a Warning("reconcile completed UNSUCCESSFULLY, task id: %s", cr.GetSpecT().GetTaskID()) } -func (w *worker) setHostStatusesPreliminary(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation, ap api.IActionPlan) { +func (w *worker) setHostStatusesPreliminary(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation) { if util.IsContextDone(ctx) { log.V(1).Info("Reconcile is aborted. cr: %s ", cr.GetName()) return } existingObjects := w.c.discovery(ctx, cr) - ap.WalkAdded( + cr.EnsureRuntime().ActionPlan.WalkAdded( // Walk over added clusters func(cluster api.ICluster) { w.a.V(1).M(cr).Info("Walking over AP added clusters. Cluster: %s", cluster.GetName()) @@ -324,7 +324,7 @@ func (w *worker) setHostStatusesPreliminary(ctx context.Context, cr *apiChk.Clic }, ) - ap.WalkModified( + cr.EnsureRuntime().ActionPlan.WalkModified( func(cluster api.ICluster) { w.a.V(1).M(cr).Info("Walking over AP modified clusters. Cluster: %s", cluster.GetName()) }, @@ -375,19 +375,19 @@ func (w *worker) logHosts(cr api.ICustomResource) { }) } -func (w *worker) createTemplatedCR(_chk *apiChk.ClickHouseKeeperInstallation, _opts ...*commonNormalizer.Options[apiChk.ClickHouseKeeperInstallation]) *apiChk.ClickHouseKeeperInstallation { - l := w.a.V(1).M(_chk).F() +func (w *worker) createTemplatedCR(_cr *apiChk.ClickHouseKeeperInstallation, _opts ...*commonNormalizer.Options[apiChk.ClickHouseKeeperInstallation]) *apiChk.ClickHouseKeeperInstallation { + l := w.a.V(1).M(_cr).F() - if _chk.HasAncestor() { - l.Info("CR has an ancestor, use it as a base for reconcile. 
CR: %s", util.NamespaceNameString(_chk)) + if _cr.HasAncestor() { + l.Info("CR has an ancestor, use it as a base for reconcile. CR: %s", util.NamespaceNameString(_cr)) } else { - l.Info("CR has NO ancestor, use empty base for reconcile. CR: %s", util.NamespaceNameString(_chk)) + l.Info("CR has NO ancestor, use empty base for reconcile. CR: %s", util.NamespaceNameString(_cr)) } - chk := w.createTemplated(_chk, _opts...) - chk.SetAncestor(w.createTemplated(_chk.GetAncestorT())) + cr := w.createTemplated(_cr, _opts...) + cr.SetAncestor(w.createTemplated(_cr.GetAncestorT())) - return chk + return cr } func (w *worker) createTemplated(c *apiChk.ClickHouseKeeperInstallation, _opts ...*commonNormalizer.Options[apiChk.ClickHouseKeeperInstallation]) *apiChk.ClickHouseKeeperInstallation { @@ -395,8 +395,8 @@ func (w *worker) createTemplated(c *apiChk.ClickHouseKeeperInstallation, _opts . if len(_opts) > 0 { opts = _opts[0] } - chk, _ := w.normalizer.CreateTemplated(c, opts) - return chk + cr, _ := w.normalizer.CreateTemplated(c, opts) + return cr } // getRaftGeneratorOptions build base set of RaftOptions From 4b94e7d2fd73dd9bb8bab1bffb629fcdfb2ce36f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 24 Dec 2025 17:20:10 +0500 Subject: [PATCH 019/233] test: pip --- tests/e2e/test_operator.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index 9f02c5cf7..c5885eadc 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -5866,6 +5866,7 @@ def test_020005(self): with Finally("I clean up"): delete_test_namespace() + @TestScenario @Name("test_020006. Test https://github.com/Altinity/clickhouse-operator/issues/1863") def test_020006(self): @@ -5888,6 +5889,7 @@ def test_020006(self): with Finally("I clean up"): delete_test_namespace() + @TestScenario @Name("test_020007. 
Test fractional CPU requests/limits handling for CHK") def test_020007(self): From ff98162d4c2e0f7530f558a33a0caa43e3f3565b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20B=C4=85k?= Date: Thu, 25 Dec 2025 13:48:24 +0100 Subject: [PATCH 020/233] docs: quote boolean-like values in CHOp config example YAML --- docs/chi-examples/70-chop-config.yaml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/chi-examples/70-chop-config.yaml b/docs/chi-examples/70-chop-config.yaml index a00ca3ffe..d70068749 100644 --- a/docs/chi-examples/70-chop-config.yaml +++ b/docs/chi-examples/70-chop-config.yaml @@ -154,16 +154,16 @@ spec: # - to be included into a ClickHouse cluster # respectfully before moving forward with host reconcile wait: - exclude: true - queries: true - include: false + exclude: "true" + queries: "true" + include: "false" # The operator during reconcile procedure should wait for replicas to catch-up # replication delay a.k.a replication lag for the following replicas replicas: # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up - all: no + all: "no" # New replicas only are requested to wait for replication to catch-up - new: yes + new: "yes" # Replication catch-up is considered to be completed as soon as replication delay # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas" # is within this specified delay (in seconds) @@ -172,11 +172,11 @@ spec: # Whether the operator during host launch procedure should wait for startup probe to succeed. # In case probe is unspecified wait is assumed to be completed successfully. # Default option value is to do not wait. - startup: no + startup: "no" # Whether the operator during host launch procedure should wait for readiness probe to succeed. # In case probe is unspecified wait is assumed to be completed successfully. # Default option value is to wait. 
- readiness: yes + readiness: "yes" ################################################ ## From 937f9d0e331cd8667417c99984e5bde4c0794d1c Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:05:57 +0500 Subject: [PATCH 021/233] dev: bump go version --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 3f00e5c23..a85b1901e 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/altinity/clickhouse-operator -go 1.25.4 +go 1.25.5 replace ( github.com/emicklei/go-restful/v3 => github.com/emicklei/go-restful/v3 v3.10.0 From c1962a38961fe2d0e0ab7a7c0035b1a9e86123ab Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:06:33 +0500 Subject: [PATCH 022/233] dev: remove from chk shards walker-processor --- pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go index 1341f2b66..a8eeb1ae4 100644 --- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go +++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go @@ -651,7 +651,6 @@ func (cr *ClickHouseKeeperInstallation) WalkTillError( ctx context.Context, fCRPreliminary func(ctx context.Context, chi *ClickHouseKeeperInstallation) error, fCluster func(ctx context.Context, cluster *Cluster) error, - fShards func(ctx context.Context, shards []*ChkShard) error, fCRFinal func(ctx context.Context, chi *ClickHouseKeeperInstallation) error, ) error { if err := fCRPreliminary(ctx, cr); err != nil { @@ -663,14 +662,6 @@ func (cr *ClickHouseKeeperInstallation) WalkTillError( if err := fCluster(ctx, cluster); err != nil { return err } - - shards := make([]*ChkShard, 0, len(cluster.Layout.Shards)) - for shardIndex := range cluster.Layout.Shards { - shards = append(shards, cluster.Layout.Shards[shardIndex]) - } - if err := fShards(ctx, shards); err != nil { - return err - } 
} if err := fCRFinal(ctx, cr); err != nil { From d5db05f92ef198b189409f88a02f3f30a6d5d878 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:07:24 +0500 Subject: [PATCH 023/233] dev: metrics functions named in neutral way --- pkg/controller/chi/metrics/pkg.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/controller/chi/metrics/pkg.go b/pkg/controller/chi/metrics/pkg.go index ddbc9255e..ed87c2e8a 100644 --- a/pkg/controller/chi/metrics/pkg.go +++ b/pkg/controller/chi/metrics/pkg.go @@ -21,20 +21,20 @@ import ( "github.com/altinity/clickhouse-operator/pkg/util" ) -func CHIInitZeroValues(ctx context.Context, src labelsSource) { +func CRInitZeroValues(ctx context.Context, src labelsSource) { chiInitZeroValues(ctx, src) } -func CHIReconcilesStarted(ctx context.Context, src labelsSource) { +func CRReconcilesStarted(ctx context.Context, src labelsSource) { chiReconcilesStarted(ctx, src) } -func CHIReconcilesCompleted(ctx context.Context, src labelsSource) { +func CRReconcilesCompleted(ctx context.Context, src labelsSource) { chiReconcilesCompleted(ctx, src) } -func CHIReconcilesAborted(ctx context.Context, src labelsSource) { +func CRReconcilesAborted(ctx context.Context, src labelsSource) { chiReconcilesAborted(ctx, src) } -func CHIReconcilesTimings(ctx context.Context, src labelsSource, seconds float64) { +func CRReconcilesTimings(ctx context.Context, src labelsSource, seconds float64) { chiReconcilesTimings(ctx, src, seconds) } @@ -67,7 +67,7 @@ func PodDelete(ctx context.Context) { var r = map[string]bool{} var mx = sync.Mutex{} -func CHIRegister(ctx context.Context, src labelsSource) { +func CRRegister(ctx context.Context, src labelsSource) { mx.Lock() defer mx.Unlock() @@ -81,7 +81,7 @@ func CHIRegister(ctx context.Context, src labelsSource) { chiRegister(ctx, src) } -func CHIUnregister(ctx context.Context, src labelsSource) { +func CRUnregister(ctx context.Context, src labelsSource) { mx.Lock() defer 
mx.Unlock() From e5fa7464dc5bf1785e07c0eca307b72ad72132e4 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:07:52 +0500 Subject: [PATCH 024/233] dev: work on deleter --- pkg/controller/chi/worker-deleter.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/controller/chi/worker-deleter.go b/pkg/controller/chi/worker-deleter.go index 0e3fafc35..f070a22e4 100644 --- a/pkg/controller/chi/worker-deleter.go +++ b/pkg/controller/chi/worker-deleter.go @@ -61,8 +61,8 @@ func (w *worker) clean(ctx context.Context, cr api.ICustomResource) { cr.(*api.ClickHouseInstallation).EnsureStatus().SyncHostTablesCreated() } -// dropReplicas cleans Zookeeper for replicas that are properly deleted - via Action Plan -func (w *worker) dropReplicas(ctx context.Context, cr *api.ClickHouseInstallation) { +// dropZKReplicas cleans Zookeeper for replicas that are properly deleted - via Action Plan +func (w *worker) dropZKReplicas(ctx context.Context, cr *api.ClickHouseInstallation) { // Iterate over Action Plan and drop all replicas that are properly removed as removed hosts w.a.V(1).M(cr).F().S().Info("drop replicas based on AP") cnt := 0 @@ -72,7 +72,7 @@ func (w *worker) dropReplicas(ctx context.Context, cr *api.ClickHouseInstallatio func(shard api.IShard) { }, func(host *api.Host) { - _ = w.dropReplica(ctx, host, NewDropReplicaOptions().SetRegularDrop()) + _ = w.dropZKReplica(ctx, host, NewDropReplicaOptions().SetRegularDrop()) cnt++ }, ) @@ -244,7 +244,7 @@ func (w *worker) discoveryAndDeleteCR(ctx context.Context, cr api.ICustomResourc return nil } - metrics.CHIUnregister(ctx, cr) + metrics.CRUnregister(ctx, cr) objs := w.c.discovery(ctx, cr) if objs.NumStatefulSet() > 0 { @@ -445,8 +445,8 @@ func (a dropReplicaOptionsArr) First() *dropReplicaOptions { return nil } -// dropReplica drops replica's info from Zookeeper -func (w *worker) dropReplica(ctx context.Context, hostToDrop *api.Host, opts *dropReplicaOptions) error { +// 
dropZKReplica drops replica's info from Zookeeper +func (w *worker) dropZKReplica(ctx context.Context, hostToDrop *api.Host, opts *dropReplicaOptions) error { if hostToDrop == nil { w.a.V(1).F().Error("FAILED to drop replica. Need to have host to drop. hostToDrop: %s", hostToDrop.GetName()) return nil From 6a844165fa6ddb2619deafd2614f780c871d25e3 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:08:21 +0500 Subject: [PATCH 025/233] dev: explicitly use zk functions --- pkg/controller/chi/worker-migrator.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/controller/chi/worker-migrator.go b/pkg/controller/chi/worker-migrator.go index f3b44a620..8394cb193 100644 --- a/pkg/controller/chi/worker-migrator.go +++ b/pkg/controller/chi/worker-migrator.go @@ -16,6 +16,7 @@ package chi import ( "context" + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/chop" a "github.com/altinity/clickhouse-operator/pkg/controller/common/announcer" @@ -86,7 +87,7 @@ func (w *worker) migrateTables(ctx context.Context, host *api.Host, opts *migrat Info( "Need to drop replica on host %d to shard %d in cluster %s", host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName) - w.dropReplica(ctx, host, NewDropReplicaOptions().SetForceDropUponStorageLoss()) + w.dropZKReplica(ctx, host, NewDropReplicaOptions().SetForceDropUponStorageLoss()) } w.a.V(1). 
From 4832b1c35c28d5e56df705e62455bcd80b3d0adc Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:09:02 +0500 Subject: [PATCH 026/233] dev: naming and move zk clean to the very end of reconcile --- pkg/controller/chi/worker-reconciler-chi.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/pkg/controller/chi/worker-reconciler-chi.go b/pkg/controller/chi/worker-reconciler-chi.go index e7ca5f6bb..0ddff8d95 100644 --- a/pkg/controller/chi/worker-reconciler-chi.go +++ b/pkg/controller/chi/worker-reconciler-chi.go @@ -57,8 +57,8 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *api.ClickHouseInstal w.a.M(new).S().P() defer w.a.M(new).E().P() - metrics.CHIInitZeroValues(ctx, new) - metrics.CHIReconcilesStarted(ctx, new) + metrics.CRInitZeroValues(ctx, new) + metrics.CRReconcilesStarted(ctx, new) startTime := time.Now() new = w.buildCR(ctx, new) @@ -86,7 +86,7 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *api.ClickHouseInstal err = common.ErrCRUDAbort w.markReconcileCompletedUnsuccessfully(ctx, new, err) if errors.Is(err, common.ErrCRUDAbort) { - metrics.CHIReconcilesAborted(ctx, new) + metrics.CRReconcilesAborted(ctx, new) } } else { // Reconcile successful @@ -97,13 +97,14 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *api.ClickHouseInstal } w.clean(ctx, new) - w.dropReplicas(ctx, new) w.addToMonitoring(new) w.waitForIPAddresses(ctx, new) w.finalizeReconcileAndMarkCompleted(ctx, new) - metrics.CHIReconcilesCompleted(ctx, new) - metrics.CHIReconcilesTimings(ctx, new, time.Since(startTime).Seconds()) + w.dropZKReplicas(ctx, new) + + metrics.CRReconcilesCompleted(ctx, new) + metrics.CRReconcilesTimings(ctx, new, time.Since(startTime).Seconds()) } return nil From e5354bfc6d31e58fe3229031443e578c4ceaf8f7 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:09:49 +0500 Subject: [PATCH 027/233] dev: batch runner is unused now - no need to keep ---
.../chi/worker-reconciler-helper.go | 39 ------------------- 1 file changed, 39 deletions(-) diff --git a/pkg/controller/chi/worker-reconciler-helper.go b/pkg/controller/chi/worker-reconciler-helper.go index 4896ca123..67eac8b3a 100644 --- a/pkg/controller/chi/worker-reconciler-helper.go +++ b/pkg/controller/chi/worker-reconciler-helper.go @@ -23,7 +23,6 @@ import ( "github.com/altinity/clickhouse-operator/pkg/apis/swversion" "github.com/altinity/clickhouse-operator/pkg/controller/common" "github.com/altinity/clickhouse-operator/pkg/controller/common/statefulset" - "github.com/altinity/clickhouse-operator/pkg/util" ) func (w *worker) getHostSoftwareVersion(ctx context.Context, host *api.Host) *swversion.SoftWareVersion { @@ -148,44 +147,6 @@ func (w *worker) runConcurrently(ctx context.Context, workersNum int, startShard return err } -func (w *worker) runConcurrentlyInBatches(ctx context.Context, workersNum int, start int, shards []*api.ChiShard) error { - for startShardIndex := 0; startShardIndex < len(shards); startShardIndex += workersNum { - endShardIndex := util.IncTopped(startShardIndex, workersNum, len(shards)) - concurrentlyProcessedShards := shards[startShardIndex:endShardIndex] - w.a.V(1).Info("Starting shards from index: %d on workers. Shards indexes [%d:%d)", start+startShardIndex, start+startShardIndex, start+endShardIndex) - - // Processing error protected with mutex - var err error - var errLock sync.Mutex - - wg := sync.WaitGroup{} - wg.Add(len(concurrentlyProcessedShards)) - // Launch shard concurrent processing - for j := range concurrentlyProcessedShards { - shard := concurrentlyProcessedShards[j] - w.a.V(1).Info("Starting shard on worker. Shard index: %d", start+startShardIndex+j) - go func() { - defer wg.Done() - w.a.V(1).Info("Starting shard on goroutine. 
Shard index: %d", start+startShardIndex+j) - if e := w.reconcileShardWithHosts(ctx, shard); e != nil { - errLock.Lock() - err = e - errLock.Unlock() - } - w.a.V(1).Info("Finished shard on goroutine. Shard index: %d", start+startShardIndex+j) - }() - } - w.a.V(1).Info("Starting to wait shards from index: %d on workers. Shards indexes [%d:%d)", start+startShardIndex, start+startShardIndex, start+endShardIndex) - wg.Wait() - w.a.V(1).Info("Finished to wait shards from index: %d on workers. Shards indexes [%d:%d)", start+startShardIndex, start+startShardIndex, start+endShardIndex) - if err != nil { - w.a.V(1).Warning("Skipping rest of shards due to an error: %v", err) - return err - } - } - return nil -} - func (w *worker) hostPVCsDataLossDetectedOptions(host *api.Host) (*statefulset.ReconcileOptions, *migrateTableOptions) { w.a.V(1). M(host).F(). From 04c79b1ead74af58a74a28fc95354069adb73482 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:10:26 +0500 Subject: [PATCH 028/233] dev: neutral naming --- pkg/controller/chi/worker.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go index f53562e6e..7d202baed 100644 --- a/pkg/controller/chi/worker.go +++ b/pkg/controller/chi/worker.go @@ -303,11 +303,11 @@ func (w *worker) updateCHI(ctx context.Context, old, new *api.ClickHouseInstalla new = n.(*api.ClickHouseInstallation) } - metrics.CHIRegister(ctx, new) + metrics.CRRegister(ctx, new) if w.deleteCHI(ctx, old, new) { // CHI is being deleted - metrics.CHIUnregister(ctx, new) + metrics.CRUnregister(ctx, new) return nil } @@ -364,7 +364,7 @@ func (w *worker) finalizeReconcileAndMarkCompleted(ctx context.Context, _cr *api w.a.V(1).M(_cr).F().S().Info("finalize reconcile") - // Update CHI object + // Update CR object _ = w.finalizeCR( ctx, _cr, From d8369711cdac5851616ee63209639f624430fb66 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:10:58 
+0500 Subject: [PATCH 029/233] dev: clean chk config mapper --- pkg/controller/chk/worker-config-map.go | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/pkg/controller/chk/worker-config-map.go b/pkg/controller/chk/worker-config-map.go index 515d8f313..c2dc6e05b 100644 --- a/pkg/controller/chk/worker-config-map.go +++ b/pkg/controller/chk/worker-config-map.go @@ -21,7 +21,6 @@ import ( core "k8s.io/api/core/v1" apiErrors "k8s.io/apimachinery/pkg/api/errors" - log "github.com/altinity/clickhouse-operator/pkg/announcer" apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" a "github.com/altinity/clickhouse-operator/pkg/controller/common/announcer" "github.com/altinity/clickhouse-operator/pkg/util" @@ -33,11 +32,6 @@ func (w *worker) reconcileConfigMap( cr apiChi.ICustomResource, configMap *core.ConfigMap, ) error { - if util.IsContextDone(ctx) { - log.V(2).Info("task is done") - return nil - } - w.a.V(2).M(cr).S().P() defer w.a.V(2).M(cr).E().P() @@ -67,11 +61,6 @@ func (w *worker) reconcileConfigMap( // updateConfigMap func (w *worker) updateConfigMap(ctx context.Context, cr apiChi.ICustomResource, configMap *core.ConfigMap) error { - if util.IsContextDone(ctx) { - log.V(2).Info("task is done") - return nil - } - updatedConfigMap, err := w.c.updateConfigMap(ctx, configMap) if err == nil { w.a.V(1). @@ -95,11 +84,6 @@ func (w *worker) updateConfigMap(ctx context.Context, cr apiChi.ICustomResource, // createConfigMap func (w *worker) createConfigMap(ctx context.Context, cr apiChi.ICustomResource, configMap *core.ConfigMap) error { - if util.IsContextDone(ctx) { - log.V(2).Info("task is done") - return nil - } - err := w.c.createConfigMap(ctx, configMap) if err == nil { w.a.V(1). 
From 6a011756198571222b8041c5edfe5008dbfc1f5b Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:11:33 +0500 Subject: [PATCH 030/233] dev: workings unification --- pkg/controller/chk/worker-deleter.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/controller/chk/worker-deleter.go b/pkg/controller/chk/worker-deleter.go index df73fa53a..c44149a4e 100644 --- a/pkg/controller/chk/worker-deleter.go +++ b/pkg/controller/chk/worker-deleter.go @@ -30,7 +30,7 @@ import ( func (w *worker) clean(ctx context.Context, cr api.ICustomResource) { if util.IsContextDone(ctx) { - log.V(2).Info("task is done") + log.V(1).Info("Reconcile clean is aborted. CR: %s ", cr.GetName()) return } @@ -45,9 +45,9 @@ func (w *worker) clean(ctx context.Context, cr api.ICustomResource) { w.a.V(1).M(cr).F().Info("List of successfully reconciled objects:\n%s", w.task.RegistryReconciled()) objs := w.c.discovery(ctx, cr) need := w.task.RegistryReconciled() - w.a.V(1).M(cr).F().Info("Existing objects:\n%s", objs) + w.a.V(1).M(cr).F().Info("List of existing objects:\n%s", objs) objs.Subtract(need) - w.a.V(1).M(cr).F().Info("Non-reconciled objects:\n%s", objs) + w.a.V(1).M(cr).F().Info("List of non-reconciled objects:\n%s", objs) if w.purge(ctx, cr, objs, w.task.RegistryFailed()) > 0 { util.WaitContextDoneOrTimeout(ctx, 1*time.Minute) } @@ -63,7 +63,7 @@ func (w *worker) purge( reconcileFailedObjs *model.Registry, ) (cnt int) { if util.IsContextDone(ctx) { - log.V(2).Info("task is done") + log.V(1).Info("Purge is aborted. 
CR: %s ", cr.GetName()) return cnt } From 46215c79cd7892274daecb0b38470ca6a5247893 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:12:26 +0500 Subject: [PATCH 031/233] dev: baseline helper functions --- .../chk/worker-reconciler-helper.go | 60 +++++++++++++++++-- 1 file changed, 56 insertions(+), 4 deletions(-) diff --git a/pkg/controller/chk/worker-reconciler-helper.go b/pkg/controller/chk/worker-reconciler-helper.go index d912d2ae3..36e890733 100644 --- a/pkg/controller/chk/worker-reconciler-helper.go +++ b/pkg/controller/chk/worker-reconciler-helper.go @@ -16,12 +16,13 @@ package chk import ( "context" - apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1" - "github.com/altinity/clickhouse-operator/pkg/controller/common" - "github.com/altinity/clickhouse-operator/pkg/controller/common/statefulset" + "sync" + apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1" api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/apis/swversion" + "github.com/altinity/clickhouse-operator/pkg/controller/common" + "github.com/altinity/clickhouse-operator/pkg/controller/common/statefulset" ) func (w *worker) getHostSoftwareVersion(ctx context.Context, host *api.Host) *swversion.SoftWareVersion { @@ -30,7 +31,7 @@ func (w *worker) getHostSoftwareVersion(ctx context.Context, host *api.Host) *sw } // getReconcileShardsWorkersNum calculates how many workers are allowed to be used for concurrent shard reconcile -func (w *worker) getReconcileShardsWorkersNum(shards []*apiChk.ChkShard, opts *common.ReconcileShardsAndHostsOptions) int { +func (w *worker) getReconcileShardsWorkersNum(cluster *apiChk.Cluster, opts *common.ReconcileShardsAndHostsOptions) int { return 1 } @@ -45,6 +46,56 @@ func (w *worker) reconcileShardsAndHostsFetchOpts(ctx context.Context) *common.R } } +func (w *worker) runConcurrently(ctx 
context.Context, workersNum int, startShardIndex int, shards []*apiChk.ChkShard) error { + if len(shards) == 0 { + return nil + } + + type shardReconcile struct { + shard *apiChk.ChkShard + index int + } + + ch := make(chan *shardReconcile) + wg := sync.WaitGroup{} + + // Launch tasks feeder + wg.Add(1) + go func() { + defer wg.Done() + defer close(ch) + for i, shard := range shards { + ch <- &shardReconcile{ + shard, + startShardIndex + i, + } + } + }() + + // Launch workers + var err error + var errLock sync.Mutex + for i := 0; i < workersNum; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for rq := range ch { + w.a.V(1).Info("Starting shard index: %d on worker", rq.index) + if e := w.reconcileShardWithHosts(ctx, rq.shard); e != nil { + errLock.Lock() + err = e + errLock.Unlock() + } + } + }() + } + + w.a.V(1).Info("Starting to wait shards from index: %d on workers.", startShardIndex) + wg.Wait() + w.a.V(1).Info("Finished to wait shards from index: %d on workers.", startShardIndex) + return err +} + func (w *worker) hostPVCsDataLossDetectedOptions(host *api.Host) *statefulset.ReconcileOptions { w.a.V(1). M(host).F(). @@ -53,6 +104,7 @@ func (w *worker) hostPVCsDataLossDetectedOptions(host *api.Host) *statefulset.Re // In case of data loss detection on existing volumes, we need to: // 1. recreate StatefulSet // 2. 
run tables migration again + stsReconcileOpts := statefulset.NewReconcileStatefulSetOptions().SetForceRecreate() return stsReconcileOpts } From 5622c7f97ade837d7e807243c63ebf941e12ffc5 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:13:03 +0500 Subject: [PATCH 032/233] dev: baseline service processor --- pkg/controller/chk/worker-service.go | 33 ++++++++++++++++++---------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/pkg/controller/chk/worker-service.go b/pkg/controller/chk/worker-service.go index f0ae7b12c..602fb0c52 100644 --- a/pkg/controller/chk/worker-service.go +++ b/pkg/controller/chk/worker-service.go @@ -21,6 +21,7 @@ import ( core "k8s.io/api/core/v1" apiErrors "k8s.io/apimachinery/pkg/api/errors" + log "github.com/altinity/clickhouse-operator/pkg/announcer" chi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" a "github.com/altinity/clickhouse-operator/pkg/controller/common/announcer" "github.com/altinity/clickhouse-operator/pkg/util" @@ -91,15 +92,19 @@ func (w *worker) updateService( // spec.resourceVersion is required in order to update an object newService.ResourceVersion = curService.ResourceVersion - // - // Migrate ClusterIP to the new service - // - // spec.clusterIP field is immutable, need to use already assigned value - // From https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service - // Kubernetes assigns this Service an IP address (sometimes called the “cluster IP”), which is used by the Service proxies - // See also https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - // You can specify your own cluster IP address as part of a Service creation request. To do this, set the .spec.clusterIP - newService.Spec.ClusterIP = curService.Spec.ClusterIP + if newService.Spec.ClusterIP == core.ClusterIPNone { + // In case if new service has no ClusterIP requested, we'll keep it unassigned. 
+ // Otherwise we need to migrate IP address assigned earlier to new service in order to reuse it + log.V(1).Info("switch service %s to IP-less mode. ClusterIP=None", util.NamespacedName(newService)) + } else { + // Migrate assigned IP value - ClusterIP - to the new service + // spec.clusterIP field is immutable, need to use already assigned value + // From https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + // Kubernetes assigns this Service an IP address (sometimes called the “cluster IP”), which is used by the Service proxies + // See also https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // You can specify your own cluster IP address as part of a Service creation request. To do this, set the .spec.clusterIP + newService.Spec.ClusterIP = curService.Spec.ClusterIP + } // // Migrate existing ports to the new service for NodePort and LoadBalancer services @@ -114,9 +119,13 @@ func (w *worker) updateService( // No changes in service type is allowed. // Already exposed port details can not be changed. - serviceTypeIsNodePort := (curService.Spec.Type == core.ServiceTypeNodePort) && (newService.Spec.Type == core.ServiceTypeNodePort) - serviceTypeIsLoadBalancer := (curService.Spec.Type == core.ServiceTypeLoadBalancer) && (newService.Spec.Type == core.ServiceTypeLoadBalancer) - if serviceTypeIsNodePort || serviceTypeIsLoadBalancer { + // Service type of new and cur service is the same. + // In case it is not the same service has to be just recreated. 
+ // So we can check for one type only - let's check for type of new service + typeIsNodePort := newService.Spec.Type == core.ServiceTypeNodePort + typeIsLoadBalancer := newService.Spec.Type == core.ServiceTypeLoadBalancer + if typeIsNodePort || typeIsLoadBalancer { + // Migrate cur ports to new service for i := range newService.Spec.Ports { newPort := &newService.Spec.Ports[i] for j := range curService.Spec.Ports { From afda54e065ca36921d9582824f7290cd5e31a4ce Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:13:42 +0500 Subject: [PATCH 033/233] dev: steps to unify helpers api --- pkg/controller/chk/worker-status-helpers.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/controller/chk/worker-status-helpers.go b/pkg/controller/chk/worker-status-helpers.go index 90e2ba556..d80998b01 100644 --- a/pkg/controller/chk/worker-status-helpers.go +++ b/pkg/controller/chk/worker-status-helpers.go @@ -40,6 +40,11 @@ func (w *worker) areUsableOldAndNew(old, new *apiChk.ClickHouseKeeperInstallatio return true } +// isAfterFinalizerInstalled checks whether we are just installed finalizer +func (w *worker) isAfterFinalizerInstalled(old, new *apiChk.ClickHouseKeeperInstallation) bool { + return false +} + // isGenerationTheSame checks whether old and new CHI have the same generation func (w *worker) isGenerationTheSame(old, new *apiChk.ClickHouseKeeperInstallation) bool { if !w.areUsableOldAndNew(old, new) { From dec5a3e0ada30bb3a119e93984a802a2e2b8306a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:14:13 +0500 Subject: [PATCH 034/233] dev: baseline chk finalization --- pkg/controller/chk/worker.go | 86 ++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 49 deletions(-) diff --git a/pkg/controller/chk/worker.go b/pkg/controller/chk/worker.go index 209aa7ae6..567b9f85b 100644 --- a/pkg/controller/chk/worker.go +++ b/pkg/controller/chk/worker.go @@ -166,6 +166,27 @@ func (w *worker) 
shouldForceRestartHost(ctx context.Context, host *api.Host) boo } } +func (w *worker) finalizeCR( + ctx context.Context, + obj meta.Object, + updateStatusOpts types.UpdateStatusOptions, + f func(*apiChk.ClickHouseKeeperInstallation), +) error { + chi, err := w.buildCRFromObj(ctx, obj) + if err != nil { + log.V(1).Error("Unable to finalize CR: %s err: %v", util.NamespacedName(obj), err) + return err + } + + if f != nil { + f(chi) + } + + _ = w.c.updateCRObjectStatus(ctx, chi, updateStatusOpts) + + return nil +} + func (w *worker) markReconcileStart(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation) { if util.IsContextDone(ctx) { log.V(1).Info("Reconcile is aborted. cr: %s ", cr.GetName()) @@ -199,34 +220,23 @@ func (w *worker) finalizeReconcileAndMarkCompleted(ctx context.Context, _cr *api w.a.V(1).M(_cr).F().S().Info("finalize reconcile") - // Update CHI object - if chi, err := w.createCRFromObjectMeta(_cr, true, commonNormalizer.NewOptions[apiChk.ClickHouseKeeperInstallation]()); err == nil { - w.a.V(1).M(chi).Info("updating endpoints for CR-2 %s", chi.Name) - ips := w.c.getPodsIPs(ctx, chi) - w.a.V(1).M(chi).Info("IPs of the CR-2 finalize reconcile %s/%s: len: %d %v", chi.Namespace, chi.Name, len(ips), ips) - opts := commonNormalizer.NewOptions[apiChk.ClickHouseKeeperInstallation]() - opts.DefaultUserAdditionalIPs = ips - if chi, err := w.createCRFromObjectMeta(_cr, true, opts); err == nil { - w.a.V(1).M(chi).Info("Update users IPS-2") - chi.SetAncestor(chi.GetTarget()) - chi.SetTarget(nil) - chi.EnsureStatus().ReconcileComplete() - // TODO unify with update endpoints - w.newTask(chi, chi.GetAncestorT()) - //w.reconcileConfigMapCommonUsers(ctx, chi) - w.c.updateCRObjectStatus(ctx, chi, types.UpdateStatusOptions{ - CopyStatusOptions: types.CopyStatusOptions{ - CopyStatusFieldGroup: types.CopyStatusFieldGroup{ - FieldGroupWholeStatus: true, - }, + // Update CR object + _ = w.finalizeCR( + ctx, + _cr, + types.UpdateStatusOptions{ + CopyStatusOptions: 
types.CopyStatusOptions{ + CopyStatusFieldGroup: types.CopyStatusFieldGroup{ + FieldGroupWholeStatus: true, }, - }) - } else { - w.a.M(_cr).F().Error("internal unable to find CR by %v err: %v", _cr.GetLabels(), err) - } - } else { - w.a.M(_cr).F().Error("external unable to find CR by %v err %v", _cr.GetLabels(), err) - } + }, + }, + func(c *apiChk.ClickHouseKeeperInstallation) { + c.SetAncestor(c.GetTarget()) + c.SetTarget(nil) + c.EnsureStatus().ReconcileComplete() + }, + ) w.a.V(1). WithEvent(_cr, a.EventActionReconcile, a.EventReasonReconcileCompleted). @@ -417,25 +427,3 @@ func (w *worker) options() *config.FilesGeneratorOptions { w.a.Info("RaftOptions: %s", opts) return config.NewFilesGeneratorOptions().SetRaftOptions(opts) } - -// createCRFromObjectMeta -func (w *worker) createCRFromObjectMeta( - meta meta.Object, - isCHI bool, - options *commonNormalizer.Options[apiChk.ClickHouseKeeperInstallation], -) (*apiChk.ClickHouseKeeperInstallation, error) { - w.a.V(3).M(meta).S().P() - defer w.a.V(3).M(meta).E().P() - - chi, err := w.c.GetCHIByObjectMeta(meta, isCHI) - if err != nil { - return nil, err - } - - chi, err = w.normalizer.CreateTemplated(chi, options) - if err != nil { - return nil, err - } - - return chi, nil -} From fac2bbff70d71fec3a859b34d5b6f37db3c352aa Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:14:33 +0500 Subject: [PATCH 035/233] dev: unify chk monitoring interface --- pkg/controller/chk/metrics/interface.go | 22 ++++++++++ pkg/controller/chk/metrics/pkg.go | 55 +++++++++++++++++++++++++ 2 files changed, 77 insertions(+) create mode 100644 pkg/controller/chk/metrics/interface.go create mode 100644 pkg/controller/chk/metrics/pkg.go diff --git a/pkg/controller/chk/metrics/interface.go b/pkg/controller/chk/metrics/interface.go new file mode 100644 index 000000000..1fb2d5182 --- /dev/null +++ b/pkg/controller/chk/metrics/interface.go @@ -0,0 +1,22 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. 
All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +type labelsSource interface { + GetName() string + GetNamespace() string + GetLabels() map[string]string + GetAnnotations() map[string]string +} diff --git a/pkg/controller/chk/metrics/pkg.go b/pkg/controller/chk/metrics/pkg.go new file mode 100644 index 000000000..ba890c523 --- /dev/null +++ b/pkg/controller/chk/metrics/pkg.go @@ -0,0 +1,55 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package metrics + +import ( + "context" +) + +func CRInitZeroValues(ctx context.Context, src labelsSource) { +} + +func CRReconcilesStarted(ctx context.Context, src labelsSource) { +} +func CRReconcilesCompleted(ctx context.Context, src labelsSource) { +} +func CRReconcilesAborted(ctx context.Context, src labelsSource) { +} +func CRReconcilesTimings(ctx context.Context, src labelsSource, seconds float64) { +} + +func HostReconcilesStarted(ctx context.Context, src labelsSource) { +} +func HostReconcilesCompleted(ctx context.Context, src labelsSource) { +} +func HostReconcilesRestart(ctx context.Context, src labelsSource) { +} +func HostReconcilesErrors(ctx context.Context, src labelsSource) { +} +func HostReconcilesTimings(ctx context.Context, src labelsSource, seconds float64) { +} + +func PodAdd(ctx context.Context) { +} +func PodUpdate(ctx context.Context) { +} +func PodDelete(ctx context.Context) { +} + +func CRRegister(ctx context.Context, src labelsSource) { +} + +func CRUnregister(ctx context.Context, src labelsSource) { +} From d202e9d1a408851114791a5b0e22f7b94a091d8a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:15:03 +0500 Subject: [PATCH 036/233] dev: unify chk monitoring --- pkg/controller/chk/worker-monitoring.go | 27 +++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 pkg/controller/chk/worker-monitoring.go diff --git a/pkg/controller/chk/worker-monitoring.go b/pkg/controller/chk/worker-monitoring.go new file mode 100644 index 000000000..027e1b559 --- /dev/null +++ b/pkg/controller/chk/worker-monitoring.go @@ -0,0 +1,27 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package chk + +import ( + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1" +) + +// excludeFromMonitoring excludes stopped CR from monitoring +func (w *worker) excludeFromMonitoring(chi *api.ClickHouseKeeperInstallation) { +} + +// addToMonitoring adds CR to monitoring +func (w *worker) addToMonitoring(chi *api.ClickHouseKeeperInstallation) { +} From 33aadd800b069cd871b2e2069b5ac699a404e99f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:15:33 +0500 Subject: [PATCH 037/233] dev: baseline reconciler --- pkg/controller/chk/worker-reconciler-chk.go | 144 ++++++++++++-------- 1 file changed, 87 insertions(+), 57 deletions(-) diff --git a/pkg/controller/chk/worker-reconciler-chk.go b/pkg/controller/chk/worker-reconciler-chk.go index 71bb55bde..f3c0f063b 100644 --- a/pkg/controller/chk/worker-reconciler-chk.go +++ b/pkg/controller/chk/worker-reconciler-chk.go @@ -17,7 +17,7 @@ package chk import ( "context" "errors" - "sync" + "fmt" "time" meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +26,7 @@ import ( apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1" api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/apis/common/types" - "github.com/altinity/clickhouse-operator/pkg/controller/chi/metrics" + "github.com/altinity/clickhouse-operator/pkg/controller/chk/metrics" "github.com/altinity/clickhouse-operator/pkg/controller/common" a 
"github.com/altinity/clickhouse-operator/pkg/controller/common/announcer" "github.com/altinity/clickhouse-operator/pkg/controller/common/statefulset" @@ -46,20 +46,35 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *apiChk.ClickHouseKee common.LogOldAndNew("non-normalized yet (native)", old, new) + switch { + case w.isAfterFinalizerInstalled(old, new): + w.a.M(new).F().Info("isAfterFinalizerInstalled - continue reconcile-1") + case w.isGenerationTheSame(old, new): + log.V(2).M(new).F().Info("isGenerationTheSame() - nothing to do here, exit") + return nil + } + w.a.M(new).S().P() defer w.a.M(new).E().P() + metrics.CRInitZeroValues(ctx, new) + metrics.CRReconcilesStarted(ctx, new) + startTime := time.Now() + new = w.buildCR(ctx, new) switch { case new.EnsureRuntime().ActionPlan.HasActionsToDo(): w.a.M(new).F().Info("ActionPlan has actions - continue reconcile") + case w.isAfterFinalizerInstalled(new.GetAncestorT(), new): + w.a.M(new).F().Info("isAfterFinalizerInstalled - continue reconcile-2") default: w.a.M(new).F().Info("ActionPlan has no actions - abort reconcile") return nil } w.markReconcileStart(ctx, new) + w.excludeFromMonitoring(new) w.setHostStatusesPreliminary(ctx, new) if err := w.reconcile(ctx, new); err != nil { @@ -71,6 +86,7 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *apiChk.ClickHouseKee err = common.ErrCRUDAbort w.markReconcileCompletedUnsuccessfully(ctx, new, err) if errors.Is(err, common.ErrCRUDAbort) { + metrics.CRReconcilesAborted(ctx, new) } } else { // Reconcile successful @@ -81,8 +97,12 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *apiChk.ClickHouseKee } w.clean(ctx, new) + w.addToMonitoring(new) w.waitForIPAddresses(ctx, new) w.finalizeReconcileAndMarkCompleted(ctx, new) + + metrics.CRReconcilesCompleted(ctx, new) + metrics.CRReconcilesTimings(ctx, new, time.Since(startTime).Seconds()) } return nil @@ -182,7 +202,6 @@ func (w *worker) reconcile(ctx context.Context, cr 
*apiChk.ClickHouseKeeperInsta ctx, w.reconcileCRAuxObjectsPreliminary, w.reconcileCluster, - w.reconcileShardsAndHosts, w.reconcileCRAuxObjectsFinal, ) } @@ -282,7 +301,7 @@ func (w *worker) reconcileCRAuxObjectsFinal(ctx context.Context, cr *apiChk.Clic } func (w *worker) includeAllHostsIntoCluster(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation) { - // Not appropriate + // Not applicable } // reconcileConfigMapCommon reconciles common ConfigMap @@ -343,16 +362,16 @@ func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.Host, o log.V(1).M(host).F().S().Info("reconcile StatefulSet start") defer log.V(1).M(host).F().E().Info("reconcile StatefulSet end") - version := w.getHostSoftwareVersion(ctx, host) - host.Runtime.CurStatefulSet, _ = w.c.kube.STS().Get(ctx, host) + w.a.V(1).M(host).F().Info("Reconcile host STS: %s. App version: %s", host.GetName(), host.Runtime.Version.Render()) - w.a.V(1).M(host).F().Info("Reconcile host: %s. App version: %s", host.GetName(), version) + // Start with force-restart host if w.shouldForceRestartHost(ctx, host) { w.a.V(1).M(host).F().Info("Reconcile host STS force restart: %s", host.GetName()) _ = w.hostForceRestart(ctx, host, opts) } w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, host.IsStopped()) + opts = w.prepareStsReconcileOptsWaitSection(host, opts) // We are in place, where we can reconcile StatefulSet to desired configuration. w.a.V(1).M(host).F().Info("Reconcile host STS: %s. Reconcile StatefulSet", host.GetName()) @@ -380,17 +399,33 @@ func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.Host, o func (w *worker) hostForceRestart(ctx context.Context, host *api.Host, opts *statefulset.ReconcileOptions) error { w.a.V(1).M(host).F().Info("Reconcile host. Force restart: %s", host.GetName()) - // In case we have to force-restart host - // We'll do it via replicas: 0 in StatefulSet. 
- w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, true) - _ = w.stsReconciler.ReconcileStatefulSet(ctx, host, false, opts) + if host.IsStopped() || (w.hostSoftwareRestart(ctx, host) != nil) { + _ = w.hostScaleDown(ctx, host, opts) + } + metrics.HostReconcilesRestart(ctx, host.GetCR()) - // At this moment StatefulSet has 0 replicas. - // First stage of RollingUpdate completed. return nil } +func (w *worker) hostSoftwareRestart(ctx context.Context, host *api.Host) error { + return fmt.Errorf("inapplicable so far") +} + +func (w *worker) hostScaleDown(ctx context.Context, host *api.Host, opts *statefulset.ReconcileOptions) error { + w.a.V(1).M(host).F().Info("Reconcile host. Host shutdown via scale down: %s", host.GetName()) + + w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, true) + err := w.stsReconciler.ReconcileStatefulSet(ctx, host, false, opts) + if err != nil { + w.a.V(1).M(host).F().Info("Host shutdown abort 1. Host: %s err: %v", host.GetName(), err) + return err + } + + w.a.V(1).M(host).F().Info("Host shutdown success. 
Host: %s", host.GetName()) + return nil +} + // reconcileHostService reconciles host's Service func (w *worker) reconcileHostService(ctx context.Context, host *api.Host) error { service := w.task.Creator().CreateService(interfaces.ServiceHost, host).First() @@ -429,6 +464,9 @@ func (w *worker) reconcileCluster(ctx context.Context, cluster *apiChk.Cluster) if err := w.reconcileClusterPodDisruptionBudget(ctx, cluster); err != nil { return err } + if err := w.reconcileClusterShardsAndHosts(ctx, cluster); err != nil { + return err + } return nil } @@ -464,15 +502,17 @@ func (w *worker) reconcileClusterPodDisruptionBudget(ctx context.Context, cluste return nil } -// reconcileShardsAndHosts reconciles shards and hosts of each shard -func (w *worker) reconcileShardsAndHosts(ctx context.Context, shards []*apiChk.ChkShard) error { +// reconcileClusterShardsAndHosts reconciles shards and hosts of each shard +func (w *worker) reconcileClusterShardsAndHosts(ctx context.Context, cluster *apiChk.Cluster) error { + shards := cluster.Layout.Shards[:] + // Sanity check - has to have shard(s) if len(shards) == 0 { return nil } - log.V(1).F().S().Info("reconcileShardsAndHosts start") - defer log.V(1).F().E().Info("reconcileShardsAndHosts end") + log.V(1).F().S().Info("reconcileClusterShardsAndHosts start") + defer log.V(1).F().E().Info("reconcileClusterShardsAndHosts end") opts := w.reconcileShardsAndHostsFetchOpts(ctx) @@ -492,45 +532,17 @@ func (w *worker) reconcileShardsAndHosts(ctx context.Context, shards []*apiChk.C return err } - // Since shard with 0 index is already done, we'll proceed with the 1-st + // Since shard with 0 index is already done, we'll proceed concurrently starting with the 1-st startShard = 1 } // Process shards using specified concurrency level while maintaining specified max concurrency percentage. // Loop over shards. 
- workersNum := w.getReconcileShardsWorkersNum(shards, opts) + workersNum := w.getReconcileShardsWorkersNum(cluster, opts) w.a.V(1).Info("Starting rest of shards on workers. Workers num: %d", workersNum) - for startShardIndex := startShard; startShardIndex < len(shards); startShardIndex += workersNum { - endShardIndex := startShardIndex + workersNum - if endShardIndex > len(shards) { - endShardIndex = len(shards) - } - concurrentlyProcessedShards := shards[startShardIndex:endShardIndex] - - // Processing error protected with mutex - var err error - var errLock sync.Mutex - - wg := sync.WaitGroup{} - wg.Add(len(concurrentlyProcessedShards)) - // Launch shard concurrent processing - for j := range concurrentlyProcessedShards { - shard := concurrentlyProcessedShards[j] - go func() { - defer wg.Done() - if e := w.reconcileShardWithHosts(ctx, shard); e != nil { - errLock.Lock() - err = e - errLock.Unlock() - return - } - }() - } - wg.Wait() - if err != nil { - w.a.V(1).Warning("Skipping rest of shards due to an error: %v", err) - return err - } + if err := w.runConcurrently(ctx, workersNum, startShard, shards[startShard:]); err != nil { + w.a.V(1).Info("Finished with ERROR rest of shards on workers: %d, err: %v", workersNum, err) + return err } w.a.V(1).Info("Finished successfully rest of shards on workers: %d", workersNum) return nil @@ -574,14 +586,14 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.Host) error { w.a.V(2).M(host).S().P() defer w.a.V(2).M(host).E().P() + metrics.HostReconcilesStarted(ctx, host.GetCR()) + startTime := time.Now() + if host.IsFirstInCR() { _ = w.reconcileCRServicePreliminary(ctx, host.GetCR()) defer w.reconcileCRServiceFinal(ctx, host.GetCR()) } - // Create artifacts - w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, false) - w.a.V(1).M(host).F().Info("Reconcile host: %s. 
App version: %s", host.GetName(), host.Runtime.Version.Render()) if err := w.reconcileHostPrepare(ctx, host); err != nil { @@ -620,6 +632,10 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.Host) error { }, }, }) + + metrics.HostReconcilesCompleted(ctx, host.GetCR()) + metrics.HostReconcilesTimings(ctx, host.GetCR(), time.Since(startTime).Seconds()) + return nil } @@ -641,12 +657,9 @@ func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error { stsReconcileOpts *statefulset.ReconcileOptions ) - //if !host.IsLast() { - // stsReconcileOpts = stsReconcileOpts.SetDoNotWait() - //} - // Reconcile ConfigMap if err := w.reconcileConfigMapHost(ctx, host); err != nil { + metrics.HostReconcilesErrors(ctx, host.GetCR()) w.a.V(1). M(host).F(). Warning("Reconcile Host Main - unable to reconcile ConfigMap. Host: %s Err: %v", host.GetName(), err) @@ -682,6 +695,7 @@ func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error { // Reconcile StatefulSet if err := w.reconcileHostStatefulSet(ctx, host, stsReconcileOpts); err != nil { + metrics.HostReconcilesErrors(ctx, host.GetCR()) w.a.V(1). M(host).F(). Warning("Reconcile Host Main - unable to reconcile StatefulSet. Host: %s Err: %v", host.GetName(), err) @@ -705,6 +719,22 @@ func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error { return nil } +func (w *worker) prepareStsReconcileOptsWaitSection(host *api.Host, opts *statefulset.ReconcileOptions) *statefulset.ReconcileOptions { + if host.GetCluster().GetReconcile().Host.Wait.Probes.GetStartup().IsTrue() { + opts = opts.SetWaitUntilStarted() + w.a.V(1). + M(host).F(). + Warning("Setting option SetWaitUntilStarted ") + } + if host.GetCluster().GetReconcile().Host.Wait.Probes.GetReadiness().IsTrue() { + opts = opts.SetWaitUntilReady() + w.a.V(1). + M(host).F(). 
+ Warning("Setting option SetWaitUntilReady") + } + return opts +} + func (w *worker) reconcileHostPVCs(ctx context.Context, host *api.Host) storage.ErrorDataPersistence { return storage.NewStorageReconciler( w.task, From 56e10dc799ec206518403f7caa968fa69f1ea780 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:16:08 +0500 Subject: [PATCH 038/233] dev: format --- pkg/controller/chi/worker-migrator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/chi/worker-migrator.go b/pkg/controller/chi/worker-migrator.go index 8394cb193..00acb7f44 100644 --- a/pkg/controller/chi/worker-migrator.go +++ b/pkg/controller/chi/worker-migrator.go @@ -16,7 +16,7 @@ package chi import ( "context" - + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/chop" a "github.com/altinity/clickhouse-operator/pkg/controller/common/announcer" From 461f94b0407c5950b20bf274f2bcd9fc769c6440 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 26 Dec 2025 14:18:01 +0500 Subject: [PATCH 039/233] env: helm chart --- deploy/helm/clickhouse-operator/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/helm/clickhouse-operator/README.md b/deploy/helm/clickhouse-operator/README.md index 7628d4ca7..62f288c2b 100644 --- a/deploy/helm/clickhouse-operator/README.md +++ b/deploy/helm/clickhouse-operator/README.md @@ -122,7 +122,7 @@ crdHook: | serviceMonitor.clickhouseMetrics.metricRelabelings | list | `[]` | | | serviceMonitor.clickhouseMetrics.relabelings | list | `[]` | | | serviceMonitor.clickhouseMetrics.scrapeTimeout | string | `""` | | -| serviceMonitor.enabled | bool | `false` | ServiceMonitor Custom resource is created for a [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) In serviceMonitor will be created two endpoints ch-metrics on port 8888 and op-metrics # 9999. 
You can specify interval, scrapeTimeout, relabelings, metricRelabelings for each endpoint below | +| serviceMonitor.enabled | bool | `false` | ServiceMonitor Custom resource is created for a [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) In serviceMonitor will be created two endpoints ch-metrics on port 8888 and op-metrics # 9999. Ypu can specify interval, scrapeTimeout, relabelings, metricRelabelings for each endpoint below | | serviceMonitor.operatorMetrics.interval | string | `"30s"` | | | serviceMonitor.operatorMetrics.metricRelabelings | list | `[]` | | | serviceMonitor.operatorMetrics.relabelings | list | `[]` | | From f9344a61ea5d718cf9a6bba3f176dd6987aae838 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20B=C4=85k?= Date: Wed, 31 Dec 2025 17:56:24 +0100 Subject: [PATCH 040/233] docs: clarify operator config changes require restart (no self-reconcile) --- docs/operator_configuration.md | 2 +- docs/security_hardening.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/operator_configuration.md b/docs/operator_configuration.md index 73c11ec4b..fd7235c25 100644 --- a/docs/operator_configuration.md +++ b/docs/operator_configuration.md @@ -18,7 +18,7 @@ Operator settings are initialized in-order from 3 sources: * etc-clickhouse-operator-files configmap (also a part of default [clickhouse-operator-install-bundle.yaml][clickhouse-operator-install-bundle.yaml] * `ClickHouseOperatorConfiguration` resource. See [example][70-chop-config.yaml] for details. -Next sources merges with the previous one. Changes to `etc-clickhouse-operator-files` are not monitored, but picked up if operator is restarted. Changes to `ClickHouseOperatorConfiguration` are monitored by an operator and applied immediately. +Next sources merge with the previous ones. 
Currently the operator does not self-reconcile its own configuration: changes to `etc-clickhouse-operator-files` or `ClickHouseOperatorConfiguration` are read only at startup and require an operator restart to apply. `config.yaml` has following settings: diff --git a/docs/security_hardening.md b/docs/security_hardening.md index 7bf5d4fe9..c4c92a99a 100644 --- a/docs/security_hardening.md +++ b/docs/security_hardening.md @@ -73,7 +73,7 @@ stringData: We recommend that you do not include the **user** and **password** within the operator configuration without a **secret**, though it is also supported. -To change '**clickhouse_operator**' user password you can modify `etc-clickhouse-operator-files` configmap or create `ClickHouseOperatorConfiguration` object. +To change '**clickhouse_operator**' user password you can modify `etc-clickhouse-operator-files` configmap or create `ClickHouseOperatorConfiguration` object, then restart the operator to apply the change. See [operator configuration](https://github.com/Altinity/clickhouse-operator/blob/master/docs/operator_configuration.md) for more information about operator configuration files. 
From e14318f7122064bcfe1606bc8174aec8828e8ae6 Mon Sep 17 00:00:00 2001 From: Kilian Ries Date: Mon, 5 Jan 2026 14:43:33 +0100 Subject: [PATCH 041/233] add missing datasource --- .../ClickHouseKeeper_dashboard.json | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/grafana-dashboard/ClickHouseKeeper_dashboard.json b/grafana-dashboard/ClickHouseKeeper_dashboard.json index 2b47c9419..fbb298822 100644 --- a/grafana-dashboard/ClickHouseKeeper_dashboard.json +++ b/grafana-dashboard/ClickHouseKeeper_dashboard.json @@ -983,6 +983,24 @@ ], "templating": { "list": [ + { + "current": { + "selected": false, + "text": "prometheus", + "value": "prometheus" + }, + "hide": 2, + "includeAll": false, + "multi": false, + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, { "allValue": ".+", "current": {}, From 99312a7f3b0bb3bd674327f1928bcc03b606ff90 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 15:26:45 +0500 Subject: [PATCH 042/233] dev: unify crd manifest spec --- ...l-yaml-template-01-section-crd-03-chk.yaml | 30 +++++-------- ...=> worker-wait-exclude-include-restart.go} | 42 +++++++++---------- 2 files changed, 32 insertions(+), 40 deletions(-) rename pkg/controller/chk/{worker-exclude-include-wait.go => worker-wait-exclude-include-restart.go} (76%) diff --git a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml index 5323073bf..ca90b8a61 100644 --- a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml +++ b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml @@ -49,11 +49,10 @@ spec: type: string description: Resource status jsonPath: 
.status.status - - name: hosts-unchanged + - name: hosts-completed type: integer - description: Unchanged hosts count - priority: 1 # show in wide view - jsonPath: .status.hostsUnchanged + description: Completed hosts count + jsonPath: .status.hostsCompleted - name: hosts-updated type: integer description: Updated hosts count @@ -64,20 +63,11 @@ spec: description: Added hosts count priority: 1 # show in wide view jsonPath: .status.hostsAdded - - name: hosts-completed - type: integer - description: Completed hosts count - jsonPath: .status.hostsCompleted - name: hosts-deleted type: integer description: Hosts deleted count priority: 1 # show in wide view jsonPath: .status.hostsDeleted - - name: hosts-delete - type: integer - description: Hosts to be deleted count - priority: 1 # show in wide view - jsonPath: .status.hostsDelete - name: endpoint type: string description: Client access endpoint @@ -243,10 +233,12 @@ spec: normalized: type: object description: "Normalized resource requested" + nullable: true x-kubernetes-preserve-unknown-fields: true normalizedCompleted: type: object description: "Normalized resource completed" + nullable: true x-kubernetes-preserve-unknown-fields: true hostsWithTablesCreated: type: array @@ -284,7 +276,7 @@ spec: stop: &TypeStringBool type: string description: | - Allows to stop all ClickHouse clusters defined in a CHI. + Allows to stop all ClickHouse Keeper clusters defined in a CHK. Works as the following: - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s. 
@@ -528,6 +520,11 @@ spec: description: | optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` override top-level `chi.spec.configuration.files` + templates: + <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` pdbManaged: <<: *TypeStringBool description: | @@ -545,11 +542,6 @@ spec: by specifying 0. This is a mutually exclusive setting with "minAvailable". minimum: 0 maximum: 65535 - templates: - <<: *TypeTemplateNames - description: | - optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster - override top-level `chi.spec.configuration.templates` layout: type: object description: | diff --git a/pkg/controller/chk/worker-exclude-include-wait.go b/pkg/controller/chk/worker-wait-exclude-include-restart.go similarity index 76% rename from pkg/controller/chk/worker-exclude-include-wait.go rename to pkg/controller/chk/worker-wait-exclude-include-restart.go index 5334d0c75..4bc534ede 100644 --- a/pkg/controller/chk/worker-exclude-include-wait.go +++ b/pkg/controller/chk/worker-wait-exclude-include-restart.go @@ -24,18 +24,26 @@ import ( "github.com/altinity/clickhouse-operator/pkg/util" ) -func (w *worker) waitForIPAddresses(ctx context.Context, chk *apiChk.ClickHouseKeeperInstallation) { +// waitForIPAddresses waits for all pods to get IP address assigned +func (w *worker) waitForIPAddresses(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation) { if util.IsContextDone(ctx) { - log.V(2).Info("task is done") + log.V(1).Info("Reconcile is aborted. 
CR polling IP: %s ", chi.GetName()) return } - if chk.IsStopped() { + + if cr.IsStopped() { // No need to wait for stopped CHI return } - w.a.V(1).M(chk).F().S().Info("wait for IP addresses to be assigned to all pods") + + l := w.a.V(1).M(cr) + l.F().S().Info("wait for IP addresses to be assigned to all pods") + + // Let's limit polling time start := time.Now() - w.c.poll(ctx, chk, func(c *apiChk.ClickHouseKeeperInstallation, e error) bool { + timeout := 1 * time.Minute + + w.c.poll(ctx, cr, func(c *apiChk.ClickHouseKeeperInstallation, e error) bool { // TODO fix later // status IPs list can be empty // Instead of doing in status: @@ -43,19 +51,21 @@ func (w *worker) waitForIPAddresses(ctx context.Context, chk *apiChk.ClickHouseK // cur.EnsureStatus().SetPodIPs(podIPs) // and here // c.Status.GetPodIPs() - podIPs := w.c.getPodsIPs(ctx, chk) + podIPs := w.c.getPodsIPs(ctx, cr) if len(podIPs) >= len(c.Status.GetPods()) { + l.Info("all IP addresses are in place") // Stop polling - w.a.V(1).M(c).Info("all IP addresses are in place") return false } - if time.Since(start) > 1*time.Minute { + if time.Since(start) > timeout { + l.Warning("not all IP addresses are in place but time has elapsed") // Stop polling - w.a.V(1).M(c).Warning("not all IP addresses are in place but time has elapsed") return false } + + l.Info("still waiting - not all IP addresses are in place yet") + // Continue polling - w.a.V(1).M(c).Warning("still waiting - not all IP addresses are in place yet") return true }) } @@ -70,13 +80,8 @@ func (w *worker) shouldIncludeHost(host *api.Host) bool { return true } -// includeHost includes host back back into ClickHouse clusters +// includeHost includes host back into ClickHouse clusters func (w *worker) includeHost(ctx context.Context, host *api.Host) error { - if util.IsContextDone(ctx) { - log.V(2).Info("task is done") - return nil - } - if !w.shouldIncludeHost(host) { w.a.V(1). M(host).F(). 
@@ -90,11 +95,6 @@ func (w *worker) includeHost(ctx context.Context, host *api.Host) error { // includeHostIntoRaftCluster includes host into raft configuration func (w *worker) includeHostIntoRaftCluster(ctx context.Context, host *api.Host) { - if util.IsContextDone(ctx) { - log.V(2).Info("task is done") - return - } - w.a.V(1). M(host).F(). Info("going to include host. Host/shard/cluster: %d/%d/%s", From 0fc8c9a3d385afbf1a0bd5dc52ead636b689e60b Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 15:27:31 +0500 Subject: [PATCH 043/233] dev: actualize chk stopped check --- pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go index a8eeb1ae4..4c658b287 100644 --- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go +++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go @@ -423,7 +423,10 @@ func (cr *ClickHouseKeeperInstallation) IsAuto() bool { // IsStopped checks whether CR is stopped func (cr *ClickHouseKeeperInstallation) IsStopped() bool { - return false + if cr == nil { + return false + } + return cr.GetSpecT().GetStop().Value() } // IsRollingUpdate checks whether CHI should perform rolling update From 772961e67255605b42a63352b2c379609d3f411b Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 15:28:08 +0500 Subject: [PATCH 044/233] dev: enable stop check on cluster level --- .../clickhouse-keeper.altinity.com/v1/type_cluster.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go index 301287826..1638fba08 100644 --- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go +++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go @@ -220,6 +220,11 @@ func (cluster *Cluster) 
GetServiceTemplate() (*apiChi.ServiceTemplate, bool) { return nil, false } +// GetCR gets parent CR +func (cluster *Cluster) GetCR() *ClickHouseKeeperInstallation { + return cluster.Runtime.CHK +} + func (cluster *Cluster) GetAncestor() apiChi.ICluster { return (*Cluster)(nil) } @@ -373,6 +378,11 @@ func (cluster *Cluster) IsNonZero() bool { return cluster != nil } +// IsStopped checks whether host is stopped +func (cluster *Cluster) IsStopped() bool { + return cluster.GetCR().IsStopped() +} + func (cluster *Cluster) Ensure(create func() *Cluster) *Cluster { if cluster == nil { cluster = create() From 0e01da2fe9ab40877b752ad23f805eeccb0825eb Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 15:29:21 +0500 Subject: [PATCH 045/233] dev: introduce stop flag for chk --- .../v1/type_spec.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go index 28d15e41b..5a7003691 100644 --- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go +++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go @@ -22,6 +22,7 @@ import ( // ChkSpec defines spec section of ClickHouseKeeper resource type ChkSpec struct { TaskID *types.Id `json:"taskID,omitempty" yaml:"taskID,omitempty"` + Stop *types.StringBool `json:"stop,omitempty" yaml:"stop,omitempty"` NamespaceDomainPattern *types.String `json:"namespaceDomainPattern,omitempty" yaml:"namespaceDomainPattern,omitempty"` Suspend *types.StringBool `json:"suspend,omitempty" yaml:"suspend,omitempty"` Reconciling *apiChi.ChiReconcile `json:"reconciling,omitempty" yaml:"reconciling,omitempty"` @@ -47,6 +48,13 @@ func (spec *ChkSpec) GetTaskID() *types.Id { return spec.TaskID } +func (spec *ChkSpec) GetStop() *types.StringBool { + if spec == nil { + return (*types.StringBool)(nil) + } + return spec.Stop +} + func (spec *ChkSpec) GetNamespaceDomainPattern() *types.String { if spec == nil { 
return (*types.String)(nil) @@ -90,6 +98,9 @@ func (spec *ChkSpec) MergeFrom(from *ChkSpec, _type apiChi.MergeType) { if !spec.HasTaskID() { spec.TaskID = spec.TaskID.MergeFrom(from.TaskID) } + if !spec.Stop.HasValue() { + spec.Stop = spec.Stop.MergeFrom(from.Stop) + } if !spec.NamespaceDomainPattern.HasValue() { spec.NamespaceDomainPattern = spec.NamespaceDomainPattern.MergeFrom(from.NamespaceDomainPattern) } @@ -100,6 +111,10 @@ func (spec *ChkSpec) MergeFrom(from *ChkSpec, _type apiChi.MergeType) { if from.HasTaskID() { spec.TaskID = spec.TaskID.MergeFrom(from.TaskID) } + if from.Stop.HasValue() { + // Override by non-empty values only + spec.Stop = from.Stop + } if from.NamespaceDomainPattern.HasValue() { spec.NamespaceDomainPattern = spec.NamespaceDomainPattern.MergeFrom(from.NamespaceDomainPattern) } From 26998e477ef3c86616118c76ef474b61cd0f21c1 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 15:30:13 +0500 Subject: [PATCH 046/233] dev: merge suspend --- pkg/apis/clickhouse.altinity.com/v1/type_spec.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_spec.go b/pkg/apis/clickhouse.altinity.com/v1/type_spec.go index 650a73849..3969e8aaf 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_spec.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_spec.go @@ -157,8 +157,7 @@ func (spec *ChiSpec) MergeFrom(from *ChiSpec, _type MergeType) { spec.NamespaceDomainPattern = spec.NamespaceDomainPattern.MergeFrom(from.NamespaceDomainPattern) } if from.Suspend.HasValue() { - // Override by non-empty values only - spec.Suspend = from.Suspend + spec.Suspend = spec.Suspend.MergeFrom(from.Suspend) } } From 5ce5f6e40f4227ec0b621fc233d8a06730ed8ad3 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 15:31:17 +0500 Subject: [PATCH 047/233] dev: generalize monitoring functions --- pkg/controller/chi/worker-monitoring.go | 36 ++++++++++++------------- 1 file changed, 18 
insertions(+), 18 deletions(-) diff --git a/pkg/controller/chi/worker-monitoring.go b/pkg/controller/chi/worker-monitoring.go index ef837d06e..523e25e72 100644 --- a/pkg/controller/chi/worker-monitoring.go +++ b/pkg/controller/chi/worker-monitoring.go @@ -19,34 +19,34 @@ import ( a "github.com/altinity/clickhouse-operator/pkg/controller/common/announcer" ) -// excludeFromMonitoring excludes stopped CHI from monitoring -func (w *worker) excludeFromMonitoring(chi *api.ClickHouseInstallation) { - if !chi.IsStopped() { - // No need to exclude non-stopped CHI +// excludeFromMonitoring excludes stopped CR from monitoring +func (w *worker) excludeFromMonitoring(cr *api.ClickHouseInstallation) { + if !cr.IsStopped() { + // No need to exclude non-stopped CR return } // CR is stopped, let's exclude it from monitoring // because it makes no sense to send SQL requests to stopped instances w.a.V(1). - WithEvent(chi, a.EventActionReconcile, a.EventReasonReconcileInProgress). - WithAction(chi). - M(chi).F(). - Info("exclude CHI from monitoring") - w.c.deleteWatch(chi) + WithEvent(cr, a.EventActionReconcile, a.EventReasonReconcileInProgress). + WithAction(cr). + M(cr).F(). + Info("exclude CR from monitoring") + w.c.deleteWatch(cr) } -// addToMonitoring adds CHI to monitoring -func (w *worker) addToMonitoring(chi *api.ClickHouseInstallation) { - if chi.IsStopped() { - // No need to add stopped CHI +// addToMonitoring adds CR to monitoring +func (w *worker) addToMonitoring(cr *api.ClickHouseInstallation) { + if cr.IsStopped() { + // No need to add stopped CR return } w.a.V(1). - WithEvent(chi, a.EventActionReconcile, a.EventReasonReconcileInProgress). - WithAction(chi). - M(chi).F(). - Info("add CHI to monitoring") - w.c.updateWatch(chi) + WithEvent(cr, a.EventActionReconcile, a.EventReasonReconcileInProgress). + WithAction(cr). + M(cr).F(). 
+ Info("add CR to monitoring") + w.c.updateWatch(cr) } From 932486b15120b3345ab30abadbaaeae15cbc5ac6 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 15:31:54 +0500 Subject: [PATCH 048/233] dev: minor generalization --- pkg/controller/chi/worker-reconciler-chi.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/chi/worker-reconciler-chi.go b/pkg/controller/chi/worker-reconciler-chi.go index 0ddff8d95..3527ae06c 100644 --- a/pkg/controller/chi/worker-reconciler-chi.go +++ b/pkg/controller/chi/worker-reconciler-chi.go @@ -257,7 +257,7 @@ func (w *worker) reconcileCRServiceFinal(ctx context.Context, cr api.ICustomReso defer log.V(2).F().E().Info("second stage") if cr.IsStopped() { - // Stopped CHI must have no entry point + // Stopped CR must have no entry point return nil } From 5765af9313ac5dade6e6173d6250c2389b4d8d2a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 15:32:51 +0500 Subject: [PATCH 049/233] dev: generalize ip wit func --- .../chi/worker-wait-exclude-include-restart.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/controller/chi/worker-wait-exclude-include-restart.go b/pkg/controller/chi/worker-wait-exclude-include-restart.go index 743a06e65..f613e76eb 100644 --- a/pkg/controller/chi/worker-wait-exclude-include-restart.go +++ b/pkg/controller/chi/worker-wait-exclude-include-restart.go @@ -29,25 +29,25 @@ import ( ) // waitForIPAddresses waits for all pods to get IP address assigned -func (w *worker) waitForIPAddresses(ctx context.Context, chi *api.ClickHouseInstallation) { +func (w *worker) waitForIPAddresses(ctx context.Context, cr *api.ClickHouseInstallation) { if util.IsContextDone(ctx) { - log.V(1).Info("Reconcile is aborted. CR polling IP: %s ", chi.GetName()) + log.V(1).Info("Reconcile is aborted. 
CR polling IP: %s ", cr.GetName()) return } - if chi.IsStopped() { + if cr.IsStopped() { // No need to wait for stopped CHI return } - l := w.a.V(1).M(chi) + l := w.a.V(1).M(cr) l.F().S().Info("wait for IP addresses to be assigned to all pods") // Let's limit polling time start := time.Now() timeout := 1 * time.Minute - w.c.poll(ctx, chi, func(c *api.ClickHouseInstallation, e error) bool { + w.c.poll(ctx, cr, func(c *api.ClickHouseInstallation, e error) bool { // TODO fix later // status IPs list can be empty // Instead of doing in status: @@ -55,7 +55,7 @@ func (w *worker) waitForIPAddresses(ctx context.Context, chi *api.ClickHouseInst // cur.EnsureStatus().SetPodIPs(podIPs) // and here // c.Status.GetPodIPs() - podIPs := w.c.getPodsIPs(ctx, chi) + podIPs := w.c.getPodsIPs(ctx, cr) if len(podIPs) >= len(c.Status.GetPods()) { l.Info("all IP addresses are in place") // Stop polling From b29aa2a33d1843ca57f994605046a30311805103 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 15:33:56 +0500 Subject: [PATCH 050/233] dev: generalize monitoring --- pkg/controller/chk/worker-monitoring.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/controller/chk/worker-monitoring.go b/pkg/controller/chk/worker-monitoring.go index 027e1b559..c524383f2 100644 --- a/pkg/controller/chk/worker-monitoring.go +++ b/pkg/controller/chk/worker-monitoring.go @@ -19,9 +19,9 @@ import ( ) // excludeFromMonitoring excludes stopped CR from monitoring -func (w *worker) excludeFromMonitoring(chi *api.ClickHouseKeeperInstallation) { +func (w *worker) excludeFromMonitoring(cr *api.ClickHouseKeeperInstallation) { } // addToMonitoring adds CR to monitoring -func (w *worker) addToMonitoring(chi *api.ClickHouseKeeperInstallation) { +func (w *worker) addToMonitoring(cr *api.ClickHouseKeeperInstallation) { } From 6afbb4f0be214e58be28d81f59aff96485dc1f06 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 15:34:39 +0500 Subject: [PATCH 
051/233] dev: minor generalization --- pkg/controller/chk/worker-reconciler-chk.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/chk/worker-reconciler-chk.go b/pkg/controller/chk/worker-reconciler-chk.go index f3c0f063b..6891c1525 100644 --- a/pkg/controller/chk/worker-reconciler-chk.go +++ b/pkg/controller/chk/worker-reconciler-chk.go @@ -261,7 +261,7 @@ func (w *worker) reconcileCRServiceFinal(ctx context.Context, cr api.ICustomReso defer log.V(2).F().E().Info("second stage") if cr.IsStopped() { - // Stopped CHI must have no entry point + // Stopped CR must have no entry point return nil } From 36ba513def6ae42458bc1d773869c8904af9b4e3 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 15:35:15 +0500 Subject: [PATCH 052/233] dev: generalize ip wait --- pkg/controller/chk/worker-wait-exclude-include-restart.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/chk/worker-wait-exclude-include-restart.go b/pkg/controller/chk/worker-wait-exclude-include-restart.go index 4bc534ede..d2ff67bda 100644 --- a/pkg/controller/chk/worker-wait-exclude-include-restart.go +++ b/pkg/controller/chk/worker-wait-exclude-include-restart.go @@ -27,7 +27,7 @@ import ( // waitForIPAddresses waits for all pods to get IP address assigned func (w *worker) waitForIPAddresses(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation) { if util.IsContextDone(ctx) { - log.V(1).Info("Reconcile is aborted. CR polling IP: %s ", chi.GetName()) + log.V(1).Info("Reconcile is aborted. 
CR polling IP: %s ", cr.GetName()) return } From d97787f48000afc5ee598d42e84e023dd7d9956c Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 15:37:57 +0500 Subject: [PATCH 053/233] dev: introduce suspend normalizer --- pkg/model/chi/normalizer/normalizer.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/pkg/model/chi/normalizer/normalizer.go b/pkg/model/chi/normalizer/normalizer.go index ec4d51709..0e0bc8925 100644 --- a/pkg/model/chi/normalizer/normalizer.go +++ b/pkg/model/chi/normalizer/normalizer.go @@ -155,6 +155,7 @@ func (n *Normalizer) normalizeSpec() { n.req.GetTarget().GetSpecT().Stop = n.normalizeStop(n.req.GetTarget().GetSpecT().Stop) n.req.GetTarget().GetSpecT().Restart = n.normalizeRestart(n.req.GetTarget().GetSpecT().Restart) n.req.GetTarget().GetSpecT().Troubleshoot = n.normalizeTroubleshoot(n.req.GetTarget().GetSpecT().Troubleshoot) + n.req.GetTarget().GetSpecT().Suspend = n.normalizeSuspend(n.req.GetTarget().GetSpecT().Suspend) n.req.GetTarget().GetSpecT().NamespaceDomainPattern = n.normalizeNamespaceDomainPattern(n.req.GetTarget().GetSpecT().NamespaceDomainPattern) n.req.GetTarget().GetSpecT().Templating = n.normalizeTemplating(n.req.GetTarget().GetSpecT().Templating) n.normalizeReconciling() @@ -255,6 +256,17 @@ func (n *Normalizer) normalizeTroubleshoot(troubleshoot *types.StringBool) *type return types.NewStringBool(false) } +// normalizeSuspend normalizes .spec.suspend +func (n *Normalizer) normalizeSuspend(suspend *types.StringBool) *types.StringBool { + if suspend.IsValid() { + // It is bool, use as it is + return suspend + } + + // In case it is unknown value - just use set it to false + return types.NewStringBool(false) +} + func isNamespaceDomainPatternValid(namespaceDomainPattern *types.String) bool { if strings.Count(namespaceDomainPattern.Value(), "%s") > 1 { return false From 5c5164484aefa46cfa6a10a065aef1399181a2e8 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 
15:59:52 +0500 Subject: [PATCH 054/233] dev: introduce stop+suspend normalization --- pkg/model/chk/normalizer/normalizer.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/pkg/model/chk/normalizer/normalizer.go b/pkg/model/chk/normalizer/normalizer.go index 881b060d3..e1d2170e1 100644 --- a/pkg/model/chk/normalizer/normalizer.go +++ b/pkg/model/chk/normalizer/normalizer.go @@ -143,6 +143,8 @@ func (n *Normalizer) normalizeTarget() (*chk.ClickHouseKeeperInstallation, error func (n *Normalizer) normalizeSpec() { // Walk over Spec datatype fields n.req.GetTarget().GetSpecT().TaskID = n.normalizeTaskID(n.req.GetTarget().GetSpecT().TaskID) + n.req.GetTarget().GetSpecT().Stop = n.normalizeStop(n.req.GetTarget().GetSpecT().Stop) + n.req.GetTarget().GetSpecT().Suspend = n.normalizeSuspend(n.req.GetTarget().GetSpecT().Suspend) n.req.GetTarget().GetSpecT().NamespaceDomainPattern = n.normalizeNamespaceDomainPattern(n.req.GetTarget().GetSpecT().NamespaceDomainPattern) n.normalizeReconciling() n.req.GetTarget().GetSpecT().Reconcile = n.normalizeReconcile(n.req.GetTarget().GetSpecT().Reconcile) @@ -208,6 +210,28 @@ func (n *Normalizer) normalizeTaskID(taskID *types.Id) *types.Id { return types.NewAutoId() } +// normalizeStop normalizes .spec.stop +func (n *Normalizer) normalizeStop(stop *types.StringBool) *types.StringBool { + if stop.IsValid() { + // It is bool, use as it is + return stop + } + + // In case it is unknown value - just use set it to false + return types.NewStringBool(false) +} + +// normalizeSuspend normalizes .spec.suspend +func (n *Normalizer) normalizeSuspend(suspend *types.StringBool) *types.StringBool { + if suspend.IsValid() { + // It is bool, use as it is + return suspend + } + + // In case it is unknown value - just use set it to false + return types.NewStringBool(false) +} + func isNamespaceDomainPatternValid(namespaceDomainPattern *types.String) bool { if strings.Count(namespaceDomainPattern.Value(), "%s") > 1 { return false 
From 5784734694d5ff09f3526faa4355466c55143385 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 16:00:27 +0500 Subject: [PATCH 055/233] dev: codegenerator --- .../v1/zz_generated.deepcopy.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go index 645b11f29..525c080f3 100644 --- a/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go +++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go @@ -288,6 +288,11 @@ func (in *ChkSpec) DeepCopyInto(out *ChkSpec) { *out = new(types.Id) **out = **in } + if in.Stop != nil { + in, out := &in.Stop, &out.Stop + *out = new(types.StringBool) + **out = **in + } if in.NamespaceDomainPattern != nil { in, out := &in.NamespaceDomainPattern, &out.NamespaceDomainPattern *out = new(types.String) @@ -612,6 +617,11 @@ func (in *Status) DeepCopyInto(out *Status) { *out = new(ClickHouseKeeperInstallation) (*in).DeepCopyInto(*out) } + if in.ActionPlan != nil { + in, out := &in.ActionPlan, &out.ActionPlan + *out = new(clickhousealtinitycomv1.ActionPlan) + (*in).DeepCopyInto(*out) + } if in.HostsWithTablesCreated != nil { in, out := &in.HostsWithTablesCreated, &out.HostsWithTablesCreated *out = make([]string, len(*in)) From dea01a94c50a2a2e599abe714902a6f01939d234 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 16:00:51 +0500 Subject: [PATCH 056/233] env: manifests --- .../clickhouse-operator-install-ansible.yaml | 30 ++---- ...house-operator-install-bundle-v1beta1.yaml | 30 ++---- .../clickhouse-operator-install-bundle.yaml | 30 ++---- ...use-operator-install-template-v1beta1.yaml | 30 ++---- .../clickhouse-operator-install-template.yaml | 30 ++---- .../clickhouse-operator-install-tf.yaml | 30 ++---- deploy/operator/parts/crd.yaml | 100 ++++++++---------- 7 files changed, 112 insertions(+), 168 deletions(-) diff --git 
a/deploy/operator/clickhouse-operator-install-ansible.yaml b/deploy/operator/clickhouse-operator-install-ansible.yaml index 196533b00..63c55e27e 100644 --- a/deploy/operator/clickhouse-operator-install-ansible.yaml +++ b/deploy/operator/clickhouse-operator-install-ansible.yaml @@ -3519,11 +3519,10 @@ spec: type: string description: Resource status jsonPath: .status.status - - name: hosts-unchanged + - name: hosts-completed type: integer - description: Unchanged hosts count - priority: 1 # show in wide view - jsonPath: .status.hostsUnchanged + description: Completed hosts count + jsonPath: .status.hostsCompleted - name: hosts-updated type: integer description: Updated hosts count @@ -3534,20 +3533,11 @@ spec: description: Added hosts count priority: 1 # show in wide view jsonPath: .status.hostsAdded - - name: hosts-completed - type: integer - description: Completed hosts count - jsonPath: .status.hostsCompleted - name: hosts-deleted type: integer description: Hosts deleted count priority: 1 # show in wide view jsonPath: .status.hostsDeleted - - name: hosts-delete - type: integer - description: Hosts to be deleted count - priority: 1 # show in wide view - jsonPath: .status.hostsDelete - name: endpoint type: string description: Client access endpoint @@ -3713,10 +3703,12 @@ spec: normalized: type: object description: "Normalized resource requested" + nullable: true x-kubernetes-preserve-unknown-fields: true normalizedCompleted: type: object description: "Normalized resource completed" + nullable: true x-kubernetes-preserve-unknown-fields: true hostsWithTablesCreated: type: array @@ -3754,7 +3746,7 @@ spec: stop: &TypeStringBool type: string description: | - Allows to stop all ClickHouse clusters defined in a CHI. + Allows to stop all ClickHouse Keeper clusters defined in a CHK. Works as the following: - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. 
- When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s. @@ -3998,6 +3990,11 @@ spec: description: | optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` override top-level `chi.spec.configuration.files` + templates: + <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` pdbManaged: <<: *TypeStringBool description: | @@ -4015,11 +4012,6 @@ spec: by specifying 0. This is a mutually exclusive setting with "minAvailable". minimum: 0 maximum: 65535 - templates: - <<: *TypeTemplateNames - description: | - optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster - override top-level `chi.spec.configuration.templates` layout: type: object description: | diff --git a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml index 6a7842498..bcaf40f47 100644 --- a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml +++ b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml @@ -3481,11 +3481,10 @@ spec: type: string description: Resource status jsonPath: .status.status - - name: hosts-unchanged + - name: hosts-completed type: integer - description: Unchanged hosts count - priority: 1 # show in wide view - jsonPath: .status.hostsUnchanged + description: Completed hosts count + jsonPath: .status.hostsCompleted - name: hosts-updated type: integer description: Updated hosts count @@ -3496,20 +3495,11 @@ spec: description: Added hosts count priority: 1 # show in wide view 
jsonPath: .status.hostsAdded - - name: hosts-completed - type: integer - description: Completed hosts count - jsonPath: .status.hostsCompleted - name: hosts-deleted type: integer description: Hosts deleted count priority: 1 # show in wide view jsonPath: .status.hostsDeleted - - name: hosts-delete - type: integer - description: Hosts to be deleted count - priority: 1 # show in wide view - jsonPath: .status.hostsDelete - name: endpoint type: string description: Client access endpoint @@ -3675,10 +3665,12 @@ spec: normalized: type: object description: "Normalized resource requested" + nullable: true x-kubernetes-preserve-unknown-fields: true normalizedCompleted: type: object description: "Normalized resource completed" + nullable: true x-kubernetes-preserve-unknown-fields: true hostsWithTablesCreated: type: array @@ -3716,7 +3708,7 @@ spec: stop: &TypeStringBool type: string description: | - Allows to stop all ClickHouse clusters defined in a CHI. + Allows to stop all ClickHouse Keeper clusters defined in a CHK. Works as the following: - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s. 
@@ -3959,6 +3951,11 @@ spec: description: | optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` override top-level `chi.spec.configuration.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` pdbManaged: !!merge <<: *TypeStringBool description: | @@ -3976,11 +3973,6 @@ spec: by specifying 0. This is a mutually exclusive setting with "minAvailable". minimum: 0 maximum: 65535 - templates: - !!merge <<: *TypeTemplateNames - description: | - optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster - override top-level `chi.spec.configuration.templates` layout: type: object description: | diff --git a/deploy/operator/clickhouse-operator-install-bundle.yaml b/deploy/operator/clickhouse-operator-install-bundle.yaml index 2ef6c61e5..e4b67175d 100644 --- a/deploy/operator/clickhouse-operator-install-bundle.yaml +++ b/deploy/operator/clickhouse-operator-install-bundle.yaml @@ -3512,11 +3512,10 @@ spec: type: string description: Resource status jsonPath: .status.status - - name: hosts-unchanged + - name: hosts-completed type: integer - description: Unchanged hosts count - priority: 1 # show in wide view - jsonPath: .status.hostsUnchanged + description: Completed hosts count + jsonPath: .status.hostsCompleted - name: hosts-updated type: integer description: Updated hosts count @@ -3527,20 +3526,11 @@ spec: description: Added hosts count priority: 1 # show in wide view jsonPath: .status.hostsAdded - - name: hosts-completed - type: integer - description: Completed hosts count - jsonPath: .status.hostsCompleted - name: 
hosts-deleted type: integer description: Hosts deleted count priority: 1 # show in wide view jsonPath: .status.hostsDeleted - - name: hosts-delete - type: integer - description: Hosts to be deleted count - priority: 1 # show in wide view - jsonPath: .status.hostsDelete - name: endpoint type: string description: Client access endpoint @@ -3706,10 +3696,12 @@ spec: normalized: type: object description: "Normalized resource requested" + nullable: true x-kubernetes-preserve-unknown-fields: true normalizedCompleted: type: object description: "Normalized resource completed" + nullable: true x-kubernetes-preserve-unknown-fields: true hostsWithTablesCreated: type: array @@ -3747,7 +3739,7 @@ spec: stop: &TypeStringBool type: string description: | - Allows to stop all ClickHouse clusters defined in a CHI. + Allows to stop all ClickHouse Keeper clusters defined in a CHK. Works as the following: - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s. @@ -3991,6 +3983,11 @@ spec: description: | optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` override top-level `chi.spec.configuration.files` + templates: + <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` pdbManaged: <<: *TypeStringBool description: | @@ -4008,11 +4005,6 @@ spec: by specifying 0. This is a mutually exclusive setting with "minAvailable". 
minimum: 0 maximum: 65535 - templates: - <<: *TypeTemplateNames - description: | - optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster - override top-level `chi.spec.configuration.templates` layout: type: object description: | diff --git a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml index bbc1adfa3..507154642 100644 --- a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml +++ b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml @@ -3481,11 +3481,10 @@ spec: type: string description: Resource status jsonPath: .status.status - - name: hosts-unchanged + - name: hosts-completed type: integer - description: Unchanged hosts count - priority: 1 # show in wide view - jsonPath: .status.hostsUnchanged + description: Completed hosts count + jsonPath: .status.hostsCompleted - name: hosts-updated type: integer description: Updated hosts count @@ -3496,20 +3495,11 @@ spec: description: Added hosts count priority: 1 # show in wide view jsonPath: .status.hostsAdded - - name: hosts-completed - type: integer - description: Completed hosts count - jsonPath: .status.hostsCompleted - name: hosts-deleted type: integer description: Hosts deleted count priority: 1 # show in wide view jsonPath: .status.hostsDeleted - - name: hosts-delete - type: integer - description: Hosts to be deleted count - priority: 1 # show in wide view - jsonPath: .status.hostsDelete - name: endpoint type: string description: Client access endpoint @@ -3675,10 +3665,12 @@ spec: normalized: type: object description: "Normalized resource requested" + nullable: true x-kubernetes-preserve-unknown-fields: true normalizedCompleted: type: object description: "Normalized resource completed" + nullable: true x-kubernetes-preserve-unknown-fields: true hostsWithTablesCreated: type: array @@ -3716,7 +3708,7 @@ spec: stop: 
&TypeStringBool type: string description: | - Allows to stop all ClickHouse clusters defined in a CHI. + Allows to stop all ClickHouse Keeper clusters defined in a CHK. Works as the following: - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s. @@ -3959,6 +3951,11 @@ spec: description: | optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` override top-level `chi.spec.configuration.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` pdbManaged: !!merge <<: *TypeStringBool description: | @@ -3976,11 +3973,6 @@ spec: by specifying 0. This is a mutually exclusive setting with "minAvailable". 
minimum: 0 maximum: 65535 - templates: - !!merge <<: *TypeTemplateNames - description: | - optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster - override top-level `chi.spec.configuration.templates` layout: type: object description: | diff --git a/deploy/operator/clickhouse-operator-install-template.yaml b/deploy/operator/clickhouse-operator-install-template.yaml index 6f5aea129..6090c6463 100644 --- a/deploy/operator/clickhouse-operator-install-template.yaml +++ b/deploy/operator/clickhouse-operator-install-template.yaml @@ -3512,11 +3512,10 @@ spec: type: string description: Resource status jsonPath: .status.status - - name: hosts-unchanged + - name: hosts-completed type: integer - description: Unchanged hosts count - priority: 1 # show in wide view - jsonPath: .status.hostsUnchanged + description: Completed hosts count + jsonPath: .status.hostsCompleted - name: hosts-updated type: integer description: Updated hosts count @@ -3527,20 +3526,11 @@ spec: description: Added hosts count priority: 1 # show in wide view jsonPath: .status.hostsAdded - - name: hosts-completed - type: integer - description: Completed hosts count - jsonPath: .status.hostsCompleted - name: hosts-deleted type: integer description: Hosts deleted count priority: 1 # show in wide view jsonPath: .status.hostsDeleted - - name: hosts-delete - type: integer - description: Hosts to be deleted count - priority: 1 # show in wide view - jsonPath: .status.hostsDelete - name: endpoint type: string description: Client access endpoint @@ -3706,10 +3696,12 @@ spec: normalized: type: object description: "Normalized resource requested" + nullable: true x-kubernetes-preserve-unknown-fields: true normalizedCompleted: type: object description: "Normalized resource completed" + nullable: true x-kubernetes-preserve-unknown-fields: true hostsWithTablesCreated: type: array @@ -3747,7 +3739,7 @@ spec: stop: &TypeStringBool type: string 
description: | - Allows to stop all ClickHouse clusters defined in a CHI. + Allows to stop all ClickHouse Keeper clusters defined in a CHK. Works as the following: - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s. @@ -3991,6 +3983,11 @@ spec: description: | optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` override top-level `chi.spec.configuration.files` + templates: + <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` pdbManaged: <<: *TypeStringBool description: | @@ -4008,11 +4005,6 @@ spec: by specifying 0. This is a mutually exclusive setting with "minAvailable". 
minimum: 0 maximum: 65535 - templates: - <<: *TypeTemplateNames - description: | - optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster - override top-level `chi.spec.configuration.templates` layout: type: object description: | diff --git a/deploy/operator/clickhouse-operator-install-tf.yaml b/deploy/operator/clickhouse-operator-install-tf.yaml index 707bf9190..dd30e5c4b 100644 --- a/deploy/operator/clickhouse-operator-install-tf.yaml +++ b/deploy/operator/clickhouse-operator-install-tf.yaml @@ -3519,11 +3519,10 @@ spec: type: string description: Resource status jsonPath: .status.status - - name: hosts-unchanged + - name: hosts-completed type: integer - description: Unchanged hosts count - priority: 1 # show in wide view - jsonPath: .status.hostsUnchanged + description: Completed hosts count + jsonPath: .status.hostsCompleted - name: hosts-updated type: integer description: Updated hosts count @@ -3534,20 +3533,11 @@ spec: description: Added hosts count priority: 1 # show in wide view jsonPath: .status.hostsAdded - - name: hosts-completed - type: integer - description: Completed hosts count - jsonPath: .status.hostsCompleted - name: hosts-deleted type: integer description: Hosts deleted count priority: 1 # show in wide view jsonPath: .status.hostsDeleted - - name: hosts-delete - type: integer - description: Hosts to be deleted count - priority: 1 # show in wide view - jsonPath: .status.hostsDelete - name: endpoint type: string description: Client access endpoint @@ -3713,10 +3703,12 @@ spec: normalized: type: object description: "Normalized resource requested" + nullable: true x-kubernetes-preserve-unknown-fields: true normalizedCompleted: type: object description: "Normalized resource completed" + nullable: true x-kubernetes-preserve-unknown-fields: true hostsWithTablesCreated: type: array @@ -3754,7 +3746,7 @@ spec: stop: &TypeStringBool type: string description: | - Allows to stop all 
ClickHouse clusters defined in a CHI. + Allows to stop all ClickHouse Keeper clusters defined in a CHK. Works as the following: - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s. @@ -3998,6 +3990,11 @@ spec: description: | optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` override top-level `chi.spec.configuration.files` + templates: + <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` pdbManaged: <<: *TypeStringBool description: | @@ -4015,11 +4012,6 @@ spec: by specifying 0. This is a mutually exclusive setting with "minAvailable". 
minimum: 0 maximum: 65535 - templates: - <<: *TypeTemplateNames - description: | - optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster - override top-level `chi.spec.configuration.templates` layout: type: object description: | diff --git a/deploy/operator/parts/crd.yaml b/deploy/operator/parts/crd.yaml index beb4a811e..807e75bd4 100644 --- a/deploy/operator/parts/crd.yaml +++ b/deploy/operator/parts/crd.yaml @@ -8043,11 +8043,10 @@ spec: type: string description: Resource status jsonPath: .status.status - - name: hosts-unchanged + - name: hosts-completed type: integer - description: Unchanged hosts count - priority: 1 # show in wide view - jsonPath: .status.hostsUnchanged + description: Completed hosts count + jsonPath: .status.hostsCompleted - name: hosts-updated type: integer description: Updated hosts count @@ -8058,20 +8057,11 @@ spec: description: Added hosts count priority: 1 # show in wide view jsonPath: .status.hostsAdded - - name: hosts-completed - type: integer - description: Completed hosts count - jsonPath: .status.hostsCompleted - name: hosts-deleted type: integer description: Hosts deleted count priority: 1 # show in wide view jsonPath: .status.hostsDeleted - - name: hosts-delete - type: integer - description: Hosts to be deleted count - priority: 1 # show in wide view - jsonPath: .status.hostsDelete - name: endpoint type: string description: Client access endpoint @@ -8237,10 +8227,12 @@ spec: normalized: type: object description: "Normalized resource requested" + nullable: true x-kubernetes-preserve-unknown-fields: true normalizedCompleted: type: object description: "Normalized resource completed" + nullable: true x-kubernetes-preserve-unknown-fields: true hostsWithTablesCreated: type: array @@ -8278,7 +8270,7 @@ spec: stop: type: string description: | - Allows to stop all ClickHouse clusters defined in a CHI. + Allows to stop all ClickHouse Keeper clusters defined in a CHK. 
Works as the following: - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s. @@ -8609,6 +8601,46 @@ spec: description: | optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` override top-level `chi.spec.configuration.files` + templates: + type: object + # nullable: true + properties: + hostTemplate: + type: string + description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`" + podTemplate: + type: string + description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + dataVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + logVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + serviceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates. 
used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" + serviceTemplates: + type: array + description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" + nullable: true + items: + type: string + clusterServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`" + shardServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`" + replicaServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`" + volumeClaimTemplate: + type: string + description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` pdbManaged: type: string enum: @@ -8651,46 +8683,6 @@ spec: by 
specifying 0. This is a mutually exclusive setting with "minAvailable". minimum: 0 maximum: 65535 - templates: - type: object - # nullable: true - properties: - hostTemplate: - type: string - description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`" - podTemplate: - type: string - description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`" - dataVolumeClaimTemplate: - type: string - description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" - logVolumeClaimTemplate: - type: string - description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" - serviceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" - serviceTemplates: - type: array - description: "optional, template names from chi.spec.templates.serviceTemplates. 
used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" - nullable: true - items: - type: string - clusterServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`" - shardServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`" - replicaServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`" - volumeClaimTemplate: - type: string - description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" - description: | - optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster - override top-level `chi.spec.configuration.templates` layout: type: object description: | From b0a43a29799953a7ddde98d65ae026327f30a6f9 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 16:01:04 +0500 Subject: [PATCH 057/233] env: helm --- ...ations.clickhouse-keeper.altinity.com.yaml | 30 +++++++------------ 1 file changed, 11 insertions(+), 19 
deletions(-) diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml index 201f1ffcb..498938a52 100644 --- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml +++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml @@ -49,11 +49,10 @@ spec: type: string description: Resource status jsonPath: .status.status - - name: hosts-unchanged + - name: hosts-completed type: integer - description: Unchanged hosts count - priority: 1 # show in wide view - jsonPath: .status.hostsUnchanged + description: Completed hosts count + jsonPath: .status.hostsCompleted - name: hosts-updated type: integer description: Updated hosts count @@ -64,20 +63,11 @@ spec: description: Added hosts count priority: 1 # show in wide view jsonPath: .status.hostsAdded - - name: hosts-completed - type: integer - description: Completed hosts count - jsonPath: .status.hostsCompleted - name: hosts-deleted type: integer description: Hosts deleted count priority: 1 # show in wide view jsonPath: .status.hostsDeleted - - name: hosts-delete - type: integer - description: Hosts to be deleted count - priority: 1 # show in wide view - jsonPath: .status.hostsDelete - name: endpoint type: string description: Client access endpoint @@ -243,10 +233,12 @@ spec: normalized: type: object description: "Normalized resource requested" + nullable: true x-kubernetes-preserve-unknown-fields: true normalizedCompleted: type: object description: "Normalized resource completed" + nullable: true x-kubernetes-preserve-unknown-fields: true hostsWithTablesCreated: type: array @@ -284,7 +276,7 @@ spec: stop: &TypeStringBool type: string description: | - Allows to stop all 
ClickHouse clusters defined in a CHI. + Allows to stop all ClickHouse Keeper clusters defined in a CHK. Works as the following: - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s. @@ -527,6 +519,11 @@ spec: description: | optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` override top-level `chi.spec.configuration.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` pdbManaged: !!merge <<: *TypeStringBool description: | @@ -544,11 +541,6 @@ spec: by specifying 0. This is a mutually exclusive setting with "minAvailable". 
minimum: 0 maximum: 65535 - templates: - !!merge <<: *TypeTemplateNames - description: | - optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster - override top-level `chi.spec.configuration.templates` layout: type: object description: | From 3d2c803342780d23b1d84d79eca3dec7a953e75c Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 18:17:04 +0500 Subject: [PATCH 058/233] dev: generalize enqueue checker --- pkg/controller/chi/controller.go | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go index 3534ca622..8dab0dead 100644 --- a/pkg/controller/chi/controller.go +++ b/pkg/controller/chi/controller.go @@ -924,15 +924,10 @@ func (c *Controller) handleObject(obj interface{}) { // TODO c.enqueueObject(chi.Namespace, chi.Name, chi) } -func shouldEnqueue(chi *api.ClickHouseInstallation) bool { - if !chop.Config().IsNamespaceWatched(chi.Namespace) { - log.V(2).M(chi).Info("chiInformer: skip enqueue, namespace '%s' is not watched or is in deny list", chi.Namespace) - return false - } - - // if CR is suspended, should skip reconciliation - if chi.Spec.Suspend.Value() { - log.V(5).M(chi).Info("chiInformer: skip enqueue, CHI suspended") +func shouldEnqueue(cr *api.ClickHouseInstallation) bool { + ns := cr.GetNamespace() + if !chop.Config().IsNamespaceWatched(ns) { + log.V(2).M(cr).Info("skip enqueue, namespace '%s' is not watched or is in deny list", ns) return false } From 4c872a05cb5c7aa5a2782cda64b20284b2d62632 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 18:19:07 +0500 Subject: [PATCH 059/233] dev: move suspend checker into main loop --- pkg/controller/chi/worker-reconciler-chi.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/controller/chi/worker-reconciler-chi.go b/pkg/controller/chi/worker-reconciler-chi.go index 3527ae06c..6a6c980d5 100644 
--- a/pkg/controller/chi/worker-reconciler-chi.go +++ b/pkg/controller/chi/worker-reconciler-chi.go @@ -64,12 +64,18 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *api.ClickHouseInstal new = w.buildCR(ctx, new) switch { + case new.Spec.Suspend.Value(): + // if CR is suspended, should skip reconciliation + w.a.M(new).F().Info("Suspended CR") + metrics.CRReconcilesCompleted(ctx, new) + return nil case new.EnsureRuntime().ActionPlan.HasActionsToDo(): w.a.M(new).F().Info("ActionPlan has actions - continue reconcile") case w.isAfterFinalizerInstalled(new.GetAncestorT(), new): w.a.M(new).F().Info("isAfterFinalizerInstalled - continue reconcile-2") default: w.a.M(new).F().Info("ActionPlan has no actions - abort reconcile") + metrics.CRReconcilesCompleted(ctx, new) return nil } From 26adbc191d486e4a334b46ee3b7f9d98f053bbde Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 18:19:37 +0500 Subject: [PATCH 060/233] dev: minor --- cmd/operator/app/thread_keeper.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cmd/operator/app/thread_keeper.go b/cmd/operator/app/thread_keeper.go index e5e707117..acaa9dbf6 100644 --- a/cmd/operator/app/thread_keeper.go +++ b/cmd/operator/app/thread_keeper.go @@ -59,7 +59,10 @@ func initKeeper(ctx context.Context) error { err = ctrlRuntime. NewControllerManagedBy(manager). - For(&api.ClickHouseKeeperInstallation{}, builder.WithPredicates(keeperPredicate())). + For( + &api.ClickHouseKeeperInstallation{}, + builder.WithPredicates(keeperPredicate()), + ). Owns(&apps.StatefulSet{}). 
Complete( &controller.Controller{ From 180a3ff7efb6112986c9fb6067b49e2c84bc7175 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 18:29:19 +0500 Subject: [PATCH 061/233] dev: unify enqueue checker --- pkg/controller/chi/controller.go | 6 +++--- pkg/controller/chk/controller.go | 11 +++++++++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go index 8dab0dead..eb3bd3712 100644 --- a/pkg/controller/chi/controller.go +++ b/pkg/controller/chi/controller.go @@ -154,7 +154,7 @@ func (c *Controller) addEventHandlersCHI( chopInformerFactory.Clickhouse().V1().ClickHouseInstallations().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { chi := obj.(*api.ClickHouseInstallation) - if !shouldEnqueue(chi) { + if !ShouldEnqueue(chi) { return } log.V(3).M(chi).Info("chiInformer.AddFunc") @@ -163,7 +163,7 @@ func (c *Controller) addEventHandlersCHI( UpdateFunc: func(old, new interface{}) { oldChi := old.(*api.ClickHouseInstallation) newChi := new.(*api.ClickHouseInstallation) - if !shouldEnqueue(newChi) { + if !ShouldEnqueue(newChi) { return } log.V(3).M(newChi).Info("chiInformer.UpdateFunc") @@ -924,7 +924,7 @@ func (c *Controller) handleObject(obj interface{}) { // TODO c.enqueueObject(chi.Namespace, chi.Name, chi) } -func shouldEnqueue(cr *api.ClickHouseInstallation) bool { +func ShouldEnqueue(cr *api.ClickHouseInstallation) bool { ns := cr.GetNamespace() if !chop.Config().IsNamespaceWatched(ns) { log.V(2).M(cr).Info("skip enqueue, namespace '%s' is not watched or is in deny list", ns) diff --git a/pkg/controller/chk/controller.go b/pkg/controller/chk/controller.go index 2fc21653e..2456720b3 100644 --- a/pkg/controller/chk/controller.go +++ b/pkg/controller/chk/controller.go @@ -16,6 +16,7 @@ package chk import ( "context" + "github.com/altinity/clickhouse-operator/pkg/chop" "time" log 
"github.com/altinity/clickhouse-operator/pkg/announcer" @@ -105,3 +106,13 @@ func (c *Controller) poll(ctx context.Context, cr api.ICustomResource, f func(c } } } + +func ShouldEnqueue(cr *apiChk.ClickHouseKeeperInstallation) bool { + ns := cr.GetNamespace() + if !chop.Config().IsNamespaceWatched(ns) { + log.V(2).M(cr).Info("skip enqueue, namespace '%s' is not watched or is in deny list", ns) + return false + } + + return true +} From c6b62a5c18ce3288c99f29ec7519b728384877b8 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 18:29:57 +0500 Subject: [PATCH 062/233] dev: switch to controller checker --- cmd/operator/app/thread_keeper.go | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/cmd/operator/app/thread_keeper.go b/cmd/operator/app/thread_keeper.go index acaa9dbf6..bd09323c6 100644 --- a/cmd/operator/app/thread_keeper.go +++ b/cmd/operator/app/thread_keeper.go @@ -91,40 +91,30 @@ func runKeeper(ctx context.Context) error { func keeperPredicate() predicate.Funcs { return predicate.Funcs{ CreateFunc: func(e event.CreateEvent) bool { - obj, ok := e.Object.(*api.ClickHouseKeeperInstallation) + new, ok := e.Object.(*api.ClickHouseKeeperInstallation) if !ok { return false } - // Check if namespace should be watched (includes deny list check) - if !chop.Config().IsNamespaceWatched(obj.Namespace) { - logger.V(2).Info("chkInformer: skip event, namespace is not watched or is in deny list", "namespace", obj.Namespace) + if !controller.ShouldEnqueue(new) { return false } - if obj.Spec.Suspend.Value() { - return false - } return true }, DeleteFunc: func(e event.DeleteEvent) bool { return true }, UpdateFunc: func(e event.UpdateEvent) bool { - obj, ok := e.ObjectNew.(*api.ClickHouseKeeperInstallation) + new, ok := e.ObjectNew.(*api.ClickHouseKeeperInstallation) if !ok { return false } - // Check if namespace should be watched (includes deny list check) - if !chop.Config().IsNamespaceWatched(obj.Namespace) { - 
logger.V(2).Info("chkInformer: skip event, namespace is not watched or is in deny list", "namespace", obj.Namespace) + if !controller.ShouldEnqueue(new) { return false } - if obj.Spec.Suspend.Value() { - return false - } return true }, GenericFunc: func(e event.GenericEvent) bool { From 7c94aa3a849600b20a36ca90c21431dcddf7c642 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 18:38:29 +0500 Subject: [PATCH 063/233] dev: apply suspend for chk --- pkg/controller/chk/worker-reconciler-chk.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/controller/chk/worker-reconciler-chk.go b/pkg/controller/chk/worker-reconciler-chk.go index 6891c1525..00e44dfc1 100644 --- a/pkg/controller/chk/worker-reconciler-chk.go +++ b/pkg/controller/chk/worker-reconciler-chk.go @@ -64,12 +64,18 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *apiChk.ClickHouseKee new = w.buildCR(ctx, new) switch { + case new.Spec.Suspend.Value(): + // if CR is suspended, should skip reconciliation + w.a.M(new).F().Info("Suspended CR") + metrics.CRReconcilesCompleted(ctx, new) + return nil case new.EnsureRuntime().ActionPlan.HasActionsToDo(): w.a.M(new).F().Info("ActionPlan has actions - continue reconcile") case w.isAfterFinalizerInstalled(new.GetAncestorT(), new): w.a.M(new).F().Info("isAfterFinalizerInstalled - continue reconcile-2") default: w.a.M(new).F().Info("ActionPlan has no actions - abort reconcile") + metrics.CRReconcilesCompleted(ctx, new) return nil } From 4cc7110e62c9fa123cdcd4af16972c66e2a60f92 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 12 Jan 2026 18:48:51 +0500 Subject: [PATCH 064/233] dev: clarify versioner --- pkg/apis/swversion/software_version.go | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/pkg/apis/swversion/software_version.go b/pkg/apis/swversion/software_version.go index 529618ad5..f40cd9497 100644 --- a/pkg/apis/swversion/software_version.go +++ 
b/pkg/apis/swversion/software_version.go @@ -55,7 +55,10 @@ func (in *SoftWareVersion) DeepCopyInto(out *SoftWareVersion) { } // NewSoftWareVersion creates new software version -// version - specifies original software version, such as: 21 or 21.1 or 21.9.6.24-alpha +// version - specifies original software version, such as: +// a) 21 or +// b) 21.1 or +// c) 21.9.6.24-alpha func NewSoftWareVersion(version string) *SoftWareVersion { if strings.TrimSpace(version) == "" { return nil @@ -64,12 +67,12 @@ func NewSoftWareVersion(version string) *SoftWareVersion { // Fetch comma-separated parts of the software version parts := strings.Split(version, ".") - // Need to have at least something to as a major version + // Need to have at least something to be treated as a major version if len(parts) < 1 { return nil } - // Need to have at least 3 parts in software version specification + // Pad to have 3 parts in software version specification for len(parts) < 3 { parts = append(parts, "0") } @@ -82,15 +85,16 @@ func NewSoftWareVersion(version string) *SoftWareVersion { } } - // Normalized version of the original + // Build normalized version from the original/padded parts normalized := strings.Join(parts, ".") - // Build version + // Build semver version _semver, err := semver.NewVersion(normalized) if err != nil { return nil } + // So far so good, version is available return &SoftWareVersion{ original: version, normalized: normalized, @@ -98,6 +102,8 @@ func NewSoftWareVersion(version string) *SoftWareVersion { } } +// NewSoftWareVersionFromTag build SoftWareVersion from docker image tag +// Tag 'latest' leads to default MaxVersion() func NewSoftWareVersionFromTag(tag string) *SoftWareVersion { if strings.ToLower(strings.TrimSpace(tag)) == "latest" { return MaxVersion() @@ -157,6 +163,7 @@ func (v *SoftWareVersion) IsKnown() bool { return !v.IsUnknown() } +// SetDescription sets string description func (v *SoftWareVersion) SetDescription(format string, args ...interface{}) 
*SoftWareVersion { if v == nil { return nil From 99f971cf009306c39f8c64b167692ac4bbf00db0 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 13 Jan 2026 14:17:06 +0500 Subject: [PATCH 065/233] dev: switch chk versioner --- pkg/controller/chk/worker-reconciler-helper.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/controller/chk/worker-reconciler-helper.go b/pkg/controller/chk/worker-reconciler-helper.go index 36e890733..d3e289f5e 100644 --- a/pkg/controller/chk/worker-reconciler-helper.go +++ b/pkg/controller/chk/worker-reconciler-helper.go @@ -26,8 +26,14 @@ import ( ) func (w *worker) getHostSoftwareVersion(ctx context.Context, host *api.Host) *swversion.SoftWareVersion { + // Try to report tag-based version + if tagBasedVersion := w.getTagBasedVersion(host); tagBasedVersion.IsKnown() { + // Able to report version from the tag + return tagBasedVersion.SetDescription("parsed from the tag: '%s'", tagBasedVersion.GetOriginal()) + } + // Unable to acquire any version - report min one - return swversion.MaxVersion().SetDescription("so far so") + return swversion.MinVersion().SetDescription("min - unable to acquire neither from the tag nor from the app") } // getReconcileShardsWorkersNum calculates how many workers are allowed to be used for concurrent shard reconcile From e7374c449b38016f002354225309244bb570cde7 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 13 Jan 2026 14:17:39 +0500 Subject: [PATCH 066/233] dev: chk app versioner --- pkg/controller/chk/worker-app-version.go | 29 ++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 pkg/controller/chk/worker-app-version.go diff --git a/pkg/controller/chk/worker-app-version.go b/pkg/controller/chk/worker-app-version.go new file mode 100644 index 000000000..3593be3ec --- /dev/null +++ b/pkg/controller/chk/worker-app-version.go @@ -0,0 +1,29 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package chk + +import ( + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + "github.com/altinity/clickhouse-operator/pkg/apis/swversion" +) + +func (w *worker) getTagBasedVersion(host *api.Host) *swversion.SoftWareVersion { + // Fetch tag from the image + var tagBasedVersion *swversion.SoftWareVersion + if tag, tagFound := w.task.Creator().GetAppImageTag(host); tagFound { + tagBasedVersion = swversion.NewSoftWareVersionFromTag(tag) + } + return tagBasedVersion +} From 1da5ec071c84a1a235eb149cd50a510abf3d48ed Mon Sep 17 00:00:00 2001 From: alz Date: Tue, 13 Jan 2026 12:46:07 +0300 Subject: [PATCH 067/233] test cluster settings on replicas --- .../manifests/chi/test-003-complex-layout.yaml | 4 ++-- tests/e2e/test_operator.py | 16 ++++++++++++++-- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/tests/e2e/manifests/chi/test-003-complex-layout.yaml b/tests/e2e/manifests/chi/test-003-complex-layout.yaml index 590c36258..dfcfcfb62 100644 --- a/tests/e2e/manifests/chi/test-003-complex-layout.yaml +++ b/tests/e2e/manifests/chi/test-003-complex-layout.yaml @@ -15,10 +15,10 @@ spec: replicas: - name: replica0-0 settings: - display_name: replica0 + default_replica_name: myreplica0 - name: replica0-1 settings: - display_name: replica1 + default_replica_name: myreplica1 - name: shard1 replicas: - name: replica1-0 diff --git a/tests/e2e/test_operator.py 
b/tests/e2e/test_operator.py index c5885eadc..f70719d56 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -98,8 +98,21 @@ def test_010003(self): "service": 5, }, "pdb": {"cluster1": 0, "cluster2": 1}, + "do_not_delete": 1 }, ) + + chi = "test-003-complex-layout" + cluster = "cluster1" + with Then('Cluster settings should be different on replicas'): + replica0 = clickhouse.query(chi, "select value from system.server_settings where name = 'default_replica_name'", + host=f"chi-{chi}-{cluster}-replica0-0-0") + replica1 = clickhouse.query(chi, "select value from system.server_settings where name = 'default_replica_name'", + host=f"chi-{chi}-{cluster}-replica0-1-0") + print(replica0) + print(replica1) + assert replica0 == "myreplica0" and replica1 == "myreplica1" + with Finally("I clean up"): delete_test_namespace() @@ -124,8 +137,7 @@ def test_010004(self): }, ) with Finally("I clean up"): - with By("deleting test namespace"): - delete_test_namespace() + delete_test_namespace() @TestScenario From 065f59d1b7cbba2c9fb020dbec1585e65bb7372b Mon Sep 17 00:00:00 2001 From: alz Date: Wed, 14 Jan 2026 14:12:52 +0300 Subject: [PATCH 068/233] Do not report non-expired sessions into monitoring --- pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go index 2fceb60f9..00f14e9e5 100644 --- a/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go +++ b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go @@ -28,8 +28,9 @@ const ( SELECT database, table, - toString(is_session_expired) AS is_session_expired + '1' AS session_expired FROM system.replicas + WHERE is_session_expired ` queryMetricsSQL = ` From 1fc8198a534e3e19a623b247a7708f6eddfa3386 Mon Sep 17 00:00:00 2001 From: Andrew Seigner Date: Wed, 14 Jan 2026 22:05:52 +0000 Subject: [PATCH 069/233] Fix doc URLs in alerts and dashboards 
Signed-off-by: Andrew Seigner --- .../Altinity_ClickHouse_Operator_dashboard.json | 14 +++++++------- .../prometheus-alert-rules-clickhouse.yaml | 14 +++++++------- .../Altinity_ClickHouse_Operator_dashboard.json | 14 +++++++------- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/deploy/helm/clickhouse-operator/files/Altinity_ClickHouse_Operator_dashboard.json b/deploy/helm/clickhouse-operator/files/Altinity_ClickHouse_Operator_dashboard.json index 0f3e34b85..5b299d9ba 100644 --- a/deploy/helm/clickhouse-operator/files/Altinity_ClickHouse_Operator_dashboard.json +++ b/deploy/helm/clickhouse-operator/files/Altinity_ClickHouse_Operator_dashboard.json @@ -1282,7 +1282,7 @@ { "targetBlank": true, "title": "max_concurent_queries", - "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max_concurrent_queries" + "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries" }, { "targetBlank": true, @@ -1412,7 +1412,7 @@ { "targetBlank": true, "title": "max_concurent_queries", - "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max-concurrent-queries" + "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries" }, { "targetBlank": true, @@ -1727,7 +1727,7 @@ { "targetBlank": true, "title": "max_concurent_queries", - "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max_concurrent_queries" + "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries" }, { "targetBlank": true, @@ -2253,7 +2253,7 @@ { "targetBlank": true, "title": "max_replica_delay_for_distributed_queries", - "url": "https://clickhouse.com/docs/en/operations/settings/settings#settings-max_replica_delay_for_distributed_queries" + "url": 
"https://clickhouse.com/docs/operations/settings/settings#max_replica_delay_for_distributed_queries" } ], "options": { @@ -4298,7 +4298,7 @@ { "targetBlank": true, "title": "mark_cache_size", - "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#server-mark-cache-size" + "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#mark_cache_size" }, { "targetBlank": true, @@ -5335,12 +5335,12 @@ { "targetBlank": true, "title": "max_connections", - "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max-connections" + "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_connections" }, { "targetBlank": true, "title": "max_distributed_connections", - "url": "https://clickhouse.com/docs/en/operations/settings/settings#max-distributed-connections" + "url": "https://clickhouse.com/docs/operations/settings/settings#max_distributed_connections" }, { "targetBlank": true, diff --git a/deploy/prometheus/prometheus-alert-rules-clickhouse.yaml b/deploy/prometheus/prometheus-alert-rules-clickhouse.yaml index d78349e30..2d4303da5 100644 --- a/deploy/prometheus/prometheus-alert-rules-clickhouse.yaml +++ b/deploy/prometheus/prometheus-alert-rules-clickhouse.yaml @@ -72,8 +72,8 @@ spec: `increase(chi_clickhouse_event_DNSError[1m])` = {{ with printf "increase(chi_clickhouse_event_DNSError{hostname='%s',exported_namespace='%s'}[1m]) or increase(chi_clickhouse_event_NetworkErrors{hostname='%s',exported_namespace='%s'}[1m])" .Labels.hostname .Labels.exported_namespace .Labels.hostname .Labels.exported_namespace | query }}{{ . 
| first | value | printf "%.2f" }} errors{{ end }} Please check DNS settings in `/etc/resolve.conf` and `` part of `/etc/clickhouse-server/` See documentation: - - https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#server-settings-remote-servers - - https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#server-settings-disable-internal-dns-cache + - https://clickhouse.com/docs/operations/server-configuration-parameters/settings#remote_servers + - https://clickhouse.com/docs/operations/server-configuration-parameters/settings#disable_internal_dns_cache - https://clickhouse.com/docs/en/sql-reference/statements/system/ - alert: ClickHouseDistributedFilesToInsertHigh @@ -112,7 +112,7 @@ spec: `increase(chi_clickhouse_event_DistributedConnectionFailAtAll[1m])` = {{ with printf "increase(chi_clickhouse_event_DistributedConnectionFailAtAll{hostname='%s',exported_namespace='%s'}[1m])" .Labels.hostname .Labels.exported_namespace | query }}{{ . | first | value | printf "%.2f" }} errors{{ end }} Please, check communications between clickhouse server and host `remote_servers` in `/etc/clickhouse-server/` - https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#server-settings-remote-servers + https://clickhouse.com/docs/operations/server-configuration-parameters/settings#remote_servers Also, you can check logs: ```kubectl logs -n {{ $labels.exported_namespace }} $( echo {{ $labels.hostname }} | cut -d '.' 
-f 1)-0 -f``` @@ -178,7 +178,7 @@ spec: Please use Buffer table https://clickhouse.com/docs/en/engines/table-engines/special/buffer/ or - https://clickhouse.com/docs/en/operations/settings/settings/#async-insert + https://clickhouse.com/docs/operations/settings/settings#async_insert - alert: ClickHouseLongestRunningQuery expr: chi_clickhouse_metric_LongestRunningQuery > 600 @@ -259,7 +259,7 @@ spec: The ClickHouse is adapted to run not a very large number of parallel SQL requests, not every HTTP/TCP(Native)/MySQL protocol connection means a running SQL request, but a large number of open connections can cause a spike in sudden SQL requests, resulting in performance degradation. Also read documentation: - - https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#max-concurrent-queries + - https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries - alert: ClickHouseTooManyRunningQueries @@ -279,7 +279,7 @@ spec: Look at following documentation parts: - https://clickhouse.com/docs/en/operations/settings/query-complexity/ - https://clickhouse.com/docs/en/operations/quotas/ - - https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#max-concurrent-queries + - https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries - https://clickhouse.com/docs/en/operations/system-tables/query_log/ - alert: ClickHouseSystemSettingsChanged @@ -527,7 +527,7 @@ spec: chi_clickhouse_metric_BackgroundMessageBrokerSchedulePoolTask = {{ with printf "chi_clickhouse_metric_BackgroundMessageBrokerSchedulePoolTask{exported_namespace='%s',chi='%s',hostname='%s'}" .Labels.exported_namespace .Labels.chi .Labels.hostname | query }}{{ . 
| first | value | printf "%.0f" }}{{ end }} chi_clickhouse_metric_BackgroundMessageBrokerSchedulePoolSize = {{ with printf "chi_clickhouse_metric_BackgroundMessageBrokerSchedulePoolSize{exported_namespace='%s',chi='%s',hostname='%s'}" .Labels.exported_namespace .Labels.chi .Labels.hostname | query }}{{ . | first | value | printf "%.0f" }}{{ end }} - https://kb.altinity.com/altinity-kb-integrations/altinity-kb-kafka/background_message_broker_schedule_pool_size/ - - https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#background_message_broker_schedule_pool_size + - https://clickhouse.com/docs/operations/server-configuration-parameters/settings#background_message_broker_schedule_pool_size - https://clickhouse.com/docs/en/operations/system-tables/metrics#backgroundmessagebrokerschedulepoolsize This pool is used for tasks related to message streaming from Apache Kafka or other message brokers. You need to increase `background_message_broker_schedule_pool_size` to fix the problem. 
diff --git a/grafana-dashboard/Altinity_ClickHouse_Operator_dashboard.json b/grafana-dashboard/Altinity_ClickHouse_Operator_dashboard.json index f9a9e24fa..68287e986 100644 --- a/grafana-dashboard/Altinity_ClickHouse_Operator_dashboard.json +++ b/grafana-dashboard/Altinity_ClickHouse_Operator_dashboard.json @@ -1282,7 +1282,7 @@ { "targetBlank": true, "title": "max_concurent_queries", - "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max_concurrent_queries" + "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries" }, { "targetBlank": true, @@ -1412,7 +1412,7 @@ { "targetBlank": true, "title": "max_concurent_queries", - "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max-concurrent-queries" + "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries" }, { "targetBlank": true, @@ -1727,7 +1727,7 @@ { "targetBlank": true, "title": "max_concurent_queries", - "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max_concurrent_queries" + "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries" }, { "targetBlank": true, @@ -2253,7 +2253,7 @@ { "targetBlank": true, "title": "max_replica_delay_for_distributed_queries", - "url": "https://clickhouse.com/docs/en/operations/settings/settings#settings-max_replica_delay_for_distributed_queries" + "url": "https://clickhouse.com/docs/operations/settings/settings#max_replica_delay_for_distributed_queries" } ], "options": { @@ -4298,7 +4298,7 @@ { "targetBlank": true, "title": "mark_cache_size", - "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#server-mark-cache-size" + "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#mark_cache_size" }, { "targetBlank": true, @@ 
-5335,12 +5335,12 @@ { "targetBlank": true, "title": "max_connections", - "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max-connections" + "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_connections" }, { "targetBlank": true, "title": "max_distributed_connections", - "url": "https://clickhouse.com/docs/en/operations/settings/settings#max-distributed-connections" + "url": "https://clickhouse.com/docs/operations/settings/settings#max_distributed_connections" }, { "targetBlank": true, From 3a2beafa0db036f47dcaf139a9c3d14025744719 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 19 Jan 2026 15:58:51 +0500 Subject: [PATCH 070/233] dev: introduce chk reconciler inheritance --- .../v1/type_cluster.go | 33 ++++++++++++------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go index 1638fba08..5e4bb7b46 100644 --- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go +++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go @@ -21,15 +21,14 @@ import ( // Cluster defines item of a clusters section of .configuration type Cluster struct { - Name string `json:"name,omitempty" yaml:"name,omitempty"` - - Settings *apiChi.Settings `json:"settings,omitempty" yaml:"settings,omitempty"` - Files *apiChi.Settings `json:"files,omitempty" yaml:"files,omitempty"` - Templates *apiChi.TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"` - Layout *ChkClusterLayout `json:"layout,omitempty" yaml:"layout,omitempty"` - PDBManaged *types.StringBool `json:"pdbManaged,omitempty" yaml:"pdbManaged,omitempty"` - PDBMaxUnavailable *types.Int32 `json:"pdbMaxUnavailable,omitempty" yaml:"pdbMaxUnavailable,omitempty"` - Reconcile apiChi.ClusterReconcile `json:"reconcile" yaml:"reconcile"` + Name string `json:"name,omitempty" 
yaml:"name,omitempty"` + Settings *apiChi.Settings `json:"settings,omitempty" yaml:"settings,omitempty"` + Files *apiChi.Settings `json:"files,omitempty" yaml:"files,omitempty"` + Templates *apiChi.TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"` + PDBManaged *types.StringBool `json:"pdbManaged,omitempty" yaml:"pdbManaged,omitempty"` + PDBMaxUnavailable *types.Int32 `json:"pdbMaxUnavailable,omitempty" yaml:"pdbMaxUnavailable,omitempty"` + Reconcile *apiChi.ClusterReconcile `json:"reconcile" yaml:"reconcile,omitempty"` + Layout *ChkClusterLayout `json:"layout,omitempty" yaml:"layout,omitempty"` Runtime ChkClusterRuntime `json:"-" yaml:"-"` } @@ -137,7 +136,8 @@ func (cluster *Cluster) GetPDBMaxUnavailable() *types.Int32 { } // GetReconcile is a getter -func (cluster *Cluster) GetReconcile() apiChi.ClusterReconcile { +func (cluster *Cluster) GetReconcile() *apiChi.ClusterReconcile { + cluster.Reconcile = cluster.Reconcile.Ensure() return cluster.Reconcile } @@ -182,7 +182,7 @@ func (cluster *Cluster) SelectSettingsSourceFrom(shard apiChi.IShard, replica ap return replica } -// InheritFilesFrom inherits files from CHI +// InheritFilesFrom inherits files from CR func (cluster *Cluster) InheritFilesFrom(chk *ClickHouseKeeperInstallation) { if chk.GetSpecT().Configuration == nil { return @@ -203,6 +203,17 @@ func (cluster *Cluster) InheritFilesFrom(chk *ClickHouseKeeperInstallation) { }) } +// InheritClusterReconcileFrom inherits reconcile runtime from CHI +func (cluster *Cluster) InheritClusterReconcileFrom(chk *ClickHouseKeeperInstallation) { + if chk.Spec.Reconcile == nil { + return + } + reconcile := cluster.GetReconcile() + reconcile.Runtime = reconcile.Runtime.MergeFrom(chk.Spec.Reconcile.Runtime, apiChi.MergeTypeFillEmptyValues) + reconcile.Host = reconcile.Host.MergeFrom(chk.Spec.Reconcile.Host) + cluster.Reconcile = reconcile +} + // InheritTemplatesFrom inherits templates from CHI func (cluster *Cluster) InheritTemplatesFrom(chk 
*ClickHouseKeeperInstallation) { if chk.GetSpec().GetDefaults() == nil { From 1ccb4737930b69ec9ff3faf547f312b5d0eea3aa Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 19 Jan 2026 15:59:25 +0500 Subject: [PATCH 071/233] dev: switch interface to pointer --- pkg/apis/clickhouse.altinity.com/v1/interface.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/apis/clickhouse.altinity.com/v1/interface.go b/pkg/apis/clickhouse.altinity.com/v1/interface.go index e1bba0ca0..2e91421e7 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/interface.go +++ b/pkg/apis/clickhouse.altinity.com/v1/interface.go @@ -134,7 +134,7 @@ type ICluster interface { SelectSettingsSourceFrom(shard IShard, replica IReplica) any GetRuntime() IClusterRuntime - GetReconcile() ClusterReconcile + GetReconcile() *ClusterReconcile GetServiceTemplate() (*ServiceTemplate, bool) GetAncestor() ICluster } From 8aff729e79b88ece814f2acb669567420a257802 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 19 Jan 2026 16:06:41 +0500 Subject: [PATCH 072/233] dev: emptify reconcile --- pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go index 5e4bb7b46..3cd12e3d4 100644 --- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go +++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go @@ -27,7 +27,7 @@ type Cluster struct { Templates *apiChi.TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"` PDBManaged *types.StringBool `json:"pdbManaged,omitempty" yaml:"pdbManaged,omitempty"` PDBMaxUnavailable *types.Int32 `json:"pdbMaxUnavailable,omitempty" yaml:"pdbMaxUnavailable,omitempty"` - Reconcile *apiChi.ClusterReconcile `json:"reconcile" yaml:"reconcile,omitempty"` + Reconcile *apiChi.ClusterReconcile `json:"reconcile,omitempty" yaml:"reconcile,omitempty"` Layout 
*ChkClusterLayout `json:"layout,omitempty" yaml:"layout,omitempty"` Runtime ChkClusterRuntime `json:"-" yaml:"-"

From f4f3c0ffbddd5ace374c7653eeeb0e1eb5b02 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 19 Jan 2026 16:07:19 +0500 Subject: [PATCH 073/233] dev: introduce pointed version --- pkg/apis/clickhouse.altinity.com/v1/type_cluster.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go index cebf8a6ec..6ccdc1ebc 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go @@ -31,7 +31,7 @@ type Cluster struct { Secret *ClusterSecret `json:"secret,omitempty" yaml:"secret,omitempty"` PDBManaged *types.StringBool `json:"pdbManaged,omitempty" yaml:"pdbManaged,omitempty"` PDBMaxUnavailable *types.Int32 `json:"pdbMaxUnavailable,omitempty" yaml:"pdbMaxUnavailable,omitempty"` - Reconcile ClusterReconcile `json:"reconcile" yaml:"reconcile"` + Reconcile *ClusterReconcile `json:"reconcile,omitempty" yaml:"reconcile,omitempty"` Layout *ChiClusterLayout `json:"layout,omitempty" yaml:"layout,omitempty"` Runtime ChiClusterRuntime `json:"-" yaml:"-" @@ -146,7 +146,8 @@ func (cluster *Cluster) GetPDBMaxUnavailable() *types.Int32 { } // GetReconcile is a getter -func (cluster *Cluster) GetReconcile() ClusterReconcile { +func (cluster *Cluster) GetReconcile() *ClusterReconcile { + cluster.Reconcile = cluster.Reconcile.Ensure() return cluster.Reconcile } @@ -207,7 +208,7 @@ func (cluster *Cluster) InheritZookeeperFrom(chi *ClickHouseInstallation) { cluster.Zookeeper = cluster.Zookeeper.MergeFrom(chi.GetSpecT().Configuration.Zookeeper, MergeTypeFillEmptyValues) } -// InheritFilesFrom inherits files from CHI +// InheritFilesFrom inherits files from CR func (cluster *Cluster) InheritFilesFrom(chi *ClickHouseInstallation) { if chi.GetSpecT().Configuration == nil { return @@ 
-233,8 +234,10 @@ func (cluster *Cluster) InheritClusterReconcileFrom(chi *ClickHouseInstallation) if chi.Spec.Reconcile == nil { return } - cluster.Reconcile.Runtime = cluster.Reconcile.Runtime.MergeFrom(chi.Spec.Reconcile.Runtime, MergeTypeFillEmptyValues) - cluster.Reconcile.Host = cluster.Reconcile.Host.MergeFrom(chi.Spec.Reconcile.Host) + reconcile := cluster.GetReconcile() + reconcile.Runtime = reconcile.Runtime.MergeFrom(chi.Spec.Reconcile.Runtime, MergeTypeFillEmptyValues) + reconcile.Host = reconcile.Host.MergeFrom(chi.Spec.Reconcile.Host) + cluster.Reconcile = reconcile } // InheritTemplatesFrom inherits templates from CHI From 97e822963ebf0117f5a06531158f60e36f6055e0 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 19 Jan 2026 16:08:17 +0500 Subject: [PATCH 074/233] dev: paramtrize probes normalizator --- .../v1/type_configuration_chop.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go index be2403e65..9465ff035 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go @@ -446,8 +446,8 @@ type ReconcileHost struct { Drop ReconcileHostDrop `json:"drop" yaml:"drop"` } -func (rh ReconcileHost) Normalize() ReconcileHost { - rh.Wait = rh.Wait.Normalize() +func (rh ReconcileHost) Normalize(readiness *types.StringBool, overwrite bool) ReconcileHost { + rh.Wait = rh.Wait.Normalize(readiness, overwrite) rh.Drop = rh.Drop.Normalize() return rh } @@ -467,7 +467,7 @@ type ReconcileHostWait struct { Probes *ReconcileHostWaitProbes `json:"probes,omitempty" yaml:"probes,omitempty"` } -func (wait ReconcileHostWait) Normalize() ReconcileHostWait { +func (wait ReconcileHostWait) Normalize(readiness *types.StringBool, overwrite bool) ReconcileHostWait { if wait.Replicas == nil { wait.Replicas = 
&ReconcileHostWaitReplicas{} } @@ -478,10 +478,13 @@ func (wait ReconcileHostWait) Normalize() ReconcileHostWait { } if wait.Probes == nil { - // Default value + // Apply default when probes are not specified at all. wait.Probes = &ReconcileHostWaitProbes{ - Readiness: types.NewStringBool(true), + Readiness: readiness, } + } else if overwrite { + // Force override even when a value is already set. + wait.Probes.Readiness = readiness } return wait @@ -1021,7 +1024,7 @@ func (c *OperatorConfig) normalizeSectionReconcileStatefulSet() { } func (c *OperatorConfig) normalizeSectionReconcileHost() { - c.Reconcile.Host = c.Reconcile.Host.Normalize() + c.Reconcile.Host = c.Reconcile.Host.Normalize(nil, false) } func (c *OperatorConfig) normalizeSectionClickHouseConfigurationUserDefault() { From bdffcbbb4c6eecf5d55b0f6f5ce247d7338e6b86 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 19 Jan 2026 16:08:47 +0500 Subject: [PATCH 075/233] dev: introduce cluster reconcile ensurer --- pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go b/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go index 3cbbb0f91..171277366 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go @@ -44,6 +44,13 @@ type ClusterReconcile struct { Host ReconcileHost `json:"host" yaml:"host"` } +func (reconcile *ClusterReconcile) Ensure() *ClusterReconcile { + if reconcile == nil { + reconcile = &ClusterReconcile{} + } + return reconcile +} + // NewChiReconcile creates new reconcile func NewChiReconcile() *ChiReconcile { return new(ChiReconcile) From 886eecb80de5ba347ae88de410d042f2d9826861 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 19 Jan 2026 16:09:12 +0500 Subject: [PATCH 076/233] dev: keeper code --- .../v1/zz_generated.deepcopy.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff 
--git a/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go index 525c080f3..ccd103091 100644 --- a/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go +++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go @@ -476,7 +476,11 @@ func (in *Cluster) DeepCopyInto(out *Cluster) { *out = new(types.Int32) **out = **in } - in.Reconcile.DeepCopyInto(&out.Reconcile) + if in.Reconcile != nil { + in, out := &in.Reconcile, &out.Reconcile + *out = new(clickhousealtinitycomv1.ClusterReconcile) + (*in).DeepCopyInto(*out) + } in.Runtime.DeepCopyInto(&out.Runtime) return } From 26eaf81f6b63379fe09f2dc72fec5082d8e04eb9 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 19 Jan 2026 16:09:32 +0500 Subject: [PATCH 077/233] dev: chi code --- .../clickhouse.altinity.com/v1/zz_generated.deepcopy.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go index bfae92b5b..3fa03f173 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go +++ b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go @@ -854,7 +854,11 @@ func (in *Cluster) DeepCopyInto(out *Cluster) { *out = new(types.Int32) **out = **in } - in.Reconcile.DeepCopyInto(&out.Reconcile) + if in.Reconcile != nil { + in, out := &in.Reconcile, &out.Reconcile + *out = new(ClusterReconcile) + (*in).DeepCopyInto(*out) + } if in.Layout != nil { in, out := &in.Layout, &out.Layout *out = new(ChiClusterLayout) From 74e33674ddf979173c22f169aeb9d190a4329ac6 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 19 Jan 2026 16:10:04 +0500 Subject: [PATCH 078/233] test: unparallelize keeper --- tests/e2e/test_operator.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index 
c5885eadc..430b26cd4 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -5429,6 +5429,7 @@ def test_010061(self): @TestScenario @Name("test_020000. Test Basic CHK functions") +@Tags("NO_PARALLEL") def test_020000(self): create_shell_namespace_clickhouse_template() @@ -5467,6 +5468,7 @@ def test_020000(self): @TestScenario @Name("test_020001. Test that Kubernetes objects between CHI and CHK does not overlap") +@Tags("NO_PARALLEL") def test_020001(self): create_shell_namespace_clickhouse_template() @@ -5509,6 +5511,7 @@ def test_020001(self): @Name("test_020002. Test CHI with CHK") @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Kind_ClickHouseKeeperInstallation("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_ClickHouseKeeperInstallation_volumeClaimTemplates("1.0")) +@Tags("NO_PARALLEL") def test_020002(self): """Check clickhouse-operator support ClickHouseKeeperInstallation with PVC in keeper manifest.""" @@ -5534,6 +5537,7 @@ def test_020002(self): @TestScenario @Name("test_020003. Clickhouse-keeper upgrade") +@Tags("NO_PARALLEL") def test_020003(self): """Check that clickhouse-operator support upgrading clickhouse-keeper version when clickhouse-keeper defined with ClickHouseKeeperInstallation.""" @@ -5781,6 +5785,7 @@ def test_020004_1(self): @TestScenario @Name("test_020005. Clickhouse-keeper scale-up/scale-down") +@Tags("NO_PARALLEL") def test_020005(self): """Check that clickhouse-operator support scale-up/scale-down without service interruption""" @@ -5869,6 +5874,7 @@ def test_020005(self): @TestScenario @Name("test_020006. Test https://github.com/Altinity/clickhouse-operator/issues/1863") +@Tags("NO_PARALLEL") def test_020006(self): create_shell_namespace_clickhouse_template() @@ -5892,6 +5898,7 @@ def test_020006(self): @TestScenario @Name("test_020007. 
Test fractional CPU requests/limits handling for CHK") +@Tags("NO_PARALLEL") def test_020007(self): create_shell_namespace_clickhouse_template() From 711138b49bfeab66b60a2c71f939b3deacc39bf3 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 19 Jan 2026 16:11:04 +0500 Subject: [PATCH 079/233] dev: switch to getter --- pkg/controller/chi/worker-reconciler-helper.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/controller/chi/worker-reconciler-helper.go b/pkg/controller/chi/worker-reconciler-helper.go index 67eac8b3a..25aaafee8 100644 --- a/pkg/controller/chi/worker-reconciler-helper.go +++ b/pkg/controller/chi/worker-reconciler-helper.go @@ -69,8 +69,8 @@ func (w *worker) isHostSoftwareAbleToRespond(ctx context.Context, host *api.Host // getReconcileShardsWorkersNum calculates how many workers are allowed to be used for concurrent shards reconcile func (w *worker) getReconcileShardsWorkersNum(cluster *api.Cluster, opts *common.ReconcileShardsAndHostsOptions) int { - availableWorkers := float64(cluster.Reconcile.Runtime.ReconcileShardsThreadsNumber) - maxConcurrencyPercent := float64(cluster.Reconcile.Runtime.ReconcileShardsMaxConcurrencyPercent) + availableWorkers := float64(cluster.GetReconcile().Runtime.ReconcileShardsThreadsNumber) + maxConcurrencyPercent := float64(cluster.GetReconcile().Runtime.ReconcileShardsMaxConcurrencyPercent) _100Percent := float64(100) shardsNum := float64(len(cluster.Layout.Shards)) From 3d5afad87f73f57921767c8e185aeaf9bc352bdc Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 19 Jan 2026 16:11:32 +0500 Subject: [PATCH 080/233] dev: clarify reconciler --- pkg/controller/common/statefulset/statefulset-reconciler.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/controller/common/statefulset/statefulset-reconciler.go b/pkg/controller/common/statefulset/statefulset-reconciler.go index 901cd4704..b0619cdab 100644 --- 
a/pkg/controller/common/statefulset/statefulset-reconciler.go +++ b/pkg/controller/common/statefulset/statefulset-reconciler.go @@ -459,6 +459,7 @@ func (r *Reconciler) doDeleteStatefulSet(ctx context.Context, host *api.Host) er namespace := host.Runtime.Address.Namespace log.V(1).M(host).F().Info("%s/%s", namespace, name) + // Fetch cur host's StatefulSet var err error host.Runtime.CurStatefulSet, err = r.sts.Get(ctx, host) if err != nil { @@ -471,7 +472,7 @@ func (r *Reconciler) doDeleteStatefulSet(ctx context.Context, host *api.Host) er return err } - // Scale StatefulSet down to 0 pods count. + // Scale cur host's StatefulSet down to 0 pods count. // This is the proper and graceful way to delete StatefulSet var zero int32 = 0 host.Runtime.CurStatefulSet.Spec.Replicas = &zero @@ -486,7 +487,6 @@ func (r *Reconciler) doDeleteStatefulSet(ctx context.Context, host *api.Host) er // And now delete empty StatefulSet if err := r.sts.Delete(ctx, namespace, name); err == nil { log.V(1).M(host).Info("OK delete StatefulSet %s/%s", namespace, name) - // r.hostSTSPoller.WaitHostStatefulSetDeleted(host) } else if apiErrors.IsNotFound(err) { log.V(1).M(host).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name) } else { From 01550e94355cf84213cc54b1cfffa9de8f1b928e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 19 Jan 2026 16:12:07 +0500 Subject: [PATCH 081/233] dev: introduce chk normalizer const --- pkg/model/chk/normalizer/const.go | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 pkg/model/chk/normalizer/const.go diff --git a/pkg/model/chk/normalizer/const.go b/pkg/model/chk/normalizer/const.go new file mode 100644 index 000000000..599967f18 --- /dev/null +++ b/pkg/model/chk/normalizer/const.go @@ -0,0 +1,29 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package normalizer + +const ( + // defaultReconcileShardsThreadsNumber specifies the default number of threads usable for concurrent shard reconciliation + // within a single cluster reconciliation. Defaults to 1, which means strictly sequential shard reconciliation. + defaultReconcileShardsThreadsNumber = 1 + + // defaultReconcileShardsMaxConcurrencyPercent specifies the maximum integer percentage of shards that may be reconciled + // concurrently during cluster reconciliation. This counterbalances the fact that this is an operator setting, + // that different clusters will have different shard counts, and that the shard concurrency capacity is specified + // above in terms of a number of threads to use (up to). Example: overriding to 100 means all shards may be + // reconciled concurrently, if the number of shard reconciliation threads is greater than or equal to the number + // of shards in the cluster. 
+ defaultReconcileShardsMaxConcurrencyPercent = 50 +) From 560bafa4faf6170e9ac6495dbe34a2fba846599e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 19 Jan 2026 16:12:50 +0500 Subject: [PATCH 082/233] dev: switch to parametrized probes recon --- pkg/model/chi/normalizer/normalizer.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/model/chi/normalizer/normalizer.go b/pkg/model/chi/normalizer/normalizer.go index 0e0bc8925..ceb7f43c2 100644 --- a/pkg/model/chi/normalizer/normalizer.go +++ b/pkg/model/chi/normalizer/normalizer.go @@ -443,7 +443,7 @@ func (n *Normalizer) normalizeReconcileRuntime(runtime chi.ReconcileRuntime) chi func (n *Normalizer) normalizeReconcileHost(rh chi.ReconcileHost) chi.ReconcileHost { // Normalize - rh = rh.Normalize() + rh = rh.Normalize(types.NewStringBool(true), false) return rh } @@ -1005,7 +1005,9 @@ func (n *Normalizer) normalizeClusterLayoutShardsCountAndReplicasCount(clusterLa return clusterLayout } -func (n *Normalizer) normalizeClusterReconcile(reconcile chi.ClusterReconcile) chi.ClusterReconcile { +func (n *Normalizer) normalizeClusterReconcile(reconcile *chi.ClusterReconcile) *chi.ClusterReconcile { + reconcile = reconcile.Ensure() + reconcile.Runtime = n.normalizeReconcileRuntime(reconcile.Runtime) reconcile.Host = n.normalizeReconcileHost(reconcile.Host) return reconcile From 85a35f6bde38be6bb9efef4f881775682574d8c1 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 19 Jan 2026 16:13:21 +0500 Subject: [PATCH 083/233] dev: intoduce chk reconcile normalizer --- pkg/model/chk/normalizer/normalizer.go | 49 ++++++++++++++++++++++++-- 1 file changed, 46 insertions(+), 3 deletions(-) diff --git a/pkg/model/chk/normalizer/normalizer.go b/pkg/model/chk/normalizer/normalizer.go index e1d2170e1..3d9252490 100644 --- a/pkg/model/chk/normalizer/normalizer.go +++ b/pkg/model/chk/normalizer/normalizer.go @@ -361,13 +361,34 @@ func (n *Normalizer) normalizeReconcile(reconcile 
*chi.ChiReconcile) *chi.ChiRec // No normalization yet // Runtime - // No normalization yet + // Inherit from chop Config + reconcile.InheritRuntimeFrom(chop.Config().Reconcile.Runtime) + reconcile.Runtime = n.normalizeReconcileRuntime(reconcile.Runtime) // Host - // No normalization yet + // Inherit from chop Config + reconcile.InheritHostFrom(chop.Config().Reconcile.Host) + reconcile.Host = n.normalizeReconcileHost(reconcile.Host) + return reconcile } +func (n *Normalizer) normalizeReconcileRuntime(runtime chi.ReconcileRuntime) chi.ReconcileRuntime { + if runtime.ReconcileShardsThreadsNumber == 0 { + runtime.ReconcileShardsThreadsNumber = defaultReconcileShardsThreadsNumber + } + if runtime.ReconcileShardsMaxConcurrencyPercent == 0 { + runtime.ReconcileShardsMaxConcurrencyPercent = defaultReconcileShardsMaxConcurrencyPercent + } + return runtime +} + +func (n *Normalizer) normalizeReconcileHost(rh chi.ReconcileHost) chi.ReconcileHost { + // Normalize + rh = rh.Normalize(types.NewStringBool(false), true) + return rh +} + func (n *Normalizer) normalizeReconcileCleanup(cleanup *chi.Cleanup) *chi.Cleanup { if cleanup == nil { cleanup = chi.NewCleanup() @@ -569,6 +590,8 @@ func (n *Normalizer) normalizeClusterStage1(cluster *chk.Cluster) *chk.Cluster { func (n *Normalizer) normalizeClusterStage2(cluster *chk.Cluster) *chk.Cluster { // Inherit from .spec.configuration.files cluster.InheritFilesFrom(n.req.GetTarget()) + // Inherit from .spec.reconciling + cluster.InheritClusterReconcileFrom(n.req.GetTarget()) // Inherit from .spec.defaults cluster.InheritTemplatesFrom(n.req.GetTarget()) @@ -577,6 +600,7 @@ func (n *Normalizer) normalizeClusterStage2(cluster *chk.Cluster) *chk.Cluster { cluster.PDBManaged = n.normalizePDBManaged(cluster.PDBManaged) cluster.PDBMaxUnavailable = n.normalizePDBMaxUnavailable(cluster.PDBMaxUnavailable) + cluster.Reconcile = n.normalizeClusterReconcile(cluster.Reconcile) n.appendClusterSecretEnvVar(cluster) @@ -692,6 +716,14 @@ func (n 
*Normalizer) normalizeClusterLayoutShardsCountAndReplicasCount(clusterLa return clusterLayout } +func (n *Normalizer) normalizeClusterReconcile(reconcile *chi.ClusterReconcile) *chi.ClusterReconcile { + reconcile = reconcile.Ensure() + + reconcile.Runtime = n.normalizeReconcileRuntime(reconcile.Runtime) + reconcile.Host = n.normalizeReconcileHost(reconcile.Host) + return reconcile +} + // ensureClusterLayoutShards ensures slice layout.Shards is in place func (n *Normalizer) ensureClusterLayoutShards(layout *chk.ChkClusterLayout) { // Disposition of shards in slice would be @@ -733,7 +765,7 @@ func (n *Normalizer) normalizeShardStage2(shard *chk.ChkShard, cluster *chk.Clus shard.Files = n.normalizeConfigurationFiles(shard.Files) shard.InheritTemplatesFrom(cluster) // Internal replication uses ReplicasCount thus it has to be normalized after shard ReplicaCount normalized - //n.normalizeShardInternalReplication(shard) + n.normalizeShardInternalReplication(shard) } // normalizeReplicaStage1 normalizes a replica - walks over all fields @@ -849,3 +881,14 @@ func (n *Normalizer) normalizeReplicaHosts(replica *chk.ChkReplica, cluster *chk replica.Hosts = append(replica.Hosts, host) } } + +// normalizeShardInternalReplication ensures reasonable values in +// .spec.configuration.clusters.layout.shards.internalReplication +func (n *Normalizer) normalizeShardInternalReplication(shard *chk.ChkShard) { + // Shards with replicas are expected to have internal replication on by default + //defaultInternalReplication := false + //if shard.ReplicasCount > 1 { + // defaultInternalReplication = true + //} + //shard.InternalReplication = shard.InternalReplication.Normalize(defaultInternalReplication) +} From 7a2e742d9998aa8fd89c16caee3328437c446847 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 22 Jan 2026 19:17:52 +0500 Subject: [PATCH 084/233] dev: start sts reconcile options introduction --- deploy/builder/templates-config/config.yaml | 14 ++++++++++++++ 1 file changed, 
14 insertions(+) diff --git a/deploy/builder/templates-config/config.yaml b/deploy/builder/templates-config/config.yaml index 8129e82c9..912429cd5 100644 --- a/deploy/builder/templates-config/config.yaml +++ b/deploy/builder/templates-config/config.yaml @@ -343,6 +343,20 @@ reconcile: # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. onFailure: abort + # Recreate StatefulSet scenario + recreate: + # What to do in case operator is in need to recreate StatefulSet? + # Possible options: + # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is, + # do not try to fix or delete or update it, just abort reconcile cycle. + # Do not proceed to the next StatefulSet(s) and wait for an admin to assist. + # 2. recreate - proceed and recreate StatefulSet. + + # Triggered when PVC data loss or missing volumes are detected + onDataLoss: recreate + # Triggered when StatefulSet update fails or StatefulSet is not ready + onUpdateFailure: recreate + # Reconcile Host scenario host: # The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions: From f4c31cfcc93822d4d9f110cf9e5e8f510a087964 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 22 Jan 2026 19:18:52 +0500 Subject: [PATCH 085/233] dev: introduce sts reconcile to chi --- ...l-template-01-section-crd-01-chi-chit.yaml | 74 +++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml index 012f8ea0f..8ca0c26bf 100644 --- a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml +++ 
b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml @@ -497,6 +497,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. 
+ enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | From 5a60bb28dc414390b24c01a546bbd9eb7ab97667 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 22 Jan 2026 19:19:28 +0500 Subject: [PATCH 086/233] dev: introduce sts options for a chop config --- ...ml-template-01-section-crd-02-chopconf.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml index 629f369b5..460608791 100644 --- a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml +++ b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml @@ -318,6 +318,24 @@ spec: 1. abort - do nothing, just break the process and wait for admin. 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration. 3. 
ignore - ignore error, pretend nothing happened and move on to the next StatefulSet. + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. host: type: object description: | From 7a69d74e21cd864834fe11c69c83e9fbc9db5226 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 22 Jan 2026 19:20:09 +0500 Subject: [PATCH 087/233] docs: example --- .../99-clickhouseinstallation-max.yaml | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/docs/chi-examples/99-clickhouseinstallation-max.yaml b/docs/chi-examples/99-clickhouseinstallation-max.yaml index 7eea7110c..8dfa95ae2 100644 --- a/docs/chi-examples/99-clickhouseinstallation-max.yaml +++ b/docs/chi-examples/99-clickhouseinstallation-max.yaml @@ -104,6 +104,53 @@ spec: # Max percentage of concurrent shard reconciles within one cluster in progress reconcileShardsMaxConcurrencyPercent: 50 + # Optional, overwrites reconcile.statefulSet from the operator's config + # Reconcile StatefulSet scenario + statefulSet: + # Create StatefulSet scenario + create: + # What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds + # Possible options: + # 1. 
abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is, + # do not try to fix or delete or update it, just abort reconcile cycle. + # Do not proceed to the next StatefulSet(s) and wait for an admin to assist. + # 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + onFailure: ignore + + # Update StatefulSet scenario + update: + # How many seconds to wait for created/updated StatefulSet to be 'Ready' + timeout: 300 + # How many seconds to wait between checks/polls for created/updated StatefulSet status + pollInterval: 5 + # What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds + # Possible options: + # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is, + # do not try to fix or delete or update it, just abort reconcile cycle. + # Do not proceed to the next StatefulSet(s) and wait for an admin to assist. + # 2. rollback - delete Pod and rollback StatefulSet to previous Generation. + # Pod would be recreated by StatefulSet based on rollback-ed StatefulSet configuration. + # Follow 'abort' path afterwards. + # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + onFailure: abort + + # Recreate StatefulSet scenario + recreate: + # What to do in case operator is in need to recreate StatefulSet? + # Possible options: + # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is, + # do not try to fix or delete or update it, just abort reconcile cycle. + # Do not proceed to the next StatefulSet(s) and wait for an admin to assist. + # 2. recreate - proceed and recreate StatefulSet. 
+ + # Triggered when PVC data loss or missing volumes are detected + onDataLoss: recreate + # Triggered when StatefulSet update fails or StatefulSet is not ready + onUpdateFailure: recreate + + # Optional, overwrites reconcile.host from the operator's config + # Reconcile Host scenario host: # Whether the operator during reconcile procedure should wait for a ClickHouse host: # - to be excluded from a ClickHouse cluster From a6643e98ed6473b19a0fbddb8faf2e0263906113 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 22 Jan 2026 19:22:03 +0500 Subject: [PATCH 088/233] dev: introduce sts options in config --- .../v1/type_configuration_chop.go | 35 +++++++++++++++++-- 1 file changed, 32 insertions(+), 3 deletions(-) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go index 9465ff035..16987624a 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go @@ -39,7 +39,7 @@ import ( const ( // Default values for update timeout and polling period in seconds defaultStatefulSetUpdateTimeout = 300 - defaultStatefulSetUpdatePollInterval = 15 + defaultStatefulSetUpdatePollInterval = 5 // Default values for ClickHouse user configuration // 1. 
user/profile @@ -136,6 +136,22 @@ const ( OnStatefulSetUpdateFailureActionIgnore = "ignore" ) +const ( + // What to do in case StatefulSet needs to be recreated due to PVC data loss or missing volumes + // Abort - Loss: abort CHI reconcile + OnStatefulSetRecreateOnDataLossActionAbort = "abort" + + // Recreate - Loss: proceed and recreate StatefulSet + OnStatefulSetRecreateOnDataLossActionRecreate = "recreate" + + // What to do in case StatefulSet needs to be recreated due to update failure or StatefulSet not ready + // Abort - Failure: abort CHI reconcile + OnStatefulSetRecreateOnUpdateFailureActionAbort = "abort" + + // Recreate - Failure: proceed and recreate StatefulSet + OnStatefulSetRecreateOnUpdateFailureActionRecreate = "recreate" +) + const ( defaultMaxReplicationDelay = 10 ) @@ -422,10 +438,15 @@ type OperatorConfigReconcile struct { } `json:"create" yaml:"create"` Update struct { - Timeout uint64 `json:"timeout" yaml:"timeout"` + Timeout uint64 `json:"timeout" yaml:"timeout"` PollInterval uint64 `json:"pollInterval" yaml:"pollInterval"` - OnFailure string `json:"onFailure" yaml:"onFailure"` + OnFailure string `json:"onFailure" yaml:"onFailure"` } `json:"update" yaml:"update"` + + Recreate struct { + OnDataLoss string `json:"onDataLoss" yaml:"onDataLoss"` + OnUpdateFailure string `json:"onUpdateFailure" yaml:"onUpdateFailure"` + } `json:"recreate" yaml:"recreate"` } `json:"statefulSet" yaml:"statefulSet"` Host ReconcileHost `json:"host" yaml:"host"` @@ -1021,6 +1042,14 @@ func (c *OperatorConfig) normalizeSectionReconcileStatefulSet() { if c.Reconcile.StatefulSet.Update.OnFailure == "" { c.Reconcile.StatefulSet.Update.OnFailure = OnStatefulSetUpdateFailureActionRollback } + + // Default Recreate actions - recreate + if c.Reconcile.StatefulSet.Recreate.OnDataLoss == "" { + c.Reconcile.StatefulSet.Recreate.OnDataLoss = OnStatefulSetRecreateOnDataLossActionRecreate + } + if c.Reconcile.StatefulSet.Recreate.OnUpdateFailure == "" { + 
c.Reconcile.StatefulSet.Recreate.OnUpdateFailure = OnStatefulSetRecreateOnUpdateFailureActionRecreate + } } func (c *OperatorConfig) normalizeSectionReconcileHost() { From 5124f83818467b834a697eb7232c5902c99a2aeb Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 22 Jan 2026 19:24:50 +0500 Subject: [PATCH 089/233] dev: merge chi-level config on a cluster level --- pkg/apis/clickhouse.altinity.com/v1/type_cluster.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go index 6ccdc1ebc..a379677f8 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go @@ -236,6 +236,7 @@ func (cluster *Cluster) InheritClusterReconcileFrom(chi *ClickHouseInstallation) } reconcile := cluster.GetReconcile() reconcile.Runtime = reconcile.Runtime.MergeFrom(chi.Spec.Reconcile.Runtime, MergeTypeFillEmptyValues) + reconcile.StatefulSet = reconcile.StatefulSet.MergeFrom(chi.Spec.Reconcile.StatefulSet) reconcile.Host = reconcile.Host.MergeFrom(chi.Spec.Reconcile.Host) cluster.Reconcile = reconcile } From 3e99cb03c60edf68a7a3cf1f658344547073ce93 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 22 Jan 2026 19:28:07 +0500 Subject: [PATCH 090/233] dev: introduce reconciler code --- .../v1/type_reconcile.go | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go b/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go index 171277366..83ceb1947 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go @@ -33,6 +33,8 @@ type ChiReconcile struct { // Runtime specifies runtime settings Runtime ReconcileRuntime `json:"runtime,omitempty" yaml:"runtime,omitempty"` + // StatefulSet specifies StatefulSet reconcile settings + StatefulSet ReconcileStatefulSet 
`json:"statefulSet,omitempty" yaml:"statefulSet,omitempty"` // Host specifies host-lever reconcile settings Host ReconcileHost `json:"host" yaml:"host"` } @@ -40,10 +42,60 @@ type ChiReconcile struct { type ClusterReconcile struct { // Runtime specifies runtime settings Runtime ReconcileRuntime `json:"runtime" yaml:"runtime"` + // StatefulSet specifies StatefulSet reconcile settings + StatefulSet ReconcileStatefulSet `json:"statefulSet,omitempty" yaml:"statefulSet,omitempty"` // Host specifies host-lever reconcile settings Host ReconcileHost `json:"host" yaml:"host"` } +// ReconcileStatefulSet defines StatefulSet reconcile settings +type ReconcileStatefulSet struct { + Create ReconcileStatefulSetCreate `json:"create,omitempty" yaml:"create,omitempty"` + Update ReconcileStatefulSetUpdate `json:"update,omitempty" yaml:"update,omitempty"` + Recreate ReconcileStatefulSetRecreate `json:"recreate,omitempty" yaml:"recreate,omitempty"` +} + +// ReconcileStatefulSetCreate defines StatefulSet create settings +type ReconcileStatefulSetCreate struct { + OnFailure string `json:"onFailure,omitempty" yaml:"onFailure,omitempty"` +} + +// ReconcileStatefulSetUpdate defines StatefulSet update settings +type ReconcileStatefulSetUpdate struct { + Timeout uint64 `json:"timeout,omitempty" yaml:"timeout,omitempty"` + PollInterval uint64 `json:"pollInterval,omitempty" yaml:"pollInterval,omitempty"` + OnFailure string `json:"onFailure,omitempty" yaml:"onFailure,omitempty"` +} + +// ReconcileStatefulSetRecreate defines StatefulSet recreate settings +type ReconcileStatefulSetRecreate struct { + OnDataLoss string `json:"onDataLoss,omitempty" yaml:"onDataLoss,omitempty"` + OnUpdateFailure string `json:"onUpdateFailure,omitempty" yaml:"onUpdateFailure,omitempty"` +} + +// MergeFrom merges from specified ReconcileStatefulSet +func (s ReconcileStatefulSet) MergeFrom(from ReconcileStatefulSet) ReconcileStatefulSet { + if s.Create.OnFailure == "" { + s.Create.OnFailure = from.Create.OnFailure + } + 
if s.Update.Timeout == 0 { + s.Update.Timeout = from.Update.Timeout + } + if s.Update.PollInterval == 0 { + s.Update.PollInterval = from.Update.PollInterval + } + if s.Update.OnFailure == "" { + s.Update.OnFailure = from.Update.OnFailure + } + if s.Recreate.OnDataLoss == "" { + s.Recreate.OnDataLoss = from.Recreate.OnDataLoss + } + if s.Recreate.OnUpdateFailure == "" { + s.Recreate.OnUpdateFailure = from.Recreate.OnUpdateFailure + } + return s +} + func (reconcile *ClusterReconcile) Ensure() *ClusterReconcile { if reconcile == nil { reconcile = &ClusterReconcile{} @@ -88,6 +140,7 @@ func (r *ChiReconcile) MergeFrom(from *ChiReconcile, _type MergeType) *ChiReconc r.Cleanup = r.Cleanup.MergeFrom(from.Cleanup, _type) r.Macros = r.Macros.MergeFrom(from.Macros, _type) r.Runtime = r.Runtime.MergeFrom(from.Runtime, _type) + r.StatefulSet = r.StatefulSet.MergeFrom(from.StatefulSet) r.Host = r.Host.MergeFrom(from.Host) return r @@ -194,6 +247,31 @@ func (r *ChiReconcile) InheritRuntimeFrom(from OperatorConfigReconcileRuntime) { } } +func (r *ChiReconcile) InheritStatefulSetFrom(from OperatorConfigReconcile) { + if r == nil { + return + } + + if r.StatefulSet.Create.OnFailure == "" { + r.StatefulSet.Create.OnFailure = from.StatefulSet.Create.OnFailure + } + if r.StatefulSet.Update.Timeout == 0 { + r.StatefulSet.Update.Timeout = from.StatefulSet.Update.Timeout + } + if r.StatefulSet.Update.PollInterval == 0 { + r.StatefulSet.Update.PollInterval = from.StatefulSet.Update.PollInterval + } + if r.StatefulSet.Update.OnFailure == "" { + r.StatefulSet.Update.OnFailure = from.StatefulSet.Update.OnFailure + } + if r.StatefulSet.Recreate.OnDataLoss == "" { + r.StatefulSet.Recreate.OnDataLoss = from.StatefulSet.Recreate.OnDataLoss + } + if r.StatefulSet.Recreate.OnUpdateFailure == "" { + r.StatefulSet.Recreate.OnUpdateFailure = from.StatefulSet.Recreate.OnUpdateFailure + } +} + func (r *ChiReconcile) InheritHostFrom(from ReconcileHost) { r.Host = r.Host.MergeFrom(from) } From 
454bf9b37169c287adcc957c16262e75700a5cec Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 22 Jan 2026 19:28:41 +0500 Subject: [PATCH 091/233] dev: nomrliazer consta --- pkg/model/chi/normalizer/const.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/model/chi/normalizer/const.go b/pkg/model/chi/normalizer/const.go index 599967f18..82df80377 100644 --- a/pkg/model/chi/normalizer/const.go +++ b/pkg/model/chi/normalizer/const.go @@ -26,4 +26,10 @@ const ( // reconciled concurrently, if the number of shard reconciliation threads is greater than or equal to the number // of shards in the cluster. defaultReconcileShardsMaxConcurrencyPercent = 50 + + // defaultStatefulSetUpdateTimeout specifies the default timeout in seconds for StatefulSet update + defaultStatefulSetUpdateTimeout = 300 + + // defaultStatefulSetUpdatePollInterval specifies the default poll interval in seconds for StatefulSet update + defaultStatefulSetUpdatePollInterval = 5 ) From df056c8c362aa7f5efc4b0d9f928b98240e5b9bb Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 22 Jan 2026 19:29:46 +0500 Subject: [PATCH 092/233] dev: chi normalizer --- pkg/model/chi/normalizer/normalizer.go | 31 ++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/pkg/model/chi/normalizer/normalizer.go b/pkg/model/chi/normalizer/normalizer.go index ceb7f43c2..3afea5f17 100644 --- a/pkg/model/chi/normalizer/normalizer.go +++ b/pkg/model/chi/normalizer/normalizer.go @@ -423,6 +423,11 @@ func (n *Normalizer) normalizeReconcile(reconcile *chi.ChiReconcile) *chi.ChiRec reconcile.InheritRuntimeFrom(chop.Config().Reconcile.Runtime) reconcile.Runtime = n.normalizeReconcileRuntime(reconcile.Runtime) + // StatefulSet + // Inherit from chop Config + reconcile.InheritStatefulSetFrom(chop.Config().Reconcile) + reconcile.StatefulSet = n.normalizeReconcileStatefulSet(reconcile.StatefulSet) + // Host // Inherit from chop Config reconcile.InheritHostFrom(chop.Config().Reconcile.Host) @@ 
-441,6 +446,31 @@ func (n *Normalizer) normalizeReconcileRuntime(runtime chi.ReconcileRuntime) chi return runtime } +func (n *Normalizer) normalizeReconcileStatefulSet(sts chi.ReconcileStatefulSet) chi.ReconcileStatefulSet { + // Create + if sts.Create.OnFailure == "" { + sts.Create.OnFailure = chi.OnStatefulSetCreateFailureActionDelete + } + // Update + if sts.Update.Timeout == 0 { + sts.Update.Timeout = defaultStatefulSetUpdateTimeout + } + if sts.Update.PollInterval == 0 { + sts.Update.PollInterval = defaultStatefulSetUpdatePollInterval + } + if sts.Update.OnFailure == "" { + sts.Update.OnFailure = chi.OnStatefulSetUpdateFailureActionRollback + } + // Recreate + if sts.Recreate.OnDataLoss == "" { + sts.Recreate.OnDataLoss = chi.OnStatefulSetRecreateOnDataLossActionRecreate + } + if sts.Recreate.OnUpdateFailure == "" { + sts.Recreate.OnUpdateFailure = chi.OnStatefulSetRecreateOnUpdateFailureActionRecreate + } + return sts +} + func (n *Normalizer) normalizeReconcileHost(rh chi.ReconcileHost) chi.ReconcileHost { // Normalize rh = rh.Normalize(types.NewStringBool(true), false) @@ -1009,6 +1039,7 @@ func (n *Normalizer) normalizeClusterReconcile(reconcile *chi.ClusterReconcile) reconcile = reconcile.Ensure() reconcile.Runtime = n.normalizeReconcileRuntime(reconcile.Runtime) + reconcile.StatefulSet = n.normalizeReconcileStatefulSet(reconcile.StatefulSet) reconcile.Host = n.normalizeReconcileHost(reconcile.Host) return reconcile } From cd4c0580cf4e7b995e929299b1b7642f4a39017b Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 22 Jan 2026 19:30:23 +0500 Subject: [PATCH 093/233] dev: normalizer const --- pkg/model/chk/normalizer/const.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/model/chk/normalizer/const.go b/pkg/model/chk/normalizer/const.go index 599967f18..82df80377 100644 --- a/pkg/model/chk/normalizer/const.go +++ b/pkg/model/chk/normalizer/const.go @@ -26,4 +26,10 @@ const ( // reconciled concurrently, if the number of shard 
reconciliation threads is greater than or equal to the number // of shards in the cluster. defaultReconcileShardsMaxConcurrencyPercent = 50 + + // defaultStatefulSetUpdateTimeout specifies the default timeout in seconds for StatefulSet update + defaultStatefulSetUpdateTimeout = 300 + + // defaultStatefulSetUpdatePollInterval specifies the default poll interval in seconds for StatefulSet update + defaultStatefulSetUpdatePollInterval = 5 ) From 31b49df93831d9e666b46c49eee44cabac18af39 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 22 Jan 2026 19:30:44 +0500 Subject: [PATCH 094/233] dev: chk normalizer --- pkg/model/chk/normalizer/normalizer.go | 31 ++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/pkg/model/chk/normalizer/normalizer.go b/pkg/model/chk/normalizer/normalizer.go index 3d9252490..a63ebc057 100644 --- a/pkg/model/chk/normalizer/normalizer.go +++ b/pkg/model/chk/normalizer/normalizer.go @@ -365,6 +365,11 @@ func (n *Normalizer) normalizeReconcile(reconcile *chi.ChiReconcile) *chi.ChiRec reconcile.InheritRuntimeFrom(chop.Config().Reconcile.Runtime) reconcile.Runtime = n.normalizeReconcileRuntime(reconcile.Runtime) + // StatefulSet + // Inherit from chop Config + reconcile.InheritStatefulSetFrom(chop.Config().Reconcile) + reconcile.StatefulSet = n.normalizeReconcileStatefulSet(reconcile.StatefulSet) + // Host // Inherit from chop Config reconcile.InheritHostFrom(chop.Config().Reconcile.Host) @@ -383,6 +388,31 @@ func (n *Normalizer) normalizeReconcileRuntime(runtime chi.ReconcileRuntime) chi return runtime } +func (n *Normalizer) normalizeReconcileStatefulSet(sts chi.ReconcileStatefulSet) chi.ReconcileStatefulSet { + // Create + if sts.Create.OnFailure == "" { + sts.Create.OnFailure = chi.OnStatefulSetCreateFailureActionDelete + } + // Update + if sts.Update.Timeout == 0 { + sts.Update.Timeout = defaultStatefulSetUpdateTimeout + } + if sts.Update.PollInterval == 0 { + sts.Update.PollInterval = 
defaultStatefulSetUpdatePollInterval + } + if sts.Update.OnFailure == "" { + sts.Update.OnFailure = chi.OnStatefulSetUpdateFailureActionRollback + } + // Recreate + if sts.Recreate.OnDataLoss == "" { + sts.Recreate.OnDataLoss = chi.OnStatefulSetRecreateOnDataLossActionRecreate + } + if sts.Recreate.OnUpdateFailure == "" { + sts.Recreate.OnUpdateFailure = chi.OnStatefulSetRecreateOnUpdateFailureActionRecreate + } + return sts +} + func (n *Normalizer) normalizeReconcileHost(rh chi.ReconcileHost) chi.ReconcileHost { // Normalize rh = rh.Normalize(types.NewStringBool(false), true) @@ -720,6 +750,7 @@ func (n *Normalizer) normalizeClusterReconcile(reconcile *chi.ClusterReconcile) reconcile = reconcile.Ensure() reconcile.Runtime = n.normalizeReconcileRuntime(reconcile.Runtime) + reconcile.StatefulSet = n.normalizeReconcileStatefulSet(reconcile.StatefulSet) reconcile.Host = n.normalizeReconcileHost(reconcile.Host) return reconcile } From f3943810806c697f735d47b08c19b8d44fbe8745 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 22 Jan 2026 19:31:01 +0500 Subject: [PATCH 095/233] dev: config --- config/config.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/config/config.yaml b/config/config.yaml index 3925c35a5..1bd3d9d86 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -349,6 +349,20 @@ reconcile: # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. onFailure: abort + # Recreate StatefulSet scenario + recreate: + # What to do in case operator is in need to recreate StatefulSet? + # Possible options: + # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is, + # do not try to fix or delete or update it, just abort reconcile cycle. + # Do not proceed to the next StatefulSet(s) and wait for an admin to assist. + # 2. recreate - proceed and recreate StatefulSet. 
+ + # Triggered when PVC data loss or missing volumes are detected + onDataLoss: recreate + # Triggered when StatefulSet update fails or StatefulSet is not ready + onUpdateFailure: recreate + # Reconcile Host scenario host: # The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions: From fbcd856242c1377ad16d14b28e918dcab9708f2f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 22 Jan 2026 19:31:18 +0500 Subject: [PATCH 096/233] env: helm --- ...installations.clickhouse.altinity.com.yaml | 74 +++++++++++++++++++ ...tiontemplates.clickhouse.altinity.com.yaml | 74 +++++++++++++++++++ ...onfigurations.clickhouse.altinity.com.yaml | 18 +++++ .../files/ClickHouseKeeper_dashboard.json | 18 +++++ deploy/helm/clickhouse-operator/values.yaml | 13 ++++ 5 files changed, 197 insertions(+) diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml index 8869bd1f5..603aabf08 100644 --- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml +++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml @@ -497,6 +497,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. 
abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. 
recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml index 90ec27602..a22c51032 100644 --- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml +++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml @@ -497,6 +497,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. 
+ enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. 
+ enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml index 7a7ca67e7..9fccdc78c 100644 --- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml +++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml @@ -318,6 +318,24 @@ spec: 1. abort - do nothing, just break the process and wait for admin. 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration. 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet. + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. 
host: type: object description: | diff --git a/deploy/helm/clickhouse-operator/files/ClickHouseKeeper_dashboard.json b/deploy/helm/clickhouse-operator/files/ClickHouseKeeper_dashboard.json index 566f68240..b69a07347 100644 --- a/deploy/helm/clickhouse-operator/files/ClickHouseKeeper_dashboard.json +++ b/deploy/helm/clickhouse-operator/files/ClickHouseKeeper_dashboard.json @@ -1001,6 +1001,24 @@ "skipUrlSync": false, "type": "datasource" }, + { + "current": { + "selected": false, + "text": "prometheus", + "value": "prometheus" + }, + "hide": 2, + "includeAll": false, + "multi": false, + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, { "allValue": ".+", "current": {}, diff --git a/deploy/helm/clickhouse-operator/values.yaml b/deploy/helm/clickhouse-operator/values.yaml index 5567163ec..b0a7e285c 100644 --- a/deploy/helm/clickhouse-operator/values.yaml +++ b/deploy/helm/clickhouse-operator/values.yaml @@ -559,6 +559,19 @@ configs: # Follow 'abort' path afterwards. # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. onFailure: abort + # Recreate StatefulSet scenario + recreate: + # What to do in case operator is in need to recreate StatefulSet? + # Possible options: + # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is, + # do not try to fix or delete or update it, just abort reconcile cycle. + # Do not proceed to the next StatefulSet(s) and wait for an admin to assist. + # 2. recreate - proceed and recreate StatefulSet. 
+ + # Triggered when PVC data loss or missing volumes are detected + onDataLoss: recreate + # Triggered when StatefulSet update fails or StatefulSet is not ready + onUpdateFailure: recreate # Reconcile Host scenario host: # The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions: From 66968640a8e4142afd434bc3658d481e1898c29d Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 22 Jan 2026 19:31:37 +0500 Subject: [PATCH 097/233] env: manifests --- .../clickhouse-operator-install-ansible.yaml | 180 ++++++++++ ...house-operator-install-bundle-v1beta1.yaml | 180 ++++++++++ .../clickhouse-operator-install-bundle.yaml | 180 ++++++++++ ...use-operator-install-template-v1beta1.yaml | 180 ++++++++++ .../clickhouse-operator-install-template.yaml | 180 ++++++++++ .../clickhouse-operator-install-tf.yaml | 180 ++++++++++ deploy/operator/parts/crd.yaml | 314 ++++++++++++++++++ 7 files changed, 1394 insertions(+) diff --git a/deploy/operator/clickhouse-operator-install-ansible.yaml b/deploy/operator/clickhouse-operator-install-ansible.yaml index 63c55e27e..645adffdb 100644 --- a/deploy/operator/clickhouse-operator-install-ansible.yaml +++ b/deploy/operator/clickhouse-operator-install-ansible.yaml @@ -504,6 +504,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. 
delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. 
+ enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | @@ -1963,6 +2037,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. 
+ enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | @@ -3243,6 +3391,24 @@ spec: 1. abort - do nothing, just break the process and wait for admin. 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration. 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet. + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. 
host: type: object description: | @@ -4995,6 +5161,20 @@ data: # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. onFailure: abort + # Recreate StatefulSet scenario + recreate: + # What to do in case operator is in need to recreate StatefulSet? + # Possible options: + # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is, + # do not try to fix or delete or update it, just abort reconcile cycle. + # Do not proceed to the next StatefulSet(s) and wait for an admin to assist. + # 2. recreate - proceed and recreate StatefulSet. + + # Triggered when PVC data loss or missing volumes are detected + onDataLoss: recreate + # Triggered when StatefulSet update fails or StatefulSet is not ready + onUpdateFailure: recreate + # Reconcile Host scenario host: # The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions: diff --git a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml index bcaf40f47..5e3be768e 100644 --- a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml +++ b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml @@ -492,6 +492,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. 
delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. 
+ enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | @@ -1939,6 +2013,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. 
+ enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | @@ -3210,6 +3358,24 @@ spec: 1. abort - do nothing, just break the process and wait for admin. 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration. 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet. + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. 
host: type: object description: | @@ -5194,6 +5360,20 @@ data: # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. onFailure: abort + # Recreate StatefulSet scenario + recreate: + # What to do in case operator is in need to recreate StatefulSet? + # Possible options: + # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is, + # do not try to fix or delete or update it, just abort reconcile cycle. + # Do not proceed to the next StatefulSet(s) and wait for an admin to assist. + # 2. recreate - proceed and recreate StatefulSet. + + # Triggered when PVC data loss or missing volumes are detected + onDataLoss: recreate + # Triggered when StatefulSet update fails or StatefulSet is not ready + onUpdateFailure: recreate + # Reconcile Host scenario host: # The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions: diff --git a/deploy/operator/clickhouse-operator-install-bundle.yaml b/deploy/operator/clickhouse-operator-install-bundle.yaml index e4b67175d..29e97fa42 100644 --- a/deploy/operator/clickhouse-operator-install-bundle.yaml +++ b/deploy/operator/clickhouse-operator-install-bundle.yaml @@ -497,6 +497,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. 
delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. 
+ enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | @@ -1956,6 +2030,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. 
+ enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | @@ -3236,6 +3384,24 @@ spec: 1. abort - do nothing, just break the process and wait for admin. 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration. 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet. + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. 
host: type: object description: | @@ -5254,6 +5420,20 @@ data: # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. onFailure: abort + # Recreate StatefulSet scenario + recreate: + # What to do in case operator is in need to recreate StatefulSet? + # Possible options: + # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is, + # do not try to fix or delete or update it, just abort reconcile cycle. + # Do not proceed to the next StatefulSet(s) and wait for an admin to assist. + # 2. recreate - proceed and recreate StatefulSet. + + # Triggered when PVC data loss or missing volumes are detected + onDataLoss: recreate + # Triggered when StatefulSet update fails or StatefulSet is not ready + onUpdateFailure: recreate + # Reconcile Host scenario host: # The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions: diff --git a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml index 507154642..3ce02b772 100644 --- a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml +++ b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml @@ -492,6 +492,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. 
delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. 
+ enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | @@ -1939,6 +2013,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. 
+ enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | @@ -3210,6 +3358,24 @@ spec: 1. abort - do nothing, just break the process and wait for admin. 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration. 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet. + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. 
host: type: object description: | @@ -4941,6 +5107,20 @@ data: # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. onFailure: abort + # Recreate StatefulSet scenario + recreate: + # What to do in case operator is in need to recreate StatefulSet? + # Possible options: + # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is, + # do not try to fix or delete or update it, just abort reconcile cycle. + # Do not proceed to the next StatefulSet(s) and wait for an admin to assist. + # 2. recreate - proceed and recreate StatefulSet. + + # Triggered when PVC data loss or missing volumes are detected + onDataLoss: recreate + # Triggered when StatefulSet update fails or StatefulSet is not ready + onUpdateFailure: recreate + # Reconcile Host scenario host: # The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions: diff --git a/deploy/operator/clickhouse-operator-install-template.yaml b/deploy/operator/clickhouse-operator-install-template.yaml index 6090c6463..07e9067cd 100644 --- a/deploy/operator/clickhouse-operator-install-template.yaml +++ b/deploy/operator/clickhouse-operator-install-template.yaml @@ -497,6 +497,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. 
delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. 
+ enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | @@ -1956,6 +2030,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. 
+ enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | @@ -3236,6 +3384,24 @@ spec: 1. abort - do nothing, just break the process and wait for admin. 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration. 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet. + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. 
host: type: object description: | @@ -4988,6 +5154,20 @@ data: # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. onFailure: abort + # Recreate StatefulSet scenario + recreate: + # What to do in case operator is in need to recreate StatefulSet? + # Possible options: + # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is, + # do not try to fix or delete or update it, just abort reconcile cycle. + # Do not proceed to the next StatefulSet(s) and wait for an admin to assist. + # 2. recreate - proceed and recreate StatefulSet. + + # Triggered when PVC data loss or missing volumes are detected + onDataLoss: recreate + # Triggered when StatefulSet update fails or StatefulSet is not ready + onUpdateFailure: recreate + # Reconcile Host scenario host: # The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions: diff --git a/deploy/operator/clickhouse-operator-install-tf.yaml b/deploy/operator/clickhouse-operator-install-tf.yaml index dd30e5c4b..d10a5fde0 100644 --- a/deploy/operator/clickhouse-operator-install-tf.yaml +++ b/deploy/operator/clickhouse-operator-install-tf.yaml @@ -504,6 +504,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. 
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. 
+ enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | @@ -1963,6 +2037,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. 
+ enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" host: &TypeReconcileHost type: object description: | @@ -3243,6 +3391,24 @@ spec: 1. abort - do nothing, just break the process and wait for admin. 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration. 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet. + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. 
host: type: object description: | @@ -4995,6 +5161,20 @@ data: # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. onFailure: abort + # Recreate StatefulSet scenario + recreate: + # What to do in case operator is in need to recreate StatefulSet? + # Possible options: + # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is, + # do not try to fix or delete or update it, just abort reconcile cycle. + # Do not proceed to the next StatefulSet(s) and wait for an admin to assist. + # 2. recreate - proceed and recreate StatefulSet. + + # Triggered when PVC data loss or missing volumes are detected + onDataLoss: recreate + # Triggered when StatefulSet update fails or StatefulSet is not ready + onUpdateFailure: recreate + # Reconcile Host scenario host: # The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions: diff --git a/deploy/operator/parts/crd.yaml b/deploy/operator/parts/crd.yaml index 807e75bd4..efb4723ba 100644 --- a/deploy/operator/parts/crd.yaml +++ b/deploy/operator/parts/crd.yaml @@ -706,6 +706,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. 
ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. 
+ enum: + - "" + - "abort" + - "recreate" host: type: object description: | @@ -1320,6 +1394,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. 
+ enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" host: type: object description: | @@ -4258,6 +4406,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. 
+ enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" host: type: object description: | @@ -4872,6 +5094,80 @@ spec: minimum: 0 maximum: 100 description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." 
+ statefulSet: + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. 
abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" host: type: object description: | @@ -7422,6 +7718,24 @@ spec: 1. abort - do nothing, just break the process and wait for admin. 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration. 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet. + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. 
host: type: object description: | From 3be0d8fb354895fe00f0b8eabc08936e9d2e0ce7 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 23 Jan 2026 17:00:04 +0500 Subject: [PATCH 098/233] dev: on data loss abort for chi --- pkg/controller/chi/worker-reconciler-chi.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/pkg/controller/chi/worker-reconciler-chi.go b/pkg/controller/chi/worker-reconciler-chi.go index 6a6c980d5..1b9510c34 100644 --- a/pkg/controller/chi/worker-reconciler-chi.go +++ b/pkg/controller/chi/worker-reconciler-chi.go @@ -778,15 +778,26 @@ func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error { w.a.V(1).M(host).F().Info("Reconcile PVCs and data loss for host: %s", host.GetName()) - // In case data loss or volumes missing detected we may need to specify additional reconcile options + // In case data loss or volumes missing detected we may + // 1. need to specify additional reconcile options + // 2. abort the reconcile completely err := w.reconcileHostPVCs(ctx, host) + onDataLoss := host.GetCluster().GetReconcile().StatefulSet.Recreate.OnDataLoss switch { case storage.ErrIsDataLoss(err): + if onDataLoss == api.OnStatefulSetRecreateOnDataLossActionAbort { + w.a.V(1).M(host).F().Warning("Data loss detected for host: %s. Aborting reconcile as configured (onDataLoss: abort)", host.GetName()) + return common.ErrCRUDAbort + } stsReconcileOpts, migrateTableOpts = w.hostPVCsDataLossDetectedOptions(host) w.a.V(1). M(host).F(). Info("Data loss detected for host: %s.", host.GetName()) case storage.ErrIsVolumeMissed(err): + if onDataLoss == api.OnStatefulSetRecreateOnDataLossActionAbort { + w.a.V(1).M(host).F().Warning("Data volume missed for host: %s. 
Aborting reconcile as configured (onDataLoss: abort)", host.GetName()) + return common.ErrCRUDAbort + } // stsReconcileOpts, migrateTableOpts = w.hostPVCsDataVolumeMissedDetectedOptions(host) stsReconcileOpts, migrateTableOpts = w.hostPVCsDataLossDetectedOptions(host) w.a.V(1). From 3186effa63e8a56f00a88e02963b5a469716a428 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 23 Jan 2026 17:00:23 +0500 Subject: [PATCH 099/233] dev: on data loss abort for chk --- pkg/controller/chk/worker-reconciler-chk.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/pkg/controller/chk/worker-reconciler-chk.go b/pkg/controller/chk/worker-reconciler-chk.go index 00e44dfc1..abc888359 100644 --- a/pkg/controller/chk/worker-reconciler-chk.go +++ b/pkg/controller/chk/worker-reconciler-chk.go @@ -676,15 +676,26 @@ func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error { w.a.V(1).M(host).F().Info("Reconcile PVCs and data loss for host: %s", host.GetName()) - // In case data loss or volumes missing detected we may need to specify additional reconcile options + // In case data loss or volumes missing detected we may + // 1. need to specify additional reconcile options + // 2. abort the reconcile completely err := w.reconcileHostPVCs(ctx, host) + onDataLoss := host.GetCluster().GetReconcile().StatefulSet.Recreate.OnDataLoss switch { case storage.ErrIsDataLoss(err): + if onDataLoss == api.OnStatefulSetRecreateOnDataLossActionAbort { + w.a.V(1).M(host).F().Warning("Data loss detected for host: %s. Aborting reconcile as configured (onDataLoss: abort)", host.GetName()) + return common.ErrCRUDAbort + } stsReconcileOpts = w.hostPVCsDataLossDetectedOptions(host) w.a.V(1). M(host).F(). Info("Data loss detected for host: %s.", host.GetName()) case storage.ErrIsVolumeMissed(err): + if onDataLoss == api.OnStatefulSetRecreateOnDataLossActionAbort { + w.a.V(1).M(host).F().Warning("Data volume missed for host: %s. 
Aborting reconcile as configured (onDataLoss: abort)", host.GetName()) + return common.ErrCRUDAbort + } // stsReconcileOpts, migrateTableOpts = w.hostPVCsDataVolumeMissedDetectedOptions(host) stsReconcileOpts = w.hostPVCsDataLossDetectedOptions(host) w.a.V(1). From 800860eb1b38ee6b1b038d7c2982ab4d152c7a03 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 23 Jan 2026 18:23:27 +0500 Subject: [PATCH 100/233] dev: clarify create and introduce abort for recreate --- .../statefulset/statefulset-reconciler.go | 29 ++++++++++++++++--- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/pkg/controller/common/statefulset/statefulset-reconciler.go b/pkg/controller/common/statefulset/statefulset-reconciler.go index b0619cdab..0d0660d50 100644 --- a/pkg/controller/common/statefulset/statefulset-reconciler.go +++ b/pkg/controller/common/statefulset/statefulset-reconciler.go @@ -280,6 +280,11 @@ func (r *Reconciler) updateStatefulSet(ctx context.Context, host *api.Host, regi r.a.V(1).M(host).Info("Update StatefulSet(%s/%s) - got ignore. Ignore", namespace, name) return nil case common.ErrCRUDRecreate: + onUpdateFailure := host.GetCluster().GetReconcile().StatefulSet.Recreate.OnUpdateFailure + if onUpdateFailure == api.OnStatefulSetRecreateOnUpdateFailureActionAbort { + r.a.V(1).M(host).Warning("Update StatefulSet(%s/%s) - would need recreate but aborting as configured (onUpdateFailure: abort)", namespace, name) + return common.ErrCRUDAbort + } r.a.WithEvent(host.GetCR(), a.EventActionUpdate, a.EventReasonUpdateInProgress). WithAction(host.GetCR()). M(host).F(). @@ -321,37 +326,53 @@ func (r *Reconciler) createStatefulSet(ctx context.Context, host *api.Host, regi }) } + return r.shouldAbortOrContinueCreateStatefulSet(action, host) +} + +func (r *Reconciler) shouldAbortOrContinueCreateStatefulSet(action error, host *api.Host) error { + statefulSet := host.Runtime.DesiredStatefulSet switch action { case nil: + // Continue r.a.V(1). 
WithEvent(host.GetCR(), a.EventActionCreate, a.EventReasonCreateCompleted). WithAction(host.GetCR()). M(host).F(). Info("Create StatefulSet: %s - completed", util.NamespaceNameString(statefulSet)) return nil + case common.ErrCRUDAbort: + // Abort r.a.WithEvent(host.GetCR(), a.EventActionCreate, a.EventReasonCreateFailed). WithAction(host.GetCR()). WithError(host.GetCR()). M(host).F(). Error("Create StatefulSet: %s - failed with error: %v", util.NamespaceNameString(statefulSet), action) - return action + return common.ErrCRUDAbort + case common.ErrCRUDIgnore: + // Continue r.a.WithEvent(host.GetCR(), a.EventActionCreate, a.EventReasonCreateFailed). WithAction(host.GetCR()). M(host).F(). Warning("Create StatefulSet: %s - error ignored", util.NamespaceNameString(statefulSet)) return nil + case common.ErrCRUDRecreate: + // Continue r.a.V(1).M(host).Warning("Got recreate action. Ignore and continue for now") return nil + case common.ErrCRUDUnexpectedFlow: + // Continue r.a.V(1).M(host).Warning("Got unexpected flow action. Ignore and continue for now") return nil - } - r.a.V(1).M(host).Warning("Got unexpected flow. This is strange. Ignore and continue for now") - return nil + default: + // Continue + r.a.V(1).M(host).Warning("Got unexpected flow. This is strange. 
Ignore and continue for now") + return nil + } } // createStatefulSet is an internal function, used in reconcileStatefulSet only From 218c37662fc4defa5fe6551cf9a7f5d245f40c5b Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 23 Jan 2026 18:45:15 +0500 Subject: [PATCH 101/233] dev: clarify recreate stateful set --- .../statefulset/statefulset-reconciler.go | 83 +++++++++++++------ 1 file changed, 56 insertions(+), 27 deletions(-) diff --git a/pkg/controller/common/statefulset/statefulset-reconciler.go b/pkg/controller/common/statefulset/statefulset-reconciler.go index 0d0660d50..dadfee9b8 100644 --- a/pkg/controller/common/statefulset/statefulset-reconciler.go +++ b/pkg/controller/common/statefulset/statefulset-reconciler.go @@ -252,52 +252,81 @@ func (r *Reconciler) updateStatefulSet(ctx context.Context, host *api.Host, regi action := common.ErrCRUDRecreate if k8s.IsStatefulSetReady(curStatefulSet) { - action = r.doUpdateStatefulSet(ctx, curStatefulSet, newStatefulSet, host, opts) + if action = r.doUpdateStatefulSet(ctx, curStatefulSet, newStatefulSet, host, opts); action == nil { + // Straightforward success + if register { + host.GetCR().IEnsureStatus().HostUpdated() + _ = r.cr.StatusUpdate(ctx, host.GetCR(), types.UpdateStatusOptions{ + CopyStatusOptions: types.CopyStatusOptions{ + CopyStatusFieldGroup: types.CopyStatusFieldGroup{ + FieldGroupMain: true, + }, + }, + }) + } + r.a.V(1). + WithEvent(host.GetCR(), a.EventActionUpdate, a.EventReasonUpdateCompleted). + WithAction(host.GetCR()). + M(host).F(). 
+ Info("Update StatefulSet(%s/%s) - completed", namespace, name) + + // All is done here + return nil + } } + // Something is incorrect, need to decide next moves + switch action { - case nil: - if register { - host.GetCR().IEnsureStatus().HostUpdated() - _ = r.cr.StatusUpdate(ctx, host.GetCR(), types.UpdateStatusOptions{ - CopyStatusOptions: types.CopyStatusOptions{ - CopyStatusFieldGroup: types.CopyStatusFieldGroup{ - FieldGroupMain: true, - }, - }, - }) - } - r.a.V(1). - WithEvent(host.GetCR(), a.EventActionUpdate, a.EventReasonUpdateCompleted). - WithAction(host.GetCR()). - M(host).F(). - Info("Update StatefulSet(%s/%s) - completed", namespace, name) - return nil - case common.ErrCRUDAbort: - r.a.V(1).M(host).Info("Update StatefulSet(%s/%s) - got abort. Abort", namespace, name) - return common.ErrCRUDAbort - case common.ErrCRUDIgnore: - r.a.V(1).M(host).Info("Update StatefulSet(%s/%s) - got ignore. Ignore", namespace, name) - return nil case common.ErrCRUDRecreate: + // Second attempt requested + onUpdateFailure := host.GetCluster().GetReconcile().StatefulSet.Recreate.OnUpdateFailure if onUpdateFailure == api.OnStatefulSetRecreateOnUpdateFailureActionAbort { r.a.V(1).M(host).Warning("Update StatefulSet(%s/%s) - would need recreate but aborting as configured (onUpdateFailure: abort)", namespace, name) return common.ErrCRUDAbort } + + // Continue second attempt r.a.WithEvent(host.GetCR(), a.EventActionUpdate, a.EventReasonUpdateInProgress). WithAction(host.GetCR()). M(host).F(). 
Info("Update StatefulSet(%s/%s) switch from Update to Recreate", namespace, name) common.DumpStatefulSetDiff(host, curStatefulSet, newStatefulSet) return r.recreateStatefulSet(ctx, host, register, opts) + + default: + // Decide on other non-successful cases + return r.shouldAbortOrContinueUpdateStatefulSet(action, host) + } +} + +func (r *Reconciler) shouldAbortOrContinueUpdateStatefulSet(action error, host *api.Host) error { + newStatefulSet := host.Runtime.DesiredStatefulSet + namespace := newStatefulSet.Namespace + name := newStatefulSet.Name + + switch action { + case common.ErrCRUDAbort: + // Abort + r.a.V(1).M(host).Info("Update StatefulSet(%s/%s) - got abort. Abort", namespace, name) + return common.ErrCRUDAbort + + case common.ErrCRUDIgnore: + // Continue + r.a.V(1).M(host).Info("Update StatefulSet(%s/%s) - got ignore. Ignore", namespace, name) + return nil + case common.ErrCRUDUnexpectedFlow: + // Continue r.a.V(1).M(host).Warning("Got unexpected flow action. Ignore and continue for now") return nil - } - r.a.V(1).M(host).Warning("Got unexpected flow. This is strange. Ignore and continue for now") - return nil + default: + // Continue + r.a.V(1).M(host).Warning("Got unexpected flow. This is strange. 
Ignore and continue for now") + return nil + } } // createStatefulSet From 709c56c53d1a9353523bc05b25e2116e28ef85bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20B=C4=85k?= Date: Wed, 21 Jan 2026 14:13:54 +0100 Subject: [PATCH 102/233] Fix installer to default template URL to OPERATOR_VERSION --- .../operator-web-installer/clickhouse-operator-install.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/operator-web-installer/clickhouse-operator-install.sh b/deploy/operator-web-installer/clickhouse-operator-install.sh index 1c3feb65a..368d88f7f 100755 --- a/deploy/operator-web-installer/clickhouse-operator-install.sh +++ b/deploy/operator-web-installer/clickhouse-operator-install.sh @@ -141,9 +141,6 @@ check_envsubst_available # Manifest is expected to be ready-to-use manifest file MANIFEST="${MANIFEST:-""}" -# Template can have params to substitute -DEFAULT_TEMPLATE="https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-template.yaml" -TEMPLATE="${TEMPLATE:-"${DEFAULT_TEMPLATE}"}" # Namespace to install operator OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"kube-system"}" METRICS_EXPORTER_NAMESPACE="${OPERATOR_NAMESPACE}" @@ -153,6 +150,9 @@ if [[ -z "${OPERATOR_VERSION}" ]]; then RELEASE_VERSION=$(get_file https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/release) fi OPERATOR_VERSION="${OPERATOR_VERSION:-"${RELEASE_VERSION}"}" +# Template can have params to substitute +DEFAULT_TEMPLATE="https://raw.githubusercontent.com/Altinity/clickhouse-operator/${OPERATOR_VERSION:-master}/deploy/operator/clickhouse-operator-install-template.yaml" +TEMPLATE="${TEMPLATE:-"${DEFAULT_TEMPLATE}"}" OPERATOR_IMAGE="${OPERATOR_IMAGE:-"altinity/clickhouse-operator:${OPERATOR_VERSION}"}" OPERATOR_IMAGE_PULL_POLICY="${OPERATOR_IMAGE_PULL_POLICY:-"Always"}" METRICS_EXPORTER_IMAGE="${METRICS_EXPORTER_IMAGE:-"altinity/metrics-exporter:${OPERATOR_VERSION}"}" @@ -199,4 +199,4 @@ 
elif [[ ! -z "${TEMPLATE}" ]]; then ) else echo "Neither manifest nor template available. Abort." -fi +fi \ No newline at end of file From 295d6363495dd15da7f7db6ce94c08700d9b8189 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 14:59:20 +0500 Subject: [PATCH 103/233] dev: add regexp --- .../clickhouse.altinity.com/v1/type_configuration_chop.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go index 16987624a..0b394f2c1 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go @@ -371,6 +371,11 @@ type OperatorConfigClickHouse struct { Timeouts struct { Collect time.Duration `json:"collect" yaml:"collect"` } `json:"timeouts" yaml:"timeouts"` + // TablesRegexp specifies regexp to match tables in system database to fetch metrics from. + // Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + // Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. + // Set to empty string to fetch only from system.metrics without using merge(). 
+ TablesRegexp string `json:"tablesRegexp" yaml:"tablesRegexp"` } `json:"metrics" yaml:"metrics"` } From f978a9ab132f8b5edc2cabccd9bf51a95a5810eb Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 14:59:50 +0500 Subject: [PATCH 104/233] dev: config --- deploy/builder/templates-config/config.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/deploy/builder/templates-config/config.yaml b/deploy/builder/templates-config/config.yaml index 912429cd5..ea0346bdc 100644 --- a/deploy/builder/templates-config/config.yaml +++ b/deploy/builder/templates-config/config.yaml @@ -239,6 +239,11 @@ clickhouse: # Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. # All collected metrics are returned. collect: 9 + # Regexp to match tables in system database to fetch metrics from. + # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + # Set to empty string to fetch only from system.metrics without using merge(). + # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. 
+ tablesRegexp: "^(metrics|custom_metrics)$" keeper: configuration: From 0afe1ba875d669c5bdc53194f4d5259f558ccaad Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:00:14 +0500 Subject: [PATCH 105/233] dev: crd --- ...r-install-yaml-template-01-section-crd-02-chopconf.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml index 460608791..89ed44712 100644 --- a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml +++ b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml @@ -240,6 +240,13 @@ spec: Timeout used to limit metrics collection request. In seconds. Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. All collected metrics are returned. + tablesRegexp: + type: string + description: | + Regexp to match tables in system database to fetch metrics from. + Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + Set to empty string to fetch only from system.metrics without using merge(). + Default is "^(metrics|custom_metrics)$". 
template: type: object description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment" From ade11f4813f301662d1890b0244c145cd3b95fce Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:01:53 +0500 Subject: [PATCH 106/233] dev: implement metrics fetcher --- .../clickhouse/clickhouse_metrics_fetcher.go | 26 ++++++++++++++++--- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go index 00f14e9e5..9d790b438 100644 --- a/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go +++ b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go @@ -17,6 +17,8 @@ package clickhouse import ( "context" "database/sql" + "fmt" + "github.com/MakeNowJust/heredoc" "github.com/altinity/clickhouse-operator/pkg/model/clickhouse" @@ -33,13 +35,13 @@ const ( WHERE is_session_expired ` - queryMetricsSQL = ` + queryMetricsSQLTemplate = ` SELECT concat('metric.', metric) AS metric, toString(value) AS value, '' AS description, 'gauge' AS type - FROM merge('system','^(metrics|custom_metrics)$') + FROM %s UNION ALL SELECT concat('metric.', metric) AS metric, @@ -139,12 +141,17 @@ const ( // ClickHouseMetricsFetcher specifies clickhouse fetcher object type ClickHouseMetricsFetcher struct { connectionParams *clickhouse.EndpointConnectionParams + tablesRegexp string } // NewClickHouseFetcher creates new clickhouse fetcher object -func NewClickHouseFetcher(endpointConnectionParams *clickhouse.EndpointConnectionParams) *ClickHouseMetricsFetcher { +func NewClickHouseFetcher( + endpointConnectionParams *clickhouse.EndpointConnectionParams, + tablesRegexp string, +) *ClickHouseMetricsFetcher { return &ClickHouseMetricsFetcher{ connectionParams: endpointConnectionParams, + tablesRegexp: tablesRegexp, } } @@ -152,11 +159,22 @@ func (f *ClickHouseMetricsFetcher) 
connection() *clickhouse.Connection { return clickhouse.GetPooledDBConnection(f.connectionParams) } +// buildMetricsTableSource returns the FROM clause for the metrics query. +// If tablesRegexp is set, it uses merge() to query tables matching the regexp. +// Otherwise, it queries only system.metrics. +func (f *ClickHouseMetricsFetcher) buildMetricsTableSource() string { + if f.tablesRegexp == "" { + return "system.metrics" + } + return fmt.Sprintf("merge('system','%s')", f.tablesRegexp) +} + // getClickHouseQueryMetrics requests metrics data from ClickHouse func (f *ClickHouseMetricsFetcher) getClickHouseQueryMetrics(ctx context.Context) (Table, error) { + metricsSQL := fmt.Sprintf(queryMetricsSQLTemplate, f.buildMetricsTableSource()) return f.clickHouseQueryScanRows( ctx, - queryMetricsSQL, + metricsSQL, func(rows *sql.Rows, data *Table) error { var metric, value, description, _type string if err := rows.Scan(&metric, &value, &description, &_type); err == nil { From a9b87fcfabb6eb724f6e1bef5f4ba105e44610b6 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:02:20 +0500 Subject: [PATCH 107/233] dev: constructore --- pkg/metrics/clickhouse/exporter.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/metrics/clickhouse/exporter.go b/pkg/metrics/clickhouse/exporter.go index b4eecac0d..1f696e7bd 100644 --- a/pkg/metrics/clickhouse/exporter.go +++ b/pkg/metrics/clickhouse/exporter.go @@ -167,7 +167,10 @@ func (e *Exporter) newHostFetcher(host *metrics.WatchedHost) *ClickHouseMetricsF clusterConnectionParams.Port = int(host.HTTPSPort) } - return NewClickHouseFetcher(clusterConnectionParams.NewEndpointConnectionParams(host.Hostname)) + return NewClickHouseFetcher( + clusterConnectionParams.NewEndpointConnectionParams(host.Hostname), + chop.Config().ClickHouse.Metrics.TablesRegexp, + ) } // collectHostMetrics collects metrics from one host and writes them into chan From dcc745581a5239cd36f4166713f7fc52a5305d19 Mon Sep 17 
00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:02:39 +0500 Subject: [PATCH 108/233] docs: example --- docs/chi-examples/70-chop-config.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/chi-examples/70-chop-config.yaml b/docs/chi-examples/70-chop-config.yaml index d70068749..a492937d5 100644 --- a/docs/chi-examples/70-chop-config.yaml +++ b/docs/chi-examples/70-chop-config.yaml @@ -81,6 +81,23 @@ spec: # Port where to connect to ClickHouse instances to port: 8123 + ################################################ + ## + ## Metrics collection + ## + ################################################ + metrics: + timeouts: + # Timeout used to limit metrics collection request. In seconds. + # Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. + # All collected metrics are returned. + collect: 9 + # Regexp to match tables in system database to fetch metrics from. + # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + # Set to empty string to fetch only from system.metrics without using merge(). + # Default is "^(metrics|custom_metrics)$". + tablesRegexp: "^(metrics|custom_metrics)$" + ################################################ ## ## Templates Section From ed484975dca19e5efdf2e8fa500d117fdc60bd13 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:02:55 +0500 Subject: [PATCH 109/233] dev: configs --- config/config-dev.yaml | 19 +++++++++++++++++++ config/config.yaml | 5 +++++ 2 files changed, 24 insertions(+) diff --git a/config/config-dev.yaml b/config/config-dev.yaml index 88a239a4e..2ccb66425 100644 --- a/config/config-dev.yaml +++ b/config/config-dev.yaml @@ -247,6 +247,11 @@ clickhouse: # Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. # All collected metrics are returned. 
collect: 9 + # Regexp to match tables in system database to fetch metrics from. + # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + # Set to empty string to fetch only from system.metrics without using merge(). + # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. + tablesRegexp: "^(metrics|custom_metrics)$" keeper: configuration: @@ -351,6 +356,20 @@ reconcile: # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. onFailure: abort + # Recreate StatefulSet scenario + recreate: + # What to do in case operator is in need to recreate StatefulSet? + # Possible options: + # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is, + # do not try to fix or delete or update it, just abort reconcile cycle. + # Do not proceed to the next StatefulSet(s) and wait for an admin to assist. + # 2. recreate - proceed and recreate StatefulSet. + + # Triggered when PVC data loss or missing volumes are detected + onDataLoss: recreate + # Triggered when StatefulSet update fails or StatefulSet is not ready + onUpdateFailure: recreate + # Reconcile Host scenario host: # The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions: diff --git a/config/config.yaml b/config/config.yaml index 1bd3d9d86..6376820d6 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -245,6 +245,11 @@ clickhouse: # Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. # All collected metrics are returned. collect: 9 + # Regexp to match tables in system database to fetch metrics from. + # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + # Set to empty string to fetch only from system.metrics without using merge(). 
+ # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. + tablesRegexp: "^(metrics|custom_metrics)$" keeper: configuration: From 76c9df9eda5070815efd264313c831318d0761d7 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:03:06 +0500 Subject: [PATCH 110/233] env: manifests --- .../clickhouse-operator-install-ansible.yaml | 12 ++++++++++++ .../clickhouse-operator-install-bundle-v1beta1.yaml | 12 ++++++++++++ .../operator/clickhouse-operator-install-bundle.yaml | 12 ++++++++++++ ...clickhouse-operator-install-template-v1beta1.yaml | 12 ++++++++++++ .../clickhouse-operator-install-template.yaml | 12 ++++++++++++ deploy/operator/clickhouse-operator-install-tf.yaml | 12 ++++++++++++ deploy/operator/parts/crd.yaml | 7 +++++++ 7 files changed, 79 insertions(+) diff --git a/deploy/operator/clickhouse-operator-install-ansible.yaml b/deploy/operator/clickhouse-operator-install-ansible.yaml index 645adffdb..f1906c5c1 100644 --- a/deploy/operator/clickhouse-operator-install-ansible.yaml +++ b/deploy/operator/clickhouse-operator-install-ansible.yaml @@ -3313,6 +3313,13 @@ spec: Timeout used to limit metrics collection request. In seconds. Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. All collected metrics are returned. + tablesRegexp: + type: string + description: | + Regexp to match tables in system database to fetch metrics from. + Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + Set to empty string to fetch only from system.metrics without using merge(). + Default is "^(metrics|custom_metrics)$". 
template: type: object description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment" @@ -5057,6 +5064,11 @@ data: # Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. # All collected metrics are returned. collect: 9 + # Regexp to match tables in system database to fetch metrics from. + # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + # Set to empty string to fetch only from system.metrics without using merge(). + # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. + tablesRegexp: "^(metrics|custom_metrics)$" keeper: configuration: diff --git a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml index 5e3be768e..082b180ca 100644 --- a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml +++ b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml @@ -3280,6 +3280,13 @@ spec: Timeout used to limit metrics collection request. In seconds. Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. All collected metrics are returned. + tablesRegexp: + type: string + description: | + Regexp to match tables in system database to fetch metrics from. + Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + Set to empty string to fetch only from system.metrics without using merge(). + Default is "^(metrics|custom_metrics)$". 
template: type: object description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment" @@ -5256,6 +5263,11 @@ data: # Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. # All collected metrics are returned. collect: 9 + # Regexp to match tables in system database to fetch metrics from. + # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + # Set to empty string to fetch only from system.metrics without using merge(). + # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. + tablesRegexp: "^(metrics|custom_metrics)$" keeper: configuration: diff --git a/deploy/operator/clickhouse-operator-install-bundle.yaml b/deploy/operator/clickhouse-operator-install-bundle.yaml index 29e97fa42..0b434f1d8 100644 --- a/deploy/operator/clickhouse-operator-install-bundle.yaml +++ b/deploy/operator/clickhouse-operator-install-bundle.yaml @@ -3306,6 +3306,13 @@ spec: Timeout used to limit metrics collection request. In seconds. Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. All collected metrics are returned. + tablesRegexp: + type: string + description: | + Regexp to match tables in system database to fetch metrics from. + Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + Set to empty string to fetch only from system.metrics without using merge(). + Default is "^(metrics|custom_metrics)$". 
template: type: object description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment" @@ -5316,6 +5323,11 @@ data: # Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. # All collected metrics are returned. collect: 9 + # Regexp to match tables in system database to fetch metrics from. + # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + # Set to empty string to fetch only from system.metrics without using merge(). + # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. + tablesRegexp: "^(metrics|custom_metrics)$" keeper: configuration: diff --git a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml index 3ce02b772..c828fb4f7 100644 --- a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml +++ b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml @@ -3280,6 +3280,13 @@ spec: Timeout used to limit metrics collection request. In seconds. Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. All collected metrics are returned. + tablesRegexp: + type: string + description: | + Regexp to match tables in system database to fetch metrics from. + Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + Set to empty string to fetch only from system.metrics without using merge(). + Default is "^(metrics|custom_metrics)$". 
template: type: object description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment" @@ -5003,6 +5010,11 @@ data: # Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. # All collected metrics are returned. collect: 9 + # Regexp to match tables in system database to fetch metrics from. + # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + # Set to empty string to fetch only from system.metrics without using merge(). + # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. + tablesRegexp: "^(metrics|custom_metrics)$" keeper: configuration: diff --git a/deploy/operator/clickhouse-operator-install-template.yaml b/deploy/operator/clickhouse-operator-install-template.yaml index 07e9067cd..937ec0adc 100644 --- a/deploy/operator/clickhouse-operator-install-template.yaml +++ b/deploy/operator/clickhouse-operator-install-template.yaml @@ -3306,6 +3306,13 @@ spec: Timeout used to limit metrics collection request. In seconds. Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. All collected metrics are returned. + tablesRegexp: + type: string + description: | + Regexp to match tables in system database to fetch metrics from. + Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + Set to empty string to fetch only from system.metrics without using merge(). + Default is "^(metrics|custom_metrics)$". 
template: type: object description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment" @@ -5050,6 +5057,11 @@ data: # Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. # All collected metrics are returned. collect: 9 + # Regexp to match tables in system database to fetch metrics from. + # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + # Set to empty string to fetch only from system.metrics without using merge(). + # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. + tablesRegexp: "^(metrics|custom_metrics)$" keeper: configuration: diff --git a/deploy/operator/clickhouse-operator-install-tf.yaml b/deploy/operator/clickhouse-operator-install-tf.yaml index d10a5fde0..586982906 100644 --- a/deploy/operator/clickhouse-operator-install-tf.yaml +++ b/deploy/operator/clickhouse-operator-install-tf.yaml @@ -3313,6 +3313,13 @@ spec: Timeout used to limit metrics collection request. In seconds. Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. All collected metrics are returned. + tablesRegexp: + type: string + description: | + Regexp to match tables in system database to fetch metrics from. + Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + Set to empty string to fetch only from system.metrics without using merge(). + Default is "^(metrics|custom_metrics)$". 
template: type: object description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment" @@ -5057,6 +5064,11 @@ data: # Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. # All collected metrics are returned. collect: 9 + # Regexp to match tables in system database to fetch metrics from. + # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + # Set to empty string to fetch only from system.metrics without using merge(). + # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. + tablesRegexp: "^(metrics|custom_metrics)$" keeper: configuration: diff --git a/deploy/operator/parts/crd.yaml b/deploy/operator/parts/crd.yaml index efb4723ba..665839690 100644 --- a/deploy/operator/parts/crd.yaml +++ b/deploy/operator/parts/crd.yaml @@ -7640,6 +7640,13 @@ spec: Timeout used to limit metrics collection request. In seconds. Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. All collected metrics are returned. + tablesRegexp: + type: string + description: | + Regexp to match tables in system database to fetch metrics from. + Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + Set to empty string to fetch only from system.metrics without using merge(). + Default is "^(metrics|custom_metrics)$". 
template: type: object description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment" From f78435355444fc30ee3bbc6cca951fe41d1c6cf1 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:03:17 +0500 Subject: [PATCH 111/233] env: helm chart --- ...ouseoperatorconfigurations.clickhouse.altinity.com.yaml | 7 +++++++ deploy/helm/clickhouse-operator/values.yaml | 5 +++++ 2 files changed, 12 insertions(+) diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml index 9fccdc78c..eae70729a 100644 --- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml +++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml @@ -240,6 +240,13 @@ spec: Timeout used to limit metrics collection request. In seconds. Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. All collected metrics are returned. + tablesRegexp: + type: string + description: | + Regexp to match tables in system database to fetch metrics from. + Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + Set to empty string to fetch only from system.metrics without using merge(). + Default is "^(metrics|custom_metrics)$". 
template: type: object description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment" diff --git a/deploy/helm/clickhouse-operator/values.yaml b/deploy/helm/clickhouse-operator/values.yaml index b0a7e285c..ea502c924 100644 --- a/deploy/helm/clickhouse-operator/values.yaml +++ b/deploy/helm/clickhouse-operator/values.yaml @@ -464,6 +464,11 @@ configs: # Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. # All collected metrics are returned. collect: 9 + # Regexp to match tables in system database to fetch metrics from. + # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. + # Set to empty string to fetch only from system.metrics without using merge(). + # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. + tablesRegexp: "^(metrics|custom_metrics)$" keeper: configuration: ################################################ From 97164196dabd4d148a9b13087e1fc36f3ebda315 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:24:54 +0500 Subject: [PATCH 112/233] dev: normalizer --- .../clickhouse.altinity.com/v1/type_configuration_chop.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go index 0b394f2c1..2a58578c8 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go @@ -78,6 +78,9 @@ const ( // defaultTimeoutCollect specifies default timeout to collect metrics from the ClickHouse instance. 
In seconds defaultTimeoutCollect = 8 + // defaultMetricsTablesRegexp specifies default regexp to match tables in system database to fetch metrics from + defaultMetricsTablesRegexp = "^(metrics|custom_metrics)$" + // defaultReconcileCHIsThreadsNumber specifies default number of controller threads running concurrently. // Used in case no other specified in config defaultReconcileCHIsThreadsNumber = 1 @@ -374,7 +377,6 @@ type OperatorConfigClickHouse struct { // TablesRegexp specifies regexp to match tables in system database to fetch metrics from. // Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. // Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. - // Set to empty string to fetch only from system.metrics without using merge(). TablesRegexp string `json:"tablesRegexp" yaml:"tablesRegexp"` } `json:"metrics" yaml:"metrics"` } @@ -1142,6 +1144,10 @@ func (c *OperatorConfig) normalizeSectionClickHouseMetrics() { } // Adjust seconds to time.Duration c.ClickHouse.Metrics.Timeouts.Collect = c.ClickHouse.Metrics.Timeouts.Collect * time.Second + + if c.ClickHouse.Metrics.TablesRegexp == "" { + c.ClickHouse.Metrics.TablesRegexp = defaultMetricsTablesRegexp + } } func (c *OperatorConfig) normalizeSectionLogger() { From 549b6fb3242c620c65190ffbb8762d37ae3a69e8 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:25:38 +0500 Subject: [PATCH 113/233] dev: fallback to safe defaults --- pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go index 9d790b438..71ffa5e06 100644 --- a/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go +++ b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go @@ -161,10 +161,9 @@ func (f *ClickHouseMetricsFetcher) connection() *clickhouse.Connection 
{ // buildMetricsTableSource returns the FROM clause for the metrics query. // If tablesRegexp is set, it uses merge() to query tables matching the regexp. -// Otherwise, it queries only system.metrics. func (f *ClickHouseMetricsFetcher) buildMetricsTableSource() string { if f.tablesRegexp == "" { - return "system.metrics" + return "merge('system','^(metrics|custom_metrics)$')" } return fmt.Sprintf("merge('system','%s')", f.tablesRegexp) } From b511809d8728a564a024a3cd21b7f21bb1a27503 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:25:58 +0500 Subject: [PATCH 114/233] docs: example --- docs/chi-examples/70-chop-config.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/chi-examples/70-chop-config.yaml b/docs/chi-examples/70-chop-config.yaml index a492937d5..05ee2e1e4 100644 --- a/docs/chi-examples/70-chop-config.yaml +++ b/docs/chi-examples/70-chop-config.yaml @@ -94,7 +94,6 @@ spec: collect: 9 # Regexp to match tables in system database to fetch metrics from. # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - # Set to empty string to fetch only from system.metrics without using merge(). # Default is "^(metrics|custom_metrics)$". tablesRegexp: "^(metrics|custom_metrics)$" From 2a6b117c747ecefa047e98d7c4dd59d7808322ee Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:26:22 +0500 Subject: [PATCH 115/233] dev: config --- deploy/builder/templates-config/config.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/builder/templates-config/config.yaml b/deploy/builder/templates-config/config.yaml index ea0346bdc..c2770da49 100644 --- a/deploy/builder/templates-config/config.yaml +++ b/deploy/builder/templates-config/config.yaml @@ -241,7 +241,6 @@ clickhouse: collect: 9 # Regexp to match tables in system database to fetch metrics from. # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. 
- # Set to empty string to fetch only from system.metrics without using merge(). # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. tablesRegexp: "^(metrics|custom_metrics)$" From 990eab1adbbc2122a06e395250f21d10f333e292 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:26:37 +0500 Subject: [PATCH 116/233] dev: crd --- ...perator-install-yaml-template-01-section-crd-02-chopconf.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml index 89ed44712..dfe3ce30e 100644 --- a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml +++ b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml @@ -245,7 +245,6 @@ spec: description: | Regexp to match tables in system database to fetch metrics from. Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - Set to empty string to fetch only from system.metrics without using merge(). Default is "^(metrics|custom_metrics)$". template: type: object From 47f0b1d56791050be8675cd87639b7d32e478b57 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:26:52 +0500 Subject: [PATCH 117/233] dev: config update --- config/config-dev.yaml | 1 - config/config.yaml | 1 - 2 files changed, 2 deletions(-) diff --git a/config/config-dev.yaml b/config/config-dev.yaml index 2ccb66425..431c1f92b 100644 --- a/config/config-dev.yaml +++ b/config/config-dev.yaml @@ -249,7 +249,6 @@ clickhouse: collect: 9 # Regexp to match tables in system database to fetch metrics from. # Multiple tables can be matched using regexp. 
Matched tables are merged using merge() table function. - # Set to empty string to fetch only from system.metrics without using merge(). # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. tablesRegexp: "^(metrics|custom_metrics)$" diff --git a/config/config.yaml b/config/config.yaml index 6376820d6..ad55c58f7 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -247,7 +247,6 @@ clickhouse: collect: 9 # Regexp to match tables in system database to fetch metrics from. # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - # Set to empty string to fetch only from system.metrics without using merge(). # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. tablesRegexp: "^(metrics|custom_metrics)$" From 70e067108365a7f9af0e193242f25cb356927b0f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:27:18 +0500 Subject: [PATCH 118/233] env: manifests --- deploy/operator/clickhouse-operator-install-ansible.yaml | 2 -- deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml | 2 -- deploy/operator/clickhouse-operator-install-bundle.yaml | 2 -- .../operator/clickhouse-operator-install-template-v1beta1.yaml | 2 -- deploy/operator/clickhouse-operator-install-template.yaml | 2 -- deploy/operator/clickhouse-operator-install-tf.yaml | 2 -- deploy/operator/parts/crd.yaml | 1 - 7 files changed, 13 deletions(-) diff --git a/deploy/operator/clickhouse-operator-install-ansible.yaml b/deploy/operator/clickhouse-operator-install-ansible.yaml index f1906c5c1..bb0a1965a 100644 --- a/deploy/operator/clickhouse-operator-install-ansible.yaml +++ b/deploy/operator/clickhouse-operator-install-ansible.yaml @@ -3318,7 +3318,6 @@ spec: description: | Regexp to match tables in system database to fetch metrics from. Multiple tables can be matched using regexp. 
Matched tables are merged using merge() table function. - Set to empty string to fetch only from system.metrics without using merge(). Default is "^(metrics|custom_metrics)$". template: type: object @@ -5066,7 +5065,6 @@ data: collect: 9 # Regexp to match tables in system database to fetch metrics from. # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - # Set to empty string to fetch only from system.metrics without using merge(). # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. tablesRegexp: "^(metrics|custom_metrics)$" diff --git a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml index 082b180ca..b577d3be0 100644 --- a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml +++ b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml @@ -3285,7 +3285,6 @@ spec: description: | Regexp to match tables in system database to fetch metrics from. Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - Set to empty string to fetch only from system.metrics without using merge(). Default is "^(metrics|custom_metrics)$". template: type: object @@ -5265,7 +5264,6 @@ data: collect: 9 # Regexp to match tables in system database to fetch metrics from. # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - # Set to empty string to fetch only from system.metrics without using merge(). # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. 
tablesRegexp: "^(metrics|custom_metrics)$" diff --git a/deploy/operator/clickhouse-operator-install-bundle.yaml b/deploy/operator/clickhouse-operator-install-bundle.yaml index 0b434f1d8..534c8841f 100644 --- a/deploy/operator/clickhouse-operator-install-bundle.yaml +++ b/deploy/operator/clickhouse-operator-install-bundle.yaml @@ -3311,7 +3311,6 @@ spec: description: | Regexp to match tables in system database to fetch metrics from. Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - Set to empty string to fetch only from system.metrics without using merge(). Default is "^(metrics|custom_metrics)$". template: type: object @@ -5325,7 +5324,6 @@ data: collect: 9 # Regexp to match tables in system database to fetch metrics from. # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - # Set to empty string to fetch only from system.metrics without using merge(). # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. tablesRegexp: "^(metrics|custom_metrics)$" diff --git a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml index c828fb4f7..2c0302f15 100644 --- a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml +++ b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml @@ -3285,7 +3285,6 @@ spec: description: | Regexp to match tables in system database to fetch metrics from. Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - Set to empty string to fetch only from system.metrics without using merge(). Default is "^(metrics|custom_metrics)$". template: type: object @@ -5012,7 +5011,6 @@ data: collect: 9 # Regexp to match tables in system database to fetch metrics from. # Multiple tables can be matched using regexp. 
Matched tables are merged using merge() table function. - # Set to empty string to fetch only from system.metrics without using merge(). # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. tablesRegexp: "^(metrics|custom_metrics)$" diff --git a/deploy/operator/clickhouse-operator-install-template.yaml b/deploy/operator/clickhouse-operator-install-template.yaml index 937ec0adc..4ae74db95 100644 --- a/deploy/operator/clickhouse-operator-install-template.yaml +++ b/deploy/operator/clickhouse-operator-install-template.yaml @@ -3311,7 +3311,6 @@ spec: description: | Regexp to match tables in system database to fetch metrics from. Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - Set to empty string to fetch only from system.metrics without using merge(). Default is "^(metrics|custom_metrics)$". template: type: object @@ -5059,7 +5058,6 @@ data: collect: 9 # Regexp to match tables in system database to fetch metrics from. # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - # Set to empty string to fetch only from system.metrics without using merge(). # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. tablesRegexp: "^(metrics|custom_metrics)$" diff --git a/deploy/operator/clickhouse-operator-install-tf.yaml b/deploy/operator/clickhouse-operator-install-tf.yaml index 586982906..f02a601c7 100644 --- a/deploy/operator/clickhouse-operator-install-tf.yaml +++ b/deploy/operator/clickhouse-operator-install-tf.yaml @@ -3318,7 +3318,6 @@ spec: description: | Regexp to match tables in system database to fetch metrics from. Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - Set to empty string to fetch only from system.metrics without using merge(). Default is "^(metrics|custom_metrics)$". 
template: type: object @@ -5066,7 +5065,6 @@ data: collect: 9 # Regexp to match tables in system database to fetch metrics from. # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - # Set to empty string to fetch only from system.metrics without using merge(). # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. tablesRegexp: "^(metrics|custom_metrics)$" diff --git a/deploy/operator/parts/crd.yaml b/deploy/operator/parts/crd.yaml index 665839690..6b450ae64 100644 --- a/deploy/operator/parts/crd.yaml +++ b/deploy/operator/parts/crd.yaml @@ -7645,7 +7645,6 @@ spec: description: | Regexp to match tables in system database to fetch metrics from. Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - Set to empty string to fetch only from system.metrics without using merge(). Default is "^(metrics|custom_metrics)$". template: type: object From 07b9acd657ca20357feb7a4d2960581bee31ab08 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 26 Jan 2026 15:27:30 +0500 Subject: [PATCH 119/233] env: helm --- ...clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml | 1 - deploy/helm/clickhouse-operator/values.yaml | 1 - 2 files changed, 2 deletions(-) diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml index eae70729a..fb41e3d13 100644 --- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml +++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml @@ -245,7 +245,6 @@ spec: description: | Regexp to match tables in system database to fetch metrics from. 
Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - Set to empty string to fetch only from system.metrics without using merge(). Default is "^(metrics|custom_metrics)$". template: type: object diff --git a/deploy/helm/clickhouse-operator/values.yaml b/deploy/helm/clickhouse-operator/values.yaml index ea502c924..c6f3faaef 100644 --- a/deploy/helm/clickhouse-operator/values.yaml +++ b/deploy/helm/clickhouse-operator/values.yaml @@ -466,7 +466,6 @@ configs: collect: 9 # Regexp to match tables in system database to fetch metrics from. # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function. - # Set to empty string to fetch only from system.metrics without using merge(). # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics. tablesRegexp: "^(metrics|custom_metrics)$" keeper: From ab2fa34af05833e2e3139bbf6914ba21df42cb43 Mon Sep 17 00:00:00 2001 From: Eva Yu Date: Thu, 29 Jan 2026 16:22:55 -0800 Subject: [PATCH 120/233] Add CRDHook annotations Add custom annotations to CRD Hook Resources Signed-off-by: Eva Yu --- .../templates/hooks/crd-install-configmap.yaml | 3 +++ .../templates/hooks/crd-install-job.yaml | 3 +++ .../templates/hooks/crd-install-rbac.yaml | 9 +++++++++ deploy/helm/clickhouse-operator/values.yaml | 2 ++ 4 files changed, 17 insertions(+) diff --git a/deploy/helm/clickhouse-operator/templates/hooks/crd-install-configmap.yaml b/deploy/helm/clickhouse-operator/templates/hooks/crd-install-configmap.yaml index 1946b6e51..e31c127de 100644 --- a/deploy/helm/clickhouse-operator/templates/hooks/crd-install-configmap.yaml +++ b/deploy/helm/clickhouse-operator/templates/hooks/crd-install-configmap.yaml @@ -11,6 +11,9 @@ metadata: "helm.sh/hook": pre-install,pre-upgrade "helm.sh/hook-weight": "-7" "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.crdHook.annotations }} + {{- toYaml . 
| nindent 4 }} + {{- end }} data: clickhouseinstallations.yaml: | {{ .Files.Get "crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml" | indent 4 }} diff --git a/deploy/helm/clickhouse-operator/templates/hooks/crd-install-job.yaml b/deploy/helm/clickhouse-operator/templates/hooks/crd-install-job.yaml index 9b9b4e005..bb650cf63 100644 --- a/deploy/helm/clickhouse-operator/templates/hooks/crd-install-job.yaml +++ b/deploy/helm/clickhouse-operator/templates/hooks/crd-install-job.yaml @@ -11,6 +11,9 @@ metadata: "helm.sh/hook": pre-install,pre-upgrade "helm.sh/hook-weight": "-5" "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.crdHook.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} spec: template: metadata: diff --git a/deploy/helm/clickhouse-operator/templates/hooks/crd-install-rbac.yaml b/deploy/helm/clickhouse-operator/templates/hooks/crd-install-rbac.yaml index bc776c990..78ccea60c 100644 --- a/deploy/helm/clickhouse-operator/templates/hooks/crd-install-rbac.yaml +++ b/deploy/helm/clickhouse-operator/templates/hooks/crd-install-rbac.yaml @@ -12,6 +12,9 @@ metadata: "helm.sh/hook": pre-install,pre-upgrade "helm.sh/hook-weight": "-6" "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.crdHook.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -24,6 +27,9 @@ metadata: "helm.sh/hook": pre-install,pre-upgrade "helm.sh/hook-weight": "-6" "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.crdHook.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} rules: - apiGroups: - apiextensions.k8s.io @@ -47,6 +53,9 @@ metadata: "helm.sh/hook": pre-install,pre-upgrade "helm.sh/hook-weight": "-6" "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.crdHook.annotations }} + {{- toYaml . 
| nindent 4 }} + {{- end }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/deploy/helm/clickhouse-operator/values.yaml b/deploy/helm/clickhouse-operator/values.yaml index 5567163ec..bad9344f3 100644 --- a/deploy/helm/clickhouse-operator/values.yaml +++ b/deploy/helm/clickhouse-operator/values.yaml @@ -32,6 +32,8 @@ crdHook: tolerations: [] # crdHook.affinity -- affinity for CRD installation job affinity: {} + # crdHook.annotations -- additional annotations for CRD installation job + annotations: {} operator: image: # operator.image.repository -- image repository From 2d96563fa5b0a4b2c60ceab8ffe25abe6052fd43 Mon Sep 17 00:00:00 2001 From: slach Date: Wed, 4 Feb 2026 15:40:34 +0400 Subject: [PATCH 121/233] fix https://github.com/Altinity/clickhouse-operator/issues/1916 --- .../clickhouse-operator/templates/hooks/crd-install-job.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deploy/helm/clickhouse-operator/templates/hooks/crd-install-job.yaml b/deploy/helm/clickhouse-operator/templates/hooks/crd-install-job.yaml index bb650cf63..16e1d226d 100644 --- a/deploy/helm/clickhouse-operator/templates/hooks/crd-install-job.yaml +++ b/deploy/helm/clickhouse-operator/templates/hooks/crd-install-job.yaml @@ -24,6 +24,10 @@ spec: spec: serviceAccountName: {{ include "altinity-clickhouse-operator.fullname" . }}-crd-install restartPolicy: OnFailure + {{- with .Values.crdHook.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} {{- with .Values.crdHook.nodeSelector }} nodeSelector: {{- toYaml . 
| nindent 8 }} From 8495379e29f392ec51e9193983e19c3b527f5322 Mon Sep 17 00:00:00 2001 From: slach Date: Wed, 4 Feb 2026 15:41:45 +0400 Subject: [PATCH 122/233] fix https://github.com/Altinity/clickhouse-operator/issues/1916 --- deploy/helm/clickhouse-operator/values.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deploy/helm/clickhouse-operator/values.yaml b/deploy/helm/clickhouse-operator/values.yaml index 551087d63..9b438dc4d 100644 --- a/deploy/helm/clickhouse-operator/values.yaml +++ b/deploy/helm/clickhouse-operator/values.yaml @@ -18,6 +18,10 @@ crdHook: tag: "latest" # crdHook.image.pullPolicy -- image pull policy for CRD installation job pullPolicy: IfNotPresent + # crdHook.imagePullSecrets -- image pull secrets for CRD installation job + # possible value format `[{"name":"your-secret-name"}]`, + # check `kubectl explain pod.spec.imagePullSecrets` for details + imagePullSecrets: [] # crdHook.resources -- resource limits and requests for CRD installation job resources: {} # limits: From f7cb3bc46d4790cfd406fe5c4cb06c702a4d5455 Mon Sep 17 00:00:00 2001 From: Eva Yu Date: Fri, 6 Feb 2026 16:33:11 -0800 Subject: [PATCH 123/233] docs: clarify CHIT usage - describe usage of spec.taskID for reconcilitation & detail usage of RollingUpdate Signed-off-by: Eva Yu --- docs/operator_configuration.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/docs/operator_configuration.md b/docs/operator_configuration.md index 73c11ec4b..053dae4ed 100644 --- a/docs/operator_configuration.md +++ b/docs/operator_configuration.md @@ -163,5 +163,31 @@ spec: ... ``` +#### Applying Changes from ClickHouseInstallationTemplates + +Changes applied to a ClickHouseInstallationTemaplte do not automatically trigger a reconcile of the ClickHouseInstallations using the template. This is by design and intended to preserve user control and prevent undesirable rollouts to ClickHouseInstallations. 
+ +To apply the changes to ClickHouseInstallations, update the spec.taskID: + +``` +apiVersion: "clickhouse.altinity.com/v1" +kind: "ClickHouseInstallation" +... +spec: + taskID: "randomly-generated-string" +... +``` + +> Note, ClickHouse settings applied to the ClickHouse server through `spec.configuration.settings` in a ClickHouseInstallationTemplate will not trigger a server restart whether or not the setting requires a server restart to be applied. To apply the settings and restart the server, you should also set `spec.restart` to `'RollingUpdate'`. RollingUpdate should be used sparingly. It is typically removed after usage to prevent unecessary restarts: + +``` +apiVersion: "clickhouse.altinity.com/v1" +kind: "ClickHouseInstallation" +... +spec: + restart: "RollingUpdate" +... +``` + [clickhouse-operator-install-bundle.yaml]: ../deploy/operator/clickhouse-operator-install-bundle.yaml [70-chop-config.yaml]: ./chi-examples/70-chop-config.yaml From a55629cc9a5812ad55109d8920d036fe0c9918d8 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 14:02:18 +0500 Subject: [PATCH 124/233] dev: adjust resource index --- pkg/metrics/clickhouse/chi_index.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/pkg/metrics/clickhouse/chi_index.go b/pkg/metrics/clickhouse/chi_index.go index 29904d6db..585818f3a 100644 --- a/pkg/metrics/clickhouse/chi_index.go +++ b/pkg/metrics/clickhouse/chi_index.go @@ -16,17 +16,17 @@ package clickhouse import "github.com/altinity/clickhouse-operator/pkg/apis/metrics" -type chInstallationsIndex map[string]*metrics.WatchedCR +type crInstallationsIndex map[string]*metrics.WatchedCR -func (i chInstallationsIndex) slice() []*metrics.WatchedCR { +func (i crInstallationsIndex) slice() []*metrics.WatchedCR { res := make([]*metrics.WatchedCR, 0) - for _, chi := range i { - res = append(res, chi) + for _, cr := range i { + res = append(res, cr) } return res } -func (i chInstallationsIndex) 
get(key string) (*metrics.WatchedCR, bool) { +func (i crInstallationsIndex) get(key string) (*metrics.WatchedCR, bool) { if i == nil { return nil, false } @@ -36,14 +36,14 @@ func (i chInstallationsIndex) get(key string) (*metrics.WatchedCR, bool) { return nil, false } -func (i chInstallationsIndex) set(key string, value *metrics.WatchedCR) { +func (i crInstallationsIndex) set(key string, value *metrics.WatchedCR) { if i == nil { return } i[key] = value } -func (i chInstallationsIndex) remove(key string) { +func (i crInstallationsIndex) remove(key string) { if i == nil { return } @@ -52,9 +52,9 @@ func (i chInstallationsIndex) remove(key string) { } } -func (i chInstallationsIndex) walk(f func(*metrics.WatchedCR, *metrics.WatchedCluster, *metrics.WatchedHost)) { - // Loop over ClickHouseInstallations - for _, chi := range i { - chi.WalkHosts(f) +func (i crInstallationsIndex) walk(f func(*metrics.WatchedCR, *metrics.WatchedCluster, *metrics.WatchedHost)) { + // Loop over Custom Resources + for _, cr := range i { + cr.WalkHosts(f) } } From 85c4fd4ae1eee5f1bd658d26d8e021ebc94ac028 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 14:03:20 +0500 Subject: [PATCH 125/233] dev: exporter use updated cr --- pkg/metrics/clickhouse/exporter.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/metrics/clickhouse/exporter.go b/pkg/metrics/clickhouse/exporter.go index 1f696e7bd..1488eeb9e 100644 --- a/pkg/metrics/clickhouse/exporter.go +++ b/pkg/metrics/clickhouse/exporter.go @@ -43,8 +43,8 @@ import ( type Exporter struct { collectorTimeout time.Duration - // chInstallations maps CHI name to list of hostnames (of string type) of this installation - chInstallations chInstallationsIndex + // crInstallations maps CR name to list of hostnames (of string type) of this CR + crInstallations crInstallationsIndex mutex sync.RWMutex toRemoveFromWatched sync.Map @@ -56,14 +56,14 @@ var _ prometheus.Collector = &Exporter{} // 
NewExporter returns a new instance of Exporter type func NewExporter(collectorTimeout time.Duration) *Exporter { return &Exporter{ - chInstallations: make(map[string]*metrics.WatchedCR), + crInstallations: make(map[string]*metrics.WatchedCR), collectorTimeout: collectorTimeout, } } // getWatchedCHIs func (e *Exporter) getWatchedCHIs() []*metrics.WatchedCR { - return e.chInstallations.slice() + return e.crInstallations.slice() } // Collect implements prometheus.Collector Collect method @@ -94,7 +94,7 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) { log.V(1).Infof("Launching host collectors [%s]", time.Since(start)) var wg = sync.WaitGroup{} - e.chInstallations.walk(func(chi *metrics.WatchedCR, _ *metrics.WatchedCluster, host *metrics.WatchedHost) { + e.crInstallations.walk(func(chi *metrics.WatchedCR, _ *metrics.WatchedCluster, host *metrics.WatchedHost) { wg.Add(1) go func(ctx context.Context, chi *metrics.WatchedCR, host *metrics.WatchedHost, ch chan<- prometheus.Metric) { defer wg.Done() @@ -135,7 +135,7 @@ func (e *Exporter) removeFromWatched(chi *metrics.WatchedCR) { e.mutex.Lock() defer e.mutex.Unlock() log.V(1).Infof("Remove ClickHouseInstallation (%s/%s)", chi.Namespace, chi.Name) - e.chInstallations.remove(chi.IndexKey()) + e.crInstallations.remove(chi.IndexKey()) } // updateWatched updates Exporter.chInstallation map with values from chInstances slice @@ -143,7 +143,7 @@ func (e *Exporter) updateWatched(chi *metrics.WatchedCR) { e.mutex.Lock() defer e.mutex.Unlock() log.V(1).Infof("Update ClickHouseInstallation (%s/%s): %s", chi.Namespace, chi.Name, chi) - e.chInstallations.set(chi.IndexKey(), chi) + e.crInstallations.set(chi.IndexKey(), chi) } // newFetcher returns new Metrics Fetcher for specified host From 82efa10650c4294a20650bc0c394282f5ad3734c Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 14:03:37 +0500 Subject: [PATCH 126/233] env: helm --- deploy/helm/clickhouse-operator/README.md | 2 ++ 1 file changed, 2 
insertions(+) diff --git a/deploy/helm/clickhouse-operator/README.md b/deploy/helm/clickhouse-operator/README.md index 62f288c2b..f20676a1a 100644 --- a/deploy/helm/clickhouse-operator/README.md +++ b/deploy/helm/clickhouse-operator/README.md @@ -75,10 +75,12 @@ crdHook: | commonLabels | object | `{}` | set of labels that will be applied to all the resources for the operator | | configs | object | check the `values.yaml` file for the config content (auto-generated from latest operator release) | clickhouse operator configs | | crdHook.affinity | object | `{}` | affinity for CRD installation job | +| crdHook.annotations | object | `{}` | additional annotations for CRD installation job | | crdHook.enabled | bool | `true` | enable automatic CRD installation/update via pre-install/pre-upgrade hooks when disabled, CRDs must be installed manually using kubectl apply | | crdHook.image.pullPolicy | string | `"IfNotPresent"` | image pull policy for CRD installation job | | crdHook.image.repository | string | `"bitnami/kubectl"` | image repository for CRD installation job | | crdHook.image.tag | string | `"latest"` | image tag for CRD installation job | +| crdHook.imagePullSecrets | list | `[]` | image pull secrets for CRD installation job possible value format `[{"name":"your-secret-name"}]`, check `kubectl explain pod.spec.imagePullSecrets` for details | | crdHook.nodeSelector | object | `{}` | node selector for CRD installation job | | crdHook.resources | object | `{}` | resource limits and requests for CRD installation job | | crdHook.tolerations | list | `[]` | tolerations for CRD installation job | From 1710496db988dc6d06c2dd405456091479a100d7 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 14:05:33 +0500 Subject: [PATCH 127/233] dev: file naming --- pkg/metrics/clickhouse/{chi_index.go => cr_index.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pkg/metrics/clickhouse/{chi_index.go => cr_index.go} (100%) diff --git 
a/pkg/metrics/clickhouse/chi_index.go b/pkg/metrics/clickhouse/cr_index.go similarity index 100% rename from pkg/metrics/clickhouse/chi_index.go rename to pkg/metrics/clickhouse/cr_index.go From d1d5df18e561bdd8d78301f9e51ed8caa39f10d3 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 14:24:34 +0500 Subject: [PATCH 128/233] dev: clarification --- pkg/metrics/clickhouse/exporter.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/metrics/clickhouse/exporter.go b/pkg/metrics/clickhouse/exporter.go index 1488eeb9e..0788366d3 100644 --- a/pkg/metrics/clickhouse/exporter.go +++ b/pkg/metrics/clickhouse/exporter.go @@ -43,7 +43,7 @@ import ( type Exporter struct { collectorTimeout time.Duration - // crInstallations maps CR name to list of hostnames (of string type) of this CR + // crInstallations is an index of watched CRs crInstallations crInstallationsIndex mutex sync.RWMutex @@ -83,7 +83,7 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) { log.V(1).Infof("Collect completed [%s]", time.Since(start)) }() - // Collect should have timeout + // Collection process should have limited duration ctx, cancel := context.WithTimeout(context.Background(), e.collectorTimeout) defer cancel() @@ -130,7 +130,7 @@ func (e *Exporter) cleanup() { log.V(2).Info("Completed cleanup") } -// removeFromWatched deletes record from Exporter.chInstallation map identified by chiName key +// removeFromWatched deletes record from watched index func (e *Exporter) removeFromWatched(chi *metrics.WatchedCR) { e.mutex.Lock() defer e.mutex.Unlock() @@ -138,7 +138,7 @@ func (e *Exporter) removeFromWatched(chi *metrics.WatchedCR) { e.crInstallations.remove(chi.IndexKey()) } -// updateWatched updates Exporter.chInstallation map with values from chInstances slice +// updateWatched updates watched index func (e *Exporter) updateWatched(chi *metrics.WatchedCR) { e.mutex.Lock() defer e.mutex.Unlock() @@ -354,7 +354,7 @@ func (e *Exporter) 
fetchCHI(r *http.Request) (*metrics.WatchedCR, error) { return nil, fmt.Errorf("unable to parse CHI from request") } -// updateWatchedCHI serves HTTPS request to add CHI to the list of watched CHIs +// updateWatchedCHI serves HTTP request to add CHI to the list of watched CHIs func (e *Exporter) updateWatchedCHI(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") if chi, err := e.fetchCHI(r); err == nil { @@ -394,7 +394,7 @@ func (e *Exporter) DiscoveryWatchedCHIs(kubeClient kube.Interface, chopClient *c } func (e *Exporter) processDiscoveredCR(kubeClient kube.Interface, chi *api.ClickHouseInstallation) { - if e.shouldSkipDiscoveredCR(chi) { + if e.shouldNotWatchCR(chi) { log.V(1).Infof("Skip discovered CHI: %s/%s", chi.Namespace, chi.Name) return } @@ -410,9 +410,9 @@ func (e *Exporter) processDiscoveredCR(kubeClient kube.Interface, chi *api.Click e.updateWatched(watchedCHI) } -func (e *Exporter) shouldSkipDiscoveredCR(chi *api.ClickHouseInstallation) bool { +func (e *Exporter) shouldNotWatchCR(chi *api.ClickHouseInstallation) bool { if chi.IsStopped() { - log.V(1).Infof("CHI %s/%s is stopped, skip it", chi.Namespace, chi.Name) + log.V(1).Infof("CHI %s/%s is stopped, unable to watch it", chi.Namespace, chi.Name) return true } From 5b998f1b2a24d84efe1bc01f4efea9a87a48d329 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 14:36:20 +0500 Subject: [PATCH 129/233] dev: extract index constructor --- pkg/metrics/clickhouse/cr_index.go | 4 ++++ pkg/metrics/clickhouse/exporter.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/metrics/clickhouse/cr_index.go b/pkg/metrics/clickhouse/cr_index.go index 585818f3a..5cb299850 100644 --- a/pkg/metrics/clickhouse/cr_index.go +++ b/pkg/metrics/clickhouse/cr_index.go @@ -18,6 +18,10 @@ import "github.com/altinity/clickhouse-operator/pkg/apis/metrics" type crInstallationsIndex map[string]*metrics.WatchedCR +func newCRInstallationsIndex() 
crInstallationsIndex { + return make(map[string]*metrics.WatchedCR) +} + func (i crInstallationsIndex) slice() []*metrics.WatchedCR { res := make([]*metrics.WatchedCR, 0) for _, cr := range i { diff --git a/pkg/metrics/clickhouse/exporter.go b/pkg/metrics/clickhouse/exporter.go index 0788366d3..c2e8d8fce 100644 --- a/pkg/metrics/clickhouse/exporter.go +++ b/pkg/metrics/clickhouse/exporter.go @@ -56,7 +56,7 @@ var _ prometheus.Collector = &Exporter{} // NewExporter returns a new instance of Exporter type func NewExporter(collectorTimeout time.Duration) *Exporter { return &Exporter{ - crInstallations: make(map[string]*metrics.WatchedCR), + crInstallations: newCRInstallationsIndex(), collectorTimeout: collectorTimeout, } } From 22328c4f33d1fbfd0957efb843ed6ca4103e3d47 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 20:52:49 +0500 Subject: [PATCH 130/233] dev: clickhouse metrics fetcher --- .../clickhouse/clickhouse_metrics_fetcher.go | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go index 71ffa5e06..3a9fd2d36 100644 --- a/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go +++ b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go @@ -119,7 +119,7 @@ const ( toString(free_space) AS free_space, toString(total_space) AS total_space FROM system.disks - WHERE type IN ('local','Local') + WHERE type IN ('local','Local') ` queryDetachedPartsSQL = ` @@ -138,30 +138,30 @@ const ( ` ) -// ClickHouseMetricsFetcher specifies clickhouse fetcher object -type ClickHouseMetricsFetcher struct { +// MetricsFetcher specifies clickhouse fetcher object +type MetricsFetcher struct { connectionParams *clickhouse.EndpointConnectionParams tablesRegexp string } -// NewClickHouseFetcher creates new clickhouse fetcher object -func NewClickHouseFetcher( +// NewMetricsFetcher creates new clickhouse fetcher object +func 
NewMetricsFetcher( endpointConnectionParams *clickhouse.EndpointConnectionParams, tablesRegexp string, -) *ClickHouseMetricsFetcher { - return &ClickHouseMetricsFetcher{ +) *MetricsFetcher { + return &MetricsFetcher{ connectionParams: endpointConnectionParams, tablesRegexp: tablesRegexp, } } -func (f *ClickHouseMetricsFetcher) connection() *clickhouse.Connection { +func (f *MetricsFetcher) connection() *clickhouse.Connection { return clickhouse.GetPooledDBConnection(f.connectionParams) } // buildMetricsTableSource returns the FROM clause for the metrics query. // If tablesRegexp is set, it uses merge() to query tables matching the regexp. -func (f *ClickHouseMetricsFetcher) buildMetricsTableSource() string { +func (f *MetricsFetcher) buildMetricsTableSource() string { if f.tablesRegexp == "" { return "merge('system','^(metrics|custom_metrics)$')" } @@ -169,7 +169,7 @@ func (f *ClickHouseMetricsFetcher) buildMetricsTableSource() string { } // getClickHouseQueryMetrics requests metrics data from ClickHouse -func (f *ClickHouseMetricsFetcher) getClickHouseQueryMetrics(ctx context.Context) (Table, error) { +func (f *MetricsFetcher) getClickHouseQueryMetrics(ctx context.Context) (Table, error) { metricsSQL := fmt.Sprintf(queryMetricsSQLTemplate, f.buildMetricsTableSource()) return f.clickHouseQueryScanRows( ctx, @@ -185,7 +185,7 @@ func (f *ClickHouseMetricsFetcher) getClickHouseQueryMetrics(ctx context.Context } // getClickHouseSystemParts requests data sizes from ClickHouse -func (f *ClickHouseMetricsFetcher) getClickHouseSystemParts(ctx context.Context) (Table, error) { +func (f *MetricsFetcher) getClickHouseSystemParts(ctx context.Context) (Table, error) { return f.clickHouseQueryScanRows( ctx, querySystemPartsSQL, @@ -207,7 +207,7 @@ func (f *ClickHouseMetricsFetcher) getClickHouseSystemParts(ctx context.Context) } // getClickHouseQuerySystemReplicas requests replica information from ClickHouse -func (f *ClickHouseMetricsFetcher) 
getClickHouseQuerySystemReplicas(ctx context.Context) (Table, error) { +func (f *MetricsFetcher) getClickHouseQuerySystemReplicas(ctx context.Context) (Table, error) { return f.clickHouseQueryScanRows( ctx, querySystemReplicasSQL, @@ -222,7 +222,7 @@ func (f *ClickHouseMetricsFetcher) getClickHouseQuerySystemReplicas(ctx context. } // getClickHouseQueryMutations requests mutations information from ClickHouse -func (f *ClickHouseMetricsFetcher) getClickHouseQueryMutations(ctx context.Context) (Table, error) { +func (f *MetricsFetcher) getClickHouseQueryMutations(ctx context.Context) (Table, error) { return f.clickHouseQueryScanRows( ctx, queryMutationsSQL, @@ -237,7 +237,7 @@ func (f *ClickHouseMetricsFetcher) getClickHouseQueryMutations(ctx context.Conte } // getClickHouseQuerySystemDisks requests used disks information from ClickHouse -func (f *ClickHouseMetricsFetcher) getClickHouseQuerySystemDisks(ctx context.Context) (Table, error) { +func (f *MetricsFetcher) getClickHouseQuerySystemDisks(ctx context.Context) (Table, error) { return f.clickHouseQueryScanRows( ctx, querySystemDisksSQL, @@ -252,7 +252,7 @@ func (f *ClickHouseMetricsFetcher) getClickHouseQuerySystemDisks(ctx context.Con } // getClickHouseQueryDetachedParts requests detached parts reasons from ClickHouse -func (f *ClickHouseMetricsFetcher) getClickHouseQueryDetachedParts(ctx context.Context) (Table, error) { +func (f *MetricsFetcher) getClickHouseQueryDetachedParts(ctx context.Context) (Table, error) { return f.clickHouseQueryScanRows( ctx, queryDetachedPartsSQL, @@ -267,7 +267,7 @@ func (f *ClickHouseMetricsFetcher) getClickHouseQueryDetachedParts(ctx context.C } // clickHouseQueryScanRows scan all rows by external scan function -func (f *ClickHouseMetricsFetcher) clickHouseQueryScanRows( +func (f *MetricsFetcher) clickHouseQueryScanRows( ctx context.Context, sql string, scanner ScanFunction, From 92e44bede6e475bbff5d500ee2f75da309e7a31c Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 
7 Feb 2026 20:53:27 +0500 Subject: [PATCH 131/233] dev: exporter --- pkg/metrics/clickhouse/exporter.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/pkg/metrics/clickhouse/exporter.go b/pkg/metrics/clickhouse/exporter.go index c2e8d8fce..04ee81e6c 100644 --- a/pkg/metrics/clickhouse/exporter.go +++ b/pkg/metrics/clickhouse/exporter.go @@ -147,7 +147,7 @@ func (e *Exporter) updateWatched(chi *metrics.WatchedCR) { } // newFetcher returns new Metrics Fetcher for specified host -func (e *Exporter) newHostFetcher(host *metrics.WatchedHost) *ClickHouseMetricsFetcher { +func (e *Exporter) newHostFetcher(host *metrics.WatchedHost) *MetricsFetcher { // Make base cluster connection params clusterConnectionParams := clickhouse.NewClusterConnectionParamsFromCHOpConfig(chop.Config()) // Adjust base cluster connection params with per-host props @@ -167,7 +167,7 @@ func (e *Exporter) newHostFetcher(host *metrics.WatchedHost) *ClickHouseMetricsF clusterConnectionParams.Port = int(host.HTTPSPort) } - return NewClickHouseFetcher( + return NewMetricsFetcher( clusterConnectionParams.NewEndpointConnectionParams(host.Hostname), chop.Config().ClickHouse.Metrics.TablesRegexp, ) @@ -180,27 +180,27 @@ func (e *Exporter) collectHostMetrics(ctx context.Context, chi *metrics.WatchedC wg := sync.WaitGroup{} wg.Add(6) - go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) { + go func(ctx context.Context, host *metrics.WatchedHost, fetcher *MetricsFetcher, writer *CHIPrometheusWriter) { e.collectHostSystemMetrics(ctx, host, fetcher, writer) wg.Done() }(ctx, host, fetcher, writer) - go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) { + go func(ctx context.Context, host *metrics.WatchedHost, fetcher *MetricsFetcher, writer *CHIPrometheusWriter) { e.collectHostSystemPartsMetrics(ctx, host, fetcher, writer) 
wg.Done() }(ctx, host, fetcher, writer) - go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) { + go func(ctx context.Context, host *metrics.WatchedHost, fetcher *MetricsFetcher, writer *CHIPrometheusWriter) { e.collectHostSystemReplicasMetrics(ctx, host, fetcher, writer) wg.Done() }(ctx, host, fetcher, writer) - go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) { + go func(ctx context.Context, host *metrics.WatchedHost, fetcher *MetricsFetcher, writer *CHIPrometheusWriter) { e.collectHostMutationsMetrics(ctx, host, fetcher, writer) wg.Done() }(ctx, host, fetcher, writer) - go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) { + go func(ctx context.Context, host *metrics.WatchedHost, fetcher *MetricsFetcher, writer *CHIPrometheusWriter) { e.collectHostSystemDisksMetrics(ctx, host, fetcher, writer) wg.Done() }(ctx, host, fetcher, writer) - go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) { + go func(ctx context.Context, host *metrics.WatchedHost, fetcher *MetricsFetcher, writer *CHIPrometheusWriter) { e.collectHostDetachedPartsMetrics(ctx, host, fetcher, writer) wg.Done() }(ctx, host, fetcher, writer) @@ -210,7 +210,7 @@ func (e *Exporter) collectHostMetrics(ctx context.Context, chi *metrics.WatchedC func (e *Exporter) collectHostSystemMetrics( ctx context.Context, host *metrics.WatchedHost, - fetcher *ClickHouseMetricsFetcher, + fetcher *MetricsFetcher, writer *CHIPrometheusWriter, ) { log.V(1).Infof("Querying system metrics for host %s", host.Hostname) @@ -231,7 +231,7 @@ func (e *Exporter) collectHostSystemMetrics( func (e *Exporter) collectHostSystemPartsMetrics( ctx context.Context, host *metrics.WatchedHost, - fetcher *ClickHouseMetricsFetcher, + fetcher *MetricsFetcher, writer 
*CHIPrometheusWriter, ) { log.V(1).Infof("Querying table sizes for host %s", host.Hostname) @@ -255,7 +255,7 @@ func (e *Exporter) collectHostSystemPartsMetrics( func (e *Exporter) collectHostSystemReplicasMetrics( ctx context.Context, host *metrics.WatchedHost, - fetcher *ClickHouseMetricsFetcher, + fetcher *MetricsFetcher, writer *CHIPrometheusWriter, ) { log.V(1).Infof("Querying system replicas for host %s", host.Hostname) @@ -276,7 +276,7 @@ func (e *Exporter) collectHostSystemReplicasMetrics( func (e *Exporter) collectHostMutationsMetrics( ctx context.Context, host *metrics.WatchedHost, - fetcher *ClickHouseMetricsFetcher, + fetcher *MetricsFetcher, writer *CHIPrometheusWriter, ) { log.V(1).Infof("Querying mutations for host %s", host.Hostname) @@ -297,7 +297,7 @@ func (e *Exporter) collectHostMutationsMetrics( func (e *Exporter) collectHostSystemDisksMetrics( ctx context.Context, host *metrics.WatchedHost, - fetcher *ClickHouseMetricsFetcher, + fetcher *MetricsFetcher, writer *CHIPrometheusWriter, ) { log.V(1).Infof("Querying disks for host %s", host.Hostname) @@ -318,7 +318,7 @@ func (e *Exporter) collectHostSystemDisksMetrics( func (e *Exporter) collectHostDetachedPartsMetrics( ctx context.Context, host *metrics.WatchedHost, - fetcher *ClickHouseMetricsFetcher, + fetcher *MetricsFetcher, writer *CHIPrometheusWriter, ) { log.V(1).Infof("Querying detached parts for host %s", host.Hostname) From bda3cb445206f31cbb6731b549c6a7c717c8d5fa Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 22:04:06 +0500 Subject: [PATCH 132/233] dev: minor --- pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go index 3a9fd2d36..aea697f3a 100644 --- a/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go +++ b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go @@ -155,6 +155,7 @@ func NewMetricsFetcher( 
} } +// connection is a connection getter func (f *MetricsFetcher) connection() *clickhouse.Connection { return clickhouse.GetPooledDBConnection(f.connectionParams) } From 9c5d07bdad4bbe1bc8b04112c00efef55a577bc6 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 22:12:05 +0500 Subject: [PATCH 133/233] dev: clarify --- pkg/metrics/clickhouse/exporter.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/metrics/clickhouse/exporter.go b/pkg/metrics/clickhouse/exporter.go index 04ee81e6c..53a5590fe 100644 --- a/pkg/metrics/clickhouse/exporter.go +++ b/pkg/metrics/clickhouse/exporter.go @@ -394,7 +394,7 @@ func (e *Exporter) DiscoveryWatchedCHIs(kubeClient kube.Interface, chopClient *c } func (e *Exporter) processDiscoveredCR(kubeClient kube.Interface, chi *api.ClickHouseInstallation) { - if e.shouldNotWatchCR(chi) { + if !e.shouldWatchCR(chi) { log.V(1).Infof("Skip discovered CHI: %s/%s", chi.Namespace, chi.Name) return } @@ -410,11 +410,11 @@ func (e *Exporter) processDiscoveredCR(kubeClient kube.Interface, chi *api.Click e.updateWatched(watchedCHI) } -func (e *Exporter) shouldNotWatchCR(chi *api.ClickHouseInstallation) bool { +func (e *Exporter) shouldWatchCR(chi *api.ClickHouseInstallation) bool { if chi.IsStopped() { log.V(1).Infof("CHI %s/%s is stopped, unable to watch it", chi.Namespace, chi.Name) - return true + return false } - return false + return true } From 873ad793758f006ae5e164fc78168b6cd928759f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 22:20:50 +0500 Subject: [PATCH 134/233] dev: standalone metrics collector --- pkg/metrics/clickhouse/collector.go | 145 ++++++++++++++++++++++++++++ 1 file changed, 145 insertions(+) create mode 100644 pkg/metrics/clickhouse/collector.go diff --git a/pkg/metrics/clickhouse/collector.go b/pkg/metrics/clickhouse/collector.go new file mode 100644 index 000000000..604b3c803 --- /dev/null +++ b/pkg/metrics/clickhouse/collector.go @@ -0,0 +1,145 @@ 
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clickhouse + +import ( + "context" + "sync" + "time" + + log "github.com/golang/glog" + + "github.com/altinity/clickhouse-operator/pkg/apis/metrics" +) + +// Collector collects metrics from a single ClickHouse host +type Collector struct { + fetcher *MetricsFetcher + writer *CHIPrometheusWriter +} + +// NewCollector creates a new Collector instance +func NewCollector(fetcher *MetricsFetcher, writer *CHIPrometheusWriter) *Collector { + return &Collector{ + fetcher: fetcher, + writer: writer, + } +} + +// CollectHostMetrics runs all metric collectors for a host in parallel +func (c *Collector) CollectHostMetrics(ctx context.Context, host *metrics.WatchedHost) { + wg := sync.WaitGroup{} + wg.Add(6) + go func() { defer wg.Done(); c.collectSystemMetrics(ctx, host) }() + go func() { defer wg.Done(); c.collectSystemParts(ctx, host) }() + go func() { defer wg.Done(); c.collectSystemReplicas(ctx, host) }() + go func() { defer wg.Done(); c.collectMutations(ctx, host) }() + go func() { defer wg.Done(); c.collectSystemDisks(ctx, host) }() + go func() { defer wg.Done(); c.collectDetachedParts(ctx, host) }() + wg.Wait() +} + +func (c *Collector) collectSystemMetrics(ctx context.Context, host *metrics.WatchedHost) { + log.V(1).Infof("Querying system metrics for host %s", host.Hostname) + start := time.Now() + metrics, err := 
c.fetcher.getClickHouseQueryMetrics(ctx) + elapsed := time.Since(start) + if err == nil { + log.V(1).Infof("Extracted [%s] %d system metrics for host %s", elapsed, len(metrics), host.Hostname) + c.writer.WriteMetrics(metrics) + c.writer.WriteOKFetch("system.metrics") + } else { + log.Warningf("Error [%s] querying system.metrics for host %s err: %s", elapsed, host.Hostname, err) + c.writer.WriteErrorFetch("system.metrics") + } +} + +func (c *Collector) collectSystemParts(ctx context.Context, host *metrics.WatchedHost) { + log.V(1).Infof("Querying table sizes for host %s", host.Hostname) + start := time.Now() + systemPartsData, err := c.fetcher.getClickHouseSystemParts(ctx) + elapsed := time.Since(start) + if err == nil { + log.V(1).Infof("Extracted [%s] %d table sizes for host %s", elapsed, len(systemPartsData), host.Hostname) + c.writer.WriteTableSizes(systemPartsData) + c.writer.WriteOKFetch("table sizes") + c.writer.WriteSystemParts(systemPartsData) + c.writer.WriteOKFetch("system parts") + } else { + log.Warningf("Error [%s] querying system.parts for host %s err: %s", elapsed, host.Hostname, err) + c.writer.WriteErrorFetch("table sizes") + c.writer.WriteErrorFetch("system parts") + } +} + +func (c *Collector) collectSystemReplicas(ctx context.Context, host *metrics.WatchedHost) { + log.V(1).Infof("Querying system replicas for host %s", host.Hostname) + start := time.Now() + systemReplicas, err := c.fetcher.getClickHouseQuerySystemReplicas(ctx) + elapsed := time.Since(start) + if err == nil { + log.V(1).Infof("Extracted [%s] %d system replicas for host %s", elapsed, len(systemReplicas), host.Hostname) + c.writer.WriteSystemReplicas(systemReplicas) + c.writer.WriteOKFetch("system.replicas") + } else { + log.Warningf("Error [%s] querying system.replicas for host %s err: %s", elapsed, host.Hostname, err) + c.writer.WriteErrorFetch("system.replicas") + } +} + +func (c *Collector) collectMutations(ctx context.Context, host *metrics.WatchedHost) { + 
log.V(1).Infof("Querying mutations for host %s", host.Hostname) + start := time.Now() + mutations, err := c.fetcher.getClickHouseQueryMutations(ctx) + elapsed := time.Since(start) + if err == nil { + log.V(1).Infof("Extracted [%s] %d mutations for %s", elapsed, len(mutations), host.Hostname) + c.writer.WriteMutations(mutations) + c.writer.WriteOKFetch("system.mutations") + } else { + log.Warningf("Error [%s] querying system.mutations for host %s err: %s", elapsed, host.Hostname, err) + c.writer.WriteErrorFetch("system.mutations") + } +} + +func (c *Collector) collectSystemDisks(ctx context.Context, host *metrics.WatchedHost) { + log.V(1).Infof("Querying disks for host %s", host.Hostname) + start := time.Now() + disks, err := c.fetcher.getClickHouseQuerySystemDisks(ctx) + elapsed := time.Since(start) + if err == nil { + log.V(1).Infof("Extracted [%s] %d disks for host %s", elapsed, len(disks), host.Hostname) + c.writer.WriteSystemDisks(disks) + c.writer.WriteOKFetch("system.disks") + } else { + log.Warningf("Error [%s] querying system.disks for host %s err: %s", elapsed, host.Hostname, err) + c.writer.WriteErrorFetch("system.disks") + } +} + +func (c *Collector) collectDetachedParts(ctx context.Context, host *metrics.WatchedHost) { + log.V(1).Infof("Querying detached parts for host %s", host.Hostname) + start := time.Now() + detachedParts, err := c.fetcher.getClickHouseQueryDetachedParts(ctx) + elapsed := time.Since(start) + if err == nil { + log.V(1).Infof("Extracted [%s] %d detached parts info for host %s", elapsed, len(detachedParts), host.Hostname) + c.writer.WriteDetachedParts(detachedParts) + c.writer.WriteOKFetch("system.detached_parts") + } else { + log.Warningf("Error [%s] querying system.detached_parts for host %s err: %s", elapsed, host.Hostname, err) + c.writer.WriteErrorFetch("system.detached_parts") + } +} From 97d046124db33a9355d916b6f2bf4523f634a676 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 22:21:06 +0500 Subject: [PATCH 
135/233] dev: extract collector --- pkg/metrics/clickhouse/exporter.go | 164 +---------------------------- 1 file changed, 5 insertions(+), 159 deletions(-) diff --git a/pkg/metrics/clickhouse/exporter.go b/pkg/metrics/clickhouse/exporter.go index 53a5590fe..1b1db5cb4 100644 --- a/pkg/metrics/clickhouse/exporter.go +++ b/pkg/metrics/clickhouse/exporter.go @@ -175,165 +175,11 @@ func (e *Exporter) newHostFetcher(host *metrics.WatchedHost) *MetricsFetcher { // collectHostMetrics collects metrics from one host and writes them into chan func (e *Exporter) collectHostMetrics(ctx context.Context, chi *metrics.WatchedCR, host *metrics.WatchedHost, c chan<- prometheus.Metric) { - fetcher := e.newHostFetcher(host) - writer := NewCHIPrometheusWriter(c, chi, host) - - wg := sync.WaitGroup{} - wg.Add(6) - go func(ctx context.Context, host *metrics.WatchedHost, fetcher *MetricsFetcher, writer *CHIPrometheusWriter) { - e.collectHostSystemMetrics(ctx, host, fetcher, writer) - wg.Done() - }(ctx, host, fetcher, writer) - go func(ctx context.Context, host *metrics.WatchedHost, fetcher *MetricsFetcher, writer *CHIPrometheusWriter) { - e.collectHostSystemPartsMetrics(ctx, host, fetcher, writer) - wg.Done() - }(ctx, host, fetcher, writer) - go func(ctx context.Context, host *metrics.WatchedHost, fetcher *MetricsFetcher, writer *CHIPrometheusWriter) { - e.collectHostSystemReplicasMetrics(ctx, host, fetcher, writer) - wg.Done() - }(ctx, host, fetcher, writer) - go func(ctx context.Context, host *metrics.WatchedHost, fetcher *MetricsFetcher, writer *CHIPrometheusWriter) { - e.collectHostMutationsMetrics(ctx, host, fetcher, writer) - wg.Done() - }(ctx, host, fetcher, writer) - go func(ctx context.Context, host *metrics.WatchedHost, fetcher *MetricsFetcher, writer *CHIPrometheusWriter) { - e.collectHostSystemDisksMetrics(ctx, host, fetcher, writer) - wg.Done() - }(ctx, host, fetcher, writer) - go func(ctx context.Context, host *metrics.WatchedHost, fetcher *MetricsFetcher, writer 
*CHIPrometheusWriter) { - e.collectHostDetachedPartsMetrics(ctx, host, fetcher, writer) - wg.Done() - }(ctx, host, fetcher, writer) - wg.Wait() -} - -func (e *Exporter) collectHostSystemMetrics( - ctx context.Context, - host *metrics.WatchedHost, - fetcher *MetricsFetcher, - writer *CHIPrometheusWriter, -) { - log.V(1).Infof("Querying system metrics for host %s", host.Hostname) - start := time.Now() - metrics, err := fetcher.getClickHouseQueryMetrics(ctx) - elapsed := time.Since(start) - if err == nil { - log.V(1).Infof("Extracted [%s] %d system metrics for host %s", elapsed, len(metrics), host.Hostname) - writer.WriteMetrics(metrics) - writer.WriteOKFetch("system.metrics") - } else { - // In case of an error fetching data from clickhouse store CHI name in e.cleanup - log.Warningf("Error [%s] querying system.metrics for host %s err: %s", elapsed, host.Hostname, err) - writer.WriteErrorFetch("system.metrics") - } -} - -func (e *Exporter) collectHostSystemPartsMetrics( - ctx context.Context, - host *metrics.WatchedHost, - fetcher *MetricsFetcher, - writer *CHIPrometheusWriter, -) { - log.V(1).Infof("Querying table sizes for host %s", host.Hostname) - start := time.Now() - systemPartsData, err := fetcher.getClickHouseSystemParts(ctx) - elapsed := time.Since(start) - if err == nil { - log.V(1).Infof("Extracted [%s] %d table sizes for host %s", elapsed, len(systemPartsData), host.Hostname) - writer.WriteTableSizes(systemPartsData) - writer.WriteOKFetch("table sizes") - writer.WriteSystemParts(systemPartsData) - writer.WriteOKFetch("system parts") - } else { - // In case of an error fetching data from clickhouse store CHI name in e.cleanup - log.Warningf("Error [%s] querying system.parts for host %s err: %s", elapsed, host.Hostname, err) - writer.WriteErrorFetch("table sizes") - writer.WriteErrorFetch("system parts") - } -} - -func (e *Exporter) collectHostSystemReplicasMetrics( - ctx context.Context, - host *metrics.WatchedHost, - fetcher *MetricsFetcher, - writer 
*CHIPrometheusWriter, -) { - log.V(1).Infof("Querying system replicas for host %s", host.Hostname) - start := time.Now() - systemReplicas, err := fetcher.getClickHouseQuerySystemReplicas(ctx) - elapsed := time.Since(start) - if err == nil { - log.V(1).Infof("Extracted [%s] %d system replicas for host %s", elapsed, len(systemReplicas), host.Hostname) - writer.WriteSystemReplicas(systemReplicas) - writer.WriteOKFetch("system.replicas") - } else { - // In case of an error fetching data from clickhouse store CHI name in e.cleanup - log.Warningf("Error [%s] querying system.replicas for host %s err: %s", elapsed, host.Hostname, err) - writer.WriteErrorFetch("system.replicas") - } -} - -func (e *Exporter) collectHostMutationsMetrics( - ctx context.Context, - host *metrics.WatchedHost, - fetcher *MetricsFetcher, - writer *CHIPrometheusWriter, -) { - log.V(1).Infof("Querying mutations for host %s", host.Hostname) - start := time.Now() - mutations, err := fetcher.getClickHouseQueryMutations(ctx) - elapsed := time.Since(start) - if err == nil { - log.V(1).Infof("Extracted [%s] %d mutations for %s", elapsed, len(mutations), host.Hostname) - writer.WriteMutations(mutations) - writer.WriteOKFetch("system.mutations") - } else { - // In case of an error fetching data from clickhouse store CHI name in e.cleanup - log.Warningf("Error [%s] querying system.mutations for host %s err: %s", elapsed, host.Hostname, err) - writer.WriteErrorFetch("system.mutations") - } -} - -func (e *Exporter) collectHostSystemDisksMetrics( - ctx context.Context, - host *metrics.WatchedHost, - fetcher *MetricsFetcher, - writer *CHIPrometheusWriter, -) { - log.V(1).Infof("Querying disks for host %s", host.Hostname) - start := time.Now() - disks, err := fetcher.getClickHouseQuerySystemDisks(ctx) - elapsed := time.Since(start) - if err == nil { - log.V(1).Infof("Extracted [%s] %d disks for host %s", elapsed, len(disks), host.Hostname) - writer.WriteSystemDisks(disks) - writer.WriteOKFetch("system.disks") - } 
else { - // In case of an error fetching data from clickhouse store CHI name in e.cleanup - log.Warningf("Error [%s] querying system.disks for host %s err: %s", elapsed, host.Hostname, err) - writer.WriteErrorFetch("system.disks") - } -} - -func (e *Exporter) collectHostDetachedPartsMetrics( - ctx context.Context, - host *metrics.WatchedHost, - fetcher *MetricsFetcher, - writer *CHIPrometheusWriter, -) { - log.V(1).Infof("Querying detached parts for host %s", host.Hostname) - start := time.Now() - detachedParts, err := fetcher.getClickHouseQueryDetachedParts(ctx) - elapsed := time.Since(start) - if err == nil { - log.V(1).Infof("Extracted [%s] %d detached parts info for host %s", elapsed, len(detachedParts), host.Hostname) - writer.WriteDetachedParts(detachedParts) - writer.WriteOKFetch("system.detached_parts") - } else { - // In case of an error fetching data from clickhouse store CHI name in e.cleanup - log.Warningf("Error [%s] querying system.detached_parts for host %s err: %s", elapsed, host.Hostname, err) - writer.WriteErrorFetch("system.detached_parts") - } + collector := NewCollector( + e.newHostFetcher(host), + NewCHIPrometheusWriter(c, chi, host), + ) + collector.CollectHostMetrics(ctx, host) } // getWatchedCHI serves HTTP request to get list of watched CHIs From 71ee092da87f6b171b4f4f04e42510bf746ad05c Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 22:52:00 +0500 Subject: [PATCH 136/233] dev: extract registry as an entity --- pkg/metrics/clickhouse/registry.go | 87 ++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 pkg/metrics/clickhouse/registry.go diff --git a/pkg/metrics/clickhouse/registry.go b/pkg/metrics/clickhouse/registry.go new file mode 100644 index 000000000..469f1f072 --- /dev/null +++ b/pkg/metrics/clickhouse/registry.go @@ -0,0 +1,87 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clickhouse + +import ( + "sync" + + log "github.com/golang/glog" + + "github.com/altinity/clickhouse-operator/pkg/apis/metrics" +) + +// CRRegistry is a thread-safe storage for watched Custom Resources +type CRRegistry struct { + index crInstallationsIndex + mutex sync.RWMutex + toRemove sync.Map +} + +// NewCRRegistry creates a new CRRegistry instance +func NewCRRegistry() *CRRegistry { + return &CRRegistry{ + index: newCRInstallationsIndex(), + } +} + +// Add adds or updates a CR in the registry +func (r *CRRegistry) Add(cr *metrics.WatchedCR) { + r.mutex.Lock() + defer r.mutex.Unlock() + log.V(1).Infof("Registry: Add CR (%s/%s): %s", cr.Namespace, cr.Name, cr) + r.index.set(cr.IndexKey(), cr) +} + +// Remove removes a CR from the registry +func (r *CRRegistry) Remove(cr *metrics.WatchedCR) { + r.mutex.Lock() + defer r.mutex.Unlock() + log.V(1).Infof("Registry: Remove CR (%s/%s)", cr.Namespace, cr.Name) + r.index.remove(cr.IndexKey()) +} + +// EnqueueRemove enqueues a CR for removal (will be removed on next Cleanup) +func (r *CRRegistry) EnqueueRemove(cr *metrics.WatchedCR) { + r.toRemove.Store(cr, struct{}{}) +} + +// Cleanup processes all CRs enqueued for removal +func (r *CRRegistry) Cleanup() { + log.V(2).Info("Registry: Starting cleanup") + r.toRemove.Range(func(key, value interface{}) bool { + if cr, ok := key.(*metrics.WatchedCR); ok { + r.toRemove.Delete(key) + r.Remove(cr) + 
log.V(1).Infof("Registry: Cleaned up CR (%s/%s)", cr.Name, cr.Namespace) + } + return true + }) + log.V(2).Info("Registry: Completed cleanup") +} + +// List returns all watched CRs as a slice +func (r *CRRegistry) List() []*metrics.WatchedCR { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.index.slice() +} + +// Walk iterates over all hosts while holding an exclusive lock +// Use this when the iteration may modify state +func (r *CRRegistry) Walk(fn func(*metrics.WatchedCR, *metrics.WatchedCluster, *metrics.WatchedHost)) { + r.mutex.Lock() + defer r.mutex.Unlock() + r.index.walk(fn) +} From 2cc5fe35aa2aab492c26516e9169aa4279bd95c3 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 22:52:28 +0500 Subject: [PATCH 137/233] dev: exporter uses external registry --- cmd/metrics_exporter/app/metrics_exporter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/metrics_exporter/app/metrics_exporter.go b/cmd/metrics_exporter/app/metrics_exporter.go index 800377974..fcc95527d 100644 --- a/cmd/metrics_exporter/app/metrics_exporter.go +++ b/cmd/metrics_exporter/app/metrics_exporter.go @@ -96,7 +96,7 @@ func Run() { chop.New(kubeClient, chopClient, chopConfigFile) log.Info(chop.Config().String(true)) - exporter := clickhouse.StartMetricsREST( + exporter, _ := clickhouse.StartMetricsREST( metricsEP, metricsPath, chop.Config().ClickHouse.Metrics.Timeouts.Collect, From 987c634f9270cbc7691ad57186b2c99122610004 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 22:52:53 +0500 Subject: [PATCH 138/233] dev: exporter and external registry --- pkg/metrics/clickhouse/exporter.go | 128 ++++------------------------- 1 file changed, 18 insertions(+), 110 deletions(-) diff --git a/pkg/metrics/clickhouse/exporter.go b/pkg/metrics/clickhouse/exporter.go index 1b1db5cb4..3f370018f 100644 --- a/pkg/metrics/clickhouse/exporter.go +++ b/pkg/metrics/clickhouse/exporter.go @@ -16,9 +16,6 @@ package clickhouse import ( "context" - 
"encoding/json" - "fmt" - "net/http" "sync" "time" @@ -42,34 +39,24 @@ import ( // Exporter implements prometheus.Collector interface type Exporter struct { collectorTimeout time.Duration - - // crInstallations is an index of watched CRs - crInstallations crInstallationsIndex - - mutex sync.RWMutex - toRemoveFromWatched sync.Map + registry *CRRegistry } // Type compatibility var _ prometheus.Collector = &Exporter{} // NewExporter returns a new instance of Exporter type -func NewExporter(collectorTimeout time.Duration) *Exporter { +func NewExporter(registry *CRRegistry, collectorTimeout time.Duration) *Exporter { return &Exporter{ - crInstallations: newCRInstallationsIndex(), + registry: registry, collectorTimeout: collectorTimeout, } } -// getWatchedCHIs -func (e *Exporter) getWatchedCHIs() []*metrics.WatchedCR { - return e.crInstallations.slice() -} - // Collect implements prometheus.Collector Collect method func (e *Exporter) Collect(ch chan<- prometheus.Metric) { // Run cleanup on each collect - e.cleanup() + e.registry.Cleanup() if ch == nil { log.Warning("Prometheus channel is closed. 
Unable to write metrics") @@ -87,19 +74,15 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) { ctx, cancel := context.WithTimeout(context.Background(), e.collectorTimeout) defer cancel() - // This method may be called concurrently and must therefore be implemented in a concurrency safe way - e.mutex.Lock() - defer e.mutex.Unlock() - log.V(1).Infof("Launching host collectors [%s]", time.Since(start)) var wg = sync.WaitGroup{} - e.crInstallations.walk(func(chi *metrics.WatchedCR, _ *metrics.WatchedCluster, host *metrics.WatchedHost) { + e.registry.Walk(func(cr *metrics.WatchedCR, _ *metrics.WatchedCluster, host *metrics.WatchedHost) { wg.Add(1) - go func(ctx context.Context, chi *metrics.WatchedCR, host *metrics.WatchedHost, ch chan<- prometheus.Metric) { + go func(ctx context.Context, cr *metrics.WatchedCR, host *metrics.WatchedHost, ch chan<- prometheus.Metric) { defer wg.Done() - e.collectHostMetrics(ctx, chi, host, ch) - }(ctx, chi, host, ch) + e.collectHostMetrics(ctx, cr, host, ch) + }(ctx, cr, host, ch) }) wg.Wait() } @@ -109,44 +92,16 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { prometheus.DescribeByCollect(e, ch) } -// enqueueToRemoveFromWatched -func (e *Exporter) enqueueToRemoveFromWatched(chi *metrics.WatchedCR) { - e.toRemoveFromWatched.Store(chi, struct{}{}) -} - -// cleanup cleans all pending for cleaning -func (e *Exporter) cleanup() { - // Clean up all pending for cleaning CHIs - log.V(2).Info("Starting cleanup") - e.toRemoveFromWatched.Range(func(key, value interface{}) bool { - switch key.(type) { - case *metrics.WatchedCR: - e.toRemoveFromWatched.Delete(key) - e.removeFromWatched(key.(*metrics.WatchedCR)) - log.V(1).Infof("Removed ClickHouseInstallation (%s/%s) from Exporter", key.(*metrics.WatchedCR).Name, key.(*metrics.WatchedCR).Namespace) - } - return true - }) - log.V(2).Info("Completed cleanup") -} - -// removeFromWatched deletes record from watched index -func (e *Exporter) removeFromWatched(chi *metrics.WatchedCR) 
{ - e.mutex.Lock() - defer e.mutex.Unlock() - log.V(1).Infof("Remove ClickHouseInstallation (%s/%s)", chi.Namespace, chi.Name) - e.crInstallations.remove(chi.IndexKey()) -} - -// updateWatched updates watched index -func (e *Exporter) updateWatched(chi *metrics.WatchedCR) { - e.mutex.Lock() - defer e.mutex.Unlock() - log.V(1).Infof("Update ClickHouseInstallation (%s/%s): %s", chi.Namespace, chi.Name, chi) - e.crInstallations.set(chi.IndexKey(), chi) +// collectHostMetrics collects metrics from one host and writes them into chan +func (e *Exporter) collectHostMetrics(ctx context.Context, chi *metrics.WatchedCR, host *metrics.WatchedHost, c chan<- prometheus.Metric) { + collector := NewCollector( + e.newHostFetcher(host), + NewCHIPrometheusWriter(c, chi, host), + ) + collector.CollectHostMetrics(ctx, host) } -// newFetcher returns new Metrics Fetcher for specified host +// newHostFetcher returns new Metrics Fetcher for specified host func (e *Exporter) newHostFetcher(host *metrics.WatchedHost) *MetricsFetcher { // Make base cluster connection params clusterConnectionParams := clickhouse.NewClusterConnectionParamsFromCHOpConfig(chop.Config()) @@ -173,53 +128,6 @@ func (e *Exporter) newHostFetcher(host *metrics.WatchedHost) *MetricsFetcher { ) } -// collectHostMetrics collects metrics from one host and writes them into chan -func (e *Exporter) collectHostMetrics(ctx context.Context, chi *metrics.WatchedCR, host *metrics.WatchedHost, c chan<- prometheus.Metric) { - collector := NewCollector( - e.newHostFetcher(host), - NewCHIPrometheusWriter(c, chi, host), - ) - collector.CollectHostMetrics(ctx, host) -} - -// getWatchedCHI serves HTTP request to get list of watched CHIs -func (e *Exporter) getWatchedCHI(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - _ = json.NewEncoder(w).Encode(e.getWatchedCHIs()) -} - -// fetchCHI decodes chi from the request -func (e *Exporter) fetchCHI(r *http.Request) (*metrics.WatchedCR, error) { 
- chi := &metrics.WatchedCR{} - if err := json.NewDecoder(r.Body).Decode(chi); err == nil { - if chi.IsValid() { - return chi, nil - } - } - - return nil, fmt.Errorf("unable to parse CHI from request") -} - -// updateWatchedCHI serves HTTP request to add CHI to the list of watched CHIs -func (e *Exporter) updateWatchedCHI(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - if chi, err := e.fetchCHI(r); err == nil { - e.updateWatched(chi) - } else { - http.Error(w, err.Error(), http.StatusNotAcceptable) - } -} - -// deleteWatchedCHI serves HTTP request to delete CHI from the list of watched CHIs -func (e *Exporter) deleteWatchedCHI(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - if chi, err := e.fetchCHI(r); err == nil { - e.enqueueToRemoveFromWatched(chi) - } else { - http.Error(w, err.Error(), http.StatusNotAcceptable) - } -} - // DiscoveryWatchedCHIs discovers all ClickHouseInstallation objects available for monitoring and adds them to watched list func (e *Exporter) DiscoveryWatchedCHIs(kubeClient kube.Interface, chopClient *chopAPI.Clientset) { // Get all CHI objects from watched namespace(s) @@ -252,8 +160,8 @@ func (e *Exporter) processDiscoveredCR(kubeClient kube.Interface, chi *api.Click normalized, _ := normalizer.CreateTemplated(chi, normalizerCommon.NewOptions[api.ClickHouseInstallation]()) - watchedCHI := metrics.NewWatchedCR(normalized) - e.updateWatched(watchedCHI) + watchedCR := metrics.NewWatchedCR(normalized) + e.registry.Add(watchedCR) } func (e *Exporter) shouldWatchCR(chi *api.ClickHouseInstallation) bool { From 1e317f5cec7b328c9ccc2b7cada5da1cddeef889 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 22:53:04 +0500 Subject: [PATCH 139/233] dev: server --- pkg/metrics/clickhouse/rest_server.go | 111 ++++++++++++++++++++------ 1 file changed, 85 insertions(+), 26 deletions(-) diff --git a/pkg/metrics/clickhouse/rest_server.go 
b/pkg/metrics/clickhouse/rest_server.go index 12027507e..be010c422 100644 --- a/pkg/metrics/clickhouse/rest_server.go +++ b/pkg/metrics/clickhouse/rest_server.go @@ -15,56 +15,115 @@ package clickhouse import ( + "encoding/json" "fmt" "net/http" "time" log "github.com/golang/glog" - // log "k8s.io/klog" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + + "github.com/altinity/clickhouse-operator/pkg/apis/metrics" ) -// StartMetricsREST start Prometheus metrics exporter in background +// RESTServer provides HTTP API for managing watched CRs +type RESTServer struct { + registry *CRRegistry +} + +// NewRESTServer creates a new RESTServer instance +func NewRESTServer(registry *CRRegistry) *RESTServer { + return &RESTServer{ + registry: registry, + } +} + +// ServeHTTP implements http.Handler interface +func (s *RESTServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/chi" { + http.Error(w, "404 not found.", http.StatusNotFound) + return + } + + switch r.Method { + case http.MethodGet: + s.handleGet(w, r) + case http.MethodPost: + s.handlePost(w, r) + case http.MethodDelete: + s.handleDelete(w, r) + default: + _, _ = fmt.Fprintf(w, "Sorry, only GET, POST and DELETE methods are supported.") + } +} + +// handleGet serves HTTP GET request to get list of watched CRs +func (s *RESTServer) handleGet(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(s.registry.List()) +} + +// handlePost serves HTTP POST request to add CR to the list of watched CRs +func (s *RESTServer) handlePost(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + if cr, err := s.decodeCR(r); err == nil { + s.registry.Add(cr) + } else { + http.Error(w, err.Error(), http.StatusNotAcceptable) + } +} + +// handleDelete serves HTTP DELETE request to delete CR from the list of watched CRs +func (s *RESTServer) 
handleDelete(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + if cr, err := s.decodeCR(r); err == nil { + s.registry.EnqueueRemove(cr) + } else { + http.Error(w, err.Error(), http.StatusNotAcceptable) + } +} + +// decodeCR decodes CR from the HTTP request body +func (s *RESTServer) decodeCR(r *http.Request) (*metrics.WatchedCR, error) { + cr := &metrics.WatchedCR{} + if err := json.NewDecoder(r.Body).Decode(cr); err == nil { + if cr.IsValid() { + return cr, nil + } + } + return nil, fmt.Errorf("unable to parse CR from request") +} + +// StartMetricsREST starts Prometheus metrics exporter and REST API server func StartMetricsREST( metricsAddress string, metricsPath string, collectorTimeout time.Duration, - chiListAddress string, chiListPath string, -) *Exporter { +) (*Exporter, *CRRegistry) { log.V(1).Infof("Starting metrics exporter at '%s%s'\n", metricsAddress, metricsPath) - exporter := NewExporter(collectorTimeout) + // Create shared registry + registry := NewCRRegistry() + + // Create and register Prometheus exporter + exporter := NewExporter(registry, collectorTimeout) prometheus.MustRegister(exporter) + // Create REST server + restServer := NewRESTServer(registry) + + // Setup HTTP handlers http.Handle(metricsPath, promhttp.Handler()) - http.Handle(chiListPath, exporter) + http.Handle(chiListPath, restServer) + // Start HTTP servers go http.ListenAndServe(metricsAddress, nil) if metricsAddress != chiListAddress { go http.ListenAndServe(chiListAddress, nil) } - return exporter -} - -// ServeHTTP is an interface method to serve HTTP requests -func (e *Exporter) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/chi" { - http.Error(w, "404 not found.", http.StatusNotFound) - return - } - - switch r.Method { - case "GET": - e.getWatchedCHI(w, r) - case "POST": - e.updateWatchedCHI(w, r) - case "DELETE": - e.deleteWatchedCHI(w, r) - default: - _, _ = fmt.Fprintf(w, "Sorry, only GET, POST and 
DELETE methods are supported.") - } + return exporter, registry } From f07b0462f8ed673f665f20f4221fa1eeca4fa07c Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 22:56:18 +0500 Subject: [PATCH 140/233] dev: simplify server --- cmd/metrics_exporter/app/metrics_exporter.go | 2 +- pkg/metrics/clickhouse/rest_server.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/metrics_exporter/app/metrics_exporter.go b/cmd/metrics_exporter/app/metrics_exporter.go index fcc95527d..800377974 100644 --- a/cmd/metrics_exporter/app/metrics_exporter.go +++ b/cmd/metrics_exporter/app/metrics_exporter.go @@ -96,7 +96,7 @@ func Run() { chop.New(kubeClient, chopClient, chopConfigFile) log.Info(chop.Config().String(true)) - exporter, _ := clickhouse.StartMetricsREST( + exporter := clickhouse.StartMetricsREST( metricsEP, metricsPath, chop.Config().ClickHouse.Metrics.Timeouts.Collect, diff --git a/pkg/metrics/clickhouse/rest_server.go b/pkg/metrics/clickhouse/rest_server.go index be010c422..80d07def4 100644 --- a/pkg/metrics/clickhouse/rest_server.go +++ b/pkg/metrics/clickhouse/rest_server.go @@ -102,7 +102,7 @@ func StartMetricsREST( collectorTimeout time.Duration, chiListAddress string, chiListPath string, -) (*Exporter, *CRRegistry) { +) *Exporter { log.V(1).Infof("Starting metrics exporter at '%s%s'\n", metricsAddress, metricsPath) // Create shared registry @@ -125,5 +125,5 @@ func StartMetricsREST( go http.ListenAndServe(chiListAddress, nil) } - return exporter, registry + return exporter } From 51ba2491a11834f2f837266b441b58c1f0663d7f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 7 Feb 2026 23:50:28 +0500 Subject: [PATCH 141/233] dev: introduce rest server --- pkg/metrics/clickhouse/rest_server.go | 91 ++++++++++++++++++++++----- 1 file changed, 75 insertions(+), 16 deletions(-) diff --git a/pkg/metrics/clickhouse/rest_server.go b/pkg/metrics/clickhouse/rest_server.go index 80d07def4..b4f8545c7 100644 --- 
a/pkg/metrics/clickhouse/rest_server.go +++ b/pkg/metrics/clickhouse/rest_server.go @@ -27,7 +27,33 @@ import ( "github.com/altinity/clickhouse-operator/pkg/apis/metrics" ) -// RESTServer provides HTTP API for managing watched CRs +// Request type constants +const ( + RequestTypeCR = "cr" + RequestTypeHost = "host" +) + +// RESTRequest wraps different request types for POST/DELETE operations +type RESTRequest struct { + Type string `json:"type"` // "cr" or "host" + CR *metrics.WatchedCR `json:"cr,omitempty"` + Host *HostRequest `json:"host,omitempty"` +} + +// HostRequest contains host details with parent context +type HostRequest struct { + CRNamespace string `json:"crNamespace"` + CRName string `json:"crName"` + ClusterName string `json:"clusterName"` + Host *metrics.WatchedHost `json:"host"` +} + +// IsValid checks if HostRequest has all required fields +func (r *HostRequest) IsValid() bool { + return r.CRNamespace != "" && r.CRName != "" && r.ClusterName != "" && r.Host != nil && r.Host.Hostname != "" +} + +// RESTServer provides HTTP API for managing watched CRs and Hosts type RESTServer struct { registry *CRRegistry } @@ -64,35 +90,68 @@ func (s *RESTServer) handleGet(w http.ResponseWriter, r *http.Request) { _ = json.NewEncoder(w).Encode(s.registry.List()) } -// handlePost serves HTTP POST request to add CR to the list of watched CRs +// handlePost serves HTTP POST request to add CR or Host func (s *RESTServer) handlePost(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") - if cr, err := s.decodeCR(r); err == nil { - s.registry.Add(cr) - } else { + req, err := s.decodeRequest(r) + if err != nil { http.Error(w, err.Error(), http.StatusNotAcceptable) + return + } + + switch req.Type { + case RequestTypeCR: + s.registry.AddCR(req.CR) + case RequestTypeHost: + if err := s.registry.AddHost(req.Host); err != nil { + http.Error(w, err.Error(), http.StatusNotAcceptable) + return + } + default: + http.Error(w, 
fmt.Sprintf("unknown request type: %s", req.Type), http.StatusNotAcceptable) } } -// handleDelete serves HTTP DELETE request to delete CR from the list of watched CRs +// handleDelete serves HTTP DELETE request to delete CR or Host func (s *RESTServer) handleDelete(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") - if cr, err := s.decodeCR(r); err == nil { - s.registry.EnqueueRemove(cr) - } else { + req, err := s.decodeRequest(r) + if err != nil { http.Error(w, err.Error(), http.StatusNotAcceptable) + return + } + + switch req.Type { + case RequestTypeCR: + s.registry.EnqueueRemoveCR(req.CR) + case RequestTypeHost: + s.registry.EnqueueRemoveHost(req.Host) + default: + http.Error(w, fmt.Sprintf("unknown request type: %s", req.Type), http.StatusNotAcceptable) } } -// decodeCR decodes CR from the HTTP request body -func (s *RESTServer) decodeCR(r *http.Request) (*metrics.WatchedCR, error) { - cr := &metrics.WatchedCR{} - if err := json.NewDecoder(r.Body).Decode(cr); err == nil { - if cr.IsValid() { - return cr, nil +// decodeRequest decodes RESTRequest from the HTTP request body +func (s *RESTServer) decodeRequest(r *http.Request) (*RESTRequest, error) { + req := &RESTRequest{} + if err := json.NewDecoder(r.Body).Decode(req); err != nil { + return nil, fmt.Errorf("unable to parse request: %w", err) + } + + switch req.Type { + case RequestTypeCR: + if req.CR == nil || !req.CR.IsValid() { + return nil, fmt.Errorf("invalid CR in request") + } + case RequestTypeHost: + if req.Host == nil || !req.Host.IsValid() { + return nil, fmt.Errorf("invalid Host in request") } + default: + return nil, fmt.Errorf("unknown request type: %s", req.Type) } - return nil, fmt.Errorf("unable to parse CR from request") + + return req, nil } // StartMetricsREST starts Prometheus metrics exporter and REST API server From bc3d24749c573a5eb0252a7e378c3e3a1a72969a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 00:44:12 +0500 
Subject: [PATCH 142/233] dev: registry --- pkg/metrics/clickhouse/registry.go | 119 ++++++++++++++++++++++++++--- 1 file changed, 107 insertions(+), 12 deletions(-) diff --git a/pkg/metrics/clickhouse/registry.go b/pkg/metrics/clickhouse/registry.go index 469f1f072..70739c5d3 100644 --- a/pkg/metrics/clickhouse/registry.go +++ b/pkg/metrics/clickhouse/registry.go @@ -15,6 +15,7 @@ package clickhouse import ( + "fmt" "sync" log "github.com/golang/glog" @@ -22,6 +23,21 @@ import ( "github.com/altinity/clickhouse-operator/pkg/apis/metrics" ) +// removeType identifies what type of entity to remove +type removeType int + +const ( + removeTypeCR removeType = iota + removeTypeHost +) + +// removeRequest represents a pending removal request +type removeRequest struct { + removeType removeType + cr *metrics.WatchedCR + host *HostRequest +} + // CRRegistry is a thread-safe storage for watched Custom Resources type CRRegistry struct { index crInstallationsIndex @@ -36,35 +52,114 @@ func NewCRRegistry() *CRRegistry { } } -// Add adds or updates a CR in the registry -func (r *CRRegistry) Add(cr *metrics.WatchedCR) { +// AddCR adds or updates a CR in the registry +func (r *CRRegistry) AddCR(cr *metrics.WatchedCR) { r.mutex.Lock() defer r.mutex.Unlock() log.V(1).Infof("Registry: Add CR (%s/%s): %s", cr.Namespace, cr.Name, cr) r.index.set(cr.IndexKey(), cr) } -// Remove removes a CR from the registry -func (r *CRRegistry) Remove(cr *metrics.WatchedCR) { +// AddHost adds a host to an existing CR in the registry +func (r *CRRegistry) AddHost(req *HostRequest) error { + r.mutex.Lock() + defer r.mutex.Unlock() + + crKey := (&metrics.WatchedCR{Namespace: req.CRNamespace, Name: req.CRName}).IndexKey() + cr, ok := r.index.get(crKey) + if !ok || cr == nil { + return fmt.Errorf("CR not found: %s", crKey) + } + + // Find or create cluster + var cluster *metrics.WatchedCluster + for _, c := range cr.Clusters { + if c.Name == req.ClusterName { + cluster = c + break + } + } + if cluster == nil 
{ + cluster = &metrics.WatchedCluster{Name: req.ClusterName} + cr.Clusters = append(cr.Clusters, cluster) + } + + // Add or update host + found := false + for i, h := range cluster.Hosts { + if h.Hostname == req.Host.Hostname { + cluster.Hosts[i] = req.Host + found = true + break + } + } + if !found { + cluster.Hosts = append(cluster.Hosts, req.Host) + } + + log.V(1).Infof("Registry: Add Host %s to CR (%s/%s) cluster %s", req.Host.Hostname, req.CRNamespace, req.CRName, req.ClusterName) + return nil +} + +// RemoveCR removes a CR from the registry +func (r *CRRegistry) RemoveCR(cr *metrics.WatchedCR) { r.mutex.Lock() defer r.mutex.Unlock() log.V(1).Infof("Registry: Remove CR (%s/%s)", cr.Namespace, cr.Name) r.index.remove(cr.IndexKey()) } -// EnqueueRemove enqueues a CR for removal (will be removed on next Cleanup) -func (r *CRRegistry) EnqueueRemove(cr *metrics.WatchedCR) { - r.toRemove.Store(cr, struct{}{}) +// RemoveHost removes a host from a CR in the registry +func (r *CRRegistry) RemoveHost(req *HostRequest) { + r.mutex.Lock() + defer r.mutex.Unlock() + + crKey := (&metrics.WatchedCR{Namespace: req.CRNamespace, Name: req.CRName}).IndexKey() + cr, ok := r.index.get(crKey) + if !ok || cr == nil { + log.V(1).Infof("Registry: Cannot remove host, CR not found: %s", crKey) + return + } + + for _, cluster := range cr.Clusters { + if cluster.Name == req.ClusterName { + for i, h := range cluster.Hosts { + if h.Hostname == req.Host.Hostname { + cluster.Hosts = append(cluster.Hosts[:i], cluster.Hosts[i+1:]...) 
+ log.V(1).Infof("Registry: Remove Host %s from CR (%s/%s) cluster %s", req.Host.Hostname, req.CRNamespace, req.CRName, req.ClusterName) + return + } + } + } + } +} + +// EnqueueRemoveCR enqueues a CR for removal (will be removed on next Cleanup) +func (r *CRRegistry) EnqueueRemoveCR(cr *metrics.WatchedCR) { + req := &removeRequest{removeType: removeTypeCR, cr: cr} + r.toRemove.Store(req, struct{}{}) +} + +// EnqueueRemoveHost enqueues a host for removal (will be removed on next Cleanup) +func (r *CRRegistry) EnqueueRemoveHost(host *HostRequest) { + req := &removeRequest{removeType: removeTypeHost, host: host} + r.toRemove.Store(req, struct{}{}) } -// Cleanup processes all CRs enqueued for removal +// Cleanup processes all CRs and hosts enqueued for removal func (r *CRRegistry) Cleanup() { log.V(2).Info("Registry: Starting cleanup") r.toRemove.Range(func(key, value interface{}) bool { - if cr, ok := key.(*metrics.WatchedCR); ok { - r.toRemove.Delete(key) - r.Remove(cr) - log.V(1).Infof("Registry: Cleaned up CR (%s/%s)", cr.Name, cr.Namespace) + r.toRemove.Delete(key) + if req, ok := key.(*removeRequest); ok { + switch req.removeType { + case removeTypeCR: + r.RemoveCR(req.cr) + log.V(1).Infof("Registry: Cleaned up CR (%s/%s)", req.cr.Namespace, req.cr.Name) + case removeTypeHost: + r.RemoveHost(req.host) + log.V(1).Infof("Registry: Cleaned up Host %s from CR (%s/%s)", req.host.Host.Hostname, req.host.CRNamespace, req.host.CRName) + } } return true }) From 42edbf4e3991d6e87842c475abbfbef1065ecb23 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 00:45:00 +0500 Subject: [PATCH 143/233] dev: formatter --- pkg/metrics/clickhouse/rest_server.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/metrics/clickhouse/rest_server.go b/pkg/metrics/clickhouse/rest_server.go index b4f8545c7..4dbc709d7 100644 --- a/pkg/metrics/clickhouse/rest_server.go +++ b/pkg/metrics/clickhouse/rest_server.go @@ -35,9 +35,9 @@ const ( // 
RESTRequest wraps different request types for POST/DELETE operations type RESTRequest struct { - Type string `json:"type"` // "cr" or "host" - CR *metrics.WatchedCR `json:"cr,omitempty"` - Host *HostRequest `json:"host,omitempty"` + Type string `json:"type"` // "cr" or "host" + CR *metrics.WatchedCR `json:"cr,omitempty"` + Host *HostRequest `json:"host,omitempty"` } // HostRequest contains host details with parent context From 248109e44fe5994b31d069a4a1150577c10b7958 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 00:53:51 +0500 Subject: [PATCH 144/233] dev: simplify remove operations --- pkg/metrics/clickhouse/registry.go | 53 ++---------------------------- 1 file changed, 2 insertions(+), 51 deletions(-) diff --git a/pkg/metrics/clickhouse/registry.go b/pkg/metrics/clickhouse/registry.go index 70739c5d3..ef7e69257 100644 --- a/pkg/metrics/clickhouse/registry.go +++ b/pkg/metrics/clickhouse/registry.go @@ -23,26 +23,10 @@ import ( "github.com/altinity/clickhouse-operator/pkg/apis/metrics" ) -// removeType identifies what type of entity to remove -type removeType int - -const ( - removeTypeCR removeType = iota - removeTypeHost -) - -// removeRequest represents a pending removal request -type removeRequest struct { - removeType removeType - cr *metrics.WatchedCR - host *HostRequest -} - // CRRegistry is a thread-safe storage for watched Custom Resources type CRRegistry struct { - index crInstallationsIndex - mutex sync.RWMutex - toRemove sync.Map + index crInstallationsIndex + mutex sync.RWMutex } // NewCRRegistry creates a new CRRegistry instance @@ -134,38 +118,6 @@ func (r *CRRegistry) RemoveHost(req *HostRequest) { } } -// EnqueueRemoveCR enqueues a CR for removal (will be removed on next Cleanup) -func (r *CRRegistry) EnqueueRemoveCR(cr *metrics.WatchedCR) { - req := &removeRequest{removeType: removeTypeCR, cr: cr} - r.toRemove.Store(req, struct{}{}) -} - -// EnqueueRemoveHost enqueues a host for removal (will be removed on next Cleanup) 
-func (r *CRRegistry) EnqueueRemoveHost(host *HostRequest) { - req := &removeRequest{removeType: removeTypeHost, host: host} - r.toRemove.Store(req, struct{}{}) -} - -// Cleanup processes all CRs and hosts enqueued for removal -func (r *CRRegistry) Cleanup() { - log.V(2).Info("Registry: Starting cleanup") - r.toRemove.Range(func(key, value interface{}) bool { - r.toRemove.Delete(key) - if req, ok := key.(*removeRequest); ok { - switch req.removeType { - case removeTypeCR: - r.RemoveCR(req.cr) - log.V(1).Infof("Registry: Cleaned up CR (%s/%s)", req.cr.Namespace, req.cr.Name) - case removeTypeHost: - r.RemoveHost(req.host) - log.V(1).Infof("Registry: Cleaned up Host %s from CR (%s/%s)", req.host.Host.Hostname, req.host.CRNamespace, req.host.CRName) - } - } - return true - }) - log.V(2).Info("Registry: Completed cleanup") -} - // List returns all watched CRs as a slice func (r *CRRegistry) List() []*metrics.WatchedCR { r.mutex.RLock() @@ -174,7 +126,6 @@ func (r *CRRegistry) List() []*metrics.WatchedCR { } // Walk iterates over all hosts while holding an exclusive lock -// Use this when the iteration may modify state func (r *CRRegistry) Walk(fn func(*metrics.WatchedCR, *metrics.WatchedCluster, *metrics.WatchedHost)) { r.mutex.Lock() defer r.mutex.Unlock() From 31566636176745b8831d80de903c1b84f13029da Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 00:54:15 +0500 Subject: [PATCH 145/233] dev: switch server to new remove commands --- pkg/metrics/clickhouse/rest_server.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/metrics/clickhouse/rest_server.go b/pkg/metrics/clickhouse/rest_server.go index 4dbc709d7..8fe7be886 100644 --- a/pkg/metrics/clickhouse/rest_server.go +++ b/pkg/metrics/clickhouse/rest_server.go @@ -123,9 +123,9 @@ func (s *RESTServer) handleDelete(w http.ResponseWriter, r *http.Request) { switch req.Type { case RequestTypeCR: - s.registry.EnqueueRemoveCR(req.CR) + s.registry.RemoveCR(req.CR) case 
RequestTypeHost: - s.registry.EnqueueRemoveHost(req.Host) + s.registry.RemoveHost(req.Host) default: http.Error(w, fmt.Sprintf("unknown request type: %s", req.Type), http.StatusNotAcceptable) } From 5b89b9677444e8216bbad640797d3db04740f071 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 00:55:28 +0500 Subject: [PATCH 146/233] dev: use exporter --- pkg/metrics/clickhouse/exporter.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pkg/metrics/clickhouse/exporter.go b/pkg/metrics/clickhouse/exporter.go index 3f370018f..2e2196dd1 100644 --- a/pkg/metrics/clickhouse/exporter.go +++ b/pkg/metrics/clickhouse/exporter.go @@ -55,9 +55,6 @@ func NewExporter(registry *CRRegistry, collectorTimeout time.Duration) *Exporter // Collect implements prometheus.Collector Collect method func (e *Exporter) Collect(ch chan<- prometheus.Metric) { - // Run cleanup on each collect - e.registry.Cleanup() - if ch == nil { log.Warning("Prometheus channel is closed. Unable to write metrics") return @@ -161,7 +158,7 @@ func (e *Exporter) processDiscoveredCR(kubeClient kube.Interface, chi *api.Click normalized, _ := normalizer.CreateTemplated(chi, normalizerCommon.NewOptions[api.ClickHouseInstallation]()) watchedCR := metrics.NewWatchedCR(normalized) - e.registry.Add(watchedCR) + e.registry.AddCR(watchedCR) } func (e *Exporter) shouldWatchCR(chi *api.ClickHouseInstallation) bool { From c1c7c0f594370283e67b680d2053268ad887f41c Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 01:04:16 +0500 Subject: [PATCH 147/233] dev: switch machinery to new protocol --- pkg/metrics/clickhouse/rest_machinery.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/pkg/metrics/clickhouse/rest_machinery.go b/pkg/metrics/clickhouse/rest_machinery.go index 767a9ce7e..3fb59e1ca 100644 --- a/pkg/metrics/clickhouse/rest_machinery.go +++ b/pkg/metrics/clickhouse/rest_machinery.go @@ -18,25 +18,23 @@ import ( "bytes" 
"encoding/json" "fmt" - "github.com/altinity/clickhouse-operator/pkg/apis/metrics" "io" "net/http" ) -func makeRESTCall(chi *metrics.WatchedCR, method string) error { +func makeRESTCall(restReq *RESTRequest, method string) error { url := "http://127.0.0.1:8888/chi" - json, err := json.Marshal(chi) + payload, err := json.Marshal(restReq) if err != nil { return err } - req, err := http.NewRequest(method, url, bytes.NewBuffer(json)) + httpReq, err := http.NewRequest(method, url, bytes.NewBuffer(payload)) if err != nil { return err } - //req.SetBasicAuth(s.Username, s.Password) - _, err = doRequest(req) + _, err = doRequest(httpReq) return err } From 293d4e1de35592ba6d324408ea5caf54fa741640 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 01:05:01 +0500 Subject: [PATCH 148/233] dev: adjust client ccommands according to the new api --- pkg/metrics/clickhouse/rest_client.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/pkg/metrics/clickhouse/rest_client.go b/pkg/metrics/clickhouse/rest_client.go index 55510d3a7..e6d194100 100644 --- a/pkg/metrics/clickhouse/rest_client.go +++ b/pkg/metrics/clickhouse/rest_client.go @@ -18,10 +18,20 @@ import "github.com/altinity/clickhouse-operator/pkg/apis/metrics" // InformMetricsExporterAboutWatchedCHI informs exporter about new watched CHI func InformMetricsExporterAboutWatchedCHI(chi *metrics.WatchedCR) error { - return makeRESTCall(chi, "POST") + return makeRESTCall(&RESTRequest{Type: RequestTypeCR, CR: chi}, "POST") } // InformMetricsExporterToDeleteWatchedCHI informs exporter to delete/forget watched CHI func InformMetricsExporterToDeleteWatchedCHI(chi *metrics.WatchedCR) error { - return makeRESTCall(chi, "DELETE") + return makeRESTCall(&RESTRequest{Type: RequestTypeCR, CR: chi}, "DELETE") +} + +// InformMetricsExporterAboutWatchedHost informs exporter about new watched host +func InformMetricsExporterAboutWatchedHost(host *HostRequest) error { + return 
makeRESTCall(&RESTRequest{Type: RequestTypeHost, Host: host}, "POST") +} + +// InformMetricsExporterToDeleteWatchedHost informs exporter to delete/forget watched host +func InformMetricsExporterToDeleteWatchedHost(host *HostRequest) error { + return makeRESTCall(&RESTRequest{Type: RequestTypeHost, Host: host}, "DELETE") } From d536d5514baa199e1d55d6d55fe5eab877f2120d Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 01:06:01 +0500 Subject: [PATCH 149/233] dev: switch to http const --- pkg/metrics/clickhouse/rest_client.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/pkg/metrics/clickhouse/rest_client.go b/pkg/metrics/clickhouse/rest_client.go index e6d194100..87c467d2c 100644 --- a/pkg/metrics/clickhouse/rest_client.go +++ b/pkg/metrics/clickhouse/rest_client.go @@ -14,24 +14,28 @@ package clickhouse -import "github.com/altinity/clickhouse-operator/pkg/apis/metrics" +import ( + "net/http" + + "github.com/altinity/clickhouse-operator/pkg/apis/metrics" +) // InformMetricsExporterAboutWatchedCHI informs exporter about new watched CHI func InformMetricsExporterAboutWatchedCHI(chi *metrics.WatchedCR) error { - return makeRESTCall(&RESTRequest{Type: RequestTypeCR, CR: chi}, "POST") + return makeRESTCall(&RESTRequest{Type: RequestTypeCR, CR: chi}, http.MethodPost) } // InformMetricsExporterToDeleteWatchedCHI informs exporter to delete/forget watched CHI func InformMetricsExporterToDeleteWatchedCHI(chi *metrics.WatchedCR) error { - return makeRESTCall(&RESTRequest{Type: RequestTypeCR, CR: chi}, "DELETE") + return makeRESTCall(&RESTRequest{Type: RequestTypeCR, CR: chi}, http.MethodDelete) } // InformMetricsExporterAboutWatchedHost informs exporter about new watched host func InformMetricsExporterAboutWatchedHost(host *HostRequest) error { - return makeRESTCall(&RESTRequest{Type: RequestTypeHost, Host: host}, "POST") + return makeRESTCall(&RESTRequest{Type: RequestTypeHost, Host: host}, http.MethodPost) } // 
InformMetricsExporterToDeleteWatchedHost informs exporter to delete/forget watched host func InformMetricsExporterToDeleteWatchedHost(host *HostRequest) error { - return makeRESTCall(&RESTRequest{Type: RequestTypeHost, Host: host}, "DELETE") + return makeRESTCall(&RESTRequest{Type: RequestTypeHost, Host: host}, http.MethodDelete) } From 6a73f654d7c64c169ead77538531cc4a03f0a8d4 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 13:31:13 +0500 Subject: [PATCH 150/233] dev: reclaim - introduce labels for new pvc --- pkg/model/common/tags/labeler/labels.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/model/common/tags/labeler/labels.go b/pkg/model/common/tags/labeler/labels.go index a5f48383c..9ecb72a24 100644 --- a/pkg/model/common/tags/labeler/labels.go +++ b/pkg/model/common/tags/labeler/labels.go @@ -105,15 +105,17 @@ func (l *Labeler) _labelExistingPV(pv *core.PersistentVolume, host *api.Host) ma func (l *Labeler) labelNewPVC(params ...any) map[string]string { var host *api.Host - if len(params) > 0 { + var template *api.VolumeClaimTemplate + if len(params) > 1 { host = params[0].(*api.Host) - return l._labelNewPVC(host) + template = params[1].(*api.VolumeClaimTemplate) + return l._labelNewPVC(host, template) } panic("not enough params for labeler") } -func (l *Labeler) _labelNewPVC(host *api.Host) map[string]string { - return l.GetHostScope(host, false) +func (l *Labeler) _labelNewPVC(host *api.Host, template *api.VolumeClaimTemplate) map[string]string { + return l.getHostScopeReclaimPolicy(host, template, false) } func (l *Labeler) labelExistingPVC(params ...any) map[string]string { From 13af73c9eba53f073313e6f991d674ba99114f19 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 13:34:28 +0500 Subject: [PATCH 151/233] dev: reclaim - rewrite PVC with reclaim labels --- pkg/model/common/creator/pvc.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/pkg/model/common/creator/pvc.go b/pkg/model/common/creator/pvc.go index 71808b663..43a104fe6 100644 --- a/pkg/model/common/creator/pvc.go +++ b/pkg/model/common/creator/pvc.go @@ -28,6 +28,7 @@ func (c *Creator) CreatePVC( namespace string, host *api.Host, spec *core.PersistentVolumeClaimSpec, + template *api.VolumeClaimTemplate, ) *core.PersistentVolumeClaim { persistentVolumeClaim := core.PersistentVolumeClaim{ TypeMeta: meta.TypeMeta{ @@ -43,7 +44,7 @@ func (c *Creator) CreatePVC( // we are close to proper disk inheritance // Right now we hit the following error: // "Forbidden: updates to StatefulSet spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden" - Labels: c.macro.Scope(host).Map(c.tagger.Label(interfaces.LabelNewPVC, host, false)), + Labels: c.macro.Scope(host).Map(c.tagger.Label(interfaces.LabelNewPVC, host, template)), Annotations: c.macro.Scope(host).Map(c.tagger.Annotate(interfaces.AnnotateNewPVC, host)), // Incompatible with PV retain policy // Fails PV retain policy test (19) From bb1853b27585145328fca706f97f9a10ae286347 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 13:37:43 +0500 Subject: [PATCH 152/233] dev: reclaim - common interface --- pkg/interfaces/interfaces-main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/interfaces/interfaces-main.go b/pkg/interfaces/interfaces-main.go index b99828d62..4af799936 100644 --- a/pkg/interfaces/interfaces-main.go +++ b/pkg/interfaces/interfaces-main.go @@ -98,6 +98,7 @@ type ICreator interface { namespace string, host *api.Host, spec *core.PersistentVolumeClaimSpec, + template *api.VolumeClaimTemplate, ) *core.PersistentVolumeClaim TagPVC( pvc *core.PersistentVolumeClaim, From 5a4f5f2c380fe9bf2dd943a3ad02e644234ccfc8 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 13:39:28 +0500 Subject: [PATCH 153/233] dev: reclaim - call PVC creators with additional templates --- 
pkg/controller/common/storage/storage-reconciler.go | 7 ++++--- pkg/model/common/creator/stateful-set-storage.go | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pkg/controller/common/storage/storage-reconciler.go b/pkg/controller/common/storage/storage-reconciler.go index 51455189e..23987beec 100644 --- a/pkg/controller/common/storage/storage-reconciler.go +++ b/pkg/controller/common/storage/storage-reconciler.go @@ -127,7 +127,7 @@ func (w *Reconciler) reconcilePVCFromVolumeMount( pvcName := w.namer.Name(interfaces.NamePVCNameByVolumeClaimTemplate, host, volumeClaimTemplate) // Which PVC are we going to reconcile - pvc, chopCreated, err := w.fetchOrCreatePVC(ctx, host, pvcNamespace, pvcName, volumeMount.Name, shouldCHOPCreatePVC, &volumeClaimTemplate.Spec) + pvc, chopCreated, err := w.fetchOrCreatePVC(ctx, host, pvcNamespace, pvcName, volumeMount.Name, shouldCHOPCreatePVC, &volumeClaimTemplate.Spec, volumeClaimTemplate) if err != nil { // Unable to fetch or create PVC correctly. return nil @@ -156,7 +156,7 @@ func (w *Reconciler) reconcilePVCFromVolumeMount( // Refresh PVC model. 
Since PVC is just deleted refreshed model may not be fetched from the k8s, // but can be provided by the operator still - pvc, _, _ = w.fetchOrCreatePVC(ctx, host, pvcNamespace, pvcName, volumeMount.Name, shouldCHOPCreatePVC, &volumeClaimTemplate.Spec) + pvc, _, _ = w.fetchOrCreatePVC(ctx, host, pvcNamespace, pvcName, volumeMount.Name, shouldCHOPCreatePVC, &volumeClaimTemplate.Spec, volumeClaimTemplate) reconcileError = ErrPVCWithLostPVDeleted } @@ -189,6 +189,7 @@ func (w *Reconciler) fetchOrCreatePVC( volumeMountName string, operatorInCharge bool, pvcSpec *core.PersistentVolumeClaimSpec, + template *api.VolumeClaimTemplate, ) ( pvc *core.PersistentVolumeClaim, created bool, @@ -220,7 +221,7 @@ func (w *Reconciler) fetchOrCreatePVC( "PVC (%s/%s/%s/%s) model provided by the operator", namespace, host.GetName(), volumeMountName, name, ) - pvc = w.task.Creator().CreatePVC(name, namespace, host, pvcSpec) + pvc = w.task.Creator().CreatePVC(name, namespace, host, pvcSpec, template) return pvc, true, nil } else { // PVC is not available and the operator is not in charge of the PVC diff --git a/pkg/model/common/creator/stateful-set-storage.go b/pkg/model/common/creator/stateful-set-storage.go index e203e8d1c..f58c5ffa8 100644 --- a/pkg/model/common/creator/stateful-set-storage.go +++ b/pkg/model/common/creator/stateful-set-storage.go @@ -127,7 +127,7 @@ func (c *Creator) stsSetupVolumeForPVCTemplate( k8s.StatefulSetAppendVolumes(statefulSet, volume) } else { // For templates we should not specify namespace where PVC would be located - pvc := *c.CreatePVC(volumeClaimTemplate.Name, "", host, &volumeClaimTemplate.Spec) + pvc := *c.CreatePVC(volumeClaimTemplate.Name, "", host, &volumeClaimTemplate.Spec, volumeClaimTemplate) k8s.StatefulSetAppendPersistentVolumeClaims(statefulSet, pvc) } } From 409056287e6b3cb11b820f0cb2a9bcad76b9f7d7 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 15:49:54 +0500 Subject: [PATCH 154/233] dev: clarify excluder --- 
pkg/controller/chi/worker-monitoring.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/pkg/controller/chi/worker-monitoring.go b/pkg/controller/chi/worker-monitoring.go index 523e25e72..aaf439552 100644 --- a/pkg/controller/chi/worker-monitoring.go +++ b/pkg/controller/chi/worker-monitoring.go @@ -21,13 +21,18 @@ import ( // excludeFromMonitoring excludes stopped CR from monitoring func (w *worker) excludeFromMonitoring(cr *api.ClickHouseInstallation) { + // Important + // Exclude from monitoring STOPpe CR + // Running CR is not touched + if !cr.IsStopped() { - // No need to exclude non-stopped CR + // CR is NOT stopped, it is running + // No need to exclude running CR return } - // CR is stopped, let's exclude it from monitoring - // because it makes no sense to send SQL requests to stopped instances + // CR is stopped + // Exclude it from monitoring cause it makes no sense to send SQL requests to stopped instances w.a.V(1). WithEvent(cr, a.EventActionReconcile, a.EventReasonReconcileInProgress). WithAction(cr). 
From b4009fd7845f5e5fe83ff77eff376301a8a2c51e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 15:52:17 +0500 Subject: [PATCH 155/233] dev: clarify includer --- pkg/controller/chi/worker-monitoring.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/pkg/controller/chi/worker-monitoring.go b/pkg/controller/chi/worker-monitoring.go index aaf439552..9423c471b 100644 --- a/pkg/controller/chi/worker-monitoring.go +++ b/pkg/controller/chi/worker-monitoring.go @@ -22,7 +22,7 @@ import ( // excludeFromMonitoring excludes stopped CR from monitoring func (w *worker) excludeFromMonitoring(cr *api.ClickHouseInstallation) { // Important - // Exclude from monitoring STOPpe CR + // Exclude from monitoring STOP-ped CR // Running CR is not touched if !cr.IsStopped() { @@ -33,6 +33,7 @@ func (w *worker) excludeFromMonitoring(cr *api.ClickHouseInstallation) { // CR is stopped // Exclude it from monitoring cause it makes no sense to send SQL requests to stopped instances + w.a.V(1). WithEvent(cr, a.EventActionReconcile, a.EventReasonReconcileInProgress). WithAction(cr). @@ -43,11 +44,18 @@ func (w *worker) excludeFromMonitoring(cr *api.ClickHouseInstallation) { // addToMonitoring adds CR to monitoring func (w *worker) addToMonitoring(cr *api.ClickHouseInstallation) { + // Important + // Include into monitoring RUN-ning CR + // Stopped CR is not touched + if cr.IsStopped() { // No need to add stopped CR return } + // CR is running + // Include it into monitoring + w.a.V(1). WithEvent(cr, a.EventActionReconcile, a.EventReasonReconcileInProgress). WithAction(cr). 
@@ -55,3 +63,6 @@ func (w *worker) addToMonitoring(cr *api.ClickHouseInstallation) { Info("add CR to monitoring") w.c.updateWatch(cr) } + + + From 33df2c308593c0fb6782767481425775cade52e7 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 16:02:02 +0500 Subject: [PATCH 156/233] dev: minor --- pkg/controller/chi/worker-monitoring.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/controller/chi/worker-monitoring.go b/pkg/controller/chi/worker-monitoring.go index 9423c471b..90f30b0c8 100644 --- a/pkg/controller/chi/worker-monitoring.go +++ b/pkg/controller/chi/worker-monitoring.go @@ -63,6 +63,3 @@ func (w *worker) addToMonitoring(cr *api.ClickHouseInstallation) { Info("add CR to monitoring") w.c.updateWatch(cr) } - - - From 05b91dd5f581af7d96920ee6b7674f7b9f9b7619 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 16:29:31 +0500 Subject: [PATCH 157/233] dev: enhanse monitoring starter to allocate watch and ensure watch is available --- pkg/controller/chi/worker-monitoring.go | 38 ++++++++++++++++--------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/pkg/controller/chi/worker-monitoring.go b/pkg/controller/chi/worker-monitoring.go index 90f30b0c8..d6823678c 100644 --- a/pkg/controller/chi/worker-monitoring.go +++ b/pkg/controller/chi/worker-monitoring.go @@ -25,21 +25,33 @@ func (w *worker) excludeFromMonitoring(cr *api.ClickHouseInstallation) { // Exclude from monitoring STOP-ped CR // Running CR is not touched - if !cr.IsStopped() { - // CR is NOT stopped, it is running - // No need to exclude running CR - return - } + if cr.IsStopped() { + // CR is stopped + // Exclude it from monitoring cause it makes no sense to send SQL requests to stopped instances - // CR is stopped - // Exclude it from monitoring cause it makes no sense to send SQL requests to stopped instances + w.a.V(1). + WithEvent(cr, a.EventActionReconcile, a.EventReasonReconcileInProgress). + WithAction(cr). + M(cr).F(). 
+ Info("exclude CR from monitoring") + w.c.deleteWatch(cr) + } else { + // CR is NOT stopped, it is running + // Ensure CR is registered in monitoring + w.a.V(1). + WithEvent(cr, a.EventActionReconcile, a.EventReasonReconcileInProgress). + WithAction(cr). + M(cr).F(). + Info("ensure CR in monitoring") - w.a.V(1). - WithEvent(cr, a.EventActionReconcile, a.EventReasonReconcileInProgress). - WithAction(cr). - M(cr).F(). - Info("exclude CR from monitoring") - w.c.deleteWatch(cr) + if cr.HasAncestor() { + // Ensure CR is watched + w.c.updateWatch(cr.GetAncestorT()) + } else { + // CR is a new one - allocate monitoring + w.c.allocateWatch(cr) + } + } } // addToMonitoring adds CR to monitoring From 78a08fc99ebccabc1417697249773fb9d8b7a033 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 16:29:49 +0500 Subject: [PATCH 158/233] dev: watch allocator --- pkg/controller/chi/controller.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go index eb3bd3712..d5090db00 100644 --- a/pkg/controller/chi/controller.go +++ b/pkg/controller/chi/controller.go @@ -692,6 +692,13 @@ func (c *Controller) updateWatch(chi *api.ClickHouseInstallation) { go c.updateWatchAsync(watched) } +// allocateWatch +func (c *Controller) allocateWatch(chi *api.ClickHouseInstallation) { + watched := metrics.NewWatchedCR(chi) + watched.Clusters = nil + go c.updateWatchAsync(watched) +} + // updateWatchAsync func (c *Controller) updateWatchAsync(chi *metrics.WatchedCR) { if err := clickhouse.InformMetricsExporterAboutWatchedCHI(chi); err != nil { From ba2db3e2de94c1b40becaaff69a1afab321df703 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 16:34:38 +0500 Subject: [PATCH 159/233] dev: rethink monitoring function --- pkg/controller/chi/worker-monitoring.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/controller/chi/worker-monitoring.go 
b/pkg/controller/chi/worker-monitoring.go index d6823678c..bb5e8e0e2 100644 --- a/pkg/controller/chi/worker-monitoring.go +++ b/pkg/controller/chi/worker-monitoring.go @@ -19,11 +19,11 @@ import ( a "github.com/altinity/clickhouse-operator/pkg/controller/common/announcer" ) -// excludeFromMonitoring excludes stopped CR from monitoring -func (w *worker) excludeFromMonitoring(cr *api.ClickHouseInstallation) { - // Important - // Exclude from monitoring STOP-ped CR - // Running CR is not touched +// prepareMonitoring prepares monitoring state before reconcile begins. +// For stopped CR - excludes from monitoring. +// For running CR with ancestor - preserves old topology in monitoring. +// For new running CR - allocates an empty slot in monitoring index. +func (w *worker) prepareMonitoring(cr *api.ClickHouseInstallation) { if cr.IsStopped() { // CR is stopped From 85050eb53cd2a63d5fc7d4147f5fdaa87c5ad5e8 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 16:34:57 +0500 Subject: [PATCH 160/233] dev: utilize modifications --- pkg/controller/chi/worker-reconciler-chi.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/chi/worker-reconciler-chi.go b/pkg/controller/chi/worker-reconciler-chi.go index 1b9510c34..c66279a2c 100644 --- a/pkg/controller/chi/worker-reconciler-chi.go +++ b/pkg/controller/chi/worker-reconciler-chi.go @@ -80,7 +80,7 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *api.ClickHouseInstal } w.markReconcileStart(ctx, new) - w.excludeFromMonitoring(new) + w.prepareMonitoring(new) w.setHostStatusesPreliminary(ctx, new) if err := w.reconcile(ctx, new); err != nil { From d30fd4a07bdd62f4661e9d05894dc2fb2f0557fc Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 16:35:14 +0500 Subject: [PATCH 161/233] dev: mirror in chk --- pkg/controller/chk/worker-monitoring.go | 4 ++-- pkg/controller/chk/worker-reconciler-chk.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) 
diff --git a/pkg/controller/chk/worker-monitoring.go b/pkg/controller/chk/worker-monitoring.go index c524383f2..a30cfc8b4 100644 --- a/pkg/controller/chk/worker-monitoring.go +++ b/pkg/controller/chk/worker-monitoring.go @@ -18,8 +18,8 @@ import ( api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1" ) -// excludeFromMonitoring excludes stopped CR from monitoring -func (w *worker) excludeFromMonitoring(cr *api.ClickHouseKeeperInstallation) { +// prepareMonitoring prepares monitoring state before reconcile begins +func (w *worker) prepareMonitoring(cr *api.ClickHouseKeeperInstallation) { } // addToMonitoring adds CR to monitoring diff --git a/pkg/controller/chk/worker-reconciler-chk.go b/pkg/controller/chk/worker-reconciler-chk.go index abc888359..72c71904a 100644 --- a/pkg/controller/chk/worker-reconciler-chk.go +++ b/pkg/controller/chk/worker-reconciler-chk.go @@ -80,7 +80,7 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *apiChk.ClickHouseKee } w.markReconcileStart(ctx, new) - w.excludeFromMonitoring(new) + w.prepareMonitoring(new) w.setHostStatusesPreliminary(ctx, new) if err := w.reconcile(ctx, new); err != nil { From 4f2fd65be6a3e90ab8c6e4e313679435aef5249b Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 17:03:41 +0500 Subject: [PATCH 162/233] dev: add host adder on monitoring level --- pkg/controller/chi/worker-monitoring.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pkg/controller/chi/worker-monitoring.go b/pkg/controller/chi/worker-monitoring.go index bb5e8e0e2..50cfc1bb3 100644 --- a/pkg/controller/chi/worker-monitoring.go +++ b/pkg/controller/chi/worker-monitoring.go @@ -19,6 +19,17 @@ import ( a "github.com/altinity/clickhouse-operator/pkg/controller/common/announcer" ) +// addHostToMonitoring adds a single host to monitoring. +// Used during reconcile to enable monitoring for individual hosts as they become ready. 
+func (w *worker) addHostToMonitoring(host *api.Host) { + if host.GetCR().IsStopped() { + return + } + + w.a.V(1).M(host).F().Info("add host to monitoring: %s", host.Runtime.Address.FQDN) + w.c.addHostWatch(host) +} + // prepareMonitoring prepares monitoring state before reconcile begins. // For stopped CR - excludes from monitoring. // For running CR with ancestor - preserves old topology in monitoring. From 294994cb46df01c928073548caf142a30876cd18 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 17:04:06 +0500 Subject: [PATCH 163/233] dev: add host monitoring call --- pkg/controller/chi/controller.go | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go index d5090db00..13e481ab2 100644 --- a/pkg/controller/chi/controller.go +++ b/pkg/controller/chi/controller.go @@ -708,6 +708,33 @@ func (c *Controller) updateWatchAsync(chi *metrics.WatchedCR) { } } +// addHostWatch adds a single host to monitoring +func (c *Controller) addHostWatch(host *api.Host) { + req := &clickhouse.HostRequest{ + CRNamespace: host.Runtime.Address.Namespace, + CRName: host.Runtime.Address.CHIName, + ClusterName: host.Runtime.Address.ClusterName, + Host: &metrics.WatchedHost{ + Name: host.Name, + Hostname: host.Runtime.Address.FQDN, + TCPPort: host.TCPPort.Value(), + TLSPort: host.TLSPort.Value(), + HTTPPort: host.HTTPPort.Value(), + HTTPSPort: host.HTTPSPort.Value(), + }, + } + go c.addHostWatchAsync(req) +} + +// addHostWatchAsync +func (c *Controller) addHostWatchAsync(req *clickhouse.HostRequest) { + if err := clickhouse.InformMetricsExporterAboutWatchedHost(req); err != nil { + log.V(1).F().Info("FAIL add host watch (%s/%s/%s/%s): %q", req.CRNamespace, req.CRName, req.ClusterName, req.Host.Hostname, err) + } else { + log.V(1).Info("OK add host watch (%s/%s/%s/%s)", req.CRNamespace, req.CRName, req.ClusterName, req.Host.Hostname) + } +} + // deleteWatch func (c *Controller) 
deleteWatch(chi *api.ClickHouseInstallation) { watched := metrics.NewWatchedCR(chi) From 13881ddee645445fe50c92b4b73aaea8a1a6c00f Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 17:04:40 +0500 Subject: [PATCH 164/233] dev: add host to minitoring upon reconcile completion --- pkg/controller/chi/worker-reconciler-chi.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/controller/chi/worker-reconciler-chi.go b/pkg/controller/chi/worker-reconciler-chi.go index c66279a2c..8206a8022 100644 --- a/pkg/controller/chi/worker-reconciler-chi.go +++ b/pkg/controller/chi/worker-reconciler-chi.go @@ -740,6 +740,9 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.Host) error { }, }) + // Host reconcile completed successfully - add it to monitoring + w.addHostToMonitoring(host) + metrics.HostReconcilesCompleted(ctx, host.GetCR()) metrics.HostReconcilesTimings(ctx, host.GetCR(), time.Since(startTime).Seconds()) From 638ecdb28fbe90fad52b8f3b77f96771d59ab314 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sun, 8 Feb 2026 17:05:00 +0500 Subject: [PATCH 165/233] dev: add host to monitoring upon replication lag catch up --- pkg/controller/chi/worker-wait-exclude-include-restart.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/controller/chi/worker-wait-exclude-include-restart.go b/pkg/controller/chi/worker-wait-exclude-include-restart.go index f613e76eb..96812025b 100644 --- a/pkg/controller/chi/worker-wait-exclude-include-restart.go +++ b/pkg/controller/chi/worker-wait-exclude-include-restart.go @@ -325,6 +325,9 @@ func (w *worker) catchReplicationLag(ctx context.Context, host *api.Host) error "Host/shard/cluster: %d/%d/%s", host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName) + // Host is alive but catching up - add to monitoring so metrics are collected during the wait + w.addHostToMonitoring(host) + err := w.waitHostHasNoReplicationDelay(ctx, host) if err == nil { 
w.a.V(1). From e1d2b3c86aff8aeee3237a8c62342829920d02af Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 9 Feb 2026 13:07:03 +0500 Subject: [PATCH 166/233] dev: bump go version --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index a85b1901e..ebceb3187 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/altinity/clickhouse-operator -go 1.25.5 +go 1.25.6 replace ( github.com/emicklei/go-restful/v3 => github.com/emicklei/go-restful/v3 v3.10.0 From d5fd330878fc8b3378ee38d6a7a7112d0febf891 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 9 Feb 2026 13:26:05 +0500 Subject: [PATCH 167/233] test: metrics --- tests/e2e/run_tests_metrics.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/e2e/run_tests_metrics.sh b/tests/e2e/run_tests_metrics.sh index 127cec49c..99e7ed0a8 100755 --- a/tests/e2e/run_tests_metrics.sh +++ b/tests/e2e/run_tests_metrics.sh @@ -6,5 +6,12 @@ export OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"test"}" export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" +RUN_ALL="${RUN_ALL:-""}" ONLY="${ONLY:-"*"}" -python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_metrics_exporter/${ONLY}" --native + +# We may want run all tests to the end ignoring failed tests in the process +if [[ ! 
-z "${RUN_ALL}" ]]; then + RUN_ALL="--test-to-end" +fi + +python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_metrics_exporter/${ONLY}" ${RUN_ALL} --parallel off -o short --native From e5649b6f990553aecdaa0042a536bf5326cc3c5c Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 9 Feb 2026 14:08:49 +0500 Subject: [PATCH 168/233] test: make metrics laucnher --- tests/e2e/run_tests_metrics_local.sh | 54 ++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100755 tests/e2e/run_tests_metrics_local.sh diff --git a/tests/e2e/run_tests_metrics_local.sh b/tests/e2e/run_tests_metrics_local.sh new file mode 100755 index 000000000..8ced4a343 --- /dev/null +++ b/tests/e2e/run_tests_metrics_local.sh @@ -0,0 +1,54 @@ +#!/bin/bash +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + +OPERATOR_VERSION="${OPERATOR_VERSION:-"dev"}" +OPERATOR_DOCKER_REPO="${OPERATOR_DOCKER_REPO:-"altinity/clickhouse-operator"}" +OPERATOR_IMAGE="${OPERATOR_IMAGE:-"${OPERATOR_DOCKER_REPO}:${OPERATOR_VERSION}"}" +METRICS_EXPORTER_DOCKER_REPO="${METRICS_EXPORTER_DOCKER_REPO:-"altinity/metrics-exporter"}" +METRICS_EXPORTER_IMAGE="${METRICS_EXPORTER_IMAGE:-"${METRICS_EXPORTER_DOCKER_REPO}:${OPERATOR_VERSION}"}" +IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"IfNotPresent"}" +OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"test"}" +OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" +ONLY="${ONLY:-"*"}" +VERBOSITY="${VERBOSITY:-"2"}" +RUN_ALL="${RUN_ALL:-""}" +MINIKUBE_RESET="${MINIKUBE_RESET:-""}" +MINIKUBE_PRELOAD_IMAGES="${MINIKUBE_PRELOAD_IMAGES:-""}" + +if [[ ! -z "${MINIKUBE_RESET}" ]]; then + SKIP_K9S="yes" "${CUR_DIR}/run_minikube_reset.sh" +fi + +if [[ ! 
-z "${MINIKUBE_PRELOAD_IMAGES}" ]]; then + echo "pre-load images into minikube" + IMAGES=" + clickhouse/clickhouse-server:23.3 + clickhouse/clickhouse-server:25.3 + clickhouse/clickhouse-server:latest + " + for image in ${IMAGES}; do + docker pull -q ${image} && \ + echo "pushing to minikube" && \ + minikube image load ${image} --overwrite=false --daemon=true + done + echo "images pre-loaded" +fi + +# +# Build images and run tests +# +echo "Build" && \ +VERBOSITY="${VERBOSITY}" "${CUR_DIR}/../../dev/image_build_all_dev.sh" && \ +echo "Load images" && \ +minikube image load "${OPERATOR_IMAGE}" && \ +minikube image load "${METRICS_EXPORTER_IMAGE}" && \ +echo "Images prepared" && \ +OPERATOR_DOCKER_REPO="${OPERATOR_DOCKER_REPO}" \ +METRICS_EXPORTER_DOCKER_REPO="${METRICS_EXPORTER_DOCKER_REPO}" \ +OPERATOR_VERSION="${OPERATOR_VERSION}" \ +IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY}" \ +OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE}" \ +OPERATOR_INSTALL="${OPERATOR_INSTALL}" \ +ONLY="${ONLY}" \ +RUN_ALL="${RUN_ALL}" \ +"${CUR_DIR}/run_tests_metrics.sh" From 8d6dfd205859abc802219f2bc1c80c3c089f4169 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 9 Feb 2026 14:09:11 +0500 Subject: [PATCH 169/233] dev: write event for replication lag catching process --- pkg/controller/chi/worker-wait-exclude-include-restart.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/controller/chi/worker-wait-exclude-include-restart.go b/pkg/controller/chi/worker-wait-exclude-include-restart.go index 96812025b..370aa2867 100644 --- a/pkg/controller/chi/worker-wait-exclude-include-restart.go +++ b/pkg/controller/chi/worker-wait-exclude-include-restart.go @@ -22,6 +22,7 @@ import ( api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/apis/common/types" "github.com/altinity/clickhouse-operator/pkg/chop" + a "github.com/altinity/clickhouse-operator/pkg/controller/common/announcer" 
"github.com/altinity/clickhouse-operator/pkg/controller/common/poller" "github.com/altinity/clickhouse-operator/pkg/controller/common/poller/domain" "github.com/altinity/clickhouse-operator/pkg/interfaces" @@ -321,6 +322,7 @@ func (w *worker) catchReplicationLag(ctx context.Context, host *api.Host) error w.a.V(1). M(host).F(). + WithEvent(host.GetCR(), a.EventActionReconcile, a.EventReasonReconcileInProgress). Info("Wait for host to catch replication lag - START "+ "Host/shard/cluster: %d/%d/%s", host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName) @@ -332,6 +334,7 @@ func (w *worker) catchReplicationLag(ctx context.Context, host *api.Host) error if err == nil { w.a.V(1). M(host).F(). + WithEvent(host.GetCR(), a.EventActionReconcile, a.EventReasonReconcileCompleted). Info("Wait for host to catch replication lag - COMPLETED "+ "Host/shard/cluster: %d/%d/%s", host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName, @@ -341,6 +344,7 @@ func (w *worker) catchReplicationLag(ctx context.Context, host *api.Host) error } else { w.a.V(1). M(host).F(). + WithEvent(host.GetCR(), a.EventActionReconcile, a.EventReasonReconcileFailed). 
Info("Wait for host to catch replication lag - FAILED "+ "Host/shard/cluster: %d/%d/%s"+ "err: %v ", From 48ef06339e03fd5072787cfebafe2a9bfd0adbe8 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 10 Feb 2026 14:38:52 +0500 Subject: [PATCH 170/233] dev: minor test --- .../v1/type_status_test.go | 19 ++++++++++++------- pkg/controller/chi/controller_test.go | 4 ++-- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_status_test.go b/pkg/apis/clickhouse.altinity.com/v1/type_status_test.go index 1f1c3cc09..5d6f5b3e7 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_status_test.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_status_test.go @@ -2,9 +2,12 @@ package v1 import ( - "github.com/stretchr/testify/require" "sync" "testing" + + "github.com/stretchr/testify/require" + + "github.com/altinity/clickhouse-operator/pkg/apis/common/types" ) var normalizedChiA = &ClickHouseInstallation{} @@ -158,12 +161,14 @@ func Test_ChiStatus_BasicOperations_SingleStatus_ConcurrencyTest(t *testing.T) { name: "CopyFrom", goRoutineA: func(s *Status) { s.PushAction("always-present-action") // CopyFrom preserves existing actions (does not clobber) - s.CopyFrom(copyTestStatusFrom, CopyStatusOptions{ - Actions: true, - Errors: true, - MainFields: true, - WholeStatus: true, - InheritableFields: true, + s.CopyFrom(copyTestStatusFrom, types.CopyStatusOptions{ + CopyStatusFieldGroup: types.CopyStatusFieldGroup{ + FieldGroupActions: true, + FieldGroupErrors: true, + FieldGroupMain: true, + FieldGroupWholeStatus: true, + FieldGroupInheritable: true, + }, }) }, goRoutineB: func(s *Status) { diff --git a/pkg/controller/chi/controller_test.go b/pkg/controller/chi/controller_test.go index e93ed1d4f..54acd2f26 100644 --- a/pkg/controller/chi/controller_test.go +++ b/pkg/controller/chi/controller_test.go @@ -39,8 +39,8 @@ func Test_shouldEnqueue(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if 
got := shouldEnqueue(tt.chi); got != tt.want { - t.Errorf("shouldEnqueue() = %v, want %v", got, tt.want) + if got := ShouldEnqueue(tt.chi); got != tt.want { + t.Errorf("ShouldEnqueue() = %v, want %v", got, tt.want) } }) } From a2e99009e91a48abd9c6c108a3b4f0376337902e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 10 Feb 2026 14:49:25 +0500 Subject: [PATCH 171/233] dev: bump operator version --- tests/e2e/test_operator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index a7a2119e7..ec876c66a 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -568,7 +568,7 @@ def test_operator_upgrade(self, manifest, service, version_from, version_to=None @Name("test_010009_1. Test operator upgrade") @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_UpgradingOperator("1.0")) @Tags("NO_PARALLEL") -def test_010009_1(self, version_from="0.25.2", version_to=None): +def test_010009_1(self, version_from="0.25.6", version_to=None): if version_to is None: version_to = self.context.operator_version From e38482a9e0802c14875dbf291bf3dfd45d8ee8eb Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 10 Feb 2026 14:53:01 +0500 Subject: [PATCH 172/233] dev: rework local test --- tests/e2e/run_tests_local.sh | 161 +++++++---------------------------- 1 file changed, 31 insertions(+), 130 deletions(-) diff --git a/tests/e2e/run_tests_local.sh b/tests/e2e/run_tests_local.sh index 7a98760a1..1a4ada831 100755 --- a/tests/e2e/run_tests_local.sh +++ b/tests/e2e/run_tests_local.sh @@ -1,113 +1,58 @@ #!/bin/bash CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +source "${CUR_DIR}/test_common.sh" -OPERATOR_VERSION="${OPERATOR_VERSION:-"dev"}" -OPERATOR_DOCKER_REPO="${OPERATOR_DOCKER_REPO:-"altinity/clickhouse-operator"}" -OPERATOR_IMAGE="${OPERATOR_IMAGE:-"${OPERATOR_DOCKER_REPO}:${OPERATOR_VERSION}"}" 
-METRICS_EXPORTER_DOCKER_REPO="${METRICS_EXPORTER_DOCKER_REPO:-"altinity/metrics-exporter"}" -METRICS_EXPORTER_IMAGE="${METRICS_EXPORTER_IMAGE:-"${METRICS_EXPORTER_DOCKER_REPO}:${OPERATOR_VERSION}"}" -IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"IfNotPresent"}" -OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"test"}" -OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" -ONLY="${ONLY:-"*"}" -MINIKUBE_RESET="${MINIKUBE_RESET:-""}" -VERBOSITY="${VERBOSITY:-"2"}" -# We may want run all tests to the end ignoring failed tests in the process -RUN_ALL="${RUN_ALL:-""}" - -# Possible options are: -# 1. operator -# 2. keeper -# 3. metrics +# Possible options: operator, keeper, metrics +# Can be set via env var for non-interactive use: WHAT=metrics ./run_tests_local.sh WHAT="${WHAT}" -# Possible options are: -# 1. replace -# 2. apply -KUBECTL_MODE="${KUBECTL_MODE:-"apply"}" - -# # +# Interactive menu (or non-interactive if WHAT is already set) # function select_test_goal() { local specified_goal="${1}" - if [[ ! -z "${specified_goal}" ]]; then - echo "Having specified explicitly: ${specified_goal}" - return 0 - else - echo "What would you like to start? 
Possible options:" - echo " 1 - test operator" - echo " 2 - test keeper" - echo " 3 - test metrics" - echo -n "Enter your choice (1, 2, 3): " - read COMMAND - # Trim EOL from the command received - COMMAND=$(echo "${COMMAND}" | tr -d '\n\t\r ') - case "${COMMAND}" in - "1") - echo "picking operator" - return 1 - ;; - "2") - echo "piking keeper" - return 2 - ;; - "3") - echo "picking metrics" - return 3 - ;; - *) - echo "don't know what '${COMMAND}' is, so picking operator" - return 1 - ;; - esac + if [[ -n "${specified_goal}" ]]; then + echo "Having specified explicitly: ${specified_goal}" >&2 + echo "${specified_goal}" + return fi -} -# -# -# -function goal_name() { - local goal_code=${1} - case "${goal_code}" in - "0") - echo "${WHAT}" - ;; - "1") - echo "operator" - ;; - "2") - echo "keeper" - ;; - "3") - echo "metrics" - ;; + echo "What would you like to start? Possible options:" >&2 + echo " 1 - test operator" >&2 + echo " 2 - test keeper" >&2 + echo " 3 - test metrics" >&2 + echo -n "Enter your choice (1, 2, 3): " >&2 + read COMMAND + COMMAND=$(echo "${COMMAND}" | tr -d '\n\t\r ') + case "${COMMAND}" in + "1") echo "operator" ;; + "2") echo "keeper" ;; + "3") echo "metrics" ;; *) + echo "don't know what '${COMMAND}' is, so picking operator" >&2 echo "operator" ;; esac } -select_test_goal "${WHAT}" -WHAT=$(goal_name $?) 
+WHAT=$(select_test_goal "${WHAT}") -echo "Provided command is: ${WHAT}" -echo -n "Which means we are going to " +# Map test goal to dedicated local script case "${WHAT}" in "operator") - DEFAULT_EXECUTABLE="run_tests_operator.sh" - echo "test OPERATOR" + LOCAL_SCRIPT="run_tests_operator_local.sh" + echo "Selected: test OPERATOR" ;; "keeper") - DEFAULT_EXECUTABLE="run_tests_keeper.sh" - echo "test KEEPER" + LOCAL_SCRIPT="run_tests_keeper_local.sh" + echo "Selected: test KEEPER" ;; "metrics") - DEFAULT_EXECUTABLE="run_tests_metrics.sh" - echo "test METRICS" + LOCAL_SCRIPT="run_tests_metrics_local.sh" + echo "Selected: test METRICS" ;; *) - echo "exit because I do not know what '${WHAT}' is" + echo "Unknown test type: '${WHAT}', exiting" exit 1 ;; esac @@ -117,49 +62,5 @@ echo "Press to start test immediately (if you agree with specified optio echo "In case no input provided tests would start in ${TIMEOUT} seconds automatically" read -t ${TIMEOUT} -EXECUTABLE="${EXECUTABLE:-"${DEFAULT_EXECUTABLE}"}" -MINIKUBE_PRELOAD_IMAGES="${MINIKUBE_PRELOAD_IMAGES:-""}" - -if [[ ! -z "${MINIKUBE_RESET}" ]]; then - SKIP_K9S="yes" ./run_minikube_reset.sh -fi - -if [[ ! 
-z "${MINIKUBE_PRELOAD_IMAGES}" ]]; then - echo "pre-load images into minikube" - IMAGES=" - clickhouse/clickhouse-server:23.3 - clickhouse/clickhouse-server:23.8 - clickhouse/clickhouse-server:24.3 - clickhouse/clickhouse-server:24.8 - clickhouse/clickhouse-server:25.3 - clickhouse/clickhouse-server:latest - altinity/clickhouse-server:24.8.14.10459.altinitystable - docker.io/zookeeper:3.8.4 - " - for image in ${IMAGES}; do - docker pull -q ${image} && \ - echo "pushing to minikube" && \ - minikube image load ${image} --overwrite=false --daemon=true - done - echo "images pre-loaded" -fi - -# -# Build images and run tests -# -echo "Build" && \ -VERBOSITY="${VERBOSITY}" ${CUR_DIR}/../../dev/image_build_all_dev.sh && \ -echo "Load images" && \ -minikube image load "${OPERATOR_IMAGE}" && \ -minikube image load "${METRICS_EXPORTER_IMAGE}" && \ -echo "Images prepared" && \ -OPERATOR_DOCKER_REPO="${OPERATOR_DOCKER_REPO}" \ -METRICS_EXPORTER_DOCKER_REPO="${METRICS_EXPORTER_DOCKER_REPO}" \ -OPERATOR_VERSION="${OPERATOR_VERSION}" \ -IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY}" \ -OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE}" \ -OPERATOR_INSTALL="${OPERATOR_INSTALL}" \ -ONLY="${ONLY}" \ -KUBECTL_MODE="${KUBECTL_MODE}" \ -RUN_ALL="${RUN_ALL}" \ -"${CUR_DIR}/${EXECUTABLE}" +# Dispatch to the dedicated local script +"${CUR_DIR}/${LOCAL_SCRIPT}" From bbbd04da84053a947fce49b6a8ff9f85874df95f Mon Sep 17 00:00:00 2001 From: saba Date: Tue, 10 Feb 2026 16:40:15 +0100 Subject: [PATCH 173/233] Fix distributed_ddl replicas_path mismatch causing ON CLUSTER DDL to hang The operator generates a custom per CHI (e.g. /clickhouse/{chi-name}/task_queue/ddl) but does not set . ClickHouse 25.8+ falls back to the default replicas_path from config.xml (/clickhouse/task_queue/replicas), which is in a different subtree. This causes the DDL workers to never pick up tasks, making any CREATE TABLE ... ON CLUSTER hang indefinitely. Add alongside so both use the same per-CHI subtree in ZooKeeper/Keeper. 
--- pkg/model/chi/config/generator.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pkg/model/chi/config/generator.go b/pkg/model/chi/config/generator.go index 34dcd9b12..163d2a5fa 100644 --- a/pkg/model/chi/config/generator.go +++ b/pkg/model/chi/config/generator.go @@ -33,6 +33,9 @@ const ( // Pattern for string path used in XXX DistributedDDLPathPattern = "/clickhouse/%s/task_queue/ddl" + // Pattern for string path used in XXX + DistributedDDLReplicasPathPattern = "/clickhouse/%s/task_queue/replicas" + // Special auto-generated clusters. Each of these clusters lay over all replicas in CHI // 1. Cluster with one shard and all replicas. Used to duplicate data over all replicas. // 2. Cluster with all shards (1 replica). Used to gather/scatter data over all replicas. @@ -178,6 +181,7 @@ func (c *Generator) getHostZookeeper(host *chi.Host) string { // X util.Iline(b, 4, "") util.Iline(b, 4, " %s", c.getDistributedDDLPath()) + util.Iline(b, 4, " %s", c.getDistributedDDLReplicasPath()) if c.opts.DistributedDDL.HasProfile() { util.Iline(b, 4, " %s", c.opts.DistributedDDL.GetProfile()) } @@ -545,6 +549,11 @@ func (c *Generator) getDistributedDDLPath() string { return fmt.Sprintf(DistributedDDLPathPattern, c.cr.GetName()) } +// getDistributedDDLReplicasPath returns string path used in XXX +func (c *Generator) getDistributedDDLReplicasPath() string { + return fmt.Sprintf(DistributedDDLReplicasPathPattern, c.cr.GetName()) +} + // getRemoteServersReplicaHostname returns hostname (podhostname + service or FQDN) for "remote_servers.xml" // based on .Spec.Defaults.ReplicasUseFQDN func (c *Generator) getRemoteServersReplicaHostname(host *chi.Host) string { From e16df7ba2745c2ca4c45dd29e934a0bd7362f20e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 10 Feb 2026 23:07:39 +0500 Subject: [PATCH 174/233] test: extract common functions --- tests/e2e/test_common.sh | 136 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 
100755 tests/e2e/test_common.sh diff --git a/tests/e2e/test_common.sh b/tests/e2e/test_common.sh new file mode 100755 index 000000000..e154e6247 --- /dev/null +++ b/tests/e2e/test_common.sh @@ -0,0 +1,136 @@ +#!/bin/bash + +# Common library for test scripts. Source this file, do not execute it. +# Usage: source "${CUR_DIR}/test_common.sh" + +COMMON_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + +# ============================================================================= +# Variable defaults (all overridable via environment) +# ============================================================================= + +# Operator versioning +OPERATOR_VERSION="${OPERATOR_VERSION:-"dev"}" +OPERATOR_DOCKER_REPO="${OPERATOR_DOCKER_REPO:-"altinity/clickhouse-operator"}" +OPERATOR_IMAGE="${OPERATOR_IMAGE:-"${OPERATOR_DOCKER_REPO}:${OPERATOR_VERSION}"}" +METRICS_EXPORTER_DOCKER_REPO="${METRICS_EXPORTER_DOCKER_REPO:-"altinity/metrics-exporter"}" +METRICS_EXPORTER_IMAGE="${METRICS_EXPORTER_IMAGE:-"${METRICS_EXPORTER_DOCKER_REPO}:${OPERATOR_VERSION}"}" + +# NOTE: IMAGE_PULL_POLICY is intentionally NOT set here. +# Test runners default to "Always" (CI), local scripts default to "IfNotPresent" (minikube). 
+ +# Test execution +OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"test"}" +OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" +ONLY="${ONLY:-"*"}" +VERBOSITY="${VERBOSITY:-"2"}" +RUN_ALL="${RUN_ALL:-""}" +KUBECTL_MODE="${KUBECTL_MODE:-"apply"}" + +# Minikube control +MINIKUBE_RESET="${MINIKUBE_RESET:-""}" +MINIKUBE_PRELOAD_IMAGES="${MINIKUBE_PRELOAD_IMAGES:-""}" + +# ============================================================================= +# Image lists for preloading into minikube +# ============================================================================= + +PRELOAD_IMAGES_OPERATOR=( + "clickhouse/clickhouse-server:23.3" + "clickhouse/clickhouse-server:23.8" + "clickhouse/clickhouse-server:24.3" + "clickhouse/clickhouse-server:24.8" + "clickhouse/clickhouse-server:25.3" + "clickhouse/clickhouse-server:latest" + "altinity/clickhouse-server:24.8.14.10459.altinitystable" + "docker.io/zookeeper:3.8.4" +) + +PRELOAD_IMAGES_KEEPER=( + "clickhouse/clickhouse-server:23.3" + "clickhouse/clickhouse-server:23.8" + "clickhouse/clickhouse-server:24.3" + "clickhouse/clickhouse-server:24.8" + "clickhouse/clickhouse-server:25.3" + "clickhouse/clickhouse-server:latest" + "altinity/clickhouse-server:24.8.14.10459.altinitystable" + "docker.io/zookeeper:3.8.4" +) + +PRELOAD_IMAGES_METRICS=( + "clickhouse/clickhouse-server:23.3" + "clickhouse/clickhouse-server:25.3" + "clickhouse/clickhouse-server:latest" +) + +# ============================================================================= +# Functions +# ============================================================================= + +# Install Python dependencies needed by TestFlows +function common_install_pip_requirements() { + pip3 install -r "${COMMON_DIR}/../image/requirements.txt" +} + +# Convert RUN_ALL env var to --test-to-end flag. 
+# Usage: RUN_ALL_FLAG=$(common_convert_run_all) +function common_convert_run_all() { + if [[ -n "${RUN_ALL}" ]]; then + echo "--test-to-end" + fi +} + +# Export the standard set of env vars that regression.py / settings.py expects +function common_export_test_env() { + export OPERATOR_NAMESPACE + export OPERATOR_INSTALL + export IMAGE_PULL_POLICY +} + +# Reset minikube cluster if MINIKUBE_RESET is set +function common_minikube_reset() { + if [[ -n "${MINIKUBE_RESET}" ]]; then + SKIP_K9S="yes" "${COMMON_DIR}/run_minikube_reset.sh" + fi +} + +# Pull images and load them into minikube. +# Only runs if MINIKUBE_PRELOAD_IMAGES is set. +# Usage: common_preload_images "${PRELOAD_IMAGES_OPERATOR[@]}" +function common_preload_images() { + if [[ -n "${MINIKUBE_PRELOAD_IMAGES}" ]]; then + echo "pre-load images into minikube" + for image in "$@"; do + docker pull -q "${image}" && \ + echo "pushing ${image} to minikube" && \ + minikube image load "${image}" --overwrite=false --daemon=true + done + echo "images pre-loaded" + fi +} + +# Build operator + metrics-exporter docker images and load them into minikube +function common_build_and_load_images() { + echo "Build" && \ + VERBOSITY="${VERBOSITY}" "${COMMON_DIR}/../../dev/image_build_all_dev.sh" && \ + echo "Load images" && \ + minikube image load "${OPERATOR_IMAGE}" && \ + minikube image load "${METRICS_EXPORTER_IMAGE}" && \ + echo "Images prepared" +} + +# Run a test runner script with all env vars forwarded. 
+# Usage: common_run_test_script "run_tests_operator.sh" +function common_run_test_script() { + local script="${1}" + OPERATOR_DOCKER_REPO="${OPERATOR_DOCKER_REPO}" \ + METRICS_EXPORTER_DOCKER_REPO="${METRICS_EXPORTER_DOCKER_REPO}" \ + OPERATOR_VERSION="${OPERATOR_VERSION}" \ + IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY}" \ + OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE}" \ + OPERATOR_INSTALL="${OPERATOR_INSTALL}" \ + ONLY="${ONLY}" \ + KUBECTL_MODE="${KUBECTL_MODE}" \ + RUN_ALL="${RUN_ALL}" \ + "${COMMON_DIR}/${script}" +} From bd796fdb246dbaee33d56139d3d67a9fbe7bf735 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 10 Feb 2026 23:09:10 +0500 Subject: [PATCH 175/233] test: keeper tests runner --- tests/e2e/run_tests_keeper.sh | 20 ++++++++++++++------ tests/e2e/run_tests_keeper_local.sh | 10 ++++++++++ 2 files changed, 24 insertions(+), 6 deletions(-) create mode 100755 tests/e2e/run_tests_keeper_local.sh diff --git a/tests/e2e/run_tests_keeper.sh b/tests/e2e/run_tests_keeper.sh index 68318a3ff..3597b30bb 100755 --- a/tests/e2e/run_tests_keeper.sh +++ b/tests/e2e/run_tests_keeper.sh @@ -1,10 +1,18 @@ #!/bin/bash CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -pip3 install -r "$CUR_DIR/../image/requirements.txt" +source "${CUR_DIR}/test_common.sh" -export OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"test"}" -export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" -export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" +IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" -ONLY="${ONLY:-"*"}" -python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_keeper/${ONLY}" --native +common_install_pip_requirements +common_export_test_env + +RUN_ALL_FLAG=$(common_convert_run_all) + +python3 "${COMMON_DIR}/../regression.py" \ + --only="/regression/e2e.test_keeper/${ONLY}" \ + ${RUN_ALL_FLAG} \ + -o short \ + --trim-results on \ + --debug \ + --native diff --git a/tests/e2e/run_tests_keeper_local.sh b/tests/e2e/run_tests_keeper_local.sh new 
file mode 100755 index 000000000..ac41f284d --- /dev/null +++ b/tests/e2e/run_tests_keeper_local.sh @@ -0,0 +1,10 @@ +#!/bin/bash +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +source "${CUR_DIR}/test_common.sh" + +IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"IfNotPresent"}" + +common_minikube_reset +common_preload_images "${PRELOAD_IMAGES_KEEPER[@]}" +common_build_and_load_images && \ +common_run_test_script "run_tests_keeper.sh" From 623f8007d69a333c02b9e38387508e911cb75841 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 11 Feb 2026 00:27:02 +0500 Subject: [PATCH 176/233] test: metrics --- tests/e2e/run_tests_metrics.sh | 24 +++++++------ tests/e2e/run_tests_metrics_local.sh | 54 +++------------------------- 2 files changed, 18 insertions(+), 60 deletions(-) diff --git a/tests/e2e/run_tests_metrics.sh b/tests/e2e/run_tests_metrics.sh index 99e7ed0a8..71a07234a 100755 --- a/tests/e2e/run_tests_metrics.sh +++ b/tests/e2e/run_tests_metrics.sh @@ -1,17 +1,19 @@ #!/bin/bash CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -pip3 install -r "$CUR_DIR/../image/requirements.txt" +source "${CUR_DIR}/test_common.sh" -export OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"test"}" -export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" -export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" +IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" -RUN_ALL="${RUN_ALL:-""}" -ONLY="${ONLY:-"*"}" +common_install_pip_requirements +common_export_test_env -# We may want run all tests to the end ignoring failed tests in the process -if [[ ! 
-z "${RUN_ALL}" ]]; then - RUN_ALL="--test-to-end" -fi +RUN_ALL_FLAG=$(common_convert_run_all) -python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_metrics_exporter/${ONLY}" ${RUN_ALL} --parallel off -o short --native +python3 "${COMMON_DIR}/../regression.py" \ + --only="/regression/e2e.test_metrics_exporter/${ONLY}" \ + ${RUN_ALL_FLAG} \ + --parallel off \ + -o short \ + --trim-results on \ + --debug \ + --native diff --git a/tests/e2e/run_tests_metrics_local.sh b/tests/e2e/run_tests_metrics_local.sh index 8ced4a343..6cc08fbc4 100755 --- a/tests/e2e/run_tests_metrics_local.sh +++ b/tests/e2e/run_tests_metrics_local.sh @@ -1,54 +1,10 @@ #!/bin/bash CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +source "${CUR_DIR}/test_common.sh" -OPERATOR_VERSION="${OPERATOR_VERSION:-"dev"}" -OPERATOR_DOCKER_REPO="${OPERATOR_DOCKER_REPO:-"altinity/clickhouse-operator"}" -OPERATOR_IMAGE="${OPERATOR_IMAGE:-"${OPERATOR_DOCKER_REPO}:${OPERATOR_VERSION}"}" -METRICS_EXPORTER_DOCKER_REPO="${METRICS_EXPORTER_DOCKER_REPO:-"altinity/metrics-exporter"}" -METRICS_EXPORTER_IMAGE="${METRICS_EXPORTER_IMAGE:-"${METRICS_EXPORTER_DOCKER_REPO}:${OPERATOR_VERSION}"}" IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"IfNotPresent"}" -OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"test"}" -OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" -ONLY="${ONLY:-"*"}" -VERBOSITY="${VERBOSITY:-"2"}" -RUN_ALL="${RUN_ALL:-""}" -MINIKUBE_RESET="${MINIKUBE_RESET:-""}" -MINIKUBE_PRELOAD_IMAGES="${MINIKUBE_PRELOAD_IMAGES:-""}" -if [[ ! -z "${MINIKUBE_RESET}" ]]; then - SKIP_K9S="yes" "${CUR_DIR}/run_minikube_reset.sh" -fi - -if [[ ! 
-z "${MINIKUBE_PRELOAD_IMAGES}" ]]; then - echo "pre-load images into minikube" - IMAGES=" - clickhouse/clickhouse-server:23.3 - clickhouse/clickhouse-server:25.3 - clickhouse/clickhouse-server:latest - " - for image in ${IMAGES}; do - docker pull -q ${image} && \ - echo "pushing to minikube" && \ - minikube image load ${image} --overwrite=false --daemon=true - done - echo "images pre-loaded" -fi - -# -# Build images and run tests -# -echo "Build" && \ -VERBOSITY="${VERBOSITY}" "${CUR_DIR}/../../dev/image_build_all_dev.sh" && \ -echo "Load images" && \ -minikube image load "${OPERATOR_IMAGE}" && \ -minikube image load "${METRICS_EXPORTER_IMAGE}" && \ -echo "Images prepared" && \ -OPERATOR_DOCKER_REPO="${OPERATOR_DOCKER_REPO}" \ -METRICS_EXPORTER_DOCKER_REPO="${METRICS_EXPORTER_DOCKER_REPO}" \ -OPERATOR_VERSION="${OPERATOR_VERSION}" \ -IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY}" \ -OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE}" \ -OPERATOR_INSTALL="${OPERATOR_INSTALL}" \ -ONLY="${ONLY}" \ -RUN_ALL="${RUN_ALL}" \ -"${CUR_DIR}/run_tests_metrics.sh" +common_minikube_reset +common_preload_images "${PRELOAD_IMAGES_METRICS[@]}" +common_build_and_load_images && \ +common_run_test_script "run_tests_metrics.sh" From 6ba04c32f9cdc8baa85ca8e50571dbbe643f53a4 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 11 Feb 2026 00:27:13 +0500 Subject: [PATCH 177/233] test: operator --- tests/e2e/run_tests_operator.sh | 31 +++++++++++---------------- tests/e2e/run_tests_operator_local.sh | 10 +++++++++ 2 files changed, 23 insertions(+), 18 deletions(-) create mode 100755 tests/e2e/run_tests_operator_local.sh diff --git a/tests/e2e/run_tests_operator.sh b/tests/e2e/run_tests_operator.sh index 2af4c1354..a7a8d9bfc 100755 --- a/tests/e2e/run_tests_operator.sh +++ b/tests/e2e/run_tests_operator.sh @@ -1,23 +1,18 @@ #!/bin/bash CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -pip3 install -r "$CUR_DIR/../image/requirements.txt" +source "${CUR_DIR}/test_common.sh" 
-export OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"test"}" -export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" -export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" -RUN_ALL="${RUN_ALL:-""}" -ONLY="${ONLY:-"*"}" +IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" -# We may want run all tests to the end ignoring failed tests in the process -if [[ ! -z "${RUN_ALL}" ]]; then - RUN_ALL="--test-to-end" -fi +common_install_pip_requirements +common_export_test_env -python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_operator/${ONLY}" ${RUN_ALL} -o short --trim-results on --debug --native -#python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_operator/${ONLY}" --test-to-end -o short --trim-results on --debug --native -#python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_operator/${ONLY}" --parallel-pool ${MAX_PARALLEL} -o short --trim-results on --debug --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/* -o short --trim-results on --debug --native --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/* --trim-results on --debug --native --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/test_008_2* --trim-results on --debug --native --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/test_008_2* --trim-results on --debug --native -o short --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/*32* --trim-results on --debug --native -o short --native +RUN_ALL_FLAG=$(common_convert_run_all) + +python3 "${COMMON_DIR}/../regression.py" \ + --only="/regression/e2e.test_operator/${ONLY}" \ + ${RUN_ALL_FLAG} \ + -o short \ + --trim-results on \ + --debug \ + --native diff --git a/tests/e2e/run_tests_operator_local.sh b/tests/e2e/run_tests_operator_local.sh new file mode 100755 index 000000000..45a00dca3 --- /dev/null +++ b/tests/e2e/run_tests_operator_local.sh @@ -0,0 +1,10 @@ 
+#!/bin/bash +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +source "${CUR_DIR}/test_common.sh" + +IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"IfNotPresent"}" + +common_minikube_reset +common_preload_images "${PRELOAD_IMAGES_OPERATOR[@]}" +common_build_and_load_images && \ +common_run_test_script "run_tests_operator.sh" From ee0bd084472d22bdc5fbfdc8eb24c0449865b4af Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 11 Feb 2026 00:31:28 +0500 Subject: [PATCH 178/233] dev: undo --- pkg/controller/common/storage/storage-reconciler.go | 7 +++---- pkg/interfaces/interfaces-main.go | 1 - pkg/model/common/creator/pvc.go | 3 +-- pkg/model/common/creator/stateful-set-storage.go | 2 +- pkg/model/common/tags/labeler/labels.go | 10 ++++------ 5 files changed, 9 insertions(+), 14 deletions(-) diff --git a/pkg/controller/common/storage/storage-reconciler.go b/pkg/controller/common/storage/storage-reconciler.go index 23987beec..51455189e 100644 --- a/pkg/controller/common/storage/storage-reconciler.go +++ b/pkg/controller/common/storage/storage-reconciler.go @@ -127,7 +127,7 @@ func (w *Reconciler) reconcilePVCFromVolumeMount( pvcName := w.namer.Name(interfaces.NamePVCNameByVolumeClaimTemplate, host, volumeClaimTemplate) // Which PVC are we going to reconcile - pvc, chopCreated, err := w.fetchOrCreatePVC(ctx, host, pvcNamespace, pvcName, volumeMount.Name, shouldCHOPCreatePVC, &volumeClaimTemplate.Spec, volumeClaimTemplate) + pvc, chopCreated, err := w.fetchOrCreatePVC(ctx, host, pvcNamespace, pvcName, volumeMount.Name, shouldCHOPCreatePVC, &volumeClaimTemplate.Spec) if err != nil { // Unable to fetch or create PVC correctly. return nil @@ -156,7 +156,7 @@ func (w *Reconciler) reconcilePVCFromVolumeMount( // Refresh PVC model. 
Since PVC is just deleted refreshed model may not be fetched from the k8s, // but can be provided by the operator still - pvc, _, _ = w.fetchOrCreatePVC(ctx, host, pvcNamespace, pvcName, volumeMount.Name, shouldCHOPCreatePVC, &volumeClaimTemplate.Spec, volumeClaimTemplate) + pvc, _, _ = w.fetchOrCreatePVC(ctx, host, pvcNamespace, pvcName, volumeMount.Name, shouldCHOPCreatePVC, &volumeClaimTemplate.Spec) reconcileError = ErrPVCWithLostPVDeleted } @@ -189,7 +189,6 @@ func (w *Reconciler) fetchOrCreatePVC( volumeMountName string, operatorInCharge bool, pvcSpec *core.PersistentVolumeClaimSpec, - template *api.VolumeClaimTemplate, ) ( pvc *core.PersistentVolumeClaim, created bool, @@ -221,7 +220,7 @@ func (w *Reconciler) fetchOrCreatePVC( "PVC (%s/%s/%s/%s) model provided by the operator", namespace, host.GetName(), volumeMountName, name, ) - pvc = w.task.Creator().CreatePVC(name, namespace, host, pvcSpec, template) + pvc = w.task.Creator().CreatePVC(name, namespace, host, pvcSpec) return pvc, true, nil } else { // PVC is not available and the operator is not in charge of the PVC diff --git a/pkg/interfaces/interfaces-main.go b/pkg/interfaces/interfaces-main.go index 4af799936..b99828d62 100644 --- a/pkg/interfaces/interfaces-main.go +++ b/pkg/interfaces/interfaces-main.go @@ -98,7 +98,6 @@ type ICreator interface { namespace string, host *api.Host, spec *core.PersistentVolumeClaimSpec, - template *api.VolumeClaimTemplate, ) *core.PersistentVolumeClaim TagPVC( pvc *core.PersistentVolumeClaim, diff --git a/pkg/model/common/creator/pvc.go b/pkg/model/common/creator/pvc.go index 43a104fe6..71808b663 100644 --- a/pkg/model/common/creator/pvc.go +++ b/pkg/model/common/creator/pvc.go @@ -28,7 +28,6 @@ func (c *Creator) CreatePVC( namespace string, host *api.Host, spec *core.PersistentVolumeClaimSpec, - template *api.VolumeClaimTemplate, ) *core.PersistentVolumeClaim { persistentVolumeClaim := core.PersistentVolumeClaim{ TypeMeta: meta.TypeMeta{ @@ -44,7 +43,7 @@ func (c 
*Creator) CreatePVC( // we are close to proper disk inheritance // Right now we hit the following error: // "Forbidden: updates to StatefulSet spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden" - Labels: c.macro.Scope(host).Map(c.tagger.Label(interfaces.LabelNewPVC, host, template)), + Labels: c.macro.Scope(host).Map(c.tagger.Label(interfaces.LabelNewPVC, host, false)), Annotations: c.macro.Scope(host).Map(c.tagger.Annotate(interfaces.AnnotateNewPVC, host)), // Incompatible with PV retain policy // Fails PV retain policy test (19) diff --git a/pkg/model/common/creator/stateful-set-storage.go b/pkg/model/common/creator/stateful-set-storage.go index f58c5ffa8..e203e8d1c 100644 --- a/pkg/model/common/creator/stateful-set-storage.go +++ b/pkg/model/common/creator/stateful-set-storage.go @@ -127,7 +127,7 @@ func (c *Creator) stsSetupVolumeForPVCTemplate( k8s.StatefulSetAppendVolumes(statefulSet, volume) } else { // For templates we should not specify namespace where PVC would be located - pvc := *c.CreatePVC(volumeClaimTemplate.Name, "", host, &volumeClaimTemplate.Spec, volumeClaimTemplate) + pvc := *c.CreatePVC(volumeClaimTemplate.Name, "", host, &volumeClaimTemplate.Spec) k8s.StatefulSetAppendPersistentVolumeClaims(statefulSet, pvc) } } diff --git a/pkg/model/common/tags/labeler/labels.go b/pkg/model/common/tags/labeler/labels.go index 9ecb72a24..a5f48383c 100644 --- a/pkg/model/common/tags/labeler/labels.go +++ b/pkg/model/common/tags/labeler/labels.go @@ -105,17 +105,15 @@ func (l *Labeler) _labelExistingPV(pv *core.PersistentVolume, host *api.Host) ma func (l *Labeler) labelNewPVC(params ...any) map[string]string { var host *api.Host - var template *api.VolumeClaimTemplate - if len(params) > 1 { + if len(params) > 0 { host = params[0].(*api.Host) - template = params[1].(*api.VolumeClaimTemplate) - return l._labelNewPVC(host, template) + return l._labelNewPVC(host) } panic("not enough params for labeler") } -func (l *Labeler) 
_labelNewPVC(host *api.Host, template *api.VolumeClaimTemplate) map[string]string { - return l.getHostScopeReclaimPolicy(host, template, false) +func (l *Labeler) _labelNewPVC(host *api.Host) map[string]string { + return l.GetHostScope(host, false) } func (l *Labeler) labelExistingPVC(params ...any) map[string]string { From 6c005acd1d632dcc03a590fe48c925a90e243ecd Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 11 Feb 2026 14:53:34 +0500 Subject: [PATCH 179/233] dev: move repeat functionality into main script --- tests/e2e/run_tests_local.sh | 47 ++++++++++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/tests/e2e/run_tests_local.sh b/tests/e2e/run_tests_local.sh index 1a4ada831..1778ff3d6 100755 --- a/tests/e2e/run_tests_local.sh +++ b/tests/e2e/run_tests_local.sh @@ -6,6 +6,10 @@ source "${CUR_DIR}/test_common.sh" # Can be set via env var for non-interactive use: WHAT=metrics ./run_tests_local.sh WHAT="${WHAT}" +# Repeat mode: "success" = repeat until success, "failure" = repeat until failure, empty = single run +# Usage: REPEAT_UNTIL=success ./run_tests_local.sh +REPEAT_UNTIL="${REPEAT_UNTIL:-""}" + # # Interactive menu (or non-interactive if WHAT is already set) # @@ -62,5 +66,44 @@ echo "Press to start test immediately (if you agree with specified optio echo "In case no input provided tests would start in ${TIMEOUT} seconds automatically" read -t ${TIMEOUT} -# Dispatch to the dedicated local script -"${CUR_DIR}/${LOCAL_SCRIPT}" +# Dispatch to the dedicated local script, with optional repeat mode +case "${REPEAT_UNTIL}" in + "success") + # Repeat until tests pass + start=$(date) + run=1 + echo "start run ${run}" + until "${CUR_DIR}/${LOCAL_SCRIPT}"; do + echo "run number ${run} failed" + echo "-------------------------------------------" + run=$((run+1)) + echo "start run ${run}" + done + end=$(date) + echo "=============================================" + echo "Run number ${run} succeeded" + echo "start 
time: ${start}" + echo "end time: ${end}" + ;; + "failure") + # Repeat until tests fail + start=$(date) + run=1 + echo "start run ${run}" + while "${CUR_DIR}/${LOCAL_SCRIPT}"; do + echo "run number ${run} completed successfully" + echo "-------------------------------------------" + run=$((run+1)) + echo "start run ${run}" + done + end=$(date) + echo "=============================================" + echo "Run number ${run} failed" + echo "start time: ${start}" + echo "end time: ${end}" + ;; + *) + # Single run + "${CUR_DIR}/${LOCAL_SCRIPT}" + ;; +esac From b0e758f3709f6be10453a6d86f840a5ecc29e27e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 11 Feb 2026 14:54:29 +0500 Subject: [PATCH 180/233] test: unhanse paraller --- tests/e2e/run_tests_parallel.sh | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/e2e/run_tests_parallel.sh b/tests/e2e/run_tests_parallel.sh index cebc337cf..90c50239f 100755 --- a/tests/e2e/run_tests_parallel.sh +++ b/tests/e2e/run_tests_parallel.sh @@ -1,14 +1,17 @@ #!/bin/bash set -e CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -pip3 install -r "$CUR_DIR/../image/requirements.txt" +source "${CUR_DIR}/test_common.sh" + +IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" + +common_install_pip_requirements +common_export_test_env + rm -rfv /tmp/test*.log pad="000" MAX_PARALLEL=${MAX_PARALLEL:-5} -export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" -export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" - function run_test_parallel() { test_names=("$@") run_test_cmd="" From 89e96fd8c56ea2fe86aa3ddeff1c741cf79e714e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 11 Feb 2026 14:54:46 +0500 Subject: [PATCH 181/233] test: remove obsoleted scripts --- tests/e2e/run_tests_local_apply.sh | 5 ----- tests/e2e/run_tests_local_replace.sh | 5 ----- tests/e2e/run_tests_local_until_failed.sh | 25 ---------------------- tests/e2e/run_tests_local_until_success.sh | 25 
---------------------- 4 files changed, 60 deletions(-) delete mode 100755 tests/e2e/run_tests_local_apply.sh delete mode 100755 tests/e2e/run_tests_local_replace.sh delete mode 100755 tests/e2e/run_tests_local_until_failed.sh delete mode 100755 tests/e2e/run_tests_local_until_success.sh diff --git a/tests/e2e/run_tests_local_apply.sh b/tests/e2e/run_tests_local_apply.sh deleted file mode 100755 index 9860aff25..000000000 --- a/tests/e2e/run_tests_local_apply.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" - -KUBECTL_MODE="apply" \ -"${CUR_DIR}/run_tests_local.sh" diff --git a/tests/e2e/run_tests_local_replace.sh b/tests/e2e/run_tests_local_replace.sh deleted file mode 100755 index cb109aad3..000000000 --- a/tests/e2e/run_tests_local_replace.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" - -KUBECTL_MODE="replace" \ -"${CUR_DIR}/run_tests_local.sh" diff --git a/tests/e2e/run_tests_local_until_failed.sh b/tests/e2e/run_tests_local_until_failed.sh deleted file mode 100755 index 75fae2ef3..000000000 --- a/tests/e2e/run_tests_local_until_failed.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -function start_run { - local run_to_start=${1} - echo "start run ${run_to_start}" -} - -start=$(date) -run=1 -start_run ${run} -while ./run_tests_local.sh; do - echo "run number ${run} completed successfully" - echo "-------------------------------------------" - echo "-------------------------------------------" - echo "-------------------------------------------" - - run=$((run+1)) - start_run ${run} -done -end=$(date) - -echo "=============================================" -echo "Run number ${run} failed" -echo "start time: ${start}" -echo "end time: ${end}" diff --git a/tests/e2e/run_tests_local_until_success.sh b/tests/e2e/run_tests_local_until_success.sh deleted file mode 100755 index 5b1bd31d3..000000000 --- 
a/tests/e2e/run_tests_local_until_success.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -function start_run { - local run_to_start=${1} - echo "start run ${run_to_start}" -} - -start=$(date) -run=1 -start_run ${run} -until ./run_tests_local.sh; do - echo "run number ${run} failed" - echo "-------------------------------------------" - echo "-------------------------------------------" - echo "-------------------------------------------" - - run=$((run+1)) - start_run ${run} -done -end=$(date) - -echo "=============================================" -echo "Run number ${run} succeeded" -echo "start time: ${start}" -echo "end time: ${end}" From 7f2b6a8a668da0ef8d667f37b1512a2806b1e233 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 11 Feb 2026 14:57:19 +0500 Subject: [PATCH 182/233] test: clarify runner --- tests/e2e/run_tests_local.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/e2e/run_tests_local.sh b/tests/e2e/run_tests_local.sh index 1778ff3d6..58d9f5d4f 100755 --- a/tests/e2e/run_tests_local.sh +++ b/tests/e2e/run_tests_local.sh @@ -2,11 +2,17 @@ CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" source "${CUR_DIR}/test_common.sh" -# Possible options: operator, keeper, metrics +# Test component select options: +# - operator +# - keeper +# - metrics # Can be set via env var for non-interactive use: WHAT=metrics ./run_tests_local.sh WHAT="${WHAT}" -# Repeat mode: "success" = repeat until success, "failure" = repeat until failure, empty = single run +# Repeat mode options: +# - success = repeat until success +# - failure = repeat until failure +# - not specified / empty = single run # Usage: REPEAT_UNTIL=success ./run_tests_local.sh REPEAT_UNTIL="${REPEAT_UNTIL:-""}" From e0647b8c53a7bea6935b17264a20551b4ad4f50e Mon Sep 17 00:00:00 2001 From: alz Date: Thu, 12 Feb 2026 21:21:23 +0300 Subject: [PATCH 183/233] test for serviceAccountName --- tests/e2e/manifests/chk/test-020000-chk-sa.yaml | 4 
++++ tests/e2e/manifests/chk/test-020000-chk.yaml | 1 + tests/e2e/test_operator.py | 5 +++++ 3 files changed, 10 insertions(+) create mode 100644 tests/e2e/manifests/chk/test-020000-chk-sa.yaml diff --git a/tests/e2e/manifests/chk/test-020000-chk-sa.yaml b/tests/e2e/manifests/chk/test-020000-chk-sa.yaml new file mode 100644 index 000000000..8db86ffa6 --- /dev/null +++ b/tests/e2e/manifests/chk/test-020000-chk-sa.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-020000-chk-sa diff --git a/tests/e2e/manifests/chk/test-020000-chk.yaml b/tests/e2e/manifests/chk/test-020000-chk.yaml index fd3d413ad..ba6c00750 100644 --- a/tests/e2e/manifests/chk/test-020000-chk.yaml +++ b/tests/e2e/manifests/chk/test-020000-chk.yaml @@ -25,6 +25,7 @@ spec: - name: clickhouse-keeper imagePullPolicy: IfNotPresent image: "clickhouse/clickhouse-keeper:25.3" + serviceAccountName: test-020000-chk-sa volumeClaimTemplates: - name: default spec: diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index ec876c66a..624644b7c 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -5449,6 +5449,7 @@ def test_020000(self): chk = yaml_manifest.get_name(util.get_full_path(chk_manifest)) with Given("Install CHK"): + kubectl.apply(util.get_full_path("manifests/chk/test-020000-chk-sa.yaml")) kubectl.create_and_check( manifest=chk_manifest, kind="chk", check={ @@ -5463,6 +5464,10 @@ def test_020000(self): for o in chk_objects: print(o) + with Then("Service account should be set"): + chk_pod_spec = kubectl.get_chk_pod_spec(chk) + assert chk_pod_spec["serviceAccountName"] == "test-020000-chk-sa" + with And("There should be a service for cluster a cluster"): kubectl.check_service(f"keeper-{chk}-service", "ClusterIP", headless = True) From 1cf5a74fee97bccc30fce667b783ba3a8e86df26 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 13 Feb 2026 14:47:59 +0500 Subject: [PATCH 184/233] test: leanup modifications --- 
tests/e2e/kubectl.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/e2e/kubectl.py b/tests/e2e/kubectl.py index 3906bed34..b157668ab 100644 --- a/tests/e2e/kubectl.py +++ b/tests/e2e/kubectl.py @@ -7,7 +7,7 @@ from testflows.asserts import error # from testflows.connect import Shell -# import e2e.settings as settings +import e2e.settings as settings import e2e.yaml_manifest as yaml_manifest import e2e.util as util @@ -73,6 +73,9 @@ def delete_kind(kind, name, ns=None, ok_to_fail=False, shell=None): def delete_chi(chi, ns=None, wait=True, ok_undeleted = False, ok_to_fail=False, shell=None): + if settings.no_cleanup: + note(f"NO_CLEANUP is set, skipping delete_chi: {chi}") + return delete_kind("chi", chi, ns=ns, ok_to_fail=ok_to_fail, shell=shell) if wait: wait_objects( @@ -98,6 +101,9 @@ def delete_chi(chi, ns=None, wait=True, ok_undeleted = False, ok_to_fail=False, def delete_chk(chk, ns=None, wait=True, ok_to_fail=False, shell=None): + if settings.no_cleanup: + note(f"NO_CLEANUP is set, skipping delete_chk: {chk}") + return delete_kind("chk", chk, ns=ns, ok_to_fail=ok_to_fail, shell=shell) From 522f8ee3bc287bb262690085d1a4671bb917da99 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 13 Feb 2026 14:51:04 +0500 Subject: [PATCH 185/233] test: enhanse scripts --- tests/e2e/settings.py | 2 ++ tests/e2e/steps.py | 4 ++++ tests/e2e/test_operator.py | 1 + 3 files changed, 7 insertions(+) diff --git a/tests/e2e/settings.py b/tests/e2e/settings.py index e3569edea..50b0644ae 100644 --- a/tests/e2e/settings.py +++ b/tests/e2e/settings.py @@ -80,4 +80,6 @@ def get_docker_compose_path(): minio_version = "latest" +no_cleanup = True if "NO_CLEANUP" in os.environ else False + step_by_step = True if "STEP" in os.environ else False diff --git a/tests/e2e/steps.py b/tests/e2e/steps.py index 22881fa1d..53c7fb805 100644 --- a/tests/e2e/steps.py +++ b/tests/e2e/steps.py @@ -13,6 +13,7 @@ from testflows.asserts import error import e2e.kubectl 
as kubectl +import e2e.settings as settings @TestStep(Given) @@ -45,6 +46,9 @@ def create_test_namespace(self, force=False): @TestStep(Finally) def delete_test_namespace(self): + if settings.no_cleanup: + note(f"NO_CLEANUP is set, skipping namespace deletion: {self.context.test_namespace}") + return shell = get_shell() self.context.shell = shell util.delete_namespace(namespace=self.context.test_namespace, delete_chi=True) diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index ec876c66a..e2192fa60 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -467,6 +467,7 @@ def test_010008_3(self): with Finally("I clean up"): delete_test_namespace() + @TestCheck def test_operator_upgrade(self, manifest, service, version_from, version_to=None, shell=None): if version_to is None: From ddda22b31ffd24e4a0024e00581083466d568246 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 13 Feb 2026 14:51:32 +0500 Subject: [PATCH 186/233] test: runscript modifications for cleanup --- tests/e2e/test_common.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/e2e/test_common.sh b/tests/e2e/test_common.sh index e154e6247..6f52d2abf 100755 --- a/tests/e2e/test_common.sh +++ b/tests/e2e/test_common.sh @@ -26,6 +26,7 @@ ONLY="${ONLY:-"*"}" VERBOSITY="${VERBOSITY:-"2"}" RUN_ALL="${RUN_ALL:-""}" KUBECTL_MODE="${KUBECTL_MODE:-"apply"}" +NO_CLEANUP="${NO_CLEANUP:-""}" # Minikube control MINIKUBE_RESET="${MINIKUBE_RESET:-""}" @@ -85,6 +86,7 @@ function common_export_test_env() { export OPERATOR_NAMESPACE export OPERATOR_INSTALL export IMAGE_PULL_POLICY + export NO_CLEANUP } # Reset minikube cluster if MINIKUBE_RESET is set @@ -132,5 +134,6 @@ function common_run_test_script() { ONLY="${ONLY}" \ KUBECTL_MODE="${KUBECTL_MODE}" \ RUN_ALL="${RUN_ALL}" \ + NO_CLEANUP="${NO_CLEANUP}" \ "${COMMON_DIR}/${script}" } From a33c7be86abec9985410e0991b1f309a29647d64 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 13 Feb 2026 14:52:31 +0500 
Subject: [PATCH 187/233] dev: move columns --- ...-install-yaml-template-01-section-crd-01-chi-chit.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml index 8ca0c26bf..8a820aee2 100644 --- a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml +++ b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml @@ -26,6 +26,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -49,10 +53,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count From 9cd1ee50541d9fd81fd6c8038c41a90defd80bbc Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 13 Feb 2026 14:53:01 +0500 Subject: [PATCH 188/233] dev: move columns --- ...rator-install-yaml-template-01-section-crd-03-chk.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml index ca90b8a61..6374c1426 100644 --- a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml +++ b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml @@ -22,6 +22,10 @@ spec: 
served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -45,10 +49,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count From 3aa838df5a7035a269c088949b634718c9f460e7 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 13 Feb 2026 15:32:15 +0500 Subject: [PATCH 189/233] dev: enable CHK startup proble --- pkg/model/chk/creator/probe.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/model/chk/creator/probe.go b/pkg/model/chk/creator/probe.go index 046af8eb8..ed61f0579 100644 --- a/pkg/model/chk/creator/probe.go +++ b/pkg/model/chk/creator/probe.go @@ -34,7 +34,7 @@ func NewProbeManager() *ProbeManager { func (m *ProbeManager) CreateProbe(what interfaces.ProbeType, host *api.Host) *core.Probe { switch what { case interfaces.ProbeDefaultStartup: - return nil + return m.createDefaultLivenessProbe(host) case interfaces.ProbeDefaultLiveness: return m.createDefaultLivenessProbe(host) case interfaces.ProbeDefaultReadiness: From 5c2cf51a3ab171a3a6b0e1f2593b9671aa37cae9 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 13 Feb 2026 15:32:49 +0500 Subject: [PATCH 190/233] dev: iverwrite chk startup probe wait --- pkg/model/chk/normalizer/normalizer.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pkg/model/chk/normalizer/normalizer.go b/pkg/model/chk/normalizer/normalizer.go index a63ebc057..02314f3b8 100644 --- a/pkg/model/chk/normalizer/normalizer.go +++ b/pkg/model/chk/normalizer/normalizer.go @@ -416,6 +416,15 @@ func (n *Normalizer) normalizeReconcileStatefulSet(sts chi.ReconcileStatefulSet) func (n *Normalizer) normalizeReconcileHost(rh chi.ReconcileHost) chi.ReconcileHost { // 
Normalize rh = rh.Normalize(types.NewStringBool(false), true) + // Enable startup probe wait so operator waits for each pod's Keeper process + // to start (ruok/imok) before proceeding to the next host. + // This prevents simultaneous pod restarts that would cause quorum loss. + // Readiness wait stays false to avoid deadlock on fresh clusters + // where Raft quorum doesn't exist yet. + if rh.Wait.Probes == nil { + rh.Wait.Probes = &chi.ReconcileHostWaitProbes{} + } + rh.Wait.Probes.Startup = types.NewStringBool(true) return rh } From 076e9a93f92203b1b11caaccc3bef68f23f0b951 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 13 Feb 2026 17:55:01 +0500 Subject: [PATCH 191/233] test: test clarification --- tests/e2e/test_operator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index e2192fa60..4e254f48e 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -5508,7 +5508,7 @@ def test_020001(self): else: kubectl.delete_chk(ch_name) - with Then("There should not objects with overallped names"): + with Then("There should not be objects with overlapped names"): overlap = list(set(objects['chi']) & set(objects['chk'])) if len(overlap)>0: print("Overlapped objects:") From 32293af555f30f2ccd0023fe822c1cf6bee09859 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 14 Feb 2026 00:21:00 +0500 Subject: [PATCH 192/233] test: cleanup clarification --- tests/e2e/kubectl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/e2e/kubectl.py b/tests/e2e/kubectl.py index b157668ab..553c5514f 100644 --- a/tests/e2e/kubectl.py +++ b/tests/e2e/kubectl.py @@ -74,7 +74,7 @@ def delete_kind(kind, name, ns=None, ok_to_fail=False, shell=None): def delete_chi(chi, ns=None, wait=True, ok_undeleted = False, ok_to_fail=False, shell=None): if settings.no_cleanup: - note(f"NO_CLEANUP is set, skipping delete_chi: {chi}") + print(f"NO_CLEANUP is set, skipping 
delete_chi: {chi}") return delete_kind("chi", chi, ns=ns, ok_to_fail=ok_to_fail, shell=shell) if wait: @@ -102,7 +102,7 @@ def delete_chi(chi, ns=None, wait=True, ok_undeleted = False, ok_to_fail=False, def delete_chk(chk, ns=None, wait=True, ok_to_fail=False, shell=None): if settings.no_cleanup: - note(f"NO_CLEANUP is set, skipping delete_chk: {chk}") + print(f"NO_CLEANUP is set, skipping delete_chk: {chk}") return delete_kind("chk", chk, ns=ns, ok_to_fail=ok_to_fail, shell=shell) From 8373621241d242edeb7444f31d885546df69fda9 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 14 Feb 2026 00:21:55 +0500 Subject: [PATCH 193/233] dev: env var cleanup --- tests/e2e/settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e/settings.py b/tests/e2e/settings.py index 50b0644ae..c1aa19bfe 100644 --- a/tests/e2e/settings.py +++ b/tests/e2e/settings.py @@ -80,6 +80,6 @@ def get_docker_compose_path(): minio_version = "latest" -no_cleanup = True if "NO_CLEANUP" in os.environ else False +no_cleanup = os.environ.get("NO_CLEANUP", "").lower() in ("1", "true", "yes") step_by_step = True if "STEP" in os.environ else False From 76bf20f417405e7e0e695ecef6453eaeafe8b25a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 14 Feb 2026 00:23:18 +0500 Subject: [PATCH 194/233] test: typos --- tests/e2e/steps.py | 2 +- tests/e2e/test_operator.py | 15 ++++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/tests/e2e/steps.py b/tests/e2e/steps.py index 53c7fb805..703dcc9e6 100644 --- a/tests/e2e/steps.py +++ b/tests/e2e/steps.py @@ -47,7 +47,7 @@ def create_test_namespace(self, force=False): @TestStep(Finally) def delete_test_namespace(self): if settings.no_cleanup: - note(f"NO_CLEANUP is set, skipping namespace deletion: {self.context.test_namespace}") + print(f"NO_CLEANUP is set, skipping namespace deletion: {self.context.test_namespace}") return shell = get_shell() self.context.shell = shell diff --git 
a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index 4e254f48e..08e8db943 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -5464,7 +5464,7 @@ def test_020000(self): for o in chk_objects: print(o) - with And("There should be a service for cluster a cluster"): + with And("There should be a service for a cluster"): kubectl.check_service(f"keeper-{chk}-service", "ClusterIP", headless = True) with And("There should be a service for first replica"): @@ -5503,18 +5503,19 @@ def test_020001(self): objects[ch_kind] = kubectl.get_obj_names_grepped("pod,service,sts,pvc,cm,pdb,secret", grep=ch_name) print(*objects[ch_kind], sep='\n') - if ch_kind == 'chi': - kubectl.delete_chi(ch_name) - else: - kubectl.delete_chk(ch_name) + with When(f"Delete {ch_kind}"): + if ch_kind == 'chi': + kubectl.delete_chi(ch_name) + else: + kubectl.delete_chk(ch_name) with Then("There should not be objects with overlapped names"): overlap = list(set(objects['chi']) & set(objects['chk'])) - if len(overlap)>0: + if len(overlap) > 0: print("Overlapped objects:") print(*overlap, sep='\n') - assert len(overlap) == 0 + assert len(overlap) == 0, f"{len(overlap)} overlapping resource(s):\n" + "\n".join(f" {o}" for o in overlap) with Finally("I clean up"): delete_test_namespace() From 0358f910ac9fa5c55811017052a46b279b7950ed Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Sat, 14 Feb 2026 00:32:16 +0500 Subject: [PATCH 195/233] test: format --- tests/e2e/test_operator.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index 79085fdd6..36a2745a1 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -5452,7 +5452,8 @@ def test_020000(self): with Given("Install CHK"): kubectl.apply(util.get_full_path("manifests/chk/test-020000-chk-sa.yaml")) kubectl.create_and_check( - manifest=chk_manifest, kind="chk", + manifest=chk_manifest, + kind="chk", check={ "pod_count": 
1, "pdb": {"keeper": 0}, @@ -5470,13 +5471,13 @@ def test_020000(self): assert chk_pod_spec["serviceAccountName"] == "test-020000-chk-sa" with And("There should be a service for cluster a cluster"): - kubectl.check_service(f"keeper-{chk}-service", "ClusterIP", headless = True) + kubectl.check_service(f"keeper-{chk}-service", "ClusterIP", headless=True) with And("There should be a service for first replica"): - kubectl.check_service(f"keeper-{chk}-0", "ClusterIP", headless = True) + kubectl.check_service(f"keeper-{chk}-0", "ClusterIP", headless=True) with And("There should be a PVC"): - assert kubectl.get_count("pvc", label = f"-l clickhouse-keeper.altinity.com/chk={chk}") == 1 + assert kubectl.get_count("pvc", label=f"-l clickhouse-keeper.altinity.com/chk={chk}") == 1 kubectl.delete_chk(chk) From de4303925b3190c0bb04ff473283af63c9f88f6c Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 16 Feb 2026 12:59:55 +0500 Subject: [PATCH 196/233] dev: minor --- deploy/builder/templates-config/config.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/builder/templates-config/config.yaml b/deploy/builder/templates-config/config.yaml index c2770da49..740daacf9 100644 --- a/deploy/builder/templates-config/config.yaml +++ b/deploy/builder/templates-config/config.yaml @@ -210,7 +210,6 @@ clickhouse: quotas: settings: files: - - version: ">= 23.5" spec: configuration: From da96e457110ba37be52cc562979ce6decf25433a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 16 Feb 2026 13:07:05 +0500 Subject: [PATCH 197/233] dev: config --- config/config.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/config/config.yaml b/config/config.yaml index ad55c58f7..07413ef8d 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -216,7 +216,6 @@ clickhouse: quotas: settings: files: - - version: ">= 23.5" spec: configuration: From 91fefa4599daa8767fec6b35ee52699b03ec3e99 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 16 Feb 2026 13:07:30 +0500 
Subject: [PATCH 198/233] env: manifests --- .../clickhouse-operator-install-ansible.yaml | 25 +++++++++---------- ...house-operator-install-bundle-v1beta1.yaml | 25 +++++++++---------- .../clickhouse-operator-install-bundle.yaml | 25 +++++++++---------- ...use-operator-install-template-v1beta1.yaml | 25 +++++++++---------- .../clickhouse-operator-install-template.yaml | 25 +++++++++---------- .../clickhouse-operator-install-tf.yaml | 25 +++++++++---------- deploy/operator/parts/crd.yaml | 24 +++++++++--------- 7 files changed, 84 insertions(+), 90 deletions(-) diff --git a/deploy/operator/clickhouse-operator-install-ansible.yaml b/deploy/operator/clickhouse-operator-install-ansible.yaml index bb0a1965a..0f1bda900 100644 --- a/deploy/operator/clickhouse-operator-install-ansible.yaml +++ b/deploy/operator/clickhouse-operator-install-ansible.yaml @@ -33,6 +33,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -56,10 +60,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -1566,6 +1566,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -1589,10 +1593,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -3664,6 +3664,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + 
jsonPath: .status.status - name: version type: string description: Operator version @@ -3687,10 +3691,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -5034,7 +5034,6 @@ data: quotas: settings: files: - - version: ">= 23.5" spec: configuration: diff --git a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml index b577d3be0..d0aa077e2 100644 --- a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml +++ b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml @@ -23,6 +23,10 @@ spec: - chi version: v1 additionalPrinterColumns: + - name: status + type: string + description: Resource status + JSONPath: .status.status - name: version type: string description: Operator version @@ -46,10 +50,6 @@ spec: description: TaskID priority: 1 # show in wide view JSONPath: .status.taskID - - name: status - type: string - description: Resource status - JSONPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -1546,6 +1546,10 @@ spec: - chit version: v1 additionalPrinterColumns: + - name: status + type: string + description: Resource status + JSONPath: .status.status - name: version type: string description: Operator version @@ -1569,10 +1573,6 @@ spec: description: TaskID priority: 1 # show in wide view JSONPath: .status.taskID - - name: status - type: string - description: Resource status - JSONPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -3626,6 +3626,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -3649,10 +3653,6 @@ 
spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -5233,7 +5233,6 @@ data: quotas: settings: files: - - version: ">= 23.5" spec: configuration: diff --git a/deploy/operator/clickhouse-operator-install-bundle.yaml b/deploy/operator/clickhouse-operator-install-bundle.yaml index 534c8841f..e097e8984 100644 --- a/deploy/operator/clickhouse-operator-install-bundle.yaml +++ b/deploy/operator/clickhouse-operator-install-bundle.yaml @@ -26,6 +26,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -49,10 +53,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -1559,6 +1559,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -1582,10 +1586,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -3657,6 +3657,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -3680,10 +3684,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - 
description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -5293,7 +5293,6 @@ data: quotas: settings: files: - - version: ">= 23.5" spec: configuration: diff --git a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml index 2c0302f15..db0f61c73 100644 --- a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml +++ b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml @@ -23,6 +23,10 @@ spec: - chi version: v1 additionalPrinterColumns: + - name: status + type: string + description: Resource status + JSONPath: .status.status - name: version type: string description: Operator version @@ -46,10 +50,6 @@ spec: description: TaskID priority: 1 # show in wide view JSONPath: .status.taskID - - name: status - type: string - description: Resource status - JSONPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -1546,6 +1546,10 @@ spec: - chit version: v1 additionalPrinterColumns: + - name: status + type: string + description: Resource status + JSONPath: .status.status - name: version type: string description: Operator version @@ -1569,10 +1573,6 @@ spec: description: TaskID priority: 1 # show in wide view JSONPath: .status.taskID - - name: status - type: string - description: Resource status - JSONPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -3626,6 +3626,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -3649,10 +3653,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer 
description: Completed hosts count @@ -4980,7 +4980,6 @@ data: quotas: settings: files: - - version: ">= 23.5" spec: configuration: diff --git a/deploy/operator/clickhouse-operator-install-template.yaml b/deploy/operator/clickhouse-operator-install-template.yaml index 4ae74db95..1cf13349b 100644 --- a/deploy/operator/clickhouse-operator-install-template.yaml +++ b/deploy/operator/clickhouse-operator-install-template.yaml @@ -26,6 +26,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -49,10 +53,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -1559,6 +1559,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -1582,10 +1586,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -3657,6 +3657,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -3680,10 +3684,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -5027,7 +5027,6 @@ data: quotas: settings: files: - - version: ">= 23.5" spec: 
configuration: diff --git a/deploy/operator/clickhouse-operator-install-tf.yaml b/deploy/operator/clickhouse-operator-install-tf.yaml index f02a601c7..e8b65a97d 100644 --- a/deploy/operator/clickhouse-operator-install-tf.yaml +++ b/deploy/operator/clickhouse-operator-install-tf.yaml @@ -33,6 +33,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -56,10 +60,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -1566,6 +1566,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -1589,10 +1593,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -3664,6 +3664,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -3687,10 +3691,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -5034,7 +5034,6 @@ data: quotas: settings: files: - - version: ">= 23.5" spec: configuration: diff --git a/deploy/operator/parts/crd.yaml b/deploy/operator/parts/crd.yaml index 6b450ae64..49b5b79de 100644 --- 
a/deploy/operator/parts/crd.yaml +++ b/deploy/operator/parts/crd.yaml @@ -26,6 +26,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -49,10 +53,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -3726,6 +3726,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -3749,10 +3753,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count @@ -8336,6 +8336,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -8359,10 +8363,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count From 0d708440c86eff0f780b184db4a4932a63c8a36b Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Mon, 16 Feb 2026 13:07:41 +0500 Subject: [PATCH 199/233] env: helm --- ...n-clickhouseinstallations.clickhouse.altinity.com.yaml | 8 ++++---- ...ouseinstallationtemplates.clickhouse.altinity.com.yaml | 8 ++++---- ...eeperinstallations.clickhouse-keeper.altinity.com.yaml | 8 ++++---- 3 files changed, 12 insertions(+), 12 deletions(-) 
diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml index 603aabf08..03bb8e057 100644 --- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml +++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml @@ -26,6 +26,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -49,10 +53,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml index a22c51032..4350b913e 100644 --- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml +++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml @@ -26,6 +26,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -49,10 +53,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: 
hosts-completed type: integer description: Completed hosts count diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml index 498938a52..405dac0a1 100644 --- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml +++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml @@ -22,6 +22,10 @@ spec: served: true storage: true additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status - name: version type: string description: Operator version @@ -45,10 +49,6 @@ spec: description: TaskID priority: 1 # show in wide view jsonPath: .status.taskID - - name: status - type: string - description: Resource status - jsonPath: .status.status - name: hosts-completed type: integer description: Completed hosts count From 2365da04e4512b7bd073e3b098d6105917db3921 Mon Sep 17 00:00:00 2001 From: alz Date: Mon, 16 Feb 2026 17:35:35 +0300 Subject: [PATCH 200/233] Cleanup CHK version upgrade test --- .../test-020003-chi-chk-upgrade.yaml} | 5 +- .../e2e/manifests/chk/test-020003-chk-2.yaml | 20 +++++++ tests/e2e/manifests/chk/test-020003-chk.yaml | 20 +++++++ tests/e2e/test_operator.py | 53 ++++++++++++------- tests/regression.py | 3 +- 5 files changed, 76 insertions(+), 25 deletions(-) rename tests/e2e/manifests/{chi/test-049-clickhouse-keeper-upgrade.yaml => chk/test-020003-chi-chk-upgrade.yaml} (71%) create mode 100644 tests/e2e/manifests/chk/test-020003-chk-2.yaml create mode 100644 tests/e2e/manifests/chk/test-020003-chk.yaml diff --git a/tests/e2e/manifests/chi/test-049-clickhouse-keeper-upgrade.yaml b/tests/e2e/manifests/chk/test-020003-chi-chk-upgrade.yaml similarity 
index 71% rename from tests/e2e/manifests/chi/test-049-clickhouse-keeper-upgrade.yaml rename to tests/e2e/manifests/chk/test-020003-chi-chk-upgrade.yaml index 59ae8e336..e26252b3b 100644 --- a/tests/e2e/manifests/chi/test-049-clickhouse-keeper-upgrade.yaml +++ b/tests/e2e/manifests/chk/test-020003-chi-chk-upgrade.yaml @@ -1,17 +1,16 @@ apiVersion: "clickhouse.altinity.com/v1" kind: "ClickHouseInstallation" metadata: - name: test-049-clickhouse-keeper-upgrade + name: test-020003-chi-chk-upgrade spec: useTemplates: - name: clickhouse-version configuration: zookeeper: nodes: - - host: keeper-clickhouse-keeper + - host: keeper-test-020003-chk port: 2181 clusters: - name: default layout: - shardsCount: 1 replicasCount: 2 \ No newline at end of file diff --git a/tests/e2e/manifests/chk/test-020003-chk-2.yaml b/tests/e2e/manifests/chk/test-020003-chk-2.yaml new file mode 100644 index 000000000..e667080ce --- /dev/null +++ b/tests/e2e/manifests/chk/test-020003-chk-2.yaml @@ -0,0 +1,20 @@ +apiVersion: "clickhouse-keeper.altinity.com/v1" +kind: "ClickHouseKeeperInstallation" +metadata: + name: test-020003-chk +spec: + defaults: + templates: + podTemplate: default + configuration: + clusters: + - name: keeper + layout: + replicasCount: 3 + templates: + podTemplates: + - name: default + spec: + containers: + - name: clickhouse-keeper + image: "clickhouse/clickhouse-keeper:25.8" diff --git a/tests/e2e/manifests/chk/test-020003-chk.yaml b/tests/e2e/manifests/chk/test-020003-chk.yaml new file mode 100644 index 000000000..000ce9736 --- /dev/null +++ b/tests/e2e/manifests/chk/test-020003-chk.yaml @@ -0,0 +1,20 @@ +apiVersion: "clickhouse-keeper.altinity.com/v1" +kind: "ClickHouseKeeperInstallation" +metadata: + name: test-020003-chk +spec: + defaults: + templates: + podTemplate: default + configuration: + clusters: + - name: keeper + layout: + replicasCount: 3 + templates: + podTemplates: + - name: default + spec: + containers: + - name: clickhouse-keeper + image: 
"clickhouse/clickhouse-keeper:25.3" diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index 36a2745a1..d29a91d70 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -5563,44 +5563,57 @@ def test_020003(self): when clickhouse-keeper defined with ClickHouseKeeperInstallation.""" create_shell_namespace_clickhouse_template() - util.require_keeper(keeper_type="chk", - keeper_manifest="clickhouse-keeper-3-node-for-test-only.yaml") - manifest = f"manifests/chi/test-049-clickhouse-keeper-upgrade.yaml" - chi = yaml_manifest.get_name(util.get_full_path(manifest)) + + chk_manifest = f"manifests/chk/test-020003-chk.yaml" + chk_manifest_upgraded = f"manifests/chk/test-020003-chk-2.yaml" + chi_manifest = f"manifests/chk/test-020003-chi-chk-upgrade.yaml" + chi = yaml_manifest.get_name(util.get_full_path(chi_manifest)) + chk = yaml_manifest.get_name(util.get_full_path(chk_manifest)) + cluster = "default" keeper_version_from = "25.3" keeper_version_to = "25.8" - with Given("CHI with 2 replicas"): + + with Given("CHK with 3 replicas"): kubectl.create_and_check( - manifest=manifest, + manifest=chk_manifest, + kind = "chk", check={ - "pod_count": 2, + "pod_count": 3, "do_not_delete": 1, }, ) - with And("Make sure Keeper is ready"): - kubectl.wait_chk_status('clickhouse-keeper', 'Completed') + + with And("CHI with 2 replicas"): + kubectl.create_and_check( + manifest=chi_manifest, + check={ + "pod_count": 2, + "do_not_delete": 1, + }, + ) check_replication(chi, {0, 1}, 1) with When(f"I check clickhouse-keeper version is {keeper_version_from}"): assert keeper_version_from in \ - kubectl.get_field('pod', 'chk-clickhouse-keeper-test-0-0-0', '.spec.containers[0].image'), error() + kubectl.get_field('pod', 'chk-test-020003-chk-keeper-0-0-0', '.spec.containers[0].image'), error() with Then(f"I change keeper version to {keeper_version_to}"): - cmd = f"""patch chk clickhouse-keeper --type='json' 
--patch='[{{"op":"replace","path":"/spec/templates/podTemplates/0/spec/containers/0/image","value":"clickhouse/clickhouse-keeper:{keeper_version_to}"}}]'""" - kubectl.launch(cmd) - - with Then("I wait CHK status 1"): - kubectl.wait_chk_status('clickhouse-keeper', 'InProgress') - with Then("I wait CHK status 2"): - kubectl.wait_chk_status('clickhouse-keeper', 'Completed') + kubectl.create_and_check( + manifest=chk_manifest_upgraded, + kind = "chk", + check={ + "pod_count": 3, + "do_not_delete": 1, + }, + ) with When(f"I check clickhouse-keeper version is changed to {keeper_version_to}"): - kubectl.wait_field('pod', 'chk-clickhouse-keeper-test-0-0-0', '.spec.containers[0].image', f'clickhouse/clickhouse-keeper:{keeper_version_to}', retries=5) - kubectl.wait_field('pod', 'chk-clickhouse-keeper-test-0-1-0', '.spec.containers[0].image', f'clickhouse/clickhouse-keeper:{keeper_version_to}', retries=5) - kubectl.wait_field('pod', 'chk-clickhouse-keeper-test-0-2-0', '.spec.containers[0].image', f'clickhouse/clickhouse-keeper:{keeper_version_to}', retries=5) + kubectl.wait_field('pod', 'chk-test-020003-chk-keeper-0-0-0', '.spec.containers[0].image', f'clickhouse/clickhouse-keeper:{keeper_version_to}', retries=1) + kubectl.wait_field('pod', 'chk-test-020003-chk-keeper-0-1-0', '.spec.containers[0].image', f'clickhouse/clickhouse-keeper:{keeper_version_to}', retries=1) + kubectl.wait_field('pod', 'chk-test-020003-chk-keeper-0-2-0', '.spec.containers[0].image', f'clickhouse/clickhouse-keeper:{keeper_version_to}', retries=1) with Then("Wait for ClickHouse to connect to Keeper properly"): for attempt in retries(timeout=180, delay=5): diff --git a/tests/regression.py b/tests/regression.py index 6f4542b6d..496747970 100755 --- a/tests/regression.py +++ b/tests/regression.py @@ -7,8 +7,7 @@ xfails = { # test_operator.py - "/regression/e2e.test_operator/test_010021*": [(Fail, "Storage test are flaky on github")], - "/regression/e2e.test_operator/test_020003*": [(Fail, "Keeper upgrade 
is flaky")], + "/regression/e2e.test_operator/test_010021*": [(Fail, "Storage test is flaky on github")], "/regression/e2e.test_operator/test_020005*": [(Fail, "Keeper scale-up/scale-down is flaky")], # test_clickhouse.py "/regression/e2e.test_clickhouse/test_ch_001*": [(Fail, "Insert Quorum test need to refactoring")], From b62d59019954402eff678372359db65765db7a5a Mon Sep 17 00:00:00 2001 From: alz Date: Tue, 17 Feb 2026 11:12:45 +0300 Subject: [PATCH 201/233] Clarify logging --- pkg/controller/chi/worker-monitoring.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/controller/chi/worker-monitoring.go b/pkg/controller/chi/worker-monitoring.go index 50cfc1bb3..a3f2efab7 100644 --- a/pkg/controller/chi/worker-monitoring.go +++ b/pkg/controller/chi/worker-monitoring.go @@ -44,7 +44,7 @@ func (w *worker) prepareMonitoring(cr *api.ClickHouseInstallation) { WithEvent(cr, a.EventActionReconcile, a.EventReasonReconcileInProgress). WithAction(cr). M(cr).F(). - Info("exclude CR from monitoring") + Info("exclude CHI from monitoring") w.c.deleteWatch(cr) } else { // CR is NOT stopped, it is running @@ -53,7 +53,7 @@ func (w *worker) prepareMonitoring(cr *api.ClickHouseInstallation) { WithEvent(cr, a.EventActionReconcile, a.EventReasonReconcileInProgress). WithAction(cr). M(cr).F(). - Info("ensure CR in monitoring") + Info("ensure CHI in monitoring") if cr.HasAncestor() { // Ensure CR is watched @@ -83,6 +83,6 @@ func (w *worker) addToMonitoring(cr *api.ClickHouseInstallation) { WithEvent(cr, a.EventActionReconcile, a.EventReasonReconcileInProgress). WithAction(cr). M(cr).F(). 
- Info("add CR to monitoring") + Info("add CHI to monitoring") w.c.updateWatch(cr) } From b27b2b08da28f0e05b7cb483a68ff85f2291d16c Mon Sep 17 00:00:00 2001 From: alz Date: Tue, 17 Feb 2026 11:57:54 +0300 Subject: [PATCH 202/233] Tests for FIPS images --- .../manifests/chk/test-020008-chi-fips.yaml | 24 +++++++++ .../manifests/chk/test-020008-chk-fips.yaml | 20 ++++++++ tests/e2e/test_operator.py | 50 ++++++++++++++++--- 3 files changed, 86 insertions(+), 8 deletions(-) create mode 100644 tests/e2e/manifests/chk/test-020008-chi-fips.yaml create mode 100644 tests/e2e/manifests/chk/test-020008-chk-fips.yaml diff --git a/tests/e2e/manifests/chk/test-020008-chi-fips.yaml b/tests/e2e/manifests/chk/test-020008-chi-fips.yaml new file mode 100644 index 000000000..27cc1a7b2 --- /dev/null +++ b/tests/e2e/manifests/chk/test-020008-chi-fips.yaml @@ -0,0 +1,24 @@ +apiVersion: "clickhouse.altinity.com/v1" +kind: "ClickHouseInstallation" +metadata: + name: test-020008-chi-fips +spec: + defaults: + templates: + podTemplate: fips + configuration: + zookeeper: + nodes: + - host: keeper-test-020008-chk-fips + port: 2181 + clusters: + - name: default + layout: + replicasCount: 2 + templates: + podTemplates: + - name: fips + spec: + containers: + - name: clickhouse-pod + image: altinity/clickhouse-server:24.3.5.48.altinityfips \ No newline at end of file diff --git a/tests/e2e/manifests/chk/test-020008-chk-fips.yaml b/tests/e2e/manifests/chk/test-020008-chk-fips.yaml new file mode 100644 index 000000000..7851ee61f --- /dev/null +++ b/tests/e2e/manifests/chk/test-020008-chk-fips.yaml @@ -0,0 +1,20 @@ +apiVersion: "clickhouse-keeper.altinity.com/v1" +kind: "ClickHouseKeeperInstallation" +metadata: + name: test-020008-chk-fips +spec: + defaults: + templates: + podTemplate: fips + configuration: + clusters: + - name: keeper + layout: + replicasCount: 1 + templates: + podTemplates: + - name: fips + spec: + containers: + - name: clickhouse-keeper + image: 
altinity/clickhouse-keeper:24.3.5.48.altinityfips diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index d29a91d70..add629ac2 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -5442,7 +5442,6 @@ def test_010061(self): @TestScenario @Name("test_020000. Test Basic CHK functions") -@Tags("NO_PARALLEL") def test_020000(self): create_shell_namespace_clickhouse_template() @@ -5487,7 +5486,6 @@ def test_020000(self): @TestScenario @Name("test_020001. Test that Kubernetes objects between CHI and CHK does not overlap") -@Tags("NO_PARALLEL") def test_020001(self): create_shell_namespace_clickhouse_template() @@ -5531,7 +5529,6 @@ def test_020001(self): @Name("test_020002. Test CHI with CHK") @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Kind_ClickHouseKeeperInstallation("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_ClickHouseKeeperInstallation_volumeClaimTemplates("1.0")) -@Tags("NO_PARALLEL") def test_020002(self): """Check clickhouse-operator support ClickHouseKeeperInstallation with PVC in keeper manifest.""" @@ -5907,7 +5904,6 @@ def test_020005(self): @TestScenario @Name("test_020006. Test https://github.com/Altinity/clickhouse-operator/issues/1863") -@Tags("NO_PARALLEL") def test_020006(self): create_shell_namespace_clickhouse_template() @@ -5923,15 +5919,12 @@ def test_020006(self): } ) - kubectl.delete_chk(chk) - with Finally("I clean up"): delete_test_namespace() @TestScenario @Name("test_020007. Test fractional CPU requests/limits handling for CHK") -@Tags("NO_PARALLEL") def test_020007(self): create_shell_namespace_clickhouse_template() @@ -5960,7 +5953,48 @@ def test_020007(self): kubectl.force_chk_reconcile(chk, "reconcile2") - kubectl.delete_chk(chk) + with Finally("I clean up"): + delete_test_namespace() + +@TestScenario +@Name("test_020008. 
Test FIPS versions are properly supported by both in CHI and CHK") +@Tags("NO_PARALLEL") +def test_020008(self): + create_shell_namespace_clickhouse_template() + + chk_manifest = f"manifests/chk/test-020008-chk-fips.yaml" + chi_manifest = f"manifests/chk/test-020008-chi-fips.yaml" + chi = yaml_manifest.get_name(util.get_full_path(chi_manifest)) + chk = yaml_manifest.get_name(util.get_full_path(chk_manifest)) + + cluster = "default" + + with Given("CHK with FIPS versions"): + kubectl.create_and_check( + manifest=chk_manifest, + kind = "chk", + check={ + "pod_count": 1, + "do_not_delete": 1, + }, + ) + + + with And("CHI with FIPS version"): + kubectl.create_and_check( + manifest=chi_manifest, + check={ + "pod_count": 2, + "do_not_delete": 1, + }, + ) + + with Then("Clickhouse version is a FIPS one"): + ver = clickhouse.query(chi, 'select version()') + print(ver) + assert "fips" in ver + + check_replication(chi, {0, 1}, 1) with Finally("I clean up"): delete_test_namespace() From 84ff5a26d1d7eb7f5805c668ae24467a7ea77125 Mon Sep 17 00:00:00 2001 From: alz Date: Tue, 17 Feb 2026 12:37:46 +0300 Subject: [PATCH 203/233] Test for CHK stop and suspend attributes --- tests/e2e/test_operator.py | 47 +++++++++++++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index add629ac2..72ad32a30 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -5478,7 +5478,52 @@ def test_020000(self): with And("There should be a PVC"): assert kubectl.get_count("pvc", label=f"-l clickhouse-keeper.altinity.com/chk={chk}") == 1 - kubectl.delete_chk(chk) + with When("Stop CHK"): + cmd = f'patch chk {chk} --type=\'json\' --patch=\'[{{"op":"add","path":"/spec/stop","value":"yes"}}]\'' + kubectl.launch(cmd) + kubectl.wait_chk_status(chk, "InProgress") + kubectl.wait_chk_status(chk, "Completed") + with Then("STS should be there but no running pods"): + label = f"-l 
clickhouse-keeper.altinity.com/chk={chk}" + assert kubectl.get_count('sts', label = label) == 1 + assert kubectl.get_count('pod', label = label) == 0 + + with When("Resume CHK"): + cmd = f'patch chk {chk} --type=\'json\' --patch=\'[{{"op":"replace","path":"/spec/stop","value":"no"}}]\'' + kubectl.launch(cmd) + kubectl.wait_chk_status(chk, "InProgress") + kubectl.wait_chk_status(chk, "Completed") + with Then("Both STS and Pod should be up"): + label = f"-l clickhouse-keeper.altinity.com/chk={chk}" + assert kubectl.get_count('sts', label = label) == 1 + assert kubectl.get_count('pod', label = label) == 1 + + with When("Suspend CHK"): + cmd = f'patch chk {chk} --type=\'json\' --patch=\'[{{"op":"add","path":"/spec/suspend","value":"yes"}}]\'' + kubectl.launch(cmd) + + with Then("Stop CHK one more time"): + cmd = f'patch chk {chk} --type=\'json\' --patch=\'[{{"op":"replace","path":"/spec/stop","value":"yes"}}]\'' + kubectl.launch(cmd) + time.sleep(15) # wait in case there was some sync issue + kubectl.wait_chk_status(chk, "Completed") + with Then("Stop should be ignored. 
Both STS and Pod should be up"): + label = f"-l clickhouse-keeper.altinity.com/chk={chk}" + assert kubectl.get_count('sts', label = label) == 1 + assert kubectl.get_count('pod', label = label) == 1 + + with When("Unsuspend CHK"): + cmd = f'patch chk {chk} --type=\'json\' --patch=\'[{{"op":"remove","path":"/spec/suspend"}}]\'' + kubectl.launch(cmd) + + with Then("Reconcile should trigger"): + kubectl.wait_chk_status(chk, "InProgress") + kubectl.wait_chk_status(chk, "Completed") + + with Then("And CHK should be stopped"): + label = f"-l clickhouse-keeper.altinity.com/chk={chk}" + assert kubectl.get_count('sts', label = label) == 1 + assert kubectl.get_count('pod', label = label) == 0 with Finally("I clean up"): delete_test_namespace() From fd90c9f25b283ffec6b3270e5ac0c4ae6342a99a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 17 Feb 2026 18:20:42 +0500 Subject: [PATCH 204/233] dev: recreate abort issue --- .../common/statefulset/statefulset-reconciler.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/pkg/controller/common/statefulset/statefulset-reconciler.go b/pkg/controller/common/statefulset/statefulset-reconciler.go index dadfee9b8..c2e571753 100644 --- a/pkg/controller/common/statefulset/statefulset-reconciler.go +++ b/pkg/controller/common/statefulset/statefulset-reconciler.go @@ -167,17 +167,15 @@ func (r *Reconciler) ReconcileStatefulSet( switch { case opts.ForceRecreate(): // Force recreate prevails over all other requests - _ = r.recreateStatefulSet(ctx, host, register, opts) + err = r.recreateStatefulSet(ctx, host, register, opts) + case apiErrors.IsNotFound(err): + // StatefulSet not found in k8s — create it + err = r.createStatefulSet(ctx, host, register, opts) default: - // We have (or had in the past) StatefulSet - try to update|recreate it + // We have StatefulSet - try to update|recreate it err = r.updateStatefulSet(ctx, host, register, opts) } - if apiErrors.IsNotFound(err) { - // StatefulSet not found - 
even during Update process - try to create it - err = r.createStatefulSet(ctx, host, register, opts) - } - // Host has to know current StatefulSet and Pod host.Runtime.CurStatefulSet, _ = r.sts.Get(ctx, newStatefulSet) From adcf5498fc056b22c7ef9235653a9e2b35d4ed31 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Tue, 17 Feb 2026 20:18:21 +0500 Subject: [PATCH 205/233] dev: inherit reconcile from CHI level --- pkg/model/chi/normalizer/normalizer.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/model/chi/normalizer/normalizer.go b/pkg/model/chi/normalizer/normalizer.go index 3afea5f17..37e3bd25b 100644 --- a/pkg/model/chi/normalizer/normalizer.go +++ b/pkg/model/chi/normalizer/normalizer.go @@ -1038,6 +1038,13 @@ func (n *Normalizer) normalizeClusterLayoutShardsCountAndReplicasCount(clusterLa func (n *Normalizer) normalizeClusterReconcile(reconcile *chi.ClusterReconcile) *chi.ClusterReconcile { reconcile = reconcile.Ensure() + // Inherit from CHI-level reconcile settings (fill empty values only) + if chiReconcile := n.req.GetTarget().GetSpecT().Reconcile; chiReconcile != nil { + reconcile.Runtime = reconcile.Runtime.MergeFrom(chiReconcile.Runtime, chi.MergeTypeFillEmptyValues) + reconcile.StatefulSet = reconcile.StatefulSet.MergeFrom(chiReconcile.StatefulSet) + reconcile.Host = reconcile.Host.MergeFrom(chiReconcile.Host) + } + reconcile.Runtime = n.normalizeReconcileRuntime(reconcile.Runtime) reconcile.StatefulSet = n.normalizeReconcileStatefulSet(reconcile.StatefulSet) reconcile.Host = n.normalizeReconcileHost(reconcile.Host) From 9902f95e4fdcc1f83e509b915e33381ccef9088f Mon Sep 17 00:00:00 2001 From: Nathaniel Caza Date: Tue, 17 Feb 2026 09:22:36 -0600 Subject: [PATCH 206/233] sort keys in Settings.Keys() method for consistent order (fix manifest reconcile issue) (#1900) Signed-off-by: Nathaniel Caza --- pkg/apis/clickhouse.altinity.com/v1/type_settings.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git 
a/pkg/apis/clickhouse.altinity.com/v1/type_settings.go b/pkg/apis/clickhouse.altinity.com/v1/type_settings.go index 0a3addd51..d30587887 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_settings.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_settings.go @@ -300,11 +300,15 @@ func (s *Settings) SetScalarsFromMap(m map[string]string) *Settings { return s } -// Keys gets keys of the settings +// Keys gets keys of the settings in alphabetical order func (s *Settings) Keys() (keys []string) { s.WalkKeys(func(key string, setting *Setting) { keys = append(keys, key) }) + + // Sort keys to ensure deterministic ordering for Kubernetes manifest stability. + // Consistent ordering prevents unnecessary resource updates during reconciliation. + sort.Strings(keys) return keys } From 1c04aa4f9c1acd5f3f0722d844d1d391dc42a253 Mon Sep 17 00:00:00 2001 From: alz Date: Tue, 17 Feb 2026 20:58:16 +0300 Subject: [PATCH 207/233] Test for .spec.reconcile.statefulSet.recreate.onUpdateFailure --- tests/e2e/manifests/chi/test-042-abort-1.yaml | 22 ++++++ tests/e2e/manifests/chi/test-042-abort-2.yaml | 22 ++++++ tests/e2e/manifests/chi/test-042-abort-3.yaml | 22 ++++++ tests/e2e/test_operator.py | 70 +++++++++++++++++++ 4 files changed, 136 insertions(+) create mode 100644 tests/e2e/manifests/chi/test-042-abort-1.yaml create mode 100644 tests/e2e/manifests/chi/test-042-abort-2.yaml create mode 100644 tests/e2e/manifests/chi/test-042-abort-3.yaml diff --git a/tests/e2e/manifests/chi/test-042-abort-1.yaml b/tests/e2e/manifests/chi/test-042-abort-1.yaml new file mode 100644 index 000000000..ec67dfec3 --- /dev/null +++ b/tests/e2e/manifests/chi/test-042-abort-1.yaml @@ -0,0 +1,22 @@ +apiVersion: clickhouse.altinity.com/v1 +kind: ClickHouseInstallation +metadata: + name: test-010042-2 +spec: + reconcile: + statefulSet: + recreate: + onUpdateFailure: abort + templates: + podTemplates: + - name: default + spec: + containers: + - name: clickhouse-pod + image: clickhouse/clickhouse-server:24.8 
+ defaults: + templates: + podTemplate: default + configuration: + clusters: + - name: default \ No newline at end of file diff --git a/tests/e2e/manifests/chi/test-042-abort-2.yaml b/tests/e2e/manifests/chi/test-042-abort-2.yaml new file mode 100644 index 000000000..9e1fe5b06 --- /dev/null +++ b/tests/e2e/manifests/chi/test-042-abort-2.yaml @@ -0,0 +1,22 @@ +apiVersion: clickhouse.altinity.com/v1 +kind: ClickHouseInstallation +metadata: + name: test-010042-2 +spec: + reconcile: + statefulSet: + recreate: + onUpdateFailure: abort + templates: + podTemplates: + - name: default + spec: + containers: + - name: clickhouse-pod + image: clickhouse/clickhouse-server:25.3 + defaults: + templates: + podTemplate: default + configuration: + clusters: + - name: default \ No newline at end of file diff --git a/tests/e2e/manifests/chi/test-042-abort-3.yaml b/tests/e2e/manifests/chi/test-042-abort-3.yaml new file mode 100644 index 000000000..c9c1adce2 --- /dev/null +++ b/tests/e2e/manifests/chi/test-042-abort-3.yaml @@ -0,0 +1,22 @@ +apiVersion: clickhouse.altinity.com/v1 +kind: ClickHouseInstallation +metadata: + name: test-010042-2 +spec: + reconcile: + statefulSet: + recreate: + onUpdateFailure: recreate + templates: + podTemplates: + - name: default + spec: + containers: + - name: clickhouse-pod + image: clickhouse/clickhouse-server:25.3 + defaults: + templates: + podTemplate: default + configuration: + clusters: + - name: default \ No newline at end of file diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index 72ad32a30..291b19b7a 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -4430,6 +4430,76 @@ def test_010042(self): with Finally("I clean up"): delete_test_namespace() +@TestScenario +@Name("test_010042_2. 
Test aborting changes that may recreate STS") +def test_010042_2(self): + create_shell_namespace_clickhouse_template() + + cluster = "default" + manifest = f"manifests/chi/test-042-abort-1.yaml" + chi = yaml_manifest.get_name(util.get_full_path(manifest)) + + with Given("CHI is created"): + kubectl.create_and_check( + manifest = "manifests/chi/test-042-abort-1.yaml", + check={ + "pod_count": 1, + "do_not_delete": 1, + }, + ) + + version_from = "24.8" + version_to = "25.3" + with Then("CHI version is " + version_from): + ver = clickhouse.query(chi, "select version()") + assert version_from in ver + + with When("OnUpdateFailaure is aborted"): + onUpdateFailure = kubectl.get_field("chi", chi, ".spec.reconcile.statefulSet.recreate.onUpdateFailure") + if onUpdateFailure != 'abort': + cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"replace","path":"/spec/reconcile/statefulSet/recreate/onUpdateFailure","value":"abort"}}]\'' + kubectl.launch(cmd) + kubectl.wait_chi_status(chi, "InProgress") + kubectl.wait_chi_status(chi, "Completed") + + with Then("Upgrade podTemplate to a different verison should be aborted"): + kubectl.create_and_check( + manifest = "manifests/chi/test-042-abort-2.yaml", + check={ + "pod_count": 1, + "do_not_delete": 1, + "chi_status": "Aborted" + }, + ) + + with And("CHI version is unchanged " + version_from): + ver = clickhouse.query(chi, "select version()") + assert version_from in ver + + with When("OnUpdateFailaure is recreate"): + onUpdateFailure = kubectl.get_field("chi", chi, ".spec.reconcile.statefulSet.recreate.onUpdateFailure") + if onUpdateFailure != 'recreate': + cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"replace","path":"/spec/reconcile/statefulSet/recreate/onUpdateFailure","value":"recreate"}}]\'' + kubectl.launch(cmd) + kubectl.wait_chi_status(chi, "InProgress") + kubectl.wait_chi_status(chi, "Completed") + + with Then("Upgrade podTemplate to a different verison should be successful"): + 
kubectl.create_and_check( + manifest = "manifests/chi/test-042-abort-3.yaml", + check={ + "pod_count": 1, + "do_not_delete": 1 + }, + ) + + with And("CHI version is changed " + version_to): + ver = clickhouse.query(chi, "select version()") + assert version_to in ver + + with Finally("I clean up"): + delete_test_namespace() + @TestCheck @Name("test_043. Logs container customizing") From 8bc8bc0d8cfbfb10299037fb7375d28a17089e7c Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 18 Feb 2026 01:42:30 +0500 Subject: [PATCH 208/233] test: make test 42-2 to be unable to update sts --- tests/e2e/manifests/chi/test-042-abort-1.yaml | 11 ++++++++++- tests/e2e/manifests/chi/test-042-abort-2.yaml | 11 ++++++++++- tests/e2e/manifests/chi/test-042-abort-3.yaml | 11 ++++++++++- 3 files changed, 30 insertions(+), 3 deletions(-) diff --git a/tests/e2e/manifests/chi/test-042-abort-1.yaml b/tests/e2e/manifests/chi/test-042-abort-1.yaml index ec67dfec3..780b12d30 100644 --- a/tests/e2e/manifests/chi/test-042-abort-1.yaml +++ b/tests/e2e/manifests/chi/test-042-abort-1.yaml @@ -14,9 +14,18 @@ spec: containers: - name: clickhouse-pod image: clickhouse/clickhouse-server:24.8 + volumeClaimTemplates: + - name: data-volume-claim + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi defaults: templates: podTemplate: default + dataVolumeClaimTemplate: data-volume-claim configuration: clusters: - - name: default \ No newline at end of file + - name: default diff --git a/tests/e2e/manifests/chi/test-042-abort-2.yaml b/tests/e2e/manifests/chi/test-042-abort-2.yaml index 9e1fe5b06..4864040ed 100644 --- a/tests/e2e/manifests/chi/test-042-abort-2.yaml +++ b/tests/e2e/manifests/chi/test-042-abort-2.yaml @@ -14,9 +14,18 @@ spec: containers: - name: clickhouse-pod image: clickhouse/clickhouse-server:25.3 + volumeClaimTemplates: + - name: data-volume-claim + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 200Mi defaults: templates: 
podTemplate: default + dataVolumeClaimTemplate: data-volume-claim configuration: clusters: - - name: default \ No newline at end of file + - name: default diff --git a/tests/e2e/manifests/chi/test-042-abort-3.yaml b/tests/e2e/manifests/chi/test-042-abort-3.yaml index c9c1adce2..d31740f94 100644 --- a/tests/e2e/manifests/chi/test-042-abort-3.yaml +++ b/tests/e2e/manifests/chi/test-042-abort-3.yaml @@ -14,9 +14,18 @@ spec: containers: - name: clickhouse-pod image: clickhouse/clickhouse-server:25.3 + volumeClaimTemplates: + - name: data-volume-claim + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 200Mi defaults: templates: podTemplate: default + dataVolumeClaimTemplate: data-volume-claim configuration: clusters: - - name: default \ No newline at end of file + - name: default From 69448625fc66f7c005dbd237650025b64b7f3f72 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 18 Feb 2026 01:42:48 +0500 Subject: [PATCH 209/233] test: fix typos --- tests/e2e/test_operator.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index 291b19b7a..b1ff16244 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -4454,7 +4454,7 @@ def test_010042_2(self): ver = clickhouse.query(chi, "select version()") assert version_from in ver - with When("OnUpdateFailaure is aborted"): + with When("OnUpdateFailure is aborted"): onUpdateFailure = kubectl.get_field("chi", chi, ".spec.reconcile.statefulSet.recreate.onUpdateFailure") if onUpdateFailure != 'abort': cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"replace","path":"/spec/reconcile/statefulSet/recreate/onUpdateFailure","value":"abort"}}]\'' @@ -4462,7 +4462,7 @@ def test_010042_2(self): kubectl.wait_chi_status(chi, "InProgress") kubectl.wait_chi_status(chi, "Completed") - with Then("Upgrade podTemplate to a different verison should be aborted"): + with Then("Upgrade podTemplate to a 
different version should be aborted"): kubectl.create_and_check( manifest = "manifests/chi/test-042-abort-2.yaml", check={ @@ -4476,7 +4476,7 @@ def test_010042_2(self): ver = clickhouse.query(chi, "select version()") assert version_from in ver - with When("OnUpdateFailaure is recreate"): + with When("OnUpdateFailure is recreate"): onUpdateFailure = kubectl.get_field("chi", chi, ".spec.reconcile.statefulSet.recreate.onUpdateFailure") if onUpdateFailure != 'recreate': cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"replace","path":"/spec/reconcile/statefulSet/recreate/onUpdateFailure","value":"recreate"}}]\'' @@ -4484,7 +4484,7 @@ def test_010042_2(self): kubectl.wait_chi_status(chi, "InProgress") kubectl.wait_chi_status(chi, "Completed") - with Then("Upgrade podTemplate to a different verison should be successful"): + with Then("Upgrade podTemplate to a different version should be successful"): kubectl.create_and_check( manifest = "manifests/chi/test-042-abort-3.yaml", check={ From 304a99a2df5e3d0f7e051b10f7c9959dc20cefd3 Mon Sep 17 00:00:00 2001 From: alz Date: Wed, 18 Feb 2026 09:08:38 +0300 Subject: [PATCH 210/233] Improved test_042_2 on OnUpdateFailure logic --- tests/e2e/manifests/chi/test-042-abort-2.yaml | 2 +- tests/e2e/manifests/chi/test-042-abort-3.yaml | 4 +- tests/e2e/test_operator.py | 47 ++++++++++++------- 3 files changed, 34 insertions(+), 19 deletions(-) diff --git a/tests/e2e/manifests/chi/test-042-abort-2.yaml b/tests/e2e/manifests/chi/test-042-abort-2.yaml index 4864040ed..fec9dbee7 100644 --- a/tests/e2e/manifests/chi/test-042-abort-2.yaml +++ b/tests/e2e/manifests/chi/test-042-abort-2.yaml @@ -21,7 +21,7 @@ spec: - ReadWriteOnce resources: requests: - storage: 200Mi + storage: 100Mi defaults: templates: podTemplate: default diff --git a/tests/e2e/manifests/chi/test-042-abort-3.yaml b/tests/e2e/manifests/chi/test-042-abort-3.yaml index d31740f94..9352411c8 100644 --- a/tests/e2e/manifests/chi/test-042-abort-3.yaml +++ 
b/tests/e2e/manifests/chi/test-042-abort-3.yaml @@ -6,14 +6,14 @@ spec: reconcile: statefulSet: recreate: - onUpdateFailure: recreate + onUpdateFailure: abort templates: podTemplates: - name: default spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:25.3 + image: clickhouse/clickhouse-server:25.8 volumeClaimTemplates: - name: data-volume-claim spec: diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index b1ff16244..92be8b85b 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -4448,11 +4448,13 @@ def test_010042_2(self): }, ) - version_from = "24.8" - version_to = "25.3" - with Then("CHI version is " + version_from): + version_1 = "24.8" + version_2 = "25.3" + version_3 = "25.8" + + with Then("CHI version is " + version_1): ver = clickhouse.query(chi, "select version()") - assert version_from in ver + assert version_1 in ver with When("OnUpdateFailure is aborted"): onUpdateFailure = kubectl.get_field("chi", chi, ".spec.reconcile.statefulSet.recreate.onUpdateFailure") @@ -4462,40 +4464,53 @@ def test_010042_2(self): kubectl.wait_chi_status(chi, "InProgress") kubectl.wait_chi_status(chi, "Completed") - with Then("Upgrade podTemplate to a different version should be aborted"): + with Then("Upgrade podTemplate.image to a different version should be allowed"): kubectl.create_and_check( manifest = "manifests/chi/test-042-abort-2.yaml", check={ "pod_count": 1, - "do_not_delete": 1, - "chi_status": "Aborted" + "do_not_delete": 1 }, ) - with And("CHI version is unchanged " + version_from): + with And("CHI version is nchanged to " + version_2): ver = clickhouse.query(chi, "select version()") - assert version_from in ver + assert version_2 in ver - with When("OnUpdateFailure is recreate"): + with When("OnUpdateFailure is aborted"): onUpdateFailure = kubectl.get_field("chi", chi, ".spec.reconcile.statefulSet.recreate.onUpdateFailure") - if onUpdateFailure != 'recreate': - cmd = f'patch chi {chi} 
--type=\'json\' --patch=\'[{{"op":"replace","path":"/spec/reconcile/statefulSet/recreate/onUpdateFailure","value":"recreate"}}]\'' + if onUpdateFailure != 'abort': + cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"replace","path":"/spec/reconcile/statefulSet/recreate/onUpdateFailure","value":"abort"}}]\'' kubectl.launch(cmd) kubectl.wait_chi_status(chi, "InProgress") kubectl.wait_chi_status(chi, "Completed") - with Then("Upgrade podTemplate to a different version should be successful"): + with Then("Upgrade podTemplate.volumeClaimTemplate should fail"): kubectl.create_and_check( manifest = "manifests/chi/test-042-abort-3.yaml", check={ "pod_count": 1, - "do_not_delete": 1 + "do_not_delete": 1, + "chi_status": "Aborted" }, ) - with And("CHI version is changed " + version_to): + with And("CHI version is unchanged " + version_2): ver = clickhouse.query(chi, "select version()") - assert version_to in ver + assert version_2 in ver + + with When("OnUpdateFailure is changed to recreate"): + onUpdateFailure = kubectl.get_field("chi", chi, ".spec.reconcile.statefulSet.recreate.onUpdateFailure") + if onUpdateFailure != 'recreate': + cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"replace","path":"/spec/reconcile/statefulSet/recreate/onUpdateFailure","value":"recreate"}}]\'' + kubectl.launch(cmd) + kubectl.wait_chi_status(chi, "InProgress") + kubectl.wait_chi_status(chi, "Completed") + + with Then("CHI reconcile should proceed, and CHI version is unchanged " + version_3): + ver = clickhouse.query(chi, "select version()") + assert version_3 in ver + with Finally("I clean up"): delete_test_namespace() From 5da273e97a9519203700572378b01288498b880a Mon Sep 17 00:00:00 2001 From: alz Date: Wed, 18 Feb 2026 10:02:12 +0300 Subject: [PATCH 211/233] Test for suspend attribute to interrupt existsing reconcile --- tests/e2e/test_operator.py | 51 ++++++++++++++++++++++++++++++-------- 1 file changed, 41 insertions(+), 10 deletions(-) diff --git 
a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index 92be8b85b..d90fb38d6 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -5063,34 +5063,65 @@ def test_010054(self): }, ) - with Then("Add suspend attribute to CHI"): + with When("Add suspend attribute to CHI"): cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"add","path":"/spec/suspend","value":"yes"}}]\'' kubectl.launch(cmd) - with Then(f"Update podTemplate to {new_version} and confirm that pod image is NOT updated"): + with Then(f"Update podTemplate to {new_version} and confirm that pod image is NOT updated"): + kubectl.create_and_check( + manifest="manifests/chi/test-006-ch-upgrade-2.yaml", + check={ + "pod_count": 1, + "pod_image": old_version, + "do_not_delete": 1, + }, + ) + + with When("Remove suspend attribute from CHI"): + cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"remove","path":"/spec/suspend"}}]\'' + kubectl.launch(cmd) + + kubectl.wait_chi_status(chi, "InProgress") + kubectl.wait_chi_status(chi, "Completed") + + with Then(f"Confirm that pod image is updated to {new_version}"): + kubectl.check_pod_image(chi, new_version) + + with When(f"Update podTemplate to {old_version} back but do not wait for completion"): kubectl.create_and_check( - manifest="manifests/chi/test-006-ch-upgrade-2.yaml", + manifest="manifests/chi/test-006-ch-upgrade-1.yaml", check={ - "pod_count": 1, - "pod_image": old_version, + "chi_status": "InProgress", "do_not_delete": 1, }, ) - with Then("Remove suspend attribute from CHI"): + with And("Add suspend attribute to CHI"): + cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"add","path":"/spec/suspend","value":"yes"}}]\'' + kubectl.launch(cmd) + + with Then(f"Reconcile should be interrupted and pod image should remain at {new_version}"): + # kubectl.wait_chi_status(chi, "Aborted", retries=5) + time.sleep(60) + + kubectl.check_pod_image(chi, new_version) + + with When("Remove suspend attribute from CHI"): cmd = 
f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"remove","path":"/spec/suspend"}}]\'' kubectl.launch(cmd) - kubectl.wait_chi_status(chi, "InProgress") - kubectl.wait_chi_status(chi, "Completed") + with Then("Reconcile should be resumed"): + kubectl.wait_chi_status(chi, "InProgress") + kubectl.wait_chi_status(chi, "Completed") - with Then(f"Confirm that pod image is updated to {new_version}"): - kubectl.check_pod_image(chi, new_version) + with And(f"Pod image should be reverted back to {old_version}"): + kubectl.check_pod_image(chi, old_version) with Finally("I clean up"): delete_test_namespace() + @TestScenario @Name("test_010055. Test that restart rules can be merged from CHOP configuration") def test_010055(self): From 6d625de691f5d6ae9eab20672cad8780c9fb24ae Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 18 Feb 2026 13:58:13 +0500 Subject: [PATCH 212/233] dev: rework k8s DNS --- deploy/builder/templates-config/config.yaml | 2 +- .../clickhouse.altinity.com/v1/type_configuration_chop.go | 2 +- pkg/model/chi/namer/patterns.go | 5 +++-- pkg/model/chk/namer/patterns.go | 5 +++-- tests/e2e/test_metrics_exporter.py | 4 ++-- tests/e2e/test_operator.py | 6 +++--- tests/e2e/util.py | 3 ++- 7 files changed, 15 insertions(+), 12 deletions(-) diff --git a/deploy/builder/templates-config/config.yaml b/deploy/builder/templates-config/config.yaml index 740daacf9..0226d778b 100644 --- a/deploy/builder/templates-config/config.yaml +++ b/deploy/builder/templates-config/config.yaml @@ -169,7 +169,7 @@ clickhouse: # Specified in seconds. timeouts: # Timout to setup connection from the operator to ClickHouse instances. In seconds. - connect: 1 + connect: 5 # Timout to perform SQL query from the operator to ClickHouse instances. In seconds. 
query: 4 diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go index 2a58578c8..82e685e4b 100644 --- a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go +++ b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go @@ -72,7 +72,7 @@ const ( // Timeouts used to limit connection and queries from the operator to ClickHouse instances. In seconds // defaultTimeoutConnect specifies default timeout to connect to the ClickHouse instance. In seconds - defaultTimeoutConnect = 2 + defaultTimeoutConnect = 5 // defaultTimeoutQuery specifies default timeout to query the CLickHouse instance. In seconds defaultTimeoutQuery = 5 // defaultTimeoutCollect specifies default timeout to collect metrics from the ClickHouse instance. In seconds diff --git a/pkg/model/chi/namer/patterns.go b/pkg/model/chi/namer/patterns.go index 621398ff4..c9a74d5a3 100644 --- a/pkg/model/chi/namer/patterns.go +++ b/pkg/model/chi/namer/patterns.go @@ -62,8 +62,9 @@ const ( const ( // patternNamespaceDomain presents Domain Name pattern of a namespace // In this pattern "%s" is substituted namespace name's value - // Ex.: my-dev-namespace.svc.cluster.local - patternNamespaceDomain = "%s.svc.cluster.local" + // Trailing dot forces absolute DNS lookup, avoiding slow search-suffix resolution with ndots:5 + // Ex.: my-dev-namespace.svc.cluster.local. + patternNamespaceDomain = "%s.svc.cluster.local." // ServiceName.domain.name patternServiceFQDN = "%s" + "." 
+ patternNamespaceDomain diff --git a/pkg/model/chk/namer/patterns.go b/pkg/model/chk/namer/patterns.go index dc2bf682a..701e74864 100644 --- a/pkg/model/chk/namer/patterns.go +++ b/pkg/model/chk/namer/patterns.go @@ -62,8 +62,9 @@ const ( const ( // patternNamespaceDomain presents Domain Name pattern of a namespace // In this pattern "%s" is substituted namespace name's value - // Ex.: my-dev-namespace.svc.cluster.local - patternNamespaceDomain = "%s.svc.cluster.local" + // Trailing dot forces absolute DNS lookup, avoiding slow search-suffix resolution with ndots:5 + // Ex.: my-dev-namespace.svc.cluster.local. + patternNamespaceDomain = "%s.svc.cluster.local." // ServiceName.domain.name patternServiceFQDN = "%s" + "." + patternNamespaceDomain diff --git a/tests/e2e/test_metrics_exporter.py b/tests/e2e/test_metrics_exporter.py index 923abd08a..9b60d6ec7 100644 --- a/tests/e2e/test_metrics_exporter.py +++ b/tests/e2e/test_metrics_exporter.py @@ -108,13 +108,13 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma "hosts": [ { "name": "0-0", - "hostname": "chi-test-017-multi-version-default-0-0.test.svc.cluster.local", + "hostname": "chi-test-017-multi-version-default-0-0.test.svc.cluster.local.", "tcpPort": 9000, "httpPort": 8123 }, { "name": "1-0", - "hostname": "chi-test-017-multi-version-default-1-0.test.svc.cluster.local", + "hostname": "chi-test-017-multi-version-default-1-0.test.svc.cluster.local.", "tcpPort": 9000, "httpPort": 8123 } diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index b1ff16244..a19cd6e9d 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -2930,7 +2930,7 @@ def test_010025(self): cnt_local = clickhouse.query_with_error( chi, "SELECT count() FROM test_local_025", - "chi-test-025-rescaling-default-0-1.test.svc.cluster.local", + "chi-test-025-rescaling-default-0-1.test.svc.cluster.local.", ) cnt_lb = clickhouse.query_with_error(chi, "SELECT count() FROM 
test_local_025") cnt_distr_lb = clickhouse.query_with_error(chi, "SELECT count() FROM test_distr_025") @@ -4928,8 +4928,8 @@ def test_labels(chi, type, key, value): out = kubectl.launch("get pods -l app=clickhouse-operator", ns=operator_namespace).splitlines()[1] operator_pod = re.split(r"[\t\r\n\s]+", out)[0] - # chi_clickhouse_metric_VersionInteger{chi="test-050",exclude_this_annotation="test-050-annotation",hostname="chi-test-050-default-0-0.test-050-e1884706-9a94-11ef-a786-367ddacfe5fd.svc.cluster.local",include_this_annotation="test-050-annotation",include_this_label="test-050-label",namespace="test-050-e1884706-9a94-11ef-a786-367ddacfe5fd"} - expect_labels = f"chi=\"test-050\",hostname=\"chi-test-050-default-0-0.{operator_namespace}.svc.cluster.local\",include_this_annotation=\"test-050-annotation\",include_this_label=\"test-050-label\"" + # chi_clickhouse_metric_VersionInteger{chi="test-050",exclude_this_annotation="test-050-annotation",hostname="chi-test-050-default-0-0.test-050-e1884706-9a94-11ef-a786-367ddacfe5fd.svc.cluster.local.",include_this_annotation="test-050-annotation",include_this_label="test-050-label",namespace="test-050-e1884706-9a94-11ef-a786-367ddacfe5fd"} + expect_labels = f"chi=\"test-050\",hostname=\"chi-test-050-default-0-0.{operator_namespace}.svc.cluster.local.\",include_this_annotation=\"test-050-annotation\",include_this_label=\"test-050-label\"" check_metrics_monitoring( operator_namespace=operator_namespace, operator_pod=operator_pod, diff --git a/tests/e2e/util.py b/tests/e2e/util.py index 8b4142cdb..76685cb9e 100644 --- a/tests/e2e/util.py +++ b/tests/e2e/util.py @@ -146,7 +146,8 @@ def wait_clickhouse_cluster_ready(chi): pod=pod, ) for host in chi["status"]["fqdns"]: - svc_short_name = host.replace(f".{current().context.test_namespace}.svc.cluster.local", "") + svc_short_name = host.replace(f".{current().context.test_namespace}.svc.cluster.local.", "") + svc_short_name = 
svc_short_name.replace(f".{current().context.test_namespace}.svc.cluster.local", "") if svc_short_name not in cluster_response: with Then("Not ready, sleep 5 seconds"): all_pods_ready = False From 538533c6a6c0dbd346fd789f028aa84ab6c3fa5e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 18 Feb 2026 13:58:28 +0500 Subject: [PATCH 213/233] dev: config --- config/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/config.yaml b/config/config.yaml index 07413ef8d..f7f6c1024 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -175,7 +175,7 @@ clickhouse: # Specified in seconds. timeouts: # Timout to setup connection from the operator to ClickHouse instances. In seconds. - connect: 1 + connect: 5 # Timout to perform SQL query from the operator to ClickHouse instances. In seconds. query: 4 From eeed5198c316bf4c69706393c4f8c4af76c5766b Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 18 Feb 2026 13:58:43 +0500 Subject: [PATCH 214/233] env: manifests --- deploy/operator/clickhouse-operator-install-ansible.yaml | 2 +- deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml | 2 +- deploy/operator/clickhouse-operator-install-bundle.yaml | 2 +- .../operator/clickhouse-operator-install-template-v1beta1.yaml | 2 +- deploy/operator/clickhouse-operator-install-template.yaml | 2 +- deploy/operator/clickhouse-operator-install-tf.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/deploy/operator/clickhouse-operator-install-ansible.yaml b/deploy/operator/clickhouse-operator-install-ansible.yaml index 0f1bda900..50ee7b39c 100644 --- a/deploy/operator/clickhouse-operator-install-ansible.yaml +++ b/deploy/operator/clickhouse-operator-install-ansible.yaml @@ -4993,7 +4993,7 @@ data: # Specified in seconds. timeouts: # Timout to setup connection from the operator to ClickHouse instances. In seconds. - connect: 1 + connect: 5 # Timout to perform SQL query from the operator to ClickHouse instances. 
In seconds. query: 4 diff --git a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml index d0aa077e2..7301128d1 100644 --- a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml +++ b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml @@ -5192,7 +5192,7 @@ data: # Specified in seconds. timeouts: # Timout to setup connection from the operator to ClickHouse instances. In seconds. - connect: 1 + connect: 5 # Timout to perform SQL query from the operator to ClickHouse instances. In seconds. query: 4 diff --git a/deploy/operator/clickhouse-operator-install-bundle.yaml b/deploy/operator/clickhouse-operator-install-bundle.yaml index e097e8984..8f2f01468 100644 --- a/deploy/operator/clickhouse-operator-install-bundle.yaml +++ b/deploy/operator/clickhouse-operator-install-bundle.yaml @@ -5252,7 +5252,7 @@ data: # Specified in seconds. timeouts: # Timout to setup connection from the operator to ClickHouse instances. In seconds. - connect: 1 + connect: 5 # Timout to perform SQL query from the operator to ClickHouse instances. In seconds. query: 4 diff --git a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml index db0f61c73..5684aed86 100644 --- a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml +++ b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml @@ -4939,7 +4939,7 @@ data: # Specified in seconds. timeouts: # Timout to setup connection from the operator to ClickHouse instances. In seconds. - connect: 1 + connect: 5 # Timout to perform SQL query from the operator to ClickHouse instances. In seconds. 
query: 4 diff --git a/deploy/operator/clickhouse-operator-install-template.yaml b/deploy/operator/clickhouse-operator-install-template.yaml index 1cf13349b..85d6a3834 100644 --- a/deploy/operator/clickhouse-operator-install-template.yaml +++ b/deploy/operator/clickhouse-operator-install-template.yaml @@ -4986,7 +4986,7 @@ data: # Specified in seconds. timeouts: # Timout to setup connection from the operator to ClickHouse instances. In seconds. - connect: 1 + connect: 5 # Timout to perform SQL query from the operator to ClickHouse instances. In seconds. query: 4 diff --git a/deploy/operator/clickhouse-operator-install-tf.yaml b/deploy/operator/clickhouse-operator-install-tf.yaml index e8b65a97d..34328b53b 100644 --- a/deploy/operator/clickhouse-operator-install-tf.yaml +++ b/deploy/operator/clickhouse-operator-install-tf.yaml @@ -4993,7 +4993,7 @@ data: # Specified in seconds. timeouts: # Timout to setup connection from the operator to ClickHouse instances. In seconds. - connect: 1 + connect: 5 # Timout to perform SQL query from the operator to ClickHouse instances. In seconds. query: 4 From 17aa0026cb6fb9ed4033447a0b5381433cf7f739 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 18 Feb 2026 13:58:57 +0500 Subject: [PATCH 215/233] env: helm --- deploy/helm/clickhouse-operator/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/helm/clickhouse-operator/values.yaml b/deploy/helm/clickhouse-operator/values.yaml index 9b438dc4d..3dcf4444c 100644 --- a/deploy/helm/clickhouse-operator/values.yaml +++ b/deploy/helm/clickhouse-operator/values.yaml @@ -404,7 +404,7 @@ configs: # Specified in seconds. timeouts: # Timout to setup connection from the operator to ClickHouse instances. In seconds. - connect: 1 + connect: 5 # Timout to perform SQL query from the operator to ClickHouse instances. In seconds. 
query: 4 ################################################ From abba0cf7f7d7fe3b54c1facd21256c1c960bae38 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 18 Feb 2026 14:37:56 +0500 Subject: [PATCH 216/233] dev: aborting --- pkg/controller/chi/worker-reconciler-chi.go | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/pkg/controller/chi/worker-reconciler-chi.go b/pkg/controller/chi/worker-reconciler-chi.go index 8206a8022..7093b4a88 100644 --- a/pkg/controller/chi/worker-reconciler-chi.go +++ b/pkg/controller/chi/worker-reconciler-chi.go @@ -67,7 +67,25 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *api.ClickHouseInstal case new.Spec.Suspend.Value(): // if CR is suspended, should skip reconciliation w.a.M(new).F().Info("Suspended CR") - metrics.CRReconcilesCompleted(ctx, new) + if new.EnsureStatus().GetStatus() == api.StatusInProgress { + // CR was in the middle of reconcile when suspended — mark as Aborted + new.EnsureStatus().ReconcileAbort() + _ = w.c.updateCRObjectStatus(ctx, new, types.UpdateStatusOptions{ + CopyStatusOptions: types.CopyStatusOptions{ + CopyStatusFieldGroup: types.CopyStatusFieldGroup{ + FieldGroupMain: true, + }, + }, + }) + w.a.V(1). + WithEvent(new, a.EventActionReconcile, a.EventReasonReconcileFailed). + WithAction(new). + M(new).F(). 
+ Warning("reconcile aborted due to suspend") + metrics.CRReconcilesAborted(ctx, new) + } else { + metrics.CRReconcilesCompleted(ctx, new) + } return nil case new.EnsureRuntime().ActionPlan.HasActionsToDo(): w.a.M(new).F().Info("ActionPlan has actions - continue reconcile") From 8d94239283fc83be5ae974b1b2db30748d2ba69b Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Wed, 18 Feb 2026 14:38:06 +0500 Subject: [PATCH 217/233] dev: sync chk --- pkg/controller/chk/worker-reconciler-chk.go | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/pkg/controller/chk/worker-reconciler-chk.go b/pkg/controller/chk/worker-reconciler-chk.go index 72c71904a..6b73ef810 100644 --- a/pkg/controller/chk/worker-reconciler-chk.go +++ b/pkg/controller/chk/worker-reconciler-chk.go @@ -67,7 +67,25 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *apiChk.ClickHouseKee case new.Spec.Suspend.Value(): // if CR is suspended, should skip reconciliation w.a.M(new).F().Info("Suspended CR") - metrics.CRReconcilesCompleted(ctx, new) + if new.EnsureStatus().GetStatus() == api.StatusInProgress { + // CR was in the middle of reconcile when suspended — mark as Aborted + new.EnsureStatus().ReconcileAbort() + _ = w.c.updateCRObjectStatus(ctx, new, types.UpdateStatusOptions{ + CopyStatusOptions: types.CopyStatusOptions{ + CopyStatusFieldGroup: types.CopyStatusFieldGroup{ + FieldGroupMain: true, + }, + }, + }) + w.a.V(1). + WithEvent(new, a.EventActionReconcile, a.EventReasonReconcileFailed). + WithAction(new). + M(new).F(). 
+ Warning("reconcile aborted due to suspend") + metrics.CRReconcilesAborted(ctx, new) + } else { + metrics.CRReconcilesCompleted(ctx, new) + } return nil case new.EnsureRuntime().ActionPlan.HasActionsToDo(): w.a.M(new).F().Info("ActionPlan has actions - continue reconcile") From d3e56f42078c07a110c51f034a11a8e106de8330 Mon Sep 17 00:00:00 2001 From: alz Date: Wed, 18 Feb 2026 22:20:50 +0300 Subject: [PATCH 218/233] Test improvements --- tests/e2e/kubectl.py | 6 ++++ tests/e2e/steps.py | 2 +- tests/e2e/test_operator.py | 57 +++++++++++++++++++++----------------- 3 files changed, 38 insertions(+), 27 deletions(-) diff --git a/tests/e2e/kubectl.py b/tests/e2e/kubectl.py index 553c5514f..4cde2cd89 100644 --- a/tests/e2e/kubectl.py +++ b/tests/e2e/kubectl.py @@ -589,6 +589,12 @@ def get_pod_ports(chi_name, pod_name="", ns=None, shell=None): ports.append(p["containerPort"]) return ports +def get_operator_pod(ns=None, shell=None): + out = launch(f"get pod -l app=clickhouse-operator -o=custom-columns=field:.metadata.name", ns=ns, ok_to_fail=True, shell=shell).splitlines() + if len(out) > 1: + return out[1] + else: + return "" def check_pod_ports(chi_name, ports, ns=None, shell=None): pod_ports = get_pod_ports(chi_name, ns=ns, shell=shell) diff --git a/tests/e2e/steps.py b/tests/e2e/steps.py index 703dcc9e6..7e4ea4ed0 100644 --- a/tests/e2e/steps.py +++ b/tests/e2e/steps.py @@ -171,7 +171,7 @@ def check_metrics_monitoring( ns=operator_namespace, ) if expect_metric != "": - lines = [m for m in out.splitlines() if m.startswith(expect_metric)] + lines = [m for m in out.splitlines() if m.startswith(expect_metric) and expect_labels in m] if len(lines) > 0: metric = lines[0] print(f"have: {metric}") diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index 37218485c..a7cc71c10 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -3620,11 +3620,9 @@ def test_010034(self): ) with Then("check for `chi_clickhouse_metric_fetch_errors` is zero 
[1]"): - out = kubectl.launch("get pods -l app=clickhouse-operator", ns=operator_namespace).splitlines()[1] - operator_pod = re.split(r"[\t\r\n\s]+", out)[0] check_metrics_monitoring( operator_namespace=operator_namespace, - operator_pod=operator_pod, + operator_pod=kubectl.get_operator_pod(), expect_pattern="^chi_clickhouse_metric_fetch_errors{(.*?)} 0$", ) @@ -3635,13 +3633,10 @@ def test_010034(self): util.restart_operator() kubectl.wait_chi_status(chi, "Completed") - out = kubectl.launch("get pods -l app=clickhouse-operator", ns=current().context.operator_namespace).splitlines()[1] - operator_pod = re.split(r"[\t\r\n\s]+", out)[0] - with Then("check for `chi_clickhouse_metric_fetch_errors` is not zero"): check_metrics_monitoring( operator_namespace=operator_namespace, - operator_pod=operator_pod, + operator_pod=kubectl.get_operator_pod(), expect_pattern="^chi_clickhouse_metric_fetch_errors{(.*?)} 1$", ) @@ -3650,13 +3645,11 @@ def test_010034(self): with And("Re-create operator pod in order to restart metrics exporter to update the configuration [2]"): util.restart_operator() - out = kubectl.launch("get pods -l app=clickhouse-operator", ns=current().context.operator_namespace).splitlines()[1] - operator_pod = re.split(r"[\t\r\n\s]+", out)[0] with Then("check for `chi_clickhouse_metric_fetch_errors` is zero [2]"): check_metrics_monitoring( operator_namespace=operator_namespace, - operator_pod=operator_pod, + operator_pod=kubectl.get_operator_pod(), expect_pattern="^chi_clickhouse_metric_fetch_errors{(.*?)} 0$", ) @@ -3712,13 +3705,11 @@ def test_010034(self): with And("Re-create operator pod in order to restart metrics exporter to update the configuration [3]"): util.restart_operator() - out = kubectl.launch("get pods -l app=clickhouse-operator", ns=current().context.operator_namespace).splitlines()[1] - operator_pod = re.split(r"[\t\r\n\s]+", out)[0] with Then("check for `chi_clickhouse_metric_fetch_errors` is zero [3]"): check_metrics_monitoring( 
operator_namespace=operator_namespace, - operator_pod=operator_pod, + operator_pod=kubectl.get_operator_pod(), expect_pattern="^chi_clickhouse_metric_fetch_errors{(.*?)} 0$", ) @@ -3727,14 +3718,12 @@ def test_010034(self): with And("Re-create operator pod in order to restart metrics exporter to update the configuration [4]"): util.restart_operator() - out = kubectl.launch("get pods -l app=clickhouse-operator", ns=current().context.operator_namespace).splitlines()[1] - operator_pod = re.split(r"[\t\r\n\s]+", out)[0] # 0.21.2+ with Then("check for `chi_clickhouse_metric_fetch_errors` is zero [4]"): check_metrics_monitoring( operator_namespace=operator_namespace, - operator_pod=operator_pod, + operator_pod=kubectl.get_operator_pod(), expect_pattern="^chi_clickhouse_metric_fetch_errors{(.*?)} 0$", ) @@ -4719,8 +4708,7 @@ def test_010046(self): manifest = f"manifests/chi/test-046-0-clickhouse-operator-metrics.yaml" chi = yaml_manifest.get_name(util.get_full_path(manifest)) operator_namespace = current().context.operator_namespace - out = kubectl.launch("get pods -l app=clickhouse-operator", ns=current().context.operator_namespace).splitlines()[1] - operator_pod = re.split(r"[\t\r\n\s]+", out)[0] + operator_pod = kubectl.get_operator_pod() with Given("CHI with 1 replica is installed"): kubectl.create_and_check( @@ -4940,8 +4928,7 @@ def test_labels(chi, type, key, value): with Then("Check that exposed metrics do not have labels and annotations that are excluded"): operator_namespace = current().context.operator_namespace - out = kubectl.launch("get pods -l app=clickhouse-operator", ns=operator_namespace).splitlines()[1] - operator_pod = re.split(r"[\t\r\n\s]+", out)[0] + operator_pod = kubectl.get_operator_pod() # 
chi_clickhouse_metric_VersionInteger{chi="test-050",exclude_this_annotation="test-050-annotation",hostname="chi-test-050-default-0-0.test-050-e1884706-9a94-11ef-a786-367ddacfe5fd.svc.cluster.local.",include_this_annotation="test-050-annotation",include_this_label="test-050-label",namespace="test-050-e1884706-9a94-11ef-a786-367ddacfe5fd"} expect_labels = f"chi=\"test-050\",hostname=\"chi-test-050-default-0-0.{operator_namespace}.svc.cluster.local.\",include_this_annotation=\"test-050-annotation\",include_this_label=\"test-050-label\"" @@ -5257,16 +5244,35 @@ def test_010056(self): assert out != "0" with And("Replica still should be unready after reconcile timeout"): - pod = kubectl.get("pod", f"chi-{chi}-{cluster}-0-1-0") - ready = pod["metadata"]["labels"]["clickhouse.altinity.com/ready"] + ready = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-1-0", ".metadata.labels.clickhouse\.altinity\.com\/ready") print(f"ready label={ready}") assert ready != "yes", error("Replica should be unready") + with And("Replica should be included in the monitoring"): # as of 0.26.0 + operator_namespace=current().context.operator_namespace + check_metrics_monitoring( + operator_namespace = current().context.operator_namespace, + operator_pod = kubectl.get_operator_pod(), + expect_metric = "chi_clickhouse_metric_VersionInteger", + expect_labels = f"chi-{chi}-{cluster}-0-1" + ) + with And("Replica should report a replication queue"): # as of 0.26.0 + operator_namespace=current().context.operator_namespace + check_metrics_monitoring( + operator_namespace = current().context.operator_namespace, + operator_pod = kubectl.get_operator_pod(), + expect_metric = "chi_clickhouse_metric_ReplicasSumQueueSize", + expect_labels = f"chi-{chi}-{cluster}-0-1" + ) + with When("START REPLICATED SENDS"): clickhouse.query(chi, "SYSTEM START REPLICATED SENDS", host=f"chi-{chi}-{cluster}-0-0-0") - time.sleep(10) - with Then("Replication delay should be zero"): + with Then("Replica should become ready"): + 
kubectl.wait_field("pod", f"chi-{chi}-{cluster}-0-1-0", + ".metadata.labels.clickhouse\.altinity\.com\/ready", value="yes") + + with And("Replication delay should be zero"): out = clickhouse.query(chi, "select max(absolute_delay) from system.replicas", host=f"chi-{chi}-{cluster}-0-1-0") print(f"max(absolute_delay)={out}") assert out == "0" @@ -5322,8 +5328,7 @@ def test_010058(self): # Can be merged with test_034 potentially with Given("Add rootCA to operator configuration"): util.apply_operator_config("manifests/chopconf/test-058-chopconf.yaml") - out = kubectl.launch("get pods -l app=clickhouse-operator", ns=current().context.operator_namespace).splitlines()[1] - operator_pod = re.split(r"[\t\r\n\s]+", out)[0] + operator_pod = kubectl.get_operator_pod() with Given("test-058-root-ca secret is installed"): kubectl.apply( From dc3072824e64e13e8002e98d42ce3d1e10da4960 Mon Sep 17 00:00:00 2001 From: alz Date: Wed, 18 Feb 2026 22:41:52 +0300 Subject: [PATCH 219/233] Cleanup tests --- tests/e2e/test_operator.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index a7cc71c10..f769861d0 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -585,7 +585,7 @@ def test_010009_1(self, version_from="0.25.6", version_to=None): @TestScenario @Name("test_010009_2. Test operator upgrade") @Tags("NO_PARALLEL") -def test_010009_2(self, version_from="0.25.2", version_to=None): +def test_010009_2(self, version_from="0.25.6", version_to=None): if version_to is None: version_to = self.context.operator_version @@ -6124,7 +6124,6 @@ def test_020007(self): @TestScenario @Name("test_020008. 
Test FIPS versions are properly supported by both in CHI and CHK") -@Tags("NO_PARALLEL") def test_020008(self): create_shell_namespace_clickhouse_template() From cbd533065d4a65c4e3a24fc70035a0e1bf460256 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 19 Feb 2026 00:45:13 +0500 Subject: [PATCH 220/233] test: runner cleanup --- tests/e2e/run_tests_local.sh | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/e2e/run_tests_local.sh b/tests/e2e/run_tests_local.sh index 58d9f5d4f..a37a24413 100755 --- a/tests/e2e/run_tests_local.sh +++ b/tests/e2e/run_tests_local.sh @@ -67,10 +67,13 @@ case "${WHAT}" in ;; esac -TIMEOUT=30 -echo "Press to start test immediately (if you agree with specified options)" -echo "In case no input provided tests would start in ${TIMEOUT} seconds automatically" -read -t ${TIMEOUT} +# Only wait for confirmation when running interactively (stdin is a terminal) +if [ -t 0 ]; then + TIMEOUT=30 + echo "Press to start test immediately (if you agree with specified options)" + echo "In case no input provided tests would start in ${TIMEOUT} seconds automatically" + read -t ${TIMEOUT} +fi # Dispatch to the dedicated local script, with optional repeat mode case "${REPEAT_UNTIL}" in From f6bec99e661496ff2b7d2b10b1e4d9518c939201 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 19 Feb 2026 01:14:55 +0500 Subject: [PATCH 221/233] test: xfail flaky in CI test --- tests/e2e/test_operator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index f769861d0..418f31aef 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -1724,12 +1724,12 @@ def check_schema_propagation(replicas): "do_not_delete": 1, }, ) - with Then(f"Tables are deleted in {self.context.keeper_type}"): + with Then(f"Tables are deleted in {self.context.keeper_type}", flags=XFAIL): out = clickhouse.query_with_error( chi_name, f"SELECT count() FROM 
system.zookeeper WHERE path ='/clickhouse/{cluster}/tables/0/default'", ) - print(f"Found {out} replicated tables in {self.context.keeper_type}") + note(f"Found {out} replicated tables in {self.context.keeper_type}") assert "DB::Exception: No node" in out or out == "0" with Finally("I clean up"): From 10dc8df6fba5879e16a03d3dfe3b9803deeb86fc Mon Sep 17 00:00:00 2001 From: alz Date: Thu, 19 Feb 2026 10:20:04 +0300 Subject: [PATCH 222/233] Update stable version to altinity/clickhouse-server:25.8.16.10001.altinitystable --- tests/e2e/manifests/chit/tpl-clickhouse-stable.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e/manifests/chit/tpl-clickhouse-stable.yaml b/tests/e2e/manifests/chit/tpl-clickhouse-stable.yaml index f741412a9..84f7fce5c 100644 --- a/tests/e2e/manifests/chit/tpl-clickhouse-stable.yaml +++ b/tests/e2e/manifests/chit/tpl-clickhouse-stable.yaml @@ -13,6 +13,6 @@ spec: spec: containers: - name: clickhouse-pod - image: altinity/clickhouse-server:25.3.6.10034.altinitystable + image: altinity/clickhouse-server:25.8.16.10001.altinitystable imagePullPolicy: IfNotPresent From 94064f2f37a70c003a064ce71b723b239a3e0604 Mon Sep 17 00:00:00 2001 From: alz Date: Thu, 19 Feb 2026 10:48:48 +0300 Subject: [PATCH 223/233] NO_PARALLEL for volume tests --- tests/e2e/test_operator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index 418f31aef..0598bf325 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -3737,6 +3737,7 @@ def test_010034(self): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_ReprovisioningVolume("1.0")) @Name("test_010036. 
Check operator volume re-provisioning") +@Tags("NO_PARALLEL") def test_010036(self): """Check clickhouse operator recreates volumes and schema if volume is broken.""" create_shell_namespace_clickhouse_template() From 2f7b185513eaf62623aa9b8295a1af3af63b23fb Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 19 Feb 2026 14:14:07 +0500 Subject: [PATCH 224/233] dev: cut off excessively long values --- pkg/util/messagediff.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/pkg/util/messagediff.go b/pkg/util/messagediff.go index dec3b87d6..beab0a933 100644 --- a/pkg/util/messagediff.go +++ b/pkg/util/messagediff.go @@ -64,14 +64,12 @@ func PrintPath(path *messagediff.Path, defaultPath string) (res string) { } func PrintTrimmedValue(value any) string { - valueFull := fmt.Sprintf("%s", Dump(value)) - ln := len(valueFull) - if (0 < ln) && (ln < 300) { - return valueFull - } else { - valueShort := fmt.Sprintf("%+v", value) - return valueShort + const maxLen = 1024 + str := Dump(value) + if len(str) <= maxLen { + return str } + return str[:maxLen] + "...(value truncated for brevity)" } // MessageDiffItemString stringifies one map[*messagediff.Path]interface{} item From 2c840268b1b27016400b6c5ed660c852e2f29165 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 19 Feb 2026 14:14:31 +0500 Subject: [PATCH 225/233] dev: ignore in dump no-diff-valued fields --- pkg/util/dump.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/util/dump.go b/pkg/util/dump.go index 38de501e7..582e1a9dd 100644 --- a/pkg/util/dump.go +++ b/pkg/util/dump.go @@ -15,6 +15,8 @@ package util import ( + "reflect" + dumper "github.com/sanity-io/litter" ) @@ -35,6 +37,10 @@ func Dump(obj interface{}) (out string) { //FieldExclusions: regexp.MustCompile(`^(XXX_.*)$`), // XXX_ is a prefix of fields generated by protoc-gen-go //HideZeroValues :true, //DisablePointerReplacement : true, + // Skip fields tagged testdiff:"ignore" — consistent with 
messagediff behavior + FieldFilter: func(f reflect.StructField, _ reflect.Value) bool { + return f.Tag.Get("testdiff") != "ignore" + }, } return d.Sdump(obj) } From c978370a30e0d218d36a97c14ba0ab8304471c8a Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 19 Feb 2026 14:16:42 +0500 Subject: [PATCH 226/233] dev: truncate printable length --- pkg/util/messagediff.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/util/messagediff.go b/pkg/util/messagediff.go index beab0a933..7c0374349 100644 --- a/pkg/util/messagediff.go +++ b/pkg/util/messagediff.go @@ -64,7 +64,7 @@ func PrintPath(path *messagediff.Path, defaultPath string) (res string) { } func PrintTrimmedValue(value any) string { - const maxLen = 1024 + const maxLen = 256 str := Dump(value) if len(str) <= maxLen { return str From c726ac71ad2957920de00382a3d3fbf655fcce5e Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 19 Feb 2026 14:23:28 +0500 Subject: [PATCH 227/233] dev: turn strict go off --- pkg/util/dump.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/util/dump.go b/pkg/util/dump.go index 582e1a9dd..fb206e377 100644 --- a/pkg/util/dump.go +++ b/pkg/util/dump.go @@ -30,7 +30,7 @@ func Dump(obj interface{}) (out string) { d := dumper.Options{ Separator: " ", - StrictGo: true, + StrictGo: false, //Compact :true, //StripPackageNames :true, //HidePrivateFields: true, From 1cd2b4f3c93e566785c5ece12bafc5c7e7c2ff5d Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 19 Feb 2026 14:25:43 +0500 Subject: [PATCH 228/233] dev: compactify --- pkg/util/dump.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/util/dump.go b/pkg/util/dump.go index fb206e377..2125e2603 100644 --- a/pkg/util/dump.go +++ b/pkg/util/dump.go @@ -29,10 +29,10 @@ func Dump(obj interface{}) (out string) { }() d := dumper.Options{ - Separator: " ", - StrictGo: false, - //Compact :true, - //StripPackageNames :true, + Separator: " ", + 
StrictGo: false, + Compact: true, + StripPackageNames: true, //HidePrivateFields: true, //FieldExclusions: regexp.MustCompile(`^(XXX_.*)$`), // XXX_ is a prefix of fields generated by protoc-gen-go //HideZeroValues :true, From 243a0df479f9537315c9db8ef45d68cd857c35f4 Mon Sep 17 00:00:00 2001 From: alz Date: Thu, 19 Feb 2026 18:02:36 +0300 Subject: [PATCH 229/233] Check for Aborted status when suspend flag is set in the middle of a reconcile --- tests/e2e/test_operator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index 0598bf325..e762b23f3 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -5089,8 +5089,8 @@ def test_010054(self): kubectl.launch(cmd) with Then(f"Reconcile should be interrupted and pod image should remain at {new_version}"): - # kubectl.wait_chi_status(chi, "Aborted", retries=5) - time.sleep(60) + kubectl.wait_chi_status(chi, "Aborted", retries=5, throw_error=False) + print(kubectl.get_chi_status(chi)) kubectl.check_pod_image(chi, new_version) From d342fcad47126cc01bcc2fd233ee499e0d892609 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Thu, 19 Feb 2026 21:52:58 +0500 Subject: [PATCH 230/233] test: Aborted --- pkg/controller/chi/worker-reconciler-chi.go | 4 ++-- pkg/controller/chk/worker-reconciler-chk.go | 4 ++-- tests/e2e/test_operator.py | 1 + 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pkg/controller/chi/worker-reconciler-chi.go b/pkg/controller/chi/worker-reconciler-chi.go index 7093b4a88..22c9bcf8e 100644 --- a/pkg/controller/chi/worker-reconciler-chi.go +++ b/pkg/controller/chi/worker-reconciler-chi.go @@ -67,8 +67,8 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *api.ClickHouseInstal case new.Spec.Suspend.Value(): // if CR is suspended, should skip reconciliation w.a.M(new).F().Info("Suspended CR") - if new.EnsureStatus().GetStatus() == api.StatusInProgress { - // CR was in the middle of 
reconcile when suspended — mark as Aborted + if new.EnsureStatus().GetStatus() == api.StatusInProgress || new.EnsureRuntime().ActionPlan.HasActionsToDo() { + // Either was mid-reconcile when suspended, or has pending changes suppressed by suspend — mark as Aborted new.EnsureStatus().ReconcileAbort() _ = w.c.updateCRObjectStatus(ctx, new, types.UpdateStatusOptions{ CopyStatusOptions: types.CopyStatusOptions{ diff --git a/pkg/controller/chk/worker-reconciler-chk.go b/pkg/controller/chk/worker-reconciler-chk.go index 6b73ef810..304002e80 100644 --- a/pkg/controller/chk/worker-reconciler-chk.go +++ b/pkg/controller/chk/worker-reconciler-chk.go @@ -67,8 +67,8 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *apiChk.ClickHouseKee case new.Spec.Suspend.Value(): // if CR is suspended, should skip reconciliation w.a.M(new).F().Info("Suspended CR") - if new.EnsureStatus().GetStatus() == api.StatusInProgress { - // CR was in the middle of reconcile when suspended — mark as Aborted + if new.EnsureStatus().GetStatus() == api.StatusInProgress || new.EnsureRuntime().ActionPlan.HasActionsToDo() { + // Either was mid-reconcile when suspended, or has pending changes suppressed by suspend — mark as Aborted new.EnsureStatus().ReconcileAbort() _ = w.c.updateCRObjectStatus(ctx, new, types.UpdateStatusOptions{ CopyStatusOptions: types.CopyStatusOptions{ diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index e762b23f3..7d711142e 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -5061,6 +5061,7 @@ def test_010054(self): check={ "pod_count": 1, "pod_image": old_version, + "chi_status": "Aborted", "do_not_delete": 1, }, ) From 51ead7b6cab370b6730d16ba1d0949f34ce3e4a2 Mon Sep 17 00:00:00 2001 From: alz Date: Thu, 19 Feb 2026 20:16:37 +0300 Subject: [PATCH 231/233] Remove debug logging --- tests/e2e/test_operator.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/e2e/test_operator.py 
b/tests/e2e/test_operator.py index 7d711142e..11f6daab5 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -5090,9 +5090,7 @@ def test_010054(self): kubectl.launch(cmd) with Then(f"Reconcile should be interrupted and pod image should remain at {new_version}"): - kubectl.wait_chi_status(chi, "Aborted", retries=5, throw_error=False) - print(kubectl.get_chi_status(chi)) - + kubectl.wait_chi_status(chi, "Aborted", retries=5) kubectl.check_pod_image(chi, new_version) with When("Remove suspend attribute from CHI"): From 3c6ccf85e7b09486a9aa9627664ceb13a107190f Mon Sep 17 00:00:00 2001 From: Diego Nieto Date: Fri, 20 Feb 2026 13:04:23 +0100 Subject: [PATCH 232/233] helm: add optional registry prefix for operator and metrics images (#1928) Signed-off-by: Diego Nieto --- deploy/helm/clickhouse-operator/README.md | 2 ++ .../templates/generated/Deployment-clickhouse-operator.yaml | 4 ++-- deploy/helm/clickhouse-operator/values.yaml | 4 ++++ dev/generate_helm_chart.sh | 4 ++-- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/deploy/helm/clickhouse-operator/README.md b/deploy/helm/clickhouse-operator/README.md index f20676a1a..b0ac940c1 100644 --- a/deploy/helm/clickhouse-operator/README.md +++ b/deploy/helm/clickhouse-operator/README.md @@ -95,6 +95,7 @@ crdHook: | metrics.enabled | bool | `true` | | | metrics.env | list | `[]` | additional environment variables for the deployment of metrics-exporter containers possible format value `[{"name": "SAMPLE", "value": "text"}]` | | metrics.image.pullPolicy | string | `"IfNotPresent"` | image pull policy | +| metrics.image.registry | string | `""` | optional image registry prefix (e.g. 
1234567890.dkr.ecr.us-east-1.amazonaws.com) | | metrics.image.repository | string | `"altinity/metrics-exporter"` | image repository | | metrics.image.tag | string | `""` | image tag (chart's appVersion value will be used if not set) | | metrics.resources | object | `{}` | custom resource configuration | @@ -104,6 +105,7 @@ crdHook: | operator.containerSecurityContext | object | `{}` | | | operator.env | list | `[]` | additional environment variables for the clickhouse-operator container in deployment possible format value `[{"name": "SAMPLE", "value": "text"}]` | | operator.image.pullPolicy | string | `"IfNotPresent"` | image pull policy | +| operator.image.registry | string | `""` | optional image registry prefix (e.g. 1234567890.dkr.ecr.us-east-1.amazonaws.com) | | operator.image.repository | string | `"altinity/clickhouse-operator"` | image repository | | operator.image.tag | string | `""` | image tag (chart's appVersion value will be used if not set) | | operator.priorityClassName | string | "" | priority class name for the clickhouse-operator deployment, check `kubectl explain pod.spec.priorityClassName` for details | diff --git a/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml b/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml index 342933cec..6dfc8d0d2 100644 --- a/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml +++ b/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml @@ -66,7 +66,7 @@ spec: name: {{ include "altinity-clickhouse-operator.fullname" . }}-keeper-usersd-files containers: - name: {{ .Chart.Name }} - image: {{ .Values.operator.image.repository }}:{{ include "altinity-clickhouse-operator.operator.tag" . 
}} + image: {{ if .Values.operator.image.registry }}{{ .Values.operator.image.registry | trimSuffix "/" }}/{{ end }}{{ .Values.operator.image.repository }}:{{ include "altinity-clickhouse-operator.operator.tag" . }} imagePullPolicy: {{ .Values.operator.image.pullPolicy }} volumeMounts: - name: etc-clickhouse-operator-folder @@ -144,7 +144,7 @@ spec: securityContext: {{ toYaml .Values.operator.containerSecurityContext | nindent 12 }} {{ if .Values.metrics.enabled }} - name: metrics-exporter - image: {{ .Values.metrics.image.repository }}:{{ include "altinity-clickhouse-operator.metrics.tag" . }} + image: {{ if .Values.metrics.image.registry }}{{ .Values.metrics.image.registry | trimSuffix "/" }}/{{ end }}{{ .Values.metrics.image.repository }}:{{ include "altinity-clickhouse-operator.metrics.tag" . }} imagePullPolicy: {{ .Values.metrics.image.pullPolicy }} volumeMounts: - name: etc-clickhouse-operator-folder diff --git a/deploy/helm/clickhouse-operator/values.yaml b/deploy/helm/clickhouse-operator/values.yaml index 3dcf4444c..32d4c5c27 100644 --- a/deploy/helm/clickhouse-operator/values.yaml +++ b/deploy/helm/clickhouse-operator/values.yaml @@ -40,6 +40,8 @@ crdHook: annotations: {} operator: image: + # operator.image.registry -- optional image registry prefix (e.g. 1234567890.dkr.ecr.us-east-1.amazonaws.com) + registry: "" # operator.image.repository -- image repository repository: altinity/clickhouse-operator # operator.image.tag -- image tag (chart's appVersion value will be used if not set) @@ -65,6 +67,8 @@ operator: metrics: enabled: true image: + # metrics.image.registry -- optional image registry prefix (e.g. 
1234567890.dkr.ecr.us-east-1.amazonaws.com) + registry: "" # metrics.image.repository -- image repository repository: altinity/metrics-exporter # metrics.image.tag -- image tag (chart's appVersion value will be used if not set) diff --git a/dev/generate_helm_chart.sh b/dev/generate_helm_chart.sh index 5a3ce88f9..c4812865d 100755 --- a/dev/generate_helm_chart.sh +++ b/dev/generate_helm_chart.sh @@ -225,14 +225,14 @@ function update_deployment_resource() { done yq e -i '.spec.template.spec.containers[0].name |= "{{ .Chart.Name }}"' "${file}" - yq e -i '.spec.template.spec.containers[0].image |= "{{ .Values.operator.image.repository }}:{{ include \"altinity-clickhouse-operator.operator.tag\" . }}"' "${file}" + yq e -i '.spec.template.spec.containers[0].image |= "{{ if .Values.operator.image.registry }}{{ .Values.operator.image.registry | trimSuffix \"/\" }}/{{ end }}{{ .Values.operator.image.repository }}:{{ include \"altinity-clickhouse-operator.operator.tag\" . }}"' "${file}" yq e -i '.spec.template.spec.containers[0].imagePullPolicy |= "{{ .Values.operator.image.pullPolicy }}"' "${file}" yq e -i '.spec.template.spec.containers[0].resources |= "{{ toYaml .Values.operator.resources | nindent 12 }}"' "${file}" yq e -i '.spec.template.spec.containers[0].securityContext |= "{{ toYaml .Values.operator.containerSecurityContext | nindent 12 }}"' "${file}" yq e -i '(.spec.template.spec.containers[0].env[] | select(.valueFrom.resourceFieldRef.containerName == "clickhouse-operator") | .valueFrom.resourceFieldRef.containerName) = "{{ .Chart.Name }}"' "${file}" yq e -i '.spec.template.spec.containers[0].env += ["{{ with .Values.operator.env }}{{ toYaml . | nindent 12 }}{{ end }}"]' "${file}" - yq e -i '.spec.template.spec.containers[1].image |= "{{ .Values.metrics.image.repository }}:{{ include \"altinity-clickhouse-operator.metrics.tag\" . 
}}"' "${file}" + yq e -i '.spec.template.spec.containers[1].image |= "{{ if .Values.metrics.image.registry }}{{ .Values.metrics.image.registry | trimSuffix \"/\" }}/{{ end }}{{ .Values.metrics.image.repository }}:{{ include \"altinity-clickhouse-operator.metrics.tag\" . }}"' "${file}" yq e -i '.spec.template.spec.containers[1].imagePullPolicy |= "{{ .Values.metrics.image.pullPolicy }}"' "${file}" yq e -i '.spec.template.spec.containers[1].resources |= "{{ toYaml .Values.metrics.resources | nindent 12 }}"' "${file}" yq e -i '.spec.template.spec.containers[1].securityContext |= "{{ toYaml .Values.metrics.containerSecurityContext | nindent 12 }}"' "${file}" From 6398f7ff7f6e2560e09cc5de07f9eeaf32a7adf0 Mon Sep 17 00:00:00 2001 From: Vladislav Klimenko Date: Fri, 20 Feb 2026 18:16:09 +0500 Subject: [PATCH 233/233] env: operatohub manifests --- ...perator.v0.26.0.clusterserviceversion.yaml | 1666 +++++++++++++++++ ...allations.clickhouse.altinity.com.crd.yaml | 1527 +++++++++++++++ ...templates.clickhouse.altinity.com.crd.yaml | 1527 +++++++++++++++ ...ns.clickhouse-keeper.altinity.com.crd.yaml | 875 +++++++++ ...gurations.clickhouse.altinity.com.crd.yaml | 563 ++++++ 5 files changed, 6158 insertions(+) create mode 100644 deploy/operatorhub/0.26.0/clickhouse-operator.v0.26.0.clusterserviceversion.yaml create mode 100644 deploy/operatorhub/0.26.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml create mode 100644 deploy/operatorhub/0.26.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml create mode 100644 deploy/operatorhub/0.26.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml create mode 100644 deploy/operatorhub/0.26.0/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml diff --git a/deploy/operatorhub/0.26.0/clickhouse-operator.v0.26.0.clusterserviceversion.yaml b/deploy/operatorhub/0.26.0/clickhouse-operator.v0.26.0.clusterserviceversion.yaml new file mode 100644 index 000000000..0de0cd65f --- /dev/null +++ 
b/deploy/operatorhub/0.26.0/clickhouse-operator.v0.26.0.clusterserviceversion.yaml @@ -0,0 +1,1666 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + name: clickhouse-operator.v0.26.0 + namespace: placeholder + annotations: + capabilities: Full Lifecycle + categories: Database + containerImage: docker.io/altinity/clickhouse-operator:0.26.0 + createdAt: '2026-02-20T18:07:52Z' + support: Altinity Ltd. https://altinity.com + description: The Altinity® Kubernetes Operator for ClickHouse® manages the full lifecycle of ClickHouse clusters. + repository: https://github.com/altinity/clickhouse-operator + certified: 'false' + alm-examples: | + [ + { + "apiVersion": "clickhouse.altinity.com/v1", + "kind": "ClickHouseInstallation", + "metadata": { + "name": "simple-01" + }, + "spec": { + "configuration": { + "users": { + "test_user/password_sha256_hex": "10a6e6cc8311a3e2bcc09bf6c199adecd5dd59408c343e926b129c4914f3cb01", + "test_user/password": "test_password", + "test_user/networks/ip": [ + "0.0.0.0/0" + ] + }, + "clusters": [ + { + "name": "simple" + } + ] + } + } + }, + { + "apiVersion": "clickhouse.altinity.com/v1", + "kind": "ClickHouseInstallation", + "metadata": { + "name": "use-templates-all", + "labels": { + "target-chi-label-manual": "target-chi-label-manual-value", + "target-chi-label-auto": "target-chi-label-auto-value" + } + }, + "spec": { + "useTemplates": [ + { + "name": "chit-01" + }, + { + "name": "chit-02" + } + ], + "configuration": { + "clusters": [ + { + "name": "c1" + } + ] + } + } + }, + { + "apiVersion": "clickhouse.altinity.com/v1", + "kind": "ClickHouseOperatorConfiguration", + "metadata": { + "name": "chop-config-01" + }, + "spec": { + "watch": { + "namespaces": { + "include": [], + "exclude": [] + } + }, + "clickhouse": { + "configuration": { + "file": { + "path": { + "common": "config.d", + "host": "conf.d", + "user": "users.d" + } + }, + "user": { + "default": { + "profile": "default", + "quota": "default", + 
"networksIP": [ + "::1", + "127.0.0.1" + ], + "password": "default" + } + }, + "network": { + "hostRegexpTemplate": "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$" + } + }, + "access": { + "username": "clickhouse_operator", + "password": "clickhouse_operator_password", + "secret": { + "namespace": "", + "name": "" + }, + "port": 8123 + }, + "metrics": { + "timeouts": { + "collect": 9 + }, + "tablesRegexp": "^(metrics|custom_metrics)$" + } + }, + "template": { + "chi": { + "path": "templates.d" + } + }, + "reconcile": { + "runtime": { + "reconcileCHIsThreadsNumber": 10, + "reconcileShardsThreadsNumber": 5, + "reconcileShardsMaxConcurrencyPercent": 50 + }, + "statefulSet": { + "create": { + "onFailure": "ignore" + }, + "update": { + "timeout": 300, + "pollInterval": 5, + "onFailure": "abort" + } + }, + "host": { + "wait": { + "exclude": "true", + "queries": "true", + "include": "false", + "replicas": { + "all": "no", + "new": "yes", + "delay": 10 + }, + "probes": { + "startup": "no", + "readiness": "yes" + } + } + } + }, + "annotation": { + "include": [], + "exclude": [] + }, + "label": { + "include": [], + "exclude": [], + "appendScope": "no" + }, + "statefulSet": { + "revisionHistoryLimit": 0 + }, + "pod": { + "terminationGracePeriod": 30 + }, + "logger": { + "logtostderr": "true", + "alsologtostderr": "false", + "v": "1", + "stderrthreshold": "", + "vmodule": "", + "log_backtrace_at": "" + } + } + } + ] +spec: + version: 0.26.0 + minKubeVersion: 1.12.6 + maturity: alpha + replaces: clickhouse-operator.v0.25.6 + maintainers: + - email: support@altinity.com + name: Altinity + provider: + name: Altinity + displayName: Altinity® Kubernetes Operator for ClickHouse® + keywords: + - "clickhouse" + - "database" + - "oltp" + - "timeseries" + - "time series" + - "altinity" + customresourcedefinitions: + owned: + - description: ClickHouse Installation - set of ClickHouse Clusters + displayName: ClickHouseInstallation + group: 
clickhouse.altinity.com + kind: ClickHouseInstallation + name: clickhouseinstallations.clickhouse.altinity.com + version: v1 + resources: + - kind: Service + name: '' + version: v1 + - kind: Endpoint + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: StatefulSet + name: '' + version: v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Event + name: '' + version: v1 + - kind: PersistentVolumeClaim + name: '' + version: v1 + - description: ClickHouse Installation Template - template for ClickHouse Installation + displayName: ClickHouseInstallationTemplate + group: clickhouse.altinity.com + kind: ClickHouseInstallationTemplate + name: clickhouseinstallationtemplates.clickhouse.altinity.com + version: v1 + resources: + - kind: Service + name: '' + version: v1 + - kind: Endpoint + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: StatefulSet + name: '' + version: v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Event + name: '' + version: v1 + - kind: PersistentVolumeClaim + name: '' + version: v1 + - description: ClickHouse Operator Configuration - configuration of ClickHouse operator + displayName: ClickHouseOperatorConfiguration + group: clickhouse.altinity.com + kind: ClickHouseOperatorConfiguration + name: clickhouseoperatorconfigurations.clickhouse.altinity.com + version: v1 + resources: + - kind: Service + name: '' + version: v1 + - kind: Endpoint + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: StatefulSet + name: '' + version: v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Event + name: '' + version: v1 + - kind: PersistentVolumeClaim + name: '' + version: v1 + - description: ClickHouse Keeper Installation - ClickHouse Keeper cluster instance + displayName: ClickHouseKeeperInstallation + group: clickhouse-keeper.altinity.com + kind: ClickHouseKeeperInstallation + name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com + version: v1 + resources: + - kind: 
Service + name: '' + version: v1 + - kind: Endpoint + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: StatefulSet + name: '' + version: v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Event + name: '' + version: v1 + - kind: PersistentVolumeClaim + name: '' + version: v1 + description: |- + ## ClickHouse + [ClickHouse](https://clickhouse.yandex) is an open source column-oriented database management system capable of real time generation of analytical data reports. + Check [ClickHouse documentation](https://clickhouse.yandex/docs/en) for more complete details. + ## The Altinity Operator for ClickHouse + The [Altinity Operator for ClickHouse](https://github.com/altinity/clickhouse-operator) automates the creation, alteration, or deletion of nodes in your ClickHouse cluster environment. + Check [operator documentation](https://github.com/Altinity/clickhouse-operator/tree/master/docs) for complete details and examples. + links: + - name: Altinity + url: https://altinity.com/ + - name: Operator homepage + url: https://www.altinity.com/kubernetes-operator + - name: Github + url: https://github.com/altinity/clickhouse-operator + - name: Documentation + url: https://github.com/Altinity/clickhouse-operator/tree/master/docs + icon: + - mediatype: image/png + base64data: |- + iVBORw0KGgoAAAANSUhEUgAAASwAAAEsCAYAAAB5fY51AAAAAXNSR0IArs4c6QAAQABJREFUeAHs + vQmgZ2lVH3j/r6p676abpSNLE2TrRlwSQBoVtVFHiUGFLKOEKCQTTUzGmTExhmhiSJBoTMZEs2KQ + BsWNREWdyCTOGOMEQZbI0i0NyCaIxAB203tXvfef33LO+c697/+qqqu3V1Xvvq773fOd3/md5Tvf + 9+57/erVajq4DiqwoQLnvf4PH3Pe4ekx29P0oNVqfTEgl6xWE8eLV6uVxgnjCjLHab0TI+ZW00PW + 6zWgq0+up/XNkG+edjiubt6Zppu31tOnd1brm7fwvL1e3by1Bf165+b1+vDN02r7Jhh+6MZnXfYh + jAfXQQVmFUC/HVxnbQV+aX3Bg875xJOOracrD21NV00705U4XK6aVtMTVqut81WX6BAcNpOOIIye + x4iJFMfDhmry8CIwRh5mZBHfppH61XT7znp672pavQeH3g07W1s3rLbX77npQQ+6YXra6rYNXg6m + zoIKRPedBZmerSmu16vzfvnGRx9a3XXlar2+CscEDqXVVTgSrtzaWj2SZRmHURRJh0uf9/EycNE2 + 
cVpxPg+jLLMOJczPRiiPd0j1Q634eMgtr/X693CIvWe9OnQD+G/goXbXzs4Nt3/FZR8BxwaDJcGB + fLpW4ODAOl1Xbq+4cUBd+Mv/4/Om9bFrcF5cs1qvvgSvM5fpMIDN8tCxzDcaHjoCiFkyn+psur/f + sOaHneLfdHgxRs7zcNxZfwrjryOPX5u2pl+78VmXvgsyvgo9uM6UChwcWKf7Sq7XWxf+8u997s72 + 1jWHpvU169X6S/Dl3GVKi4cQrjp8JO11aGnPGxGHlw+ztPeh5jOtTjHhfdj50AgX8zcrHib8Mg9K + 2W8a49DJw2c2Jmkf85Aib/LnGPxw9ik8/joyODjAeu1O4+eDA+t0WzwcUBf84keeslof4uF0DRbw + mTgJ8I1xHArIRYdHjD4c4piAntdm3JnxhjU75BaHF7PH98Q+hfHgAFMnnJ43d/HpGfvZEXUcUOtt + fHm3tb4Gp9Izp62tBzF5HU4+pVQLnkn90AIg5ufLvPnQIp/gfgDRHHf6vWExHR/abWxvcsgIB9hO + HGBbB19CxvLv5yFbdD/HeFbGdu7PffSJ07T9l7ZWqxegAI/YdJioMFLsPkzqsMkvxNrh1Q81486O + N6xNh5fyzy8rd3Y+tl5NP74znfPKm7/ikveelY23z5M+OLD20wL9Xx++7Nw7tl+wNW29EBvnadxM + vOrwydVq8/FKFbiDN6xT+l6Zqje/gectWINXr849/JM3ffGlfzjXHkgPVAVyCzxQ/g/8vnZ9zjmr + D/0JLMQ34bh5ztbW1jkqSjuU/EYUpeI8JvIw89dxB29YqkP7co/yyRxeszcs2vcLMip7Fwr+Szs7 + 61fffM5DXz89a3WsQw6e798KHBxY92+9y9uRf//Bq7d2dl6IDfH1+HmoB0uhw+g4h0+uVjvMDt6w + ol44XOrwQTF3ffmHOZaPh9iuw03FX9wC1w89PP8BiH9ie3341bc++7J3LCwOxPuhArkF7gdXBy6m + 137wM86Zdl6EHfNN+N7Uk+JVaVaY+ZuT36S0+XKlDt6wZvWSsOkQupfesDa+qcEfjsd3Y0lefezI + zqtvfdblH98d1MHMfVGB3Ab3BfcBZ1bgtR958Dnru/43NP//ii9UHqI3AehYfB9GsQwHb1h8Bbr7 + b0B5OOWYdd00ngp/8GBwfHrwbWe988nVtPXPp/U5//zTz34Qf+7r4LoPK3BwYN2HxZ1+9n2POnJs + 62+gyN+MQ+pCHk/jsIrjiodUuw7esHgmtC/v4hCqL+Narepx0yF0koeX1qP5K04+BG//slCrxvn6 + dBO4abp1Z1r/yLHtwz94+1c/5KM0P7ju/QrMd8u9z39WMp772g9cubOz/bfwd9z+PPr6SB1C0eTj + 0OIR5i/7VCgeXrl52nzhc7XikBOvCYZ5s9Mm77JQ9tf9buQHYMxrmy5kEYvRccggPGw+dMwytvpM + jsMhD4nZWKztoR8meTjlCJjy2zRu8tNo67HzB490nG+XDzP+0C4OWczvrNdHkeGPT+vVD9z8Jx72 + ngY9eLwXKsAaH1z3UgWO/NT78aMI67+Nln4uvkeF357SN7G3Zx0C+Rk6Dp8MQZufQjuUtPlypTgv + 2pgQrr25Le0Wfsr/DGd78na/iqnczH+SXrhZehmgrOa3xSGx642FbvFH7jkCrzjbaH9EbLgW/HnY + nZKfTh+8u3g4XxHjMeQ8tCjiz860Wv/89tbW99/2lQ97a6c9eD71Chyny06d9GyzPPxT7//y1Wr7 + b+OM+vI8o9zS8Zk3Dods8jo0UCjhUs8RnV762aFSZ0k9EDc/ZFKMZW32fU1Oih+BzXG749IhAmLH + IYNys+nQYVSuy4aRuzzy3zUWa3sI/L3ip9HWY+fHJOPWxfl2+TAbb1iUkQj+GBc0/w9+LOL7bvnq + 
z/jVZnrweAoViM4+Bcuz3eQl661DV33guVvrHRxU09O8yWLPoTb4chB3NG0cGtnEdQjs0rug2vx8 + bIeNtkCulDY11TGhcfdhspefmp/x296niXkH/4jLcTS/s/QyQONn99i19+jNR3kzgg3Xgv8e+en0 + wetDqR2ynM/1Iz7k/oZlmog34Ov1zlumaev7bn725b+ABTz4LRIu0t26H6fL7hbPWQU+8tPv+Tz8 + FpeX48u+q3OvqADaVD5r3KMHb1izNyAUqW91Nl/JWchN46buCtyMH/XfdbjA9oR+TsQfcQpGv+2y + vxO+YUVcjg8B/cb26tC33fbsh/23RnXweBIVODiwTqJIBcGPJxzZvvNl+Ez5Lfhhz63abQRsOKy0 + HTmvy9um3nByG5U+UCnHWPiiwQP2zHgDWvAu7RZ+Bp8JLR+8YakOi8Nozzc14Vx3rVrIJ37DQp3x + wUMOF355xPrlq3Mu/Lv4e4uf9Oof3E9UAXftiVBnux5f/h258n3fggZ7GX4h3oN5JujNAA/svTgj + Nh5aauIBQCXbl2+SFocPCDcfKgs/sCUuAtEKDTGWNfwKJ4RvJ8Ufh2LmOYs78+n8s0IAnXn0Ee7F + t2lM+01ji70eA3ev+CnS9tD5Mc24dXG+Xaf4hsUCgQXrtLPzyenQ9F03P/vhr8CCHnyZ2Gq76TE6 + e5PqYI4VOPJT770azfVyNBN+i6cPiTqEoudUqTgtYtBnUrV5bm42JwjqsAgZEzLPWx0uMV/4hIWD + Oa7xLu0WfgafCS3b3qfJmHdejmxpp7hVj4h8kUfmozE2f57u3uQ+BOgty1gj8PLXRvsjYsO14L9H + fjp98O6Kl/NZV+JDVl+kyHllFgMSrcMNeC1j8mDE7zb7b9s7h77t9uf8kd+Q6cFtYwXcnRtVZ/nk + j/3O5UcOb/9jvLd/I75XxXbjadWH2FSeVrWWejR1HW4G4N4OF0k+BId905MP1zgsJJbDDEtxCafw + hBey2YdlTDOu4Xcjv9LtuN1xDb+sS9QnHGlzwv9shE5+N41pv2kMztkQuBl/+tvEjzlWk3jF3ccZ + cQidn3aJ4Xy75D/XGfPib4fZcIP6EacJAXHLutFOpFCvf2xaHX7xrX/y4K/7qCKLm3fEYvKsFv/z + +vDWx977bfghqpegOJd4U2aTe5PXIcQmywrycBgwTFMREyqo5TocdulddR1CfGyHjdzs8hMTwu0+ + TPbyU/Mzftv7NDHviGPE5Tia31l6GaDxs/vYtcqLm5Zo8W0aqUd8wsVYh8yMOIQFv3Z/2m/ix5z8 + b/LT+YN3V7ycrwzowPI9ecMindyRblp/Gs9/79YLH/4vpoPfDtFXRFWfTZzNwuGffN+XTjvb/wab + Bf+qTG4a7rHYXhxjk6pFtSm0B122pR7lTZ4AYAhePAVr8HPCXavNKpEI+7c/ieVQcTVFuJ/zhX1Y + ajgpfuXJ+O1/Fjcd8YrRccjA87j3w0b+sAMrX+pp3kftVsxsGoHbdQXuXvGzixwTnZ9iYjjfLh9m + sc6YpzwyKxrXg/0gXgGNC7mm05Pd/Pb2tP7m25/zyIMvE1EtXtF5Fs7a+2vwd/5W2z+Ipvmz3sw+ + VGK3oizonjgNduujaqV3cx+8YbVu0m4ch5E3edZpwwh8HXKoqzd52Dfaelzww0DrdUp+ihQPe/Fw + vo7bwPEwwgc3lNQYnVkMCp9656N2SR75CXeCDx54orODLxNRBF6s79l9/eT1Tz60fegX0ECPc7ex + 16P5tFksq2/UZZTdRd5UllXEpZ7NySbmvAG4W+4tX3rZN33YOZ6FHzDJTkTmD/fDX7O3f98HX9ox + zgU/Jua43XEBIELHIYNyo8MC+tkIrfxsGrVpwbdpLNb2ELgZf/rbxI85Rku84u5jo63Hzk+7VHC+ + 
XfKf64x58WcjSU53qB9x4g0FcSHXdHoqN3yA3c76evxWiK+/5Wsffj0mztoL36o5e69DP/7ubzm0 + c+gt6PPHae+hN7xJvTnZO9qMflDXzjax9FE/EoSMQc3JCdsTo+0S/KlP/uBA1ya+j+KjOa/0o2YP + WX7kfre9cGTwNYsfU5bpF4JgmYcdaj7ycBwRSMVBO2gMtJPgpaA8FnmJB7rZGPYzfLNb8qe8F955 + wf/J+Mk4Mdal/LweGSd18idQ1scedBht1GNS5eEnhVjfkEPRB8SbvHJCRhGstlZPXk/bb7nglz72 + zak5G0dX52zL/LXXX3ToztWPo/me610TZXBvqCmzubwXSzFvLjT1bK+qydnUgqn5ksclNs+uzUQD + XjJsmyTmCx8w4QRPR1aU386XOPLHNfjSjpvJ7gUJojlud1zzOLQL04XeJGQfh47fRLIuG8Ys5Kax + WNtD4MSLeGcjYMpn03gq/MEj77Rvl/OKwwjzlPOQIWy4Q/3wIT3LnjgBAsdpRa4H3HiZz36sB8/r + brnoyDdOz7r8FmPOnrs79uzJd5p+4t1P3dpe/wx+Uv1x7oXctNE0bH58cLMNPZpmtom7PopX+mwx + dWU/BQBsmz4+c+amzyWQXwrk08B4SpzFEQAMjXdpt/AzP4RItylfz5tf98D1edcn3DkuQ3ffx64V + bmw+iED3LS4ZeMXVRtWDPJuuBX+eEqfkp/MH7y4exZGRwyBk9U2K4ol4I0Hz+NBi3SirAvJjGrMi + /108MghW6d+/s9r6+tue84i3afIsuZ09XxLiL/9t/fi7//rWsZ034ueqHpe94marnlMT9c+Eu5sq + mwnNNnqoui16D5vQzWh7dtOQsyk1q0Ci23h4hNzHWfOGA/GXgnax+Zf29FunCsNs8TMqybbXLhHe + 846P99hkgedMOmRWmw6THj/1XXb+ES/NRScm4xKfI31EXnzU1fNMXI4AVJ54nvnbZBd48eaNuOBL + e6oyDzwJaRn54aPnMfSAQeF4og4hh6IP4rEf0eNGP61+8kMZ33Pd3n7j+b/40W9P5NkwssZn/vUT + H77s0M5tP41V/srxhoG01QsuAXqTTYAejebTBDAcCQx5tz7KV/pssWYniOXyn9tI/MGBgXrDPRY+ + pnscA4fNAjuns7Bb+NmMs/28HhlHhjPnH3FYLzkedw2x+aPAGGJzA0gviruP1DOfNtofkRuuBf89 + 8tPpg3dXvJyvyPEY8ji0kiTiDXg/tLjMlMUjP6ZxPaI+YV4VEp4S9a4PPUF+3W1bF//F6Tln/j9H + Fl2ZBT4Dxx+7/plb6+knsQGuyB4bm1arzVPCibtbvFnYFNo0VJdCvaNmg8XQR91CUXo2VfB0B+Uf + k2pZ8YsQE+FXouMqfIQJx6JTXCYIeQNf4xEo5O53Iz8AY975z2URidJxyCBdaDMKn/lwhFZ+N43c + jCrchrFY20Pg+6FW/jbxY07+N/lptPXY+YNPOs63qw4hrjPm6xBSplrOSIv6OGQMBBoP8lMDrIK/ + 3PAhDm/y46N4IOeF37f1kdXhQ887079EPHO/JOSXgK+6/nu2ptWvoZGv8KbinuAhwhG3ehiy9WgK + bR7jBWyyzdhsYceuKX3QshnZXHIkQMmylz75qceF5k18H1uYww/tS0G7FOl38LK5tSk063mbDZwP + VQCoCN7hn3OOq9ulQ7HE5iYyr2Fv/10WD4CzMeokXNYhR5JWHcPDCfDkmfGnv012onc9gt3+wn/y + UJd5qE4loz74EK7izPoCBIXjifUNORR9EI/98M6LPGEniX6GTAQvfE/2iunYMX+JiN737Jl3PzMT + ++X3nbv1B3e+arW19Q3+DBZpeu2jedhEWFB3mVd2pndTeC+WYt5c3BRqvmgMEoYsWjVxby7z7NpM + 
2eSyD7+gzM1ReJrzCrz0Lf5wX3YznC3DfL65NvIrj47bHdeMf1YIlCE3ex/h3XXZMKb9prHFXo+B + u1f8FGl76PyYZty6ON+u/fKG5Tqw7jzi1j9965ErXjR99erOFuoZ8Zhb4IxIRkn82DsuXK0P/RJ+ + XdWz+mmiTYnVnG1O7R6XIM6K0MchU3p2AXAauIm7PkpX+tyM5A07QSyX//jMmZs+WOTf8IwrD41A + lB/rbd/zWtgt/JR/8uCy3PMZ8wboHjjjPQ/cLD3bGb24x+bP9fAmzzptGHOd2qg8ybPpWvDfIz+d + P3h3xas4mG/EE7K/XMNshZl6QFkuKPLLOS1j8nCUXgNYiVvyBEBeBw/Ecc15/vOt5x35mumrPuPW + ATj9n86sLwnxGxZWO4d+FV8GPovN0ZvccjZNNEPritE7rRnUI2y6aJaQbTaaTG0wbxY1He3k1wDx + uGnhHx+8hp7qgc/5whvuwBkH7R0IDbVJKq7Gaxw1vgbf8O9NBL1g5h3+aee4On86FIt3nx3EfdiT + b56XeMTKPOx2I77ZyV/3oAWL+iUuR+Aqz+TPcZNd4DGMS+vZ8g5NxunIwz/rg49ZfZQZ9TCEwvFE + HUIORR/EQ1cRJp9EkH7tZ9STWF7Si7fq+azzbz/6q9PPffxyI86MO2t8ZlyvfNcVq0MrHlaP1xKj + ebXqHHHpTQJdMN4wMGlg6BMezSd7GRoY8u43EpnTgfjKzO3reXsQT/nfpTcP9Ya3uDGR08NPx/W8 + FnYLP+U/CHfnM+wdR4bDuriOngcOE7O4DN19j82f6+HNG+UHWrx9zHVqo/MmcsO14L9Hfjp98O6K + l/NZV+JDHodWkkS8ATePDxvWTYcMeeSnBtQjD7/OEzi6k373obWLx/H/znq99WW3P/eRH0m203k8 + M96wrn3356AB3oA3hcdXc3mx3AVc5GqK1gzoAS1yDtFE+druXqpuU4/O+cvcD6P31FQ0cFOyRcxT + 9pJpFk1NCJJIuY8tzOGH9qWgXYrmK3vhCPTFeZsN3MEb1mIdWCoV1OuherX6RSU1uM7A4aPXFQSh + xwCF6x7rG3Io+iAeGo62IE/YcV5+hkwsL8UhXtGFrInHT9P2G8553Uc+x8jT+46MTvPr1dc/fbXe + +Y/4ntWltZhISW8q0XTMUG8S3Kw8FCTjVg/Uu0k2v3EAKEDwsGlCVpfagfjGNEsbdtS7nYf/kMUr + vW/iFdxLM4s350UbS0eHEY/TWdgt/Ay+juv5jHlFZHHELX/QRKIpLvOQbd5yHWKsQ5M0+KO4+5jr + 1Eb7I3LDteDXbkdgp+Sn0+8VL+crcjyGPA6tJIl4A94PLZUveeTHNK5HHn6dBxrhWS/qdx9azrvx + AK/1lt36xqPrra86+rwr3pysp+N4er9hvfpdz8Fh9atYvEv1GSYWUYuuJmiLLJmL2ZrBQK2b1lTw + 1gwhqzl32aMLB311ScDUVAQ4Lrpw15Z/yY6nGic2mdAKaBGvFfZL+4qfflKkn8HL5u6HSfmf8dte + fMGbcRvmPOzO/OlQknef/OZt2Nt/l8UD4Gxs8TBe4XMkaeiTP+UZruE5P+NPf5v8iD7ySgfEBV/y + UJV54ElIy4gXH8JVnKkHDArHk3kljxShz3o0O3sQQfrd67CSftA5zlk9pksPTzu/et7PfvRZoj1N + b6fvgXXtdd+wWk8/j8PqQtbebyZoGi5SytF0pYdi6DFrINXsTcnWm4f65BVgZs/uCLskCBkDetSE + thdADsq/9MlPPS7Em/g+io90vBgo/dC+FLRL0cCyF45AX+VfCdu/NgHNBDNR2YtuUVdSVRy0Y0D2 + SxWvYU++eV4KG5jZ2OIpfLNb8qcsP4nLMfzP+NPfJj8tXjz6Un4t75rOPD06T+DwIX9Vh9TDEAri + 
XGfWI+ujh5CzHs1OPim3+snPkCMs13vQhTxwsR4XrraOvf78n/vgN6Td6TaengfWq971HVj+n0ST + HM6C12c6Ni0m/RmHzYFVLNnz1mOyHgImOO3BTruQ46Fkm6Ve9CYQPmjZlckjyJBlL73jCQbAwy8m + nI9H+wtUxqWuFxA32oXfni/VwpHBF3nNx3iS3/ZSBO/wT5Tj6nbpUCyxuYnMa9gzsHle4gFwNva4 + E58jSUOf/CnLT+JyFDzyxPMJ/QS+uPlAf8GX9p523VjZISM/fAhXcaYeKChc96hDyKHog3jMK3rc + yNPqJz9DLpTijbBp1eInxv75sDoXP1L9k+f93IdPy19Tc9odWFuvetc/xv8J/Mf4jMH+qMsimoZN + hlnJsWgEWfZoPSbrgfOWbW8e6mXnB+Gt73xkx0UC4YM2tontBcCtxSd98lOPC/Emvo8tzOFHuwA2 + EXi4320vHBl8zeLHlGX6hSBY5sGJoO9xCxgK5ktQqzNFXj3+ZV7KB5jZGLyyyzrkaELex3UCfOUJ + ixP6ASbjLQfkD/9pT93AZX04on74EC7r48pUXR1PrC+A5tGDApQ78nc7OpTc+gJyvalJ75v4Bp35 + W/3sn37xBz8Jj+FHzvvZD31XozgtHk+rA2vr2uu+H2vwHaosHvqlzyhcTC4SFPkZhk3Hy7JH6zFZ + D5y3bHvzUC87PwhvfecjOy4SCB+0sU1sLwBuLT7pk596XGijxPexhTn80L4UtEuRiQzegzesqAvL + 2+uigu2uN2unq+llV9OuLyocMI7gwUfnH3qpta7Hf3PPOJM3HMpPi1N+hlyoZf+1+IkZfQuBxxV5 + VtPLzvvZD34/9afLxchPj+va6/4P/FPL/9TF3h1yfQZT83BNkFosGtHSQ+6fadwLLsGAR/NpQoa4 + oYlCLj+lJzuu0guNKNwUmjcA9+Z/l14gxWd4xsV4TF/zCsd68zdexiF82C38jPw7zvbdEXEmigGy + 48h5jFEW42Le8Pk916FtIqLFt2nMdWqj60uLDdeCP9e9NilMNvpr/KoLefoVvLt4hEtGGIQ8Dq0k + Cb5I1DyoI/Bql+ThCLoYUBfok1ZURRAS9eZJT1YseORn4CqP9KsV4D9Bvf72O//0Z/6zGdc+FViX + /X+96rrno6qv4T8P78XO1V2E7y7QptcSh+xNjTTVBdz8ufkwZ6BrMNN7E6uJyEOg+GowT0yLYKln + U+GjNn8EUP7LfeQh+/ALXdoVPmCKI/Ut/nBfdjOcAvRt8JlwyAGqPLJOzn/gWrwVRy8EN1/Wr43A + KtxNowtNwyhwGyOs2RC4e8XPjDiEzo8pxq2L8+3yIRDrjHnKSIBPQgWN68F+0ISAxoVc0+mp3JjP + fsiKeiaPPPgmPdwWjx5iQu5oZ/14oAz+Nf796Wn9wrv+zGNf0yj35SNz2N/Xte+6Bl9z/wr+IvNh + r8bmkL2ZYjGREeWOlx6LMzYdQOoF8w14O2RKj4fZJjaP7aN8pQ9ad4XtBKGf5n+X3jyKm4/k05CH + hsRZHAHA0HiXdgs/I//O3/MZ8+a336XdqIf1GW9I86FvHsQ3Nh/SAVJl7mOuUxvtj8gN14I/1/2U + /HT64N3Fw/mKHI8hb34zot5w80T+SJyyeOTHNK4H1iNpMTaCkKg3jybytuSBrHWTH/sz7zx+x7E+ + Nq22vuqOP/WYX026/Tju7+9hXXvd52Oxf5GHlYoaza5CxiJkUXPxOdaicPO2xaJi6GFp4KArfTRD + yPOm6vxF7wfhs5XdFI6LLoYst5IdjwLgreWXdrN4iWE+9EN7ESVvipQHr3EE+hp8A+fmh14w8w7/ + tGuHTNa94qCaAZmPaF7Dnnxjc5V/YBR+jmEvu8TnaELex3UC/N3yA9aMtxwov5Z3KAZu1E/1wXoo + 
n6pD6mGo8lB/vL7KejQ7+aTc6ic/Q46wHH8sQ8XR6lf1EJ0QZke8lEB7eL2z87ojr/3A05NzP46K + dT8GNr3ynY9Fo78Zv874IdoMKH6NGwL2G0A0BfSUOz4/0xhHPUC1eJbdo2gGNoXsRWRgyLbveqh5 + lT5o1QZwIEcC4GY7t2AEUHpiCOe8HmJgPJ2GeS1xjTfsK89FHDU/w/V87H/EoTAUl+No8SkO60ee + IfchNn+uR20eYMgm3j5yEyE+4WIUP3k2XQv+e+Sn8wfvrngVR0YOg5BP5zcsZaM8pk+uDx2++s7n + Pfr9vRT75Xl/vmH96A2PQIF+BcE9hM3CZp2NrJ6LW3WUHu1fzRV2ibM9zbAZ0rwequdCn/6M17YS + X7dnXGGX8YQs2tiGjksA3Fp80ic/9bgyTzym3SxeYjIOnVohi1fhzOwEF44R+Rp83vyWGRf0gmVe + qaddi1tATFUcVEfihMbV41/m5fpEvMBLDl7ZZR1yJGf6Df6U98I7L5glf46b/Ije+Sa982t5hyLz + MjPDsodxWCWPx1ALt5/fsCqP9fSQ6ejR/zj9wu9yD+67Sy26r6L60Rsunrbuwj8UsfVkNV1uhj5u + CNhvDjyM0GTQ6w2B3UK7kJPPekzWg2GG0375xhE8GpZ60QdBDYpDDsI/NHLoOClZzviCxXFT6HEP + sRwov8LFG4lwLV/KCz/lf8a/zJduzCPz4HW5ch5jlIVhZLx6Xt5yHWLUOgAjvk0jcIqzjeKn/aYr + ePOQyfGU/HT+4N3FozhYh4gn5HFoJUnqIbNcwOWhxfJSDkUfwEoc9Z1HBpqw3odpIqyAQcBsP+pI + feWRfiP++TyBwl5357mXfOH0dQ+7Wdz75La/3rCu/eB509bRX8QPhvqwiiKzWbW4ObJ4YzVVylz8 + WfHdFUOvxWzN4FUNPYbSpz8vcu+COX8Lg/HIXoOajk+Oiy7YfZbltjULtbpafmk3/AUm/dC+4idv + ivSTcdMrFQT6GnwD500EvWAmGv5p1+LOulccVMMw5wnHNeztv8sKG5jZGPbCZR1yNCHv4zoBnjwz + flhK3mQHXcZXDogL/8lD3cCN+pF5HDKeB1JUcgcCx+M6MhDz6EGByR35u50ZcA+70OehJ3XcxDfo + zN/qZ//0SzoAy0/uBymsJ+dq+uxz7/z06ybuyX107Z8D6yVr/JjVLf8OAV3jmrIJWFuvwmxkAVX0 + UUm/EcRnlGaXONubb7ZmwZNrSJyajxMAJq94Qh5xtTAK71bINxvbK2DcWnzsWlxDT7X1fd7xbPCj + rhcBDTO84ktexaEuJav9Vfwl2575AqEEyl5htrhVKBkaRxPtNudDkdewJ988L/kHZjYGr+wSn6MJ + eR/XCfDkmfHDUvImO+gy3nJAXPhPHuoGzvlaRn746Pyqo/C4QeF4og4hh6IP4rEf3nnRT6uf/AxZ + EKIUL0fRhTxw9m+91ku8thtxE29G9+/0ZedcuP0zE/fmPrn2TSDTFe/8l1ur6TkqHoozPiNodSWz + mv7MJMCshJ63XkuMZvPqkdF8XKPBi0kDQ2/Z+vATeAHF1+3JT1nmfghZtG7fEW+Th958waB4M78+ + Cr/0o10ASwXAeDMdA8teODL4cn7Dr2Xbu4dNVPaiW9SVVFkPPmuX2C9FXsOefFHPmHf+ES/mnJ/t + ZZf4HE3I+7ii8HvhOX/SfsCa8ZYD5dfyDsXAtXjh6Ux7w9KbHgqI/7723Cd94IerLg/wA9f0gb+u + fddfwcn5r9kC1WTRrP7M4ab3Jtkccn0GU/OAB/YdLz2bmLz0Mxwp/wGP5tNEA4ZcfkovcxNmmDTL + TIjTZYflf5c+UImPsfBFgwf5iQnhel6eL7uFn5qf8dt+FCbqx5DKTdatJiIOx+2CxvNyiM2f67Hn + 
YRLupEd8fRQ/eTZdC/575KfzB++ueBUH6xDxhDwOrSRJPWTAzZN5WQ5FH8Aah22lywcRiNh686Qn + K4ALmKJDXFpvxZf+m9+Iv/IL3EgrA8BPlq5X33r06x/3b2b+HgCBeT2w1yuv/yz8RsS3IJALajMh + olgij4vmJS6bchZ8zBfPEqdVxJqSD4ZUjwdOWLY+F9t4AcVnO9uzaYIHpt5UNYCOfuIwoD4clH/M + JI/Vzqv0CpB8LV7RMFDP9/gjPOEDMHB68m3wDZ6qByGVZ/p1YZZ2wgnuuFMmRT9sZMfNw3niN41Z + yE0j8LuuwN0rfnaRY6LzU0wM59tVhxAQykv6zLBoXA8giA8gWIALuabTU7nhg+3MSj/Bs4xj0IWf + mADOcdod3crvbH63H9ErH/71nem21aH10+/6M0+4XvMP0O2B/ZLw5R+7YDXt/Ds09O7Dqjapi86m + Z5E1sliU2+V561X6wCfO9jTLTQhjA8UiOsjWh5+QBRRf6j3SXnZkKH3QuivEJwdNllvJwWPALD/n + Y73wmW76oX0pGG+KBpa9cAT6GvkPnA9V6AUzUdkLtqgrqSoO2sGQcruGPfmintCXfzwr/BzDXnaJ + z5G8C/6U98LfLT+in8cv/vCfcTqMxHmUf2TiT07Ojzg8+c5B5WHfRR1CDkUfxCPrdCOeVj/IxSMP + vrkOLpPi1fo0O8ieZzx6kmHVSfUlPvjolzD5x797uJouwM/C/8z0Sx+7wIgH5v7AHlhHPvGvUBW8 + YbGGKBZH/NGYMovLYvfRBrzXJT0siyfwXhzzk3joYWpH4iB86NNf2FnR9J0vQpA/zgdtZOK4iLGD + 8u9uUDzBAMPwS7QC8tjCLAds2hE/7VKkn2YvHBl8lf8Zv+3FF7zDP+0cl+MwfzqUxC4PvnAzi3+Z + l3jECrMcWzyFb/VY8qesOBOXIzgrz+TPcZOfwGMYF3HBl/FSmXVx5CmjPviY1UeZUQ8jlYd61zHl + UPRBPPbDOy8TpF/7CR4DjFK89ldxbKqH6EZGVacIVIO8Mn9S08C8+CHuJx+55Rbs2QfueuAOrGvf + +SLU44X+DIXasLioQ5ayZMxz1YXLkfXifLvMY5zXxHaJs33zMxyJRXRcI8WR/ixr0RRH6mNe+Aii + 9F5iNiefHBcxQ3aelJNfjyPPNu94iAtM+iGfE4WCflKc8yqO7MLgtdnA+TO2aIgQUcZtv4u6ApUO + xUL+CpDKaTrv0Gr6zsecO7356RdPz3vYYai7P3nZtd60Ey7XOUcreB9X49vUH1U3WLjeG+rT+DO+ + ckD+0Kc9dQM38iHzmf6GpXIgz62trRce+an3vqjqdD8/uOr3s9OJ37daH3sL/o7gBfoUtKE5skk0 + Qq/PBG0sux77kifk2kzMFoTVzCFv1scmDXwY9sE80kcQ8kd+waKJzWOEHZZ/hyMe6TfFD0Xhac6L + OA3pKOXwG/oZTha+Db60i08WFiuBOW6PQ6viGIX42ocdmf7BY8+drjhvfD78zZuOTX/zvbdP1926 + wyNxfohQxuGgeDeNDnt+D9yyL+qNAeiT9jNnttT5MYPoxnw+Y7Q/1o+HVuRRnilnWtS7jwMINCwE + qEE8oh8OhRt5NR4BfXMdGs9wbDrIjk90BuKxeImP+Fy3CEDzc15obju6Pvzk6fmf+SEY3a8XY7t/ + r5e/9ch05Jy34/Xys1zk2CyIwiWLEZsu9W3VvfobIvbmisWEnnLZhZx88uNVKb4Bj+bThAxxg0XI + 5af0UPMqfeYRDjivy3IdAm6f8h8gx00h7ApfNHhQODEhHPLG6Lw8X/LCT83P+G0fBI429DJXOHP+ + nm8YTE84f2v6oSvPm65+UP3maqnyhm/cTq/5+F3TSz9wx/Spo9ziSBN/do3cXMynjfZH5IaLm4rx + 
LkbZA76LH3Py2/jT34w9+HbxcL4ipwPL49BKFs7jCrh5Mi+alUJ0cic4+zhoaT8IQqLePJrIWxAU + Dx56XpUHAS3++TxUGbZwJPdEjx/Pbz566OgXT//zk+8i4v66xqfA+8vjkSP89cY6rNhkLAKvKhqe + q5livnAN7yahpS/zRDNgSnI2ccnNT61Z+gdIPd+aIeTeTRUn45be/hVPyBiwxHbguIgZ8tBHnEFR + eRK9rIvDpMJ+ySei5E3RwLIXjkBfs/gxZZl1gyCYectedIu6kqrimKZLjmxNP/D4c6c3fP5Fex5W + NME3bqdvevg509uuvnj61ivOnY5ATrfDH/1HX+SY/jjm1eqzCV95As+0juuH+uBLeufX8g7FwKkw + YQccPuSneFIPQ+aJ+TpkQg5FH8RDV0UTkadf+4n6REzGs262qzha/ew/eLkvxEu5x01ek6p/CSsc + 6Wd+n37k2JEfMPr+uyuk+83dK9/5dXizep2KFMXUZwAEkCWsselVRRaZ1VSxd0dsnmgK8i3w0nNx + yCs9bvVgWtNTb9zQh18NS33EIn/BQ/7MqOJ1ZuV/l948ipuPYVd4mvMqPzEhXM/L82W38FPzM/5l + vnST/Ok265bzq4mf7f7CI8+d/ja+V/Vgnj5383r/bdvTd+DLxP9y43ZG6THXqY3Omwu24fLCjf4I + uTYjTBidlruPjV91oV2/9uIRLhlJaOZxaCVJ8IVjx4M6Aq9lTB6OoIvB/Ze0oiqCkGCPD/LMriUP + 5J5X1UN2I/75PBiDdvB7wrjmFzzb0/pPbT//yp+fxXEfCoz6/rmuffdjpvVdv4VCX1rN51WLTRjF + RTRZyhqBU7Ha6NVdhB98s02pRQxcEA49nKkXNulzsRleGQovN4xT8Th8FTEUpWdT4cP2RJin/GPG + 7tM/xmwyokkUYw8Tiprv8Zff0M9wsvCt/DeeXfyYmOPa+sDu6Zccmv7ZVRdMV114qDGf2uOvfPLo + 9F2/c/v0/tvhNNerj5toQ7/sC2+qqCvslFcfO68KRsSGq/NDXSjOt6s2MRBcFcq5zoQNd9THZjfQ + OAECR3x6KjfmG3k1HjqIS3rwBp3jaPmVfYVnB2N+tx9RK58FLxRph1+vfOOx1Tl//P76fpY7P7O+ + r0Z+3+qcc9+A3xz6+doEKMJshF+tYR9RbC9CrkIbN8TpzRWLSZ5cLI4hJ5+WphymPhaFftl8spch + brAIufyUnuy4Si+0mrPsDMA98iY8M6Zdu+SXcszbX4nNT9gJ13iXdgs/gy/z3pQv3SS/g0u7R553 + aPrex583Pffyc1rU9/wR39KafuSjd04/8KE7p5uP4Rvz8F/rpV2IeGLz7PLW9Yw75NxU3opajayG + R+B2+enke/FwvpjwGPLZ+IbFcu3srN987H1P/ILpJasdyvfldf98D4vft8JhxUTYRNyMszHmuUWq + FYCTvAfeTUJGX+KDRTVp2CXO/uzfvLCrh+q5sM/4Il4CxdftmUfYMYTSZys7E8clgBxWfMrUfNTq + yrpASLvCk45X+tEhGrLyznQMLHvhmKivwTdw/swPvWCYV16pp91qOhdzL/7M86e3PuPie/2wogd+ + RfnX8H2tt+L7W9/4iHMjz1gHAph3v0JWnlm3HAWP/sEz06K1xk12gccwLtW59VNosq5mZFhmHodV + xukx1ML1N6y0q/UE3HE2O/mkPOpgP0POgF0Hl6ny3FQP0WVFHH/h5ceM9KOAVLnkHX6zDopna/X0 + w0947/dkLPflyFjv2+tH3/W0abXzptVq6xAXZ/kZLT9zZwlrjGJT70Vt44aIzcMmRVGhn9mFzCKX + v+FIbMMN7Y0LIui5ePRv3rle5k0vmOIoO3sQT/lXNwRvUHBQ3H7QbOEZL68WR8kRb+UtWNRh4Wfw + 
mdDyMt8exzR9Hd6mXvqEC2Y/piDf9+Htulu2p2+/4bbpv92Cfx6Buz4XaJPPrk8cRq038KrLpjH7 + oY0z+uDdxcP5rCsNQlZfpMh5eY6B7UM/+NAYsnjkxzRclc08MiBp6M2jibwteegv6iA7+XccPf7K + j/YCxpBy5NHjN854zuNp+9g0ffb05668wdb3zf1+eMNa/2sskg4rpsDk2HyzMea9WFFK4CTvgQfB + rCLiy2ZofhJnf/ZvXoDqgfOWicumoj55BQjZZswj7BiJ4qxBTUWA7QWQA/MD15qAWl1ZFwhpV/hM + N/3Q3oEATT8pGlj2whHoa/ANnPOFXjATEcfvT73+qRdPr/qci+7Xw4qRfvZFh6ZfedrF07990vnT + I85FmzLBfoWsPLNuOQJXeeJZZcpxk13gMYyLuOBLeyrlT6hRP3oYh0zGmXqAQeB4su+TR4rQaxCP + /cgJbuQZfWQ/Qy6U4iVv8LT4ial6iG5kNObTjxnVn4TJf/IOv1kH49hB06HD6+lf2Pq+uyuk+4z+ + R/HT7KvpWvHjgdXME382AsBAXLIYs1nCjs3j1dgccr0pcHHJt8Dv8lcOzTfgtHecQeTIBDDvXA81 + r9JnHuGA87osO07mu9QHKvExFr5o8IAElR9NhIu6SjSw7BZ+an7GP8/3ksOr6Xsef8H0Fx513oQf + WH/Ar9u319MPfeSu6Yc+fMd0J3+Ya3nF5qz+aJuV4c/6qssb+nFGvRcP57OuNAhZfZEi5+U5Bi4b + /eFDY8jikR/TOF7igpY0mYH8UBo8UudtyUN/WOc6XCiTTTz2RFPHlfOc4GyTY8K4wQfDBY5/SXr1 + F7Zf8MRXSXEf3Bj1fXPxVx1Pd34ABXvoLgfcLEx2MdZmgkFviSx6H8u+kwdf8XR+4mKNhj4dRRlm + +lxshlkKBSZa0uEh0yC986kB7qDHh+0FwC14CZdkHmpt3/Tya73qEWEKJzgmmqLHVXyJ04RvI38T + pnwILzJ/8VHnT9/9uAumy07hxxSai/vk8aN37Ezf8/47ptf9AX5WMQqvTaR1yPXCCO+z/ulyLlgf + N0Xb+cNeMM63qzYx1xXzlMfKVpiaz8MqgMbJT+Bor8j1gBsv89mP9cVjgFHg0foDXnHkBFmo14hb + PfT53X6CuIbiFU/WOfJV3vpHWT+xfdH02OnrrrpPfrXyffcl4erOl2IjjMOKxcOlzaviQs4x5iN1 + LxGb8Dj43LTk5CVeWOTmSz+Js2yceWFUD5y3bHvzUC87PwhvfcxLT++41Bw1oCdMaHsBcGvxSZ/8 + 1ONa1INTwx8lXOmH9hU/eVOk38GrOKKZct5mA8dN8oxLD09veMal0z+56sJ9eVgx9kfhr/q88skX + TK9/ykXTlfHjFKpv1i3HyF954nk2aqGjPgs8fdSlgvb1sma+nlln4PDR6wqNDOQOCq+j+Qg0jx4U + YK1ftzMD7mGHJ/sZsiCcD4Li0cPA2T9xpFOkMh3zUlhPPsZBWIunf/J1/IkTUHz4OcuHHr5leqmE + ++Dmqt7bxPxG+7Ttb7Rv4o5iqjosXitulnI2Qq/PEG1U0WnXryVPyG0VojmyuWAsR8HDQeFQ78V2 + eKUIfZoxLoevMOQv0sHE7uYyTzVJuU//GEFY+shvyPJSDtU0Lf5wL/uKh3EET1jP+B+BQ+D7rrxo + eu5n4P/MnUYXvzL88d+/c3rp+2+fPoXv9s76A3moLJvGXLA+bso79PVmkhjOt8v66BfMU+bKOwLK + Xq7CaUJA40KuaUVe5vHAPkNfSOJoGWJd0rt9jBNhTABV9hWe8xjzVpRccSQusiJv58tKt3mot4+t + VvfJN+DvozcsfKOd/1ewX23zcRW92WIErjYlnlU64HPchIdBZzcfLIqH+ly04Cfh0Jcj8Yiu9Bmf + 
8YpIfN2e/JQjjNJH/NFeytMe5LD8S5/8wZF1gZh2hV/6ob0LFLwpGlj2whHoK/nOx9d/L378hdNv + ffGDT7vDipnwr/m8ED/+8LYveND0rY/CX/PhxKJ+Kg+wszEWTPVZ4F2huGs9Wz/VtOuLFdKM6wwc + PrwcSz1gULjux+urjDN5w6H8hB2mdn8SNM750E/wtPiJsH/rtS/E2+fpl36Cj3omVDiKLY4AMh7z + hZ3nDx3eWd8n34CP8OzsXrnnN9rbYbGLN4rp6mo1lXQVFQZa/ByB92eQMapIUbTiD97i6X4IYrbV + PLl4nI8yzPReHKdRirCP+GA3S1P+TOf42cRjkTOAis/hqJnwGIbhV6LjKnyEmfFy3oVKXIoBjLyE + kwPfnvvw86eXXXnhdMX5888pDXLaPfKv+fwN/jWfTx3NZd485oL1cVO2oR9vHAHifLusj3XGPOVc + Z8KGG6wrPqTn8iROgMBxWguqB9x4mW/E0XgMMAo8agfATc8HPMkPB9qFWA99frefIK6heMUTfBHf + bj/IZDXhG/BPepUI7qUbY7j3rpe/56HT4TvejQ0yvneV7FE8bT4V14snGRgVo4/Aq8hZ9D4mZxvN + E4tJngV+5ld63LRGLsGAR/NpogFDLj+lB4ZX6YM2M+K8Lmdoe+YbGZc+UCnHWPiiwYN6MSaEY7My + 7sg7Rrfg8HMVflTghz/7kukZl927P6XuyPfH/T/hr/m8+L23TR+6AxsmNynrgT+uRxuXfQh5dlFm + XZc8wiUjic2sT04piij4rA6e7GualUIBUjRrO1yKBxrhGX/0eciC8BYExYMH9UXgKg/JI/75PHnM + 6PhErAnjHH9M1Lwe4pZ8+Gs7n9i+c/XY6X+5974Bf+9+SXjkzu9FzD6sokiVSCtaNkGNAGWSsYRD + pl00TY0kXfC7uK25wi5x0mvNWzNozbw6oit9LErIvZsqTvG3MJos2tgejksB49bii64YeqpHM+T8 + 8EcOXOmH9hU/7VLMfGIEjr9N4Z981sXTG5/50DP6sGJ5vvIhR6Y3Xf2g6e8/7rzpYvxMBqvg9dhQ + nw31JocuFbSvV067rliIgNnDOKyWesCij/obltdXitBnnMlrf/bT+gJ+iychGMU36EJudshHdVC4 + epL16K/II8JnPjKoPCnO+UhgHPl8dT7MPnTrnJ179Rvww1N6PNXxJL/Rnif+bITPLGGN+ZkNo3dj + GzfE6DeMWEzydbuQVUzySsatHohPN9SDRxMNGHL5KT0wvEoftJkR53VxNK/c7tIHKvEx2p/pTcNA + Iz9OCNd4mx0f/9KjL5z+zhP5f/7u3c9NjnZ/3//grp3pe/G7t17zsTsVqOue64ORmxhF6uMsow2H + llaR87l+NAh5HFrJQhyugHszpz/7F4/8mMZdgrhoFuaNIOioN48m8rbkgZz5EWL/Hnv883kCTcj5 + eNLQ44+JmjfO984Hhu3tra177Rvw92IX7+gb7ZVkJRuphCx9NAk3W+IrScCzKXIsXMO31ZQD80Qz + YCb9JM5yX7RyFPaWHUfEhQCSVzwhj7iol7kfpM9WoGLkB0DJso+uMH9wtPxy3vFs8EN7BxK8KTqg + Z1x2RG9U/+eTLz4rDytW9PJztqYfxm+U+LXPv2S6Gj+2cR6+Md/rWn0FbM7TThcXNtbD65XTrq/X + M+3Aiw8vx1IPOyi8jsfrq1i/6gv7s58Wt/wMuVCKl35aHyz6abTLyGj0F+Mmrxn95sRnT5h3+M16 + GUc+X3M+zK2nQ4e2t++1b8APT+nxVMZXvPO5q63p512tPShRPOrzxJ+N8EkrlyzGbJawY/Mcj198 + uZjkW+B3+SuHfCA+6bEo5NFEKBhZyOWn9DJv+swjHBCny7LtAc+MSx+olGMsfNHgQeHEhHBRV1D8 + 
0QsOTy97Ev6CMr6xfnDNK/CxO3emv48fg3gtfuupVmNDP84stEu9SYWHMu3iyXDioBmHVrJwHpfV + aF8easEHIm96PYhO7gQnjnoa8yqCkAaPJvIWBMWDB/VPEM0PE3kwH3F4cjx8MGHJMWF7xx+GMVSg + Jc/5WJlDz9v+xitfZ+ZTv9/zN6w1/l/A1vrvKjlsnkoyilShtaJx8y/xLkYtjfSV9AY8AEXNB/uN + ZkiZm3nm1zjzAlQPARO8NUPIvZsqTvJKT++4mizaaDLHJQBuLb7WBNTqyjwhpN3wF5j0Q3s5Yh1W + 0/l4e/h7V148ve2ahx0cVlGq5cC/k/jyz7oQP3h68XTlBWj9DfUuG9W5r5c1uS5YIU1YBg4fXg7P + Dz1g6pMT9VUsZ/FmJF7f9Gs/jisRHKWXn+Bp8ad+tIueZE47z6cfTSsfKVo8edgmn0bquc/imvMx + LiqgX2+/ODH3ZByeTpXl2nc8G78F5/UKOoq0kSoOjzzxZyMMVLQ+RjMR58OgjRsciA/FU1HJ0+1C + VjHJKxm3enDNHX40n+wbMOTyU3pgeJU+aDMjzutyhrZnvpb7YhOmuP1gK9i3MJsf8xL/9fjrNC99 + 0iXTw/G7qg6uk6sA/nri9GO/d8f0vR/E75fH97q0Lt5dgyD6uTYhNFo14WL9iA55HFpJwZXDpQUk + bBxaapfk4Qi6GADPQ8TmjSDoBk8irFjw0B/7R37Sv8fIJMzSX4sXmrSz/7QffAp4hhOd7EadBMBt + NeHne581feOTfs2oU7vf8zesnenF3HRKLkfGEkWqsFrRNuG9mC5NJpvjJvyS38V1HN0ucY4vix7h + GagQFR5kx5H5WO7dVHHSQPjIsMmijS51XMSwGVp8kpM/OFr90m74C0z6gf0fu/TI9Otf/NDpFX/8 + soPDKspzsgP/Ujf/cvfbnvGg6ZuvOA+/7jk2axKozn29rMh18Xrm+gGHD6077XR5lKg+aYdMyNVA + kiWJh+ZFo7iiHzkvP0OWK+Hhr/O0+ImZ9RH6jEzzecrk1bT8KKHCUTv8Zh0YD/dnXsNP8lMDO3wc + Wq9fkrhTHYenU2H4t++6Bv989X9Wlgw6irSRKvR54s9GGGQJawReyXfe4/CLD0U5G96wHoZvJr/s + yZdOf+6KC1S3jfU+mLxbFXjvrfz9W7dOv3Ej3gPyapu++hI6b9ac0QRu3pSajU2fhwL3NBfKmzn7 + uvHID+XcB3n4wU5XEYTUDr9AWGGC4sFD7jPq54fJiH8+T6DYIk9ZaqLHHxM1r4e47eZz/JzfXq3u + 0VvWPXvD2tp5MYM4mTcgrQYS2gtfSQLjRc9FOzl+8bJpuEjNz9xvX7RyhAfOW7a9ebLJotti6Pxh + lwRqyog/utRxCSAHFV90xdBTHX7xmPOFR3z86yd/4wkXT9d9xcOnFxwcVizqvXY9EX+Z+j889ZLp + 1fm7v1pfj6091gVP8u11wrrho/rOmtBjUF9Qf7y+6n3T+kp+Wl/Iz5DlBDfF0fuvxU9M7yO/EY34 + R9zkNaPfnGSpCdFl/MFnLQz4UhHX8JP8VICXeQB26B5+L2t4So8nO77yHU9BDG/FCe4s+5vQJo7Q + 54k/G5WSW0DFoyxaLHLnjUXYTO+inKlvWF/7iPOnf4i3qsdeuPnf/NtUk4O5U6sAf+fWD+N3b/3g + B2+f8APzY7ODTv2pXZ2dismQx6GVfr1puVdp6M3M7cK+thyKPmhz20/nkYEm7Mc8ibDCh4K2CSbs + Z+Dmh4k8hNnisM2wlRchnujxh2HZ6yFucz+YzPpgxP+iW++st54+fdNVb+02J/t86m9Y+NVESBm/ + qp1FiqLkSO+VbIQS8l74ShJwL1YrYvLmuIFfvLAsnogr47DfWMQMz44UoMKDbPvMx3Lvpjl/S1P+ + 
LIs2ulR+7QH3Fl9rAql5a/ml3RPwmf8/PvNh008//aEHh1UV6r59OBdvsn+Tv7/+Cy+dnodfD+31 + tM9cF6y0JixjXfHhdordXnrATqqvBBMPiWO78EkE6Xevw0p6+Qme6P+yg+z4SKcnPNBPzqcfTTsO + wloe9YYYdtbCTnyUlnyUOev6MAAcGPj1w9t/h7Oncimku23It6tp9VY6V7BRHEXXgp/xcp7Fwagi + 9RFABuKSxdj0xZt+ZsQWxAuGM+UN65JzDk0veRK+IfzYi/fFb/3cUPKzZupNNx6dvv3dt07vxve5 + 1KfahdmxKEPI49DK0mi3VmP7cMj+p1l0vPraNGbNQ6TzQCM86aLPQ06U90njgT73GzHLw4lMu+cJ + 1HTEJ4QmevwxUfN6iNvcDyYVp/NmAXiYnWQAAEAASURBVKHnW9ZV0wuf9N5udzLPp/aGtbP+Tnhl + bZ1UHC48vBisrhwtRdB74ytJ4LVoLDaexZe8OZJzwW+/0QzNLnHmMZ95AaqHoIPsOGbFNZD+Sm+c + Zah5lT4PX9ah1UNd0OJrzSJ73pAf/2/VX37cxdO7v/IR01/BuB9+RXHFd5Y+PAP/N5a/3PCf4pcb + 4gfm43KfV99h3dxO0f+1voBX3xyvr3rfRD/K07yP9jqsRn8Hj/px9J/7OnjRZ3gyO3AjbuI1DS0e + qCgcxTmftcQJKMPhJ/k5Dbvksz/8nPn2d8ngbt6Gp5M1vPa3nzBt3/Xbq62tw96koIji1LiJi0kx + WIxKqo/AMxCmWGPTF2/62cB/JrxhPfOh507/4ikPna68+MiGDA+m9kMFbsQ/oPh9H7htesVH7pi2 + 2Y+8NHpTqn9j2h1NPf6o/Xk4ZP/TrBShz/7PQ4TkvBpO0uCROm/aH+QNHjzkfiNkfpjkTlvOE2hC + xydLTdje8cdEzeshbnM/mIw8xRcFwgF2bGd96Ml39y3r7r9hbR/9bhThcCbjICKJOGQii55DBO3i + 8ESe2SmnWCQ8O6dcNFZ/N95FGC4cj3FpT7vE2Z/9R83SkUiipoormyqbTMDoAvIM/qL3Q7iTPprM + cdGFm67soysoPxp/nea1X3D59Ctf+vCDw2os6b58uhS/6/4f4XeJvRG/OPCL8Lrl9UXf4aP6QpF7 + 1598X0U7Vl9k+qNvOGM/sR8Swvnqz2zrtm9C7/ggcF+Unx43eaGSlvZ+0l3i8Ou8HY/5iHIcvQ7m + c32iQAGcDm9NO3/dwsnfFdJJw1/+2w+fDt31uwjwME9uZbdp3EQYuDzxZyPwSrKPwLMoG/1s4Bef + mobF4Zq0+EJOPusxWQ9eQ605/ZJH9jI0MOTyU3qoeZU+aDMjzutyhrZnvquJ/8PvxU+6bPq2J1wy + 8Ru9B9fpV4Ff+oM7p+96z63TR27Hv5/IvkEKuendCZzAHyjUf3hwH1oORR/24BGBCrTXoSXHAXMc + 7mP642X/za8CW84TKLjw8aShxx8TNW+c73M/mJN/590LtLPeuX197Mjj8fuyPtbtj/d8996wDh39 + W9iY+EegMukYsSlVlBzpcaya/beicXMv8ZUk0Mmf4yb8kl982Qxyj6rzsJj5dbzmLUeKL2qquO7r + NywW/QWPuWi67tlXTN9x5YMODiutwOl5+5rLz53e8kWXTd/9+POnC/j7t6rv85BAXmpDHmbZ97Fv + rNh1WLESRROnXfLudVhJH+3u/nb/lx0IR9/rSQWnvvCKT9M6NKWoQ41hjvyKl3rus7jmfJkH7IQD + KBIjbmvaOn9ra/s70/ZkxuHpROiXv/XIdPjIR2FwOZdi9gbDIPJwaMHPKEOvNwwWCbKS40g+/JmN + Ta8kw74Xp/P7zSWagnwL/C5/5dAlGHAX1/YicmQCmFdNE7IXNXBZBoqZEXG67PApl+H7VE996MTx + 
4DqzKvAx/DNkf/e9t07//vfviMTY0biisb2Zs++5d0uhDaBtJHgeIrLuBJrY69DyPvGZoG5r+4yG + 9t/8KrDlfLgLPO2A8J18+HDcnB7zAsRt7idxYefAhBTPev3fd+6884rpLz/taOfY6/nk37AOHfqT + 8OXDSjG0YLEp5TxHeotkynFPLnE5Bp+3tNZOfFl0HlIn4pc+itntMg7b98WBUwMVosKDTFwtSsi9 + m6w3jvaVJh9CxoAlZn3G4n4G/nWaa59++fSGr3jkwWGlip95N/4LRD/6uRdP/+/Vl+JfzfbWOvm+ + inaswyHrM++jvQ6r0d/Bo34c/Tf6lm3pDqWHMZ9+7Ff9S1iLp/ZF2FnLvheQ4oKPMmcRB3nkVhOB + 48Tqj0znXPAniTqZ6+QPrGn1DXQlnxwjyHyTmY303JKgmPIMh2w6z5JfMnkSl2PjE7foHRn5ut3c + r+O2Hkb1EOFBtn3EFbKAiiP1HrUGdMur9EEblToP/zrN38L3qa7/E4+env9HL9KsDQ7uZ2oFnoYf + g3jjFz1k+iH8WuoH4+99uk+8ad3/0T/ZQOqz3jduJ9dn9DVlHRqwy32TNRy8wdP3De1qX0DQKeLG + HfPpx4z0w/DoUXeJw2/6N07AwLX9R2uZwy75PBHxkA//AtK0/Q0yPombozkR8F9ef9F0ztHfX22t + LmJoNJqNLEYcJjmqKBHcLnrOs2iLsYp3qvzBVzydn0FE4EOfjqIMM70Xx2GWQomLlnR4yDRI73xq + mJ73qIum7/+8B0+PufDgxxRUn7PwdhN+DOIfvv/W6Uc+fCv/GXftE/dh7CA1UGwH1Mdv5nqIahlX + b0I8rPBBuV/ed40nGzNwZR9u1bh0A726W7jYx5zXDhdAbkQXeE6UnXBg2OXH8ZXdLr6RBzA3r3cO + P/Jk/rGKk3vDOm/7z+w6rLhrcXHTMtjZaIX0dTsBvg4RGKimPAzwPONNPyQNPj7yEg4WxRNxJc48 + xpkXRvUQdJBtn/kMXvGUPuYly70JQr7qkiPTr1zziOmnv/CPHBxWUZ6zdXgQfwziqoum38Qb1xde + dlj9xb7Lfh19Fe2o48Pt5JrpOAk85nlYRZ/3mo7+zrZu+xJA93Xw8hQpP22fiTe90p7P9G+77tfx + U2s/AgnX+WxHouO9YWGrXjxtbf/p5DjeeHIH1rR+0ThRnQJlXhp5uFDO0Qrp63YC/CZ+lmrGexx+ + 4VCW4ol4MKEQzGM+82K6Hjhv2faZT/jPRdHasPgZV9jRAwgefO6h6Yfwg59v+6pHT19y+cGvKGZZ + Di5X4IkXHZ5ef/WDp9f88UumKy44pD5VA6pP3UfqK58S2bYwdmO6vyl586ec9R39nW0934/ua/vR + IVl+Wj9r/5hRb1gMqHDkjX3B2dxX1GNf5jX8MO7wRzvhakL25hMMf8dw56S+LByebLf7/mPvuHw6 + On0cwJVL5xR8okewKjoQOe5mcVLQ66RfjsAzkCW/kmcxkjfHDfz+DBKLSb5uF3LyyU855EOF5/gQ + ie1DoWIzDvPWZ5oMOAg+9DV/dHr4+fV3Nzh7Vl//7HfvnF7z8aPT33z0OdPXf8aZ+28h3t1F/oWP + 3zF909tvik2f/R1tDjIfSpbNHY3G/i/9ODyMoYINWoP4c7/JDnp2uw8bPcl0eciwz3kZp6eS+6El + fzOcYPbb5xX3eAnodo5Hvo6uj01XTN/8Of/dLJvvJ37DunPnz4FUv5UhU7STKJ6KlMG0IirI5jRk + FSHflHKM5Jb85SdxOZJ2wZ+LMCs+8ImzXy+CecFRDwETPA495WV874I5f9Hr4ZIjB7+imEvznz55 + bHrKb948vfSDd04fwL/I/Fffc8f0rLfePL395m2qz/prHCLRn2PQYcUCjfbmPhv7yofZkLOYo7+z + 
rXl4DdzoW9Kp8WU65tOPGeuNKE4vb4c5H5HGkc/XnC/zgB155JZ+OJ/7zHZQ4cemVs+3tPf9xAfW + 1upFSoXJgydT1RsI5TgUZiP9qSh8iCvkGY5Bt/klf/rl6s3sSLngN49x3S5xtne81oOjHoIOMnFq + CvKHHA8l2yz18/xCOiuHD+GnvZ/3jlun51932/Rh/iIpXLlu77x1PX3F226ZvvWG26eP41+vOduv + 7Ff1p/os2lE7LPpRRfKOMx7z+Kj+bEV0nW03+nPD/hKdEGZXv8c6gTm3Ff2w/+lRd4lzPmupEDBw + cU4EkQfYJV/NZx4yU+B4K3pRSHsOxz+wrr3+jyHoz2PIdXLiOWWycp5ZzkYreB9XJDXDpR1Qm/jL + T+JyJGsrkkUvQvFEXImz3+bH8OIRnWrv4na8MhZft2feZd4eGM3Zdd2Cf9HhJfgHS5/xllumX7/R + b1GqH8rgOro/WPKfwT+x9bQ33zL9U/yCvLP53HJ9ooHGoMOI3TPae/S15nmo4CPryzlerrPtQDer + e+o9Dwn7CAhOC1d48WracVBROLb78Jv+7603LATCsD5vuvadVzmCzffjH1jbR7+BJ7dKlmOkUCc+ + k5ezNtIX5/sVsj8TMLg5vvzAZuav49IPeRf8jse8ac+4Emd/FDOfckQ2wwSnPuMzXhEpjm7P+MOu + CER11txY55/GAfRUfPn3zz9y14T/g1+X1yPq19aN87fhgPtefLn49DffPP2H/3FSP+BcvGfKg+sT + DTQGHQrMcbQ391H0I+d5aDSZWF7i6zzq12YHmcsjXu4L8PCinefTj6YdBxWFYxRzPmu5DwSU4Zwv + /NGOPHbUcOSTmED8TNbquN983/vA8i/n+7M8SZVKjuBPma58siNpBs3kM/gcI55MaoZreM6Lt/GX + n8TlaMfJrNF+7b/bzf06XuvLUdhbdhyZT+SnYjO/bp+yzKmIh7NjePvNx6Yvw/el/iq+xPsEz5xF + /l6PqF9bt77OH8VfZfnG62+bvua3bpn4j0CcTVf2q+qmvop9pV3dy8m+GvtKh0aTs2bi6zxcj011 + F512gExrPbR+9GNG+mG/I5LAUWxxBNA4AQM3zgtZyxx2yZd2GM0ns0p4Z71+AYIYhKHOYe8D6xXX + fSkMH9tPTPrOVHWiU0ZRmOVsJDvn+xXyDJd2gjOpOb/kk+R3PI6j22Uc9ut4rYezeohwIRPH4na8 + gIoj9R5pH2m1h570mff8P/Bv+H3bDbdNX/62W6d33Ix/z48psgmrEM7Z65F1inpC5fpipBll/HnD + TdvTM996y/Sd77tj+sP+mgbdmXq5PlG3MaAe3jejnK5U1RP66s9WHOk7j/p1Q91Fl5Vv6yGHxJu0 + 3ohaPN1vj0frH7FwXuxB5AG85LFCSOOGv3S8tVo9fnrVO69uqc0e9z6wVseeS5J+Art00WxsUlw+ + 2Y1LfCg01O0E+PIDA9eUSZ48v+KARfHkJpr5NZ95y5FCFAwK22c+4V/FRr6lz7g4yrw9hHyGDTxH + /tXv3jE95Y2fnn7i9++KdUfazJNdWYVw4l4PTrNuUU+oan1oRjlG/Da36RW/d+f0tN/8tEb+Q6dn + 8uX6RN3GgHo48VFOV6jqCb3fTOYFcp29DKrrXnUXXVa+rYcccp1cdcVBWIun++3xaP1jsThf/mkt + PvCSxwohjRv+0nHkv+eXhcc5sFbXkKSfmMo1UqgTNppVcuAVEef7FfIM1/DlBzbywybH8174XiS6 + EQ4WxRNxJc48xpm3HNHcMChsbx7VWHFLocDm/GFXBKI6427/5VNHpy94003Td//O7dOt+T/4VF/k + z2zZlarTSF31hui6Rz1DVv1pRjnHsOc/C/id7719+qK33Dy9of8bgcCdSZfr0/sq6+F9M8rpClU9 + 
UTFu6pSzJq6zl0F11foMHPWehwXXyysX/R7rJF4z1htR4RjfnI9I48jna/jpecCOPBWA/ZmvDPUg + 3A7Onj2uzQfWtb91Kcg/l03YT0yXzqnWCRvNKjnw8qWiNK8hz3ANX35gIj/A55hx1EjaBb/jmcdL + fOLsl2LyliOyGSa4i9vxioj+Sm8eyzIvPyGdEcMHb9+env+OW6bn4ntM78fPU9V6MLusB59bnSny + 8npknbwuOS8eCLMx1tN1X+l7Wl/z9lunb7ru1unD+HGJM+1yfdRQvb10KLhOmbEPl6onDw18pFyo + XA+3qfWb9lcdGnzw+ox1Ja8Z9aZDhU4Z4hjm8Jv+jRNQhpwffLajZfGFA+OGv3QcuM+ZXnH9gx3J + /L75wNqersFJKbZ+YirXSIHzvDQyyGjanFcTCxG3E+Bpt+SX3HnTjx13dvvnYiZP2GUcjs/xmhfm + 9cB5y7bPfCI/AsXX7WEQ0wpEBLOQTluB/xfvH7z/9unq37hp+r8/cZcKM6srM8t68JlNuMhf9RaM + dYp6hqyy04xyjmHvdcr6r6f/8Ilj0zPe/Gn9X0XGdaZcrk/UbQyoByvSy+kKVT1j86ec9XDdalmw + HHvUXXRZeeJj36n+rLsZFYfK7QnRYcXSb42Ml+sf15wv84CdcACFA+OGv5o3cms6tP0lydnHzQfW + tHWNTlCQ9xNTudIn/kifY+DoNOfxAG27Qu68HV9+YJL8ORbuOPz2O49XRZj5ddzmLUcKUjAoHEfk + EbIiIqD0kb/kyDH8hHTaDq/9/Tunp/zGjdMP4h9a4PetnNairswu68FnrMtyvb0eWafRF7vWmeai + c7/ILtc5Rv681g9+6PbpqW+6WT/HtegsRnDaXa5P1G0MOBKyDpkS5VY/HhpNLlSuB+BVz6wjQFV3 + 0Qkh0zGffsxYb0Qtnu7X8YOXeq5/XHO+bAvELxxAsU+MY15lqIeGuyY0s2GPA2vnGp2gYOsnplNi + kIzRQXYcvec8HmaOUt4Lz/klf/lJ3hzJvOC333m8qsYsTsdtXnDUQ9BBdhyRR8gCkqf0xlmONBfx + xOxpM7wLP6bw5b954/Qt190yfRw/buB6Rl2QaK1P5pn1YIZYl83rkXUafVE8NMMfdonG4JXfXOcc + gSH/f8f/ofzWd982fRm+v8UfqzidL9c36jYGHUaRbqTnChnPennzp5w1cN1UplHPVj/qXWdYcL1U + eeJzPv2Ysd6ICsd1mq8jkcaRz9ecz/HQsvjaOpuvDPVQuGnz97F2H1j8/tVq63N1gjIZJg2qGvGc + Mj10HJtKshW8j0tF2hu/ib/8JG+OZA2+dGC/83i1iWZ+Wx65ZqUHExw6jsgj5FA0febRwljEk3Ht + 9/ETOAT+99++ZfriN944ve3TPARQmMo781vUlUkxX+L4jHXZvB6cJm70xa51prnoxDTHN7vO/45b + tqcvx6H113B4/QHiPx2v7FflFeVTHVzRli7r0uoXm9/2I3PXuZZlXkfAqu6isydaj/n0Y85602nx + 8DBJvzVSz/WPa87neBS/cADVfvMhVtsm5ws3bfw+1u4Da/vQNSDFQYkgQNJPTKeEafiVPsfAJV6x + tyS63Hk7vvwAnPw5Fi79kHDB73jm8dIucfbruM1bjshmmOBYFHx0vCJSPbo961P07UF0+/52DLH/ + 6w/fNj3lv/7h9KqP3o74ETILw4eogyTORz2odp31YJzUMuBTXYlzHaOe0FIWD55nY6znDH+c9UaH + Tj+Fn7J/Cr5M/CH8NZ/T7dxyfaJuY0D1VXD3o6rphTGeq9P6s6od69J51K8b6i66rHxbD9WfeJO2 + Nx1HgfnaF5jp8WifRSycF3sQeQAv87Ii+DKPMvR84PAvreJs2v19rN0H1rR9DaPWCRqjc6TT1mQ8 + 
DCjHoTAbrZC+bifA18kMg5m/k+SXf1gWT9hlMR2f4zV/OVKICg8K22f+kZ+KiHxLn3lzjAzrIeR9 + PPzXT901Xf1fPzm9+IZbp5uO8oc/mS8CZmHYVZGnJOW3qGsqiOMzu3KRv9eD0+SLegLq+tqO7miv + Mexn+Ga35E/51mM709/H32O8+jdvml6vH7kH4WlwuT5RtzGgHqpopodMXCHjKXnzp5ypum7Qs9y0 + 2qvuohNCpsQVHk+xDPIjhfwn73wdSaB4uf5xzflsx4iMo0Hml3mUoR4GDuLW+prQ1rD7wFqNn78i + eT8xXToGyR50kBoDl3ixtyS6vBe+/ACc/DkWb/oh4YLf8czjVXFmcTpu85YjspkOCsdhHgaSvAKU + PuYlyzwI4nmfDr+LH1P487910/TVb75x+p3b/KXUyBdBIx8kotF5Z1qLugoWOD6zCTeuB6eJi3qS + HTLdsHtm42ydsv7DbsmfcvLzt0O84J34EYy3nx5/zcf1jbqNAXVhZXo5XSnjWTdv/pQFFp51tp3q + ulfdRSeETMlTeDzFMjgOKlo8Oiz7OklLvwJu4Ms8wEseO2q44S8dDxxg6xMdWIufvyJJPzGVq4Ik + l4PUGLjEK6KWRJf3wpefxu/aws9J8DueebwqwixOx23ecjTCY02BV1PQLuR4KHnERbzM20PI+2i4 + HT8O8LL33To99dc/Of0ifnlcxY8YR74QqFDXe16S8lvUNRWsD5/ZxFUITnR71tH2OS//EGZj2Gsd + E5+jDXkf1x54/pDrF+HHIF78vtunG/l17z69sl9Vtyif6+GYRzkpt/rxUGlypue6eRnEQ4JWP69z + LBPm8STTMZ9+zFhvOoVjFC2OrD/14gs7zJd/epEb2AlXE/O+o+mMjzInt3Z9H2v+hrX4+SuS9BPY + KZkrT3iNgUs8XfUkurwXvvwALD8stmiQ/knwO555vCpCFNN+GVbylqMRHhxabx7VWPZSKLBhz7ha + muFHZPvo9rP4N/Ke8l8+Mf2j37kFv85lUR/EOfKFwIKz3aIOkth9mJjlnQripO6F4IR5a8z1i3m6 + od1snK1T1j9GE/E+ruPg+eNaP/JR/HjGG2+arsVf99mPP77Fekah+4C6qKKod6ZqnPGsmzd/yoWi + QSyD6ip51G+sH91m5b1Ohdc6m1FxUNHiub/fsFar9a7vY80PrGm6xic1isKkkHQ/gV06pyC9ch+4 + xCtlFUVPvoXceTu+/AAtP8DnWLiIZxO/45nHSzsvDgfG6dG85WjQlT7zDzsbLuyTT+blJ6QHfHj3 + zUen/+mNn5xe9Fs3Tr/XfvFU1bmthz9zImQWRl2feWdai7oKFvnzudWZIi+vR4xt3co/MFqHHFs8 + J7Pe83XN9Yox/N+IHyT76++5bXrmm2+a3rTP/pqP66OGi77KeqCuil8Dn/Cn5cVDpcmFqv4OHsnN + DrLqLbqsvNfH8+nHjPVGJP/EkXfOR6RxZPBV60sDXB5gR54KIP2Sz3b5MHCcl8E1gdCw68DSyQ2W + PpKzTmg8p0yGjqNTyVbwPi42Na698Jv4y0/y5mgi8eXNfu2/28GhIPbb8lAtoCo9nmHoODL/iNeK + po954SMC8dDzA3+95Q/xTfX/75PTm/7wqJosm4GRVZ0rbzfTCB+FiToYz/uirqkgTupeCE5k3bJO + oy/KPzCs1lgGMSm+6qPjrPdYN8ab67XZzw34C5DPx/e39sulTZ0F5xjlcz2yDhkt5ZYXKsZNrX5O + CEbJnUe8A1d1F11W3naSIh4N5OPKUKEVIo7inM9aKgSkqDgGn+0Uf/KFA8dDPpklcPjVPJj4PfV2 + jQOL37+aVrOfvyJbPzHJoWA4RpAaA5d48bckurwXvvwALD9swvRzEvyOZx6vqjGL03GbtxyN8KBw + 
HOZRjWUvhQKrODkf0zMCCQ/s7SZ878b14Hpp0SugWfyYHflCYGGE97wkLjgUSztMOH+peyE40e3t + v+KBHd2QdjaqzmF3Eust/+lnA77ibX7wuC8ubVblG3Ubg9cLUUY5+IQ/0Y+SWL8hY0qX6tt5SJB1 + AaLqIbqsfJ9PP8EXfWD/xDGK4bfWUzjyhR2AYo8EPMAu+Wo+8yhDPQwcRSU0+z7WOLCOHf5jRLCY + jK6PzpFOReGROBvswodCQ91OgNcikg9/Zv42xCPO4Et+xQvL4gk7Lhov5+PR/JisB85btn3mH3YE + iq/bwyCmyZ9+9LwPbq4HwvKiV0Sz+ihsN1PmTwvnlXWj6aKunMp6SN0LwQmq0558Uc+YV9nxPBs3 + 4ZvdrvqeAF95hh8M++pyfaJuY/B6IdJIj0/40+oHmZs665tJSe48JGj1q3qILivvdZIkh+Q14355 + w8K/h7o1rY89MfMcB9bWscdw0ic1ioJkGf04mZEM9fjjnPm0GR8KDXUjH67Om/w5v+QvPxFHx4NI + fHkTLxeTi9T8JM5+7d96gOqB85Ztn/lHvAQSoKHzhx2DWMTDqQfycj0QFlcsuxABzepTMvOFwHoI + b5wkzkOxtJMB6yF11IfPcZV/1S3qCV3x4Jnu0u1GfK47OZf1DVl2ictR8Hm/kmI/Xc436jYG1EMV + bem6QlUf6PubTubkOrhMqutedRddVr6th+oZfQDSetNp8XS/PZ7j9xcjBC95HJhCpr35JNb6DlzY + Ma6tVf2e93FgrTzpkxpkbHKSsgnoMkc8pyzKhks853sSXe68Hb+Jv/xEHB2/5BcvIiueiCtx9tvy + yDUjLsOFQ9tn/paVsfhSH/PCy5wT8bA/BtcDYbFLuOhxzeqDuZEvBMGIj/ykp+GirpzKekjdC8GJ + bm//FQ/s6IbVmo1RP+FOYr2z3nvhndfww5j20+V6RN3G4PVCoKOdXCnjmY83f8qZk+TOo/WJPhZf + 7mMI6gfy0k/Opx9NOw4uEJ50x8DDJP3WSP1x+4vWsBMOj22dzUc9rpwvHCfD32pnw4G1vdakT2ok + wSCYDJuHpjmSO2QMM1ziOQ+FhrqF3Hk7fhN/+Yk4On7JL15EVjz0xyLM/LY8GJ4dKETBBHdxHWfk + R6D4uj35PT0IKtsH/MH1QHz4UB0ioll9MGeZdYPAegjveUmch2JpV/WQuheCE93e/iseOKKbdFej + Agi7k1hvB7w3vuKFL6XFoPbR5XpE3cbg9UKcUQ4+4Y/rz/C1yZvMOV7i6zwkyDqGnnUQL/eFeG3n + +fQDlbS095PuElscuV7kER9RS77wx3iFE6DhyCcxgQ3H+fC3s+kNa2v1GEHoHCw+sT2Ss05iPKe8 + F57zPYkud97yIziTYoiDv/ws4ul8eqZdLELFGXlkHPbb8hiOBh0c2j7zH7ziKX3MS44I2qJlTA/k + 6HogTla0uiLzy7xSZr6IlgUXvus5N+8DzlQ9+Ez+Rf7lP9ahy3ST7moMe+FOYr3T3154znc/DHM/ + Xa5H1G0MXi8EOsrpChnPujGv6M+WkOtgO+W9V91Fl5UhPuokh9EH9I8PFZAjZYnDb4/n+P1Fa9gl + n/yk3+EvEx64sCN+tX4MJV7+kvC160PTzvQ4TvikRhLRhBwZco14TnkvPOd7El3uvAxSsuDhB8/J + n2PhGn7Jbx7zdbvE2W/LI9eMecq/HROnplD+UQ9G1OTB39IMHpHtg1vVlV0XTcKwnF/mlTLrBiUT + E77rObeoq2DkFdr8i/zLv+p2nHUWe/fneGV/nPWer2vwN3zlGfwMeT9drk/vK5dfbyIIdJTTC1P1 + jM2fcuYkedBpnWvfiC/3MQT1A3lddy57xpN+642I/SA945uvo+ZP2F9EwU64INKQ+4x6XOF44DhZ + 
/j5z4hmFywfWrW+/At/YukgQJgNjn9geVTI2gyig5qikY1zgyeOi6Mm3E+DrpAc6+XNcxiPC4EsP + jmcer4ow8+t4zVuOBh0UjiPzj/xUbNYl9TEvOSJYxJNxPVBjrQ8WPZuBsVSdW13YJBJZGOEjP+E5 + t6grp2jA/KXuheBEt7f/igd2dEO72djiOZn1dsDhZ0P/VZ7hB8O+ulyPqNsYUBdVNNNDzK5U1Q+y + 18u4TEr6zqP1iT4mS9ZddFn5Pp9+zFhvOi2e7rfHc/z+Ih/iII/cOm7HE31HSK5/4cJO86tLJp5R + uHxgbW9dlU59UsfJxiTZDADWiOeUSbAJz/nk03OT98Jv4i8/EQeTkn3jS37Pz+NVERC/4VzNlkeu + WemBKn3mH/lZ0fSZd0szeDKeB3rMOvkzFivpq+pcebuZJAo26kQLwxZ1TQXrxWc21SL/8s/5tm7l + n2b4M5ZBTF7fxOdIHwv+lOUncTkKHn2LZ6VFjn10uT5RtzGgHlmHDNYVqnpC3990CqU6u0yq6151 + F50QMq31UH25zmZ03/B5xNP99ni0/jbT+pV/WsscvOSxQkj7Hf7S8cARNvpuOjY9hjM+sNb4LjwW + m5dPahSFMrz5JPQ8fctn4nIMXOIxTQMNdTsOv+FMas4vucVxPH7FC4Yer4ow89vyGIkoRMFYUzyw + aOILWYumOFLvkQEHfXuojB/QB9cDYXHFsgtZX+XnkQGOfCGw4MJ3PecWdRWMvEKbvwpBZbe3/4on + /QNDd2MZ+BR290E/iXwf3VyPaKAxeL1UhwzWFar6AVH9mRDhWWfWL+qqh+jj0KveotOTrL3+uV7E + m7TeiLRCyTvnI/LE/UUU7MhTAdif86AeVzgeOE7aH83yRxt8YK2mx2RT6+SEcR+dI52GT46xCTqO + TnM++ehL1wnwtFvyp9/iPQ6//dp/t8s4HKfjth5R1QPnLTuOzD/zBJAADRFnyU4v/YT0gA+uB8Jm + l6BueTm/zMsjmyTzp0XmSRvNS9/yTgXrwWfyG0hJV/lXnUZflH+gGBXtNYa97HKdcyTjgj/lvfBL + P6TYT5fidqGj3lkPVTTTQ8iukPGUuA6jnpmT6+AyVT1b/aoeosvKE9/XNfpAXgEkTP6Td/jt8Ry/ + v8gBO/I4ME6E3+EvEx44ouzPYfinGOINC//bkE1HCEcm0UbnSKfhM3E5LvCYJpGGuh2H3/Dd/Ol3 + GY84F/z+DOS4u13G4Xycn/VgqYcIFzJxagryhxwPJdss9ZHhIp6YfcAG1wP5sEvaZnd+zpPBjXwh + MDHhu55zi7oKFvlLrULxqa7yrzpGP0Fb/vGsOuYY9ZPdfdBPFdg+eXB9om5j8HohxtFOqHPUn6Hr + sGoy53i5brZTXfequ+iy8rYrvHiDL/qAHs3PKObrqHnhyOCr1rfWk/OwS76az31WhnoYuLADXuwr + fNsKV7xh4fSKpvZJDRBlgtk8ANaI55RJ0HGJ5zwUGup2HH7Dww+E5M+xeCMecS74FQcsK07qWZyZ + 35YHw7ODQSe4i+u8Ij8Cxdftye/pQaCnfXFzPRAfl5t1iGtWH8xZZt0gCJZ5cSLzW9Q1Fcyfz63O + FHmV/1iHLtNNuqtxtk72V+tuQt7HdQJ85QmLkf0wf6CfXA8WEBUYg9cLwUV6fMKfqIek1p+Q8xJf + 5xFvs4PMOohX/UBeyjmffjTtOGgg/7bTYSkC21nL+AWU4ZzPdoqfPBWA7c0nswQOvwpv9N3Eb1vh + 2pr+5fX8v4OPSqebPsMpFQSlEeCUSbAJz/nk03OT98LXyQxs8ufIKs/sGl/ySw/L4slNFMW0veM1 + bzkShWBQ2D79RX6MSHypj/mYHgQZzQM/uh6Ik10STcaoZvUpmflCYGGEj/yk59yirpzKekjdC8GJ + 
bm//FQ/s6Cbd1agAwu4k1tsB742vPOFLaTGofXS5HlG3MXi9EGeUg0/4E/0oifUbMqZ0ia/zaH0G + ruohOlaED66fJDkkXtOOg4rCMYo5n7UwOG5/EQU78tgRJ+An85DICc8XjqL9OYzVo/ijDVvTkbse + D/ShdOqTGmQMgqRsHprmiOeURdlwied88um5yZ234zfxl5+Io+OX/OJFZMUTcSXOflseKt6Ik3DV + Snlm/saHouljHgHKruXHx/1wuR6Ij10SzcC4ZvUpmflCYMGFN04S55d1TQXzl7oXghPd3v4rHtXX + dnSXbru+1jnX3YS8jysKL7vE5QhU5YlnpTUs98WT8426jQH1UEVHX0WFqj6Q+5tOJuM6MG/nazn6 + GCDKqrcKrieZjnmvRJTVcRDW4ul+xS8tHQq4gc/xqH/IUwFkPNF3ckP/9JY4So7//2fvXYB2za6y + wPc/nU5CEjAoXoay8IIZCAm5QBDRGhFrlKmxykJriDrKCASJiKMU4zhT5eCoU17GKRwZLEdrnBKm + RDOiI1rlBS1jB+SWEGIIBJAAIUAg5Nb3TtLd55/nsp611/ue7z//6aS7z59071Pn23vt9axnrfXs + /b3f13+fPl1l3LG9/y0vwP+b4tqvoitJ/aT2k43dryczgkXhFoRTGOgK1/Pg41KjmjrFT3/nwVp5 + ePlqv3mTxwF87eF69vUybt/XyCPxEN51YS0486Z/48sx/LW/6JunC7rNiz4f3hLqUKN17r7TLwCC + EZ++09ZBV3Ixnjiuh840OTq/cKVn7TMN43bzqOdWznudG+vIeV2cB+mu1Mh9bR0tJ3SRomkPNVup + 1hN238/Rkfw6t9L1It1FF+V9TrKkP/UzaX8jGvXMvLOem98v8oGXPE6kBIw3n/Ml8cJVnHB1n+44 + /7X4Gdb1ZyukLvXuyUwwLwMAPWMdW5R1WXdxdvB1jZvwE3SKv/NUHWxKeRywuCuelTVP1YUN4Vzf + yJMzaz9g1BQ2RZv4cgy/ccZXGcVT1m2fVD+q4KXIZWBR7q/qb5v9wqDgwk8/9w66CkZeoc1/6L/z + c3+cW+cXq+P7nLGnuOAzJx/njMp3Ef6YJ2FXZVbduUC6d5LT54Uil5w+GOOp17ifoxnr4LjWc+jX + eohOCEWv/eQxqe8N19wPb70vZNc+/ciTsedzHDtrvnFufp9VZPbFxwTcP96768/GA+uaH1iV1E9q + iEIbJH4SsiYmJYW55Kc9cMFjmw5N/VL2RfhT/Mp3i/yuZ1+vTn2Xd/SxGlGJgkl7i+s6qz+JSD1m + fOzqsPJ0v7d5YT1QL09s3f51jkMXvQmih/DpmzMbOejKLTqoh9wShqsenV+4uk/wch9oxe1mJ5K/ + 71HdP5GWfySobdZR/JnJf8jTcVdkYX1KtzVBFykqeV0q7aEf/D4v49KO+CYP9Tqlh+ii/NBJ+jJP + sjKea2+IbtZRwMvvFznASx6lDV/6oB9jx0ebm+7bYdy4hgfW9XP8TaP0qTpMmBE8Z0L7yYl1bIfd + iOd++LQe9uTtPIKzKZa4+DvPoR7SHfnFC4aus/oIznlHHyvRokNCx6d/26pIfPHXvvAK50YtrsZk + PVAWFa3LwMp2+rTNfmFQcOGNk6W2DrrGwf65Jv+h/84v3UpPQDs/w2hnrnjF3cJ5J99F+GMepLlS + Q3VTAenTE/SQomkPNVsh42n5zR87TcledNL55PtLdFGeeet9J/3rHigrgISNevjQSd6e6b/p/SIH + 4oQjHQtI3pWv9xtHlPOpWsbhWXUNe35gVVI/qf1kI0l/UvESicIt+BOCtVql3axc6pYrj5vwE9B5 + sFae5LtFftezr1ci7PKOPFJBiZnemiOx60j/tlWR6oi/9oVXODdqcTUm64Gy8CuXgZW1zkMXvQmi + 
h/DVn/CMOujKregh9xSCGzPe+bsexOl8gdnNo57cu55NyNc1LsF3n4hgnqs2rEfptiafF4qt9rjC + 77qPsqjfsrGlIb7JQ4J6/xLQeoiOinAx95NH266DsMaxipXX9dPrPI468tGmB3HCkW7mJV9FZr9x + FYd9VUs/nlXrgTWefGTxE9szOftJzJxli5JxBzz3KdZu3ITfcDbF1hZ/57kFfj/x9/VKjV3e0cdK + pDIFQ0L3mf5tqyIC2l/7shXOjVpcjcl6oCwqysOu0ec4dPEnJwCCrT4ZYthB1zjYP9fkP/Tf+bmf + 8xPf4ZwZXvuYpH/jR9yRP7byBJe5eMSLNeerNqxP6bYmnxeKXXJS4bqP3Mcvn5eU77asg+PU90W6 + i04IxTKu8cpjSt8brp1HdLOOKvDy+0UO1E8eJ+KGzrnvnTe83ziah3unB9b29DcsSVNn6E8iHCIu + v7Tj7EXbPvP4GY0hnJdX4fXWPwF9mdKmbhXvVvXjyXp03+m37qDeXYf+Vzx1Kj0lE/Mpy37ufAM/ + 4m7Q9xI88888V+FMZg3WhwKue+R6YWNUe1zh99Cv3vzRl1gO2Yuu7BEXPUQXZRwnSwmJLz7mpUP5 + iaO557OXDgEV2LoXkSfEha/3x70TkRMvHDedb9THH7qfP/1vCSmNVOFscdcnVjt0drtPpHlW49Ao + 9e0eqh9FXP4JmH4B1p1Bv+qLfZcu2Nj1HQdxXOs2G0+To/NTF/inzTRE7+bST7jgM5uQr2tcgu96 + EcE8V21YDwpIfXryeaHYao8r/B76wX7KfsPazvDAOj97+t8S8krUu8efEH6y8674k0wL3Z3dJ0ht + I7wItLoSL66bV73eDVXVrn6VPT7p+N4QPn2nLetBd3glGPtniN50WtHSCE4z/NMWD1C7WQdQ/MFn + JmP5RT7sHf/Ac3/yd9wVWVgPCsjz6cnnhRpXu9R16AfE/KaTdqzD4BHviIseoosyxJdOSki8Gfub + jk+4ytzzEXn5/SIKceRRWidw3pUviReu4mZ95/wZVv6REIctCGeCxuwembRycr4Jvog09csl+OMn + Ytujjq6LpMUXftfjulOvVZ59uW77EdmLooPtvOnftoCqI/7aF74qONSTum7XbD1QJ29JbiGK2ena + NvuFQT2Er/7k595BV25FD7mnENyY8eM+1T7TJF3PpZ/qPtw/8rlArfxyCb77BFptjdCrsPT5lG5r + 8nmhwGqPK/yu+yjLb37HY6OGdXOc9NX5jDjY3keA7gN5ic9+8mjbdTBA+cO757MXcTe9X0Qhjjxd + QPKSj36MWiwcN52v+8Gzav3QvZL6SY0maLMZzGolM7lJdRM8U80mpj15w2945YER/syNq3omn9Z4 + cT37eiXCrk7jzNuJFh0c7jf9L1710/7al10VVJ6ybvvU58Nb0rci/aWv2OwXJVMY4aefewddBSOv + 0OY/9N/5uT/Ozfo6jumS9iR+xLlAJq5R+RQXXGZAjnkSdlVm91sXaE3Qg4qw/lRqhVof+J+y37B2 + P3THYXP4Se0nG1WbT+C+XMFlLlzw5Jlvkmmf4jccbwqG4bdmXj7arOsW+IXjYR7iUod5zGfeToRF + lQuH49N/5WclqiP+2q/tRaDVlXixHqhTTxV27LHTB1urXxiCUe/qT37GHXTlVvSQewrBjRk/zq/2 + mabPGWvZ5OO+eKN/zXbI3y+X4LtPBJD/qg31aaFL79JBylhe12yljKdufF8MXaox6+a41jPvG2Ba + D9HlBOZ+8iQrbAnH/fCuvLOe8XQdeVYcidY3p+ynD+fTfWIe/FJewZyv+9E3LPxzoULQHMepTyzG + Hj+x+onPOFyeXZyJ+LrGTfgJOsWfvLfC73pcx4wDsWpwfSOPVFDi8mNCoOtIP7bLMfy1L7zCuVGL + 
qzFZD5TF0683Nytzf1V/2+wXBoUTfvq5d9BVMPIKbf5D/52f+7kfZIfNNEnXc8UrLvjMycc54xL8 + MU/CrspsfeoCrcnnhSKrPa7wu+6jLL/5HY+NGtbNcdKXBEO/1kN0OQHi6zyUsO4BOPtnScof3lFH + FXj5/WKBiCOPC1PFzrvypeGFq7h9fU//W8K8mXG2PCWYFnd9YrVj+I0zXvpzoxZXY7r1T8D06/51 + q0oHduK2bviks6PuoC7bof/Oz31euvJzRtiND63hb/yIu0HfS/DHPOzlKg3rIaHrXmmCLr5H1R5K + 9v1r/WDzTR07PcledPYP/eiX7qLLCfAYs588ZuxvOqOemTf5jSNfxe34fE3YWfNVY87LPjpQi4Wj + 6T5dN4D4F4T4ofvT/5ZQ0tQZric/NqAR7Vq0raPlvvyMxhDOy6vw6rp55KyTFXu4v/TlmZckbTJi + 9U0/4+jnpVtxcrB/uacQ3Fg41cFLXPo0DzDiyzz8rPcYp3wkzrgEf8yTsKsyW4/SbU0+LxRZ7XGF + 30M/2D4vKd/tWC/HSVcSREfx5fxIF+V9To1XHlP2Nx2fsOqZeV0/q3OeFNK69/nQg/qFw7L300dF + Zr9xFYf9VZ//LaEj1AR7gZugMUsy2JqZE7/7CTtwHadcTDMGcRiTd+K5f+TvPId6ikhTXlyP655x + SFhw9uX89mO7F9y37TrSf/pU4CE+fFVB5Snrtk/WA/XzuOsysKjWeejCy5T+GRGdjOfrQdc4KIvc + pQ/XNTo/iXN+8HV+rCV/5lFP40ecCyxyTpfgj3lG5JVYWp95r6KHFE17bBS/6z7K8ps/+qYZ2YtO + OreOjINe0lt0UX7uJ48ZfW+4XvX4nsSuWfeFfB4rT3DcR/3CYTnOre8dIdlvHDdvvHf8Yw1305VL + 7Sc1muMlZ5O8NHLXjHVshy1c8NwPn9bDnrwT33mADX/mxlU94mR9Y4gXkc1T9acO52VZ6QfBTiAW + 0cG2P/3bFlB88de+8FWECEZBt3mpflGDP9nYqMfq3/qtfuEXjOeZvjkz7qArt6KH3FMIbsx48pWe + tc80pN3NpZ/qDj4zsFUIVx6X4LtPoJnnqg31aaFVYMnp80Kx1R5X+D30gz2/6aQv6+Y46SrCEQfb + +6SL8sRnP3nM6HvDNffDu+fTPv3iE+zA5zjVL1wRaUofjkvDnVdpna/7wbMKf6zh/AMKqaR+UoOM + NpvBrFYyMxl+y5+5cMFPPq35chN+uysPjPBnbt7kGXxccriefb0SYZfXOPMiqBdVHmz3m/4Xr+pv + f+3LVvoiqPUVmPp88qboMtd5cmv1C4N6CF/9yc+9g67coq7sX+4pBDdm/LpH2Wcaxu3m3TlF/5od + yNc1LsG7r5VnBV6Nlc+ndFsTdJGikteVWql5nv5mYly6kX/y6HyWfq2H6KK8z0mW9CQ+WbGgY9Qz + 88569D6rQlYeE5kPvORxIiGNW/mSeOEIc/1dH55V/BmWH1h4GAjCGVn8xPbM1P0kxjr2RXjuzyam + PXk7j+BsiiUu/s5zqGfyac041b+vVyLs+hp9rESLDgndZ/pfvOqn/bUvuyqoPKnnds/WA3VS0dxC + FOX+0lds9gsnBRd++rl30FUw8gpt/kP/nZ/7OT+yw2aapOu54hUXfObk45xxCf6YJ2FXZbY+dYHW + 5PNCkdUeV/hd91EW9Vs2tjSsm+OkLwmGfq2H6HICxNd5KGHdAzD2Nx3lD+/K6/qDI5/Hns9xqp88 + LkxA41a+NNx5WWf12f3gWbX+kRDNCcKZTYzZPYJcFCySWlyMLyJN/XIJvp/MCAh/5mM9p/hdj+ue + cTw0Dvfj2X5s9oL7tl1H+k+fABKgqXRom+wYlcfG7X+1HiiLt0SX0TW5v/Tl2Z+c8FMP4aefewdd + 
BSs95C59uK7R+aVT6Qlf58ea6Up27TNUcYf7x/0b9C29L8If84jjCr2obl8oCVHXyee1a9cKGU+9 + eP+WnmnJOlgm6XqR7qKL8ta78eI1Y3/T0QmFd+Wd9dz8fpEPceRxIiVgfN877uQ8G8dN51v18R8J + z5/+GZakqTNcT35sUOP65NGhlO0zj5/RGOOh4I3b++q6eeSskxV7uL/05VlvguqfEatv+hmHy8WH + CFbhlYN6yM2FVrQ0gtPMy1l+zuIBajcPP+s9xh35Y+9wiQP3MY+rujqvqpsKsO81+bxUf2qlrkM/ + IHxeJ/SePOIdcdFddFF+6CT9iXde3xuuvSG6WUcBL79f5AAveZQ2fOmDfowdH21uun6HKR4PrO3p + n2FJmjrD9eTHBjXSm12Ltqml9mub8fOhIPs2v7hulMVbkluoMnl50pdnXqa0yYjVN/1shP59nBzs + X+4pBDfM2zPydz3hgVN1ZHYi44LPbCK+rnEJvutFBPNctWE9Src1+bxQbLXHFX4P/WD7vKR8tyW+ + yUOCoV/rIToq4vi1L0fn7W9EjWMVo47oTz/yZOz50gfihGPamZd8FZn9xnHf+VSt/XxgPf0zLElT + Z+hPZj/ZfRbt0Bn3JzcPaZ7VOLQ6gts6+RPcl2zdCt4tXh7P7tuXSeXr8qQv9k0cX63HjJOD/cvN + hfE0OTo/93HZpi0eYHZzxQsXfGYT8nWNS/DdJyKY56oN61G6rUkPBda65KSuQz++iYedvqyb46Tr + RbqLTgiFtk5KyDxm7G9EPmHtz7yuH/no98PkBF/6AK9wgFQC5135er9xpHPf3Y9+6H5W/0hYSf2k + 9pONJPOJaelYJGskTc2FC74cmvrlEnznQUD4Mzdv8pB0iGTTh9A89FOcXV7Xa95OxHDDBLe46q9s + VSS+GU/+ph8L0d32lz4fHLp0qIp2+qjv9AuDwgjvPmVRVjiOcRKM/cs9heDGjHf+rgc6Mg3jdvPu + nPb3j3w5R62HLd7ci8yC7/N03BVZWI/SbU3QRYqOdq1U6wf//KaTdqyDZZKuuq+l49RDdFHe59R4 + nbMZ+xvRqGfmnfXc/H6RD3WQx4mUgPHmc740vHAVl/ui+zF/6M43N4af1CCrN7ufhN63dHXZboIv + Ik39cgm+8yDAmrLJ0/WIs/jCr3oR0TxVf8SUn5ph37ydaNG1P/1XfkaIb8ZDr9reEaSg2zi/9BPu + 3H7nr6i/5oy3RIftglb/67x5SSQnhRE+fbttNnqMaz0YQn4R0PCQ3lhq5qUrf/PAx3SsQvPwk+8Y + d+SPvcMlrvLu+LF3VYberBGcc8nnenMuqdYKtX5QzOdlXKOOPLJP6C66KOPzkVX1aAJpfyPSCRHH + Mvd8zG0cGTz6fIvIE+LI40QCGke+DvR+42g636pv/tCdl44QzmCZs3tk0soZXOYDHtsk0tQvN+E3 + /Eb+5D3WI84Dv+pFhboMAKT+1GHb++YFqBdVLmzHp//ikYjUJf7al61quFGEZd/G6Zc/69r2jz/n + E7dvfcUnbr/mOfynfjbqsdMHW6tfGIKtPhlhmQ+6xsH+uSb/yfMwf5+f+A7nzPDax6R6Gp97ZQdf + 16h8PtecV81AdZ9Yi39F3vaV3qy5L5xLPtcpRYectEdffBMPO81YBx9D6zn0az1EtxRZ+8ljxv6m + 4xNWPTOv8gFqHPk89nyuR/WTx4UJaBz76kDvN46m++5+8C8I1w/d61LrSQ6WObtHBIuCRYLqJnim + 4qXbjUvwOkSG4Xf4M7OrWY94D/yux7gZlzoc77rt70SLDg7XkXy2VRHztb/2ZSucG7W4OtN/8Sue + tb3xt37S9uf+0+duH3eH63N/6cuzPzlRN4XhCVSfshR20DUO4rjWm878NDl8HjXn/GqfaYjezZfc + 
jxv0vQTffVYeTFdqWB8JXXpHD+u4rpOVaj2h3Pymk6bkX3TW/5TuoovyPh9ZSshzNmN/I/IJa3/m + nfWsp86Rjzb5wEseJ1ICxve9404lXjhuHu6dHljn9UN3XjpC6vLN2T0yaeUMLjOTzzjszyZoxt7h + Eif3jfzJy2Z2cYOPSw75USHnGbfPa5z9COpFlQfb8cm3eMXT/tqXrfRFUOsrND3z2tn2tZ/6vO1N + v/WXbV/8yc/e64M6V78wqAdvVfUpS9fioGscxHHNy1b3hyaHz6PmnF/tMw3jdnPFKy74zMAe+WNf + hHdfKw8prtKwPqXbmqCLFE17KNlKGU+L97vu52jIOlgm6Xp4P7YeoovyxI/3i3hNqjoIG/XMvLOe + PGyE3vG5Hp50841zNp/zpeGF4/7h3t3BP+l+7ezp/5aQ0tQZric/NnC4/iTRom2fefwluAhqfQWn + T372Hdv//dJfsr32837p9mnPu6P6cn96E1T/vCSr79IFG9YlepRglIW91ptjtm3dCs9LXPo0D8Pw + O2mnn3yyM5P4qO/gO4U/5iHFVRrud96r6CFFR7tWqPXhm7jOY/ZjvSzTup8ndBddlPf5NF68Zu1v + Oj5h1TPzznrysGEk9xdfjg11kMcOJTCO9TlfFgvHfdfffOfXnv6T7hGb7zlpyjcJfq1PrHYMP6Rk + AM+Abo5e2Lyqr694/p3b9/6WX7Z9/Quft33infhzw90vKtblSV9uzG0dPunYHB11B32bI4Q7709g + 4UpPhVFfhOP3bnYi1UM+67vilM/Ufr0E775Wnhl6FdbWhwKWjp6gi3Ws9lCqlWo9Yff9HI1Yr6a7 + Qb/WQ3RRnvg6DyWk3iZVHYSNembeWU8eNkLv+FwPT7r5KoHzrnxJvHBk8/mrWsfhZ1jXr/8CXUnq + JzWa4KOPyccT09K5BfkVtnDBTz6t+VKP0sk78Z0HUGvKJhl2a/yuZ1+vRNjlNZ95OxEWVR4criP9 + V35Wojrir/3aXgRaXfkX/FPi9hWf8nHbm/6zT8T8HLTGflE2heFjpHSQxX1s9PmUnq2H3FMIbpQ+ + meseZZ9pkq7n3TlF/5odyNc1LsF3vYhQWyvySqxyX1tHyw5dJLjPQ5VaIeOpm9/8sdOM7DoG6Ut9 + TukuupyAz6nxOmczqg46Rj18mCRvz/T7YaJA7i8+8nMbccKRThviMZ/C1n7jKm7y4Vl1bXvO834S + hI8mqZ6cBLGImt0jkzI1tjlXkRMXPFOFT+thX4Tn/pG/8xzqEecQyfSurHmq/tThvK7bvIjqRZUL + 2/Hp37aA4ou/9oVXNdyoxUfPxG9YX//C527f/XmfuH3WJzzDevCESwd24rash+WqPqMHQTifY//S + W/HforHqAABAAElEQVTkKz3LFg/DaGcu/XxO0X/FHfljX4T3OS5+pLlSw/qUbmuCHtZ3XScrZDz7 + 8Zs/dpqSPXl0Pks/+qW36LRS6NpPHjOqDsJGPXpYznOSF3E8/xp7PkSTFpmbb8SbrwO1WLiK67qv + P7o9/My3Xdv+0Avuhetnk9RPajRXl3A+Md1SXYIqcuJYnWzlWk2okkvwnQdg5QE+c/PehN95nX/G + 7fuituHtRKs8BNqf/m2rItbf/tqXrXBu1OKjb3rh856xvfZzn7/9nRd//ParnnVH98lO3NZB1zjY + P9e8hIf+fR7cpm7rXrT+DMNvxmuu+B1+xB35Y1+EP+ZBmis1VLcvlASQTKhQ30Q493WyQsbbr4fW + Aqgv6+C41nPo13qILsoTn/dD8lim/kbkE1Y9M++sR+fvsANf+sD5k8eFdb3m60DvN47mundg+tnt + q190P/5Yg/bfnqR+UqOJuoTziemWEMoQ+jOz6YEvh6Z+uQTfeRAQ/sxU6zJ+12PcjENglYMZjs4j + 
8eBqvxPbn3zpU4GH+PCJvnnK+qic/qv/5FnbD/yW529f++s/bnuGblfaOujK7nTemLjmm6d0pMnh + 86g551f7Oh+sd3OfA3WN/jWbkK9rXILvc0YE81y1YX1KtzVBTyk65KS9dNCbfNjpS3yTR+cz4mBT + B8nG8+o82U8eM6oOwhrHKvZ89vq8HOXzdp7ZB+LI0wUER76KrMXCcd/5XMb527njB9bZ2Y/q0hFS + l2/O5OwnMdaxsTyJ5374tB725GW1sgVnUwjD7/BnbtzAH/nNs57IyROc7dHHSsTqDENC4nQppINt + VTTsVVfFNYGoPqpfnoM/r/VnP/U52/f95udvX/hJzyz5DrqmX+rFNS8b9RnD51H6jXOzvo6TjgzH + 75P4EXfkj6244DIX3+QfpV2Jpfst3dYEPa3jktMXtfWBv+/n6MQ6+Bhaz1N6iC7KWPfGgzl5+xvR + qGfmnfWsp86Rz/XwhJuvEjDefNVE9pmvy1v3DoX9KJH1Dev8R5PUT2qQoVlWz9k91oyg2CSYuOC5 + Hz6th30RvvMAG/7MzVv1iJP1jSFeRDZP1Z86nNf1mhfBvahyYTs+/dsWUHzx177wVcShnlHaR+Xy + 1z/nju01L3ve9m0v//jt1+CPROx0ZUfRg2tetkP/0luwdY8IbR6G0c5c8YrLOWcG5sgf+yL8MQ8p + rtJQ3VQgOlImFMg3MceS0woZb//8piOw8CYoOsTTrntcfvGLLsozz3p/s4LkVR2EjXpm3lmPzp9Q + jD0fbe6ClzxdQHArXxIvXMVVffCPB9bZtae/YdUZric/NnTm7WjbZx4/hcXgm/ZjcHz+L71ze/3n + /ZLtz/+G5+hPy+eT1Q8p30FdtkP/wWnmpSs/Z+kHrXbz8JPvGHeDvpfgj3mu2tFYD12wulfRw/eo + 2kPZvn/G0/KbP3b6kr3obtCPfuktuijPa5v95DFjfyNSfuJY3/4ciTSOfB57PscxsvmqMePI14Fa + LBxN5xM7n1EY/oZ1x/Wnv2HVGfoTAofIjwYeEmcv2tbRtp8yYgjn5cfa6zNxS/7Er3n29sbf9Anb + F//KO91e+qel26xr1a1bt9IP/mlLP4bhd6m785PP+q+4G/QtvXe4xJEX/snfhV2RhfVAhdHR1w16 + WMdqrxUynnr5zR877chedDfoR7/0kOBRZuikhNTbjP1NZ9TDh0ny9kw/z7/GyjP7QJxwAFUC41a+ + 3m8cCZ1P7HxGYfiB9dyX/Qx8jwrC5GxuzO6RScclqyInruNMxNc1LsH3kxkRu3yjjpvxqw5ENk/F + RUzXSW2rj5xZ1+XE9qd/41WR+GY8daJdLfZitfyxtvpV+I+q//ZnPHf715/9vO1F+NPy6p9N8hIe + +pfecFn30rPs3T1ieHCZD/cP2zfwJ9+OP3HFM/OI4wq9WJ/SbU14G/pCLTl9UVtPvonxK3Zasg6W + qfU8pYfooozPp/HiNWN/0xn1zLzJbxwZKg6FL74cG+oljx0CMt58Hej9xtF0n2jj/o3PKAw/sF55 + hj+Htb1NEHh5GfQErdk9Mmnl5Exc5gO+HJr65RJ8P5kRsMt3op5T/K7HdSdel3qX13Xb34kWHRyu + I/2nTzlUWNepuuhX+FiU/TE8vQJ/Zut1r/j47a992sdtz78T2uD8lxBu3OdR+uV+wNX6YY0o3+Ha + xyT/8f5x/8gfW3nCn7l4Jr84rtCL9Snd1qQ3Mcvse1UKtZ6w+eaPnZasg+PUt+7nwtHvfUTwvMRL + fPb1NOm8/Y2ocTyvPR9zG0c+jz2f62Fk81VjxpGvA7VYOJqV7+z8JzY+ozD8wOJq80/h/aRGE2yK + zfAS0JsZ69iKGrjguW9RtPILcRiTd+JP8XeeqmPij/ziRWXNU3UF57yjj5xZ14XikNDx6b/qtWP4 + 
0wdntTUWZX+MT/zT8l/6yc/c3vi5H7+9CvM1bozh84hOpSf8fT5Y63wz9zmA5xbOO8L7XHNeF+cZ + pV2JpfXRhat7FT2sY98rvGnpaT3rzR87zVgHX8Mb3jcA0e990kX5uZ88ZuxvRMof3lFHzot+8VVc + 55l9IE44FSKg6yGf47JYOO5XvvOztxdqPrD8U3g++Rg8Z7XCSyQKuDlXkRPXcWQfTdCMfRGe+0f+ + znOoZ/JpLXofQvMwP9XY1em6zYugXhRMcNaR/o0XUHwznvwVxyIqD5dPpfH8Z5xtf/UFz96+87Of + u/3GT8A/JtbQOWO9O++yJTvWu3l3TtG/ZnIe9b0E3/eg8pDiKg3rUxdoTXiL8h7Pdte91j7fxLmf + QvrFOjtOulKfvG/EV+8v0UV5n0/jxVt8rIOOUc/M6/rpdR5HHflo04NzFI502tC9MF9FZr9xFcf9 + a342cWd9w6qfwvtJjebqzb6ezEw6Lhn9tAeOxci2Q/5+uQTfeRCgPMBnbt6b8Duv8884HhqH6/Rs + fycqv23Xkf4rTiKCB4Fdp/qmrfCxKPspNn36c+/Y/uXLn7v9nRd+3PYrn4mruNN93YvWz3LrPdHn + hT3F5ZwzU8sWmgbGKf6BP+Zx0NV5tT66UHWv6t77KZH22Ch+D/3qze/41Y91syyt5yk9RCeEglsn + 6ck85uTDRG/AUQ8fOsnbs3DkqzgQdH5smQ9x4asEzrvyJfHCka/ynfvfEHJnPbCuP/p2bvhJjaRo + liSc3WPNwMS+CM99PmR2o+zJG37Db+TvPFXHxB/5xYvKZr0SYZfX/ZkXWXtR5cJ2fPq3LSB52l/7 + sqvLY7+75p86xu/5FXfiHxOft33tpzxrw8/opec8tz6fyJ95d07Rv2bKd9T3EvwxDymu0mB9+3tV + 19FPidGuccZDBr6J657PfuSv+4jpYt1FJ4TCW6eqJzL3N6JRz8w769H7rIrZ8+XYcI7kcWEjL8+3 + A73fOJp1/tdPfcN6xjP1rw315ATLnN0jk0ZUzONJyawTr8zl15ovl+AZf+RP3lvhdz2uY8bt87pu + +1FTL6o82K4j/dgWkPW3v/ZlV4fw11/qWRtP3enj8POsP/PrnrV962fyb4KgbqUnJLnhnLHnY+Bt + jq7Rf8XlHAUyUMsd/yV5OvY2L/Smpi6+UHPSw4jlya06jVOf3OebGL9iCyI8dXZc63lKD9EJYXYk + arx4zdjfdJCPQ8c48ia/cWTw4P7iSx+olzx2CGgc++hA7zeOZvX5DP+RBu6sb1hf9iL8NTPnb9aT + k0nRLNnmE1O9Ikgz/RgTF3w5NPXLJfjOg4DwZ27e1EXS4gu/6kBk81T9wblO12veTiQK0cHh+PRf + /UlE6hF/7cuuCkDwqCVJSU/Z+QEI8b/81Ae3L37Lg9Kzzw+K9PlgrXPIXOfpc4r+NVPJ8nOpcQn+ + VJ6E3u5Zb1bVXxdoTXiL5n2VKmkvHfSwGnajyDd5ZI842NJbdFoptHWqejTB09+IRj16WBZA59Q4 + 8nns+XJsqIM8XUDuAevrQC0Wjib919+8fdnL/ZeMYmc9sATf7tKTEyxzdo9MWjk516Nx4pg9+1iY + Ma+X4Bl35E/e5r0Jv/M6/4xLHa7TdduPwnrBfduuI/2nTwAJ0FR1tl0NiiDNPjVnyvkP3/Xw9jmv + v3/76+/44PbB66XfOLcbzhkxPgbfF59T9H/87tNVOxH16QtV96p00Lu67qOK9sU0Hvt8E+NX7PRl + 3fqa2n9Kd9FJcbPj3i79yWvG/kY06pl5k984MlTcji99gJc8TiQg483Xgd5vHE0EnJ3dJUe93PDA + 0pOTZGiW1c8npqXDNqnoz3zAl0NTv1yC7zwICH/m1NEzSYsv/K5nXy/xwckvk+LVdi+W7TrSP/el + 
sgG7eOpTcSfqSV1PlfmH7n90+50/8MD2VT/60PauD+Eqnjpv6V76Yy35M5/C515RxMN5x/a55rxq + FnyfhxRXadx4r6JH3lep1vev9cSbWA+tgx7Woa+p9R/60d/Xne8LPT2Iz37yOC8fJgpoHM29vkQa + Rz6PPZ/rYWTzVd3Gka8DtVg4muC9frMH1p3P/k6ArpPFT2zPaoXNm8Kzmgaa8wGvzOXXmi+X4P3E + VYnNn7y3wq86ENk8Vdc+r+s1L2rqRZUH2/Hp37aA4ou/9oWvDo/91vbH+vQePJz+5I89tP22Nz6w + /cB9+rN9atnnEZ1KT3j6fLCW/JkvuR85R5Hz5RL8MU/HXZGF9akLtCY9FEZ7bBS/h3715o++aUf2 + 5NF9HXGwpbfotFJo6yQ9iTdjfyNSfsvNh0ny9kx/P3XG+RaRJ8QJB+7e90Ms+Xq/cewaxHc8epcr + 8uv+G9YffuF78Uh7C4P5BMzsHpl0XLIqcuKCF/VoYtoX4f3E3fMnb/OmLhIe+MWLCpun6g/OeRlW + feTMikeTtLe4Ew8hnK/95sn27E/rp8DLw9Dvb/7MB7fP+r77tr/3Cw9L19m29MOGdaz7VLbOFevd + 3Oew7l2fO4nLz6XGJfg+Z4CZ56oN66MLNa8X36QqdbXri9p68qGBX7HTl+xFd7HuoovyPh9ZSkhe + M/Y3nVHPzJv8xpGh4kCw+HJs4CWPHQIy3nwd6P3GwTw/+8H58ysC9g8shVy7i1X7ie3ZPTLpuGTj + SXnEi6YfnbIQ6KYmb8eRlw+jA3/yNi51kfLA7yf+vl7GBee8I4/EWzyiE9ziTjyUM0/7zZNtlpM8 + Wn+Mv7zufQ9vn/f6+7b/6W0f2PgD9qlzWvd5RKe6T5LpcM7YA4POn7HWfX//uH+Dvo/xPonjCr1Y + n3mvSge9q2e7vqitJ/x888dOS9bNca3neL/Q731E8H3RebKfPGbsb0SNY30rb/IbR76K6zx5v3Mf + ceTpAlhn+uhALRYO5tn5XeXt6cQD6/pdvBx+YntWK2weYWm1n7D1UJh4sUuUzoNAN7XDJQ95T/An + 77GeU/yuZ18v4/Z5R57VyKIT3OK6TuN1uOpzxoNg0Xee0fHH3PInH3p0+wM/eP/2e978wPaTD+If + /9g/uxw6p2mfR+l3s3NmOH6fxI+4nGP4Y/uc9veVGO6LF2vOV224Xwq47pHrlaJpD2XTrv5kjfs5 + mrIOTaf++33DuOghuigz95PHpP2NyCdcZY46WDd56ef511h5yq8JccIxIPvpowO1WDiYh59fEXDj + A4s/x7p+ft1PbJDy0gDYM9axSTBxLEa2HXxdo5q6CH+Kv/OENzNZh0g2fQjNQz/F2eUdfeTM2g8W + wS2u66z+7Bj+2l/0nYe1fKwNfov68z/x0Pabvvfe7dvf+4jbk75om9bQOb1LPxjWcd2LPh+G0Z+5 + z8HndozLOQLucQn+mCdhV2VWf7t7FT2k6GjXChlPvcb9HM1YL+pdPFqc0F10Ud7nI0t6El/y8mTo + 0AmFd89nL3ECKrB1LyJPiAtf76cPhTmBsjnvOd2Hn18ReeMDiz/Hura9xU9sRPEhAWDPIrVNgolj + t7Lt4Osa1dRF+FP8ydu8N+F33n29Pj2L6byjD50Syuu6sJb2FnfiyzH86bvDx2K1/NG+okSv+fkP + bi//7nu3v/72h7ZHsGGd4aBuvluYJdyu3eCs47oXtHWuQO/mPgfyFj4zmcvfSS7BH/N03BVZWJ/S + bU14l1L12S7toR/8emgd9LDOfSw+p6Ff6yG6KE/8en87j9K7DsJGPTOv66fX5+WoIx9telC/cKTT + RuVlXxWZ/eCubzf8/IrIGx9Y3D0/v8tPbDTDprHVM91lY9I+s04897GhqV/K3uESJ3jlwTr8mW+F + 
X7yI7DqZjyLs8o4+cmbtd2LHp5/qjxWJb8aT39vqsXi0/hh4+Q/3PrJ9wffds33VWx/Y3v2hR/e6 + sr/owfXQmSaHz6Pmm50zsH3OiQs+swn5ukafm89Z+Qa+7wEiyH/VhvWZ96p00GN83CvZdR/RxFPl + Gxb+g+e7Tp3Z6QfWdsddfmLzCQixENmzRLNNwonjJZZtB1/XAA/HRfhT/MnbvDfhd959vX5Tzbyj + D247QdVl23VUH3ovFJD1l73qIp/Cx6Lsj9LpFz90fftqPKT4sHoz/piC+zvoyt6iB9d6eEUIbljn + nse53XDOALWeiQs+s4n4ukYJr3MPLjNQxzwr8GqsVLcvVN2r0qEer32v/DhXP6yc31TmN510Yx36 + WIw/pYeusxRXaOukhDxnM/Y3olHPzOv6XY/OvwrZ87kedtZ849zM14Guh/lY3qP7P39VqAu+Yd15 + 5+7PY7lHJo2omCEGx+6TDcVkfzZRwJviGXfkT16quMvjxOLLi/MaN+NSh+Ndr/2I7AX3bbuO5Euf + ABKgqepsuyoQQar56Jv5xxS+8e0Pbi/793dv3/JzHyhBMh10ZXvpn2tewkP/Pg9uU7fSU2GHc2Z4 + 7WPa40fckT/2jn/gfY7gK35yX6VhfUq3NelhxDqXnO7AePZD/Zae6ck6OK71PKWH6IRQaOukhOQ1 + I/PoYDhj6BhH3llPP+WEG++PiiNR81UC5135krgeYid/fsU6Tn/DOvx5LPfIpHW5OEMMDs0oYjfb + IX+/XILvJzMCdvkYdwv8rsd1JN4qzzpdr/2dSCWqPDhcR/pJn3KosK5TddFfHfai7I+i6XXv/dD2 + G7/rfdvX/fiD24OP8r+nSd/p76Are0v/XOs2RwhuzPhxfrUv/bHezaWfzvEWzjvCX4Tvc6o8mK7U + UN0WuvSOHrmvKZd23UesnhLfsLbTP7+iIqcfWPRs689jSTJeIuz2JeMlpV2XdTfbIX+/XIL3E3fP + n7y8nJfxy1+fADNOb6au0/Xaj81ecN+260i+6o9A9TnjEVDb6rH60/qj5OUn8EcTft+b7t1+9xvv + 2X7qITyoqIfeROnbbdPR55M+owdD9PDyfaDJ4fOoOedX+5Id691cvIoLPrMJ+brGJfiuFxFqa0Ve + iZX1QWXR0dcJ6lvHag+10q77KIvvw2WnGevWdNZ/6Nd6iC7K+3xkKSF5zdjfiEY9M6/rZ3UIQJ6M + lcdE5gOvcEBVAuNWvt4nbjv98yvmuPiBdX7920jSn1RsnlT4rbmKvOgTbjbBRLEvwneewd95qo7U + M/m0Fr0rax7WR3F2ddKsPlYji05wi+s6jVfH4pvx5G/6sUhFV3e+H/+67+t+7IHtc//9+7Z/9Ysf + RBs8Z9RLwXnCpYMs7ssf3bThfomTewrBDbqNs46+R9lnGnp38yl8zt2BfF3jEnyfMyKY56oN61O6 + rQm6RLdUbKVaT/h9XsY1inpMHtkndBcdFXF86yQ96x7Ia76FI/2ej7lVL99nNfZ88CsN4phPaWfe + la+Ahbv2beE7zhc/sL7iZa/bzq+/bT4xmSqt9hO2HgqyUV329bCY2aqpHW7guX/kl32L/M7r/DMu + dTgv6k+e1YiqVHkItL/6KFuHpjri96wzyFmJYDZ89dbU5e//3EPbS7/jvdv/gZ9X8edWq184CeAJ + d9/0c++gq2CFk1sBXPWQ3rA03+ycgWHak/gRV4U0f+wd/8BzX7zFvwKvxsr9lm5rgvq+UOs60V7v + Kz00hp1urIPPq/U8pYfooox1b7x4zag66Bj1PBnfsJDwp7YvffHrXMWNrxc/sM7OzrdrZ/94PjEt + nVvoJz5E4eWRnZl5uD9H2TvcwHcexFhTHBLWF+GP/MIhonmqruDMYz7zdiJVqfLgcHz6qfysRHzx + 
135tLwKtruTLm+55ePv873rv9kd/8N7tPfg3gdYr/bBflE1h+DYvHWTpGA+6xkEc13x3iYCGx+Tv + +wGX9XUc0yXtSfy4H0f+2IoLLvOJPK7q6ry639JtTdBDiqa9Vqj1gX9+00lH1sHHIF11X+sekwW2 + 92HwvDpP9n0SOcb+RtQ4Xos9H3MbRz6PlWf2gTjydAGph3wdqAVw/w/qy24513TxA4uYR85f059U + vAzYSqv+hGDv2KEYc2asROGiRtk7XOIA4f6RX/bkHfgjv3jB0DwVF5zzjjyrERWo8pDQ8enHtg5X + fPHXvvD7/sq6MtO78JdS/dEfvGf7bd/9vo1/tmqnD6pc/cKg4Dzh0kEWdTrqGgdxck8huFH6ZB7n + 1vnhY7p1DGJSPU/EfUKqKzWoQwk9J+gRHVKuccZTL7/5Yzeq72fpKrvuMUCtu+ii/NxPHjOqDsJG + PXzoJG/P9ON8M1Ye8pGfr4gTrjeqHvLRj7EWr/HG6debP7C+8uX/gX/jHzn7yUnusknpJzuflCga + STXbwdc1qqkdbuC5L15EhD9z8w48Ei1urJzX+WdccM47+mC4geIRHWzXkX4Wr3jaX/uyq4xDPbV7 + 26YPXT/f/vpP3L+95K534x8DP6CHA4tpnave1S+c1IPI6lOWZD7oGgdxXPOyHfqX3nBpHufW+RlG + f+ZRz62cd/Lt+G+SB2mu1LA+pduaoIcUTXuo2QoZT8tv/thpyjpQ79JVi/V+pF96iy7KE5/95DGj + 6iBs1KOHJXm5m5l+nn+NPZ/rYUXNlzjlZX0dyDX+dtGX6K9qD99xvvkDi+iza9+kVngZaOJ3bLlZ + LJPP2Q6+rlFN7XCJA6qfzFiHP/Ot8IsXkc1T9URM5x15ViOqUeUhoePTj21VJL74a1/4ajEEZd6u + 6UH8d39/9x0Pbq/4jvfgB+v3b/jvlXU+qWenDzZXv8QRxfOs/uTn3kFXbkUPuacQ3Jjx437UPtO0 + /FjLln4Vl3uRGZicI5cal+C7T4DJfy/+RcOrfviB7R0fwL8Nvc1Db+rcl+ho2aELlZntWin2o334 + 9dAqW5vCm6DodK79vim/dYYBXX0C1nvpz3OGS17zLRx1rPcF/aMe8ylM+4uPOO4jjn11Acm78hF4 + fnb2TWa5+PXyB9b1a/+AGfvJCYM10ObQzGS0M9shf79cgj/F33nCm/kEv+txHTMOhakE1+d67cd2 + L7hv23Wkn/QJoPqb8ey34roeEt6ewfK/5Wce3F702ndtf+It+GMKD/g/UvYn26qrdR66+JMTBIKl + LzKmv4OucbB/rnEu0Zkmh8+j5nFunR8YpmO85lFP36MRd+SPrTzBZa78O37s/ZN34c+bfc8921/4 + yfprcbB3O4YepuvCSYC6XtBj6s7qrFDrCXt+00n91oF6Dz1P6SG6KOPzWfrznM3oe5P84a33Basq + 4OX3ixyIYx9OxA3F972jff06/lK1Z32LnDd5ufyB9Uc+812I/47jJ1Y/YSEKu5SdmQm5P0fZO9zA + n+K3trfG73pcx4xLHc7LsiheldeLZduffoxHgwZoSjzrqjj2eex39v4Er99094e23/qd795e/ea7 + 8QN1NpW6MfOW1OXKPhHWyzMvk8pXaPrCLBxfD7rGwf7l5sJ4mhyTv+9H7Ss/1ru54hWXe5HZhHxd + 4xK8z9H1zTyU53/Hf8j9Wd9z7/YP8B9203c7hvVBdvaxJp8XClpyUte6j9zHL5/XCb0nj3hHHGz2 + Kl7dB8e3TnZ0Xt8bBPiEq8w9n72sn8wee770gTjydAHcTx8JvPba7ctf8O7wXDRf/sBS5Nk38YnK + FpWTcxXpJzuS02YRKT5zMl+CZ9yRXzbjwpuZnAd+53X+GRec63Pd9oOjF0UH23WkH9sCqo74a1/4 + 
avBQT+0+odMvfODR7Svf9H48rN6zvenuh6sdnlDqxswTg24ZrXPVu/oFQjDiR7zoDrqSLHpwTf5D + /9JbMOefNtOQdjePem7lvJNPvLkXmSvvjj/5Ks8vfvDR7Y/9yIPbF7z+nu3776m/MgeYJ2tYj9Jt + TT4vFLHktFKtHxB6aC2ASrYOfSyIv0B30UUZ4ut9Jz6esxXob0Q6qfDW+0L1GXj5/SIf4sijtBWn + vDPf9Zv+sN1V3ewPjgah+Zn/GJfoPqZKq/6EgF2XdTczhvtzlL3Dseixf+SXfYv85jHfjEsdzut6 + 7UdxvahyYRNHcSdeQNURv2edQdqsPmbLT9SaP1D/az9+3/bSf/uu7e//7ENVb9pxQdYDdfLEcgtR + kPur+ttmvzCoh/DTz72DroKRV2jzH/rv/NKt9CQ7bMkuVsfLrnjF5V5kTj7OGZfgbzXPD95/ffsd + 33/v9pX4D77fyf/Nz5M0rA8FLB09+bxQQ7XHFX4P/WD3/Ry1Wremk848d+fhfukuupzA3E8ek/Y3 + IuUP756PyMvvF1GII48PmhtVD/m4ff7Adsf5t8pxycutfcN61affh7/U71+4JRaJJPUm2D3JKUrt + zzeJargE3096gMOfmV3t8pAweUSeeoybccE53jj7O5EYRAeH60i+xSue9te+7C6gFk/s9C9+Af84 + g59Tfd2P3LM9gAeX+2O9SzdWoH4585bwVtRwf8OPQF6m9M8IHkDHw+TGMU4BxMk9hXCiFe/802Y1 + jNvNKqDy3sJ5u+CL8V3vreQB5lvxj4evwM+3/upPPbR9gLo+wcN6lG5rgi7OXXKgCivV+sH2ee1r + lH/ykCA6kgW29BadVupw7SePG/e94dp5RFf3QLtV4OX3i2jcH/J0Aamn7u352T/a/puXPkDkZePW + HlhkObv2GrdUEtabwE92PimtVp7o802iIi7B9ycAwMpDsZl28lL04jnye9/+GRececxnfyda5cFB + HMWdeFWkOuJPXZwVPhZlP87T2+5/ePtd3/WL2xe//r36gfr+E4v1Lt2Y2npg5i2py5X91qdw7hcG + HcKPePV30FUwJ7RbwnG3R+dXYaUnvNZXWXy+2DvWw3qt/4pTg80uIlk7XOLkrvuDtfgz14Ht4gr/ + EP4N61/+yYe2z/7ue7b/Dz+gfyKH8rOyOrg+P72rve38VHjpoIfVsFOj+2m6G/Sj3zqTLooQn/3k + SVafLxDacH2jDm5gXH6/iEIcebqA5CUf/ibk7eyW/nGQTLf+wHr0Q/8cCd+VVvuJz+bZ9JxVI5Fj + SCRqdRrfT3qEUIq2L8Bb9MUvXkQe44KTn5qBz/ydSCQqr/3pp+qV2Kx7xseuGkSw6nm8Vvc+fH37 + H95y9/ZZ//YXtrve80EW4Dp4+qOR2lZ/zG09MAtHoMfqH/EYttkvDMGKv/rxdNDVgVUH4yQMd3t0 + fhVWesLb+bFW+Zk7H/NH/xXnApueRDKU5wT+MeUBU+olKf/R8FVvuW/7z99wz/aW+56Yn285X+m2 + Jp+X6mElHD6Y1KcPFygX25iqf/JcpLvoorzjZElP8prR9yb5ieN5rbzJf/n9IgfidA+xrASMJ9/1 + 6+fv3j700L8l6lbGrT+wXv2Kh0H4mrTqTwjeLau0m5mZ+3OUvcOx6LHvo0FPCmeTt85vHvPNuNTh + vOazH+S9qHJhE6dLwbrKrkXbDou/mqw+yvqIJ/5Tyd99+/3bi//Nz2/f+BP3bf1//EtdOOxVP/uO + iQVG6yocK/Zwf8OPQPcLv2DpKzyMO+jKra6DbgTSHqPzC3eTc2Y4fp/Ej/tx5I+tuOAyF594w5+5 + 6tzFjfxYegD3Rvww/vNff+/2x/HzLf7Fho/ncL+l25r0JmaeJSd1HfoB0fdzFOR++lis5yk9RBdl + 
rLssJfQ9Uv7cL84YdM+8rh/7wpHBg/uLz3GqP3zVmHHId+3a39/8bAnFTedbf2CR5vr538ST8RH3 + 7CL1pGWREIddaSaW9hxl73ADz33xIib8mZt34I/8zuv8My4452VZlUeqIlnX5cT2px/jVRFxIF7x + savJ4inrI5re8L4Pbr/pte/cvvpN79veg3+b5X5GHtbBa9EO1hsTCwz1y1k4Aj129WNr9QtDsPQV + HsYddOVW9JCbBRlPk6PzC1d61r7Kxno3V7zics6ZTcjXNS7Bd5+IuDQPMKm3E1Td1zF/yzs/sL3s + u+7evgF/HOLxem45X+m2Jp+X6kkl1HXoB8T8ptMo1dvH4n6Gfq2H6KKI+5YlPX2PyNn3i/eHNqaZ + N3pdfr8YDV7yOBE3ovcj16+d/a/auMWXx/bA+sqX/Udk/Rb3zOywIAq72c12yN8vxGHscImrffFi + Hf7Mt8IvXkRynnHYYNrK69n+TlR+245PPxUnsdnnjI+tcDpq8eFPP/fQI9uXvuE92+ff9QvbD+G/ + ++NY/RQv87AOnv5opLbdZ8UpXjgCPRaf6139wi9Y8bdujDvoyq2ug24WZD66OMjb883OGSC3cQI/ + 4o78sZUnuMyVX7zhz3yTugBZQ/2tvvnzrf/5bQ9un/s9d2//4hc/8p9vWZ/SbU0+V1Sx5KQudR+5 + j19880ffFGwdHNd6ntJDdFHG59R48Zqx7xfvD4bkGHmT3zgyeHB/8TlO9ZPHDgEr/lu2L/mMn0/s + rcyP7YFFxuvnfwmveMC6SM0sknZm4srPpcYleMb7aECDgLYn7034lR+Rx7jUIT81Sx6Jp0SrvPan + H+NVkeqY8eyXtsLHouzHMH2AP+z9kbu3l/zrd27/7zvur3pMvKuXntTB07dQ2GS9MVec4MIR6LH4 + Fk5vApqCmUh6Ycv9HXQlVdfBuCkEnXQv/nkvOj8wKj/zKfxNzrsKc57gMlf+Hf/N8hQe0xrqb/Rd + np968JHtv37zvdvvesPd23+s/5pgBd36yvqUbmvCaUW3cPlgWk/4fV7GNSrngW31PeonpnUXXZSZ + +8ljxv5GNOqZeWc9Ov8qZOVxfT5W6EgeFyYkvrniD7df+4sVdsvTY39g8VvW+fbP+glbl1U2qsv+ + bELVEIexww089y0ZRCxcZl7OXZyJ+NrDeY2bcanD8c5vP0J7wX3briP5ql4C1eeMR0Btq4jqrwu6 + xcW3/dwD20v/9c9tf+Gtd28PPoJ/X1I8cx5lrjp4+u1gvTHZSOrGLByBHu5v+BHoT2z4BUtf4WHc + QVduVUKheCurbro4Zv19frWvsrHezRWvuFs47+S7CN993kqeUS+WHupv9N3b1uW77n50+zx82/pT + P/rA9j78i5HHOqxP6bYmnxfIlpzMV/eR+/jl83IdyWsdHCddR/3EtB6ii/JzP3nM6HujSG2IbtZR + BV5+vxiO+vELkwvEdO3s7J9tX/YZP07vYxmP/YFF9vPtr/QTti6rbDSRfV7S3Sh7hxt47lsy9MQU + sRkXXGYSF19yOK/zJ16nXjjnnbyINFAUgsF23uqjbAFVR/yedQZp81BP6rpo/rF7P7T9jrt+fvv9 + 3/OL2zvwqa0x+nM/M18xpQ6eftfPemO6oI4XjkAP91f1Y2v1C0MwE3W86A66kqrrYJyE4m6PFU++ + 0hPezo+1ys9c+iku+MxkLT+XGpfgH1MeEKbeYq/+Rt/lWLjzDV+Mt/8L/6H5y77z/dvf+mn/fxs7 + /pKFeUq3NeG0JPhol/bQD349tA56iG/y0D/0o196i04rVbj2k8eF9zeiUc/M6/ohE/3Ik7Hny7Gh + fuGAqrofffTsryTmscwf3gPrj7z0e1Hk65jIT3aIwaIpSorPnGrK3uEGnvuWjCKYN3PzDjwShVmz + 
8zr/jAvOeScvwgyseNuuI/1UfxKb/c342AqnoxY3n96Pn9p+zX947/bZ/+ad279/zweWXgwb/bmf + ma94mYd18Pp1/aw3puvoeOEI9HB/5uXO6heGYMVf/Xg66OrAqoNxLMh56eLo/Cqs9Kx9lY31bu58 + zB/9V9yRP7bynMB3n7eSZ9SLpceoO3XSkb6wEo72vfhm/D/ir5z+HPyPPPg/9LiVYZ7SbU0+V+UJ + C/MsHfTQGHajVK+PQfWO+olpPUS3Olr7yWPGvl/dJ6sYdZCfvPTz/Gvs+VyP6heOAYp7Hb5dfW9i + Hsv84T2wmOH6uZ6QetKiiN1M/2iCZuwdLnFyQwzO+K2Zl7D22eQuDvvh45JDfkRwnnHBOd44+xHU + i6KD7fjkW7ziaX/ty1b6Iqj1iYmfxn/rbfduL/qX79j+NuZHSh/XXQHpE2b2Xc+gZxzzUinM7o/1 + xsRixgtHoMfiWzi9CWgKZqKVn3EHXbnVddDNgsxHF8eKJ1/pWfsqG+vdXPGKCz6zCfm6xiX47hMR + l+YBJvV2AvU3+i7Hwrlf28Dh10/hf+rxu7//7u2VP3DP9rYH+g+iNOVcJK51tOziIW7JyTxDPyB8 + Xs4fTvHVMajfUb/58r4gXRRx341XHjP2/UI+x7OKUUf0p198FYf9xZc+rE858L8cvPbnjH7srx/+ + A+tVL/l2lPr9+qRgkSw6M+sYTaissne4ge8nM8CUqO3JO/BHfvHyMIFPvC8D5TOftY0fmwaW37bj + 00/FOVD4Pf9o89ivWP3yHe9+aHv5t//s9jVves/2fvxF6kqr0wt/gUd/7sf+UaYTYsOfbIhTXtab + dka/dDMPeDN29Svcl6loFBGdGOO2DrrGoTpgkN9AejRm/fNedH6gWBWr1VzxiosOmcl44I99Ef4x + 5RG9dWMqDear/KmT+8pnQMHcgd7M5f/2d39o+1x82/oz+PvI+PdwnRrmgU95evJ5iSdRxR99gOAN + WnUYJ3vR2T/0o199iE4rBa795Ck+3RuuuZ/6Vt7kv/x+MRpxi+/7ti954V3c/XDGh//A4t+7fL59 + vZ/sEAPiUHx/cqAU2nOUvcMNPPctmSVqe/IO/JHfeZ3fZ+J6gnNellV5cmZdF4pFoP3px3Y5hr/2 + ha8mi2e2/NP4t0i//7vftf1O/KzqP97Hf1QY9en6hL+iRn/uJ/VwHnmYl/FutHhjGtjxwhHosfpf + OL3ZaApm3o4XbNSdQjirDsZxIWBlcd00xHPoi2mI3s0Vv8OPuCN/7Ivw3N/xJ9+pPKkTcw/iKn94 + 6FM+gdyvbeiDX8IVP79B/w38jz74P/z4pp95CP9A0syOFg4RytOTeAgoGq7wu+6jLOZZNrY0rMPg + Ee/CtR6iWx2t/eQpPuYlTPnDu+ezlzgBFbjnc5zqL75Hz7YP62dXIsfLh//AIsOnvuQf4RX/1hAi + sujM9I0maMbe4Qa+n/SAWlOIo7DBO/DhIzWHn/iuY8YF57zG2Y+gXlR5sF1H+lm84ml/7ctW+iLw + mn/r51/4ofdvL8E//vHfAnrwUEd9vg1Vd0FGf+4n9Qx66Qyb8V0/eWMyj+M0C0egh/sbfgT6Ext+ + wUy08jNu1J1z7TronkIQv+ef96LzA6PyMxev8kaHzCbk6xqX4B9THrCm306g/kbf5Vi4qTNw+KV+ + qi4wKoL/BvFP/vB92+fhfwDyve9fP98yT+m2JvEwsGnE4zq0rzzLVhLhc26l66hfcbBdHyyeV9XX + OikheYmm13wLR96VNzoYRz6PPV/6QBx+nV/ffnz7Qy/8p8F+OPNH9sD6gjP8662zv8wu/YSvmZVI + lFFS2Ttc4gRnUyX2tBkXXGbSHvjFS1GwL56KC855GRY/OAwkm2E8I/nTj20BxRd/7QuvcG6QZfuH + 
+HNUn/HP37H9xbe+b+PfVqK8gviSdH5dn+kHaPSXuMbnTqQOxnf9rDemgR0vHIEei2/hdJloCmai + jhfsoCupug7GTSHoXH2J59CXygZmN0u/igs+swn5usYl+O4TEZfmASb9dgL1N/oux8It/Zjh+A0L + jIpQmSjgR+5/ZPvC77t7+5I33bP9NP6tsHngUJ6exMPAao8r/HYd2oft8zI/9zjEt+jKHnEglA6i + 06rjvJ882nYddCi/65l5owP71vk7THkXn+Nan7OzvwTsvvCKu9VJJd0q+CTu350/Y/vJN//02dm1 + T5bKvLynBvcpWl3C3Qy8mpzzwDWvDvc0v5/sdZjkqXwR84Z8ndB8C16XTxsiwksdiqajH24O4D/n + lz57+/73+X/4wMPtOANku072e/QL5Lq5ZH5NzNemF1VHATCVroXn/kV5en/H7/iZiDiNTLBdR28c + 2qt9R+1fc24187ITLb5TM/3MN2bpwfhTg/us9zB/WHkmf/HdwKM60gECyl4PrZBUvdWoedIXw9qB + +qt80uGX2CucOw1ov3mSSbPqHTywoyP93Ufyive4T6DYqj5FamPWXxu9r0W97PNgE/lQyc8/+qs/ + /VM2fcmZ6Me2/si+YTGXv2X9bxK/LplKkCijmLJ3uIHvJhHiw8qhQb3gMpP2wC9eRDYP/cAH57w0 + w9uJyGaY4PTXZSi7L0v7zcNCuwws3sCHFdOST6c+LxVvwaivboXrZgUYo7/s7+olphKIX4nCW3mr + oI5nHupQY/Exzn24XxiCEe99+/k66i7+VQfdCiCwR+dXvUuHzg8k07n6mc/1Kn7osYSuFLPP4DKT + F/4df/Kdiit8MXsadYeHjvTlymOjP/wSrviXH0FwuJ7SoexyzEk8zsNXDiuUvM6z9DSm6hBv6Trq + Fwts1weL5yXe1FXxQKT8vl+NI+/KO+u5+f1idsX91Y/0YUWmj/yBRZYHnvN/oqgfY7frk5mijCGR + qJVV3c2A6ZOAM37riHj5ar95b8LvvM4/4yKm8408KxGycB8vCHQd1UfZ5Rh+44xXuAmEr/qrE9el + DEpgfsDlL56i6D5hJ67xrI9D+lW8G8Um6628amTEM09uocKHrm07XoILP+KV96Ar4pLQ7mqc+zVm + /ce+VDZwu3nWnXPOTM7yF33byhNcZsGrT6wvzVP45uZCgo6+y5m+ANCObeDwS3m6zvgBg4M4PWzE + a7sccxIPiZtGeVyH9pVn2dzjsA6O6zpO6cGysA8kw6quikeFyav7SVjjWObKGx2ME5DgAx9t7p7/ + 2PVPuhPPiI98PD4PrD/xAvxt/udfw+ry5LUoo0CJRK3QXHCZAeO+tMTampZ9Af7I77zOn3iptcs7 + 8uTM2u/ErqP6AFF4la/sxU9/9ag6bcuPw2Unjidm2csf/uI46KEo8DpfYZKHfO1gnpjMs3j9SUmg + h/sbfvE7XnzFm7rJmz6czvxJaDc8BlaWPX+fN7ydX6ywM1e88kaHzGQ98Me+CP+Y8oi++mIuDuar + /Orbu6rfS+OVn+eMXzt91FmVLXnor/tQtk5MeSod0844JbJCzmN/81RNnKzD4Bn1x+/6YKEvMu33 + k0fbroOwxrHaqp+7dR6X3y9yXPua7b/EM+JxGI/PA4uFfMVL/9X59fN/mievRRkVSiRqpdPaz4Bx + 35JZorYvwB/5nReXIjwVF5z8SLD8SKqEOhXD2m8e+sMrQPvTB+fqUflsixaHywSOJ2bZy188RcE3 + SfBzFv6Yh3ztYFxMAzteOAI9uO+whdObjaZgJup4wVzXjEtCu+FpIVYersRz6Es8lS5pVz7mLx0y + m4iva1S+Hf/Ac/+W84A1+TsB+YsvPPQtnDovG/Xil3CtQ/wIgsP1pK/wyFF+TeJxHr5ykKfiZDHP + 
songUF2LruyFc37iAEZf5nWcLDvslxdAOpTfcTNvdGDf5iP2yMeN83/6yJd8+r+S83F4efweWCzm + 2rX/Fk/e+1SXRBkVlu1PAohOGyL1kxq2jwbbCON+5sYN/BSJWcxjvhkXnPNNXgQZyHDDYDtv6lu8 + ArS/9mUr3ARlY9KlYgLXpQx4GfX5Ngw/3QufONdjerJ0HYxXIl23bDdfxwtHoMfi020U3p+c8Atm + 3o4XbNRd55iEdiMw+yMPl+I59KWyKx3jZVf8Dj/ijvyxL8Jz/5bzpE7MPVhP5Q8PfconkDovG/rg + 1+wDSKOqQddT5wugebSQAEpH/hnXeca9UJ5lC8K4ImieUT8xzs8ZBvpiJo6170Lllxc2YY2jufK6 + fnqJE5DgHR989z+63fnH5XicXh7fB9aXf+bPoK4/q9pGE9P2JwEOl36o009q2JbMEnFf9sQN/BSJ + /OYx34wLzvmMsx9BvSjNYTtv6lu84ml/7ctWdyYoG5MOlwlcFzG+JOaPn2HrsI96KKp1oIUhPSpe + icKbdswX3ss+AV0P6wQ3+Vin+ggP9w66ClY4uRXAVY/Or3qXDs6nLEqXtCfxNzlvF0w5WEfxZ0YV + jylP4bt4LgavZC5n6nQHlZ/64JdwElIEipBZeuqhJt4V5zyVjmmpP2dP2on+2leepSf3OKzD4FGe + hWs9JPjqaO3L0Xl9b8Rc/Kxiz2cv4qB7xuTD7tdtf+gFPxvf4zE/vg8sVvTxL/lG1K8fwO8KrKb8 + SVBPaojaT2peNgREyt2TP7jMJB4i2XTkMS4452VY5VmJGG6YtKc/9RnPayRA+2u/tptgwXS4jFNe + A/BqG7Dyh18A3bbg5yw86+Vg38xDpdpB3pgGdrxwBHpw32EL537hF8xEHS/YqJuJOLoOrOvNof16 + WfHkWzp0fobhN9mO9TR+xClfcWuqOpQnuMzkTZ/hz3wqrvCY1lB/o+/ypC9X7jzsQG9mYG70YxMN + up7SoexyzEk8TFVlciWC8DrP0pNYDvnFK7qyF875ixc6mddxspSQeNG5DjqU33H9vuBuAX0PBVTg + ynP+xuvPfOE3avNxfHn8H1ivPMN/9Xntq/pRnWIlEsTkjGZ3MzD9ZMZaR8TLV/un8Ed+8SGieSpP + cM438uiUlAAv3McLEjo+9dkux/DXvvAKN0HZqrvejq5LGZSg65M//MURXWAmrvG5E+oLfr0LBCze + pRvZOl44VuSx+Exom/3CLxgW6iN+xh105VbXQbcCuNuj8wtXesLb+bFmuqQ9iR96uMCmd/7iO3U/ + HlOe8Ax69zf6Ll/qdOXuR/qgE/UjIQke+pWeethEN+FKtzUhasQppxVKXj006jyqJKOat3SVfUJ3 + 0anSjpNV9WiCp+/XqKfrp7+AxpHBo3U/21696VkQz+MzP/4PLNb1qhf/O/wA/pt3JfJSY/iToJ78 + aLqf1Lyc9OO3NS273gy7OBPxtYd5zJf4XDqCHO/Zfmz2gvu2idOlUN6KI3DYi7/inEB8BUMfJnRd + AiiB+REnf/jpxzjowa3GWz4nZDmM7/rZd0wDk9c4Aj0W38K5X/gFM1HHC3bQlVTRg2teXtpjrHjy + 3eScGY7fJ/Ej7sgfW3HBZS4+8YY/c9W5ixv5sfRQf6Pv3k6fnl03cPg1++AJcSid5LnsXpUOM84M + eB36Kc+yBVEe6ux8XccpPVgW9ld9s27ymrHv16in3xfJJxbmJZ8H9Tg7P//mh//gC9+YvcdzfmIe + WKzw0Wf/KRT/ni62mtKTmU3RzgxQP5mxtqa8BN5v3MBPkZhDfDxM8M644Jxv5MmZdV1O7PjUt3jF + o7OZ/PQzO4b66Ql3xwlclwB4GfXJH376MUZ/iXM9J/LodiJGBZB36UaqjheOingsPhdu2/ESrng7 + 
XrBRdxpOv6RF3a6jkmBa8SzM8fR2fqx1TpmLV3HBZ3YgX9e4BP+Y8oA19XYC9Tf6LsfCLf3Yid7M + O574sdn3pnQouxxzEg9TVXtciSB5nWfpSSyH/HUM0nXUH7/3YfG8xOu4xrMPl+066Ggcy1x5Zz3m + I1bjvQ8/uv13MR7v+Yl7YL360/CwuvZ1XbBEolZQAarsZoD0ZOaM3zoiXtbaP4U/iGQ+RDRP5QnO + +UaelQhZuI8XJHR86rNdjuGvfeEVboKyVXd1oryCOEHXJ3/4iyO6wExc4xnOob4wMV6JwhvTwI4X + jkCPxbdw/uSEXzDzdrxgB11J1XUwDoESkA6PFU++0hOuzo+1ys9c8YoLPjMpD/yxL8I/pjyitx5M + paH+Rt+9HZxn5UcnT3/DKtm28z+1/eEXvrfketyndZMfd2oQ/sPzO7b73vK9uHevyKXmk1mXac6A + 7i4v7eHX5YTd84lahce12X3S1aUjfPLxqpGO72UvPBlel0/54gew7M7TfmA42l+06Yj7Gk7oeMBv + 8Bcq+Job3zRYqJzaEM6ffG7H+x13yNP7O/765NzpEf5V1+Sf/bq9whu+f805nHgIMEq8cwZOdY7Z + +Yg8MQ78uScXPrRAobyDP/l27MV7Aw/3oysDyl4PrbBUvQU3T+4/w9ohOqUjHX65vsmDHeHjN08Q + mougebCYfXUf4onyriN6mMesro9r9zHrN27tl/3Gh//gp79C6yfo5Yn7hsWC9UO3s6/CATzaYkvN + EhtvmojSYiIs4mXmZRUuM7klOhce5jFuxgXn+Hk4iDNQBDlD15F8xs/bZH/tI77LUF+2RVtvQ9fF + FL6cHT8ugQrgy+gvcY333egEvNSrftYb08COF44VeSy+hXv6GxZltR7RyYKO+zT0KyU1OQ44/PJ1 + Ck/0BUz3hP6b3as6P54XxiqHdsVxX3mWTSyH6lCecQ8O98n1AYx9MnHs7wN5ta08rHvhyLvyRi/f + Q+zjh0Db9oxXO/qJe31iH1is+1Wf+f14/RsWiVpZ1d0MgD4JOOO3johi1z5VPOLDB4iG/IhonsoT + nONHnpWo4jEhoeOTz3Y5hr/2hXd+5SlbdVcnrosYJ+z6ZIe/ONIn0bpUnsXHcA71hYnx7WC9MQ3s + eOEI9Oj8O37Hi694O150B11J1XVgzVtefM5Cc9Rx6EtlM4w0mU/hR9yRP7byBJeZvODb8d8sT+Ex + rcF6ii88dKYvVx4b+uCXcNXH8iMIDtdjHWOXY07icR6+clih5HWe4jHAKNXLPKJznaf0EN3qqHVS + 3eQ1ad8v5Q/vyjvr8VPu/G88/Ad/wxPyg/bR5uP0Hz9PxlPr5z3jT+N/QvZ6uvxJgMOFmFSnn9QU + l378tqZlT9zAWyQyepjHfIn36ZExeT3bj81ecN82eXQplLfiCBz24q84JxBfwdCHCdMfkEpgflqj + LsZzjP4S13jDVx2M7/rZd8w9bz4BnYDhQ1ds2na8+Ip35VdhN8QlobLxlrOAMVY8C7vJOSOm9cRa + ccFnJu+BP/ZFeO6LN/yZi2cXJ/p9/eKv/OEBzPVxsTs/9Idfs4/lBxQO11M6lF2OOYlH7F0OF0M/ + 5Vk2sRzuh7Poyl641kN0q6O1nzzFhzxqiLP4ae75tM+Kz7Y3PHLfp/33Aj7BL0/8Nyw28MoXfWi7 + 9szfh9bu1pOZlwmXger2k5qXA9BIyX3ZEzfwfBPMYR7zzbjgnA/8zYtoA0UjOtj2pz7bAqqO+Gtf + +Kqi/UVbnbguYtxZ55cd/uIY/SWu8Wk3eRjf9bPemAZ2vHAEeiy+hdObjaZgJup4wQ66kqrrYNwU + gs7Vl3gOfalsYHYz+RIXfGY75O+XS/DdJwIuzQNM+t3xV/7E07dwo15k8DefU34EgcD13Oxepc7w + 
phLaFcf8+OXzMq5ROQ9sq17ZIw6290m3OmqdpCfxZmQeBXDGEB020n9mPK3ufmS7/srt1WcPO/KJ + fX1yHljs4cte+Pbt/NqX+5MA4lE0iijxqCHEACxStj1xA2/RlzjmMZ94Ki44+XkGybMSiYRwnZH8 + qc/4cgx/7YuvalA+7guGPkzouohZtvukHX4tEbjXI373U5jkIV87GBdzz6s6eNs6vHRmAIb1cLzv + polSt2Guy+kcl4SyyF98lUa84T/2JR44d/Oop/FDjyN/bNUZXObuK6qP+VSewrPeHsQVX+qkL7qY + MTb0wS/hWgfrJBOOpTN5V5z6kB09RpyKoW39lV95li0IXqwD5+IZ9SsOtuuDhb6A5HbVVfHKo231 + o4DGkXfljQ7Xr13/0u0P4L39JI0n74HFhl71mf8ET+ZvoKp6QmeGnLO7BAAAKHpJREFUi7aPxlK2 + TXGDy0wuic6Fh/jAcIwLTn4eZvLkzIpHU/tTn/GqiID2177sLmDCdLjccF3EOGHnlx3+4hj9Ja7x + DOdIHYxHfvfHPDEN7HjhCPRYfAvnT2z4BTNRxwt20JVUXQfjphB0rr7Ec+hLZQOzm8mXuOAz2yF/ + v1yC7z4RcGkeYNLvjr/yJ56+hRv1IsNT8RsW5PmGR3//p39Ef0d7632Liyf3gYWirj/32p8+Pzt7 + vT8R9k9sXoFcDn8i8b2AHVzO3czmuD+G/Lg2x7jgHG8+5VmJxCI6OByffJU/b/r2177sKkJ1ch/l + Yuvpb1g+H+t+k3OGVtKrznOHz7lT4vJzqXEJ3ufIc8h53CQPMMprZr/qIMd9Kt/Cjf547/Br9uHM + VbbuST3UxJt8cqiw2q57U3HK6Q6S13mWnilZ/kXnfoZ+9Ls+ROhhv+r3fvKYUfeXDinoeuY3rOvn + 19/w8H0veFJ+buWK/KqS5saTsv67P/Jrz7aH3wQtnn+zT0KJTHHr8vR8okjzHD7pEge8/Dw0HqJs + vPTCZ2h4XT7lHcCyO0/7geFof9H6etTlEAAvI/8NfmJcZy3aHmWOPHV0zDt5ZZvHV9B+X9Kxv8O5 + rr0e4VcZQ7fsY0aCohkL43evOYea+80DUFW3n3NOY7a+7OjEOPDnnnxYeSb9RfVyvyvGsuz10ApJ + 1Vtw1+OHDXWjLR7lMY314D0uWlENHPd53vjleAH8cuSBnXtPQOuRvDrw4z6BoavFDneG/2XZ+d0P + nz/j5dsf+HVvN/LJe33Sv2GpNfw86/xR/wfSEb3FBMCHlUODaDhd4TKTRKKLTS/mqcsgt+OCc/w8 + HIDGrcgZuo7kM35/qWZdowwSgK8mHDEPe16qZSvtuARqgC+jP/dzqJeY5GF81888MZnHcZqFI9DD + /Q0/An354RfMRCs/4w66cqvroBuBtMdY8eRbOnR+huE3ozRXvOKCz0zeA3/si/CPKY/o9/W7v9E3 + a8BQPq+GDRx+zT7cWZUteaIz9QiPFmVHD9ex2rVCyXvRw8o6OF/XMfRrPUQnRNffeBSWvLq/dOiE + wms98D+Dws+tnvyHFau5PQ8sZv6KF78GIn4zPwE4/M0lhzZs+qGicJkdoLi8mMc4n4nj9GYqfmrf + eZi2D4/7tu1PPuMFVB0znvwVxyLaX7S+vspHNwD4PeqTbT65+TL6cz/2jzJXHsa3g7wxmWfx8mHU + t7D2HbZwehPQpKN4V37ujbolFGHkFdr82SccY8U7/7SVH5jdXPHCRYfMJuTrGpfgybPjR6TsU3Hw + pb5OQFzlDw99C0fBYkMf/Jr88JQfExyuxzrGLsecxGNeheOFPBUni3mW3SjVyzyic51Dv9ZDdKuj + tZ88ZvS94Zr74VVm/NzqBU/qz61UQL3cvgcWC3j4k/4Y/ln4h7nsTwCsrSkvgfd5Cv4EqdkBfO0h + 
PyKaB4fl0+PhhGfkyZkRJz9ekNDxyVdxdgz/5FM4N8qvSZeKK9elDErQ9QGh3covI33CSFzjDV95 + GG+BgGaemHtef1IS6LH4Fu7pb1hL7+hkQcd9GvqVkpp8TsDhV99Xe8qP6ZbuVZ1f3wuF44XntO6R + 8yy7Ucf7J3vh1rmTTpUqdO0njxl9b7jOPdH+Wx++51Of9J9buSK/3t4H1qs/+cHt2jN+H0R7sJ/0 + JVHb+sioTxQcAvc1MtuqffslfcX5cHhGvDWe7e9Ei679yVdxDjzEh68LKL+PWJ9QiOt6fZ1lK7/s + 8BfH6C9xnLtewtKH3gVlK0/lpV/bNQtHBo/Ft3B6E9AUDAvMKz/jDrpyq+ugWwHc7bHiybd06PxA + Ml3SnsSPOOVrdgYysuoMLnPt7/ixJ/tUXHgw91B/o+9ypE5XXvmpD35N/uVHIBzuu3QouxxzEg9T + VZlciSB5nWfpSSyH/OIdfZ7SQ3SqtONkKSF5te066FB+TQ9i+cVP1p+3chU3vt7eBxbr+dIX/TCE + +Op+0mPLmvISQHyIThV3M+O4P4b8iGieigvO8ebbnVnxaILD8clX+VmJ+OKv/dpWGe2v+n19xecy + dS2KH/HyF0/6SJ+w3Y/nrpe45NG7oGz1vXQzzPr4k5IMHjt9sLX6hSEY9Z75GXfQlVtdB90K4G6P + WX+fH7ydH2umsyozH/NH/5rJynxzlK08J/CPKY/oT/AXb+pk+vTlymOjTvwSrus0n0zJQ3/6WnGt + I9smvxShn9k4uFg6OM+yBSGKAcpTPLIXrvUQnTMxdu0njxl9b4TQBrxf/aFXvuCt9t6+19v/wGLv + X/aSb8InxDdYsjoiXha4/MnBJ79PQzNjaI/hfeNmXHCON5/9CO5F0cEmTpdC+WwLOOzFX3Gso/1F + W9ev6x224mWHvxrBJQt+zqPMlUe3s/Ki4kp/Y7xwZPBwfyuvbcdLj+Jd+Rl30JVb6Vdu8NMeY8Xv + z63zA2sdaq54xUWHzOQ98Me+CP+Y8oh+X7/7G32zBgzl82rYwOGXzyk8nlW25LnsXkWPEdd5XAfN + 2/ENCz9k/4aHX/kbvknl3OaXq/HAggjXf/rFX3t2vv29vsS8rNj3JwcOGzYvkWaKRnsM79s/44Jz + vPnsR3Avig42cTf/JJx1jTJUn23R1tux6x328jtftzH6S5zrOZGHfF0/+45pXTpeOAI9Ft/CuV/4 + BTNRxwt20JVU6Zdrvitpj7HiybfOrfMzDL8ZpbniFRd8ZvIe+GNfhH9MeUS/r9/9jb5ZA4byeTVs + 4PBr9uHOqmzJc9m9ih6uY7VrhZL3yf6GdX5+/e89/NZP/Vo1ewVeqPHVGf/u/Blnb3/Lt+Pofrs+ + IXFq+QTtNwXfHCeGP1HrUsC/iytbl5hvAtl46QXxvlzNo40BLPtGPzAc7S/avB25r8HZ/fgK2lZc + ITipbi+063ymrw0l2OMGb+XruEMdvb/DRWcV0Hm90KvqslzVz+i3Agw89cp3n/CeL3yYIFaq1Ln3 + ec34W+A/9bAptaPGxXkmf+W9oV7uNxOWZa+HVkiIwyi4eeohiIJoi0d5TOM6eY+LlvGLoCz6zaON + vBx5YOu8lcf5zLuvv/srnNIx6/n5az/4y3/9Fz4e/4v5lPiRzlfmG5Ya+YKzR86vP/OLsNafhG9x + cdl1uJkJjrgK9GHw8Hfi500iOA4JhMuPzXErRNf+5Fu8ytf+2pfdBcy7hzP3pVDdgixbaetWLD/r + WZcw+7t6ycNCmZfxIgpvTNqpOzgCPRbfwj39DWvpFZ2s87hPQ79SUpPPCTj88nFYVyrPcev3qs5v + xpkBr+NeKM+yBVEe5OO98ISZi4Vb5046VarQtc96idc2/3Do6z/4rE/4oqv0sGJl6ya7zqvx+s0/ + 
8svOzh/9Pqj3qfomUeL7NE6X7G8O9cnDxvKw0uHY1uHwEOXHSy/or8OW359Myw+gAMXDS1N2K9j+ + opWj4qSqL4nrBM8NfoHMyyX5NI16vaEEyh+76nE7h7hDns6/45/9rHjSK5wT8JNf9e3ac5xiji+H + 8+s3SdGLF+uegVe+MTsfESfGgT/35MPKM+mL9wYe7kdX4steD62QVL0FNw8fCuyPYe0QHU2qeJpH + ASK23zzJZIcJmkd5Fq77SF4p7jqUV/uwr5//xAef8azP3X7vr37C/qrjXd2Pwbha37BSOP5OaBzK + F+JE36lD5ZtFp7DEx0bQmnP4u0PxrVj+uiQ+HGz3godm2/HJ58Oct2nPX3HMoPp60qVjnOsSQAk6 + flwWejXSJ4zENT7tJg/ju37miWlgxwtHoMfiW7inv2EtvaOTBfX5SeahXympyToDh18+DusKxvJj + gsO6130ouxxzEg8DdR/NIIJ5nqf+cVB+8Y57cLhPro90WpkdibzPelHf9e2d2513fuFVfFixYNZ6 + dcffeeuLz+64/t2Q8eP7E1Zi31iyvwFQfF4engla07vYLe4+seXHi4EiW3DG8xOQ8QNYdudpv8J9 + CYBf28xbG84g2/EU/ug3j/JySSJN6UdmJ9jjXK/LPcQd8nT+Hf+xX6Yxj8J3dWQf86692q8yd1PO + oWa/eUteAFX3nPkmQn7harawRJ4YB/6c+4eVZ9JfVC/3oyvxZa+HVkiq3oK7nvTFsHaITulIh19U + U25RDVz7zSN3XoqgebCIjoS0HiJWBkVmH3+I+77za3f+5g/93k/5oVBetflqfsOKSl/xGT90fn72 + RfgbDR/R4eby0i/RA/Rh8NQjfvDB2Z6HhthxK3KGjq/LAL/iCCSgbIfFrhraX7T1NnQ8Mb505qdF + O/xaInBdwsQ13vBVB+NdCIIZF3PPqzzgzVh8C/f0N6zDOVAsCTruUwmYcwFAO7aBw6++F/aUHxMc + 1r3Ot+xyzEk8DGR6Dy4qDivnWXaBxO88oit74da5k06VKjT716+ff+Ds2h1fdJUfViz4aj+wWOGX + v+i1+Jr6ZVD5Ok9xffJT9DW8X58o/397VxtraVWdzzn3zgxQgQEKSP1ATSWhlaaJ/dNUrYxNTEYL + CRoxVRtpy4/GtJo2TfnRxFT7o/5ppLRNjKlaCtoMatKG4h/LxJg2/eE/EezwVQlWKylcYGBm7syc + t896nrX2Xvs97713mLngPTPvvtyz9seznrX22muv+56Zwx1Mc+xJZyiNJZkCcWZ+meMMDceksAkA + gxedMpZ+jN2Hsk4YckcGpE8P8JL843rwO0faX+jJH8P17DA7MccF43W7Diz6xJnHapVPhBpL3/YL + wrJPjghLfocjblDLDJQM+GuxT5z0xWfxpZVWZr8jDiGl6MwutsCXfQLe2BvSAyb8LUaS36FvaxVX + 48dzRdyIc37t0PBQwoL88Tj42BeyAEvwhiciCLv84QLCGBcU/ZW94keKn+yHP0RQ1ebn3WSOSnDz + 0ZvecH/w7VS58wuWRe53rrPPZ/2xnb5+omEOh5Gb5rXOI7Z1HqJwXMdC/EShuoCkibHW3Y7jQSR7 + jb7xJzdor8CQUmY3+ZvGUPN1w8s/OpH2F/ONvwYKO8ZHorATQ/EVfeIMqFb5Km58wuqdg4WKcU75 + lOLnkaRQnIHDl45DcQWBr0NgQXHfLK/8/LKeGEiQz1PnFXYIUh7RTsqDXj7JP6NjT+zMv+kfHnvf + m/5FTDv7dTkKlsXwlrd8Fn8i+Jnyk4VBr8HVvH7y2FFy7ElnKI0ltY7J0qlnaLjxCcvjxbhZ9Hpx + tSmLP+JnqR+X27rRdB7Og3PIY4YdwEb6eeqcHJ/0aC/ITW6B1znKvy3tkI47qRa4v7RvX4l9+M59 + 
X8Dhi3bcr7oORSzIn9iXxr6QBXnMVKFhhF3P5mmnjg1rTXGTXvEjxa/Eg7UqIkLVPz/2/jf8LXtL + 8NI7pSXw+AsP/NlsNvn00CXhKfsh5UMrp8/D0uFqHfstHfSbdSUF6JQMyro2uZBVWve4MclT0iwk + lwyU5Cnm/Rio73bNHc/agndY7IfryX83X/QanLtoovKJsI4dVPaRL+GiXw1/EwiEKc4hS9DT3SEZ + +kPS3WqE47bFTkPsg8yPKfObzeZTK09OQPB0uc5egetYbd3yBfoCYh0dH5fpsFTMWEd6Yk08fT8q + ndvxCeDkp8yFfTB98uhNV38q0ez4rsVg+doXH7gVSfA5OF/816Xzw8SOdJktOQThOrKiuZzMhVj3 + 3AE+P2FJ33ko+usePmalzJFWWVHsowNgsr+wLh76bd3sdx0WAy0u8fb1enbq/mPf/f3UeXqkYYpb + mbDthJupQ632hbcRei7L5bF94VvxSjLOKUkaMv2h5rzBH/K07GR+513goR/hORR8XN8WBon76xsU + jxcfhqMsMBA0Z3SWJ0FLqoQr6+Lhcrw4QeFBh+dtE2ixD3woFBOz3zt60+u/EKrLIi0uy9m++MAH + 4fyXcD/3lA2waPQuL0/Pt8kssLtll9TvWOnYBL4x1noctsa+kIUng/OYE7RfBOhSMbB1N1DsY0bm + wz/ISDJDG5/L7CbtxHpacPNFr8GRSS/F/mb8JQ7hRy+uRpX0o0iEGV4OrDcSi3R3SMY5DckgzdJx + DX/YG+LHHI/3dPidj+ZNPzXaj3PGvI3dElHVHOJnOE4QKJyPyzQjZOtU947HkaPEExCbB4EdR+Fh + xydiHRLF6hj8+MiRm66+J6kvTVc3Ymnc7Tn6Dw9cP+26r+MA/HfD+2ECxsseh+ZjHaoVEb9rpaOx + 4LZuh2+HnYA+1mXP68BYK+uupuvhhgjAi/RIu7BuGIP7kbiUvUwjv1pc4u3r9exUPtnROO+nzssh + vtIvhSP5h4lwt3aEb17jHFzyHABQHAYkcPQrSfKb/lBzXt1Wi48cOy07mX8jHvphcXB/fDz8ZOQw + uoV94asWF48A7bjbhBtOYwjr4dv3xVHlsdXS+jwYRxwNg3/M+NlusnLj0Zte962is2Qdi8tyty8+ + dN10euKbSIQrIqmbS8lD9G2aYC4PFy1liYpGFC2pF0XX9xTCbc30sq9LrBRLxY9RFk/xT+4wqbSM + 9UgyTBjOWsFrKAOxTkOBq365ootQ5DDxhV6Kh0HMbhMnw3nyc9n5kn9tIGwb9bLQfxuTBfRDMgI5 + JIFfaI7bFjsL5JjI/DYMjM2npqLo54x5G9cdFhrFAzvnOgPgONpxnOmHpWJGONnRehS95Ib4wet0 + bscnjHc+f+rk6vT69Ruu5m/4zbrL1F+evyXcKKq3XPvdbmX2a7hkj9hx1ssdp2fZYXfQxpJMAZsu + HZvXWPriCbwr9vSDz9jRCr/T+rWkXQHwmvzjuvvFdVt2u+iGnvxx/wwXdky/+G96MbSNJH3iDKhW + +SpOxRnrhImo2je95DcDhanihy1DMeYNjlb1ja/dF90GppGuT73AhxShvda2Bb7sExpb2gEm/C0G + uL+0b1+ouBo/s8Ai0vDEOiYZnvTDy8e+kAV5zFQNp/Gk+AERP0wNF01xkx73m/3vJo/Op7NfXfZi + ZXtVVGPXyyzvfOSK6fwYnrQ6PHHZoXoS5suks2dyah2g0jEFjaXvyYp1JSk7bXKZHZ+GpgHb9YXk + koHin8w5f+inS2J8nI79cCg7Pp/9d/MtX+Bc1USxvxm/7xvC+Rb9wgJZGZ8mEAiDF5tGAk2+IRn6 + Q5JWei+Oa/jhT30Sgd+naqdHzWHmd54yn/CyZ+eD+BjO9IplG1v8PB6WD5wgUDgfl2lGyNbxzSa+ + 
ui+z4zwBgeQ67bh1N4z57xzZvWv/ZP9VTyX40naX/wkrQv/bP/+TrrvgHUiOgzxiZYmyBRhdKkmt + Y7J0HIaxLrMup61TTx3ite7zXHcHaE88pPWklb5hdH2KPsfB7xxIssBnST5TtxZ2TL8smF4MBSz6 + xBlQrdg3BTSNpU8+5y36hMkvmZNeGNQyVpxPVuq+yNPbF3kAbGTyx255X6/PH+MGF3plX/DjVOw4 + HqI2BjTt21ciLmKOfQKHryY+tGzrUGR4vKiRt+oRwPWIh+Lr4YCyCMKu7Hh8qregAS7zYIzfuvCt + Iyurv362FCvbrqKTNr703fse3jP7ybEv4QQ/GMmsrPHdluSxpMEcxsPrnqyOz9nguQE1+0lX1cnj + Y9IyiXNy1eTTepj3YyCx26Vbmqed5Gb4G0laxwN8iQddtsq3CT/3kS/hol/VLniaQNgw4pckrDf7 + zuPQH5Jyu3113LbYaZk1yvyYMb/ZbD412o9zxryNkRHWI8ppFA/DcYJA4XxcpsNSMSM+2TFWxDN4 + aEEvXI9jwBT+R+Z7jux+/Ucm+6fHEmzpu8rYpd9GbwP4P6Zn//jQ5/A3iLfmS6VDjUsIHeaCQuC1 + AnBb1yWr6wASYKK/7rbLutNG0to8m0m/vOhZ0hFZ1gkiP3s+X4uL1rMfFZd4+3o9O5Uv9t3fT50X + v+z29aofyS/vLoh6axnHevksDhGvJIGnvSRlr9zi1kSPH1Xh9O1k5o14bL54bhvQuL4tDBL31+Ha + N+INvB2TSfKY5DhYsR60pEo4m7c8wpf0CdCL8+CjC589csPr/ghGTPGsahaXs7at3Png7+JQ70Dy + n89NMguQFMgWpoCPmT0GaNbj0gjP68QsszFH4rFcMz1r/XVLKkuuCgDIeQ3Okak5AfXTus83/kIn + DFKvbKSYb/kIDwdNWfakpvlBfu4r4iRPKy7563y8tLEPzOlS6lJRzy6pzRt+SPKyYWVIAr/QHLct + dhbIMZH5bRgYm0+N9uOcMW/jusNCo3gYjrwECufjMh2WihnxyY75YXF0HjBEk93JEfyDER8/euPr + Ph/zZ5v0zDvbtpX2c9dD181Odl+bzaZv5qHjUjEFyq1RCLxW8LKXIpOBBMRlV1ERkdsq60ruxSco + GSyXHklHpOmlZutsLgs+YMVOxm1S5Hp2Kp/0Nc77qfPyI9yJuCW7iE+4WzvCN6+8jdBzWS9fvdrG + qqtpMN9PkuQ3/aHmvMEf8rTsZH7nXeChH+ExFHysYuJD8ri/vjHxqNjwGIOHdqQnVuw/aAsPZoiH + wFe/aIH70RPT1RvXb7hqqT+2wO1u8nL2/KH7Rpv88LXfnZ+YvRWfQ/mqZUFJvno7lBIDSRV4XquS + VJFMUKg5VLLNYUwqA5g9NRko9nk95U9xHVkc+Cyb5A0/TL8smF4MZa/oE2dAtWLf/dJY+uRz3qJP + Ovklc+IPg1rGStlntWM98vT2RR6sNTL5Y8Wpr9fnj3GDCz232/CHvSE74SdkaYZzvuCxtYgLeoRq + DH/xRVyJQ6wDhgXDlSLjY1/IgjyyQ3q8GI/Hg6PEE5DJ5K4Xj+355bO9WNl2LcbnTLO3iEjCO7Dh + 85GLngsKgY2Vo558nAAmgD5efCLx8JX1SDE3QH3DaFyecHwsfucwVOBdFrzclD+8Sz5BHJIYkqnd + 1+vZGcZJ3wnoTPVDvvX16Df9cN/dro9a4ZffA+yXN+I0IO1y236SlD3b4UDr8Z+RnUzvvCo2yU+b + j7ga3se1aAWJ++vwXLQsXDYmD+2IRlliRclpSZVwNo8vK374g/Wj+F1Wt+It4F2EnQMvnvXnwE5j + i3iLuNp1X8ORv5l3jLngYVC2+OWPSxNFBEBmWRF+qTQmfX8dScXkoiFDyEC5/JiR+bAPieQt665X + 
x7RSDNq8E3ChDhOfWS32pV/5hKvjxM/t5iK46BfjEfy8dG4Xc7nYkN/2ZfOGH5KhPySBX2iO2xY7 + C+SYyPw2DIzNp1aKEBDcF9djh4VG8QCCRUpAsKBDO0WAxfmLGet48UavFKv5/Jx4C5hCze7Z/5aw + v2O8RTyxPnsrcuBLzI1SFJgNXix0OW1dl50dyxZf93mfpgmvFqVoWDJCQfqGqGOoYWRj52EPL0je + wGdJvOCmID9MvyyYXgwFLPrEGVDN5qVWcSqqWCdMREWfMPmV9cKglrFiDqRW9Y2v3Rd53JxpZV7q + BT6k8fb4Y7wR3uZP2Q7pW/+1v7Rv8wEt9oVeGgOHr7yPug4YFuSPx8HHvpAFeWSH9HhRhMKuv628 + 68WLzo23gBGFkL1TiulzQ67c/dBH8cth/w7JMPC3iJ6smyWXXQque7zsUvkYwpPYk5SQmnxaJ7xe + Auq7XdO3sUvi47TSvN8Sxw3wuT4B/qLLswU/95Ev4aJfcNDtQjaBsGHEL0mgm33ncegPSfe7EY7b + FjsNsQ8yP6bMbzabT+2VfMKCF0dPTrtbj7733HkLmELNblyB/vy5M7a3iJPJ13CJ8beIusTlbRxv + l4WICy7sEusSMosjgiw2pq/k1hOU65VQp0vvPznj0kfAo0jFfC0ujih23DCLRuLlOPYB2bNT+aSv + cd5PnS9uo9PXi426OW08NtGXfvmjqOmSR5wGJPC0l6Ts2YEMtB7/GdnJ9M674K/NR1wN72O9XfMh + edxfh5fiBjyPMXhMWt5IKL+C1nnw0cLvTbvpzYfP8r8F5HY3efGs3wRxLiz9/fcvXNkz/Wt8zu6j + VoTyJWYSeVFQluluMgcxr3UPErMwrSMLS3EjxMKdigtHsqdlZW2/ONQxUTKArs2bv/TLRfhFJP1O + /JzUOOMG+QGo8z2/nbfald8xNjO8nMA10ubxLbaejEAOSSPsN8c1/GEP2Jdkp89t48zvfITZfGql + CNm5Yt7GdWeFRnGwfCAvgcL5uEzTc1vHNxsz6K4XfuaqW/GvMB+N2XNVWozH5hFYvfP7+6az6R3d + tPuFthgge+ySUqQiFLlp+mVdudZ/sokkLkUgrq3ppUa7Nvb5gg9YseMTxKUi2Nfr2al80tdY+rwk + SZ9uFTO2b3OrTHg8iCr++qgVvI3Qc6lLHnEakMDRryQVX/NgoPX4z8hOpt/IX5uPuBrexy/DE9aD + k9nsDw7vf/WO/9dscthezr5n38tpYsm4D3aru/7nvz6Gvy7+FD5sepFy1sKEJLXLWoVfqnRX++v2 + ExVf5ZJ7kqtI1JQv69TfpPjEaXnRoB7vjhbcfLWXcekYiv20nmjSPnORWvSL8QCv/PDAuB0WJfA3 + UlEsV92jqrEXhyg2jUy+l67jG/6w91LtFNLUyfzOx1WbT03F188Z8zauJ6taZmEuOPISKJyPy7Ql + 2GTyHMafxFPV3+Cp6oRNjE0RiCswxqMfgQOPv3rX+vrtk9n0A7lK6bJ7EYrcNF1mZRG4hH4dbZ5N + 41IsFtYdFXiXBV9o0IFdFglTIW6xmBS9np0y3/Dn/chQ5a9+cbvZP/qhdfnh/b7gbTS/paDLy22E + d60Ejn4mSX7TH2rOG/whT8tO5nfeBR76YXFyf3y8HU9YID2A3wr68Rfec8WPsytjXxFQdo7R2DAC + q1/G28RudscEbxN1aVJRwuWNu0ICu8x+iS2VrWiNT1ip+CAmisuAjEAOyaHTcRyLCc/hDOxsxe9+ + E2Z2U1Mx25YnrAeRX+PbvxTboe5YsIai0p+zt4n/+/DHupP+NhHXjk8gcfsMP1CsSvUin34ilyec + eKYwvdTqk43mCz5gxY5PUN8vK3hCv+j17JR5t6tx3k+1S7eKGbuUlT/vV7hwkKP2pVeEFp5YjBff + 
Ec6hIiR7bbEoRnr88VPktOwUUnMI9hCnBR4WrfDYcfHDKYbkcX99Y+JxPqjbGAaem8/n49s/xmvr + l02ybGvlcw5hbxNPrN+OS/4BphqTmTmtUFgRwIILpLD/5LUJNpOpuHBkeF+nYlr3+VpkxEID6FJP + jnCB6jFvM0mfAH+pfLJbxwXg+8hFatGvhj+KhlPwcsJ+I7FGd4dk6A9J52yE4xr+sDfEjzlG/3T4 + nY/2TT812o9zxryN3RJR1RziZzhOENfBnzvn3cpt49u/FNAtun5TtkCNy00EVr/8yL7ZrLsDtw9v + E33Jq4ULJqff+gBApktPRSS3KaTGImRjn9+smFCNuMTb1+vZqXyyq7H0VU3qvPj5Cndy8XL/svtu + V+jea7213JcuOaMT3rUSeNpLkvEwnqHW40dVOH07mX8jHvphcXJ/fHyqf4aF/wfwwenK7Nbn333l + f2RzY3/rCLS3ZWv8iIgIHOhWdp98+Gbk6p/ioxC/xGm7tH6JLZXHJ6zxCctrnp6suul38fuqPvPC + 4Sv+afKB6clIpVGeegTGgnXqsdoQufKVR9+DJ67b8Psd3+Y1i8WqVC9q6idyeVKJZ4rek4mtCy5Z + 8HFSpShm3PiEVZ7AGJ+d9YTVzSf/PlmZ/eXz77783g2TaFw4pQjENTgl8AjaPAKrBx59G35Z4G3T + 2eQ94xPW4ts6SzbFZUDWRxG9Hc7jobD7Ot9eokg18qXa2Yrf+Qgzu6np7a29XcZ+MW9je7a2HaJ/ + 34lu8ukj+1/9n0ll7J5BBMaCdQbB20h194FHr8P/VH0bPgpxM56QVoRTEpcnpvEJa9PixTjh8ocs + T1BDQffihQrRFDsVk4HiCI4oLsEfsqF3vgUem4/zMwUfW9HCf3ir1x2Yz1f/4vD+yx5s+MbBGUdg + LFhnHMJNCA789xt3zY//yXQ6uwUPAedFkvNyQE0lDNLexljztzNl3efrWDDiCIce7470qR7zBk36 + NoxW+UIv/kDdEU7U4hbfdjb8USycgpccPI3EWr7qZr2MQ39IOmcjHNfwh73Mi/6WdhpiH2R+5+OK + zaemYmbxm78AS59fX9312aPvuuQHCTJ2tzECythtJBypBiLw5ceu3L3afQIrvz+bTi/WJfVr5EUl + tKxIsLmsRcMRNg+CFrdYTIpeXNcFPtkRTvqqHnVefoQ7UdSSf/Qj+eXdBdErQrrkqVhBwVgVF0jg + 6VeSLI69YlHs9PjjSeu07BRSc0gbXOChH/IY3afxj2rdMb1gz+3Pvn3vM1l97G9/BDz7tp94ZByI + wIFHL95tRWsy/QQ+hHNluaS5SOEGlGKzUGScM823RcYvfeaDivHldkr8XhRZRKi/6BeLSPD75Q47 + Q0Wn7BegUpxM38ahPySDNEvHbYudzBv9zI8585fN5q11kyfR+6vn1n/2c5PfnL6oyfH15Y5Am8kv + t7WRv0Rg99efeMvsxPEP4dL/Fr5fHwuluKSixMsdJ2XzmGhxi8WkFKV4dlngE6Fw0lcVqfP0ye0W + PudhsaIf7nnMx0ay7BWhhScWYM0M92kSeNpLUvYMMdB6/Bs+GUF1SzuZ3nmTv0+A4CsnT87uOvzu + Sx/I0LH/ykTA0/GVMTZaGYgAfjPbeV99Yt98Mv8Q7vz7cCAX9YtDHbt+Kj5tkfFLH8Uj45Lpyqfj + r+PE70WRRYQ8i0WRRQQqph9FIswMFR2zVoqS6eXxBkWHvEGaZS4msN/Yy7zob2kn80Y/+OfdM/jL + k3tOzlbvPrxv77ex2Q2qZiiO8uWMgJ3l2HZKBA4+ft7up2fvXem6D+Et434Ugt2bFRO6vUExKXpx + XYlTcalFKMbjExbjZUUKDWIdJfAb+Fly97OvufSfJ784XefC+PJTj8BYsH7qR7CBA/f+4JLz1ifv + 
xyPJh/FJ+rcDZXdKLRWf8Qlr8e1j80RlgcN3ebLzJ6fyRBhFChDgvg22uyd7ZveMf4DuubbDRFyB + HebW6E4Tgft+dPn568fxa266fd1sdj3elOCfKPOjo1x8uzY+YfnbRASyFCv0c/HCb0l4ZDad3Y8P + Th2c7971b4ffceFTTdzHwY6LwFiwdtyRbO3Q+fc9+dru+ORd+Dfa9uE22q91fq1pWe3Kb/c4wfn2 + mEsxS09q0nPbTtTiFotiwx9PLk7R/JkS+NIfXDdFoxST0B+SztkIx700O92T3by7fzJbOXhsNv/m + kesve7LhHAc7PgJtJu94d0cHhyKw51+fvGblJJ68Jp0Vr+uBubwUmygPC8VJRy+cipFXO5qweXVc + WNFBt85jHRMBqx3hm9deETqV4kW/oBeS/MYz1Hr88Xavm89/BP2D80l3cNKt3P/sb+x9bEh9nFue + CHhWLo/Do6dbRAB/Uvyqb/z4WvwPItdPZngCm3TvxCftL81aLAKYMGmtjjlUFWIxykVq5z9hwfGn + 8Ynzb+Fzbvcfn3YHD++77Hu+o1GcJREYC9ZZcpAbbgMF7Px7/+/nVqbHr5lMZ9fgf8y+Bk8g7OPh + 60345P0q/8ddK156hCJVFDM+oGGmFjVPGce/0k9YcPEE/Hwcny44hBJ6CNs7BPcOrc92HzryzvN/ + CEc3eAzbMELjwhJFYCxYS3RY2+4qfvXzRcefeuNkPkUBQyHDNxLiGpSna3DrX4Nixtuv4vXKPWF1 + MDvt5j/sJrNDkIcwftgK1ImTq4eeX7nwsfFfktn2TFgawrFgLc1RvcKOfqfbdeHzz188Pd5dMpkd + 3zudrO6dTE/unUxWICeXwJu9+EN/jPGNMYqa+jEnd9cg1vBEt4aiszaxD2HOpmvzebcG3TX8Mru1 + ldnkGfwt3RrK4Rp41ubT2drhC161NvmV6XFRjK9jBGoE/h//xb5CiJqhJQAAAABJRU5ErkJggg== + installModes: + - supported: true + type: OwnNamespace + - supported: true + type: SingleNamespace + - supported: true + type: MultiNamespace + - supported: true + type: AllNamespaces + install: + strategy: deployment + spec: + deployments: + - name: clickhouse-operator + spec: + replicas: 1 + selector: + matchLabels: + app: clickhouse-operator + template: + metadata: + labels: + app: clickhouse-operator + spec: + containers: + - env: + - name: OPERATOR_POD_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: OPERATOR_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: OPERATOR_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: OPERATOR_POD_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: OPERATOR_CONTAINER_CPU_REQUEST + valueFrom: + resourceFieldRef: + containerName: 
clickhouse-operator + resource: requests.cpu + - name: OPERATOR_CONTAINER_CPU_LIMIT + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: limits.cpu + - name: OPERATOR_CONTAINER_MEM_REQUEST + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: requests.memory + - name: OPERATOR_CONTAINER_MEM_LIMIT + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: limits.memory + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: docker.io/altinity/clickhouse-operator:0.26.0 + imagePullPolicy: Always + name: clickhouse-operator + - image: docker.io/altinity/metrics-exporter:0.26.0 + imagePullPolicy: Always + name: metrics-exporter + serviceAccountName: clickhouse-operator + permissions: + - serviceAccountName: clickhouse-operator + rules: + # + # Core API group + # + - apiGroups: + - "" + resources: + - configmaps + - services + - persistentvolumeclaims + - secrets + verbs: + - get + - list + - patch + - update + - watch + - create + - delete + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - patch + - update + - watch + - delete + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + # + # apps.* resources + # + - apiGroups: + - apps + resources: + - statefulsets + verbs: + - get + - list + - patch + - update + - watch + - create + - delete + - apiGroups: + - apps + resources: + - replicasets + verbs: + - get + - patch + - update + - delete + # The operator deployment personally, identified by name + - apiGroups: + - apps + resources: + - deployments + resourceNames: + - clickhouse-operator + verbs: + - get + - patch + - update + - delete + # + # policy.* 
resources + # + - apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - get + - list + - patch + - update + - watch + - create + - delete + # + # discovery.* resources + # + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch + # + # apiextensions + # + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + # clickhouse - related resources + - apiGroups: + - clickhouse.altinity.com + # + # The operator's specific Custom Resources + # + + resources: + - clickhouseinstallations + verbs: + - get + - list + - watch + - patch + - update + - delete + - apiGroups: + - clickhouse.altinity.com + resources: + - clickhouseinstallationtemplates + - clickhouseoperatorconfigurations + verbs: + - get + - list + - watch + - apiGroups: + - clickhouse.altinity.com + resources: + - clickhouseinstallations/finalizers + - clickhouseinstallationtemplates/finalizers + - clickhouseoperatorconfigurations/finalizers + verbs: + - update + - apiGroups: + - clickhouse.altinity.com + resources: + - clickhouseinstallations/status + - clickhouseinstallationtemplates/status + - clickhouseoperatorconfigurations/status + verbs: + - get + - update + - patch + - create + - delete + # clickhouse-keeper - related resources + - apiGroups: + - clickhouse-keeper.altinity.com + resources: + - clickhousekeeperinstallations + verbs: + - get + - list + - watch + - patch + - update + - delete + - apiGroups: + - clickhouse-keeper.altinity.com + resources: + - clickhousekeeperinstallations/finalizers + verbs: + - update + - apiGroups: + - clickhouse-keeper.altinity.com + resources: + - clickhousekeeperinstallations/status + verbs: + - get + - update + - patch + - create + - delete diff --git a/deploy/operatorhub/0.26.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.26.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml new file mode 100644 index 
000000000..03bb8e057 --- /dev/null +++ b/deploy/operatorhub/0.26.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml @@ -0,0 +1,1527 @@ +# Template Parameters: +# +# KIND=ClickHouseInstallation +# SINGULAR=clickhouseinstallation +# PLURAL=clickhouseinstallations +# SHORT=chi +# OPERATOR_VERSION=0.26.0 +# +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clickhouseinstallations.clickhouse.altinity.com + labels: + clickhouse.altinity.com/chop: 0.26.0 +spec: + group: clickhouse.altinity.com + scope: Namespaced + names: + kind: ClickHouseInstallation + singular: clickhouseinstallation + plural: clickhouseinstallations + shortNames: + - chi + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status + - name: version + type: string + description: Operator version + priority: 1 # show in wide view + jsonPath: .status.chop-version + - name: clusters + type: integer + description: Clusters count + jsonPath: .status.clusters + - name: shards + type: integer + description: Shards count + priority: 1 # show in wide view + jsonPath: .status.shards + - name: hosts + type: integer + description: Hosts count + jsonPath: .status.hosts + - name: taskID + type: string + description: TaskID + priority: 1 # show in wide view + jsonPath: .status.taskID + - name: hosts-completed + type: integer + description: Completed hosts count + jsonPath: .status.hostsCompleted + - name: hosts-updated + type: integer + description: Updated hosts count + priority: 1 # show in wide view + jsonPath: .status.hostsUpdated + - name: hosts-added + type: integer + description: Added hosts count + priority: 1 # show in wide view + jsonPath: .status.hostsAdded + - name: hosts-deleted + type: integer + description: Hosts deleted count + priority: 1 # show in wide view + jsonPath: .status.hostsDeleted + - name: endpoint + type: string + description: 
Client access endpoint + priority: 1 # show in wide view + jsonPath: .status.endpoint + - name: age + type: date + description: Age of the resource + # Displayed in all priorities + jsonPath: .metadata.creationTimestamp + - name: suspend + type: string + description: Suspend reconciliation + # Displayed in all priorities + jsonPath: .spec.suspend + subresources: + status: {} + schema: + openAPIV3Schema: + description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters" + type: object + required: + - spec + properties: + apiVersion: + description: | + APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: | + Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + type: object + description: | + Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other + properties: + chop-version: + type: string + description: "Operator version" + chop-commit: + type: string + description: "Operator git commit SHA" + chop-date: + type: string + description: "Operator build date" + chop-ip: + type: string + description: "IP address of the operator's pod which managed this resource" + clusters: + type: integer + minimum: 0 + description: "Clusters count" + shards: + type: integer + minimum: 0 + description: "Shards count" + replicas: + type: integer + minimum: 0 + description: "Replicas count" + hosts: + type: integer + minimum: 0 + description: "Hosts count" + status: + type: string + description: "Status" + taskID: + type: string + description: "Current task id" + taskIDsStarted: + type: array + description: "Started task ids" + nullable: true + items: + type: string + taskIDsCompleted: + type: array + description: "Completed task ids" + nullable: true + items: + type: string + action: + type: string + description: "Action" + actions: + type: array + description: "Actions" + nullable: true + items: + type: string + error: + type: string + description: "Last error" + errors: + type: array + description: "Errors" + nullable: true + items: + type: string + hostsUnchanged: + type: integer + minimum: 0 + description: "Unchanged Hosts count" + hostsUpdated: + type: integer + minimum: 0 + description: "Updated Hosts count" + hostsAdded: + type: integer + minimum: 0 + description: "Added Hosts count" + hostsCompleted: + type: integer + minimum: 0 + description: "Completed Hosts count" + hostsDeleted: + type: integer + minimum: 0 + description: "Deleted Hosts count" + 
hostsDelete: + type: integer + minimum: 0 + description: "About to delete Hosts count" + pods: + type: array + description: "Pods" + nullable: true + items: + type: string + pod-ips: + type: array + description: "Pod IPs" + nullable: true + items: + type: string + fqdns: + type: array + description: "Pods FQDNs" + nullable: true + items: + type: string + endpoint: + type: string + description: "Endpoint" + endpoints: + type: array + description: "All endpoints" + nullable: true + items: + type: string + generation: + type: integer + minimum: 0 + description: "Generation" + normalized: + type: object + description: "Normalized resource requested" + nullable: true + x-kubernetes-preserve-unknown-fields: true + normalizedCompleted: + type: object + description: "Normalized resource completed" + nullable: true + x-kubernetes-preserve-unknown-fields: true + actionPlan: + type: object + description: "Action Plan" + nullable: true + x-kubernetes-preserve-unknown-fields: true + hostsWithTablesCreated: + type: array + description: "List of hosts with tables created by the operator" + nullable: true + items: + type: string + hostsWithReplicaCaughtUp: + type: array + description: "List of hosts with replica caught up" + nullable: true + items: + type: string + usedTemplates: + type: array + description: "List of templates used to build this CHI" + nullable: true + x-kubernetes-preserve-unknown-fields: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + spec: + type: object + # x-kubernetes-preserve-unknown-fields: true + description: | + Specification of the desired behavior of one or more ClickHouse clusters + More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md + properties: + taskID: + type: string + description: | + Allows to define custom taskID for CHI update and watch status of this update execution. + Displayed in all .status.taskID* fields. 
+ By default (if not filled) every update of CHI manifest will generate random taskID + stop: &TypeStringBool + type: string + description: | + Allows to stop all ClickHouse clusters defined in a CHI. + Works as the following: + - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. + - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s. + enum: + # List StringBoolXXX constants from model + - "" + - "0" + - "1" + - "False" + - "false" + - "True" + - "true" + - "No" + - "no" + - "Yes" + - "yes" + - "Off" + - "off" + - "On" + - "on" + - "Disable" + - "disable" + - "Enable" + - "enable" + - "Disabled" + - "disabled" + - "Enabled" + - "enabled" + restart: + type: string + description: | + In case 'RollingUpdate' specified, the operator will always restart ClickHouse pods during reconcile. + This options is used in rare cases when force restart is required and is typically removed after the use in order to avoid unneeded restarts. + enum: + - "" + - "RollingUpdate" + suspend: + !!merge <<: *TypeStringBool + description: | + Suspend reconciliation of resources managed by a ClickHouse Installation. + Works as the following: + - When `suspend` is `true` operator stops reconciling all resources. + - When `suspend` is `false` or not set, operator reconciles all resources. + troubleshoot: + !!merge <<: *TypeStringBool + description: | + Allows to troubleshoot Pods during CrashLoopBack state. + This may happen when wrong configuration applied, in this case `clickhouse-server` wouldn't start. + Command within ClickHouse container is modified with `sleep` in order to avoid quick restarts + and give time to troubleshoot via CLI. + Liveness and Readiness probes are disabled as well. 
+ namespaceDomainPattern: + type: string + description: | + Custom domain pattern which will be used for DNS names of `Service` or `Pod`. + Typical use scenario - custom cluster domain in Kubernetes cluster + Example: %s.svc.my.test + templating: + type: object + # nullable: true + description: | + Optional, applicable inside ClickHouseInstallationTemplate only. + Defines current ClickHouseInstallationTemplate application options to target ClickHouseInstallation(s)." + properties: + policy: + type: string + description: | + When defined as `auto` inside ClickhouseInstallationTemplate, this ClickhouseInstallationTemplate + will be auto-added into ClickHouseInstallation, selectable by `chiSelector`. + Default value is `manual`, meaning ClickHouseInstallation should request this ClickhouseInstallationTemplate explicitly. + enum: + - "" + - "auto" + - "manual" + chiSelector: + type: object + description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate" + # nullable: true + x-kubernetes-preserve-unknown-fields: true + reconciling: &TypeReconcile + type: object + description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side" + # nullable: true + properties: + policy: + type: string + description: | + DISCUSSED TO BE DEPRECATED + Syntax sugar + Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config + Possible values: + - wait - should wait to exclude host, complete queries and include host back into the cluster + - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster + enum: + - "" + - "wait" + - "nowait" + configMapPropagationTimeout: + type: integer + description: | + Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod` + More details: 
https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically + minimum: 0 + maximum: 3600 + cleanup: + type: object + description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle" + # nullable: true + properties: + unknownObjects: + type: object + description: | + Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator, + but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource. + Default behavior is `Delete`" + # nullable: true + properties: + statefulSet: &TypeObjectsCleanup + type: string + description: "Behavior policy for unknown StatefulSet, `Delete` by default" + enum: + # List ObjectsCleanupXXX constants from model + - "" + - "Retain" + - "Delete" + pvc: + type: string + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown PVC, `Delete` by default" + configMap: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown ConfigMap, `Delete` by default" + service: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown Service, `Delete` by default" + reconcileFailedObjects: + type: object + description: | + Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile. 
+ Default behavior is `Retain`" + # nullable: true + properties: + statefulSet: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed StatefulSet, `Retain` by default" + pvc: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed PVC, `Retain` by default" + configMap: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed ConfigMap, `Retain` by default" + service: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed Service, `Retain` by default" + macros: + type: object + description: "macros parameters" + properties: + sections: + type: object + description: "sections behaviour for macros" + properties: + users: + type: object + description: "sections behaviour for macros on users" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + profiles: + type: object + description: "sections behaviour for macros on profiles" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + quotas: + type: object + description: "sections behaviour for macros on quotas" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + settings: + type: object + description: "sections behaviour for macros on settings" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + files: + type: object + description: "sections behaviour for macros on files" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + runtime: &TypeReconcileRuntime + type: object + description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle" + properties: + reconcileShardsThreadsNumber: + type: integer + minimum: 1 + maximum: 65535 + description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default" + reconcileShardsMaxConcurrencyPercent: + type: integer + minimum: 0 + maximum: 100 + description: 
"The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. 
+ enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + host: &TypeReconcileHost + type: object + description: | + Whether the operator during reconcile procedure should wait for a ClickHouse host: + - to be excluded from a ClickHouse cluster + - to complete all running queries + - to be included into a ClickHouse cluster + respectfully before moving forward + properties: + wait: + type: object + properties: + exclude: + !!merge <<: *TypeStringBool + queries: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries" + include: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster" + replicas: + type: object + description: "Whether the operator during reconcile procedure should wait for replicas to catch-up" + properties: + all: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up" + new: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for new 
replicas to catch-up" + delay: + type: integer + description: "replication max absolute delay to consider replica is not delayed" + probes: + type: object + description: "What probes the operator should wait during host launch procedure" + properties: + startup: + !!merge <<: *TypeStringBool + description: | + Whether the operator during host launch procedure should wait for startup probe to succeed. + In case probe is unspecified wait is assumed to be completed successfully. + Default option value is to do not wait. + readiness: + !!merge <<: *TypeStringBool + description: | + Whether the operator during host launch procedure should wait for ready probe to succeed. + In case probe is unspecified wait is assumed to be completed successfully. + Default option value is to wait. + drop: + type: object + properties: + replicas: + type: object + description: | + Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated + properties: + onDelete: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop replicas when replica is deleted + onLostVolume: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop replicas when replica volume is lost + active: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated + reconcile: + !!merge <<: *TypeReconcile + description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side" + defaults: + type: object + description: | + define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level + More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults + # nullable: true + properties: + replicasUseFQDN: + !!merge <<: 
*TypeStringBool + description: | + define should replicas be specified by FQDN in ``. + In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup + "no" by default + distributedDDL: + type: object + description: | + allows change `` settings + More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl + # nullable: true + properties: + profile: + type: string + description: "Settings from this profile will be used to execute DDL queries" + storageManagement: + type: object + description: default storage management options + properties: + provisioner: &TypePVCProvisioner + type: string + description: "defines `PVC` provisioner - be it StatefulSet or the Operator" + enum: + - "" + - "StatefulSet" + - "Operator" + reclaimPolicy: &TypePVCReclaimPolicy + type: string + description: | + defines behavior of `PVC` deletion. + `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet + enum: + - "" + - "Retain" + - "Delete" + templates: &TypeTemplateNames + type: object + description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource" + # nullable: true + properties: + hostTemplate: + type: string + description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`" + podTemplate: + type: string + description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + dataVolumeClaimTemplate: + type: string + description: "optional, template name from 
chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + logVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + serviceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" + serviceTemplates: + type: array + description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" + nullable: true + items: + type: string + clusterServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`" + shardServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`" + replicaServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse 
cluster described in `chi.spec.configuration.clusters`" + volumeClaimTemplate: + type: string + description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + configuration: + type: object + description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource" + # nullable: true + properties: + zookeeper: &TypeZookeeperConfig + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/` + `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separatelly look examples on https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/ + currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl` + More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper + # nullable: true + properties: + nodes: + type: array + description: "describe every available zookeeper cluster node for interaction" + # nullable: true + items: + type: object + #required: + # - host + properties: + host: + type: string + description: "dns name or ip address for Zookeeper node" + port: + type: integer + description: "TCP port which used to connect to Zookeeper node" + minimum: 0 + maximum: 65535 + secure: + !!merge <<: *TypeStringBool + description: "if a secure connection to Zookeeper is required" + availabilityZone: + type: string + description: "availability zone for Zookeeper node" + session_timeout_ms: + type: integer + description: "session timeout during 
connect to Zookeeper" + operation_timeout_ms: + type: integer + description: "one operation timeout during Zookeeper transactions" + root: + type: string + description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)" + identity: + type: string + description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper" + use_compression: + !!merge <<: *TypeStringBool + description: "Enables compression in Keeper protocol if set to true" + users: + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure password hashed, authorization restrictions, database level security row filters etc. + More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers + + any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets + secret value will pass in `pod.spec.containers.evn`, and generate with from_env=XXX in XML in /etc/clickhouse-server/users.d/chop-generated-users.xml + it not allow automatically updates when updates `secret`, change spec.taskID for manually trigger reconcile cycle + + look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + + any key with prefix `k8s_secret_` shall has value with format namespace/secret/key or secret/key + in this case value from secret will write directly into XML tag during render *-usersd ConfigMap + + any key with prefix `k8s_secret_env` shall has value with format namespace/secret/key or secret/key + in this case value from secret will write into environment variable and write to XML tag via 
from_env=XXX + + look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + # nullable: true + x-kubernetes-preserve-unknown-fields: true + profiles: + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure any aspect of settings profile + More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles + # nullable: true + x-kubernetes-preserve-unknown-fields: true + quotas: + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure any aspect of resource quotas + More details: https://clickhouse.tech/docs/en/operations/quotas/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas + # nullable: true + x-kubernetes-preserve-unknown-fields: true + settings: &TypeSettings + type: object + description: | + allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings + + any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets + look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + + secret value will pass in `pod.spec.env`, and generate with from_env=XXX in XML in /etc/clickhouse-server/config.d/chop-generated-settings.xml + it not allow automatically updates when updates `secret`, change spec.taskID for manually trigger reconcile cycle + # nullable: true + x-kubernetes-preserve-unknown-fields: true + files: &TypeFiles + type: object + description: | + allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + every key in this object is the file name + every value in this object is the file content + you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html + each key could contains prefix like {common}, {users}, {hosts} or config.d, users.d, conf.d, wrong prefixes will be ignored, subfolders also will be ignored + More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml + + any key could contains `valueFrom` with `secretKeyRef` which allow pass values from kubernetes secrets + secrets will mounted into pod as separate volume in /etc/clickhouse-server/secrets.d/ + and will automatically update when update secret + it useful for pass SSL certificates from cert-manager or similar tool + look 
into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + # nullable: true + x-kubernetes-preserve-unknown-fields: true + clusters: + type: array + description: | + describes clusters layout and allows change settings on cluster-level, shard-level and replica-level + every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server` + all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml` + Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + If `cluster` contains zookeeper settings (could be inherited from top `chi` level), when you can create *ReplicatedMergeTree tables + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources" + minLength: 1 + # See namePartClusterMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + zookeeper: + !!merge <<: *TypeZookeeperConfig + description: | + optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.zookeeper` settings + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` + schemaPolicy: + type: object + description: | + describes how schema is propagated within replicas and shards + properties: + replica: + type: string + description: "how schema is propagated within a replica" + enum: + # List SchemaPolicyReplicaXXX constants from model + - "" + - "None" + - "All" + shard: + type: string + description: "how schema is propagated between shards" + enum: + # List SchemaPolicyShardXXX constants from model + - "" + - "None" + - "All" + - "DistributedTablesOnly" + insecure: + !!merge <<: *TypeStringBool + description: optional, open insecure ports for cluster, defaults to "yes" + secure: + !!merge <<: *TypeStringBool + description: optional, open secure ports for cluster + secret: + type: object + description: "optional, shared secret value to secure cluster communications" + properties: + auto: + !!merge <<: *TypeStringBool + description: "Auto-generate shared secret value to secure cluster communications" + value: + description: "Cluster shared secret value in plain text" + type: string + valueFrom: + description: "Cluster shared secret source" + type: object + properties: + secretKeyRef: + 
description: | + Selects a key of a secret in the clickhouse installation namespace. + Should not be used if value is not empty. + type: object + properties: + name: + description: | + Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - name + - key + pdbManaged: + !!merge <<: *TypeStringBool + description: | + Specifies whether the Pod Disruption Budget (PDB) should be managed. + During the next installation, if PDB management is enabled, the operator will + attempt to retrieve any existing PDB. If none is found, it will create a new one + and initiate a reconciliation loop. If PDB management is disabled, the existing PDB + will remain intact, and the reconciliation loop will not be executed. By default, + PDB management is enabled. + pdbMaxUnavailable: + type: integer + description: | + Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction, + i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions + by specifying 0. This is a mutually exclusive setting with "minAvailable". 
+ minimum: 0 + maximum: 65535 + reconcile: + type: object + description: "allow tuning reconciling process" + properties: + runtime: + !!merge <<: *TypeReconcileRuntime + host: + !!merge <<: *TypeReconcileHost + layout: + type: object + description: | + describe current cluster layout, how much shards in cluster, how much replica in shard + allows override settings on each shard and replica separatelly + # nullable: true + properties: + shardsCount: + type: integer + description: | + how much shards for current ClickHouse cluster will run in Kubernetes, + each shard contains shared-nothing part of data and contains set of replicas, + cluster contains 1 shard by default" + replicasCount: + type: integer + description: | + how much replicas in each shards for current cluster will run in Kubernetes, + each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, + every shard contains 1 replica by default" + shards: + type: array + description: | + optional, allows override top-level `chi.spec.configuration`, cluster-level + `chi.spec.configuration.clusters` settings for each shard separately, + use it only if you fully understand what you do" + # nullable: true + items: + type: object + properties: + name: + type: string + description: "optional, by default shard name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartShardMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + definitionType: + type: string + description: "DEPRECATED - to be removed soon" + weight: + type: integer + description: | + optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine, + will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml + More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + internalReplication: + !!merge <<: *TypeStringBool 
+ description: | + optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise + allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication, + will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml + More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard + override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates` + replicasCount: + type: integer + description: | + optional, how much replicas in selected shard for selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, + shard contains 1 replica by default + override 
cluster-level `chi.spec.configuration.clusters.layout.replicasCount` + minimum: 1 + replicas: + type: array + description: | + optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards` + # nullable: true + items: + # Host + type: object + properties: + name: + type: string + description: "optional, by default replica name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + insecure: + !!merge <<: *TypeStringBool + description: | + optional, open insecure ports for cluster, defaults to "yes" + secure: + !!merge <<: *TypeStringBool + description: | + optional, open secure ports + tcpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort` + allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + tlsPort: + type: integer + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort` + allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + httpsPort: + type: integer + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort` + allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol + minimum: 1 + maximum: 65535 + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings 
inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates` + replicas: + type: array + description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do" + # nullable: true + items: + type: object + properties: + name: + type: string + description: "optional, by default replica name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartShardMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` + shardsCount: + type: integer + description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`" + minimum: 1 + shards: + type: array + description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents" + # nullable: true + items: + # Host + type: object + properties: + name: + type: string + description: "optional, by default shard name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + insecure: + !!merge <<: *TypeStringBool + description: | + optional, open insecure ports for cluster, defaults to "yes" + secure: 
+ !!merge <<: *TypeStringBool + description: | + optional, open secure ports + tcpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort` + allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + tlsPort: + type: integer + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort` + allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + httpsPort: + type: integer + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort` + allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol + minimum: 1 + maximum: 65535 + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates` + templates: + type: object + description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it" + # nullable: true + properties: + hostTemplates: + type: array + description: "hostTemplate will use during apply to generate `clickhose-server` config files" + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level 
`chi.spec.configuration.clusters.layout.shards.temlates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`" + type: string + portDistribution: + type: array + description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs" + # nullable: true + items: + type: object + #required: + # - type + properties: + type: + type: string + description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network" + enum: + # List PortDistributionXXX constants + - "" + - "Unspecified" + - "ClusterScopeIndex" + spec: + # Host + type: object + properties: + name: + type: string + description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + insecure: + !!merge <<: *TypeStringBool + description: | + optional, open insecure ports for cluster, defaults to "yes" + secure: + !!merge <<: *TypeStringBool + description: | + optional, open secure ports + tcpPort: + type: integer + description: | + optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]` + More info: https://clickhouse.tech/docs/en/interfaces/tcp/ + minimum: 1 + maximum: 65535 + tlsPort: + type: integer + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + 
optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]` + More info: https://clickhouse.tech/docs/en/interfaces/http/ + minimum: 1 + maximum: 65535 + httpsPort: + type: integer + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]` + More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port + minimum: 1 + maximum: 65535 + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + templates: + !!merge <<: *TypeTemplateNames + description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do" + podTemplates: + type: array + description: | + podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone + More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates + # 
nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`" + generateName: + type: string + description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables" + zone: + type: object + description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" + #required: + # - values + properties: + key: + type: string + description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`" + values: + type: array + description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`" + # nullable: true + items: + type: string + distribution: + type: string + description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" + enum: + - "" + - "Unspecified" + - "OnePerHost" + podDistribution: + type: array + description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster" + # nullable: true + items: + type: object + #required: + # - type + properties: + type: + type: string + description: "you can define multiple affinity policy types" + enum: + # List PodDistributionXXX constants + - "" + - "Unspecified" + - "ClickHouseAntiAffinity" + - "ShardAntiAffinity" + - "ReplicaAntiAffinity" + - "AnotherNamespaceAntiAffinity" + - 
"AnotherClickHouseInstallationAntiAffinity" + - "AnotherClusterAntiAffinity" + - "MaxNumberPerNode" + - "NamespaceAffinity" + - "ClickHouseInstallationAffinity" + - "ClusterAffinity" + - "ShardAffinity" + - "ReplicaAffinity" + - "PreviousTailAffinity" + - "CircularReplication" + scope: + type: string + description: "scope for apply each podDistribution" + enum: + # list PodDistributionScopeXXX constants + - "" + - "Unspecified" + - "Shard" + - "Replica" + - "Cluster" + - "ClickHouseInstallation" + - "Namespace" + number: + type: integer + description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type" + minimum: 0 + maximum: 65535 + topologyKey: + type: string + description: | + use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`, + more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity" + metadata: + type: object + description: | + allows pass standard object's metadata from template to Pod + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + # TODO specify PodSpec + type: object + description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details" + # nullable: true + x-kubernetes-preserve-unknown-fields: true + volumeClaimTemplates: + type: array + description: | + allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else + # nullable: true + items: + type: object + #required: + # - name + # - spec + properties: + name: + type: string + description: | + template name, could use to link inside + top-level 
`chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`, + cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`, + shard-level `chi.spec.configuration.clusters.layout.shards.temlates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.temlates.logVolumeClaimTemplate` + replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate` + provisioner: *TypePVCProvisioner + reclaimPolicy: *TypePVCReclaimPolicy + metadata: + type: object + description: | + allows to pass standard object's metadata from template to PVC + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + type: object + description: | + allows define all aspects of `PVC` resource + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + # nullable: true + x-kubernetes-preserve-unknown-fields: true + serviceTemplates: + type: array + description: | + allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level + # nullable: true + items: + type: object + #required: + # - name + # - spec + properties: + name: + type: string + description: | + template name, could use to link inside + chi-level `chi.spec.defaults.templates.serviceTemplate` + cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate` + shard-level `chi.spec.configuration.clusters.layout.shards.temlates.shardServiceTemplate` + replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or 
`chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate` + generateName: + type: string + description: | + allows define format for generated `Service` name, + look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates + for details about available template variables" + metadata: + # TODO specify ObjectMeta + type: object + description: | + allows pass standard object's metadata from template to Service + Could be use for define specificly for Cloud Provider metadata which impact to behavior of service + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + # TODO specify ServiceSpec + type: object + description: | + describe behavior of generated Service + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + # nullable: true + x-kubernetes-preserve-unknown-fields: true + useTemplates: + type: array + description: | + list of `ClickHouseInstallationTemplate` (chit) resource names which will merge with current `CHI` + manifest during render Kubernetes resources to create related ClickHouse clusters" + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "name of `ClickHouseInstallationTemplate` (chit) resource" + namespace: + type: string + description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`" + useType: + type: string + description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`" + enum: + # List useTypeXXX constants from model + - "" + - "merge" diff --git a/deploy/operatorhub/0.26.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml 
b/deploy/operatorhub/0.26.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml new file mode 100644 index 000000000..4350b913e --- /dev/null +++ b/deploy/operatorhub/0.26.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml @@ -0,0 +1,1527 @@ +# Template Parameters: +# +# KIND=ClickHouseInstallationTemplate +# SINGULAR=clickhouseinstallationtemplate +# PLURAL=clickhouseinstallationtemplates +# SHORT=chit +# OPERATOR_VERSION=0.26.0 +# +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clickhouseinstallationtemplates.clickhouse.altinity.com + labels: + clickhouse.altinity.com/chop: 0.26.0 +spec: + group: clickhouse.altinity.com + scope: Namespaced + names: + kind: ClickHouseInstallationTemplate + singular: clickhouseinstallationtemplate + plural: clickhouseinstallationtemplates + shortNames: + - chit + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status + - name: version + type: string + description: Operator version + priority: 1 # show in wide view + jsonPath: .status.chop-version + - name: clusters + type: integer + description: Clusters count + jsonPath: .status.clusters + - name: shards + type: integer + description: Shards count + priority: 1 # show in wide view + jsonPath: .status.shards + - name: hosts + type: integer + description: Hosts count + jsonPath: .status.hosts + - name: taskID + type: string + description: TaskID + priority: 1 # show in wide view + jsonPath: .status.taskID + - name: hosts-completed + type: integer + description: Completed hosts count + jsonPath: .status.hostsCompleted + - name: hosts-updated + type: integer + description: Updated hosts count + priority: 1 # show in wide view + jsonPath: .status.hostsUpdated + - name: hosts-added + type: integer + description: Added hosts count + priority: 1 # show in wide view + jsonPath: .status.hostsAdded + - name: 
hosts-deleted + type: integer + description: Hosts deleted count + priority: 1 # show in wide view + jsonPath: .status.hostsDeleted + - name: endpoint + type: string + description: Client access endpoint + priority: 1 # show in wide view + jsonPath: .status.endpoint + - name: age + type: date + description: Age of the resource + # Displayed in all priorities + jsonPath: .metadata.creationTimestamp + - name: suspend + type: string + description: Suspend reconciliation + # Displayed in all priorities + jsonPath: .spec.suspend + subresources: + status: {} + schema: + openAPIV3Schema: + description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters" + type: object + required: + - spec + properties: + apiVersion: + description: | + APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: | + Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + type: object + description: | + Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other + properties: + chop-version: + type: string + description: "Operator version" + chop-commit: + type: string + description: "Operator git commit SHA" + chop-date: + type: string + description: "Operator build date" + chop-ip: + type: string + description: "IP address of the operator's pod which managed this resource" + clusters: + type: integer + minimum: 0 + description: "Clusters count" + shards: + type: integer + minimum: 0 + description: "Shards count" + replicas: + type: integer + minimum: 0 + description: "Replicas count" + hosts: + type: integer + minimum: 0 + description: "Hosts count" + status: + type: string + description: "Status" + taskID: + type: string + description: "Current task id" + taskIDsStarted: + type: array + description: "Started task ids" + nullable: true + items: + type: string + taskIDsCompleted: + type: array + description: "Completed task ids" + nullable: true + items: + type: string + action: + type: string + description: "Action" + actions: + type: array + description: "Actions" + nullable: true + items: + type: string + error: + type: string + description: "Last error" + errors: + type: array + description: "Errors" + nullable: true + items: + type: string + hostsUnchanged: + type: integer + minimum: 0 + description: "Unchanged Hosts count" + hostsUpdated: + type: integer + minimum: 0 + description: "Updated Hosts count" + hostsAdded: + type: integer + minimum: 0 + description: "Added Hosts count" + hostsCompleted: + type: integer + minimum: 0 + description: "Completed Hosts count" + hostsDeleted: + type: integer + minimum: 0 + description: "Deleted Hosts count" + 
hostsDelete: + type: integer + minimum: 0 + description: "About to delete Hosts count" + pods: + type: array + description: "Pods" + nullable: true + items: + type: string + pod-ips: + type: array + description: "Pod IPs" + nullable: true + items: + type: string + fqdns: + type: array + description: "Pods FQDNs" + nullable: true + items: + type: string + endpoint: + type: string + description: "Endpoint" + endpoints: + type: array + description: "All endpoints" + nullable: true + items: + type: string + generation: + type: integer + minimum: 0 + description: "Generation" + normalized: + type: object + description: "Normalized resource requested" + nullable: true + x-kubernetes-preserve-unknown-fields: true + normalizedCompleted: + type: object + description: "Normalized resource completed" + nullable: true + x-kubernetes-preserve-unknown-fields: true + actionPlan: + type: object + description: "Action Plan" + nullable: true + x-kubernetes-preserve-unknown-fields: true + hostsWithTablesCreated: + type: array + description: "List of hosts with tables created by the operator" + nullable: true + items: + type: string + hostsWithReplicaCaughtUp: + type: array + description: "List of hosts with replica caught up" + nullable: true + items: + type: string + usedTemplates: + type: array + description: "List of templates used to build this CHI" + nullable: true + x-kubernetes-preserve-unknown-fields: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + spec: + type: object + # x-kubernetes-preserve-unknown-fields: true + description: | + Specification of the desired behavior of one or more ClickHouse clusters + More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md + properties: + taskID: + type: string + description: | + Allows to define custom taskID for CHI update and watch status of this update execution. + Displayed in all .status.taskID* fields. 
+ By default (if not filled) every update of CHI manifest will generate random taskID + stop: &TypeStringBool + type: string + description: | + Allows to stop all ClickHouse clusters defined in a CHI. + Works as the following: + - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. + - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s. + enum: + # List StringBoolXXX constants from model + - "" + - "0" + - "1" + - "False" + - "false" + - "True" + - "true" + - "No" + - "no" + - "Yes" + - "yes" + - "Off" + - "off" + - "On" + - "on" + - "Disable" + - "disable" + - "Enable" + - "enable" + - "Disabled" + - "disabled" + - "Enabled" + - "enabled" + restart: + type: string + description: | + In case 'RollingUpdate' specified, the operator will always restart ClickHouse pods during reconcile. + This options is used in rare cases when force restart is required and is typically removed after the use in order to avoid unneeded restarts. + enum: + - "" + - "RollingUpdate" + suspend: + !!merge <<: *TypeStringBool + description: | + Suspend reconciliation of resources managed by a ClickHouse Installation. + Works as the following: + - When `suspend` is `true` operator stops reconciling all resources. + - When `suspend` is `false` or not set, operator reconciles all resources. + troubleshoot: + !!merge <<: *TypeStringBool + description: | + Allows to troubleshoot Pods during CrashLoopBack state. + This may happen when wrong configuration applied, in this case `clickhouse-server` wouldn't start. + Command within ClickHouse container is modified with `sleep` in order to avoid quick restarts + and give time to troubleshoot via CLI. + Liveness and Readiness probes are disabled as well. 
+ namespaceDomainPattern: + type: string + description: | + Custom domain pattern which will be used for DNS names of `Service` or `Pod`. + Typical use scenario - custom cluster domain in Kubernetes cluster + Example: %s.svc.my.test + templating: + type: object + # nullable: true + description: | + Optional, applicable inside ClickHouseInstallationTemplate only. + Defines current ClickHouseInstallationTemplate application options to target ClickHouseInstallation(s)." + properties: + policy: + type: string + description: | + When defined as `auto` inside ClickhouseInstallationTemplate, this ClickhouseInstallationTemplate + will be auto-added into ClickHouseInstallation, selectable by `chiSelector`. + Default value is `manual`, meaning ClickHouseInstallation should request this ClickhouseInstallationTemplate explicitly. + enum: + - "" + - "auto" + - "manual" + chiSelector: + type: object + description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate" + # nullable: true + x-kubernetes-preserve-unknown-fields: true + reconciling: &TypeReconcile + type: object + description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side" + # nullable: true + properties: + policy: + type: string + description: | + DISCUSSED TO BE DEPRECATED + Syntax sugar + Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config + Possible values: + - wait - should wait to exclude host, complete queries and include host back into the cluster + - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster + enum: + - "" + - "wait" + - "nowait" + configMapPropagationTimeout: + type: integer + description: | + Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod` + More details: 
https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically + minimum: 0 + maximum: 3600 + cleanup: + type: object + description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle" + # nullable: true + properties: + unknownObjects: + type: object + description: | + Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator, + but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource. + Default behavior is `Delete`" + # nullable: true + properties: + statefulSet: &TypeObjectsCleanup + type: string + description: "Behavior policy for unknown StatefulSet, `Delete` by default" + enum: + # List ObjectsCleanupXXX constants from model + - "" + - "Retain" + - "Delete" + pvc: + type: string + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown PVC, `Delete` by default" + configMap: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown ConfigMap, `Delete` by default" + service: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown Service, `Delete` by default" + reconcileFailedObjects: + type: object + description: | + Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile. 
+ Default behavior is `Retain`" + # nullable: true + properties: + statefulSet: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed StatefulSet, `Retain` by default" + pvc: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed PVC, `Retain` by default" + configMap: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed ConfigMap, `Retain` by default" + service: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed Service, `Retain` by default" + macros: + type: object + description: "macros parameters" + properties: + sections: + type: object + description: "sections behaviour for macros" + properties: + users: + type: object + description: "sections behaviour for macros on users" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + profiles: + type: object + description: "sections behaviour for macros on profiles" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + quotas: + type: object + description: "sections behaviour for macros on quotas" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + settings: + type: object + description: "sections behaviour for macros on settings" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + files: + type: object + description: "sections behaviour for macros on files" + properties: + enabled: + !!merge <<: *TypeStringBool + description: "enabled or not" + runtime: &TypeReconcileRuntime + type: object + description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle" + properties: + reconcileShardsThreadsNumber: + type: integer + minimum: 1 + maximum: 65535 + description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default" + reconcileShardsMaxConcurrencyPercent: + type: integer + minimum: 0 + maximum: 100 + description: 
"The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." + statefulSet: &TypeReconcileStatefulSet + type: object + description: "Optional, StatefulSet reconcile behavior tuning" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. + enum: + - "" + - "abort" + - "delete" + - "ignore" + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for StatefulSet to be 'Ready' during update" + minimum: 0 + maximum: 3600 + pollInterval: + type: integer + description: "How many seconds to wait between checks for StatefulSet status during update" + minimum: 1 + maximum: 600 + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is. + 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards. + 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet. 
+ enum: + - "" + - "abort" + - "rollback" + - "ignore" + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate - proceed and recreate StatefulSet. + enum: + - "" + - "abort" + - "recreate" + host: &TypeReconcileHost + type: object + description: | + Whether the operator during reconcile procedure should wait for a ClickHouse host: + - to be excluded from a ClickHouse cluster + - to complete all running queries + - to be included into a ClickHouse cluster + respectfully before moving forward + properties: + wait: + type: object + properties: + exclude: + !!merge <<: *TypeStringBool + queries: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries" + include: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster" + replicas: + type: object + description: "Whether the operator during reconcile procedure should wait for replicas to catch-up" + properties: + all: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up" + new: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for new 
replicas to catch-up" + delay: + type: integer + description: "replication max absolute delay to consider replica is not delayed" + probes: + type: object + description: "What probes the operator should wait during host launch procedure" + properties: + startup: + !!merge <<: *TypeStringBool + description: | + Whether the operator during host launch procedure should wait for startup probe to succeed. + In case probe is unspecified wait is assumed to be completed successfully. + Default option value is to do not wait. + readiness: + !!merge <<: *TypeStringBool + description: | + Whether the operator during host launch procedure should wait for ready probe to succeed. + In case probe is unspecified wait is assumed to be completed successfully. + Default option value is to wait. + drop: + type: object + properties: + replicas: + type: object + description: | + Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated + properties: + onDelete: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop replicas when replica is deleted + onLostVolume: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop replicas when replica volume is lost + active: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated + reconcile: + !!merge <<: *TypeReconcile + description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side" + defaults: + type: object + description: | + define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level + More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults + # nullable: true + properties: + replicasUseFQDN: + !!merge <<: 
*TypeStringBool + description: | + define should replicas be specified by FQDN in ``. + In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup + "no" by default + distributedDDL: + type: object + description: | + allows change `` settings + More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl + # nullable: true + properties: + profile: + type: string + description: "Settings from this profile will be used to execute DDL queries" + storageManagement: + type: object + description: default storage management options + properties: + provisioner: &TypePVCProvisioner + type: string + description: "defines `PVC` provisioner - be it StatefulSet or the Operator" + enum: + - "" + - "StatefulSet" + - "Operator" + reclaimPolicy: &TypePVCReclaimPolicy + type: string + description: | + defines behavior of `PVC` deletion. + `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet + enum: + - "" + - "Retain" + - "Delete" + templates: &TypeTemplateNames + type: object + description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource" + # nullable: true + properties: + hostTemplate: + type: string + description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`" + podTemplate: + type: string + description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + dataVolumeClaimTemplate: + type: string + description: "optional, template name from 
chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + logVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + serviceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" + serviceTemplates: + type: array + description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" + nullable: true + items: + type: string + clusterServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`" + shardServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`" + replicaServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse 
cluster described in `chi.spec.configuration.clusters`" + volumeClaimTemplate: + type: string + description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + configuration: + type: object + description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource" + # nullable: true + properties: + zookeeper: &TypeZookeeperConfig + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/` + `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separatelly look examples on https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/ + currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl` + More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper + # nullable: true + properties: + nodes: + type: array + description: "describe every available zookeeper cluster node for interaction" + # nullable: true + items: + type: object + #required: + # - host + properties: + host: + type: string + description: "dns name or ip address for Zookeeper node" + port: + type: integer + description: "TCP port which used to connect to Zookeeper node" + minimum: 0 + maximum: 65535 + secure: + !!merge <<: *TypeStringBool + description: "if a secure connection to Zookeeper is required" + availabilityZone: + type: string + description: "availability zone for Zookeeper node" + session_timeout_ms: + type: integer + description: "session timeout during 
connect to Zookeeper" + operation_timeout_ms: + type: integer + description: "one operation timeout during Zookeeper transactions" + root: + type: string + description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)" + identity: + type: string + description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper" + use_compression: + !!merge <<: *TypeStringBool + description: "Enables compression in Keeper protocol if set to true" + users: + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure password hashed, authorization restrictions, database level security row filters etc. + More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers + + any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets + secret value will pass in `pod.spec.containers.evn`, and generate with from_env=XXX in XML in /etc/clickhouse-server/users.d/chop-generated-users.xml + it not allow automatically updates when updates `secret`, change spec.taskID for manually trigger reconcile cycle + + look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + + any key with prefix `k8s_secret_` shall has value with format namespace/secret/key or secret/key + in this case value from secret will write directly into XML tag during render *-usersd ConfigMap + + any key with prefix `k8s_secret_env` shall has value with format namespace/secret/key or secret/key + in this case value from secret will write into environment variable and write to XML tag via 
from_env=XXX + + look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + # nullable: true + x-kubernetes-preserve-unknown-fields: true + profiles: + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure any aspect of settings profile + More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles + # nullable: true + x-kubernetes-preserve-unknown-fields: true + quotas: + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure any aspect of resource quotas + More details: https://clickhouse.tech/docs/en/operations/quotas/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas + # nullable: true + x-kubernetes-preserve-unknown-fields: true + settings: &TypeSettings + type: object + description: | + allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings + + any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets + look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + + secret value will pass in `pod.spec.env`, and generate with from_env=XXX in XML in /etc/clickhouse-server/config.d/chop-generated-settings.xml + it not allow automatically updates when updates `secret`, change spec.taskID for manually trigger reconcile cycle + # nullable: true + x-kubernetes-preserve-unknown-fields: true + files: &TypeFiles + type: object + description: | + allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + every key in this object is the file name + every value in this object is the file content + you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html + each key could contains prefix like {common}, {users}, {hosts} or config.d, users.d, conf.d, wrong prefixes will be ignored, subfolders also will be ignored + More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml + + any key could contains `valueFrom` with `secretKeyRef` which allow pass values from kubernetes secrets + secrets will mounted into pod as separate volume in /etc/clickhouse-server/secrets.d/ + and will automatically update when update secret + it useful for pass SSL certificates from cert-manager or similar tool + look 
into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples + # nullable: true + x-kubernetes-preserve-unknown-fields: true + clusters: + type: array + description: | + describes clusters layout and allows change settings on cluster-level, shard-level and replica-level + every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server` + all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml` + Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + If `cluster` contains zookeeper settings (could be inherited from top `chi` level), when you can create *ReplicatedMergeTree tables + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources" + minLength: 1 + # See namePartClusterMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + zookeeper: + !!merge <<: *TypeZookeeperConfig + description: | + optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.zookeeper` settings + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` + schemaPolicy: + type: object + description: | + describes how schema is propagated within replicas and shards + properties: + replica: + type: string + description: "how schema is propagated within a replica" + enum: + # List SchemaPolicyReplicaXXX constants from model + - "" + - "None" + - "All" + shard: + type: string + description: "how schema is propagated between shards" + enum: + # List SchemaPolicyShardXXX constants from model + - "" + - "None" + - "All" + - "DistributedTablesOnly" + insecure: + !!merge <<: *TypeStringBool + description: optional, open insecure ports for cluster, defaults to "yes" + secure: + !!merge <<: *TypeStringBool + description: optional, open secure ports for cluster + secret: + type: object + description: "optional, shared secret value to secure cluster communications" + properties: + auto: + !!merge <<: *TypeStringBool + description: "Auto-generate shared secret value to secure cluster communications" + value: + description: "Cluster shared secret value in plain text" + type: string + valueFrom: + description: "Cluster shared secret source" + type: object + properties: + secretKeyRef: + 
description: | + Selects a key of a secret in the clickhouse installation namespace. + Should not be used if value is not empty. + type: object + properties: + name: + description: | + Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - name + - key + pdbManaged: + !!merge <<: *TypeStringBool + description: | + Specifies whether the Pod Disruption Budget (PDB) should be managed. + During the next installation, if PDB management is enabled, the operator will + attempt to retrieve any existing PDB. If none is found, it will create a new one + and initiate a reconciliation loop. If PDB management is disabled, the existing PDB + will remain intact, and the reconciliation loop will not be executed. By default, + PDB management is enabled. + pdbMaxUnavailable: + type: integer + description: | + Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction, + i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions + by specifying 0. This is a mutually exclusive setting with "minAvailable". 
+ minimum: 0 + maximum: 65535 + reconcile: + type: object + description: "allow tuning reconciling process" + properties: + runtime: + !!merge <<: *TypeReconcileRuntime + host: + !!merge <<: *TypeReconcileHost + layout: + type: object + description: | + describe current cluster layout, how much shards in cluster, how much replica in shard + allows override settings on each shard and replica separatelly + # nullable: true + properties: + shardsCount: + type: integer + description: | + how much shards for current ClickHouse cluster will run in Kubernetes, + each shard contains shared-nothing part of data and contains set of replicas, + cluster contains 1 shard by default" + replicasCount: + type: integer + description: | + how much replicas in each shards for current cluster will run in Kubernetes, + each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, + every shard contains 1 replica by default" + shards: + type: array + description: | + optional, allows override top-level `chi.spec.configuration`, cluster-level + `chi.spec.configuration.clusters` settings for each shard separately, + use it only if you fully understand what you do" + # nullable: true + items: + type: object + properties: + name: + type: string + description: "optional, by default shard name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartShardMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + definitionType: + type: string + description: "DEPRECATED - to be removed soon" + weight: + type: integer + description: | + optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine, + will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml + More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + internalReplication: + !!merge <<: *TypeStringBool 
+ description: | + optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise + allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication, + will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml + More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard + override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates` + replicasCount: + type: integer + description: | + optional, how much replicas in selected shard for selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, + shard contains 1 replica by default + override 
cluster-level `chi.spec.configuration.clusters.layout.replicasCount` + minimum: 1 + replicas: + type: array + description: | + optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards` + # nullable: true + items: + # Host + type: object + properties: + name: + type: string + description: "optional, by default replica name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + insecure: + !!merge <<: *TypeStringBool + description: | + optional, open insecure ports for cluster, defaults to "yes" + secure: + !!merge <<: *TypeStringBool + description: | + optional, open secure ports + tcpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort` + allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + tlsPort: + type: integer + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort` + allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + httpsPort: + type: integer + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort` + allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol + minimum: 1 + maximum: 65535 + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings 
inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates` + replicas: + type: array + description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do" + # nullable: true + items: + type: object + properties: + name: + type: string + description: "optional, by default replica name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartShardMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` + shardsCount: + type: integer + description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`" + minimum: 1 + shards: + type: array + description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents" + # nullable: true + items: + # Host + type: object + properties: + name: + type: string + description: "optional, by default shard name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + insecure: + !!merge <<: *TypeStringBool + description: | + optional, open insecure ports for cluster, defaults to "yes" + secure: 
+ !!merge <<: *TypeStringBool + description: | + optional, open secure ports + tcpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort` + allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + tlsPort: + type: integer + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort` + allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + httpsPort: + type: integer + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort` + allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol + minimum: 1 + maximum: 65535 + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates` + templates: + type: object + description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it" + # nullable: true + properties: + hostTemplates: + type: array + description: "hostTemplate will use during apply to generate `clickhose-server` config files" + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level 
`chi.spec.configuration.clusters.layout.shards.temlates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`" + type: string + portDistribution: + type: array + description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs" + # nullable: true + items: + type: object + #required: + # - type + properties: + type: + type: string + description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network" + enum: + # List PortDistributionXXX constants + - "" + - "Unspecified" + - "ClusterScopeIndex" + spec: + # Host + type: object + properties: + name: + type: string + description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + insecure: + !!merge <<: *TypeStringBool + description: | + optional, open insecure ports for cluster, defaults to "yes" + secure: + !!merge <<: *TypeStringBool + description: | + optional, open secure ports + tcpPort: + type: integer + description: | + optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]` + More info: https://clickhouse.tech/docs/en/interfaces/tcp/ + minimum: 1 + maximum: 65535 + tlsPort: + type: integer + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + 
optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]` + More info: https://clickhouse.tech/docs/en/interfaces/http/ + minimum: 1 + maximum: 65535 + httpsPort: + type: integer + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]` + More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port + minimum: 1 + maximum: 65535 + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + templates: + !!merge <<: *TypeTemplateNames + description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do" + podTemplates: + type: array + description: | + podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone + More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates + # 
nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`" + generateName: + type: string + description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables" + zone: + type: object + description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" + #required: + # - values + properties: + key: + type: string + description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`" + values: + type: array + description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`" + # nullable: true + items: + type: string + distribution: + type: string + description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" + enum: + - "" + - "Unspecified" + - "OnePerHost" + podDistribution: + type: array + description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster" + # nullable: true + items: + type: object + #required: + # - type + properties: + type: + type: string + description: "you can define multiple affinity policy types" + enum: + # List PodDistributionXXX constants + - "" + - "Unspecified" + - "ClickHouseAntiAffinity" + - "ShardAntiAffinity" + - "ReplicaAntiAffinity" + - "AnotherNamespaceAntiAffinity" + - 
"AnotherClickHouseInstallationAntiAffinity" + - "AnotherClusterAntiAffinity" + - "MaxNumberPerNode" + - "NamespaceAffinity" + - "ClickHouseInstallationAffinity" + - "ClusterAffinity" + - "ShardAffinity" + - "ReplicaAffinity" + - "PreviousTailAffinity" + - "CircularReplication" + scope: + type: string + description: "scope for apply each podDistribution" + enum: + # list PodDistributionScopeXXX constants + - "" + - "Unspecified" + - "Shard" + - "Replica" + - "Cluster" + - "ClickHouseInstallation" + - "Namespace" + number: + type: integer + description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type" + minimum: 0 + maximum: 65535 + topologyKey: + type: string + description: | + use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`, + more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity" + metadata: + type: object + description: | + allows pass standard object's metadata from template to Pod + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + # TODO specify PodSpec + type: object + description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details" + # nullable: true + x-kubernetes-preserve-unknown-fields: true + volumeClaimTemplates: + type: array + description: | + allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else + # nullable: true + items: + type: object + #required: + # - name + # - spec + properties: + name: + type: string + description: | + template name, could use to link inside + top-level 
`chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`, + cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`, + shard-level `chi.spec.configuration.clusters.layout.shards.temlates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.temlates.logVolumeClaimTemplate` + replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate` + provisioner: *TypePVCProvisioner + reclaimPolicy: *TypePVCReclaimPolicy + metadata: + type: object + description: | + allows to pass standard object's metadata from template to PVC + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + type: object + description: | + allows define all aspects of `PVC` resource + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + # nullable: true + x-kubernetes-preserve-unknown-fields: true + serviceTemplates: + type: array + description: | + allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level + # nullable: true + items: + type: object + #required: + # - name + # - spec + properties: + name: + type: string + description: | + template name, could use to link inside + chi-level `chi.spec.defaults.templates.serviceTemplate` + cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate` + shard-level `chi.spec.configuration.clusters.layout.shards.temlates.shardServiceTemplate` + replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or 
`chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate` + generateName: + type: string + description: | + allows define format for generated `Service` name, + look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates + for details about available template variables" + metadata: + # TODO specify ObjectMeta + type: object + description: | + allows pass standard object's metadata from template to Service + Could be use for define specificly for Cloud Provider metadata which impact to behavior of service + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + # TODO specify ServiceSpec + type: object + description: | + describe behavior of generated Service + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + # nullable: true + x-kubernetes-preserve-unknown-fields: true + useTemplates: + type: array + description: | + list of `ClickHouseInstallationTemplate` (chit) resource names which will merge with current `CHI` + manifest during render Kubernetes resources to create related ClickHouse clusters" + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "name of `ClickHouseInstallationTemplate` (chit) resource" + namespace: + type: string + description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`" + useType: + type: string + description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`" + enum: + # List useTypeXXX constants from model + - "" + - "merge" diff --git a/deploy/operatorhub/0.26.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml 
b/deploy/operatorhub/0.26.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml new file mode 100644 index 000000000..405dac0a1 --- /dev/null +++ b/deploy/operatorhub/0.26.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml @@ -0,0 +1,875 @@ +# Template Parameters: +# +# OPERATOR_VERSION=0.26.0 +# +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com + labels: + clickhouse-keeper.altinity.com/chop: 0.26.0 +spec: + group: clickhouse-keeper.altinity.com + scope: Namespaced + names: + kind: ClickHouseKeeperInstallation + singular: clickhousekeeperinstallation + plural: clickhousekeeperinstallations + shortNames: + - chk + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: status + type: string + description: Resource status + jsonPath: .status.status + - name: version + type: string + description: Operator version + priority: 1 # show in wide view + jsonPath: .status.chop-version + - name: clusters + type: integer + description: Clusters count + jsonPath: .status.clusters + - name: shards + type: integer + description: Shards count + priority: 1 # show in wide view + jsonPath: .status.shards + - name: hosts + type: integer + description: Hosts count + jsonPath: .status.hosts + - name: taskID + type: string + description: TaskID + priority: 1 # show in wide view + jsonPath: .status.taskID + - name: hosts-completed + type: integer + description: Completed hosts count + jsonPath: .status.hostsCompleted + - name: hosts-updated + type: integer + description: Updated hosts count + priority: 1 # show in wide view + jsonPath: .status.hostsUpdated + - name: hosts-added + type: integer + description: Added hosts count + priority: 1 # show in wide view + jsonPath: .status.hostsAdded + - name: hosts-deleted + type: integer + description: Hosts deleted count + priority: 1 # show in wide view + jsonPath: 
.status.hostsDeleted + - name: endpoint + type: string + description: Client access endpoint + priority: 1 # show in wide view + jsonPath: .status.endpoint + - name: age + type: date + description: Age of the resource + # Displayed in all priorities + jsonPath: .metadata.creationTimestamp + - name: suspend + type: string + description: Suspend reconciliation + # Displayed in all priorities + jsonPath: .spec.suspend + subresources: + status: {} + schema: + openAPIV3Schema: + description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters" + type: object + required: + - spec + properties: + apiVersion: + description: | + APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: | + Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + type: object + description: | + Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other + properties: + chop-version: + type: string + description: "Operator version" + chop-commit: + type: string + description: "Operator git commit SHA" + chop-date: + type: string + description: "Operator build date" + chop-ip: + type: string + description: "IP address of the operator's pod which managed this resource" + clusters: + type: integer + minimum: 0 + description: "Clusters count" + shards: + type: integer + minimum: 0 + description: "Shards count" + replicas: + type: integer + minimum: 0 + description: "Replicas count" + hosts: + type: integer + minimum: 0 + description: "Hosts count" + status: + type: string + description: "Status" + taskID: + type: string + description: "Current task id" + taskIDsStarted: + type: array + description: "Started task ids" + nullable: true + items: + type: string + taskIDsCompleted: + type: array + description: "Completed task ids" + nullable: true + items: + type: string + action: + type: string + description: "Action" + actions: + type: array + description: "Actions" + nullable: true + items: + type: string + error: + type: string + description: "Last error" + errors: + type: array + description: "Errors" + nullable: true + items: + type: string + hostsUnchanged: + type: integer + minimum: 0 + description: "Unchanged Hosts count" + hostsUpdated: + type: integer + minimum: 0 + description: "Updated Hosts count" + hostsAdded: + type: integer + minimum: 0 + description: "Added Hosts count" + hostsCompleted: + type: integer + minimum: 0 + description: "Completed Hosts count" + hostsDeleted: + type: integer + minimum: 0 + description: "Deleted Hosts count" + 
hostsDelete: + type: integer + minimum: 0 + description: "About to delete Hosts count" + pods: + type: array + description: "Pods" + nullable: true + items: + type: string + pod-ips: + type: array + description: "Pod IPs" + nullable: true + items: + type: string + fqdns: + type: array + description: "Pods FQDNs" + nullable: true + items: + type: string + endpoint: + type: string + description: "Endpoint" + endpoints: + type: array + description: "All endpoints" + nullable: true + items: + type: string + generation: + type: integer + minimum: 0 + description: "Generation" + normalized: + type: object + description: "Normalized resource requested" + nullable: true + x-kubernetes-preserve-unknown-fields: true + normalizedCompleted: + type: object + description: "Normalized resource completed" + nullable: true + x-kubernetes-preserve-unknown-fields: true + hostsWithTablesCreated: + type: array + description: "List of hosts with tables created by the operator" + nullable: true + items: + type: string + hostsWithReplicaCaughtUp: + type: array + description: "List of hosts with replica caught up" + nullable: true + items: + type: string + usedTemplates: + type: array + description: "List of templates used to build this CHI" + nullable: true + x-kubernetes-preserve-unknown-fields: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + spec: + type: object + # x-kubernetes-preserve-unknown-fields: true + description: | + Specification of the desired behavior of one or more ClickHouse clusters + More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md + properties: + taskID: + type: string + description: | + Allows to define custom taskID for CHI update and watch status of this update execution. + Displayed in all .status.taskID* fields. 
+ By default (if not filled) every update of CHI manifest will generate random taskID + stop: &TypeStringBool + type: string + description: | + Allows to stop all ClickHouse Keeper clusters defined in a CHK. + Works as the following: + - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact. + - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s. + enum: + # List StringBoolXXX constants from model + - "" + - "0" + - "1" + - "False" + - "false" + - "True" + - "true" + - "No" + - "no" + - "Yes" + - "yes" + - "Off" + - "off" + - "On" + - "on" + - "Disable" + - "disable" + - "Enable" + - "enable" + - "Disabled" + - "disabled" + - "Enabled" + - "enabled" + suspend: + !!merge <<: *TypeStringBool + description: | + Suspend reconciliation of resources managed by a ClickHouse Keeper. + Works as the following: + - When `suspend` is `true` operator stops reconciling all resources. + - When `suspend` is `false` or not set, operator reconciles all resources. + namespaceDomainPattern: + type: string + description: | + Custom domain pattern which will be used for DNS names of `Service` or `Pod`. 
+ Typical use scenario - custom cluster domain in Kubernetes cluster + Example: %s.svc.my.test + reconciling: + type: object + description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side" + # nullable: true + properties: + policy: + type: string + description: | + DISCUSSED TO BE DEPRECATED + Syntax sugar + Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config + Possible values: + - wait - should wait to exclude host, complete queries and include host back into the cluster + - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster + enum: + - "" + - "wait" + - "nowait" + configMapPropagationTimeout: + type: integer + description: | + Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod` + More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically + minimum: 0 + maximum: 3600 + cleanup: + type: object + description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle" + # nullable: true + properties: + unknownObjects: + type: object + description: | + Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator, + but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource. 
+ Default behavior is `Delete`" + # nullable: true + properties: + statefulSet: &TypeObjectsCleanup + type: string + description: "Behavior policy for unknown StatefulSet, `Delete` by default" + enum: + # List ObjectsCleanupXXX constants from model + - "" + - "Retain" + - "Delete" + pvc: + type: string + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown PVC, `Delete` by default" + configMap: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown ConfigMap, `Delete` by default" + service: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for unknown Service, `Delete` by default" + reconcileFailedObjects: + type: object + description: | + Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile. + Default behavior is `Retain`" + # nullable: true + properties: + statefulSet: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed StatefulSet, `Retain` by default" + pvc: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed PVC, `Retain` by default" + configMap: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed ConfigMap, `Retain` by default" + service: + !!merge <<: *TypeObjectsCleanup + description: "Behavior policy for failed Service, `Retain` by default" + defaults: + type: object + description: | + define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level + More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults + # nullable: true + properties: + replicasUseFQDN: + !!merge <<: *TypeStringBool + description: | + define should replicas be specified by FQDN in ``. 
+ In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup + "no" by default + distributedDDL: + type: object + description: | + allows change `` settings + More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl + # nullable: true + properties: + profile: + type: string + description: "Settings from this profile will be used to execute DDL queries" + storageManagement: + type: object + description: default storage management options + properties: + provisioner: &TypePVCProvisioner + type: string + description: "defines `PVC` provisioner - be it StatefulSet or the Operator" + enum: + - "" + - "StatefulSet" + - "Operator" + reclaimPolicy: &TypePVCReclaimPolicy + type: string + description: | + defines behavior of `PVC` deletion. + `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet + enum: + - "" + - "Retain" + - "Delete" + templates: &TypeTemplateNames + type: object + description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource" + # nullable: true + properties: + hostTemplate: + type: string + description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`" + podTemplate: + type: string + description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + dataVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data 
directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + logVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + serviceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" + serviceTemplates: + type: array + description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource" + nullable: true + items: + type: string + clusterServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`" + shardServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`" + replicaServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`" + volumeClaimTemplate: + type: string + description: 
"optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" + configuration: + type: object + description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource" + # nullable: true + properties: + settings: &TypeSettings + type: object + description: | + allows configure multiple aspects and behavior for `clickhouse-keeper` instance + # nullable: true + x-kubernetes-preserve-unknown-fields: true + files: &TypeFiles + type: object + description: | + allows define content of any setting + # nullable: true + x-kubernetes-preserve-unknown-fields: true + clusters: + type: array + description: | + describes clusters layout and allows change settings on cluster-level and replica-level + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources" + minLength: 1 + # See namePartClusterMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` + pdbManaged: + !!merge <<: *TypeStringBool + description: | + Specifies whether the Pod Disruption Budget (PDB) should be managed. + During the next installation, if PDB management is enabled, the operator will + attempt to retrieve any existing PDB. If none is found, it will create a new one + and initiate a reconciliation loop. If PDB management is disabled, the existing PDB + will remain intact, and the reconciliation loop will not be executed. By default, + PDB management is enabled. + pdbMaxUnavailable: + type: integer + description: | + Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction, + i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions + by specifying 0. This is a mutually exclusive setting with "minAvailable". 
+ minimum: 0 + maximum: 65535 + layout: + type: object + description: | + describe current cluster layout, how much shards in cluster, how much replica in shard + allows override settings on each shard and replica separatelly + # nullable: true + properties: + replicasCount: + type: integer + description: | + how much replicas in each shards for current cluster will run in Kubernetes, + each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, + every shard contains 1 replica by default" + replicas: + type: array + description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do" + # nullable: true + items: + type: object + properties: + name: + type: string + description: "optional, by default replica name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartShardMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` + shardsCount: + type: integer + description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`" + minimum: 1 + shards: + type: array + description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents" + # nullable: true + items: + # Host + type: object + properties: + name: + type: string + description: "optional, by default shard name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + zkPort: + type: integer + minimum: 1 + maximum: 65535 + raftPort: + type: integer + minimum: 1 + maximum: 65535 + settings: + 
!!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents + templates: + !!merge <<: *TypeTemplateNames + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates` + templates: + type: object + description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it" + # nullable: true + properties: + hostTemplates: + type: array + description: "hostTemplate will use during apply to generate `clickhose-server` config files" + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + description: "template name, could use to link inside top-level 
`chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`" + type: string + portDistribution: + type: array + description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs" + # nullable: true + items: + type: object + #required: + # - type + properties: + type: + type: string + description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network" + enum: + # List PortDistributionXXX constants + - "" + - "Unspecified" + - "ClusterScopeIndex" + spec: + # Host + type: object + properties: + name: + type: string + description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + zkPort: + type: integer + minimum: 1 + maximum: 65535 + raftPort: + type: integer + minimum: 1 + maximum: 65535 + settings: + !!merge <<: *TypeSettings + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + files: + !!merge <<: *TypeFiles + description: | + optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + templates: + !!merge <<: *TypeTemplateNames + description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do" + podTemplates: + type: array + description: | + podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone + More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`" + generateName: + type: string + description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables" + zone: + type: object + description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" + #required: + # - values + 
properties: + key: + type: string + description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`" + values: + type: array + description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`" + # nullable: true + items: + type: string + distribution: + type: string + description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" + enum: + - "" + - "Unspecified" + - "OnePerHost" + podDistribution: + type: array + description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster" + # nullable: true + items: + type: object + #required: + # - type + properties: + type: + type: string + description: "you can define multiple affinity policy types" + enum: + # List PodDistributionXXX constants + - "" + - "Unspecified" + - "ClickHouseAntiAffinity" + - "ShardAntiAffinity" + - "ReplicaAntiAffinity" + - "AnotherNamespaceAntiAffinity" + - "AnotherClickHouseInstallationAntiAffinity" + - "AnotherClusterAntiAffinity" + - "MaxNumberPerNode" + - "NamespaceAffinity" + - "ClickHouseInstallationAffinity" + - "ClusterAffinity" + - "ShardAffinity" + - "ReplicaAffinity" + - "PreviousTailAffinity" + - "CircularReplication" + scope: + type: string + description: "scope for apply each podDistribution" + enum: + # list PodDistributionScopeXXX constants + - "" + - "Unspecified" + - "Shard" + - "Replica" + - "Cluster" + - "ClickHouseInstallation" + - "Namespace" + number: + type: integer + description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type" + minimum: 0 + maximum: 65535 + topologyKey: + type: string + description: | + use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`, + more info: 
https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity" + metadata: + type: object + description: | + allows pass standard object's metadata from template to Pod + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + # TODO specify PodSpec + type: object + description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details" + # nullable: true + x-kubernetes-preserve-unknown-fields: true + volumeClaimTemplates: + type: array + description: | + allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else + # nullable: true + items: + type: object + #required: + # - name + # - spec + properties: + name: + type: string + description: | + template name, could use to link inside + top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`, + cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`, + shard-level `chi.spec.configuration.clusters.layout.shards.temlates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.temlates.logVolumeClaimTemplate` + replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate` + provisioner: *TypePVCProvisioner + reclaimPolicy: *TypePVCReclaimPolicy + metadata: + type: object + description: | + allows to pass standard object's metadata from template to PVC + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + 
x-kubernetes-preserve-unknown-fields: true + spec: + type: object + description: | + allows define all aspects of `PVC` resource + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + # nullable: true + x-kubernetes-preserve-unknown-fields: true + serviceTemplates: + type: array + description: | + allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level + # nullable: true + items: + type: object + #required: + # - name + # - spec + properties: + name: + type: string + description: | + template name, could use to link inside + chi-level `chi.spec.defaults.templates.serviceTemplate` + cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate` + shard-level `chi.spec.configuration.clusters.layout.shards.temlates.shardServiceTemplate` + replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate` + generateName: + type: string + description: | + allows define format for generated `Service` name, + look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates + for details about available template variables" + metadata: + # TODO specify ObjectMeta + type: object + description: | + allows pass standard object's metadata from template to Service + Could be use for define specificly for Cloud Provider metadata which impact to behavior of service + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + # TODO specify ServiceSpec + type: object + description: | + describe behavior of generated Service + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + # nullable: true + 
x-kubernetes-preserve-unknown-fields: true diff --git a/deploy/operatorhub/0.26.0/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.26.0/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml new file mode 100644 index 000000000..fb41e3d13 --- /dev/null +++ b/deploy/operatorhub/0.26.0/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml @@ -0,0 +1,563 @@ +# Template Parameters: +# +# NONE +# +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clickhouseoperatorconfigurations.clickhouse.altinity.com + labels: + clickhouse.altinity.com/chop: 0.26.0 +spec: + group: clickhouse.altinity.com + scope: Namespaced + names: + kind: ClickHouseOperatorConfiguration + singular: clickhouseoperatorconfiguration + plural: clickhouseoperatorconfigurations + shortNames: + - chopconf + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: namespaces + type: string + description: Watch namespaces + jsonPath: .status + - name: age + type: date + description: Age of the resource + # Displayed in all priorities + jsonPath: .metadata.creationTimestamp + schema: + openAPIV3Schema: + type: object + description: "allows customize `clickhouse-operator` settings, need restart clickhouse-operator pod after adding, more details https://github.com/Altinity/clickhouse-operator/blob/master/docs/operator_configuration.md" + x-kubernetes-preserve-unknown-fields: true + properties: + status: + type: object + x-kubernetes-preserve-unknown-fields: true + spec: + type: object + description: | + Allows to define settings of the clickhouse-operator. 
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/config/config.yaml + Check into etc-clickhouse-operator* ConfigMaps if you need more control + x-kubernetes-preserve-unknown-fields: true + properties: + watch: + type: object + description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment" + properties: + namespaces: + type: object + description: "List of namespaces where clickhouse-operator watches for events." + x-kubernetes-preserve-unknown-fields: true + clickhouse: + type: object + description: "Clickhouse related parameters used by clickhouse-operator" + properties: + configuration: + type: object + properties: + file: + type: object + properties: + path: + type: object + description: | + Each 'path' can be either absolute or relative. + In case path is absolute - it is used as is. + In case path is relative - it is relative to the folder where configuration file you are reading right now is located. + properties: + common: + type: string + description: | + Path to the folder where ClickHouse configuration files common for all instances within a CHI are located. + Default value - config.d + host: + type: string + description: | + Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located. + Default value - conf.d + user: + type: string + description: | + Path to the folder where ClickHouse configuration files with users settings are located. + Files are common for all instances within a CHI. 
+ Default value - users.d + user: + type: object + description: "Default parameters for any user which will create" + properties: + default: + type: object + properties: + profile: + type: string + description: "ClickHouse server configuration `...` for any " + quota: + type: string + description: "ClickHouse server configuration `...` for any " + networksIP: + type: array + description: "ClickHouse server configuration `...` for any " + items: + type: string + password: + type: string + description: "ClickHouse server configuration `...` for any " + network: + type: object + description: "Default network parameters for any user which will create" + properties: + hostRegexpTemplate: + type: string + description: "ClickHouse server configuration `...` for any " + configurationRestartPolicy: + type: object + description: "Configuration restart policy describes what configuration changes require ClickHouse restart" + properties: + rules: + type: array + description: "Array of set of rules per specified ClickHouse versions" + items: + type: object + properties: + version: + type: string + description: "ClickHouse version expression" + rules: + type: array + description: "Set of configuration rules for specified ClickHouse version" + items: + type: object + description: "setting: value pairs for configuration restart policy" + x-kubernetes-preserve-unknown-fields: true + access: + type: object + description: "parameters which use for connect to clickhouse from clickhouse-operator deployment" + properties: + scheme: + type: string + description: "The scheme to user for connecting to ClickHouse. 
Possible values: http, https, auto" + username: + type: string + description: "ClickHouse username to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName" + password: + type: string + description: "ClickHouse password to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName" + rootCA: + type: string + description: "Root certificate authority that clients use when verifying server certificates. Used for https connection to ClickHouse" + secret: + type: object + properties: + namespace: + type: string + description: "Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances" + name: + type: string + description: "Name of k8s Secret with username and password to be used by operator to connect to ClickHouse instances" + port: + type: integer + minimum: 1 + maximum: 65535 + description: "Port to be used by operator to connect to ClickHouse instances" + timeouts: + type: object + description: "Timeouts used to limit connection and queries from the operator to ClickHouse instances, In seconds" + properties: + connect: + type: integer + minimum: 1 + maximum: 10 + description: "Timout to setup connection from the operator to ClickHouse instances. In seconds." + query: + type: integer + minimum: 1 + maximum: 600 + description: "Timout to perform SQL query from the operator to ClickHouse instances. In seconds." 
+ addons: + type: object + description: "Configuration addons specifies additional settings" + properties: + rules: + type: array + description: "Array of set of rules per specified ClickHouse versions" + items: + type: object + properties: + version: + type: string + description: "ClickHouse version expression" + spec: + type: object + description: "spec" + properties: + configuration: + type: object + description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource" + properties: + users: + type: object + description: "see same section from CR spec" + x-kubernetes-preserve-unknown-fields: true + profiles: + type: object + description: "see same section from CR spec" + x-kubernetes-preserve-unknown-fields: true + quotas: + type: object + description: "see same section from CR spec" + x-kubernetes-preserve-unknown-fields: true + settings: + type: object + description: "see same section from CR spec" + x-kubernetes-preserve-unknown-fields: true + files: + type: object + description: "see same section from CR spec" + x-kubernetes-preserve-unknown-fields: true + metrics: + type: object + description: "parameters which use for connect to fetch metrics from clickhouse by clickhouse-operator" + properties: + timeouts: + type: object + description: | + Timeouts used to limit connection and queries from the metrics exporter to ClickHouse instances + Specified in seconds. + properties: + collect: + type: integer + minimum: 1 + maximum: 600 + description: | + Timeout used to limit metrics collection request. In seconds. + Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle. + All collected metrics are returned. + tablesRegexp: + type: string + description: | + Regexp to match tables in system database to fetch metrics from. + Multiple tables can be matched using regexp. 
Matched tables are merged using merge() table function. + Default is "^(metrics|custom_metrics)$". + template: + type: object + description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment" + properties: + chi: + type: object + properties: + policy: + type: string + description: | + CHI template updates handling policy + Possible policy values: + - ReadOnStart. Accept CHIT updates on the operator's start only. + - ApplyOnNextReconcile. Accept CHIT updates at all times. Apply new CHITs on next regular reconcile of the CHI + enum: + - "" + - "ReadOnStart" + - "ApplyOnNextReconcile" + path: + type: string + description: "Path to folder where ClickHouseInstallationTemplate .yaml manifests are located." + reconcile: + type: object + description: "allow tuning reconciling process" + properties: + runtime: + type: object + description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle" + properties: + reconcileCHIsThreadsNumber: + type: integer + minimum: 1 + maximum: 65535 + description: "How many goroutines will be used to reconcile CHIs in parallel, 10 by default" + reconcileShardsThreadsNumber: + type: integer + minimum: 1 + maximum: 65535 + description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default" + reconcileShardsMaxConcurrencyPercent: + type: integer + minimum: 0 + maximum: 100 + description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default." 
+ statefulSet: + type: object + description: "Allow change default behavior for reconciling StatefulSet which generated by clickhouse-operator" + properties: + create: + type: object + description: "Behavior during create StatefulSet" + properties: + onFailure: + type: string + description: | + What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds + Possible options: + 1. abort - do nothing, just break the process and wait for admin. + 2. delete - delete newly created problematic StatefulSet. + 3. ignore (default) - ignore error, pretend nothing happened and move on to the next StatefulSet. + update: + type: object + description: "Behavior during update StatefulSet" + properties: + timeout: + type: integer + description: "How many seconds to wait for created/updated StatefulSet to be Ready" + pollInterval: + type: integer + description: "How many seconds to wait between checks for created/updated StatefulSet status" + onFailure: + type: string + description: | + What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds + Possible options: + 1. abort - do nothing, just break the process and wait for admin. + 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration. + 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet. + recreate: + type: object + description: "Behavior during recreate StatefulSet" + properties: + onDataLoss: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. 
+ onUpdateFailure: + type: string + description: | + What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready. + Possible options: + 1. abort - abort the process, do nothing with the problematic StatefulSet. + 2. recreate (default) - proceed and recreate StatefulSet. + host: + type: object + description: | + Whether the operator during reconcile procedure should wait for a ClickHouse host: + - to be excluded from a ClickHouse cluster + - to complete all running queries + - to be included into a ClickHouse cluster + respectfully before moving forward + properties: + wait: + type: object + properties: + exclude: &TypeStringBool + type: string + description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be excluded from a ClickHouse cluster" + enum: + # List StringBoolXXX constants from model + - "" + - "0" + - "1" + - "False" + - "false" + - "True" + - "true" + - "No" + - "no" + - "Yes" + - "yes" + - "Off" + - "off" + - "On" + - "on" + - "Disable" + - "disable" + - "Enable" + - "enable" + - "Disabled" + - "disabled" + - "Enabled" + - "enabled" + queries: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries" + include: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster" + replicas: + type: object + description: "Whether the operator during reconcile procedure should wait for replicas to catch-up" + properties: + all: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up" + new: + !!merge <<: *TypeStringBool + description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up" + delay: + type: integer + description: "replication max absolute delay 
to consider replica is not delayed" + probes: + type: object + description: "What probes the operator should wait during host launch procedure" + properties: + startup: + !!merge <<: *TypeStringBool + description: | + Whether the operator during host launch procedure should wait for startup probe to succeed. + In case probe is unspecified wait is assumed to be completed successfully. + Default option value is to do not wait. + readiness: + !!merge <<: *TypeStringBool + description: | + Whether the operator during host launch procedure should wait for readiness probe to succeed. + In case probe is unspecified wait is assumed to be completed successfully. + Default option value is to wait. + drop: + type: object + properties: + replicas: + type: object + description: | + Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated + properties: + onDelete: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop replicas when replica is deleted + onLostVolume: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop replicas when replica volume is lost + active: + !!merge <<: *TypeStringBool + description: | + Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated + annotation: + type: object + description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources" + properties: + include: + type: array + description: | + When propagating labels from the chi's `metadata.annotations` section to child objects' `metadata.annotations`, + include annotations with names from the following list + items: + type: string + exclude: + type: array + description: | + When propagating labels from the chi's `metadata.annotations` section to child objects' `metadata.annotations`, + exclude annotations with names from the 
following list + items: + type: string + label: + type: object + description: "defines which metadata.labels will include or exclude during render StatefulSet, Pod, PVC resources" + properties: + include: + type: array + description: | + When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`, + include labels from the following list + items: + type: string + exclude: + type: array + items: + type: string + description: | + When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`, + exclude labels from the following list + appendScope: + !!merge <<: *TypeStringBool + description: | + Whether to append *Scope* labels to StatefulSet and Pod + - "LabelShardScopeIndex" + - "LabelReplicaScopeIndex" + - "LabelCHIScopeIndex" + - "LabelCHIScopeCycleSize" + - "LabelCHIScopeCycleIndex" + - "LabelCHIScopeCycleOffset" + - "LabelClusterScopeIndex" + - "LabelClusterScopeCycleSize" + - "LabelClusterScopeCycleIndex" + - "LabelClusterScopeCycleOffset" + metrics: + type: object + description: "defines metrics exporter options" + properties: + labels: + type: object + description: "defines metric labels options" + properties: + exclude: + type: array + description: | + When adding labels to a metric exclude labels with names from the following list + items: + type: string + status: + type: object + description: "defines status options" + properties: + fields: + type: object + description: "defines status fields options" + properties: + action: + !!merge <<: *TypeStringBool + description: "Whether the operator should fill status field 'action'" + actions: + !!merge <<: *TypeStringBool + description: "Whether the operator should fill status field 'actions'" + error: + !!merge <<: *TypeStringBool + description: "Whether the operator should fill status field 'error'" + errors: + !!merge <<: *TypeStringBool + description: "Whether the operator should fill status field 'errors'" + statefulSet: + type: 
object + description: "define StatefulSet-specific parameters" + properties: + revisionHistoryLimit: + type: integer + description: "revisionHistoryLimit is the maximum number of revisions that will be\nmaintained in the StatefulSet's revision history. \nLook details in `statefulset.spec.revisionHistoryLimit`\n" + pod: + type: object + description: "define pod specific parameters" + properties: + terminationGracePeriod: + type: integer + description: "Optional duration in seconds the pod needs to terminate gracefully. \nLook details in `pod.spec.terminationGracePeriodSeconds`\n" + logger: + type: object + description: "allow setup clickhouse-operator logger behavior" + properties: + logtostderr: + type: string + description: "boolean, allows logs to stderr" + alsologtostderr: + type: string + description: "boolean allows logs to stderr and files both" + v: + type: string + description: "verbosity level of clickhouse-operator log, default - 1 max - 9" + stderrthreshold: + type: string + vmodule: + type: string + description: | + Comma-separated list of filename=N, where filename (can be a pattern) must have no .go ext, and N is a V level. + Ex.: file*=2 sets the 'V' to 2 in all files with names like file*. + log_backtrace_at: + type: string + description: | + It can be set to a file and line number with a logging line. + Ex.: file.go:123 + Each time when this line is being executed, a stack trace will be written to the Info log.