diff --git a/cmd/operator/app/thread_keeper.go b/cmd/operator/app/thread_keeper.go
index e5e707117..bd09323c6 100644
--- a/cmd/operator/app/thread_keeper.go
+++ b/cmd/operator/app/thread_keeper.go
@@ -59,7 +59,10 @@ func initKeeper(ctx context.Context) error {
err = ctrlRuntime.
NewControllerManagedBy(manager).
- For(&api.ClickHouseKeeperInstallation{}, builder.WithPredicates(keeperPredicate())).
+ For(
+ &api.ClickHouseKeeperInstallation{},
+ builder.WithPredicates(keeperPredicate()),
+ ).
Owns(&apps.StatefulSet{}).
Complete(
&controller.Controller{
@@ -88,40 +91,30 @@ func runKeeper(ctx context.Context) error {
func keeperPredicate() predicate.Funcs {
return predicate.Funcs{
CreateFunc: func(e event.CreateEvent) bool {
- obj, ok := e.Object.(*api.ClickHouseKeeperInstallation)
+ new, ok := e.Object.(*api.ClickHouseKeeperInstallation)
if !ok {
return false
}
- // Check if namespace should be watched (includes deny list check)
- if !chop.Config().IsNamespaceWatched(obj.Namespace) {
- logger.V(2).Info("chkInformer: skip event, namespace is not watched or is in deny list", "namespace", obj.Namespace)
+ if !controller.ShouldEnqueue(new) {
return false
}
- if obj.Spec.Suspend.Value() {
- return false
- }
return true
},
DeleteFunc: func(e event.DeleteEvent) bool {
return true
},
UpdateFunc: func(e event.UpdateEvent) bool {
- obj, ok := e.ObjectNew.(*api.ClickHouseKeeperInstallation)
+ new, ok := e.ObjectNew.(*api.ClickHouseKeeperInstallation)
if !ok {
return false
}
- // Check if namespace should be watched (includes deny list check)
- if !chop.Config().IsNamespaceWatched(obj.Namespace) {
- logger.V(2).Info("chkInformer: skip event, namespace is not watched or is in deny list", "namespace", obj.Namespace)
+ if !controller.ShouldEnqueue(new) {
return false
}
- if obj.Spec.Suspend.Value() {
- return false
- }
return true
},
GenericFunc: func(e event.GenericEvent) bool {
diff --git a/config/config-dev.yaml b/config/config-dev.yaml
index 88a239a4e..431c1f92b 100644
--- a/config/config-dev.yaml
+++ b/config/config-dev.yaml
@@ -247,6 +247,10 @@ clickhouse:
# Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
# All collected metrics are returned.
collect: 9
+ # Regexp to match tables in system database to fetch metrics from.
+ # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics.
+ tablesRegexp: "^(metrics|custom_metrics)$"
keeper:
configuration:
@@ -351,6 +355,20 @@ reconcile:
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: abort
+ # Recreate StatefulSet scenario
+ recreate:
+ # What to do in case operator is in need to recreate StatefulSet?
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. recreate - proceed and recreate StatefulSet.
+
+ # Triggered when PVC data loss or missing volumes are detected
+ onDataLoss: recreate
+ # Triggered when StatefulSet update fails or StatefulSet is not ready
+ onUpdateFailure: recreate
+
# Reconcile Host scenario
host:
# The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions:
diff --git a/config/config.yaml b/config/config.yaml
index 3925c35a5..f7f6c1024 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -175,7 +175,7 @@ clickhouse:
# Specified in seconds.
timeouts:
# Timout to setup connection from the operator to ClickHouse instances. In seconds.
- connect: 1
+ connect: 5
# Timout to perform SQL query from the operator to ClickHouse instances. In seconds.
query: 4
@@ -216,7 +216,6 @@ clickhouse:
quotas:
settings:
files:
-
- version: ">= 23.5"
spec:
configuration:
@@ -245,6 +244,10 @@ clickhouse:
# Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
# All collected metrics are returned.
collect: 9
+ # Regexp to match tables in system database to fetch metrics from.
+ # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics.
+ tablesRegexp: "^(metrics|custom_metrics)$"
keeper:
configuration:
@@ -349,6 +352,20 @@ reconcile:
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: abort
+ # Recreate StatefulSet scenario
+ recreate:
+ # What to do in case operator is in need to recreate StatefulSet?
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. recreate - proceed and recreate StatefulSet.
+
+ # Triggered when PVC data loss or missing volumes are detected
+ onDataLoss: recreate
+ # Triggered when StatefulSet update fails or StatefulSet is not ready
+ onUpdateFailure: recreate
+
# Reconcile Host scenario
host:
# The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions:
diff --git a/deploy/builder/templates-config/config.yaml b/deploy/builder/templates-config/config.yaml
index 8129e82c9..0226d778b 100644
--- a/deploy/builder/templates-config/config.yaml
+++ b/deploy/builder/templates-config/config.yaml
@@ -169,7 +169,7 @@ clickhouse:
# Specified in seconds.
timeouts:
# Timout to setup connection from the operator to ClickHouse instances. In seconds.
- connect: 1
+ connect: 5
# Timout to perform SQL query from the operator to ClickHouse instances. In seconds.
query: 4
@@ -210,7 +210,6 @@ clickhouse:
quotas:
settings:
files:
-
- version: ">= 23.5"
spec:
configuration:
@@ -239,6 +238,10 @@ clickhouse:
# Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
# All collected metrics are returned.
collect: 9
+ # Regexp to match tables in system database to fetch metrics from.
+ # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics.
+ tablesRegexp: "^(metrics|custom_metrics)$"
keeper:
configuration:
@@ -343,6 +346,20 @@ reconcile:
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: abort
+ # Recreate StatefulSet scenario
+ recreate:
+ # What to do in case operator is in need to recreate StatefulSet?
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. recreate - proceed and recreate StatefulSet.
+
+ # Triggered when PVC data loss or missing volumes are detected
+ onDataLoss: recreate
+ # Triggered when StatefulSet update fails or StatefulSet is not ready
+ onUpdateFailure: recreate
+
# Reconcile Host scenario
host:
# The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions:
diff --git a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml
index 012f8ea0f..8a820aee2 100644
--- a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml
+++ b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml
@@ -26,6 +26,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -49,10 +53,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -497,6 +497,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
diff --git a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml
index 629f369b5..dfe3ce30e 100644
--- a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml
+++ b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml
@@ -240,6 +240,12 @@ spec:
Timeout used to limit metrics collection request. In seconds.
Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
All collected metrics are returned.
+ tablesRegexp:
+ type: string
+ description: |
+ Regexp to match tables in system database to fetch metrics from.
+ Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ Default is "^(metrics|custom_metrics)$".
template:
type: object
description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
@@ -318,6 +324,24 @@ spec:
1. abort - do nothing, just break the process and wait for admin.
2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
host:
type: object
description: |
diff --git a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml
index 5323073bf..6374c1426 100644
--- a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml
+++ b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml
@@ -22,6 +22,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -45,15 +49,10 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- - name: hosts-unchanged
+ - name: hosts-completed
type: integer
- description: Unchanged hosts count
- priority: 1 # show in wide view
- jsonPath: .status.hostsUnchanged
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
- name: hosts-updated
type: integer
description: Updated hosts count
@@ -64,20 +63,11 @@ spec:
description: Added hosts count
priority: 1 # show in wide view
jsonPath: .status.hostsAdded
- - name: hosts-completed
- type: integer
- description: Completed hosts count
- jsonPath: .status.hostsCompleted
- name: hosts-deleted
type: integer
description: Hosts deleted count
priority: 1 # show in wide view
jsonPath: .status.hostsDeleted
- - name: hosts-delete
- type: integer
- description: Hosts to be deleted count
- priority: 1 # show in wide view
- jsonPath: .status.hostsDelete
- name: endpoint
type: string
description: Client access endpoint
@@ -243,10 +233,12 @@ spec:
normalized:
type: object
description: "Normalized resource requested"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
normalizedCompleted:
type: object
description: "Normalized resource completed"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
hostsWithTablesCreated:
type: array
@@ -284,7 +276,7 @@ spec:
stop: &TypeStringBool
type: string
description: |
- Allows to stop all ClickHouse clusters defined in a CHI.
+ Allows to stop all ClickHouse Keeper clusters defined in a CHK.
Works as the following:
- When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
- When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s.
@@ -528,6 +520,11 @@ spec:
description: |
optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
override top-level `chi.spec.configuration.files`
+ templates:
+ <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
pdbManaged:
<<: *TypeStringBool
description: |
@@ -545,11 +542,6 @@ spec:
by specifying 0. This is a mutually exclusive setting with "minAvailable".
minimum: 0
maximum: 65535
- templates:
- <<: *TypeTemplateNames
- description: |
- optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
- override top-level `chi.spec.configuration.templates`
layout:
type: object
description: |
diff --git a/deploy/devspace/yq_transform_clickhouse-operator-install.sh b/deploy/devspace/yq_transform_clickhouse-operator-install.sh
index 8b1c7bba5..db5ec1eed 100755
--- a/deploy/devspace/yq_transform_clickhouse-operator-install.sh
+++ b/deploy/devspace/yq_transform_clickhouse-operator-install.sh
@@ -14,4 +14,9 @@ yq eval -e --inplace "(select(.kind == \"Deployment\" and .metadata.name == \"cl
yq eval -e --inplace '(select(.kind == "Deployment" and .metadata.name == "clickhouse-operator") | .spec.template.spec.containers[] | select(.name=="metrics-exporter") | .imagePullPolicy) = "IfNotPresent"' "${CUR_DIR}/clickhouse-operator-install.yaml"
yq eval -e --inplace '(select(.kind == "Deployment" and .metadata.name == "clickhouse-operator") | .spec.template.spec.containers[] | select(.name=="metrics-exporter") | .securityContext.capabilities.add) = ["SYS_PTRACE"]' "${CUR_DIR}/clickhouse-operator-install.yaml"
-sed -i "s/namespace: kube-system/namespace: ${OPERATOR_NAMESPACE}/" "${CUR_DIR}/clickhouse-operator-install.yaml"
+# Use sed with compatibility for both macOS and Linux
+if [[ "$(uname)" == "Darwin" ]]; then
+ sed -i '' "s/namespace: kube-system/namespace: ${OPERATOR_NAMESPACE}/" "${CUR_DIR}/clickhouse-operator-install.yaml"
+else
+ sed -i "s/namespace: kube-system/namespace: ${OPERATOR_NAMESPACE}/" "${CUR_DIR}/clickhouse-operator-install.yaml"
+fi
diff --git a/deploy/grafana/grafana-with-grafana-operator/grafana-cr-template.yaml b/deploy/grafana/grafana-with-grafana-operator/grafana-cr-template.yaml
index 1b9b41e0b..989edcf7f 100644
--- a/deploy/grafana/grafana-with-grafana-operator/grafana-cr-template.yaml
+++ b/deploy/grafana/grafana-with-grafana-operator/grafana-cr-template.yaml
@@ -6,6 +6,12 @@ metadata:
labels:
app: grafana
spec:
+ # Service account for the Grafana Operator to manage datasources and dashboards
+ serviceAccount:
+ metadata:
+ annotations: {}
+ labels: {}
+
deployment:
metadata:
annotations:
diff --git a/deploy/grafana/grafana-with-grafana-operator/install-grafana-operator.sh b/deploy/grafana/grafana-with-grafana-operator/install-grafana-operator.sh
index dce405fbd..814f9b76e 100755
--- a/deploy/grafana/grafana-with-grafana-operator/install-grafana-operator.sh
+++ b/deploy/grafana/grafana-with-grafana-operator/install-grafana-operator.sh
@@ -65,7 +65,12 @@ echo "Setup Grafana operator into ${GRAFANA_NAMESPACE} namespace"
kubectl create namespace "${GRAFANA_NAMESPACE}" || true
# Setup grafana-operator into dedicated namespace
-sed -i "s/namespace: system/namespace: ${GRAFANA_NAMESPACE}/g" "${GRAFANA_OPERATOR_DIR}/deploy/kustomize/overlays/namespace_scoped/kustomization.yaml"
+# Use sed with compatibility for both macOS and Linux
+if [[ "$(uname)" == "Darwin" ]]; then
+ sed -i '' "s/namespace: system/namespace: ${GRAFANA_NAMESPACE}/g" "${GRAFANA_OPERATOR_DIR}/deploy/kustomize/overlays/namespace_scoped/kustomization.yaml"
+else
+ sed -i "s/namespace: system/namespace: ${GRAFANA_NAMESPACE}/g" "${GRAFANA_OPERATOR_DIR}/deploy/kustomize/overlays/namespace_scoped/kustomization.yaml"
+fi
kubectl kustomize "${GRAFANA_OPERATOR_DIR}/deploy/kustomize/overlays/namespace_scoped" --load-restrictor LoadRestrictionsNone | kubectl apply --server-side -f -
kubectl wait deployment/grafana-operator-controller-manager -n "${GRAFANA_NAMESPACE}" --for=condition=available --timeout=300s
diff --git a/deploy/helm/clickhouse-operator/Chart.yaml b/deploy/helm/clickhouse-operator/Chart.yaml
index d5d319523..ad9a7830a 100644
--- a/deploy/helm/clickhouse-operator/Chart.yaml
+++ b/deploy/helm/clickhouse-operator/Chart.yaml
@@ -17,8 +17,8 @@ description: |-
kubectl apply -f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml
```
type: application
-version: 0.25.6
-appVersion: 0.25.6
+version: 0.26.0
+appVersion: 0.26.0
home: https://github.com/Altinity/clickhouse-operator
icon: https://logosandtypes.com/wp-content/uploads/2020/12/altinity.svg
maintainers:
diff --git a/deploy/helm/clickhouse-operator/README.md b/deploy/helm/clickhouse-operator/README.md
index 3739cbbd7..b0ac940c1 100644
--- a/deploy/helm/clickhouse-operator/README.md
+++ b/deploy/helm/clickhouse-operator/README.md
@@ -1,6 +1,6 @@
# altinity-clickhouse-operator
-  
+  
Helm chart to deploy [altinity-clickhouse-operator](https://github.com/Altinity/clickhouse-operator).
@@ -75,10 +75,12 @@ crdHook:
| commonLabels | object | `{}` | set of labels that will be applied to all the resources for the operator |
| configs | object | check the `values.yaml` file for the config content (auto-generated from latest operator release) | clickhouse operator configs |
| crdHook.affinity | object | `{}` | affinity for CRD installation job |
+| crdHook.annotations | object | `{}` | additional annotations for CRD installation job |
| crdHook.enabled | bool | `true` | enable automatic CRD installation/update via pre-install/pre-upgrade hooks when disabled, CRDs must be installed manually using kubectl apply |
| crdHook.image.pullPolicy | string | `"IfNotPresent"` | image pull policy for CRD installation job |
| crdHook.image.repository | string | `"bitnami/kubectl"` | image repository for CRD installation job |
| crdHook.image.tag | string | `"latest"` | image tag for CRD installation job |
+| crdHook.imagePullSecrets | list | `[]` | image pull secrets for CRD installation job possible value format `[{"name":"your-secret-name"}]`, check `kubectl explain pod.spec.imagePullSecrets` for details |
| crdHook.nodeSelector | object | `{}` | node selector for CRD installation job |
| crdHook.resources | object | `{}` | resource limits and requests for CRD installation job |
| crdHook.tolerations | list | `[]` | tolerations for CRD installation job |
@@ -93,6 +95,7 @@ crdHook:
| metrics.enabled | bool | `true` | |
| metrics.env | list | `[]` | additional environment variables for the deployment of metrics-exporter containers possible format value `[{"name": "SAMPLE", "value": "text"}]` |
| metrics.image.pullPolicy | string | `"IfNotPresent"` | image pull policy |
+| metrics.image.registry | string | `""` | optional image registry prefix (e.g. 1234567890.dkr.ecr.us-east-1.amazonaws.com) |
| metrics.image.repository | string | `"altinity/metrics-exporter"` | image repository |
| metrics.image.tag | string | `""` | image tag (chart's appVersion value will be used if not set) |
| metrics.resources | object | `{}` | custom resource configuration |
@@ -102,6 +105,7 @@ crdHook:
| operator.containerSecurityContext | object | `{}` | |
| operator.env | list | `[]` | additional environment variables for the clickhouse-operator container in deployment possible format value `[{"name": "SAMPLE", "value": "text"}]` |
| operator.image.pullPolicy | string | `"IfNotPresent"` | image pull policy |
+| operator.image.registry | string | `""` | optional image registry prefix (e.g. 1234567890.dkr.ecr.us-east-1.amazonaws.com) |
| operator.image.repository | string | `"altinity/clickhouse-operator"` | image repository |
| operator.image.tag | string | `""` | image tag (chart's appVersion value will be used if not set) |
| operator.priorityClassName | string | "" | priority class name for the clickhouse-operator deployment, check `kubectl explain pod.spec.priorityClassName` for details |
@@ -122,7 +126,7 @@ crdHook:
| serviceMonitor.clickhouseMetrics.metricRelabelings | list | `[]` | |
| serviceMonitor.clickhouseMetrics.relabelings | list | `[]` | |
| serviceMonitor.clickhouseMetrics.scrapeTimeout | string | `""` | |
-| serviceMonitor.enabled | bool | `false` | ServiceMonitor Custom resource is created for a [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) In serviceMonitor will be created two endpoints ch-metrics on port 8888 and op-metrics # 9999. You can specify interval, scrapeTimeout, relabelings, metricRelabelings for each endpoint below |
+| serviceMonitor.enabled | bool | `false` | ServiceMonitor Custom resource is created for a [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) In serviceMonitor will be created two endpoints ch-metrics on port 8888 and op-metrics # 9999. You can specify interval, scrapeTimeout, relabelings, metricRelabelings for each endpoint below |
| serviceMonitor.operatorMetrics.interval | string | `"30s"` | |
| serviceMonitor.operatorMetrics.metricRelabelings | list | `[]` | |
| serviceMonitor.operatorMetrics.relabelings | list | `[]` | |
diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml
index 1a42a88be..03bb8e057 100644
--- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml
+++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -26,6 +26,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -49,10 +53,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -497,6 +497,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml
index 0779a3051..4350b913e 100644
--- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml
+++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -26,6 +26,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -49,10 +53,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -497,6 +497,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml
index c2604dee9..405dac0a1 100644
--- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml
+++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml
@@ -1,13 +1,13 @@
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.6
+ clickhouse-keeper.altinity.com/chop: 0.26.0
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -22,6 +22,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -45,15 +49,10 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- - name: hosts-unchanged
+ - name: hosts-completed
type: integer
- description: Unchanged hosts count
- priority: 1 # show in wide view
- jsonPath: .status.hostsUnchanged
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
- name: hosts-updated
type: integer
description: Updated hosts count
@@ -64,20 +63,11 @@ spec:
description: Added hosts count
priority: 1 # show in wide view
jsonPath: .status.hostsAdded
- - name: hosts-completed
- type: integer
- description: Completed hosts count
- jsonPath: .status.hostsCompleted
- name: hosts-deleted
type: integer
description: Hosts deleted count
priority: 1 # show in wide view
jsonPath: .status.hostsDeleted
- - name: hosts-delete
- type: integer
- description: Hosts to be deleted count
- priority: 1 # show in wide view
- jsonPath: .status.hostsDelete
- name: endpoint
type: string
description: Client access endpoint
@@ -243,10 +233,12 @@ spec:
normalized:
type: object
description: "Normalized resource requested"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
normalizedCompleted:
type: object
description: "Normalized resource completed"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
hostsWithTablesCreated:
type: array
@@ -284,7 +276,7 @@ spec:
stop: &TypeStringBool
type: string
description: |
- Allows to stop all ClickHouse clusters defined in a CHI.
+ Allows to stop all ClickHouse Keeper clusters defined in a CHK.
Works as the following:
- When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
- When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s.
@@ -527,6 +519,11 @@ spec:
description: |
optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
override top-level `chi.spec.configuration.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
pdbManaged:
!!merge <<: *TypeStringBool
description: |
@@ -544,11 +541,6 @@ spec:
by specifying 0. This is a mutually exclusive setting with "minAvailable".
minimum: 0
maximum: 65535
- templates:
- !!merge <<: *TypeTemplateNames
- description: |
- optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
- override top-level `chi.spec.configuration.templates`
layout:
type: object
description: |
diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml
index 19434d6d5..fb41e3d13 100644
--- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml
+++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml
@@ -7,7 +7,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -240,6 +240,12 @@ spec:
Timeout used to limit metrics collection request. In seconds.
Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
All collected metrics are returned.
+ tablesRegexp:
+ type: string
+ description: |
+ Regexp to match tables in system database to fetch metrics from.
+ Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ Default is "^(metrics|custom_metrics)$".
template:
type: object
description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
@@ -318,6 +324,24 @@ spec:
1. abort - do nothing, just break the process and wait for admin.
2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
host:
type: object
description: |
diff --git a/deploy/helm/clickhouse-operator/files/Altinity_ClickHouse_Operator_dashboard.json b/deploy/helm/clickhouse-operator/files/Altinity_ClickHouse_Operator_dashboard.json
index 0f3e34b85..5b299d9ba 100644
--- a/deploy/helm/clickhouse-operator/files/Altinity_ClickHouse_Operator_dashboard.json
+++ b/deploy/helm/clickhouse-operator/files/Altinity_ClickHouse_Operator_dashboard.json
@@ -1282,7 +1282,7 @@
{
"targetBlank": true,
"title": "max_concurent_queries",
- "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max_concurrent_queries"
+ "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries"
},
{
"targetBlank": true,
@@ -1412,7 +1412,7 @@
{
"targetBlank": true,
"title": "max_concurent_queries",
- "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max-concurrent-queries"
+ "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries"
},
{
"targetBlank": true,
@@ -1727,7 +1727,7 @@
{
"targetBlank": true,
"title": "max_concurent_queries",
- "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max_concurrent_queries"
+ "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries"
},
{
"targetBlank": true,
@@ -2253,7 +2253,7 @@
{
"targetBlank": true,
"title": "max_replica_delay_for_distributed_queries",
- "url": "https://clickhouse.com/docs/en/operations/settings/settings#settings-max_replica_delay_for_distributed_queries"
+ "url": "https://clickhouse.com/docs/operations/settings/settings#max_replica_delay_for_distributed_queries"
}
],
"options": {
@@ -4298,7 +4298,7 @@
{
"targetBlank": true,
"title": "mark_cache_size",
- "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#server-mark-cache-size"
+ "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#mark_cache_size"
},
{
"targetBlank": true,
@@ -5335,12 +5335,12 @@
{
"targetBlank": true,
"title": "max_connections",
- "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max-connections"
+ "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_connections"
},
{
"targetBlank": true,
"title": "max_distributed_connections",
- "url": "https://clickhouse.com/docs/en/operations/settings/settings#max-distributed-connections"
+ "url": "https://clickhouse.com/docs/operations/settings/settings#max_distributed_connections"
},
{
"targetBlank": true,
diff --git a/deploy/helm/clickhouse-operator/files/ClickHouseKeeper_dashboard.json b/deploy/helm/clickhouse-operator/files/ClickHouseKeeper_dashboard.json
index 566f68240..b69a07347 100644
--- a/deploy/helm/clickhouse-operator/files/ClickHouseKeeper_dashboard.json
+++ b/deploy/helm/clickhouse-operator/files/ClickHouseKeeper_dashboard.json
@@ -1001,6 +1001,24 @@
"skipUrlSync": false,
"type": "datasource"
},
+ {
+ "current": {
+ "selected": false,
+ "text": "prometheus",
+ "value": "prometheus"
+ },
+ "hide": 2,
+ "includeAll": false,
+ "multi": false,
+ "name": "DS_PROMETHEUS",
+ "options": [],
+ "query": "prometheus",
+ "queryValue": "",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
{
"allValue": ".+",
"current": {},
diff --git a/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml b/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml
index 1b0ff45ad..6dfc8d0d2 100644
--- a/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml
+++ b/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml
@@ -2,9 +2,9 @@
#
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.6
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.26.0
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.6
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.26.0
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -66,7 +66,7 @@ spec:
name: {{ include "altinity-clickhouse-operator.fullname" . }}-keeper-usersd-files
containers:
- name: {{ .Chart.Name }}
- image: {{ .Values.operator.image.repository }}:{{ include "altinity-clickhouse-operator.operator.tag" . }}
+ image: {{ if .Values.operator.image.registry }}{{ .Values.operator.image.registry | trimSuffix "/" }}/{{ end }}{{ .Values.operator.image.repository }}:{{ include "altinity-clickhouse-operator.operator.tag" . }}
imagePullPolicy: {{ .Values.operator.image.pullPolicy }}
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -144,7 +144,7 @@ spec:
securityContext: {{ toYaml .Values.operator.containerSecurityContext | nindent 12 }}
{{ if .Values.metrics.enabled }}
- name: metrics-exporter
- image: {{ .Values.metrics.image.repository }}:{{ include "altinity-clickhouse-operator.metrics.tag" . }}
+ image: {{ if .Values.metrics.image.registry }}{{ .Values.metrics.image.registry | trimSuffix "/" }}/{{ end }}{{ .Values.metrics.image.repository }}:{{ include "altinity-clickhouse-operator.metrics.tag" . }}
imagePullPolicy: {{ .Values.metrics.image.pullPolicy }}
volumeMounts:
- name: etc-clickhouse-operator-folder
diff --git a/deploy/helm/clickhouse-operator/templates/generated/Secret-clickhouse-operator.yaml b/deploy/helm/clickhouse-operator/templates/generated/Secret-clickhouse-operator.yaml
index faf5922dc..aed644b44 100644
--- a/deploy/helm/clickhouse-operator/templates/generated/Secret-clickhouse-operator.yaml
+++ b/deploy/helm/clickhouse-operator/templates/generated/Secret-clickhouse-operator.yaml
@@ -3,7 +3,7 @@
# Template parameters available:
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
diff --git a/deploy/helm/clickhouse-operator/templates/hooks/crd-install-configmap.yaml b/deploy/helm/clickhouse-operator/templates/hooks/crd-install-configmap.yaml
index 1946b6e51..e31c127de 100644
--- a/deploy/helm/clickhouse-operator/templates/hooks/crd-install-configmap.yaml
+++ b/deploy/helm/clickhouse-operator/templates/hooks/crd-install-configmap.yaml
@@ -11,6 +11,9 @@ metadata:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "-7"
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ {{- with .Values.crdHook.annotations }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
data:
clickhouseinstallations.yaml: |
{{ .Files.Get "crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml" | indent 4 }}
diff --git a/deploy/helm/clickhouse-operator/templates/hooks/crd-install-job.yaml b/deploy/helm/clickhouse-operator/templates/hooks/crd-install-job.yaml
index 9b9b4e005..16e1d226d 100644
--- a/deploy/helm/clickhouse-operator/templates/hooks/crd-install-job.yaml
+++ b/deploy/helm/clickhouse-operator/templates/hooks/crd-install-job.yaml
@@ -11,6 +11,9 @@ metadata:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ {{- with .Values.crdHook.annotations }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
spec:
template:
metadata:
@@ -21,6 +24,10 @@ spec:
spec:
serviceAccountName: {{ include "altinity-clickhouse-operator.fullname" . }}-crd-install
restartPolicy: OnFailure
+ {{- with .Values.crdHook.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
{{- with .Values.crdHook.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
diff --git a/deploy/helm/clickhouse-operator/templates/hooks/crd-install-rbac.yaml b/deploy/helm/clickhouse-operator/templates/hooks/crd-install-rbac.yaml
index bc776c990..78ccea60c 100644
--- a/deploy/helm/clickhouse-operator/templates/hooks/crd-install-rbac.yaml
+++ b/deploy/helm/clickhouse-operator/templates/hooks/crd-install-rbac.yaml
@@ -12,6 +12,9 @@ metadata:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "-6"
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ {{- with .Values.crdHook.annotations }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
@@ -24,6 +27,9 @@ metadata:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "-6"
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ {{- with .Values.crdHook.annotations }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
rules:
- apiGroups:
- apiextensions.k8s.io
@@ -47,6 +53,9 @@ metadata:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "-6"
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ {{- with .Values.crdHook.annotations }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
diff --git a/deploy/helm/clickhouse-operator/values.yaml b/deploy/helm/clickhouse-operator/values.yaml
index 5567163ec..32d4c5c27 100644
--- a/deploy/helm/clickhouse-operator/values.yaml
+++ b/deploy/helm/clickhouse-operator/values.yaml
@@ -18,6 +18,10 @@ crdHook:
tag: "latest"
# crdHook.image.pullPolicy -- image pull policy for CRD installation job
pullPolicy: IfNotPresent
+ # crdHook.imagePullSecrets -- image pull secrets for CRD installation job
+ # possible value format `[{"name":"your-secret-name"}]`,
+ # check `kubectl explain pod.spec.imagePullSecrets` for details
+ imagePullSecrets: []
# crdHook.resources -- resource limits and requests for CRD installation job
resources: {}
# limits:
@@ -32,8 +36,12 @@ crdHook:
tolerations: []
# crdHook.affinity -- affinity for CRD installation job
affinity: {}
+ # crdHook.annotations -- additional annotations for CRD installation job
+ annotations: {}
operator:
image:
+ # operator.image.registry -- optional image registry prefix (e.g. 1234567890.dkr.ecr.us-east-1.amazonaws.com)
+ registry: ""
# operator.image.repository -- image repository
repository: altinity/clickhouse-operator
# operator.image.tag -- image tag (chart's appVersion value will be used if not set)
@@ -59,6 +67,8 @@ operator:
metrics:
enabled: true
image:
+ # metrics.image.registry -- optional image registry prefix (e.g. 1234567890.dkr.ecr.us-east-1.amazonaws.com)
+ registry: ""
# metrics.image.repository -- image repository
repository: altinity/metrics-exporter
# metrics.image.tag -- image tag (chart's appVersion value will be used if not set)
@@ -398,7 +408,7 @@ configs:
# Specified in seconds.
timeouts:
# Timout to setup connection from the operator to ClickHouse instances. In seconds.
- connect: 1
+ connect: 5
# Timout to perform SQL query from the operator to ClickHouse instances. In seconds.
query: 4
################################################
@@ -464,6 +474,10 @@ configs:
# Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
# All collected metrics are returned.
collect: 9
+ # Regexp to match tables in system database to fetch metrics from.
+ # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics.
+ tablesRegexp: "^(metrics|custom_metrics)$"
keeper:
configuration:
################################################
@@ -559,6 +573,19 @@ configs:
# Follow 'abort' path afterwards.
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: abort
+ # Recreate StatefulSet scenario
+ recreate:
+ # What to do in case operator is in need to recreate StatefulSet?
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. recreate - proceed and recreate StatefulSet.
+
+ # Triggered when PVC data loss or missing volumes are detected
+ onDataLoss: recreate
+ # Triggered when StatefulSet update fails or StatefulSet is not ready
+ onUpdateFailure: recreate
# Reconcile Host scenario
host:
# The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions:
diff --git a/deploy/minio/install-minio-operator.sh b/deploy/minio/install-minio-operator.sh
index 0f6158fbc..9aee125b4 100755
--- a/deploy/minio/install-minio-operator.sh
+++ b/deploy/minio/install-minio-operator.sh
@@ -63,13 +63,21 @@ echo "Setup minio.io operator ${MINIO_OPERATOR_VERSION} into ${MINIO_NAMESPACE}
## TODO: need to refactor after next minio-operator release
MINIO_KUSTOMIZE_DIR="${MINIO_OPERATOR_DIR}/resources"
-sed -i -e "s/replicas: 2/replicas: 1/" $MINIO_KUSTOMIZE_DIR/base/deployment.yaml
-sed -i -e "s/name: minio-operator/name: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/namespace.yaml
-sed -i -e "s/: restricted/: baseline/" $MINIO_KUSTOMIZE_DIR/base/namespace.yaml
-sed -i -e "s/namespace: default/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/*.yaml
-sed -i -e "s/namespace: minio-operator/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/*.yaml
-sed -i -e "s/namespace: minio-operator/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/kustomization.yaml
-sed -i -e "s/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/" $MINIO_KUSTOMIZE_DIR/base/*.yaml
+
+# Use sed with compatibility for both macOS and Linux (a function avoids the
+# word-splitting of '' that an unquoted $SED_INPLACE variable would suffer,
+# which makes BSD sed on macOS treat a literal '' as a backup suffix)
+if [[ "$(uname)" == "Darwin" ]]; then
+  sed_inplace() { sed -i '' "$@"; }
+else
+  sed_inplace() { sed -i "$@"; }
+fi
+sed_inplace -e "s/replicas: 2/replicas: 1/" $MINIO_KUSTOMIZE_DIR/base/deployment.yaml
+sed_inplace -e "s/name: minio-operator/name: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/namespace.yaml
+sed_inplace -e "s/: restricted/: baseline/" $MINIO_KUSTOMIZE_DIR/base/namespace.yaml
+sed_inplace -e "s/namespace: default/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/*.yaml
+sed_inplace -e "s/namespace: minio-operator/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/*.yaml
+sed_inplace -e "s/namespace: minio-operator/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/kustomization.yaml
+sed_inplace -e "s/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/" $MINIO_KUSTOMIZE_DIR/base/*.yaml
# Setup minio-operator into dedicated namespace via kustomize
kubectl --namespace="${MINIO_NAMESPACE}" apply -k "${MINIO_KUSTOMIZE_DIR}"
diff --git a/deploy/operator-web-installer/clickhouse-operator-install.sh b/deploy/operator-web-installer/clickhouse-operator-install.sh
index 1c3feb65a..368d88f7f 100755
--- a/deploy/operator-web-installer/clickhouse-operator-install.sh
+++ b/deploy/operator-web-installer/clickhouse-operator-install.sh
@@ -141,9 +141,6 @@ check_envsubst_available
# Manifest is expected to be ready-to-use manifest file
MANIFEST="${MANIFEST:-""}"
-# Template can have params to substitute
-DEFAULT_TEMPLATE="https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-template.yaml"
-TEMPLATE="${TEMPLATE:-"${DEFAULT_TEMPLATE}"}"
# Namespace to install operator
OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"kube-system"}"
METRICS_EXPORTER_NAMESPACE="${OPERATOR_NAMESPACE}"
@@ -153,6 +150,9 @@ if [[ -z "${OPERATOR_VERSION}" ]]; then
RELEASE_VERSION=$(get_file https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/release)
fi
OPERATOR_VERSION="${OPERATOR_VERSION:-"${RELEASE_VERSION}"}"
+# Template can have params to substitute
+DEFAULT_TEMPLATE="https://raw.githubusercontent.com/Altinity/clickhouse-operator/${OPERATOR_VERSION:-master}/deploy/operator/clickhouse-operator-install-template.yaml"
+TEMPLATE="${TEMPLATE:-"${DEFAULT_TEMPLATE}"}"
OPERATOR_IMAGE="${OPERATOR_IMAGE:-"altinity/clickhouse-operator:${OPERATOR_VERSION}"}"
OPERATOR_IMAGE_PULL_POLICY="${OPERATOR_IMAGE_PULL_POLICY:-"Always"}"
METRICS_EXPORTER_IMAGE="${METRICS_EXPORTER_IMAGE:-"altinity/metrics-exporter:${OPERATOR_VERSION}"}"
@@ -199,4 +199,4 @@ elif [[ ! -z "${TEMPLATE}" ]]; then
)
else
echo "Neither manifest nor template available. Abort."
-fi
+fi
\ No newline at end of file
diff --git a/deploy/operator/clickhouse-operator-install-ansible.yaml b/deploy/operator/clickhouse-operator-install-ansible.yaml
index c781de370..50ee7b39c 100644
--- a/deploy/operator/clickhouse-operator-install-ansible.yaml
+++ b/deploy/operator/clickhouse-operator-install-ansible.yaml
@@ -11,14 +11,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -33,6 +33,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -56,10 +60,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -504,6 +504,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
@@ -1470,14 +1544,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1492,6 +1566,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -1515,10 +1593,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -1963,6 +2037,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
@@ -2932,7 +3080,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -3165,6 +3313,12 @@ spec:
Timeout used to limit metrics collection request. In seconds.
Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
All collected metrics are returned.
+ tablesRegexp:
+ type: string
+ description: |
+ Regexp to match tables in system database to fetch metrics from.
+ Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ Default is "^(metrics|custom_metrics)$".
template:
type: object
description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
@@ -3243,6 +3397,24 @@ spec:
1. abort - do nothing, just break the process and wait for admin.
2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
host:
type: object
description: |
@@ -3470,14 +3642,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.6
+ clickhouse-keeper.altinity.com/chop: 0.26.0
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3492,6 +3664,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -3515,15 +3691,10 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- - name: hosts-unchanged
+ - name: hosts-completed
type: integer
- description: Unchanged hosts count
- priority: 1 # show in wide view
- jsonPath: .status.hostsUnchanged
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
- name: hosts-updated
type: integer
description: Updated hosts count
@@ -3534,20 +3705,11 @@ spec:
description: Added hosts count
priority: 1 # show in wide view
jsonPath: .status.hostsAdded
- - name: hosts-completed
- type: integer
- description: Completed hosts count
- jsonPath: .status.hostsCompleted
- name: hosts-deleted
type: integer
description: Hosts deleted count
priority: 1 # show in wide view
jsonPath: .status.hostsDeleted
- - name: hosts-delete
- type: integer
- description: Hosts to be deleted count
- priority: 1 # show in wide view
- jsonPath: .status.hostsDelete
- name: endpoint
type: string
description: Client access endpoint
@@ -3713,10 +3875,12 @@ spec:
normalized:
type: object
description: "Normalized resource requested"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
normalizedCompleted:
type: object
description: "Normalized resource completed"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
hostsWithTablesCreated:
type: array
@@ -3754,7 +3918,7 @@ spec:
stop: &TypeStringBool
type: string
description: |
- Allows to stop all ClickHouse clusters defined in a CHI.
+ Allows to stop all ClickHouse Keeper clusters defined in a CHK.
Works as the following:
- When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
- When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s.
@@ -3998,6 +4162,11 @@ spec:
description: |
optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
override top-level `chi.spec.configuration.files`
+ templates:
+ <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
pdbManaged:
<<: *TypeStringBool
description: |
@@ -4015,11 +4184,6 @@ spec:
by specifying 0. This is a mutually exclusive setting with "minAvailable".
minimum: 0
maximum: 65535
- templates:
- <<: *TypeTemplateNames
- description: |
- optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
- override top-level `chi.spec.configuration.templates`
layout:
type: object
description: |
@@ -4368,7 +4532,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
---
# Template Parameters:
#
@@ -4394,7 +4558,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
rules:
#
@@ -4626,7 +4790,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -4648,7 +4812,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
config.yaml: |
@@ -4829,7 +4993,7 @@ data:
# Specified in seconds.
timeouts:
# Timeout to setup connection from the operator to ClickHouse instances. In seconds.
- connect: 1
+ connect: 5
# Timeout to perform SQL query from the operator to ClickHouse instances. In seconds.
query: 4
@@ -4870,7 +5034,6 @@ data:
quotas:
settings:
files:
-
- version: ">= 23.5"
spec:
configuration:
@@ -4899,6 +5062,10 @@ data:
# Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
# All collected metrics are returned.
collect: 9
+ # Regexp to match tables in system database to fetch metrics from.
+ # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics.
+ tablesRegexp: "^(metrics|custom_metrics)$"
keeper:
configuration:
@@ -5003,6 +5170,20 @@ data:
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: abort
+ # Recreate StatefulSet scenario
+ recreate:
+ # What to do in case operator is in need to recreate StatefulSet?
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. recreate - proceed and recreate StatefulSet.
+
+ # Triggered when PVC data loss or missing volumes are detected
+ onDataLoss: recreate
+ # Triggered when StatefulSet update fails or StatefulSet is not ready
+ onUpdateFailure: recreate
+
# Reconcile Host scenario
host:
# The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions:
@@ -5156,7 +5337,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5172,7 +5353,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -5271,7 +5452,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -5371,7 +5552,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -5434,7 +5615,7 @@ metadata:
name: etc-keeper-operator-confd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5450,7 +5631,7 @@ metadata:
name: etc-keeper-operator-configd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-keeper-01-default-config.xml: |
@@ -5528,7 +5709,7 @@ metadata:
name: etc-keeper-operator-templatesd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
readme: |
@@ -5546,7 +5727,7 @@ metadata:
name: etc-keeper-operator-usersd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5554,7 +5735,7 @@ data:
# Template parameters available:
# NAMESPACE={{ namespace }}
# COMMENT=
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN={{ password }}
#
@@ -5564,7 +5745,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
type: Opaque
stringData:
@@ -5575,9 +5756,9 @@ stringData:
#
# NAMESPACE={{ namespace }}
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.6
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.26.0
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.6
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.26.0
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -5588,7 +5769,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
spec:
replicas: 1
@@ -5636,7 +5817,7 @@ spec:
name: etc-keeper-operator-usersd-files
containers:
- name: clickhouse-operator
- image: altinity/clickhouse-operator:0.25.6
+ image: altinity/clickhouse-operator:0.26.0
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5712,7 +5893,7 @@ spec:
name: op-metrics
- name: metrics-exporter
- image: altinity/metrics-exporter:0.25.6
+ image: altinity/metrics-exporter:0.26.0
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5803,7 +5984,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml
index bc9215dc4..7301128d1 100644
--- a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml
+++ b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -23,6 +23,10 @@ spec:
- chi
version: v1
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ JSONPath: .status.status
- name: version
type: string
description: Operator version
@@ -46,10 +50,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
JSONPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- JSONPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -492,6 +492,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
@@ -1453,14 +1527,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1472,6 +1546,10 @@ spec:
- chit
version: v1
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ JSONPath: .status.status
- name: version
type: string
description: Operator version
@@ -1495,10 +1573,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
JSONPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- JSONPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -1939,6 +2013,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
@@ -2903,7 +3051,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -3132,6 +3280,12 @@ spec:
Timeout used to limit metrics collection request. In seconds.
Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
All collected metrics are returned.
+ tablesRegexp:
+ type: string
+ description: |
+ Regexp to match tables in system database to fetch metrics from.
+ Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ Default is "^(metrics|custom_metrics)$".
template:
type: object
description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
@@ -3210,6 +3364,24 @@ spec:
1. abort - do nothing, just break the process and wait for admin.
2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
host:
type: object
description: |
@@ -3432,14 +3604,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.6
+ clickhouse-keeper.altinity.com/chop: 0.26.0
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3454,6 +3626,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -3477,15 +3653,10 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- - name: hosts-unchanged
+ - name: hosts-completed
type: integer
- description: Unchanged hosts count
- priority: 1 # show in wide view
- jsonPath: .status.hostsUnchanged
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
- name: hosts-updated
type: integer
description: Updated hosts count
@@ -3496,20 +3667,11 @@ spec:
description: Added hosts count
priority: 1 # show in wide view
jsonPath: .status.hostsAdded
- - name: hosts-completed
- type: integer
- description: Completed hosts count
- jsonPath: .status.hostsCompleted
- name: hosts-deleted
type: integer
description: Hosts deleted count
priority: 1 # show in wide view
jsonPath: .status.hostsDeleted
- - name: hosts-delete
- type: integer
- description: Hosts to be deleted count
- priority: 1 # show in wide view
- jsonPath: .status.hostsDelete
- name: endpoint
type: string
description: Client access endpoint
@@ -3675,10 +3837,12 @@ spec:
normalized:
type: object
description: "Normalized resource requested"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
normalizedCompleted:
type: object
description: "Normalized resource completed"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
hostsWithTablesCreated:
type: array
@@ -3716,7 +3880,7 @@ spec:
stop: &TypeStringBool
type: string
description: |
- Allows to stop all ClickHouse clusters defined in a CHI.
+ Allows to stop all ClickHouse Keeper clusters defined in a CHK.
Works as the following:
- When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
- When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s.
@@ -3959,6 +4123,11 @@ spec:
description: |
optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
override top-level `chi.spec.configuration.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
pdbManaged:
!!merge <<: *TypeStringBool
description: |
@@ -3976,11 +4145,6 @@ spec:
by specifying 0. This is a mutually exclusive setting with "minAvailable".
minimum: 0
maximum: 65535
- templates:
- !!merge <<: *TypeTemplateNames
- description: |
- optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
- override top-level `chi.spec.configuration.templates`
layout:
type: object
description: |
@@ -4327,7 +4491,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
# Template Parameters:
#
@@ -4352,7 +4516,7 @@ metadata:
name: clickhouse-operator-kube-system
#namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
rules:
#
# Core API group
@@ -4572,7 +4736,7 @@ metadata:
name: clickhouse-operator-kube-system
#namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -4605,7 +4769,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
rules:
#
# Core API group
@@ -4825,7 +4989,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -4847,7 +5011,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
config.yaml: |
@@ -5028,7 +5192,7 @@ data:
# Specified in seconds.
timeouts:
# Timeout to setup connection from the operator to ClickHouse instances. In seconds.
- connect: 1
+ connect: 5
# Timeout to perform SQL query from the operator to ClickHouse instances. In seconds.
query: 4
@@ -5069,7 +5233,6 @@ data:
quotas:
settings:
files:
-
- version: ">= 23.5"
spec:
configuration:
@@ -5098,6 +5261,10 @@ data:
# Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
# All collected metrics are returned.
collect: 9
+ # Regexp to match tables in system database to fetch metrics from.
+ # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics.
+ tablesRegexp: "^(metrics|custom_metrics)$"
keeper:
configuration:
@@ -5202,6 +5369,20 @@ data:
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: abort
+ # Recreate StatefulSet scenario
+ recreate:
+ # What to do in case operator is in need to recreate StatefulSet?
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. recreate - proceed and recreate StatefulSet.
+
+ # Triggered when PVC data loss or missing volumes are detected
+ onDataLoss: recreate
+ # Triggered when StatefulSet update fails or StatefulSet is not ready
+ onUpdateFailure: recreate
+
# Reconcile Host scenario
host:
# The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions:
@@ -5354,7 +5535,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5370,7 +5551,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -5464,7 +5645,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -5562,7 +5743,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -5624,7 +5805,7 @@ metadata:
name: etc-keeper-operator-confd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5640,7 +5821,7 @@ metadata:
name: etc-keeper-operator-configd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-keeper-01-default-config.xml: |
@@ -5715,7 +5896,7 @@ metadata:
name: etc-keeper-operator-templatesd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
readme: |
@@ -5733,7 +5914,7 @@ metadata:
name: etc-keeper-operator-usersd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5741,7 +5922,7 @@ data:
# Template parameters available:
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
@@ -5751,7 +5932,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
type: Opaque
stringData:
@@ -5762,9 +5943,9 @@ stringData:
#
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.6
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.26.0
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.6
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.26.0
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -5775,7 +5956,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
spec:
replicas: 1
@@ -5823,7 +6004,7 @@ spec:
name: etc-keeper-operator-usersd-files
containers:
- name: clickhouse-operator
- image: altinity/clickhouse-operator:0.25.6
+ image: altinity/clickhouse-operator:0.26.0
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5897,7 +6078,7 @@ spec:
- containerPort: 9999
name: op-metrics
- name: metrics-exporter
- image: altinity/metrics-exporter:0.25.6
+ image: altinity/metrics-exporter:0.26.0
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5987,7 +6168,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-bundle.yaml b/deploy/operator/clickhouse-operator-install-bundle.yaml
index 63f75781a..8f2f01468 100644
--- a/deploy/operator/clickhouse-operator-install-bundle.yaml
+++ b/deploy/operator/clickhouse-operator-install-bundle.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -26,6 +26,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -49,10 +53,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -497,6 +497,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
@@ -1463,14 +1537,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1485,6 +1559,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -1508,10 +1586,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -1956,6 +2030,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
@@ -2925,7 +3073,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -3158,6 +3306,12 @@ spec:
Timeout used to limit metrics collection request. In seconds.
Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
All collected metrics are returned.
+ tablesRegexp:
+ type: string
+ description: |
+ Regexp to match tables in system database to fetch metrics from.
+ Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ Default is "^(metrics|custom_metrics)$".
template:
type: object
description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
@@ -3236,6 +3390,24 @@ spec:
1. abort - do nothing, just break the process and wait for admin.
2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
host:
type: object
description: |
@@ -3463,14 +3635,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.6
+ clickhouse-keeper.altinity.com/chop: 0.26.0
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3485,6 +3657,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -3508,15 +3684,10 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- - name: hosts-unchanged
+ - name: hosts-completed
type: integer
- description: Unchanged hosts count
- priority: 1 # show in wide view
- jsonPath: .status.hostsUnchanged
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
- name: hosts-updated
type: integer
description: Updated hosts count
@@ -3527,20 +3698,11 @@ spec:
description: Added hosts count
priority: 1 # show in wide view
jsonPath: .status.hostsAdded
- - name: hosts-completed
- type: integer
- description: Completed hosts count
- jsonPath: .status.hostsCompleted
- name: hosts-deleted
type: integer
description: Hosts deleted count
priority: 1 # show in wide view
jsonPath: .status.hostsDeleted
- - name: hosts-delete
- type: integer
- description: Hosts to be deleted count
- priority: 1 # show in wide view
- jsonPath: .status.hostsDelete
- name: endpoint
type: string
description: Client access endpoint
@@ -3706,10 +3868,12 @@ spec:
normalized:
type: object
description: "Normalized resource requested"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
normalizedCompleted:
type: object
description: "Normalized resource completed"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
hostsWithTablesCreated:
type: array
@@ -3747,7 +3911,7 @@ spec:
stop: &TypeStringBool
type: string
description: |
- Allows to stop all ClickHouse clusters defined in a CHI.
+ Allows to stop all ClickHouse Keeper clusters defined in a CHK.
Works as the following:
- When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. Thie leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
- When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s.
@@ -3991,6 +4155,11 @@ spec:
description: |
optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
override top-level `chi.spec.configuration.files`
+ templates:
+ <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
pdbManaged:
<<: *TypeStringBool
description: |
@@ -4008,11 +4177,6 @@ spec:
by specifying 0. This is a mutually exclusive setting with "minAvailable".
minimum: 0
maximum: 65535
- templates:
- <<: *TypeTemplateNames
- description: |
- optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
- override top-level `chi.spec.configuration.templates`
layout:
type: object
description: |
@@ -4361,7 +4525,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
---
# Template Parameters:
#
@@ -4387,7 +4551,7 @@ metadata:
name: clickhouse-operator-kube-system
#namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
rules:
#
@@ -4619,7 +4783,7 @@ metadata:
name: clickhouse-operator-kube-system
#namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -4653,7 +4817,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
rules:
#
@@ -4885,7 +5049,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -4907,7 +5071,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
config.yaml: |
@@ -5088,7 +5252,7 @@ data:
# Specified in seconds.
timeouts:
# Timout to setup connection from the operator to ClickHouse instances. In seconds.
- connect: 1
+ connect: 5
# Timout to perform SQL query from the operator to ClickHouse instances. In seconds.
query: 4
@@ -5129,7 +5293,6 @@ data:
quotas:
settings:
files:
-
- version: ">= 23.5"
spec:
configuration:
@@ -5158,6 +5321,10 @@ data:
# Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
# All collected metrics are returned.
collect: 9
+ # Regexp to match tables in system database to fetch metrics from.
+ # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics.
+ tablesRegexp: "^(metrics|custom_metrics)$"
keeper:
configuration:
@@ -5262,6 +5429,20 @@ data:
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: abort
+ # Recreate StatefulSet scenario
+ recreate:
+ # What to do in case operator is in need to recreate StatefulSet?
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. recreate - proceed and recreate StatefulSet.
+
+ # Triggered when PVC data loss or missing volumes are detected
+ onDataLoss: recreate
+ # Triggered when StatefulSet update fails or StatefulSet is not ready
+ onUpdateFailure: recreate
+
# Reconcile Host scenario
host:
# The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions:
@@ -5415,7 +5596,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5431,7 +5612,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -5530,7 +5711,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -5630,7 +5811,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -5693,7 +5874,7 @@ metadata:
name: etc-keeper-operator-confd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5709,7 +5890,7 @@ metadata:
name: etc-keeper-operator-configd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-keeper-01-default-config.xml: |
@@ -5787,7 +5968,7 @@ metadata:
name: etc-keeper-operator-templatesd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
readme: |
@@ -5805,7 +5986,7 @@ metadata:
name: etc-keeper-operator-usersd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5813,7 +5994,7 @@ data:
# Template parameters available:
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
@@ -5823,7 +6004,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
type: Opaque
stringData:
@@ -5834,9 +6015,9 @@ stringData:
#
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.6
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.26.0
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.6
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.26.0
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -5847,7 +6028,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
spec:
replicas: 1
@@ -5895,7 +6076,7 @@ spec:
name: etc-keeper-operator-usersd-files
containers:
- name: clickhouse-operator
- image: altinity/clickhouse-operator:0.25.6
+ image: altinity/clickhouse-operator:0.26.0
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5971,7 +6152,7 @@ spec:
name: op-metrics
- name: metrics-exporter
- image: altinity/metrics-exporter:0.25.6
+ image: altinity/metrics-exporter:0.26.0
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -6062,7 +6243,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml
index 456623492..5684aed86 100644
--- a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml
+++ b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -23,6 +23,10 @@ spec:
- chi
version: v1
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ JSONPath: .status.status
- name: version
type: string
description: Operator version
@@ -46,10 +50,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
JSONPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- JSONPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -492,6 +492,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
@@ -1453,14 +1527,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1472,6 +1546,10 @@ spec:
- chit
version: v1
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ JSONPath: .status.status
- name: version
type: string
description: Operator version
@@ -1495,10 +1573,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
JSONPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- JSONPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -1939,6 +2013,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
@@ -2903,7 +3051,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -3132,6 +3280,12 @@ spec:
Timeout used to limit metrics collection request. In seconds.
Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
All collected metrics are returned.
+ tablesRegexp:
+ type: string
+ description: |
+ Regexp to match tables in system database to fetch metrics from.
+ Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ Default is "^(metrics|custom_metrics)$".
template:
type: object
description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
@@ -3210,6 +3364,24 @@ spec:
1. abort - do nothing, just break the process and wait for admin.
2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
host:
type: object
description: |
@@ -3432,14 +3604,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.6
+ clickhouse-keeper.altinity.com/chop: 0.26.0
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3454,6 +3626,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -3477,15 +3653,10 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- - name: hosts-unchanged
+ - name: hosts-completed
type: integer
- description: Unchanged hosts count
- priority: 1 # show in wide view
- jsonPath: .status.hostsUnchanged
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
- name: hosts-updated
type: integer
description: Updated hosts count
@@ -3496,20 +3667,11 @@ spec:
description: Added hosts count
priority: 1 # show in wide view
jsonPath: .status.hostsAdded
- - name: hosts-completed
- type: integer
- description: Completed hosts count
- jsonPath: .status.hostsCompleted
- name: hosts-deleted
type: integer
description: Hosts deleted count
priority: 1 # show in wide view
jsonPath: .status.hostsDeleted
- - name: hosts-delete
- type: integer
- description: Hosts to be deleted count
- priority: 1 # show in wide view
- jsonPath: .status.hostsDelete
- name: endpoint
type: string
description: Client access endpoint
@@ -3675,10 +3837,12 @@ spec:
normalized:
type: object
description: "Normalized resource requested"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
normalizedCompleted:
type: object
description: "Normalized resource completed"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
hostsWithTablesCreated:
type: array
@@ -3716,7 +3880,7 @@ spec:
stop: &TypeStringBool
type: string
description: |
- Allows to stop all ClickHouse clusters defined in a CHI.
+ Allows to stop all ClickHouse Keeper clusters defined in a CHK.
Works as the following:
- When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
- When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s.
@@ -3959,6 +4123,11 @@ spec:
description: |
optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
override top-level `chi.spec.configuration.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
pdbManaged:
!!merge <<: *TypeStringBool
description: |
@@ -3976,11 +4145,6 @@ spec:
by specifying 0. This is a mutually exclusive setting with "minAvailable".
minimum: 0
maximum: 65535
- templates:
- !!merge <<: *TypeTemplateNames
- description: |
- optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
- override top-level `chi.spec.configuration.templates`
layout:
type: object
description: |
@@ -4327,7 +4491,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
# Template Parameters:
#
@@ -4352,7 +4516,7 @@ metadata:
name: clickhouse-operator-${OPERATOR_NAMESPACE}
#namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
rules:
#
# Core API group
@@ -4572,7 +4736,7 @@ metadata:
name: clickhouse-operator-${OPERATOR_NAMESPACE}
#namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -4594,7 +4758,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
config.yaml: |
@@ -4775,7 +4939,7 @@ data:
# Specified in seconds.
timeouts:
# Timeout to set up connection from the operator to ClickHouse instances. In seconds.
- connect: 1
+ connect: 5
# Timeout to perform SQL query from the operator to ClickHouse instances. In seconds.
query: 4
@@ -4816,7 +4980,6 @@ data:
quotas:
settings:
files:
-
- version: ">= 23.5"
spec:
configuration:
@@ -4845,6 +5008,10 @@ data:
# Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
# All collected metrics are returned.
collect: 9
+ # Regexp to match tables in system database to fetch metrics from.
+ # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics.
+ tablesRegexp: "^(metrics|custom_metrics)$"
keeper:
configuration:
@@ -4949,6 +5116,20 @@ data:
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: abort
+ # Recreate StatefulSet scenario
+ recreate:
+ # What to do in case operator is in need to recreate StatefulSet?
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. recreate - proceed and recreate StatefulSet.
+
+ # Triggered when PVC data loss or missing volumes are detected
+ onDataLoss: recreate
+ # Triggered when StatefulSet update fails or StatefulSet is not ready
+ onUpdateFailure: recreate
+
# Reconcile Host scenario
host:
# The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions:
@@ -5101,7 +5282,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5117,7 +5298,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -5211,7 +5392,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -5309,7 +5490,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -5371,7 +5552,7 @@ metadata:
name: etc-keeper-operator-confd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5387,7 +5568,7 @@ metadata:
name: etc-keeper-operator-configd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-keeper-01-default-config.xml: |
@@ -5462,7 +5643,7 @@ metadata:
name: etc-keeper-operator-templatesd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
readme: |
@@ -5480,7 +5661,7 @@ metadata:
name: etc-keeper-operator-usersd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5488,7 +5669,7 @@ data:
# Template parameters available:
# NAMESPACE=${OPERATOR_NAMESPACE}
# COMMENT=
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
@@ -5498,7 +5679,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
type: Opaque
stringData:
@@ -5522,7 +5703,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
spec:
replicas: 1
@@ -5734,7 +5915,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-template.yaml b/deploy/operator/clickhouse-operator-install-template.yaml
index ea1238ee5..85d6a3834 100644
--- a/deploy/operator/clickhouse-operator-install-template.yaml
+++ b/deploy/operator/clickhouse-operator-install-template.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -26,6 +26,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -49,10 +53,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -497,6 +497,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
@@ -1463,14 +1537,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1485,6 +1559,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -1508,10 +1586,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -1956,6 +2030,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
@@ -2925,7 +3073,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -3158,6 +3306,12 @@ spec:
Timeout used to limit metrics collection request. In seconds.
Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
All collected metrics are returned.
+ tablesRegexp:
+ type: string
+ description: |
+ Regexp to match tables in system database to fetch metrics from.
+ Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ Default is "^(metrics|custom_metrics)$".
template:
type: object
description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
@@ -3236,6 +3390,24 @@ spec:
1. abort - do nothing, just break the process and wait for admin.
2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
host:
type: object
description: |
@@ -3463,14 +3635,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.6
+ clickhouse-keeper.altinity.com/chop: 0.26.0
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3485,6 +3657,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -3508,15 +3684,10 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- - name: hosts-unchanged
+ - name: hosts-completed
type: integer
- description: Unchanged hosts count
- priority: 1 # show in wide view
- jsonPath: .status.hostsUnchanged
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
- name: hosts-updated
type: integer
description: Updated hosts count
@@ -3527,20 +3698,11 @@ spec:
description: Added hosts count
priority: 1 # show in wide view
jsonPath: .status.hostsAdded
- - name: hosts-completed
- type: integer
- description: Completed hosts count
- jsonPath: .status.hostsCompleted
- name: hosts-deleted
type: integer
description: Hosts deleted count
priority: 1 # show in wide view
jsonPath: .status.hostsDeleted
- - name: hosts-delete
- type: integer
- description: Hosts to be deleted count
- priority: 1 # show in wide view
- jsonPath: .status.hostsDelete
- name: endpoint
type: string
description: Client access endpoint
@@ -3706,10 +3868,12 @@ spec:
normalized:
type: object
description: "Normalized resource requested"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
normalizedCompleted:
type: object
description: "Normalized resource completed"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
hostsWithTablesCreated:
type: array
@@ -3747,7 +3911,7 @@ spec:
stop: &TypeStringBool
type: string
description: |
- Allows to stop all ClickHouse clusters defined in a CHI.
+ Allows to stop all ClickHouse Keeper clusters defined in a CHK.
Works as the following:
- When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
- When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s.
@@ -3991,6 +4155,11 @@ spec:
description: |
optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
override top-level `chi.spec.configuration.files`
+ templates:
+ <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
pdbManaged:
<<: *TypeStringBool
description: |
@@ -4008,11 +4177,6 @@ spec:
by specifying 0. This is a mutually exclusive setting with "minAvailable".
minimum: 0
maximum: 65535
- templates:
- <<: *TypeTemplateNames
- description: |
- optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
- override top-level `chi.spec.configuration.templates`
layout:
type: object
description: |
@@ -4361,7 +4525,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
---
# Template Parameters:
#
@@ -4387,7 +4551,7 @@ metadata:
name: clickhouse-operator-${OPERATOR_NAMESPACE}
#namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
rules:
#
@@ -4619,7 +4783,7 @@ metadata:
name: clickhouse-operator-${OPERATOR_NAMESPACE}
#namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -4641,7 +4805,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
config.yaml: |
@@ -4822,7 +4986,7 @@ data:
# Specified in seconds.
timeouts:
# Timeout to set up connection from the operator to ClickHouse instances. In seconds.
- connect: 1
+ connect: 5
# Timeout to perform SQL query from the operator to ClickHouse instances. In seconds.
query: 4
@@ -4863,7 +5027,6 @@ data:
quotas:
settings:
files:
-
- version: ">= 23.5"
spec:
configuration:
@@ -4892,6 +5055,10 @@ data:
# Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
# All collected metrics are returned.
collect: 9
+ # Regexp to match tables in system database to fetch metrics from.
+ # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics.
+ tablesRegexp: "^(metrics|custom_metrics)$"
keeper:
configuration:
@@ -4996,6 +5163,20 @@ data:
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: abort
+ # Recreate StatefulSet scenario
+ recreate:
+ # What to do in case operator is in need to recreate StatefulSet?
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. recreate - proceed and recreate StatefulSet.
+
+ # Triggered when PVC data loss or missing volumes are detected
+ onDataLoss: recreate
+ # Triggered when StatefulSet update fails or StatefulSet is not ready
+ onUpdateFailure: recreate
+
# Reconcile Host scenario
host:
# The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions:
@@ -5149,7 +5330,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5165,7 +5346,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -5264,7 +5445,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -5364,7 +5545,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -5427,7 +5608,7 @@ metadata:
name: etc-keeper-operator-confd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5443,7 +5624,7 @@ metadata:
name: etc-keeper-operator-configd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-keeper-01-default-config.xml: |
@@ -5521,7 +5702,7 @@ metadata:
name: etc-keeper-operator-templatesd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
readme: |
@@ -5539,7 +5720,7 @@ metadata:
name: etc-keeper-operator-usersd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5547,7 +5728,7 @@ data:
# Template parameters available:
# NAMESPACE=${OPERATOR_NAMESPACE}
# COMMENT=
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
@@ -5557,7 +5738,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
type: Opaque
stringData:
@@ -5581,7 +5762,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
spec:
replicas: 1
@@ -5796,7 +5977,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-tf.yaml b/deploy/operator/clickhouse-operator-install-tf.yaml
index a62e6a2a0..34328b53b 100644
--- a/deploy/operator/clickhouse-operator-install-tf.yaml
+++ b/deploy/operator/clickhouse-operator-install-tf.yaml
@@ -11,14 +11,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -33,6 +33,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -56,10 +60,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -504,6 +504,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
@@ -1470,14 +1544,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1492,6 +1566,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -1515,10 +1593,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -1963,6 +2037,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host: &TypeReconcileHost
type: object
description: |
@@ -2932,7 +3080,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -3165,6 +3313,12 @@ spec:
Timeout used to limit metrics collection request. In seconds.
Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
All collected metrics are returned.
+ tablesRegexp:
+ type: string
+ description: |
+ Regexp to match tables in system database to fetch metrics from.
+ Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ Default is "^(metrics|custom_metrics)$".
template:
type: object
description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
@@ -3243,6 +3397,24 @@ spec:
1. abort - do nothing, just break the process and wait for admin.
2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
host:
type: object
description: |
@@ -3470,14 +3642,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.6
+ clickhouse-keeper.altinity.com/chop: 0.26.0
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3492,6 +3664,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -3515,15 +3691,10 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- - name: hosts-unchanged
+ - name: hosts-completed
type: integer
- description: Unchanged hosts count
- priority: 1 # show in wide view
- jsonPath: .status.hostsUnchanged
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
- name: hosts-updated
type: integer
description: Updated hosts count
@@ -3534,20 +3705,11 @@ spec:
description: Added hosts count
priority: 1 # show in wide view
jsonPath: .status.hostsAdded
- - name: hosts-completed
- type: integer
- description: Completed hosts count
- jsonPath: .status.hostsCompleted
- name: hosts-deleted
type: integer
description: Hosts deleted count
priority: 1 # show in wide view
jsonPath: .status.hostsDeleted
- - name: hosts-delete
- type: integer
- description: Hosts to be deleted count
- priority: 1 # show in wide view
- jsonPath: .status.hostsDelete
- name: endpoint
type: string
description: Client access endpoint
@@ -3713,10 +3875,12 @@ spec:
normalized:
type: object
description: "Normalized resource requested"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
normalizedCompleted:
type: object
description: "Normalized resource completed"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
hostsWithTablesCreated:
type: array
@@ -3754,7 +3918,7 @@ spec:
stop: &TypeStringBool
type: string
description: |
- Allows to stop all ClickHouse clusters defined in a CHI.
+ Allows to stop all ClickHouse Keeper clusters defined in a CHK.
Works as the following:
      - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
      - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s.
@@ -3998,6 +4162,11 @@ spec:
description: |
optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
override top-level `chi.spec.configuration.files`
+ templates:
+ <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
pdbManaged:
<<: *TypeStringBool
description: |
@@ -4015,11 +4184,6 @@ spec:
by specifying 0. This is a mutually exclusive setting with "minAvailable".
minimum: 0
maximum: 65535
- templates:
- <<: *TypeTemplateNames
- description: |
- optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
- override top-level `chi.spec.configuration.templates`
layout:
type: object
description: |
@@ -4368,7 +4532,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
---
# Template Parameters:
#
@@ -4394,7 +4558,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
rules:
#
@@ -4626,7 +4790,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -4648,7 +4812,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
config.yaml: |
@@ -4829,7 +4993,7 @@ data:
# Specified in seconds.
timeouts:
        # Timeout to setup connection from the operator to ClickHouse instances. In seconds.
- connect: 1
+ connect: 5
        # Timeout to perform SQL query from the operator to ClickHouse instances. In seconds.
query: 4
@@ -4870,7 +5034,6 @@ data:
quotas:
settings:
files:
-
- version: ">= 23.5"
spec:
configuration:
@@ -4899,6 +5062,10 @@ data:
# Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
# All collected metrics are returned.
collect: 9
+ # Regexp to match tables in system database to fetch metrics from.
+ # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ # Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics.
+ tablesRegexp: "^(metrics|custom_metrics)$"
keeper:
configuration:
@@ -5003,6 +5170,20 @@ data:
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: abort
+ # Recreate StatefulSet scenario
+ recreate:
+ # What to do in case operator is in need to recreate StatefulSet?
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. recreate - proceed and recreate StatefulSet.
+
+ # Triggered when PVC data loss or missing volumes are detected
+ onDataLoss: recreate
+ # Triggered when StatefulSet update fails or StatefulSet is not ready
+ onUpdateFailure: recreate
+
# Reconcile Host scenario
host:
# The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions:
@@ -5156,7 +5337,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5172,7 +5353,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -5271,7 +5452,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -5371,7 +5552,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -5434,7 +5615,7 @@ metadata:
name: etc-keeper-operator-confd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5450,7 +5631,7 @@ metadata:
name: etc-keeper-operator-configd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
01-keeper-01-default-config.xml: |
@@ -5528,7 +5709,7 @@ metadata:
name: etc-keeper-operator-templatesd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
readme: |
@@ -5546,7 +5727,7 @@ metadata:
name: etc-keeper-operator-usersd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
data:
---
@@ -5554,7 +5735,7 @@ data:
# Template parameters available:
# NAMESPACE=${namespace}
# COMMENT=
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=${password}
#
@@ -5564,7 +5745,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
type: Opaque
stringData:
@@ -5575,9 +5756,9 @@ stringData:
#
# NAMESPACE=${namespace}
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.6
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.26.0
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.6
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.26.0
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -5588,7 +5769,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
spec:
replicas: 1
@@ -5636,7 +5817,7 @@ spec:
name: etc-keeper-operator-usersd-files
containers:
- name: clickhouse-operator
- image: altinity/clickhouse-operator:0.25.6
+ image: altinity/clickhouse-operator:0.26.0
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5712,7 +5893,7 @@ spec:
name: op-metrics
- name: metrics-exporter
- image: altinity/metrics-exporter:0.25.6
+ image: altinity/metrics-exporter:0.26.0
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5803,7 +5984,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/parts/crd.yaml b/deploy/operator/parts/crd.yaml
index b666e6e13..49b5b79de 100644
--- a/deploy/operator/parts/crd.yaml
+++ b/deploy/operator/parts/crd.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -26,6 +26,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -49,10 +53,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -706,6 +706,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet:
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host:
type: object
description: |
@@ -1320,6 +1394,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet:
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host:
type: object
description: |
@@ -3556,14 +3704,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -3578,6 +3726,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -3601,10 +3753,6 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- name: hosts-completed
type: integer
description: Completed hosts count
@@ -4258,6 +4406,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet:
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host:
type: object
description: |
@@ -4872,6 +5094,80 @@ spec:
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet:
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
host:
type: object
description: |
@@ -7111,7 +7407,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.6
+ clickhouse.altinity.com/chop: 0.26.0
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -7344,6 +7640,12 @@ spec:
Timeout used to limit metrics collection request. In seconds.
Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
All collected metrics are returned.
+ tablesRegexp:
+ type: string
+ description: |
+ Regexp to match tables in system database to fetch metrics from.
+ Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ Default is "^(metrics|custom_metrics)$".
template:
type: object
description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
@@ -7422,6 +7724,24 @@ spec:
1. abort - do nothing, just break the process and wait for admin.
2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
host:
type: object
description: |
@@ -7994,14 +8314,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.6
+# OPERATOR_VERSION=0.26.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.6
+ clickhouse-keeper.altinity.com/chop: 0.26.0
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -8016,6 +8336,10 @@ spec:
served: true
storage: true
additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
- name: version
type: string
description: Operator version
@@ -8039,15 +8363,10 @@ spec:
description: TaskID
priority: 1 # show in wide view
jsonPath: .status.taskID
- - name: status
- type: string
- description: Resource status
- jsonPath: .status.status
- - name: hosts-unchanged
+ - name: hosts-completed
type: integer
- description: Unchanged hosts count
- priority: 1 # show in wide view
- jsonPath: .status.hostsUnchanged
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
- name: hosts-updated
type: integer
description: Updated hosts count
@@ -8058,20 +8377,11 @@ spec:
description: Added hosts count
priority: 1 # show in wide view
jsonPath: .status.hostsAdded
- - name: hosts-completed
- type: integer
- description: Completed hosts count
- jsonPath: .status.hostsCompleted
- name: hosts-deleted
type: integer
description: Hosts deleted count
priority: 1 # show in wide view
jsonPath: .status.hostsDeleted
- - name: hosts-delete
- type: integer
- description: Hosts to be deleted count
- priority: 1 # show in wide view
- jsonPath: .status.hostsDelete
- name: endpoint
type: string
description: Client access endpoint
@@ -8237,10 +8547,12 @@ spec:
normalized:
type: object
description: "Normalized resource requested"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
normalizedCompleted:
type: object
description: "Normalized resource completed"
+ nullable: true
x-kubernetes-preserve-unknown-fields: true
hostsWithTablesCreated:
type: array
@@ -8278,7 +8590,7 @@ spec:
stop:
type: string
description: |
- Allows to stop all ClickHouse clusters defined in a CHI.
+ Allows to stop all ClickHouse Keeper clusters defined in a CHK.
Works as the following:
      - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
      - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s.
@@ -8609,6 +8921,46 @@ spec:
description: |
optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
override top-level `chi.spec.configuration.files`
+ templates:
+ type: object
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ serviceTemplates:
+ type: array
+ description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ nullable: true
+ items:
+ type: string
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
pdbManaged:
type: string
enum:
@@ -8651,46 +9003,6 @@ spec:
by specifying 0. This is a mutually exclusive setting with "minAvailable".
minimum: 0
maximum: 65535
- templates:
- type: object
- # nullable: true
- properties:
- hostTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
- podTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
- dataVolumeClaimTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
- logVolumeClaimTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
- serviceTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
- serviceTemplates:
- type: array
- description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
- nullable: true
- items:
- type: string
- clusterServiceTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
- shardServiceTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
- replicaServiceTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
- volumeClaimTemplate:
- type: string
- description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
- description: |
- optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
- override top-level `chi.spec.configuration.templates`
layout:
type: object
description: |
diff --git a/deploy/operatorhub/0.25.6/clickhouse-operator.v0.25.6.clusterserviceversion.yaml b/deploy/operatorhub/0.25.6/clickhouse-operator.v0.25.6.clusterserviceversion.yaml
new file mode 100644
index 000000000..26c6644f6
--- /dev/null
+++ b/deploy/operatorhub/0.25.6/clickhouse-operator.v0.25.6.clusterserviceversion.yaml
@@ -0,0 +1,1660 @@
+apiVersion: operators.coreos.com/v1alpha1
+kind: ClusterServiceVersion
+metadata:
+ name: clickhouse-operator.v0.25.6
+ namespace: placeholder
+ annotations:
+ capabilities: Full Lifecycle
+ categories: Database
+ containerImage: docker.io/altinity/clickhouse-operator:0.25.6
+ createdAt: '2025-12-12T02:07:15Z'
+ support: Altinity Ltd. https://altinity.com
+ description: The Altinity® Kubernetes Operator for ClickHouse® manages the full lifecycle of ClickHouse clusters.
+ repository: https://github.com/altinity/clickhouse-operator
+ certified: 'false'
+ alm-examples: |
+ [
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseInstallation",
+ "metadata": {
+ "name": "simple-01"
+ },
+ "spec": {
+ "configuration": {
+ "users": {
+ "test_user/password_sha256_hex": "10a6e6cc8311a3e2bcc09bf6c199adecd5dd59408c343e926b129c4914f3cb01",
+ "test_user/password": "test_password",
+ "test_user/networks/ip": [
+ "0.0.0.0/0"
+ ]
+ },
+ "clusters": [
+ {
+ "name": "simple"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseInstallation",
+ "metadata": {
+ "name": "use-templates-all",
+ "labels": {
+ "target-chi-label-manual": "target-chi-label-manual-value",
+ "target-chi-label-auto": "target-chi-label-auto-value"
+ }
+ },
+ "spec": {
+ "useTemplates": [
+ {
+ "name": "chit-01"
+ },
+ {
+ "name": "chit-02"
+ }
+ ],
+ "configuration": {
+ "clusters": [
+ {
+ "name": "c1"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseOperatorConfiguration",
+ "metadata": {
+ "name": "chop-config-01"
+ },
+ "spec": {
+ "watch": {
+ "namespaces": {
+ "include": [],
+ "exclude": []
+ }
+ },
+ "clickhouse": {
+ "configuration": {
+ "file": {
+ "path": {
+ "common": "config.d",
+ "host": "conf.d",
+ "user": "users.d"
+ }
+ },
+ "user": {
+ "default": {
+ "profile": "default",
+ "quota": "default",
+ "networksIP": [
+ "::1",
+ "127.0.0.1"
+ ],
+ "password": "default"
+ }
+ },
+ "network": {
+ "hostRegexpTemplate": "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$"
+ }
+ },
+ "access": {
+ "username": "clickhouse_operator",
+ "password": "clickhouse_operator_password",
+ "secret": {
+ "namespace": "",
+ "name": ""
+ },
+ "port": 8123
+ }
+ },
+ "template": {
+ "chi": {
+ "path": "templates.d"
+ }
+ },
+ "reconcile": {
+ "runtime": {
+ "reconcileCHIsThreadsNumber": 10,
+ "reconcileShardsThreadsNumber": 5,
+ "reconcileShardsMaxConcurrencyPercent": 50
+ },
+ "statefulSet": {
+ "create": {
+ "onFailure": "ignore"
+ },
+ "update": {
+ "timeout": 300,
+ "pollInterval": 5,
+ "onFailure": "abort"
+ }
+ },
+ "host": {
+ "wait": {
+ "exclude": true,
+ "queries": true,
+ "include": false,
+ "replicas": {
+ "all": "no",
+ "new": "yes",
+ "delay": 10
+ },
+ "probes": {
+ "startup": "no",
+ "readiness": "yes"
+ }
+ }
+ }
+ },
+ "annotation": {
+ "include": [],
+ "exclude": []
+ },
+ "label": {
+ "include": [],
+ "exclude": [],
+ "appendScope": "no"
+ },
+ "statefulSet": {
+ "revisionHistoryLimit": 0
+ },
+ "pod": {
+ "terminationGracePeriod": 30
+ },
+ "logger": {
+ "logtostderr": "true",
+ "alsologtostderr": "false",
+ "v": "1",
+ "stderrthreshold": "",
+ "vmodule": "",
+ "log_backtrace_at": ""
+ }
+ }
+ }
+ ]
+spec:
+ version: 0.25.6
+ minKubeVersion: 1.12.6
+ maturity: alpha
+ replaces: clickhouse-operator.v0.25.5
+ maintainers:
+ - email: support@altinity.com
+ name: Altinity
+ provider:
+ name: Altinity
+ displayName: Altinity® Kubernetes Operator for ClickHouse®
+ keywords:
+ - "clickhouse"
+ - "database"
+ - "oltp"
+ - "timeseries"
+ - "time series"
+ - "altinity"
+ customresourcedefinitions:
+ owned:
+ - description: ClickHouse Installation - set of ClickHouse Clusters
+ displayName: ClickHouseInstallation
+ group: clickhouse.altinity.com
+ kind: ClickHouseInstallation
+ name: clickhouseinstallations.clickhouse.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ - description: ClickHouse Installation Template - template for ClickHouse Installation
+ displayName: ClickHouseInstallationTemplate
+ group: clickhouse.altinity.com
+ kind: ClickHouseInstallationTemplate
+ name: clickhouseinstallationtemplates.clickhouse.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ - description: ClickHouse Operator Configuration - configuration of ClickHouse operator
+ displayName: ClickHouseOperatorConfiguration
+ group: clickhouse.altinity.com
+ kind: ClickHouseOperatorConfiguration
+ name: clickhouseoperatorconfigurations.clickhouse.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ - description: ClickHouse Keeper Installation - ClickHouse Keeper cluster instance
+ displayName: ClickHouseKeeperInstallation
+ group: clickhouse-keeper.altinity.com
+ kind: ClickHouseKeeperInstallation
+ name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ description: |-
+ ## ClickHouse
+ [ClickHouse](https://clickhouse.yandex) is an open source column-oriented database management system capable of real time generation of analytical data reports.
+ Check [ClickHouse documentation](https://clickhouse.yandex/docs/en) for more complete details.
+ ## The Altinity Operator for ClickHouse
+ The [Altinity Operator for ClickHouse](https://github.com/altinity/clickhouse-operator) automates the creation, alteration, or deletion of nodes in your ClickHouse cluster environment.
+ Check [operator documentation](https://github.com/Altinity/clickhouse-operator/tree/master/docs) for complete details and examples.
+ links:
+ - name: Altinity
+ url: https://altinity.com/
+ - name: Operator homepage
+ url: https://www.altinity.com/kubernetes-operator
+ - name: Github
+ url: https://github.com/altinity/clickhouse-operator
+ - name: Documentation
+ url: https://github.com/Altinity/clickhouse-operator/tree/master/docs
+ icon:
+ - mediatype: image/png
+ base64data: |-
+ iVBORw0KGgoAAAANSUhEUgAAASwAAAEsCAYAAAB5fY51AAAAAXNSR0IArs4c6QAAQABJREFUeAHs
+ vQmgZ2lVH3j/r6p676abpSNLE2TrRlwSQBoVtVFHiUGFLKOEKCQTTUzGmTExhmhiSJBoTMZEs2KQ
+ BsWNREWdyCTOGOMEQZbI0i0NyCaIxAB203tXvfef33LO+c697/+qqqu3V1Xvvq773fOd3/md5Tvf
+ 9+57/erVajq4DiqwoQLnvf4PH3Pe4ekx29P0oNVqfTEgl6xWE8eLV6uVxgnjCjLHab0TI+ZW00PW
+ 6zWgq0+up/XNkG+edjiubt6Zppu31tOnd1brm7fwvL1e3by1Bf165+b1+vDN02r7Jhh+6MZnXfYh
+ jAfXQQVmFUC/HVxnbQV+aX3Bg875xJOOracrD21NV00705U4XK6aVtMTVqut81WX6BAcNpOOIIye
+ x4iJFMfDhmry8CIwRh5mZBHfppH61XT7znp672pavQeH3g07W1s3rLbX77npQQ+6YXra6rYNXg6m
+ zoIKRPedBZmerSmu16vzfvnGRx9a3XXlar2+CscEDqXVVTgSrtzaWj2SZRmHURRJh0uf9/EycNE2
+ cVpxPg+jLLMOJczPRiiPd0j1Q634eMgtr/X693CIvWe9OnQD+G/goXbXzs4Nt3/FZR8BxwaDJcGB
+ fLpW4ODAOl1Xbq+4cUBd+Mv/4/Om9bFrcF5cs1qvvgSvM5fpMIDN8tCxzDcaHjoCiFkyn+psur/f
+ sOaHneLfdHgxRs7zcNxZfwrjryOPX5u2pl+78VmXvgsyvgo9uM6UChwcWKf7Sq7XWxf+8u997s72
+ 1jWHpvU169X6S/Dl3GVKi4cQrjp8JO11aGnPGxGHlw+ztPeh5jOtTjHhfdj50AgX8zcrHib8Mg9K
+ 2W8a49DJw2c2Jmkf85Aib/LnGPxw9ik8/joyODjAeu1O4+eDA+t0WzwcUBf84keeslof4uF0DRbw
+ mTgJ8I1xHArIRYdHjD4c4piAntdm3JnxhjU75BaHF7PH98Q+hfHgAFMnnJ43d/HpGfvZEXUcUOtt
+ fHm3tb4Gp9Izp62tBzF5HU4+pVQLnkn90AIg5ufLvPnQIp/gfgDRHHf6vWExHR/abWxvcsgIB9hO
+ HGBbB19CxvLv5yFbdD/HeFbGdu7PffSJ07T9l7ZWqxegAI/YdJioMFLsPkzqsMkvxNrh1Q81486O
+ N6xNh5fyzy8rd3Y+tl5NP74znfPKm7/ikveelY23z5M+OLD20wL9Xx++7Nw7tl+wNW29EBvnadxM
+ vOrwydVq8/FKFbiDN6xT+l6Zqje/gectWINXr849/JM3ffGlfzjXHkgPVAVyCzxQ/g/8vnZ9zjmr
+ D/0JLMQ34bh5ztbW1jkqSjuU/EYUpeI8JvIw89dxB29YqkP7co/yyRxeszcs2vcLMip7Fwr+Szs7
+ 61fffM5DXz89a3WsQw6e798KHBxY92+9y9uRf//Bq7d2dl6IDfH1+HmoB0uhw+g4h0+uVjvMDt6w
+ ol44XOrwQTF3ffmHOZaPh9iuw03FX9wC1w89PP8BiH9ie3341bc++7J3LCwOxPuhArkF7gdXBy6m
+ 137wM86Zdl6EHfNN+N7Uk+JVaVaY+ZuT36S0+XKlDt6wZvWSsOkQupfesDa+qcEfjsd3Y0lefezI
+ zqtvfdblH98d1MHMfVGB3Ab3BfcBZ1bgtR958Dnru/43NP//ii9UHqI3AehYfB9GsQwHb1h8Bbr7
+ b0B5OOWYdd00ngp/8GBwfHrwbWe988nVtPXPp/U5//zTz34Qf+7r4LoPK3BwYN2HxZ1+9n2POnJs
+ 62+gyN+MQ+pCHk/jsIrjiodUuw7esHgmtC/v4hCqL+Narepx0yF0koeX1qP5K04+BG//slCrxvn6
+ dBO4abp1Z1r/yLHtwz94+1c/5KM0P7ju/QrMd8u9z39WMp772g9cubOz/bfwd9z+PPr6SB1C0eTj
+ 0OIR5i/7VCgeXrl52nzhc7XikBOvCYZ5s9Mm77JQ9tf9buQHYMxrmy5kEYvRccggPGw+dMwytvpM
+ jsMhD4nZWKztoR8meTjlCJjy2zRu8tNo67HzB490nG+XDzP+0C4OWczvrNdHkeGPT+vVD9z8Jx72
+ ngY9eLwXKsAaH1z3UgWO/NT78aMI67+Nln4uvkeF357SN7G3Zx0C+Rk6Dp8MQZufQjuUtPlypTgv
+ 2pgQrr25Le0Wfsr/DGd78na/iqnczH+SXrhZehmgrOa3xSGx642FbvFH7jkCrzjbaH9EbLgW/HnY
+ nZKfTh+8u3g4XxHjMeQ8tCjiz860Wv/89tbW99/2lQ97a6c9eD71Chyny06d9GyzPPxT7//y1Wr7
+ b+OM+vI8o9zS8Zk3Dods8jo0UCjhUs8RnV762aFSZ0k9EDc/ZFKMZW32fU1Oih+BzXG749IhAmLH
+ IYNys+nQYVSuy4aRuzzy3zUWa3sI/L3ip9HWY+fHJOPWxfl2+TAbb1iUkQj+GBc0/w9+LOL7bvnq
+ z/jVZnrweAoViM4+Bcuz3eQl661DV33guVvrHRxU09O8yWLPoTb4chB3NG0cGtnEdQjs0rug2vx8
+ bIeNtkCulDY11TGhcfdhspefmp/x296niXkH/4jLcTS/s/QyQONn99i19+jNR3kzgg3Xgv8e+en0
+ wetDqR2ynM/1Iz7k/oZlmog34Ov1zlumaev7bn725b+ABTz4LRIu0t26H6fL7hbPWQU+8tPv+Tz8
+ FpeX48u+q3OvqADaVD5r3KMHb1izNyAUqW91Nl/JWchN46buCtyMH/XfdbjA9oR+TsQfcQpGv+2y
+ vxO+YUVcjg8B/cb26tC33fbsh/23RnXweBIVODiwTqJIBcGPJxzZvvNl+Ez5Lfhhz63abQRsOKy0
+ HTmvy9um3nByG5U+UCnHWPiiwQP2zHgDWvAu7RZ+Bp8JLR+8YakOi8Nozzc14Vx3rVrIJ37DQp3x
+ wUMOF355xPrlq3Mu/Lv4e4uf9Oof3E9UAXftiVBnux5f/h258n3fggZ7GX4h3oN5JujNAA/svTgj
+ Nh5aauIBQCXbl2+SFocPCDcfKgs/sCUuAtEKDTGWNfwKJ4RvJ8Ufh2LmOYs78+n8s0IAnXn0Ee7F
+ t2lM+01ji70eA3ev+CnS9tD5Mc24dXG+Xaf4hsUCgQXrtLPzyenQ9F03P/vhr8CCHnyZ2Gq76TE6
+ e5PqYI4VOPJT770azfVyNBN+i6cPiTqEoudUqTgtYtBnUrV5bm42JwjqsAgZEzLPWx0uMV/4hIWD
+ Oa7xLu0WfgafCS3b3qfJmHdejmxpp7hVj4h8kUfmozE2f57u3uQ+BOgty1gj8PLXRvsjYsO14L9H
+ fjp98O6Kl/NZV+JDVl+kyHllFgMSrcMNeC1j8mDE7zb7b9s7h77t9uf8kd+Q6cFtYwXcnRtVZ/nk
+ j/3O5UcOb/9jvLd/I75XxXbjadWH2FSeVrWWejR1HW4G4N4OF0k+BId905MP1zgsJJbDDEtxCafw
+ hBey2YdlTDOu4Xcjv9LtuN1xDb+sS9QnHGlzwv9shE5+N41pv2kMztkQuBl/+tvEjzlWk3jF3ccZ
+ cQidn3aJ4Xy75D/XGfPib4fZcIP6EacJAXHLutFOpFCvf2xaHX7xrX/y4K/7qCKLm3fEYvKsFv/z
+ +vDWx977bfghqpegOJd4U2aTe5PXIcQmywrycBgwTFMREyqo5TocdulddR1CfGyHjdzs8hMTwu0+
+ TPbyU/Mzftv7NDHviGPE5Tia31l6GaDxs/vYtcqLm5Zo8W0aqUd8wsVYh8yMOIQFv3Z/2m/ix5z8
+ b/LT+YN3V7ycrwzowPI9ecMindyRblp/Gs9/79YLH/4vpoPfDtFXRFWfTZzNwuGffN+XTjvb/wab
+ Bf+qTG4a7rHYXhxjk6pFtSm0B122pR7lTZ4AYAhePAVr8HPCXavNKpEI+7c/ieVQcTVFuJ/zhX1Y
+ ajgpfuXJ+O1/Fjcd8YrRccjA87j3w0b+sAMrX+pp3kftVsxsGoHbdQXuXvGzixwTnZ9iYjjfLh9m
+ sc6YpzwyKxrXg/0gXgGNC7mm05Pd/Pb2tP7m25/zyIMvE1EtXtF5Fs7a+2vwd/5W2z+Ipvmz3sw+
+ VGK3oizonjgNduujaqV3cx+8YbVu0m4ch5E3edZpwwh8HXKoqzd52Dfaelzww0DrdUp+ihQPe/Fw
+ vo7bwPEwwgc3lNQYnVkMCp9656N2SR75CXeCDx54orODLxNRBF6s79l9/eT1Tz60fegX0ECPc7ex
+ 16P5tFksq2/UZZTdRd5UllXEpZ7NySbmvAG4W+4tX3rZN33YOZ6FHzDJTkTmD/fDX7O3f98HX9ox
+ zgU/Jua43XEBIELHIYNyo8MC+tkIrfxsGrVpwbdpLNb2ELgZf/rbxI85Rku84u5jo63Hzk+7VHC+
+ XfKf64x58WcjSU53qB9x4g0FcSHXdHoqN3yA3c76evxWiK+/5Wsffj0mztoL36o5e69DP/7ubzm0
+ c+gt6PPHae+hN7xJvTnZO9qMflDXzjax9FE/EoSMQc3JCdsTo+0S/KlP/uBA1ya+j+KjOa/0o2YP
+ WX7kfre9cGTwNYsfU5bpF4JgmYcdaj7ycBwRSMVBO2gMtJPgpaA8FnmJB7rZGPYzfLNb8qe8F955
+ wf/J+Mk4Mdal/LweGSd18idQ1scedBht1GNS5eEnhVjfkEPRB8SbvHJCRhGstlZPXk/bb7nglz72
+ zak5G0dX52zL/LXXX3ToztWPo/me610TZXBvqCmzubwXSzFvLjT1bK+qydnUgqn5ksclNs+uzUQD
+ XjJsmyTmCx8w4QRPR1aU386XOPLHNfjSjpvJ7gUJojlud1zzOLQL04XeJGQfh47fRLIuG8Ys5Kax
+ WNtD4MSLeGcjYMpn03gq/MEj77Rvl/OKwwjzlPOQIWy4Q/3wIT3LnjgBAsdpRa4H3HiZz36sB8/r
+ brnoyDdOz7r8FmPOnrs79uzJd5p+4t1P3dpe/wx+Uv1x7oXctNE0bH58cLMNPZpmtom7PopX+mwx
+ dWU/BQBsmz4+c+amzyWQXwrk08B4SpzFEQAMjXdpt/AzP4RItylfz5tf98D1edcn3DkuQ3ffx64V
+ bmw+iED3LS4ZeMXVRtWDPJuuBX+eEqfkp/MH7y4exZGRwyBk9U2K4ol4I0Hz+NBi3SirAvJjGrMi
+ /108MghW6d+/s9r6+tue84i3afIsuZ09XxLiL/9t/fi7//rWsZ034ueqHpe94marnlMT9c+Eu5sq
+ mwnNNnqoui16D5vQzWh7dtOQsyk1q0Ci23h4hNzHWfOGA/GXgnax+Zf29FunCsNs8TMqybbXLhHe
+ 846P99hkgedMOmRWmw6THj/1XXb+ES/NRScm4xKfI31EXnzU1fNMXI4AVJ54nvnbZBd48eaNuOBL
+ e6oyDzwJaRn54aPnMfSAQeF4og4hh6IP4rEf0eNGP61+8kMZ33Pd3n7j+b/40W9P5NkwssZn/vUT
+ H77s0M5tP41V/srxhoG01QsuAXqTTYAejebTBDAcCQx5tz7KV/pssWYniOXyn9tI/MGBgXrDPRY+
+ pnscA4fNAjuns7Bb+NmMs/28HhlHhjPnH3FYLzkedw2x+aPAGGJzA0gviruP1DOfNtofkRuuBf89
+ 8tPpg3dXvJyvyPEY8ji0kiTiDXg/tLjMlMUjP6ZxPaI+YV4VEp4S9a4PPUF+3W1bF//F6Tln/j9H
+ Fl2ZBT4Dxx+7/plb6+knsQGuyB4bm1arzVPCibtbvFnYFNo0VJdCvaNmg8XQR91CUXo2VfB0B+Uf
+ k2pZ8YsQE+FXouMqfIQJx6JTXCYIeQNf4xEo5O53Iz8AY975z2URidJxyCBdaDMKn/lwhFZ+N43c
+ jCrchrFY20Pg+6FW/jbxY07+N/lptPXY+YNPOs63qw4hrjPm6xBSplrOSIv6OGQMBBoP8lMDrIK/
+ 3PAhDm/y46N4IOeF37f1kdXhQ887079EPHO/JOSXgK+6/nu2ptWvoZGv8KbinuAhwhG3ehiy9WgK
+ bR7jBWyyzdhsYceuKX3QshnZXHIkQMmylz75qceF5k18H1uYww/tS0G7FOl38LK5tSk063mbDZwP
+ VQCoCN7hn3OOq9ulQ7HE5iYyr2Fv/10WD4CzMeokXNYhR5JWHcPDCfDkmfGnv012onc9gt3+wn/y
+ UJd5qE4loz74EK7izPoCBIXjifUNORR9EI/98M6LPGEniX6GTAQvfE/2iunYMX+JiN737Jl3PzMT
+ ++X3nbv1B3e+arW19Q3+DBZpeu2jedhEWFB3mVd2pndTeC+WYt5c3BRqvmgMEoYsWjVxby7z7NpM
+ 2eSyD7+gzM1ReJrzCrz0Lf5wX3YznC3DfL65NvIrj47bHdeMf1YIlCE3ex/h3XXZMKb9prHFXo+B
+ u1f8FGl76PyYZty6ON+u/fKG5Tqw7jzi1j9965ErXjR99erOFuoZ8Zhb4IxIRkn82DsuXK0P/RJ+
+ XdWz+mmiTYnVnG1O7R6XIM6K0MchU3p2AXAauIm7PkpX+tyM5A07QSyX//jMmZs+WOTf8IwrD41A
+ lB/rbd/zWtgt/JR/8uCy3PMZ8wboHjjjPQ/cLD3bGb24x+bP9fAmzzptGHOd2qg8ybPpWvDfIz+d
+ P3h3xas4mG/EE7K/XMNshZl6QFkuKPLLOS1j8nCUXgNYiVvyBEBeBw/Ecc15/vOt5x35mumrPuPW
+ ATj9n86sLwnxGxZWO4d+FV8GPovN0ZvccjZNNEPritE7rRnUI2y6aJaQbTaaTG0wbxY1He3k1wDx
+ uGnhHx+8hp7qgc/5whvuwBkH7R0IDbVJKq7Gaxw1vgbf8O9NBL1g5h3+aee4On86FIt3nx3EfdiT
+ b56XeMTKPOx2I77ZyV/3oAWL+iUuR+Aqz+TPcZNd4DGMS+vZ8g5NxunIwz/rg49ZfZQZ9TCEwvFE
+ HUIORR/EQ1cRJp9EkH7tZ9STWF7Si7fq+azzbz/6q9PPffxyI86MO2t8ZlyvfNcVq0MrHlaP1xKj
+ ebXqHHHpTQJdMN4wMGlg6BMezSd7GRoY8u43EpnTgfjKzO3reXsQT/nfpTcP9Ya3uDGR08NPx/W8
+ FnYLP+U/CHfnM+wdR4bDuriOngcOE7O4DN19j82f6+HNG+UHWrx9zHVqo/MmcsO14L9Hfjp98O6K
+ l/NZV+JDHodWkkS8ATePDxvWTYcMeeSnBtQjD7/OEzi6k373obWLx/H/znq99WW3P/eRH0m203k8
+ M96wrn3356AB3oA3hcdXc3mx3AVc5GqK1gzoAS1yDtFE+druXqpuU4/O+cvcD6P31FQ0cFOyRcxT
+ 9pJpFk1NCJJIuY8tzOGH9qWgXYrmK3vhCPTFeZsN3MEb1mIdWCoV1OuherX6RSU1uM7A4aPXFQSh
+ xwCF6x7rG3Io+iAeGo62IE/YcV5+hkwsL8UhXtGFrInHT9P2G8553Uc+x8jT+46MTvPr1dc/fbXe
+ +Y/4ntWltZhISW8q0XTMUG8S3Kw8FCTjVg/Uu0k2v3EAKEDwsGlCVpfagfjGNEsbdtS7nYf/kMUr
+ vW/iFdxLM4s350UbS0eHEY/TWdgt/Ay+juv5jHlFZHHELX/QRKIpLvOQbd5yHWKsQ5M0+KO4+5jr
+ 1Eb7I3LDteDXbkdgp+Sn0+8VL+crcjyGPA6tJIl4A94PLZUveeTHNK5HHn6dBxrhWS/qdx9azrvx
+ AK/1lt36xqPrra86+rwr3pysp+N4er9hvfpdz8Fh9atYvEv1GSYWUYuuJmiLLJmL2ZrBQK2b1lTw
+ 1gwhqzl32aMLB311ScDUVAQ4Lrpw15Z/yY6nGic2mdAKaBGvFfZL+4qfflKkn8HL5u6HSfmf8dte
+ fMGbcRvmPOzO/OlQknef/OZt2Nt/l8UD4Gxs8TBe4XMkaeiTP+UZruE5P+NPf5v8iD7ySgfEBV/y
+ UJV54ElIy4gXH8JVnKkHDArHk3kljxShz3o0O3sQQfrd67CSftA5zlk9pksPTzu/et7PfvRZoj1N
+ b6fvgXXtdd+wWk8/j8PqQtbebyZoGi5SytF0pYdi6DFrINXsTcnWm4f65BVgZs/uCLskCBkDetSE
+ thdADsq/9MlPPS7Em/g+io90vBgo/dC+FLRL0cCyF45AX+VfCdu/NgHNBDNR2YtuUVdSVRy0Y0D2
+ SxWvYU++eV4KG5jZ2OIpfLNb8qcsP4nLMfzP+NPfJj8tXjz6Un4t75rOPD06T+DwIX9Vh9TDEAri
+ XGfWI+ujh5CzHs1OPim3+snPkCMs13vQhTxwsR4XrraOvf78n/vgN6Td6TaengfWq971HVj+n0ST
+ HM6C12c6Ni0m/RmHzYFVLNnz1mOyHgImOO3BTruQ46Fkm6Ve9CYQPmjZlckjyJBlL73jCQbAwy8m
+ nI9H+wtUxqWuFxA32oXfni/VwpHBF3nNx3iS3/ZSBO/wT5Tj6nbpUCyxuYnMa9gzsHle4gFwNva4
+ E58jSUOf/CnLT+JyFDzyxPMJ/QS+uPlAf8GX9p523VjZISM/fAhXcaYeKChc96hDyKHog3jMK3rc
+ yNPqJz9DLpTijbBp1eInxv75sDoXP1L9k+f93IdPy19Tc9odWFuvetc/xv8J/Mf4jMH+qMsimoZN
+ hlnJsWgEWfZoPSbrgfOWbW8e6mXnB+Gt73xkx0UC4YM2tontBcCtxSd98lOPC/Emvo8tzOFHuwA2
+ EXi4320vHBl8zeLHlGX6hSBY5sGJoO9xCxgK5ktQqzNFXj3+ZV7KB5jZGLyyyzrkaELex3UCfOUJ
+ ixP6ASbjLQfkD/9pT93AZX04on74EC7r48pUXR1PrC+A5tGDApQ78nc7OpTc+gJyvalJ75v4Bp35
+ W/3sn37xBz8Jj+FHzvvZD31XozgtHk+rA2vr2uu+H2vwHaosHvqlzyhcTC4SFPkZhk3Hy7JH6zFZ
+ D5y3bHvzUC87PwhvfecjOy4SCB+0sU1sLwBuLT7pk596XGijxPexhTn80L4UtEuRiQzegzesqAvL
+ 2+uigu2uN2unq+llV9OuLyocMI7gwUfnH3qpta7Hf3PPOJM3HMpPi1N+hlyoZf+1+IkZfQuBxxV5
+ VtPLzvvZD34/9afLxchPj+va6/4P/FPL/9TF3h1yfQZT83BNkFosGtHSQ+6fadwLLsGAR/NpQoa4
+ oYlCLj+lJzuu0guNKNwUmjcA9+Z/l14gxWd4xsV4TF/zCsd68zdexiF82C38jPw7zvbdEXEmigGy
+ 48h5jFEW42Le8Pk916FtIqLFt2nMdWqj60uLDdeCP9e9NilMNvpr/KoLefoVvLt4hEtGGIQ8Dq0k
+ Cb5I1DyoI/Bql+ThCLoYUBfok1ZURRAS9eZJT1YseORn4CqP9KsV4D9Bvf72O//0Z/6zGdc+FViX
+ /X+96rrno6qv4T8P78XO1V2E7y7QptcSh+xNjTTVBdz8ufkwZ6BrMNN7E6uJyEOg+GowT0yLYKln
+ U+GjNn8EUP7LfeQh+/ALXdoVPmCKI/Ut/nBfdjOcAvRt8JlwyAGqPLJOzn/gWrwVRy8EN1/Wr43A
+ KtxNowtNwyhwGyOs2RC4e8XPjDiEzo8pxq2L8+3yIRDrjHnKSIBPQgWN68F+0ISAxoVc0+mp3JjP
+ fsiKeiaPPPgmPdwWjx5iQu5oZ/14oAz+Nf796Wn9wrv+zGNf0yj35SNz2N/Xte+6Bl9z/wr+IvNh
+ r8bmkL2ZYjGREeWOlx6LMzYdQOoF8w14O2RKj4fZJjaP7aN8pQ9ad4XtBKGf5n+X3jyKm4/k05CH
+ hsRZHAHA0HiXdgs/I//O3/MZ8+a336XdqIf1GW9I86FvHsQ3Nh/SAVJl7mOuUxvtj8gN14I/1/2U
+ /HT64N3Fw/mKHI8hb34zot5w80T+SJyyeOTHNK4H1iNpMTaCkKg3jybytuSBrHWTH/sz7zx+x7E+
+ Nq22vuqOP/WYX026/Tju7+9hXXvd52Oxf5GHlYoaza5CxiJkUXPxOdaicPO2xaJi6GFp4KArfTRD
+ yPOm6vxF7wfhs5XdFI6LLoYst5IdjwLgreWXdrN4iWE+9EN7ESVvipQHr3EE+hp8A+fmh14w8w7/
+ tGuHTNa94qCaAZmPaF7Dnnxjc5V/YBR+jmEvu8TnaELex3UC/N3yA9aMtxwov5Z3KAZu1E/1wXoo
+ n6pD6mGo8lB/vL7KejQ7+aTc6ic/Q46wHH8sQ8XR6lf1EJ0QZke8lEB7eL2z87ojr/3A05NzP46K
+ dT8GNr3ynY9Fo78Zv874IdoMKH6NGwL2G0A0BfSUOz4/0xhHPUC1eJbdo2gGNoXsRWRgyLbveqh5
+ lT5o1QZwIEcC4GY7t2AEUHpiCOe8HmJgPJ2GeS1xjTfsK89FHDU/w/V87H/EoTAUl+No8SkO60ee
+ IfchNn+uR20eYMgm3j5yEyE+4WIUP3k2XQv+e+Sn8wfvrngVR0YOg5BP5zcsZaM8pk+uDx2++s7n
+ Pfr9vRT75Xl/vmH96A2PQIF+BcE9hM3CZp2NrJ6LW3WUHu1fzRV2ibM9zbAZ0rwequdCn/6M17YS
+ X7dnXGGX8YQs2tiGjksA3Fp80ic/9bgyTzym3SxeYjIOnVohi1fhzOwEF44R+Rp83vyWGRf0gmVe
+ qaddi1tATFUcVEfihMbV41/m5fpEvMBLDl7ZZR1yJGf6Df6U98I7L5glf46b/Ije+Sa982t5hyLz
+ MjPDsodxWCWPx1ALt5/fsCqP9fSQ6ejR/zj9wu9yD+67Sy26r6L60Rsunrbuwj8UsfVkNV1uhj5u
+ CNhvDjyM0GTQ6w2B3UK7kJPPekzWg2GG0375xhE8GpZ60QdBDYpDDsI/NHLoOClZzviCxXFT6HEP
+ sRwov8LFG4lwLV/KCz/lf8a/zJduzCPz4HW5ch5jlIVhZLx6Xt5yHWLUOgAjvk0jcIqzjeKn/aYr
+ ePOQyfGU/HT+4N3FozhYh4gn5HFoJUnqIbNcwOWhxfJSDkUfwEoc9Z1HBpqw3odpIqyAQcBsP+pI
+ feWRfiP++TyBwl5357mXfOH0dQ+7Wdz75La/3rCu/eB509bRX8QPhvqwiiKzWbW4ObJ4YzVVylz8
+ WfHdFUOvxWzN4FUNPYbSpz8vcu+COX8Lg/HIXoOajk+Oiy7YfZbltjULtbpafmk3/AUm/dC+4idv
+ ivSTcdMrFQT6GnwD500EvWAmGv5p1+LOulccVMMw5wnHNeztv8sKG5jZGPbCZR1yNCHv4zoBnjwz
+ flhK3mQHXcZXDogL/8lD3cCN+pF5HDKeB1JUcgcCx+M6MhDz6EGByR35u50ZcA+70OehJ3XcxDfo
+ zN/qZ//0SzoAy0/uBymsJ+dq+uxz7/z06ybuyX107Z8D6yVr/JjVLf8OAV3jmrIJWFuvwmxkAVX0
+ UUm/EcRnlGaXONubb7ZmwZNrSJyajxMAJq94Qh5xtTAK71bINxvbK2DcWnzsWlxDT7X1fd7xbPCj
+ rhcBDTO84ktexaEuJav9Vfwl2575AqEEyl5htrhVKBkaRxPtNudDkdewJ988L/kHZjYGr+wSn6MJ
+ eR/XCfDkmfHDUvImO+gy3nJAXPhPHuoGzvlaRn746Pyqo/C4QeF4og4hh6IP4rEf3nnRT6uf/AxZ
+ EKIUL0fRhTxw9m+91ku8thtxE29G9+/0ZedcuP0zE/fmPrn2TSDTFe/8l1ur6TkqHoozPiNodSWz
+ mv7MJMCshJ63XkuMZvPqkdF8XKPBi0kDQ2/Z+vATeAHF1+3JT1nmfghZtG7fEW+Th958waB4M78+
+ Cr/0o10ASwXAeDMdA8teODL4cn7Dr2Xbu4dNVPaiW9SVVFkPPmuX2C9FXsOefFHPmHf+ES/mnJ/t
+ ZZf4HE3I+7ii8HvhOX/SfsCa8ZYD5dfyDsXAtXjh6Ux7w9KbHgqI/7723Cd94IerLg/wA9f0gb+u
+ fddfwcn5r9kC1WTRrP7M4ab3Jtkccn0GU/OAB/YdLz2bmLz0Mxwp/wGP5tNEA4ZcfkovcxNmmDTL
+ TIjTZYflf5c+UImPsfBFgwf5iQnhel6eL7uFn5qf8dt+FCbqx5DKTdatJiIOx+2CxvNyiM2f67Hn
+ YRLupEd8fRQ/eTZdC/575KfzB++ueBUH6xDxhDwOrSRJPWTAzZN5WQ5FH8Aah22lywcRiNh686Qn
+ K4ALmKJDXFpvxZf+m9+Iv/IL3EgrA8BPlq5X33r06x/3b2b+HgCBeT2w1yuv/yz8RsS3IJALajMh
+ olgij4vmJS6bchZ8zBfPEqdVxJqSD4ZUjwdOWLY+F9t4AcVnO9uzaYIHpt5UNYCOfuIwoD4clH/M
+ JI/Vzqv0CpB8LV7RMFDP9/gjPOEDMHB68m3wDZ6qByGVZ/p1YZZ2wgnuuFMmRT9sZMfNw3niN41Z
+ yE0j8LuuwN0rfnaRY6LzU0wM59tVhxAQykv6zLBoXA8giA8gWIALuabTU7nhg+3MSj/Bs4xj0IWf
+ mADOcdod3crvbH63H9ErH/71nem21aH10+/6M0+4XvMP0O2B/ZLw5R+7YDXt/Ds09O7Dqjapi86m
+ Z5E1sliU2+V561X6wCfO9jTLTQhjA8UiOsjWh5+QBRRf6j3SXnZkKH3QuivEJwdNllvJwWPALD/n
+ Y73wmW76oX0pGG+KBpa9cAT6GvkPnA9V6AUzUdkLtqgrqSoO2sGQcruGPfmintCXfzwr/BzDXnaJ
+ z5G8C/6U98LfLT+in8cv/vCfcTqMxHmUf2TiT07Ojzg8+c5B5WHfRR1CDkUfxCPrdCOeVj/IxSMP
+ vrkOLpPi1fo0O8ieZzx6kmHVSfUlPvjolzD5x797uJouwM/C/8z0Sx+7wIgH5v7AHlhHPvGvUBW8
+ YbGGKBZH/NGYMovLYvfRBrzXJT0siyfwXhzzk3joYWpH4iB86NNf2FnR9J0vQpA/zgdtZOK4iLGD
+ 8u9uUDzBAMPwS7QC8tjCLAds2hE/7VKkn2YvHBl8lf8Zv+3FF7zDP+0cl+MwfzqUxC4PvnAzi3+Z
+ l3jECrMcWzyFb/VY8qesOBOXIzgrz+TPcZOfwGMYF3HBl/FSmXVx5CmjPviY1UeZUQ8jlYd61zHl
+ UPRBPPbDOy8TpF/7CR4DjFK89ldxbKqH6EZGVacIVIO8Mn9S08C8+CHuJx+55Rbs2QfueuAOrGvf
+ +SLU44X+DIXasLioQ5ayZMxz1YXLkfXifLvMY5zXxHaJs33zMxyJRXRcI8WR/ixr0RRH6mNe+Aii
+ 9F5iNiefHBcxQ3aelJNfjyPPNu94iAtM+iGfE4WCflKc8yqO7MLgtdnA+TO2aIgQUcZtv4u6ApUO
+ xUL+CpDKaTrv0Gr6zsecO7356RdPz3vYYai7P3nZtd60Ey7XOUcreB9X49vUH1U3WLjeG+rT+DO+
+ ckD+0Kc9dQM38iHzmf6GpXIgz62trRce+an3vqjqdD8/uOr3s9OJ37daH3sL/o7gBfoUtKE5skk0
+ Qq/PBG0sux77kifk2kzMFoTVzCFv1scmDXwY9sE80kcQ8kd+waKJzWOEHZZ/hyMe6TfFD0Xhac6L
+ OA3pKOXwG/oZTha+Db60i08WFiuBOW6PQ6viGIX42ocdmf7BY8+drjhvfD78zZuOTX/zvbdP1926
+ wyNxfohQxuGgeDeNDnt+D9yyL+qNAeiT9jNnttT5MYPoxnw+Y7Q/1o+HVuRRnilnWtS7jwMINCwE
+ qEE8oh8OhRt5NR4BfXMdGs9wbDrIjk90BuKxeImP+Fy3CEDzc15obju6Pvzk6fmf+SEY3a8XY7t/
+ r5e/9ch05Jy34/Xys1zk2CyIwiWLEZsu9W3VvfobIvbmisWEnnLZhZx88uNVKb4Bj+bThAxxg0XI
+ 5af0UPMqfeYRDjivy3IdAm6f8h8gx00h7ApfNHhQODEhHPLG6Lw8X/LCT83P+G0fBI429DJXOHP+
+ nm8YTE84f2v6oSvPm65+UP3maqnyhm/cTq/5+F3TSz9wx/Spo9ziSBN/do3cXMynjfZH5IaLm4rx
+ LkbZA76LH3Py2/jT34w9+HbxcL4ipwPL49BKFs7jCrh5Mi+alUJ0cic4+zhoaT8IQqLePJrIWxAU
+ Dx56XpUHAS3++TxUGbZwJPdEjx/Pbz566OgXT//zk+8i4v66xqfA+8vjkSP89cY6rNhkLAKvKhqe
+ q5livnAN7yahpS/zRDNgSnI2ccnNT61Z+gdIPd+aIeTeTRUn45be/hVPyBiwxHbguIgZ8tBHnEFR
+ eRK9rIvDpMJ+ySei5E3RwLIXjkBfs/gxZZl1gyCYectedIu6kqrimKZLjmxNP/D4c6c3fP5Fex5W
+ NME3bqdvevg509uuvnj61ivOnY5ATrfDH/1HX+SY/jjm1eqzCV95As+0juuH+uBLeufX8g7FwKkw
+ YQccPuSneFIPQ+aJ+TpkQg5FH8RDV0UTkadf+4n6REzGs262qzha/ew/eLkvxEu5x01ek6p/CSsc
+ 6Wd+n37k2JEfMPr+uyuk+83dK9/5dXizep2KFMXUZwAEkCWsselVRRaZ1VSxd0dsnmgK8i3w0nNx
+ yCs9bvVgWtNTb9zQh18NS33EIn/BQ/7MqOJ1ZuV/l948ipuPYVd4mvMqPzEhXM/L82W38FPzM/5l
+ vnST/Ok265bzq4mf7f7CI8+d/ja+V/Vgnj5383r/bdvTd+DLxP9y43ZG6THXqY3Omwu24fLCjf4I
+ uTYjTBidlruPjV91oV2/9uIRLhlJaOZxaCVJ8IVjx4M6Aq9lTB6OoIvB/Ze0oiqCkGCPD/LMriUP
+ 5J5X1UN2I/75PBiDdvB7wrjmFzzb0/pPbT//yp+fxXEfCoz6/rmuffdjpvVdv4VCX1rN51WLTRjF
+ RTRZyhqBU7Ha6NVdhB98s02pRQxcEA49nKkXNulzsRleGQovN4xT8Th8FTEUpWdT4cP2RJin/GPG
+ 7tM/xmwyokkUYw8Tiprv8Zff0M9wsvCt/DeeXfyYmOPa+sDu6Zccmv7ZVRdMV114qDGf2uOvfPLo
+ 9F2/c/v0/tvhNNerj5toQ7/sC2+qqCvslFcfO68KRsSGq/NDXSjOt6s2MRBcFcq5zoQNd9THZjfQ
+ OAECR3x6KjfmG3k1HjqIS3rwBp3jaPmVfYVnB2N+tx9RK58FLxRph1+vfOOx1Tl//P76fpY7P7O+
+ r0Z+3+qcc9+A3xz6+doEKMJshF+tYR9RbC9CrkIbN8TpzRWLSZ5cLI4hJ5+WphymPhaFftl8spch
+ brAIufyUnuy4Si+0mrPsDMA98iY8M6Zdu+SXcszbX4nNT9gJ13iXdgs/gy/z3pQv3SS/g0u7R553
+ aPrex583Pffyc1rU9/wR39KafuSjd04/8KE7p5uP4Rvz8F/rpV2IeGLz7PLW9Yw75NxU3opajayG
+ R+B2+enke/FwvpjwGPLZ+IbFcu3srN987H1P/ILpJasdyvfldf98D4vft8JhxUTYRNyMszHmuUWq
+ FYCTvAfeTUJGX+KDRTVp2CXO/uzfvLCrh+q5sM/4Il4CxdftmUfYMYTSZys7E8clgBxWfMrUfNTq
+ yrpASLvCk45X+tEhGrLyznQMLHvhmKivwTdw/swPvWCYV16pp91qOhdzL/7M86e3PuPie/2wogd+
+ RfnX8H2tt+L7W9/4iHMjz1gHAph3v0JWnlm3HAWP/sEz06K1xk12gccwLtW59VNosq5mZFhmHodV
+ xukx1ML1N6y0q/UE3HE2O/mkPOpgP0POgF0Hl6ny3FQP0WVFHH/h5ceM9KOAVLnkHX6zDopna/X0
+ w0947/dkLPflyFjv2+tH3/W0abXzptVq6xAXZ/kZLT9zZwlrjGJT70Vt44aIzcMmRVGhn9mFzCKX
+ v+FIbMMN7Y0LIui5ePRv3rle5k0vmOIoO3sQT/lXNwRvUHBQ3H7QbOEZL68WR8kRb+UtWNRh4Wfw
+ mdDyMt8exzR9Hd6mXvqEC2Y/piDf9+Htulu2p2+/4bbpv92Cfx6Buz4XaJPPrk8cRq038KrLpjH7
+ oY0z+uDdxcP5rCsNQlZfpMh5eY6B7UM/+NAYsnjkxzRclc08MiBp6M2jibwteegv6iA7+XccPf7K
+ j/YCxpBy5NHjN854zuNp+9g0ffb05668wdb3zf1+eMNa/2sskg4rpsDk2HyzMea9WFFK4CTvgQfB
+ rCLiy2ZofhJnf/ZvXoDqgfOWicumoj55BQjZZswj7BiJ4qxBTUWA7QWQA/MD15qAWl1ZFwhpV/hM
+ N/3Q3oEATT8pGlj2whHoa/ANnPOFXjATEcfvT73+qRdPr/qci+7Xw4qRfvZFh6ZfedrF07990vnT
+ I85FmzLBfoWsPLNuOQJXeeJZZcpxk13gMYyLuOBLeyrlT6hRP3oYh0zGmXqAQeB4su+TR4rQaxCP
+ /cgJbuQZfWQ/Qy6U4iVv8LT4ial6iG5kNObTjxnVn4TJf/IOv1kH49hB06HD6+lf2Pq+uyuk+4z+
+ R/HT7KvpWvHjgdXME382AsBAXLIYs1nCjs3j1dgccr0pcHHJt8Dv8lcOzTfgtHecQeTIBDDvXA81
+ r9JnHuGA87osO07mu9QHKvExFr5o8IAElR9NhIu6SjSw7BZ+an7GP8/3ksOr6Xsef8H0Fx513oQf
+ WH/Ar9u319MPfeSu6Yc+fMd0J3+Ya3nF5qz+aJuV4c/6qssb+nFGvRcP57OuNAhZfZEi5+U5Bi4b
+ /eFDY8jikR/TOF7igpY0mYH8UBo8UudtyUN/WOc6XCiTTTz2RFPHlfOc4GyTY8K4wQfDBY5/SXr1
+ F7Zf8MRXSXEf3Bj1fXPxVx1Pd34ABXvoLgfcLEx2MdZmgkFviSx6H8u+kwdf8XR+4mKNhj4dRRlm
+ +lxshlkKBSZa0uEh0yC986kB7qDHh+0FwC14CZdkHmpt3/Tya73qEWEKJzgmmqLHVXyJ04RvI38T
+ pnwILzJ/8VHnT9/9uAumy07hxxSai/vk8aN37Ezf8/47ptf9AX5WMQqvTaR1yPXCCO+z/ulyLlgf
+ N0Xb+cNeMM63qzYx1xXzlMfKVpiaz8MqgMbJT+Bor8j1gBsv89mP9cVjgFHg0foDXnHkBFmo14hb
+ PfT53X6CuIbiFU/WOfJV3vpHWT+xfdH02OnrrrpPfrXyffcl4erOl2IjjMOKxcOlzaviQs4x5iN1
+ LxGb8Dj43LTk5CVeWOTmSz+Js2yceWFUD5y3bHvzUC87PwhvfcxLT++41Bw1oCdMaHsBcGvxSZ/8
+ 1ONa1INTwx8lXOmH9hU/eVOk38GrOKKZct5mA8dN8oxLD09veMal0z+56sJ9eVgx9kfhr/q88skX
+ TK9/ykXTlfHjFKpv1i3HyF954nk2aqGjPgs8fdSlgvb1sma+nlln4PDR6wqNDOQOCq+j+Qg0jx4U
+ YK1ftzMD7mGHJ/sZsiCcD4Li0cPA2T9xpFOkMh3zUlhPPsZBWIunf/J1/IkTUHz4OcuHHr5leqmE
+ ++Dmqt7bxPxG+7Ttb7Rv4o5iqjosXitulnI2Qq/PEG1U0WnXryVPyG0VojmyuWAsR8HDQeFQ78V2
+ eKUIfZoxLoevMOQv0sHE7uYyTzVJuU//GEFY+shvyPJSDtU0Lf5wL/uKh3EET1jP+B+BQ+D7rrxo
+ eu5n4P/MnUYXvzL88d+/c3rp+2+fPoXv9s76A3moLJvGXLA+bso79PVmkhjOt8v66BfMU+bKOwLK
+ Xq7CaUJA40KuaUVe5vHAPkNfSOJoGWJd0rt9jBNhTABV9hWe8xjzVpRccSQusiJv58tKt3mot4+t
+ VvfJN+DvozcsfKOd/1ewX23zcRW92WIErjYlnlU64HPchIdBZzcfLIqH+ly04Cfh0Jcj8Yiu9Bmf
+ 8YpIfN2e/JQjjNJH/NFeytMe5LD8S5/8wZF1gZh2hV/6ob0LFLwpGlj2whHoK/nOx9d/L378hdNv
+ ffGDT7vDipnwr/m8ED/+8LYveND0rY/CX/PhxKJ+Kg+wszEWTPVZ4F2huGs9Wz/VtOuLFdKM6wwc
+ PrwcSz1gULjux+urjDN5w6H8hB2mdn8SNM750E/wtPiJsH/rtS/E2+fpl36Cj3omVDiKLY4AMh7z
+ hZ3nDx3eWd8n34CP8OzsXrnnN9rbYbGLN4rp6mo1lXQVFQZa/ByB92eQMapIUbTiD97i6X4IYrbV
+ PLl4nI8yzPReHKdRirCP+GA3S1P+TOf42cRjkTOAis/hqJnwGIbhV6LjKnyEmfFy3oVKXIoBjLyE
+ kwPfnvvw86eXXXnhdMX5888pDXLaPfKv+fwN/jWfTx3NZd485oL1cVO2oR9vHAHifLusj3XGPOVc
+ Z8KGG6wrPqTn8iROgMBxWguqB9x4mW/E0XgMMAo8agfATc8HPMkPB9qFWA99frefIK6heMUTfBHf
+ bj/IZDXhG/BPepUI7qUbY7j3rpe/56HT4TvejQ0yvneV7FE8bT4V14snGRgVo4/Aq8hZ9D4mZxvN
+ E4tJngV+5ld63LRGLsGAR/NpogFDLj+lB4ZX6YM2M+K8Lmdoe+YbGZc+UCnHWPiiwYN6MSaEY7My
+ 7sg7Rrfg8HMVflTghz/7kukZl927P6XuyPfH/T/hr/m8+L23TR+6AxsmNynrgT+uRxuXfQh5dlFm
+ XZc8wiUjic2sT04piij4rA6e7GualUIBUjRrO1yKBxrhGX/0eciC8BYExYMH9UXgKg/JI/75PHnM
+ 6PhErAnjHH9M1Lwe4pZ8+Gs7n9i+c/XY6X+5974Bf+9+SXjkzu9FzD6sokiVSCtaNkGNAGWSsYRD
+ pl00TY0kXfC7uK25wi5x0mvNWzNozbw6oit9LErIvZsqTvG3MJos2tgejksB49bii64YeqpHM+T8
+ 8EcOXOmH9hU/7VLMfGIEjr9N4Z981sXTG5/50DP6sGJ5vvIhR6Y3Xf2g6e8/7rzpYvxMBqvg9dhQ
+ nw31JocuFbSvV067rliIgNnDOKyWesCij/obltdXitBnnMlrf/bT+gJ+iychGMU36EJudshHdVC4
+ epL16K/II8JnPjKoPCnO+UhgHPl8dT7MPnTrnJ179Rvww1N6PNXxJL/Rnif+bITPLGGN+ZkNo3dj
+ GzfE6DeMWEzydbuQVUzySsatHohPN9SDRxMNGHL5KT0wvEoftJkR53VxNK/c7tIHKvEx2p/pTcNA
+ Iz9OCNd4mx0f/9KjL5z+zhP5f/7u3c9NjnZ/3//grp3pe/G7t17zsTsVqOue64ORmxhF6uMsow2H
+ llaR87l+NAh5HFrJQhyugHszpz/7F4/8mMZdgrhoFuaNIOioN48m8rbkgZz5EWL/Hnv883kCTcj5
+ eNLQ44+JmjfO984Hhu3tra177Rvw92IX7+gb7ZVkJRuphCx9NAk3W+IrScCzKXIsXMO31ZQD80Qz
+ YCb9JM5yX7RyFPaWHUfEhQCSVzwhj7iol7kfpM9WoGLkB0DJso+uMH9wtPxy3vFs8EN7BxK8KTqg
+ Z1x2RG9U/+eTLz4rDytW9PJztqYfxm+U+LXPv2S6Gj+2cR6+Md/rWn0FbM7TThcXNtbD65XTrq/X
+ M+3Aiw8vx1IPOyi8jsfrq1i/6gv7s58Wt/wMuVCKl35aHyz6abTLyGj0F+Mmrxn95sRnT5h3+M16
+ GUc+X3M+zK2nQ4e2t++1b8APT+nxVMZXvPO5q63p512tPShRPOrzxJ+N8EkrlyzGbJawY/Mcj198
+ uZjkW+B3+SuHfCA+6bEo5NFEKBhZyOWn9DJv+swjHBCny7LtAc+MSx+olGMsfNHgQeHEhHBRV1D8
+ 0QsOTy97Ev6CMr6xfnDNK/CxO3emv48fg3gtfuupVmNDP84stEu9SYWHMu3iyXDioBmHVrJwHpfV
+ aF8easEHIm96PYhO7gQnjnoa8yqCkAaPJvIWBMWDB/VPEM0PE3kwH3F4cjx8MGHJMWF7xx+GMVSg
+ Jc/5WJlDz9v+xitfZ+ZTv9/zN6w1/l/A1vrvKjlsnkoyilShtaJx8y/xLkYtjfSV9AY8AEXNB/uN
+ ZkiZm3nm1zjzAlQPARO8NUPIvZsqTvJKT++4mizaaDLHJQBuLb7WBNTqyjwhpN3wF5j0Q3s5Yh1W
+ 0/l4e/h7V148ve2ahx0cVlGq5cC/k/jyz7oQP3h68XTlBWj9DfUuG9W5r5c1uS5YIU1YBg4fXg7P
+ Dz1g6pMT9VUsZ/FmJF7f9Gs/jisRHKWXn+Bp8ad+tIueZE47z6cfTSsfKVo8edgmn0bquc/imvMx
+ LiqgX2+/ODH3ZByeTpXl2nc8G78F5/UKOoq0kSoOjzzxZyMMVLQ+RjMR58OgjRsciA/FU1HJ0+1C
+ VjHJKxm3enDNHX40n+wbMOTyU3pgeJU+aDMjzutyhrZnvpb7YhOmuP1gK9i3MJsf8xL/9fjrNC99
+ 0iXTw/G7qg6uk6sA/nri9GO/d8f0vR/E75fH97q0Lt5dgyD6uTYhNFo14WL9iA55HFpJwZXDpQUk
+ bBxaapfk4Qi6GADPQ8TmjSDoBk8irFjw0B/7R37Sv8fIJMzSX4sXmrSz/7QffAp4hhOd7EadBMBt
+ NeHne581feOTfs2oU7vf8zesnenF3HRKLkfGEkWqsFrRNuG9mC5NJpvjJvyS38V1HN0ucY4vix7h
+ GagQFR5kx5H5WO7dVHHSQPjIsMmijS51XMSwGVp8kpM/OFr90m74C0z6gf0fu/TI9Otf/NDpFX/8
+ soPDKspzsgP/Ujf/cvfbnvGg6ZuvOA+/7jk2axKozn29rMh18Xrm+gGHD6077XR5lKg+aYdMyNVA
+ kiWJh+ZFo7iiHzkvP0OWK+Hhr/O0+ImZ9RH6jEzzecrk1bT8KKHCUTv8Zh0YD/dnXsNP8lMDO3wc
+ Wq9fkrhTHYenU2H4t++6Bv989X9Wlgw6irSRKvR54s9GGGQJawReyXfe4/CLD0U5G96wHoZvJr/s
+ yZdOf+6KC1S3jfU+mLxbFXjvrfz9W7dOv3Ej3gPyapu++hI6b9ac0QRu3pSajU2fhwL3NBfKmzn7
+ uvHID+XcB3n4wU5XEYTUDr9AWGGC4sFD7jPq54fJiH8+T6DYIk9ZaqLHHxM1r4e47eZz/JzfXq3u
+ 0VvWPXvD2tp5MYM4mTcgrQYS2gtfSQLjRc9FOzl+8bJpuEjNz9xvX7RyhAfOW7a9ebLJotti6Pxh
+ lwRqyog/utRxCSAHFV90xdBTHX7xmPOFR3z86yd/4wkXT9d9xcOnFxwcVizqvXY9EX+Z+j889ZLp
+ 1fm7v1pfj6091gVP8u11wrrho/rOmtBjUF9Qf7y+6n3T+kp+Wl/Iz5DlBDfF0fuvxU9M7yO/EY34
+ R9zkNaPfnGSpCdFl/MFnLQz4UhHX8JP8VICXeQB26B5+L2t4So8nO77yHU9BDG/FCe4s+5vQJo7Q
+ 54k/G5WSW0DFoyxaLHLnjUXYTO+inKlvWF/7iPOnf4i3qsdeuPnf/NtUk4O5U6sAf+fWD+N3b/3g
+ B2+f8APzY7ODTv2pXZ2dismQx6GVfr1puVdp6M3M7cK+thyKPmhz20/nkYEm7Mc8ibDCh4K2CSbs
+ Z+Dmh4k8hNnisM2wlRchnujxh2HZ6yFucz+YzPpgxP+iW++st54+fdNVb+02J/t86m9Y+NVESBm/
+ qp1FiqLkSO+VbIQS8l74ShJwL1YrYvLmuIFfvLAsnogr47DfWMQMz44UoMKDbPvMx3Lvpjl/S1P+
+ LIs2ulR+7QH3Fl9rAql5a/ml3RPwmf8/PvNh008//aEHh1UV6r59OBdvsn+Tv7/+Cy+dnodfD+31
+ tM9cF6y0JixjXfHhdordXnrATqqvBBMPiWO78EkE6Xevw0p6+Qme6P+yg+z4SKcnPNBPzqcfTTsO
+ wloe9YYYdtbCTnyUlnyUOev6MAAcGPj1w9t/h7Oncimku23It6tp9VY6V7BRHEXXgp/xcp7Fwagi
+ 9RFABuKSxdj0xZt+ZsQWxAuGM+UN65JzDk0veRK+IfzYi/fFb/3cUPKzZupNNx6dvv3dt07vxve5
+ 1KfahdmxKEPI49DK0mi3VmP7cMj+p1l0vPraNGbNQ6TzQCM86aLPQ06U90njgT73GzHLw4lMu+cJ
+ 1HTEJ4QmevwxUfN6iNvcDyYVp/NmAXiYnWQAAEAASURBVKHnW9ZV0wuf9N5udzLPp/aGtbP+Tnhl
+ bZ1UHC48vBisrhwtRdB74ytJ4LVoLDaexZe8OZJzwW+/0QzNLnHmMZ95AaqHoIPsOGbFNZD+Sm+c
+ Zah5lT4PX9ah1UNd0OJrzSJ73pAf/2/VX37cxdO7v/IR01/BuB9+RXHFd5Y+PAP/N5a/3PCf4pcb
+ 4gfm43KfV99h3dxO0f+1voBX3xyvr3rfRD/K07yP9jqsRn8Hj/px9J/7OnjRZ3gyO3AjbuI1DS0e
+ qCgcxTmftcQJKMPhJ/k5Dbvksz/8nPn2d8ngbt6Gp5M1vPa3nzBt3/Xbq62tw96koIji1LiJi0kx
+ WIxKqo/AMxCmWGPTF2/62cB/JrxhPfOh507/4ikPna68+MiGDA+m9kMFbsQ/oPh9H7htesVH7pi2
+ 2Y+8NHpTqn9j2h1NPf6o/Xk4ZP/TrBShz/7PQ4TkvBpO0uCROm/aH+QNHjzkfiNkfpjkTlvOE2hC
+ xydLTdje8cdEzeshbnM/mIw8xRcFwgF2bGd96Ml39y3r7r9hbR/9bhThcCbjICKJOGQii55DBO3i
+ 8ESe2SmnWCQ8O6dcNFZ/N95FGC4cj3FpT7vE2Z/9R83SkUiipoormyqbTMDoAvIM/qL3Q7iTPprM
+ cdGFm67soysoPxp/nea1X3D59Ctf+vCDw2os6b58uhS/6/4f4XeJvRG/OPCL8Lrl9UXf4aP6QpF7
+ 1598X0U7Vl9k+qNvOGM/sR8Swvnqz2zrtm9C7/ggcF+Unx43eaGSlvZ+0l3i8Ou8HY/5iHIcvQ7m
+ c32iQAGcDm9NO3/dwsnfFdJJw1/+2w+fDt31uwjwME9uZbdp3EQYuDzxZyPwSrKPwLMoG/1s4Bef
+ mobF4Zq0+EJOPusxWQ9eQ605/ZJH9jI0MOTyU3qoeZU+aDMjzutyhrZnvquJ/8PvxU+6bPq2J1wy
+ 8Ru9B9fpV4Ff+oM7p+96z63TR27Hv5/IvkEKuendCZzAHyjUf3hwH1oORR/24BGBCrTXoSXHAXMc
+ 7mP642X/za8CW84TKLjw8aShxx8TNW+c73M/mJN/590LtLPeuX197Mjj8fuyPtbtj/d8996wDh39
+ W9iY+EegMukYsSlVlBzpcaya/beicXMv8ZUk0Mmf4yb8kl982Qxyj6rzsJj5dbzmLUeKL2qquO7r
+ NywW/QWPuWi67tlXTN9x5YMODiutwOl5+5rLz53e8kWXTd/9+POnC/j7t6rv85BAXmpDHmbZ97Fv
+ rNh1WLESRROnXfLudVhJH+3u/nb/lx0IR9/rSQWnvvCKT9M6NKWoQ41hjvyKl3rus7jmfJkH7IQD
+ KBIjbmvaOn9ra/s70/ZkxuHpROiXv/XIdPjIR2FwOZdi9gbDIPJwaMHPKEOvNwwWCbKS40g+/JmN
+ Ta8kw74Xp/P7zSWagnwL/C5/5dAlGHAX1/YicmQCmFdNE7IXNXBZBoqZEXG67PApl+H7VE996MTx
+ 4DqzKvAx/DNkf/e9t07//vfviMTY0biisb2Zs++5d0uhDaBtJHgeIrLuBJrY69DyPvGZoG5r+4yG
+ 9t/8KrDlfLgLPO2A8J18+HDcnB7zAsRt7idxYefAhBTPev3fd+6884rpLz/taOfY6/nk37AOHfqT
+ 8OXDSjG0YLEp5TxHeotkynFPLnE5Bp+3tNZOfFl0HlIn4pc+itntMg7b98WBUwMVosKDTFwtSsi9
+ m6w3jvaVJh9CxoAlZn3G4n4G/nWaa59++fSGr3jkwWGlip95N/4LRD/6uRdP/+/Vl+JfzfbWOvm+
+ inaswyHrM++jvQ6r0d/Bo34c/Tf6lm3pDqWHMZ9+7Ff9S1iLp/ZF2FnLvheQ4oKPMmcRB3nkVhOB
+ 48Tqj0znXPAniTqZ6+QPrGn1DXQlnxwjyHyTmY303JKgmPIMh2w6z5JfMnkSl2PjE7foHRn5ut3c
+ r+O2Hkb1EOFBtn3EFbKAiiP1HrUGdMur9EEblToP/zrN38L3qa7/E4+env9HL9KsDQ7uZ2oFnoYf
+ g3jjFz1k+iH8WuoH4+99uk+8ad3/0T/ZQOqz3jduJ9dn9DVlHRqwy32TNRy8wdP3De1qX0DQKeLG
+ HfPpx4z0w/DoUXeJw2/6N07AwLX9R2uZwy75PBHxkA//AtK0/Q0yPombozkR8F9ef9F0ztHfX22t
+ LmJoNJqNLEYcJjmqKBHcLnrOs2iLsYp3qvzBVzydn0FE4EOfjqIMM70Xx2GWQomLlnR4yDRI73xq
+ mJ73qIum7/+8B0+PufDgxxRUn7PwdhN+DOIfvv/W6Uc+fCv/GXftE/dh7CA1UGwH1Mdv5nqIahlX
+ b0I8rPBBuV/ed40nGzNwZR9u1bh0A726W7jYx5zXDhdAbkQXeE6UnXBg2OXH8ZXdLr6RBzA3r3cO
+ P/Jk/rGKk3vDOm/7z+w6rLhrcXHTMtjZaIX0dTsBvg4RGKimPAzwPONNPyQNPj7yEg4WxRNxJc48
+ xpkXRvUQdJBtn/kMXvGUPuYly70JQr7qkiPTr1zziOmnv/CPHBxWUZ6zdXgQfwziqoum38Qb1xde
+ dlj9xb7Lfh19Fe2o48Pt5JrpOAk85nlYRZ/3mo7+zrZu+xJA93Xw8hQpP22fiTe90p7P9G+77tfx
+ U2s/AgnX+WxHouO9YWGrXjxtbf/p5DjeeHIH1rR+0ThRnQJlXhp5uFDO0Qrp63YC/CZ+lmrGexx+
+ 4VCW4ol4MKEQzGM+82K6Hjhv2faZT/jPRdHasPgZV9jRAwgefO6h6Yfwg59v+6pHT19y+cGvKGZZ
+ Di5X4IkXHZ5ef/WDp9f88UumKy44pD5VA6pP3UfqK58S2bYwdmO6vyl586ec9R39nW0934/ua/vR
+ IVl+Wj9r/5hRb1gMqHDkjX3B2dxX1GNf5jX8MO7wRzvhakL25hMMf8dw56S+LByebLf7/mPvuHw6
+ On0cwJVL5xR8okewKjoQOe5mcVLQ66RfjsAzkCW/kmcxkjfHDfz+DBKLSb5uF3LyyU855EOF5/gQ
+ ie1DoWIzDvPWZ5oMOAg+9DV/dHr4+fV3Nzh7Vl//7HfvnF7z8aPT33z0OdPXf8aZ+28h3t1F/oWP
+ 3zF909tvik2f/R1tDjIfSpbNHY3G/i/9ODyMoYINWoP4c7/JDnp2uw8bPcl0eciwz3kZp6eS+6El
+ fzOcYPbb5xX3eAnodo5Hvo6uj01XTN/8Of/dLJvvJ37DunPnz4FUv5UhU7STKJ6KlMG0IirI5jRk
+ FSHflHKM5Jb85SdxOZJ2wZ+LMCs+8ImzXy+CecFRDwETPA495WV874I5f9Hr4ZIjB7+imEvznz55
+ bHrKb948vfSDd04fwL/I/Fffc8f0rLfePL395m2qz/prHCLRn2PQYcUCjfbmPhv7yofZkLOYo7+z
+ rXl4DdzoW9Kp8WU65tOPGeuNKE4vb4c5H5HGkc/XnC/zgB155JZ+OJ/7zHZQ4cemVs+3tPf9xAfW
+ 1upFSoXJgydT1RsI5TgUZiP9qSh8iCvkGY5Bt/klf/rl6s3sSLngN49x3S5xtne81oOjHoIOMnFq
+ CvKHHA8l2yz18/xCOiuHD+GnvZ/3jlun51932/Rh/iIpXLlu77x1PX3F226ZvvWG26eP41+vOduv
+ 7Ff1p/os2lE7LPpRRfKOMx7z+Kj+bEV0nW03+nPD/hKdEGZXv8c6gTm3Ff2w/+lRd4lzPmupEDBw
+ cU4EkQfYJV/NZx4yU+B4K3pRSHsOxz+wrr3+jyHoz2PIdXLiOWWycp5ZzkYreB9XJDXDpR1Qm/jL
+ T+JyJGsrkkUvQvFEXImz3+bH8OIRnWrv4na8MhZft2feZd4eGM3Zdd2Cf9HhJfgHS5/xllumX7/R
+ b1GqH8rgOro/WPKfwT+x9bQ33zL9U/yCvLP53HJ9ooHGoMOI3TPae/S15nmo4CPryzlerrPtQDer
+ e+o9Dwn7CAhOC1d48WracVBROLb78Jv+7603LATCsD5vuvadVzmCzffjH1jbR7+BJ7dKlmOkUCc+
+ k5ezNtIX5/sVsj8TMLg5vvzAZuav49IPeRf8jse8ac+4Emd/FDOfckQ2wwSnPuMzXhEpjm7P+MOu
+ CER11txY55/GAfRUfPn3zz9y14T/g1+X1yPq19aN87fhgPtefLn49DffPP2H/3FSP+BcvGfKg+sT
+ DTQGHQrMcbQ391H0I+d5aDSZWF7i6zzq12YHmcsjXu4L8PCinefTj6YdBxWFYxRzPmu5DwSU4Zwv
+ /NGOPHbUcOSTmED8TNbquN983/vA8i/n+7M8SZVKjuBPma58siNpBs3kM/gcI55MaoZreM6Lt/GX
+ n8TlaMfJrNF+7b/bzf06XuvLUdhbdhyZT+SnYjO/bp+yzKmIh7NjePvNx6Yvw/el/iq+xPsEz5xF
+ /l6PqF9bt77OH8VfZfnG62+bvua3bpn4j0CcTVf2q+qmvop9pV3dy8m+GvtKh0aTs2bi6zxcj011
+ F512gExrPbR+9GNG+mG/I5LAUWxxBNA4AQM3zgtZyxx2yZd2GM0ns0p4Z71+AYIYhKHOYe8D6xXX
+ fSkMH9tPTPrOVHWiU0ZRmOVsJDvn+xXyDJd2gjOpOb/kk+R3PI6j22Uc9ut4rYezeohwIRPH4na8
+ gIoj9R5pH2m1h570mff8P/Bv+H3bDbdNX/62W6d33Ix/z48psgmrEM7Z65F1inpC5fpipBll/HnD
+ TdvTM996y/Sd77tj+sP+mgbdmXq5PlG3MaAe3jejnK5U1RP66s9WHOk7j/p1Q91Fl5Vv6yGHxJu0
+ 3ohaPN1vj0frH7FwXuxB5AG85LFCSOOGv3S8tVo9fnrVO69uqc0e9z6wVseeS5J+Art00WxsUlw+
+ 2Y1LfCg01O0E+PIDA9eUSZ48v+KARfHkJpr5NZ95y5FCFAwK22c+4V/FRr6lz7g4yrw9hHyGDTxH
+ /tXv3jE95Y2fnn7i9++KdUfazJNdWYVw4l4PTrNuUU+oan1oRjlG/Da36RW/d+f0tN/8tEb+Q6dn
+ 8uX6RN3GgHo48VFOV6jqCb3fTOYFcp29DKrrXnUXXVa+rYcccp1cdcVBWIun++3xaP1jsThf/mkt
+ PvCSxwohjRv+0nHkv+eXhcc5sFbXkKSfmMo1UqgTNppVcuAVEef7FfIM1/DlBzbywybH8174XiS6
+ EQ4WxRNxJc48xpm3HNHcMChsbx7VWHFLocDm/GFXBKI6427/5VNHpy94003Td//O7dOt+T/4VF/k
+ z2zZlarTSF31hui6Rz1DVv1pRjnHsOc/C/id7719+qK33Dy9of8bgcCdSZfr0/sq6+F9M8rpClU9
+ UTFu6pSzJq6zl0F11foMHPWehwXXyysX/R7rJF4z1htR4RjfnI9I48jna/jpecCOPBWA/ZmvDPUg
+ 3A7Onj2uzQfWtb91Kcg/l03YT0yXzqnWCRvNKjnw8qWiNK8hz3ANX35gIj/A55hx1EjaBb/jmcdL
+ fOLsl2LyliOyGSa4i9vxioj+Sm8eyzIvPyGdEcMHb9+env+OW6bn4ntM78fPU9V6MLusB59bnSny
+ 8npknbwuOS8eCLMx1tN1X+l7Wl/z9lunb7ru1unD+HGJM+1yfdRQvb10KLhOmbEPl6onDw18pFyo
+ XA+3qfWb9lcdGnzw+ox1Ja8Z9aZDhU4Z4hjm8Jv+jRNQhpwffLajZfGFA+OGv3QcuM+ZXnH9gx3J
+ /L75wNqersFJKbZ+YirXSIHzvDQyyGjanFcTCxG3E+Bpt+SX3HnTjx13dvvnYiZP2GUcjs/xmhfm
+ 9cB5y7bPfCI/AsXX7WEQ0wpEBLOQTluB/xfvH7z/9unq37hp+r8/cZcKM6srM8t68JlNuMhf9RaM
+ dYp6hqyy04xyjmHvdcr6r6f/8Ilj0zPe/Gn9X0XGdaZcrk/UbQyoByvSy+kKVT1j86ec9XDdalmw
+ HHvUXXRZeeJj36n+rLsZFYfK7QnRYcXSb42Ml+sf15wv84CdcACFA+OGv5o3cms6tP0lydnHzQfW
+ tHWNTlCQ9xNTudIn/kifY+DoNOfxAG27Qu68HV9+YJL8ORbuOPz2O49XRZj5ddzmLUcKUjAoHEfk
+ EbIiIqD0kb/kyDH8hHTaDq/9/Tunp/zGjdMP4h9a4PetnNairswu68FnrMtyvb0eWafRF7vWmeai
+ c7/ILtc5Rv681g9+6PbpqW+6WT/HtegsRnDaXa5P1G0MOBKyDpkS5VY/HhpNLlSuB+BVz6wjQFV3
+ 0Qkh0zGffsxYb0Qtnu7X8YOXeq5/XHO+bAvELxxAsU+MY15lqIeGuyY0s2GPA2vnGp2gYOsnplNi
+ kIzRQXYcvec8HmaOUt4Lz/klf/lJ3hzJvOC333m8qsYsTsdtXnDUQ9BBdhyRR8gCkqf0xlmONBfx
+ xOxpM7wLP6bw5b954/Qt190yfRw/buB6Rl2QaK1P5pn1YIZYl83rkXUafVE8NMMfdonG4JXfXOcc
+ gSH/f8f/ofzWd982fRm+v8UfqzidL9c36jYGHUaRbqTnChnPennzp5w1cN1UplHPVj/qXWdYcL1U
+ eeJzPv2Ysd6ICsd1mq8jkcaRz9ecz/HQsvjaOpuvDPVQuGnz97F2H1j8/tVq63N1gjIZJg2qGvGc
+ Mj10HJtKshW8j0tF2hu/ib/8JG+OZA2+dGC/83i1iWZ+Wx65ZqUHExw6jsgj5FA0febRwljEk3Ht
+ 9/ETOAT+99++ZfriN944ve3TPARQmMo781vUlUkxX+L4jHXZvB6cJm70xa51prnoxDTHN7vO/45b
+ tqcvx6H113B4/QHiPx2v7FflFeVTHVzRli7r0uoXm9/2I3PXuZZlXkfAqu6isydaj/n0Y85602nx
+ 8DBJvzVSz/WPa87neBS/cADVfvMhVtsm5ws3bfw+1u4Da/vQNSDFQYkgQNJPTKeEafiVPsfAJV6x
+ tyS63Hk7vvwAnPw5Fi79kHDB73jm8dIucfbruM1bjshmmOBYFHx0vCJSPbo961P07UF0+/52DLH/
+ 6w/fNj3lv/7h9KqP3o74ETILw4eogyTORz2odp31YJzUMuBTXYlzHaOe0FIWD55nY6znDH+c9UaH
+ Tj+Fn7J/Cr5M/CH8NZ/T7dxyfaJuY0D1VXD3o6rphTGeq9P6s6od69J51K8b6i66rHxbD9WfeJO2
+ Nx1HgfnaF5jp8WifRSycF3sQeQAv87Ii+DKPMvR84PAvreJs2v19rN0H1rR9DaPWCRqjc6TT1mQ8
+ DCjHoTAbrZC+bifA18kMg5m/k+SXf1gWT9hlMR2f4zV/OVKICg8K22f+kZ+KiHxLn3lzjAzrIeR9
+ PPzXT901Xf1fPzm9+IZbp5uO8oc/mS8CZmHYVZGnJOW3qGsqiOMzu3KRv9eD0+SLegLq+tqO7miv
+ Mexn+Ga35E/51mM709/H32O8+jdvml6vH7kH4WlwuT5RtzGgHqpopodMXCHjKXnzp5ypum7Qs9y0
+ 2qvuohNCpsQVHk+xDPIjhfwn73wdSaB4uf5xzflsx4iMo0Hml3mUoR4GDuLW+prQ1rD7wFqNn78i
+ eT8xXToGyR50kBoDl3ixtyS6vBe+/ACc/DkWb/oh4YLf8czjVXFmcTpu85YjspkOCsdhHgaSvAKU
+ PuYlyzwI4nmfDr+LH1P487910/TVb75x+p3b/KXUyBdBIx8kotF5Z1qLugoWOD6zCTeuB6eJi3qS
+ HTLdsHtm42ydsv7DbsmfcvLzt0O84J34EYy3nx5/zcf1jbqNAXVhZXo5XSnjWTdv/pQFFp51tp3q
+ ulfdRSeETMlTeDzFMjgOKlo8Oiz7OklLvwJu4Ms8wEseO2q44S8dDxxg6xMdWIufvyJJPzGVq4Ik
+ l4PUGLjEK6KWRJf3wpefxu/aws9J8DueebwqwixOx23ecjTCY02BV1PQLuR4KHnERbzM20PI+2i4
+ HT8O8LL33To99dc/Of0ifnlcxY8YR74QqFDXe16S8lvUNRWsD5/ZxFUITnR71tH2OS//EGZj2Gsd
+ E5+jDXkf1x54/pDrF+HHIF78vtunG/l17z69sl9Vtyif6+GYRzkpt/rxUGlypue6eRnEQ4JWP69z
+ LBPm8STTMZ9+zFhvOoVjFC2OrD/14gs7zJd/epEb2AlXE/O+o+mMjzInt3Z9H2v+hrX4+SuS9BPY
+ KZkrT3iNgUs8XfUkurwXvvwALD8stmiQ/knwO555vCpCFNN+GVbylqMRHhxabx7VWPZSKLBhz7ha
+ muFHZPvo9rP4N/Ke8l8+Mf2j37kFv85lUR/EOfKFwIKz3aIOkth9mJjlnQripO6F4IR5a8z1i3m6
+ od1snK1T1j9GE/E+ruPg+eNaP/JR/HjGG2+arsVf99mPP77Fekah+4C6qKKod6ZqnPGsmzd/yoWi
+ QSyD6ip51G+sH91m5b1Ohdc6m1FxUNHiub/fsFar9a7vY80PrGm6xic1isKkkHQ/gV06pyC9ch+4
+ xCtlFUVPvoXceTu+/AAtP8DnWLiIZxO/45nHSzsvDgfG6dG85WjQlT7zDzsbLuyTT+blJ6QHfHj3
+ zUen/+mNn5xe9Fs3Tr/XfvFU1bmthz9zImQWRl2feWdai7oKFvnzudWZIi+vR4xt3co/MFqHHFs8
+ J7Pe83XN9Yox/N+IHyT76++5bXrmm2+a3rTP/pqP66OGi77KeqCuil8Dn/Cn5cVDpcmFqv4OHsnN
+ DrLqLbqsvNfH8+nHjPVGJP/EkXfOR6RxZPBV60sDXB5gR54KIP2Sz3b5MHCcl8E1gdCw68DSyQ2W
+ PpKzTmg8p0yGjqNTyVbwPi42Na698Jv4y0/y5mgi8eXNfu2/28GhIPbb8lAtoCo9nmHoODL/iNeK
+ po954SMC8dDzA3+95Q/xTfX/75PTm/7wqJosm4GRVZ0rbzfTCB+FiToYz/uirqkgTupeCE5k3bJO
+ oy/KPzCs1lgGMSm+6qPjrPdYN8ab67XZzw34C5DPx/e39sulTZ0F5xjlcz2yDhkt5ZYXKsZNrX5O
+ CEbJnUe8A1d1F11W3naSIh4N5OPKUKEVIo7inM9aKgSkqDgGn+0Uf/KFA8dDPpklcPjVPJj4PfV2
+ jQOL37+aVrOfvyJbPzHJoWA4RpAaA5d48bckurwXvvwALD9swvRzEvyOZx6vqjGL03GbtxyN8KBw
+ HOZRjWUvhQKrODkf0zMCCQ/s7SZ878b14Hpp0SugWfyYHflCYGGE97wkLjgUSztMOH+peyE40e3t
+ v+KBHd2QdjaqzmF3Eust/+lnA77ibX7wuC8ubVblG3Ubg9cLUUY5+IQ/0Y+SWL8hY0qX6tt5SJB1
+ AaLqIbqsfJ9PP8EXfWD/xDGK4bfWUzjyhR2AYo8EPMAu+Wo+8yhDPQwcRSU0+z7WOLCOHf5jRLCY
+ jK6PzpFOReGROBvswodCQ91OgNcikg9/Zv42xCPO4Et+xQvL4gk7Lhov5+PR/JisB85btn3mH3YE
+ iq/bwyCmyZ9+9LwPbq4HwvKiV0Sz+ihsN1PmTwvnlXWj6aKunMp6SN0LwQmq0558Uc+YV9nxPBs3
+ 4ZvdrvqeAF95hh8M++pyfaJuY/B6IdJIj0/40+oHmZs665tJSe48JGj1q3qILivvdZIkh+Q14355
+ w8K/h7o1rY89MfMcB9bWscdw0ic1ioJkGf04mZEM9fjjnPm0GR8KDXUjH67Om/w5v+QvPxFHx4NI
+ fHkTLxeTi9T8JM5+7d96gOqB85Ztn/lHvAQSoKHzhx2DWMTDqQfycj0QFlcsuxABzepTMvOFwHoI
+ b5wkzkOxtJMB6yF11IfPcZV/1S3qCV3x4Jnu0u1GfK47OZf1DVl2ictR8Hm/kmI/Xc436jYG1EMV
+ bem6QlUf6PubTubkOrhMqutedRddVr6th+oZfQDSetNp8XS/PZ7j9xcjBC95HJhCpr35JNb6DlzY
+ Ma6tVf2e93FgrTzpkxpkbHKSsgnoMkc8pyzKhks853sSXe68Hb+Jv/xEHB2/5BcvIiueiCtx9tvy
+ yDUjLsOFQ9tn/paVsfhSH/PCy5wT8bA/BtcDYbFLuOhxzeqDuZEvBMGIj/ykp+GirpzKekjdC8GJ
+ bm//FQ/s6IbVmo1RP+FOYr2z3nvhndfww5j20+V6RN3G4PVCoKOdXCnjmY83f8qZk+TOo/WJPhZf
+ 7mMI6gfy0k/Opx9NOw4uEJ50x8DDJP3WSP1x+4vWsBMOj22dzUc9rpwvHCfD32pnw4G1vdakT2ok
+ wSCYDJuHpjmSO2QMM1ziOQ+FhrqF3Hk7fhN/+Yk4On7JL15EVjz0xyLM/LY8GJ4dKETBBHdxHWfk
+ R6D4uj35PT0IKtsH/MH1QHz4UB0ioll9MGeZdYPAegjveUmch2JpV/WQuheCE93e/iseOKKbdFej
+ Agi7k1hvB7w3vuKFL6XFoPbR5XpE3cbg9UKcUQ4+4Y/rz/C1yZvMOV7i6zwkyDqGnnUQL/eFeG3n
+ +fQDlbS095PuElscuV7kER9RS77wx3iFE6DhyCcxgQ3H+fC3s+kNa2v1GEHoHCw+sT2Ss05iPKe8
+ F57zPYkud97yIziTYoiDv/ws4ul8eqZdLELFGXlkHPbb8hiOBh0c2j7zH7ziKX3MS44I2qJlTA/k
+ 6HogTla0uiLzy7xSZr6IlgUXvus5N+8DzlQ9+Ez+Rf7lP9ahy3ST7moMe+FOYr3T3154znc/DHM/
+ Xa5H1G0MXi8EOsrpChnPujGv6M+WkOtgO+W9V91Fl5UhPuokh9EH9I8PFZAjZYnDb4/n+P1Fa9gl
+ n/yk3+EvEx64sCN+tX4MJV7+kvC160PTzvQ4TvikRhLRhBwZco14TnkvPOd7El3uvAxSsuDhB8/J
+ n2PhGn7Jbx7zdbvE2W/LI9eMecq/HROnplD+UQ9G1OTB39IMHpHtg1vVlV0XTcKwnF/mlTLrBiUT
+ E77rObeoq2DkFdr8i/zLv+p2nHUWe/fneGV/nPWer2vwN3zlGfwMeT9drk/vK5dfbyIIdJTTC1P1
+ jM2fcuYkedBpnWvfiC/3MQT1A3lddy57xpN+642I/SA945uvo+ZP2F9EwU64INKQ+4x6XOF44DhZ
+ /j5z4hmFywfWrW+/At/YukgQJgNjn9geVTI2gyig5qikY1zgyeOi6Mm3E+DrpAc6+XNcxiPC4EsP
+ jmcer4ow8+t4zVuOBh0UjiPzj/xUbNYl9TEvOSJYxJNxPVBjrQ8WPZuBsVSdW13YJBJZGOEjP+E5
+ t6grp2jA/KXuheBEt7f/igd2dEO72djiOZn1dsDhZ0P/VZ7hB8O+ulyPqNsYUBdVNNNDzK5U1Q+y
+ 18u4TEr6zqP1iT4mS9ZddFn5Pp9+zFhvOi2e7rfHc/z+Ih/iII/cOm7HE31HSK5/4cJO86tLJp5R
+ uHxgbW9dlU59UsfJxiTZDADWiOeUSbAJz/nk03OT98Jv4i8/EQeTkn3jS37Pz+NVERC/4VzNlkeu
+ WemBKn3mH/lZ0fSZd0szeDKeB3rMOvkzFivpq+pcebuZJAo26kQLwxZ1TQXrxWc21SL/8s/5tm7l
+ n2b4M5ZBTF7fxOdIHwv+lOUncTkKHn2LZ6VFjn10uT5RtzGgHlmHDNYVqnpC3990CqU6u0yq6151
+ F50QMq31UH25zmZ03/B5xNP99ni0/jbT+pV/WsscvOSxQkj7Hf7S8cARNvpuOjY9hjM+sNb4LjwW
+ m5dPahSFMrz5JPQ8fctn4nIMXOIxTQMNdTsOv+FMas4vucVxPH7FC4Yer4ow89vyGIkoRMFYUzyw
+ aOILWYumOFLvkQEHfXuojB/QB9cDYXHFsgtZX+XnkQGOfCGw4MJ3PecWdRWMvEKbvwpBZbe3/4on
+ /QNDd2MZ+BR290E/iXwf3VyPaKAxeL1UhwzWFar6AVH9mRDhWWfWL+qqh+jj0KveotOTrL3+uV7E
+ m7TeiLRCyTvnI/LE/UUU7MhTAdif86AeVzgeOE7aH83yRxt8YK2mx2RT6+SEcR+dI52GT46xCTqO
+ TnM++ehL1wnwtFvyp9/iPQ6//dp/t8s4HKfjth5R1QPnLTuOzD/zBJAADRFnyU4v/YT0gA+uB8Jm
+ l6BueTm/zMsjmyTzp0XmSRvNS9/yTgXrwWfyG0hJV/lXnUZflH+gGBXtNYa97HKdcyTjgj/lvfBL
+ P6TYT5fidqGj3lkPVTTTQ8iukPGUuA6jnpmT6+AyVT1b/aoeosvKE9/XNfpAXgEkTP6Td/jt8Ry/
+ v8gBO/I4ME6E3+EvEx44ouzPYfinGOINC//bkE1HCEcm0UbnSKfhM3E5LvCYJpGGuh2H3/Dd/Ol3
+ GY84F/z+DOS4u13G4Xycn/VgqYcIFzJxagryhxwPJdss9ZHhIp6YfcAG1wP5sEvaZnd+zpPBjXwh
+ MDHhu55zi7oKFvlLrULxqa7yrzpGP0Fb/vGsOuYY9ZPdfdBPFdg+eXB9om5j8HohxtFOqHPUn6Hr
+ sGoy53i5brZTXfequ+iy8rYrvHiDL/qAHs3PKObrqHnhyOCr1rfWk/OwS76az31WhnoYuLADXuwr
+ fNsKV7xh4fSKpvZJDRBlgtk8ANaI55RJ0HGJ5zwUGup2HH7Dww+E5M+xeCMecS74FQcsK07qWZyZ
+ 35YHw7ODQSe4i+u8Ij8Cxdftye/pQaCnfXFzPRAfl5t1iGtWH8xZZt0gCJZ5cSLzW9Q1Fcyfz63O
+ FHmV/1iHLtNNuqtxtk72V+tuQt7HdQJ85QmLkf0wf6CfXA8WEBUYg9cLwUV6fMKfqIek1p+Q8xJf
+ 5xFvs4PMOohX/UBeyjmffjTtOGgg/7bTYSkC21nL+AWU4ZzPdoqfPBWA7c0nswQOvwpv9N3Eb1vh
+ 2pr+5fX8v4OPSqebPsMpFQSlEeCUSbAJz/nk03OT98LXyQxs8ufIKs/sGl/ySw/L4slNFMW0veM1
+ bzkShWBQ2D79RX6MSHypj/mYHgQZzQM/uh6Ik10STcaoZvUpmflCYGGEj/yk59yirpzKekjdC8GJ
+ bm//FQ/s6Cbd1agAwu4k1tsB742vPOFLaTGofXS5HlG3MXi9EGeUg0/4E/0oifUbMqZ0ia/zaH0G
+ ruohOlaED66fJDkkXtOOg4rCMYo5n7UwOG5/EQU78tgRJ+An85DICc8XjqL9OYzVo/ijDVvTkbse
+ D/ShdOqTGmQMgqRsHprmiOeURdlwied88um5yZ234zfxl5+Io+OX/OJFZMUTcSXOflseKt6Ik3DV
+ Snlm/saHouljHgHKruXHx/1wuR6Ij10SzcC4ZvUpmflCYMGFN04S55d1TQXzl7oXghPd3v4rHtXX
+ dnSXbru+1jnX3YS8jysKL7vE5QhU5YlnpTUs98WT8426jQH1UEVHX0WFqj6Q+5tOJuM6MG/nazn6
+ GCDKqrcKrieZjnmvRJTVcRDW4ul+xS8tHQq4gc/xqH/IUwFkPNF3ckP/9JY4So7//2fvXYB2za6y
+ wPc/nU5CEjAoXoay8IIZCAm5QBDRGhFrlKmxykJriDrKCASJiKMU4zhT5eCoU17GKRwZLEdrnBKm
+ RDOiI1rlBS1jB+SWEGIIBJAAIUAg5Nb3TtLd55/nsp611/ue7z//6aS7z59071Pn23vt9axnrfXs
+ /b3f13+fPl1l3LG9/y0vwP+b4tqvoitJ/aT2k43dryczgkXhFoRTGOgK1/Pg41KjmjrFT3/nwVp5
+ ePlqv3mTxwF87eF69vUybt/XyCPxEN51YS0486Z/48sx/LW/6JunC7rNiz4f3hLqUKN17r7TLwCC
+ EZ++09ZBV3Ixnjiuh840OTq/cKVn7TMN43bzqOdWznudG+vIeV2cB+mu1Mh9bR0tJ3SRomkPNVup
+ 1hN238/Rkfw6t9L1It1FF+V9TrKkP/UzaX8jGvXMvLOem98v8oGXPE6kBIw3n/Ml8cJVnHB1n+44
+ /7X4Gdb1ZyukLvXuyUwwLwMAPWMdW5R1WXdxdvB1jZvwE3SKv/NUHWxKeRywuCuelTVP1YUN4Vzf
+ yJMzaz9g1BQ2RZv4cgy/ccZXGcVT1m2fVD+q4KXIZWBR7q/qb5v9wqDgwk8/9w66CkZeoc1/6L/z
+ c3+cW+cXq+P7nLGnuOAzJx/njMp3Ef6YJ2FXZVbduUC6d5LT54Uil5w+GOOp17ifoxnr4LjWc+jX
+ eohOCEWv/eQxqe8N19wPb70vZNc+/ciTsedzHDtrvnFufp9VZPbFxwTcP96768/GA+uaH1iV1E9q
+ iEIbJH4SsiYmJYW55Kc9cMFjmw5N/VL2RfhT/Mp3i/yuZ1+vTn2Xd/SxGlGJgkl7i+s6qz+JSD1m
+ fOzqsPJ0v7d5YT1QL09s3f51jkMXvQmih/DpmzMbOejKLTqoh9wShqsenV+4uk/wch9oxe1mJ5K/
+ 71HdP5GWfySobdZR/JnJf8jTcVdkYX1KtzVBFykqeV0q7aEf/D4v49KO+CYP9Tqlh+ii/NBJ+jJP
+ sjKea2+IbtZRwMvvFznASx6lDV/6oB9jx0ebm+7bYdy4hgfW9XP8TaP0qTpMmBE8Z0L7yYl1bIfd
+ iOd++LQe9uTtPIKzKZa4+DvPoR7SHfnFC4aus/oIznlHHyvRokNCx6d/26pIfPHXvvAK50YtrsZk
+ PVAWFa3LwMp2+rTNfmFQcOGNk6W2DrrGwf65Jv+h/84v3UpPQDs/w2hnrnjF3cJ5J99F+GMepLlS
+ Q3VTAenTE/SQomkPNVsh42n5zR87TcledNL55PtLdFGeeet9J/3rHigrgISNevjQSd6e6b/p/SIH
+ 4oQjHQtI3pWv9xtHlPOpWsbhWXUNe35gVVI/qf1kI0l/UvESicIt+BOCtVql3axc6pYrj5vwE9B5
+ sFae5LtFftezr1ci7PKOPFJBiZnemiOx60j/tlWR6oi/9oVXODdqcTUm64Gy8CuXgZW1zkMXvQmi
+ h/DVn/CMOujKregh9xSCGzPe+bsexOl8gdnNo57cu55NyNc1LsF3n4hgnqs2rEfptiafF4qt9rjC
+ 77qPsqjfsrGlIb7JQ4J6/xLQeoiOinAx95NH266DsMaxipXX9dPrPI468tGmB3HCkW7mJV9FZr9x
+ FYd9VUs/nlXrgTWefGTxE9szOftJzJxli5JxBzz3KdZu3ITfcDbF1hZ/57kFfj/x9/VKjV3e0cdK
+ pDIFQ0L3mf5tqyIC2l/7shXOjVpcjcl6oCwqysOu0ec4dPEnJwCCrT4ZYthB1zjYP9fkP/Tf+bmf
+ 8xPf4ZwZXvuYpH/jR9yRP7byBJe5eMSLNeerNqxP6bYmnxeKXXJS4bqP3Mcvn5eU77asg+PU90W6
+ i04IxTKu8cpjSt8brp1HdLOOKvDy+0UO1E8eJ+KGzrnvnTe83ziah3unB9b29DcsSVNn6E8iHCIu
+ v7Tj7EXbPvP4GY0hnJdX4fXWPwF9mdKmbhXvVvXjyXp03+m37qDeXYf+Vzx1Kj0lE/Mpy37ufAM/
+ 4m7Q9xI88888V+FMZg3WhwKue+R6YWNUe1zh99Cv3vzRl1gO2Yuu7BEXPUQXZRwnSwmJLz7mpUP5
+ iaO557OXDgEV2LoXkSfEha/3x70TkRMvHDedb9THH7qfP/1vCSmNVOFscdcnVjt0drtPpHlW49Ao
+ 9e0eqh9FXP4JmH4B1p1Bv+qLfZcu2Nj1HQdxXOs2G0+To/NTF/inzTRE7+bST7jgM5uQr2tcgu96
+ EcE8V21YDwpIfXryeaHYao8r/B76wX7KfsPazvDAOj97+t8S8krUu8efEH6y8674k0wL3Z3dJ0ht
+ I7wItLoSL66bV73eDVXVrn6VPT7p+N4QPn2nLetBd3glGPtniN50WtHSCE4z/NMWD1C7WQdQ/MFn
+ JmP5RT7sHf/Ac3/yd9wVWVgPCsjz6cnnhRpXu9R16AfE/KaTdqzD4BHviIseoosyxJdOSki8Gfub
+ jk+4ytzzEXn5/SIKceRRWidw3pUviReu4mZ95/wZVv6REIctCGeCxuwembRycr4Jvog09csl+OMn
+ Ytujjq6LpMUXftfjulOvVZ59uW77EdmLooPtvOnftoCqI/7aF74qONSTum7XbD1QJ29JbiGK2ena
+ NvuFQT2Er/7k595BV25FD7mnENyY8eM+1T7TJF3PpZ/qPtw/8rlArfxyCb77BFptjdCrsPT5lG5r
+ 8nmhwGqPK/yu+yjLb37HY6OGdXOc9NX5jDjY3keA7gN5ic9+8mjbdTBA+cO757MXcTe9X0Qhjjxd
+ QPKSj36MWiwcN52v+8Gzav3QvZL6SY0maLMZzGolM7lJdRM8U80mpj15w2945YER/syNq3omn9Z4
+ cT37eiXCrk7jzNuJFh0c7jf9L1710/7al10VVJ6ybvvU58Nb0rci/aWv2OwXJVMY4aefewddBSOv
+ 0OY/9N/5uT/Ozfo6jumS9iR+xLlAJq5R+RQXXGZAjnkSdlVm91sXaE3Qg4qw/lRqhVof+J+y37B2
+ P3THYXP4Se0nG1WbT+C+XMFlLlzw5Jlvkmmf4jccbwqG4bdmXj7arOsW+IXjYR7iUod5zGfeToRF
+ lQuH49N/5WclqiP+2q/tRaDVlXixHqhTTxV27LHTB1urXxiCUe/qT37GHXTlVvSQewrBjRk/zq/2
+ mabPGWvZ5OO+eKN/zXbI3y+X4LtPBJD/qg31aaFL79JBylhe12yljKdufF8MXaox6+a41jPvG2Ba
+ D9HlBOZ+8iQrbAnH/fCuvLOe8XQdeVYcidY3p+ynD+fTfWIe/FJewZyv+9E3LPxzoULQHMepTyzG
+ Hj+x+onPOFyeXZyJ+LrGTfgJOsWfvLfC73pcx4wDsWpwfSOPVFDi8mNCoOtIP7bLMfy1L7zCuVGL
+ qzFZD5TF0683Nytzf1V/2+wXBoUTfvq5d9BVMPIKbf5D/52f+7kfZIfNNEnXc8UrLvjMycc54xL8
+ MU/CrspsfeoCrcnnhSKrPa7wu+6jLL/5HY+NGtbNcdKXBEO/1kN0OQHi6zyUsO4BOPtnScof3lFH
+ FXj5/WKBiCOPC1PFzrvypeGFq7h9fU//W8K8mXG2PCWYFnd9YrVj+I0zXvpzoxZXY7r1T8D06/51
+ q0oHduK2bviks6PuoC7bof/Oz31euvJzRtiND63hb/yIu0HfS/DHPOzlKg3rIaHrXmmCLr5H1R5K
+ 9v1r/WDzTR07PcledPYP/eiX7qLLCfAYs588ZuxvOqOemTf5jSNfxe34fE3YWfNVY87LPjpQi4Wj
+ 6T5dN4D4F4T4ofvT/5ZQ0tQZric/NqAR7Vq0raPlvvyMxhDOy6vw6rp55KyTFXu4v/TlmZckbTJi
+ 9U0/4+jnpVtxcrB/uacQ3Fg41cFLXPo0DzDiyzz8rPcYp3wkzrgEf8yTsKsyW4/SbU0+LxRZ7XGF
+ 30M/2D4vKd/tWC/HSVcSREfx5fxIF+V9To1XHlP2Nx2fsOqZeV0/q3OeFNK69/nQg/qFw7L300dF
+ Zr9xFYf9VZ//LaEj1AR7gZugMUsy2JqZE7/7CTtwHadcTDMGcRiTd+K5f+TvPId6ikhTXlyP655x
+ SFhw9uX89mO7F9y37TrSf/pU4CE+fFVB5Snrtk/WA/XzuOsysKjWeejCy5T+GRGdjOfrQdc4KIvc
+ pQ/XNTo/iXN+8HV+rCV/5lFP40ecCyxyTpfgj3lG5JVYWp95r6KHFE17bBS/6z7K8ps/+qYZ2YtO
+ OreOjINe0lt0UX7uJ48ZfW+4XvX4nsSuWfeFfB4rT3DcR/3CYTnOre8dIdlvHDdvvHf8Yw1305VL
+ 7Sc1muMlZ5O8NHLXjHVshy1c8NwPn9bDnrwT33mADX/mxlU94mR9Y4gXkc1T9acO52VZ6QfBTiAW
+ 0cG2P/3bFlB88de+8FWECEZBt3mpflGDP9nYqMfq3/qtfuEXjOeZvjkz7qArt6KH3FMIbsx48pWe
+ tc80pN3NpZ/qDj4zsFUIVx6X4LtPoJnnqg31aaFVYMnp80Kx1R5X+D30gz2/6aQv6+Y46SrCEQfb
+ +6SL8sRnP3nM6HvDNffDu+fTPv3iE+zA5zjVL1wRaUofjkvDnVdpna/7wbMKf6zh/AMKqaR+UoOM
+ NpvBrFYyMxl+y5+5cMFPPq35chN+uysPjPBnbt7kGXxccriefb0SYZfXOPMiqBdVHmz3m/4Xr+pv
+ f+3LVvoiqPUVmPp88qboMtd5cmv1C4N6CF/9yc+9g67coq7sX+4pBDdm/LpH2Wcaxu3m3TlF/5od
+ yNc1LsG7r5VnBV6Nlc+ndFsTdJGikteVWql5nv5mYly6kX/y6HyWfq2H6KK8z0mW9CQ+WbGgY9Qz
+ 88569D6rQlYeE5kPvORxIiGNW/mSeOEIc/1dH55V/BmWH1h4GAjCGVn8xPbM1P0kxjr2RXjuzyam
+ PXk7j+BsiiUu/s5zqGfyac041b+vVyLs+hp9rESLDgndZ/pfvOqn/bUvuyqoPKnnds/WA3VS0dxC
+ FOX+0lds9gsnBRd++rl30FUw8gpt/kP/nZ/7OT+yw2aapOu54hUXfObk45xxCf6YJ2FXZbY+dYHW
+ 5PNCkdUeV/hd91EW9Vs2tjSsm+OkLwmGfq2H6HICxNd5KGHdAzD2Nx3lD+/K6/qDI5/Hns9xqp88
+ LkxA41a+NNx5WWf12f3gWbX+kRDNCcKZTYzZPYJcFCySWlyMLyJN/XIJvp/MCAh/5mM9p/hdj+ue
+ cTw0Dvfj2X5s9oL7tl1H+k+fABKgqXRom+wYlcfG7X+1HiiLt0SX0TW5v/Tl2Z+c8FMP4aefewdd
+ BSs95C59uK7R+aVT6Qlf58ea6Up27TNUcYf7x/0b9C29L8If84jjCr2obl8oCVHXyee1a9cKGU+9
+ eP+WnmnJOlgm6XqR7qKL8ta78eI1Y3/T0QmFd+Wd9dz8fpEPceRxIiVgfN877uQ8G8dN51v18R8J
+ z5/+GZakqTNcT35sUOP65NGhlO0zj5/RGOOh4I3b++q6eeSskxV7uL/05VlvguqfEatv+hmHy8WH
+ CFbhlYN6yM2FVrQ0gtPMy1l+zuIBajcPP+s9xh35Y+9wiQP3MY+rujqvqpsKsO81+bxUf2qlrkM/
+ IHxeJ/SePOIdcdFddFF+6CT9iXde3xuuvSG6WUcBL79f5AAveZQ2fOmDfowdH21uun6HKR4PrO3p
+ n2FJmjrD9eTHBjXSm12Ltqml9mub8fOhIPs2v7hulMVbkluoMnl50pdnXqa0yYjVN/1shP59nBzs
+ X+4pBDfM2zPydz3hgVN1ZHYi44LPbCK+rnEJvutFBPNctWE9Src1+bxQbLXHFX4P/WD7vKR8tyW+
+ yUOCoV/rIToq4vi1L0fn7W9EjWMVo47oTz/yZOz50gfihGPamZd8FZn9xnHf+VSt/XxgPf0zLElT
+ Z+hPZj/ZfRbt0Bn3JzcPaZ7VOLQ6gts6+RPcl2zdCt4tXh7P7tuXSeXr8qQv9k0cX63HjJOD/cvN
+ hfE0OTo/93HZpi0eYHZzxQsXfGYT8nWNS/DdJyKY56oN61G6rUkPBda65KSuQz++iYedvqyb46Tr
+ RbqLTgiFtk5KyDxm7G9EPmHtz7yuH/no98PkBF/6AK9wgFQC5135er9xpHPf3Y9+6H5W/0hYSf2k
+ 9pONJPOJaelYJGskTc2FC74cmvrlEnznQUD4Mzdv8pB0iGTTh9A89FOcXV7Xa95OxHDDBLe46q9s
+ VSS+GU/+ph8L0d32lz4fHLp0qIp2+qjv9AuDwgjvPmVRVjiOcRKM/cs9heDGjHf+rgc6Mg3jdvPu
+ nPb3j3w5R62HLd7ci8yC7/N03BVZWI/SbU3QRYqOdq1U6wf//KaTdqyDZZKuuq+l49RDdFHe59R4
+ nbMZ+xvRqGfmnfXc/H6RD3WQx4mUgPHmc740vHAVl/ui+zF/6M43N4af1CCrN7ufhN63dHXZboIv
+ Ik39cgm+8yDAmrLJ0/WIs/jCr3oR0TxVf8SUn5ph37ydaNG1P/1XfkaIb8ZDr9reEaSg2zi/9BPu
+ 3H7nr6i/5oy3RIftglb/67x5SSQnhRE+fbttNnqMaz0YQn4R0PCQ3lhq5qUrf/PAx3SsQvPwk+8Y
+ d+SPvcMlrvLu+LF3VYberBGcc8nnenMuqdYKtX5QzOdlXKOOPLJP6C66KOPzkVX1aAJpfyPSCRHH
+ Mvd8zG0cGTz6fIvIE+LI40QCGke+DvR+42g636pv/tCdl44QzmCZs3tk0soZXOYDHtsk0tQvN+E3
+ /Eb+5D3WI84Dv+pFhboMAKT+1GHb++YFqBdVLmzHp//ikYjUJf7al61quFGEZd/G6Zc/69r2jz/n
+ E7dvfcUnbr/mOfynfjbqsdMHW6tfGIKtPhlhmQ+6xsH+uSb/yfMwf5+f+A7nzPDax6R6Gp97ZQdf
+ 16h8PtecV81AdZ9Yi39F3vaV3qy5L5xLPtcpRYectEdffBMPO81YBx9D6zn0az1EtxRZ+8ljxv6m
+ 4xNWPTOv8gFqHPk89nyuR/WTx4UJaBz76kDvN46m++5+8C8I1w/d61LrSQ6WObtHBIuCRYLqJnim
+ 4qXbjUvwOkSG4Xf4M7OrWY94D/yux7gZlzoc77rt70SLDg7XkXy2VRHztb/2ZSucG7W4OtN/8Sue
+ tb3xt37S9uf+0+duH3eH63N/6cuzPzlRN4XhCVSfshR20DUO4rjWm878NDl8HjXn/GqfaYjezZfc
+ jxv0vQTffVYeTFdqWB8JXXpHD+u4rpOVaj2h3Pymk6bkX3TW/5TuoovyPh9ZSshzNmN/I/IJa3/m
+ nfWsp86Rjzb5wEseJ1ICxve9404lXjhuHu6dHljn9UN3XjpC6vLN2T0yaeUMLjOTzzjszyZoxt7h
+ Eif3jfzJy2Z2cYOPSw75USHnGbfPa5z9COpFlQfb8cm3eMXT/tqXrfRFUOsrND3z2tn2tZ/6vO1N
+ v/WXbV/8yc/e64M6V78wqAdvVfUpS9fioGscxHHNy1b3hyaHz6PmnF/tMw3jdnPFKy74zMAe+WNf
+ hHdfKw8prtKwPqXbmqCLFE17KNlKGU+L97vu52jIOlgm6Xp4P7YeoovyxI/3i3hNqjoIG/XMvLOe
+ PGyE3vG5Hp50841zNp/zpeGF4/7h3t3BP+l+7ezp/5aQ0tQZric/NnC4/iTRom2fefwluAhqfQWn
+ T372Hdv//dJfsr32837p9mnPu6P6cn96E1T/vCSr79IFG9YlepRglIW91ptjtm3dCs9LXPo0D8Pw
+ O2mnn3yyM5P4qO/gO4U/5iHFVRrud96r6CFFR7tWqPXhm7jOY/ZjvSzTup8ndBddlPf5NF68Zu1v
+ Oj5h1TPzznrysGEk9xdfjg11kMcOJTCO9TlfFgvHfdfffOfXnv6T7hGb7zlpyjcJfq1PrHYMP6Rk
+ AM+Abo5e2Lyqr694/p3b9/6WX7Z9/Quft33infhzw90vKtblSV9uzG0dPunYHB11B32bI4Q7709g
+ 4UpPhVFfhOP3bnYi1UM+67vilM/Ufr0E775Wnhl6FdbWhwKWjp6gi3Ws9lCqlWo9Yff9HI1Yr6a7
+ Qb/WQ3RRnvg6DyWk3iZVHYSNembeWU8eNkLv+FwPT7r5KoHzrnxJvHBk8/mrWsfhZ1jXr/8CXUnq
+ JzWa4KOPyccT09K5BfkVtnDBTz6t+VKP0sk78Z0HUGvKJhl2a/yuZ1+vRNjlNZ95OxEWVR4criP9
+ V35Wojrir/3aXgRaXfkX/FPi9hWf8nHbm/6zT8T8HLTGflE2heFjpHSQxX1s9PmUnq2H3FMIbpQ+
+ meseZZ9pkq7n3TlF/5odyNc1LsF3vYhQWyvySqxyX1tHyw5dJLjPQ5VaIeOpm9/8sdOM7DoG6Ut9
+ TukuupyAz6nxOmczqg46Rj18mCRvz/T7YaJA7i8+8nMbccKRThviMZ/C1n7jKm7y4Vl1bXvO834S
+ hI8mqZ6cBLGImt0jkzI1tjlXkRMXPFOFT+thX4Tn/pG/8xzqEecQyfSurHmq/tThvK7bvIjqRZUL
+ 2/Hp37aA4ou/9oVXNdyoxUfPxG9YX//C527f/XmfuH3WJzzDevCESwd24rash+WqPqMHQTifY//S
+ W/HforHqAABAAElEQVTkKz3LFg/DaGcu/XxO0X/FHfljX4T3OS5+pLlSw/qUbmuCHtZ3XScrZDz7
+ 8Zs/dpqSPXl0Pks/+qW36LRS6NpPHjOqDsJGPXpYznOSF3E8/xp7PkSTFpmbb8SbrwO1WLiK67qv
+ P7o9/My3Xdv+0Avuhetnk9RPajRXl3A+Md1SXYIqcuJYnWzlWk2okkvwnQdg5QE+c/PehN95nX/G
+ 7fuituHtRKs8BNqf/m2rItbf/tqXrXBu1OKjb3rh856xvfZzn7/9nRd//ParnnVH98lO3NZB1zjY
+ P9e8hIf+fR7cpm7rXrT+DMNvxmuu+B1+xB35Y1+EP+ZBmis1VLcvlASQTKhQ30Q493WyQsbbr4fW
+ Aqgv6+C41nPo13qILsoTn/dD8lim/kbkE1Y9M++sR+fvsANf+sD5k8eFdb3m60DvN47mundg+tnt
+ q190P/5Yg/bfnqR+UqOJuoTziemWEMoQ+jOz6YEvh6Z+uQTfeRAQ/sxU6zJ+12PcjENglYMZjs4j
+ 8eBqvxPbn3zpU4GH+PCJvnnK+qic/qv/5FnbD/yW529f++s/bnuGblfaOujK7nTemLjmm6d0pMnh
+ 86g551f7Oh+sd3OfA3WN/jWbkK9rXILvc0YE81y1YX1KtzVBTyk65KS9dNCbfNjpS3yTR+cz4mBT
+ B8nG8+o82U8eM6oOwhrHKvZ89vq8HOXzdp7ZB+LI0wUER76KrMXCcd/5XMb527njB9bZ2Y/q0hFS
+ l2/O5OwnMdaxsTyJ5374tB725GW1sgVnUwjD7/BnbtzAH/nNs57IyROc7dHHSsTqDENC4nQppINt
+ VTTsVVfFNYGoPqpfnoM/r/VnP/U52/f95udvX/hJzyz5DrqmX+rFNS8b9RnD51H6jXOzvo6TjgzH
+ 75P4EXfkj6244DIX3+QfpV2Jpfst3dYEPa3jktMXtfWBv+/n6MQ6+Bhaz1N6iC7KWPfGgzl5+xvR
+ qGfmnfWsp86Rz/XwhJuvEjDefNVE9pmvy1v3DoX9KJH1Dev8R5PUT2qQoVlWz9k91oyg2CSYuOC5
+ Hz6th30RvvMAG/7MzVv1iJP1jSFeRDZP1Z86nNf1mhfBvahyYTs+/dsWUHzx177wVcShnlHaR+Xy
+ 1z/nju01L3ve9m0v//jt1+CPROx0ZUfRg2tetkP/0luwdY8IbR6G0c5c8YrLOWcG5sgf+yL8MQ8p
+ rtJQ3VQgOlImFMg3MceS0woZb//8piOw8CYoOsTTrntcfvGLLsozz3p/s4LkVR2EjXpm3lmPzp9Q
+ jD0fbe6ClzxdQHArXxIvXMVVffCPB9bZtae/YdUZric/NnTm7WjbZx4/hcXgm/ZjcHz+L71ze/3n
+ /ZLtz/+G5+hPy+eT1Q8p30FdtkP/wWnmpSs/Z+kHrXbz8JPvGHeDvpfgj3mu2tFYD12wulfRw/eo
+ 2kPZvn/G0/KbP3b6kr3obtCPfuktuijPa5v95DFjfyNSfuJY3/4ciTSOfB57PscxsvmqMePI14Fa
+ LBxN5xM7n1EY/oZ1x/Wnv2HVGfoTAofIjwYeEmcv2tbRtp8yYgjn5cfa6zNxS/7Er3n29sbf9Anb
+ F//KO91e+qel26xr1a1bt9IP/mlLP4bhd6m785PP+q+4G/QtvXe4xJEX/snfhV2RhfVAhdHR1w16
+ WMdqrxUynnr5zR877chedDfoR7/0kOBRZuikhNTbjP1NZ9TDh0ny9kw/z7/GyjP7QJxwAFUC41a+
+ 3m8cCZ1P7HxGYfiB9dyX/Qx8jwrC5GxuzO6RScclqyInruNMxNc1LsH3kxkRu3yjjpvxqw5ENk/F
+ RUzXSW2rj5xZ1+XE9qd/41WR+GY8daJdLfZitfyxtvpV+I+q//ZnPHf715/9vO1F+NPy6p9N8hIe
+ +pfecFn30rPs3T1ieHCZD/cP2zfwJ9+OP3HFM/OI4wq9WJ/SbU14G/pCLTl9UVtPvonxK3Zasg6W
+ qfU8pYfooozPp/HiNWN/0xn1zLzJbxwZKg6FL74cG+oljx0CMt58Hej9xtF0n2jj/o3PKAw/sF55
+ hj+Htb1NEHh5GfQErdk9Mmnl5Exc5gO+HJr65RJ8P5kRsMt3op5T/K7HdSdel3qX13Xb34kWHRyu
+ I/2nTzlUWNepuuhX+FiU/TE8vQJ/Zut1r/j47a992sdtz78T2uD8lxBu3OdR+uV+wNX6YY0o3+Ha
+ xyT/8f5x/8gfW3nCn7l4Jr84rtCL9Snd1qQ3Mcvse1UKtZ6w+eaPnZasg+PUt+7nwtHvfUTwvMRL
+ fPb1NOm8/Y2ocTyvPR9zG0c+jz2f62Fk81VjxpGvA7VYOJqV7+z8JzY+ozD8wOJq80/h/aRGE2yK
+ zfAS0JsZ69iKGrjguW9RtPILcRiTd+JP8XeeqmPij/ziRWXNU3UF57yjj5xZ14XikNDx6b/qtWP4
+ 0wdntTUWZX+MT/zT8l/6yc/c3vi5H7+9CvM1bozh84hOpSf8fT5Y63wz9zmA5xbOO8L7XHNeF+cZ
+ pV2JpfXRhat7FT2sY98rvGnpaT3rzR87zVgHX8Mb3jcA0e990kX5uZ88ZuxvRMof3lFHzot+8VVc
+ 55l9IE44FSKg6yGf47JYOO5XvvOztxdqPrD8U3g++Rg8Z7XCSyQKuDlXkRPXcWQfTdCMfRGe+0f+
+ znOoZ/JpLXofQvMwP9XY1em6zYugXhRMcNaR/o0XUHwznvwVxyIqD5dPpfH8Z5xtf/UFz96+87Of
+ u/3GT8A/JtbQOWO9O++yJTvWu3l3TtG/ZnIe9b0E3/eg8pDiKg3rUxdoTXiL8h7Pdte91j7fxLmf
+ QvrFOjtOulKfvG/EV+8v0UV5n0/jxVt8rIOOUc/M6/rpdR5HHflo04NzFI502tC9MF9FZr9xFcf9
+ a342cWd9w6qfwvtJjebqzb6ezEw6Lhn9tAeOxci2Q/5+uQTfeRCgPMBnbt6b8Duv8884HhqH6/Rs
+ fycqv23Xkf4rTiKCB4Fdp/qmrfCxKPspNn36c+/Y/uXLn7v9nRd+3PYrn4mruNN93YvWz3LrPdHn
+ hT3F5ZwzU8sWmgbGKf6BP+Zx0NV5tT66UHWv6t77KZH22Ch+D/3qze/41Y91syyt5yk9RCeEglsn
+ 6ck85uTDRG/AUQ8fOsnbs3DkqzgQdH5smQ9x4asEzrvyJfHCka/ynfvfEHJnPbCuP/p2bvhJjaRo
+ liSc3WPNwMS+CM99PmR2o+zJG37Db+TvPFXHxB/5xYvKZr0SYZfX/ZkXWXtR5cJ2fPq3LSB52l/7
+ sqvLY7+75p86xu/5FXfiHxOft33tpzxrw8/opec8tz6fyJ95d07Rv2bKd9T3EvwxDymu0mB9+3tV
+ 19FPidGuccZDBr6J657PfuSv+4jpYt1FJ4TCW6eqJzL3N6JRz8w769H7rIrZ8+XYcI7kcWEjL8+3
+ A73fOJp1/tdPfcN6xjP1rw315ATLnN0jk0ZUzONJyawTr8zl15ovl+AZf+RP3lvhdz2uY8bt87pu
+ +1FTL6o82K4j/dgWkPW3v/ZlV4fw11/qWRtP3enj8POsP/PrnrV962fyb4KgbqUnJLnhnLHnY+Bt
+ jq7Rf8XlHAUyUMsd/yV5OvY2L/Smpi6+UHPSw4jlya06jVOf3OebGL9iCyI8dXZc63lKD9EJYXYk
+ arx4zdjfdJCPQ8c48ia/cWTw4P7iSx+olzx2CGgc++hA7zeOZvX5DP+RBu6sb1hf9iL8NTPnb9aT
+ k0nRLNnmE1O9Ikgz/RgTF3w5NPXLJfjOg4DwZ27e1EXS4gu/6kBk81T9wblO12veTiQK0cHh+PRf
+ /UlE6hF/7cuuCkDwqCVJSU/Z+QEI8b/81Ae3L37Lg9Kzzw+K9PlgrXPIXOfpc4r+NVPJ8nOpcQn+
+ VJ6E3u5Zb1bVXxdoTXiL5n2VKmkvHfSwGnajyDd5ZI842NJbdFoptHWqejTB09+IRj16WBZA59Q4
+ 8nns+XJsqIM8XUDuAevrQC0Wjib919+8fdnL/ZeMYmc9sATf7tKTEyxzdo9MWjk516Nx4pg9+1iY
+ Ma+X4Bl35E/e5r0Jv/M6/4xLHa7TdduPwnrBfduuI/2nTwAJ0FR1tl0NiiDNPjVnyvkP3/Xw9jmv
+ v3/76+/44PbB66XfOLcbzhkxPgbfF59T9H/87tNVOxH16QtV96p00Lu67qOK9sU0Hvt8E+NX7PRl
+ 3fqa2n9Kd9FJcbPj3i79yWvG/kY06pl5k984MlTcji99gJc8TiQg483Xgd5vHE0EnJ3dJUe93PDA
+ 0pOTZGiW1c8npqXDNqnoz3zAl0NTv1yC7zwICH/m1NEzSYsv/K5nXy/xwckvk+LVdi+W7TrSP/el
+ sgG7eOpTcSfqSV1PlfmH7n90+50/8MD2VT/60PauD+Eqnjpv6V76Yy35M5/C515RxMN5x/a55rxq
+ FnyfhxRXadx4r6JH3lep1vev9cSbWA+tgx7Woa+p9R/60d/Xne8LPT2Iz37yOC8fJgpoHM29vkQa
+ Rz6PPZ/rYWTzVd3Gka8DtVg4muC9frMH1p3P/k6ArpPFT2zPaoXNm8Kzmgaa8wGvzOXXmi+X4P3E
+ VYnNn7y3wq86ENk8Vdc+r+s1L2rqRZUH2/Hp37aA4ou/9oWvDo/91vbH+vQePJz+5I89tP22Nz6w
+ /cB9+rN9atnnEZ1KT3j6fLCW/JkvuR85R5Hz5RL8MU/HXZGF9akLtCY9FEZ7bBS/h3715o++aUf2
+ 5NF9HXGwpbfotFJo6yQ9iTdjfyNSfsvNh0ny9kx/P3XG+RaRJ8QJB+7e90Ms+Xq/cewaxHc8epcr
+ 8uv+G9YffuF78Uh7C4P5BMzsHpl0XLIqcuKCF/VoYtoX4f3E3fMnb/OmLhIe+MWLCpun6g/OeRlW
+ feTMikeTtLe4Ew8hnK/95sn27E/rp8DLw9Dvb/7MB7fP+r77tr/3Cw9L19m29MOGdaz7VLbOFevd
+ 3Oew7l2fO4nLz6XGJfg+Z4CZ56oN66MLNa8X36QqdbXri9p68qGBX7HTl+xFd7HuoovyPh9ZSkhe
+ M/Y3nVHPzJv8xpGh4kCw+HJs4CWPHQIy3nwd6P3GwTw/+8H58ysC9g8shVy7i1X7ie3ZPTLpuGTj
+ SXnEi6YfnbIQ6KYmb8eRlw+jA3/yNi51kfLA7yf+vl7GBee8I4/EWzyiE9ziTjyUM0/7zZNtlpM8
+ Wn+Mv7zufQ9vn/f6+7b/6W0f2PgD9qlzWvd5RKe6T5LpcM7YA4POn7HWfX//uH+Dvo/xPonjCr1Y
+ n3mvSge9q2e7vqitJ/x888dOS9bNca3neL/Q731E8H3RebKfPGbsb0SNY30rb/IbR76K6zx5v3Mf
+ ceTpAlhn+uhALRYO5tn5XeXt6cQD6/pdvBx+YntWK2weYWm1n7D1UJh4sUuUzoNAN7XDJQ95T/An
+ 77GeU/yuZ18v4/Z5R57VyKIT3OK6TuN1uOpzxoNg0Xee0fHH3PInH3p0+wM/eP/2e978wPaTD+If
+ /9g/uxw6p2mfR+l3s3NmOH6fxI+4nGP4Y/uc9veVGO6LF2vOV224Xwq47pHrlaJpD2XTrv5kjfs5
+ mrIOTaf++33DuOghuigz95PHpP2NyCdcZY46WDd56ef511h5yq8JccIxIPvpowO1WDiYh59fEXDj
+ A4s/x7p+ft1PbJDy0gDYM9axSTBxLEa2HXxdo5q6CH+Kv/OENzNZh0g2fQjNQz/F2eUdfeTM2g8W
+ wS2u66z+7Bj+2l/0nYe1fKwNfov68z/x0Pabvvfe7dvf+4jbk75om9bQOb1LPxjWcd2LPh+G0Z+5
+ z8HndozLOQLucQn+mCdhV2VWf7t7FT2k6GjXChlPvcb9HM1YL+pdPFqc0F10Ud7nI0t6El/y8mTo
+ 0AmFd89nL3ECKrB1LyJPiAtf76cPhTmBsjnvOd2Hn18ReeMDiz/Hura9xU9sRPEhAWDPIrVNgolj
+ t7Lt4Osa1dRF+FP8ydu8N+F33n29Pj2L6byjD50Syuu6sJb2FnfiyzH86bvDx2K1/NG+okSv+fkP
+ bi//7nu3v/72h7ZHsGGd4aBuvluYJdyu3eCs47oXtHWuQO/mPgfyFj4zmcvfSS7BH/N03BVZWJ/S
+ bU14l1L12S7toR/8emgd9LDOfSw+p6Ff6yG6KE/8en87j9K7DsJGPTOv66fX5+WoIx9telC/cKTT
+ RuVlXxWZ/eCubzf8/IrIGx9Y3D0/v8tPbDTDprHVM91lY9I+s04897GhqV/K3uESJ3jlwTr8mW+F
+ X7yI7DqZjyLs8o4+cmbtd2LHp5/qjxWJb8aT39vqsXi0/hh4+Q/3PrJ9wffds33VWx/Y3v2hR/e6
+ sr/owfXQmSaHz6Pmm50zsH3OiQs+swn5ukafm89Z+Qa+7wEiyH/VhvWZ96p00GN83CvZdR/RxFPl
+ Gxb+g+e7Tp3Z6QfWdsddfmLzCQixENmzRLNNwonjJZZtB1/XAA/HRfhT/MnbvDfhd959vX5Tzbyj
+ D247QdVl23VUH3ovFJD1l73qIp/Cx6Lsj9LpFz90fftqPKT4sHoz/piC+zvoyt6iB9d6eEUIbljn
+ nse53XDOALWeiQs+s4n4ukYJr3MPLjNQxzwr8GqsVLcvVN2r0qEer32v/DhXP6yc31TmN510Yx36
+ WIw/pYeusxRXaOukhDxnM/Y3olHPzOv6XY/OvwrZ87kedtZ849zM14Guh/lY3qP7P39VqAu+Yd15
+ 5+7PY7lHJo2omCEGx+6TDcVkfzZRwJviGXfkT16quMvjxOLLi/MaN+NSh+Ndr/2I7AX3bbuO5Euf
+ ABKgqepsuyoQQar56Jv5xxS+8e0Pbi/793dv3/JzHyhBMh10ZXvpn2tewkP/Pg9uU7fSU2GHc2Z4
+ 7WPa40fckT/2jn/gfY7gK35yX6VhfUq3NelhxDqXnO7AePZD/Zae6ck6OK71PKWH6IRQaOukhOQ1
+ I/PoYDhj6BhH3llPP+WEG++PiiNR81UC5135krgeYid/fsU6Tn/DOvx5LPfIpHW5OEMMDs0oYjfb
+ IX+/XILvJzMCdvkYdwv8rsd1JN4qzzpdr/2dSCWqPDhcR/pJn3KosK5TddFfHfai7I+i6XXv/dD2
+ G7/rfdvX/fiD24OP8r+nSd/p76Are0v/XOs2RwhuzPhxfrUv/bHezaWfzvEWzjvCX4Tvc6o8mK7U
+ UN0WuvSOHrmvKZd23UesnhLfsLbTP7+iIqcfWPRs689jSTJeIuz2JeMlpV2XdTfbIX+/XIL3E3fP
+ n7y8nJfxy1+fADNOb6au0/Xaj81ecN+260i+6o9A9TnjEVDb6rH60/qj5OUn8EcTft+b7t1+9xvv
+ 2X7qITyoqIfeROnbbdPR55M+owdD9PDyfaDJ4fOoOedX+5Id691cvIoLPrMJ+brGJfiuFxFqa0Ve
+ iZX1QWXR0dcJ6lvHag+10q77KIvvw2WnGevWdNZ/6Nd6iC7K+3xkKSF5zdjfiEY9M6/rZ3UIQJ6M
+ lcdE5gOvcEBVAuNWvt4nbjv98yvmuPiBdX7920jSn1RsnlT4rbmKvOgTbjbBRLEvwneewd95qo7U
+ M/m0Fr0rax7WR3F2ddKsPlYji05wi+s6jVfH4pvx5G/6sUhFV3e+H/+67+t+7IHtc//9+7Z/9Ysf
+ RBs8Z9RLwXnCpYMs7ssf3bThfomTewrBDbqNs46+R9lnGnp38yl8zt2BfF3jEnyfMyKY56oN61O6
+ rQm6RLdUbKVaT/h9XsY1inpMHtkndBcdFXF86yQ96x7Ia76FI/2ej7lVL99nNfZ88CsN4phPaWfe
+ la+Ahbv2beE7zhc/sL7iZa/bzq+/bT4xmSqt9hO2HgqyUV329bCY2aqpHW7guX/kl32L/M7r/DMu
+ dTgv6k+e1YiqVHkItL/6KFuHpjri96wzyFmJYDZ89dbU5e//3EPbS7/jvdv/gZ9X8edWq184CeAJ
+ d9/0c++gq2CFk1sBXPWQ3rA03+ycgWHak/gRV4U0f+wd/8BzX7zFvwKvxsr9lm5rgvq+UOs60V7v
+ Kz00hp1urIPPq/U8pYfooox1b7x4zag66Bj1PBnfsJDwp7YvffHrXMWNrxc/sM7OzrdrZ/94PjEt
+ nVvoJz5E4eWRnZl5uD9H2TvcwHcexFhTHBLWF+GP/MIhonmqruDMYz7zdiJVqfLgcHz6qfysRHzx
+ 135tLwKtruTLm+55ePv873rv9kd/8N7tPfg3gdYr/bBflE1h+DYvHWTpGA+6xkEc13x3iYCGx+Tv
+ +wGX9XUc0yXtSfy4H0f+2IoLLvOJPK7q6ry639JtTdBDiqa9Vqj1gX9+00lH1sHHIF11X+sekwW2
+ 92HwvDpP9n0SOcb+RtQ4Xos9H3MbRz6PlWf2gTjydAGph3wdqAVw/w/qy24513TxA4uYR85f059U
+ vAzYSqv+hGDv2KEYc2asROGiRtk7XOIA4f6RX/bkHfgjv3jB0DwVF5zzjjyrERWo8pDQ8enHtg5X
+ fPHXvvD7/sq6MtO78JdS/dEfvGf7bd/9vo1/tmqnD6pc/cKg4Dzh0kEWdTrqGgdxck8huFH6ZB7n
+ 1vnhY7p1DGJSPU/EfUKqKzWoQwk9J+gRHVKuccZTL7/5Yzeq72fpKrvuMUCtu+ii/NxPHjOqDsJG
+ PXzoJG/P9ON8M1Ye8pGfr4gTrjeqHvLRj7EWr/HG6debP7C+8uX/gX/jHzn7yUnusknpJzuflCga
+ STXbwdc1qqkdbuC5L15EhD9z8w48Ei1urJzX+WdccM47+mC4geIRHWzXkX4Wr3jaX/uyq4xDPbV7
+ 26YPXT/f/vpP3L+95K534x8DP6CHA4tpnave1S+c1IPI6lOWZD7oGgdxXPOyHfqX3nBpHufW+RlG
+ f+ZRz62cd/Lt+G+SB2mu1LA+pduaoIcUTXuo2QoZT8tv/thpyjpQ79JVi/V+pF96iy7KE5/95DGj
+ 6iBs1KOHJXm5m5l+nn+NPZ/rYUXNlzjlZX0dyDX+dtGX6K9qD99xvvkDi+iza9+kVngZaOJ3bLlZ
+ LJPP2Q6+rlFN7XCJA6qfzFiHP/Ot8IsXkc1T9URM5x15ViOqUeUhoePTj21VJL74a1/4ajEEZd6u
+ 6UH8d39/9x0Pbq/4jvfgB+v3b/jvlXU+qWenDzZXv8QRxfOs/uTn3kFXbkUPuacQ3Jjx437UPtO0
+ /FjLln4Vl3uRGZicI5cal+C7T4DJfy/+RcOrfviB7R0fwL8Nvc1Db+rcl+ho2aELlZntWin2o334
+ 9dAqW5vCm6DodK79vim/dYYBXX0C1nvpz3OGS17zLRx1rPcF/aMe8ylM+4uPOO4jjn11Acm78hF4
+ fnb2TWa5+PXyB9b1a/+AGfvJCYM10ObQzGS0M9shf79cgj/F33nCm/kEv+txHTMOhakE1+d67cd2
+ L7hv23Wkn/QJoPqb8ey34roeEt6ewfK/5Wce3F702ndtf+It+GMKD/g/UvYn26qrdR66+JMTBIKl
+ LzKmv4OucbB/rnEu0Zkmh8+j5nFunR8YpmO85lFP36MRd+SPrTzBZa78O37s/ZN34c+bfc8921/4
+ yfprcbB3O4YepuvCSYC6XtBj6s7qrFDrCXt+00n91oF6Dz1P6SG6KOPzWfrznM3oe5P84a33Basq
+ 4OX3ixyIYx9OxA3F972jff06/lK1Z32LnDd5ufyB9Uc+812I/47jJ1Y/YSEKu5SdmQm5P0fZO9zA
+ n+K3trfG73pcx4xLHc7LsiheldeLZduffoxHgwZoSjzrqjj2eex39v4Er99094e23/qd795e/ea7
+ 8QN1NpW6MfOW1OXKPhHWyzMvk8pXaPrCLBxfD7rGwf7l5sJ4mhyTv+9H7Ss/1ru54hWXe5HZhHxd
+ 4xK8z9H1zTyU53/Hf8j9Wd9z7/YP8B9203c7hvVBdvaxJp8XClpyUte6j9zHL5/XCb0nj3hHHGz2
+ Kl7dB8e3TnZ0Xt8bBPiEq8w9n72sn8wee770gTjydAHcTx8JvPba7ctf8O7wXDRf/sBS5Nk38YnK
+ FpWTcxXpJzuS02YRKT5zMl+CZ9yRXzbjwpuZnAd+53X+GRec63Pd9oOjF0UH23WkH9sCqo74a1/4
+ avBQT+0+odMvfODR7Svf9H48rN6zvenuh6sdnlDqxswTg24ZrXPVu/oFQjDiR7zoDrqSLHpwTf5D
+ /9JbMOefNtOQdjePem7lvJNPvLkXmSvvjj/5Ks8vfvDR7Y/9yIPbF7z+nu3776m/MgeYJ2tYj9Jt
+ TT4vFLHktFKtHxB6aC2ASrYOfSyIv0B30UUZ4ut9Jz6esxXob0Q6qfDW+0L1GXj5/SIf4sijtBWn
+ vDPf9Zv+sN1V3ewPjgah+Zn/GJfoPqZKq/6EgF2XdTczhvtzlL3Dseixf+SXfYv85jHfjEsdzut6
+ 7UdxvahyYRNHcSdeQNURv2edQdqsPmbLT9SaP1D/az9+3/bSf/uu7e//7ENVb9pxQdYDdfLEcgtR
+ kPur+ttmvzCoh/DTz72DroKRV2jzH/rv/NKt9CQ7bMkuVsfLrnjF5V5kTj7OGZfgbzXPD95/ffsd
+ 33/v9pX4D77fyf/Nz5M0rA8FLB09+bxQQ7XHFX4P/WD3/Ry1Wremk848d+fhfukuupzA3E8ek/Y3
+ IuUP756PyMvvF1GII48PmhtVD/m4ff7Adsf5t8pxycutfcN61affh7/U71+4JRaJJPUm2D3JKUrt
+ zzeJargE3096gMOfmV3t8pAweUSeeoybccE53jj7O5EYRAeH60i+xSue9te+7C6gFk/s9C9+Af84
+ g59Tfd2P3LM9gAeX+2O9SzdWoH4585bwVtRwf8OPQF6m9M8IHkDHw+TGMU4BxMk9hXCiFe/802Y1
+ jNvNKqDy3sJ5u+CL8V3vreQB5lvxj4evwM+3/upPPbR9gLo+wcN6lG5rgi7OXXKgCivV+sH2ee1r
+ lH/ykCA6kgW29BadVupw7SePG/e94dp5RFf3QLtV4OX3i2jcH/J0Aamn7u352T/a/puXPkDkZePW
+ HlhkObv2GrdUEtabwE92PimtVp7o802iIi7B9ycAwMpDsZl28lL04jnye9/+GRececxnfyda5cFB
+ HMWdeFWkOuJPXZwVPhZlP87T2+5/ePtd3/WL2xe//r36gfr+E4v1Lt2Y2npg5i2py5X91qdw7hcG
+ HcKPePV30FUwJ7RbwnG3R+dXYaUnvNZXWXy+2DvWw3qt/4pTg80uIlk7XOLkrvuDtfgz14Ht4gr/
+ EP4N61/+yYe2z/7ue7b/Dz+gfyKH8rOyOrg+P72rve38VHjpoIfVsFOj+2m6G/Sj3zqTLooQn/3k
+ SVafLxDacH2jDm5gXH6/iEIcebqA5CUf/ibk7eyW/nGQTLf+wHr0Q/8cCd+VVvuJz+bZ9JxVI5Fj
+ SCRqdRrfT3qEUIq2L8Bb9MUvXkQe44KTn5qBz/ydSCQqr/3pp+qV2Kx7xseuGkSw6nm8Vvc+fH37
+ H95y9/ZZ//YXtrve80EW4Dp4+qOR2lZ/zG09MAtHoMfqH/EYttkvDMGKv/rxdNDVgVUH4yQMd3t0
+ fhVWesLb+bFW+Zk7H/NH/xXnApueRDKU5wT+MeUBU+olKf/R8FVvuW/7z99wz/aW+56Yn285X+m2
+ Jp+X6mElHD6Y1KcPFygX25iqf/JcpLvoorzjZElP8prR9yb5ieN5rbzJf/n9IgfidA+xrASMJ9/1
+ 6+fv3j700L8l6lbGrT+wXv2Kh0H4mrTqTwjeLau0m5mZ+3OUvcOx6LHvo0FPCmeTt85vHvPNuNTh
+ vOazH+S9qHJhE6dLwbrKrkXbDou/mqw+yvqIJ/5Tyd99+/3bi//Nz2/f+BP3bf1//EtdOOxVP/uO
+ iQVG6yocK/Zwf8OPQPcLv2DpKzyMO+jKra6DbgTSHqPzC3eTc2Y4fp/Ej/tx5I+tuOAyF594w5+5
+ 6tzFjfxYegD3Rvww/vNff+/2x/HzLf7Fho/ncL+l25r0JmaeJSd1HfoB0fdzFOR++lis5yk9RBdl
+ rLssJfQ9Uv7cL84YdM+8rh/7wpHBg/uLz3GqP3zVmHHId+3a39/8bAnFTedbf2CR5vr538ST8RH3
+ 7CL1pGWREIddaSaW9hxl73ADz33xIib8mZt34I/8zuv8My4452VZlUeqIlnX5cT2px/jVRFxIF7x
+ savJ4inrI5re8L4Pbr/pte/cvvpN79veg3+b5X5GHtbBa9EO1hsTCwz1y1k4Aj129WNr9QtDsPQV
+ HsYddOVW9JCbBRlPk6PzC1d61r7Kxno3V7zics6ZTcjXNS7Bd5+IuDQPMKm3E1Td1zF/yzs/sL3s
+ u+7evgF/HOLxem45X+m2Jp+X6kkl1HXoB8T8ptMo1dvH4n6Gfq2H6KKI+5YlPX2PyNn3i/eHNqaZ
+ N3pdfr8YDV7yOBE3ovcj16+d/a/auMWXx/bA+sqX/Udk/Rb3zOywIAq72c12yN8vxGHscImrffFi
+ Hf7Mt8IvXkRynnHYYNrK69n+TlR+245PPxUnsdnnjI+tcDpq8eFPP/fQI9uXvuE92+ff9QvbD+G/
+ ++NY/RQv87AOnv5opLbdZ8UpXjgCPRaf6139wi9Y8bdujDvoyq2ug24WZD66OMjb883OGSC3cQI/
+ 4o78sZUnuMyVX7zhz3yTugBZQ/2tvvnzrf/5bQ9un/s9d2//4hc/8p9vWZ/SbU0+V1Sx5KQudR+5
+ j19880ffFGwdHNd6ntJDdFHG59R48Zqx7xfvD4bkGHmT3zgyeHB/8TlO9ZPHDgEr/lu2L/mMn0/s
+ rcyP7YFFxuvnfwmveMC6SM0sknZm4srPpcYleMb7aECDgLYn7034lR+Rx7jUIT81Sx6Jp0SrvPan
+ H+NVkeqY8eyXtsLHouzHMH2AP+z9kbu3l/zrd27/7zvur3pMvKuXntTB07dQ2GS9MVec4MIR6LH4
+ Fk5vApqCmUh6Ycv9HXQlVdfBuCkEnXQv/nkvOj8wKj/zKfxNzrsKc57gMlf+Hf/N8hQe0xrqb/Rd
+ np968JHtv37zvdvvesPd23+s/5pgBd36yvqUbmvCaUW3cPlgWk/4fV7GNSrngW31PeonpnUXXZSZ
+ +8ljxv5GNOqZeWc9Ov8qZOVxfT5W6EgeFyYkvrniD7df+4sVdsvTY39g8VvW+fbP+glbl1U2qsv+
+ bELVEIexww089y0ZRCxcZl7OXZyJ+NrDeY2bcanD8c5vP0J7wX3briP5ql4C1eeMR0Btq4jqrwu6
+ xcW3/dwD20v/9c9tf+Gtd28PPoJ/X1I8cx5lrjp4+u1gvTHZSOrGLByBHu5v+BHoT2z4BUtf4WHc
+ QVduVUKheCurbro4Zv19frWvsrHezRWvuFs47+S7CN993kqeUS+WHupv9N3b1uW77n50+zx82/pT
+ P/rA9j78i5HHOqxP6bYmnxfIlpzMV/eR+/jl83IdyWsdHCddR/3EtB6ii/JzP3nM6HujSG2IbtZR
+ BV5+vxiO+vELkwvEdO3s7J9tX/YZP07vYxmP/YFF9vPtr/QTti6rbDSRfV7S3Sh7hxt47lsy9MQU
+ sRkXXGYSF19yOK/zJ16nXjjnnbyINFAUgsF23uqjbAFVR/yedQZp81BP6rpo/rF7P7T9jrt+fvv9
+ 3/OL2zvwqa0x+nM/M18xpQ6eftfPemO6oI4XjkAP91f1Y2v1C0MwE3W86A66kqrrYJyE4m6PFU++
+ 0hPezo+1ys9c+iku+MxkLT+XGpfgH1MeEKbeYq/+Rt/lWLjzDV+Mt/8L/6H5y77z/dvf+mn/fxs7
+ /pKFeUq3NeG0JPhol/bQD349tA56iG/y0D/0o196i04rVbj2k8eF9zeiUc/M6/ohE/3Ik7Hny7Gh
+ fuGAqrofffTsryTmscwf3gPrj7z0e1Hk65jIT3aIwaIpSorPnGrK3uEGnvuWjCKYN3PzDjwShVmz
+ 8zr/jAvOeScvwgyseNuuI/1UfxKb/c342AqnoxY3n96Pn9p+zX947/bZ/+ad279/zweWXgwb/bmf
+ ma94mYd18Pp1/aw3puvoeOEI9HB/5uXO6heGYMVf/Xg66OrAqoNxLMh56eLo/Cqs9Kx9lY31bu58
+ zB/9V9yRP7bynMB3n7eSZ9SLpceoO3XSkb6wEo72vfhm/D/ir5z+HPyPPPg/9LiVYZ7SbU0+V+UJ
+ C/MsHfTQGHajVK+PQfWO+olpPUS3Olr7yWPGvl/dJ6sYdZCfvPTz/Gvs+VyP6heOAYp7Hb5dfW9i
+ Hsv84T2wmOH6uZ6QetKiiN1M/2iCZuwdLnFyQwzO+K2Zl7D22eQuDvvh45JDfkRwnnHBOd44+xHU
+ i6KD7fjkW7ziaX/ty1b6Iqj1iYmfxn/rbfduL/qX79j+NuZHSh/XXQHpE2b2Xc+gZxzzUinM7o/1
+ xsRixgtHoMfiWzi9CWgKZqKVn3EHXbnVddDNgsxHF8eKJ1/pWfsqG+vdXPGKCz6zCfm6xiX47hMR
+ l+YBJvV2AvU3+i7Hwrlf28Dh10/hf+rxu7//7u2VP3DP9rYH+g+iNOVcJK51tOziIW7JyTxDPyB8
+ Xs4fTvHVMajfUb/58r4gXRRx341XHjP2/UI+x7OKUUf0p198FYf9xZc+rE858L8cvPbnjH7srx/+
+ A+tVL/l2lPr9+qRgkSw6M+sYTaissne4ge8nM8CUqO3JO/BHfvHyMIFPvC8D5TOftY0fmwaW37bj
+ 00/FOVD4Pf9o89ivWP3yHe9+aHv5t//s9jVves/2fvxF6kqr0wt/gUd/7sf+UaYTYsOfbIhTXtab
+ dka/dDMPeDN29Svcl6loFBGdGOO2DrrGoTpgkN9AejRm/fNedH6gWBWr1VzxiosOmcl44I99Ef4x
+ 5RG9dWMqDear/KmT+8pnQMHcgd7M5f/2d39o+1x82/oz+PvI+PdwnRrmgU95evJ5iSdRxR99gOAN
+ WnUYJ3vR2T/0o199iE4rBa795Ck+3RuuuZ/6Vt7kv/x+MRpxi+/7ti954V3c/XDGh//A4t+7fL59
+ vZ/sEAPiUHx/cqAU2nOUvcMNPPctmSVqe/IO/JHfeZ3fZ+J6gnNellV5cmZdF4pFoP3px3Y5hr/2
+ ha8mi2e2/NP4t0i//7vftf1O/KzqP97Hf1QY9en6hL+iRn/uJ/VwHnmYl/FutHhjGtjxwhHosfpf
+ OL3ZaApm3o4XbNSdQjirDsZxIWBlcd00xHPoi2mI3s0Vv8OPuCN/7Ivw3N/xJ9+pPKkTcw/iKn94
+ 6FM+gdyvbeiDX8IVP79B/w38jz74P/z4pp95CP9A0syOFg4RytOTeAgoGq7wu+6jLOZZNrY0rMPg
+ Ee/CtR6iWx2t/eQpPuYlTPnDu+ezlzgBFbjnc5zqL75Hz7YP62dXIsfLh//AIsOnvuQf4RX/1hAi
+ sujM9I0maMbe4Qa+n/SAWlOIo7DBO/DhIzWHn/iuY8YF57zG2Y+gXlR5sF1H+lm84ml/7ctW+iLw
+ mn/r51/4ofdvL8E//vHfAnrwUEd9vg1Vd0FGf+4n9Qx66Qyb8V0/eWMyj+M0C0egh/sbfgT6Ext+
+ wUy08jNu1J1z7TronkIQv+ef96LzA6PyMxev8kaHzCbk6xqX4B9THrCm306g/kbf5Vi4qTNw+KV+
+ qi4wKoL/BvFP/vB92+fhfwDyve9fP98yT+m2JvEwsGnE4zq0rzzLVhLhc26l66hfcbBdHyyeV9XX
+ OikheYmm13wLR96VNzoYRz6PPV/6QBx+nV/ffnz7Qy/8p8F+OPNH9sD6gjP8662zv8wu/YSvmZVI
+ lFFS2Ttc4gRnUyX2tBkXXGbSHvjFS1GwL56KC855GRY/OAwkm2E8I/nTj20BxRd/7QuvcG6QZfuH
+ +HNUn/HP37H9xbe+b+PfVqK8gviSdH5dn+kHaPSXuMbnTqQOxnf9rDemgR0vHIEei2/hdJloCmai
+ jhfsoCupug7GTSHoXH2J59CXygZmN0u/igs+swn5usYl+O4TEZfmASb9dgL1N/oux8It/Zjh+A0L
+ jIpQmSjgR+5/ZPvC77t7+5I33bP9NP6tsHngUJ6exMPAao8r/HYd2oft8zI/9zjEt+jKHnEglA6i
+ 06rjvJ882nYddCi/65l5owP71vk7THkXn+Nan7OzvwTsvvCKu9VJJd0q+CTu350/Y/vJN//02dm1
+ T5bKvLynBvcpWl3C3Qy8mpzzwDWvDvc0v5/sdZjkqXwR84Z8ndB8C16XTxsiwksdiqajH24O4D/n
+ lz57+/73+X/4wMPtOANku072e/QL5Lq5ZH5NzNemF1VHATCVroXn/kV5en/H7/iZiDiNTLBdR28c
+ 2qt9R+1fc24187ITLb5TM/3MN2bpwfhTg/us9zB/WHkmf/HdwKM60gECyl4PrZBUvdWoedIXw9qB
+ +qt80uGX2CucOw1ov3mSSbPqHTywoyP93Ufyive4T6DYqj5FamPWXxu9r0W97PNgE/lQyc8/+qs/
+ /VM2fcmZ6Me2/si+YTGXv2X9bxK/LplKkCijmLJ3uIHvJhHiw8qhQb3gMpP2wC9eRDYP/cAH57w0
+ w9uJyGaY4PTXZSi7L0v7zcNCuwws3sCHFdOST6c+LxVvwaivboXrZgUYo7/s7+olphKIX4nCW3mr
+ oI5nHupQY/Exzn24XxiCEe99+/k66i7+VQfdCiCwR+dXvUuHzg8k07n6mc/1Kn7osYSuFLPP4DKT
+ F/4df/Kdiit8MXsadYeHjvTlymOjP/wSrviXH0FwuJ7SoexyzEk8zsNXDiuUvM6z9DSm6hBv6Trq
+ Fwts1weL5yXe1FXxQKT8vl+NI+/KO+u5+f1idsX91Y/0YUWmj/yBRZYHnvN/oqgfY7frk5mijCGR
+ qJVV3c2A6ZOAM37riHj5ar95b8LvvM4/4yKm8408KxGycB8vCHQd1UfZ5Rh+44xXuAmEr/qrE9el
+ DEpgfsDlL56i6D5hJ67xrI9D+lW8G8Um6628amTEM09uocKHrm07XoILP+KV96Ar4pLQ7mqc+zVm
+ /ce+VDZwu3nWnXPOTM7yF33byhNcZsGrT6wvzVP45uZCgo6+y5m+ANCObeDwS3m6zvgBg4M4PWzE
+ a7sccxIPiZtGeVyH9pVn2dzjsA6O6zpO6cGysA8kw6quikeFyav7SVjjWObKGx2ME5DgAx9t7p7/
+ 2PVPuhPPiI98PD4PrD/xAvxt/udfw+ry5LUoo0CJRK3QXHCZAeO+tMTampZ9Af7I77zOn3iptcs7
+ 8uTM2u/ErqP6AFF4la/sxU9/9ag6bcuPw2Unjidm2csf/uI46KEo8DpfYZKHfO1gnpjMs3j9SUmg
+ h/sbfvE7XnzFm7rJmz6czvxJaDc8BlaWPX+fN7ydX6ywM1e88kaHzGQ98Me+CP+Y8oi++mIuDuar
+ /Orbu6rfS+OVn+eMXzt91FmVLXnor/tQtk5MeSod0844JbJCzmN/81RNnKzD4Bn1x+/6YKEvMu33
+ k0fbroOwxrHaqp+7dR6X3y9yXPua7b/EM+JxGI/PA4uFfMVL/9X59fN/mievRRkVSiRqpdPaz4Bx
+ 35JZorYvwB/5nReXIjwVF5z8SLD8SKqEOhXD2m8e+sMrQPvTB+fqUflsixaHywSOJ2bZy188RcE3
+ SfBzFv6Yh3ztYFxMAzteOAI9uO+whdObjaZgJup4wVzXjEtCu+FpIVYersRz6Es8lS5pVz7mLx0y
+ m4iva1S+Hf/Ac/+W84A1+TsB+YsvPPQtnDovG/Xil3CtQ/wIgsP1pK/wyFF+TeJxHr5ykKfiZDHP
+ songUF2LruyFc37iAEZf5nWcLDvslxdAOpTfcTNvdGDf5iP2yMeN83/6yJd8+r+S83F4efweWCzm
+ 2rX/Fk/e+1SXRBkVlu1PAohOGyL1kxq2jwbbCON+5sYN/BSJWcxjvhkXnPNNXgQZyHDDYDtv6lu8
+ ArS/9mUr3ARlY9KlYgLXpQx4GfX5Ngw/3QufONdjerJ0HYxXIl23bDdfxwtHoMfi020U3p+c8Atm
+ 3o4XbNRd55iEdiMw+yMPl+I59KWyKx3jZVf8Dj/ijvyxL8Jz/5bzpE7MPVhP5Q8PfconkDovG/rg
+ 1+wDSKOqQddT5wugebSQAEpH/hnXeca9UJ5lC8K4ImieUT8xzs8ZBvpiJo6170Lllxc2YY2jufK6
+ fnqJE5DgHR989z+63fnH5XicXh7fB9aXf+bPoK4/q9pGE9P2JwEOl36o009q2JbMEnFf9sQN/BSJ
+ /OYx34wLzvmMsx9BvSjNYTtv6lu84ml/7ctWdyYoG5MOlwlcFzG+JOaPn2HrsI96KKp1oIUhPSpe
+ icKbdswX3ss+AV0P6wQ3+Vin+ggP9w66ClY4uRXAVY/Or3qXDs6nLEqXtCfxNzlvF0w5WEfxZ0YV
+ jylP4bt4LgavZC5n6nQHlZ/64JdwElIEipBZeuqhJt4V5zyVjmmpP2dP2on+2leepSf3OKzD4FGe
+ hWs9JPjqaO3L0Xl9b8Rc/Kxiz2cv4qB7xuTD7tdtf+gFPxvf4zE/vg8sVvTxL/lG1K8fwO8KrKb8
+ SVBPaojaT2peNgREyt2TP7jMJB4i2XTkMS4452VY5VmJGG6YtKc/9RnPayRA+2u/tptgwXS4jFNe
+ A/BqG7Dyh18A3bbg5yw86+Vg38xDpdpB3pgGdrxwBHpw32EL537hF8xEHS/YqJuJOLoOrOvNof16
+ WfHkWzp0fobhN9mO9TR+xClfcWuqOpQnuMzkTZ/hz3wqrvCY1lB/o+/ypC9X7jzsQG9mYG70YxMN
+ up7SoexyzEk8TFVlciWC8DrP0pNYDvnFK7qyF875ixc6mddxspSQeNG5DjqU33H9vuBuAX0PBVTg
+ ynP+xuvPfOE3avNxfHn8H1ivPMN/9Xntq/pRnWIlEsTkjGZ3MzD9ZMZaR8TLV/un8Ed+8SGieSpP
+ cM438uiUlAAv3McLEjo+9dkux/DXvvAKN0HZqrvejq5LGZSg65M//MURXWAmrvG5E+oLfr0LBCze
+ pRvZOl44VuSx+Exom/3CLxgW6iN+xh105VbXQbcCuNuj8wtXesLb+bFmuqQ9iR96uMCmd/7iO3U/
+ HlOe8Ax69zf6Ll/qdOXuR/qgE/UjIQke+pWeethEN+FKtzUhasQppxVKXj006jyqJKOat3SVfUJ3
+ 0anSjpNV9WiCp+/XqKfrp7+AxpHBo3U/21696VkQz+MzP/4PLNb1qhf/O/wA/pt3JfJSY/iToJ78
+ aLqf1Lyc9OO3NS273gy7OBPxtYd5zJf4XDqCHO/Zfmz2gvu2idOlUN6KI3DYi7/inEB8BUMfJnRd
+ AiiB+REnf/jpxzjowa3GWz4nZDmM7/rZd0wDk9c4Aj0W38K5X/gFM1HHC3bQlVTRg2teXtpjrHjy
+ 3eScGY7fJ/Ej7sgfW3HBZS4+8YY/c9W5ixv5sfRQf6Pv3k6fnl03cPg1++AJcSid5LnsXpUOM84M
+ eB36Kc+yBVEe6ux8XccpPVgW9ld9s27ymrHv16in3xfJJxbmJZ8H9Tg7P//mh//gC9+YvcdzfmIe
+ WKzw0Wf/KRT/ni62mtKTmU3RzgxQP5mxtqa8BN5v3MBPkZhDfDxM8M644Jxv5MmZdV1O7PjUt3jF
+ o7OZ/PQzO4b66Ql3xwlclwB4GfXJH376MUZ/iXM9J/LodiJGBZB36UaqjheOingsPhdu2/ESrng7
+ XrBRdxpOv6RF3a6jkmBa8SzM8fR2fqx1TpmLV3HBZ3YgX9e4BP+Y8oA19XYC9Tf6LsfCLf3Yid7M
+ O574sdn3pnQouxxzEg9TVXtciSB5nWfpSSyH/HUM0nXUH7/3YfG8xOu4xrMPl+066Ggcy1x5Zz3m
+ I1bjvQ8/uv13MR7v+Yl7YL360/CwuvZ1XbBEolZQAarsZoD0ZOaM3zoiXtbaP4U/iGQ+RDRP5QnO
+ +UaelQhZuI8XJHR86rNdjuGvfeEVboKyVXd1oryCOEHXJ3/4iyO6wExc4xnOob4wMV6JwhvTwI4X
+ jkCPxbdw/uSEXzDzdrxgB11J1XUwDoESkA6PFU++0hOuzo+1ys9c8YoLPjMpD/yxL8I/pjyitx5M
+ paH+Rt+9HZxn5UcnT3/DKtm28z+1/eEXvrfketyndZMfd2oQ/sPzO7b73vK9uHevyKXmk1mXac6A
+ 7i4v7eHX5YTd84lahce12X3S1aUjfPLxqpGO72UvPBlel0/54gew7M7TfmA42l+06Yj7Gk7oeMBv
+ 8Bcq+Job3zRYqJzaEM6ffG7H+x13yNP7O/765NzpEf5V1+Sf/bq9whu+f805nHgIMEq8cwZOdY7Z
+ +Yg8MQ78uScXPrRAobyDP/l27MV7Aw/3oysDyl4PrbBUvQU3T+4/w9ohOqUjHX65vsmDHeHjN08Q
+ mougebCYfXUf4onyriN6mMesro9r9zHrN27tl/3Gh//gp79C6yfo5Yn7hsWC9UO3s6/CATzaYkvN
+ EhtvmojSYiIs4mXmZRUuM7klOhce5jFuxgXn+Hk4iDNQBDlD15F8xs/bZH/tI77LUF+2RVtvQ9fF
+ FL6cHT8ugQrgy+gvcY333egEvNSrftYb08COF44VeSy+hXv6GxZltR7RyYKO+zT0KyU1OQ44/PJ1
+ Ck/0BUz3hP6b3as6P54XxiqHdsVxX3mWTSyH6lCecQ8O98n1AYx9MnHs7wN5ta08rHvhyLvyRi/f
+ Q+zjh0Db9oxXO/qJe31iH1is+1Wf+f14/RsWiVpZ1d0MgD4JOOO3johi1z5VPOLDB4iG/IhonsoT
+ nONHnpWo4jEhoeOTz3Y5hr/2hXd+5SlbdVcnrosYJ+z6ZIe/ONIn0bpUnsXHcA71hYnx7WC9MQ3s
+ eOEI9Oj8O37Hi694O150B11J1XVgzVtefM5Cc9Rx6EtlM4w0mU/hR9yRP7byBJeZvODb8d8sT+Ex
+ rcF6ii88dKYvVx4b+uCXcNXH8iMIDtdjHWOXY07icR6+clih5HWe4jHAKNXLPKJznaf0EN3qqHVS
+ 3eQ1ad8v5Q/vyjvr8VPu/G88/Ad/wxPyg/bR5uP0Hz9PxlPr5z3jT+N/QvZ6uvxJgMOFmFSnn9QU
+ l378tqZlT9zAWyQyepjHfIn36ZExeT3bj81ecN82eXQplLfiCBz24q84JxBfwdCHCdMfkEpgflqj
+ LsZzjP4S13jDVx2M7/rZd8w9bz4BnYDhQ1ds2na8+Ip35VdhN8QlobLxlrOAMVY8C7vJOSOm9cRa
+ ccFnJu+BP/ZFeO6LN/yZi2cXJ/p9/eKv/OEBzPVxsTs/9Idfs4/lBxQO11M6lF2OOYlH7F0OF0M/
+ 5Vk2sRzuh7Poyl641kN0q6O1nzzFhzxqiLP4ae75tM+Kz7Y3PHLfp/33Aj7BL0/8Nyw28MoXfWi7
+ 9szfh9bu1pOZlwmXger2k5qXA9BIyX3ZEzfwfBPMYR7zzbjgnA/8zYtoA0UjOtj2pz7bAqqO+Gtf
+ +Kqi/UVbnbguYtxZ55cd/uIY/SWu8Wk3eRjf9bPemAZ2vHAEeiy+hdObjaZgJup4wQ66kqrrYNwU
+ gs7Vl3gOfalsYHYz+RIXfGY75O+XS/DdJwIuzQNM+t3xV/7E07dwo15k8DefU34EgcD13Oxepc7w
+ phLaFcf8+OXzMq5ROQ9sq17ZIw6290m3OmqdpCfxZmQeBXDGEB020n9mPK3ufmS7/srt1WcPO/KJ
+ fX1yHljs4cte+Pbt/NqX+5MA4lE0iijxqCHEACxStj1xA2/RlzjmMZ94Ki44+XkGybMSiYRwnZH8
+ qc/4cgx/7YuvalA+7guGPkzouohZtvukHX4tEbjXI373U5jkIV87GBdzz6s6eNs6vHRmAIb1cLzv
+ polSt2Guy+kcl4SyyF98lUa84T/2JR44d/Oop/FDjyN/bNUZXObuK6qP+VSewrPeHsQVX+qkL7qY
+ MTb0wS/hWgfrJBOOpTN5V5z6kB09RpyKoW39lV95li0IXqwD5+IZ9SsOtuuDhb6A5HbVVfHKo231
+ o4DGkXfljQ7Xr13/0u0P4L39JI0n74HFhl71mf8ET+ZvoKp6QmeGnLO7BAAAKHpJREFUi7aPxlK2
+ TXGDy0wuic6Fh/jAcIwLTn4eZvLkzIpHU/tTn/GqiID2177sLmDCdLjccF3EOGHnlx3+4hj9Ja7x
+ DOdIHYxHfvfHPDEN7HjhCPRYfAvnT2z4BTNRxwt20JVUXQfjphB0rr7Ec+hLZQOzm8mXuOAz2yF/
+ v1yC7z4RcGkeYNLvjr/yJ56+hRv1IsNT8RsW5PmGR3//p39Ef0d7632Liyf3gYWirj/32p8+Pzt7
+ vT8R9k9sXoFcDn8i8b2AHVzO3czmuD+G/Lg2x7jgHG8+5VmJxCI6OByffJU/b/r2177sKkJ1ch/l
+ Yuvpb1g+H+t+k3OGVtKrznOHz7lT4vJzqXEJ3ufIc8h53CQPMMprZr/qIMd9Kt/Cjf547/Br9uHM
+ VbbuST3UxJt8cqiw2q57U3HK6Q6S13mWnilZ/kXnfoZ+9Ls+ROhhv+r3fvKYUfeXDinoeuY3rOvn
+ 19/w8H0veFJ+buWK/KqS5saTsv67P/Jrz7aH3wQtnn+zT0KJTHHr8vR8okjzHD7pEge8/Dw0HqJs
+ vPTCZ2h4XT7lHcCyO0/7geFof9H6etTlEAAvI/8NfmJcZy3aHmWOPHV0zDt5ZZvHV9B+X9Kxv8O5
+ rr0e4VcZQ7fsY0aCohkL43evOYea+80DUFW3n3NOY7a+7OjEOPDnnnxYeSb9RfVyvyvGsuz10ApJ
+ 1Vtw1+OHDXWjLR7lMY314D0uWlENHPd53vjleAH8cuSBnXtPQOuRvDrw4z6BoavFDneG/2XZ+d0P
+ nz/j5dsf+HVvN/LJe33Sv2GpNfw86/xR/wfSEb3FBMCHlUODaDhd4TKTRKKLTS/mqcsgt+OCc/w8
+ HIDGrcgZuo7kM35/qWZdowwSgK8mHDEPe16qZSvtuARqgC+jP/dzqJeY5GF81888MZnHcZqFI9DD
+ /Q0/An354RfMRCs/4w66cqvroBuBtMdY8eRbOnR+huE3ozRXvOKCz0zeA3/si/CPKY/o9/W7v9E3
+ a8BQPq+GDRx+zT7cWZUteaIz9QiPFmVHD9ex2rVCyXvRw8o6OF/XMfRrPUQnRNffeBSWvLq/dOiE
+ wms98D+Dws+tnvyHFau5PQ8sZv6KF78GIn4zPwE4/M0lhzZs+qGicJkdoLi8mMc4n4nj9GYqfmrf
+ eZi2D4/7tu1PPuMFVB0znvwVxyLaX7S+vspHNwD4PeqTbT65+TL6cz/2jzJXHsa3g7wxmWfx8mHU
+ t7D2HbZwehPQpKN4V37ujbolFGHkFdr82SccY8U7/7SVH5jdXPHCRYfMJuTrGpfgybPjR6TsU3Hw
+ pb5OQFzlDw99C0fBYkMf/Jr88JQfExyuxzrGLsecxGNeheOFPBUni3mW3SjVyzyic51Dv9ZDdKuj
+ tZ88ZvS94Zr74VVm/NzqBU/qz61UQL3cvgcWC3j4k/4Y/ln4h7nsTwCsrSkvgfd5Cv4EqdkBfO0h
+ PyKaB4fl0+PhhGfkyZkRJz9ekNDxyVdxdgz/5FM4N8qvSZeKK9elDErQ9QGh3covI33CSFzjDV95
+ GG+BgGaemHtef1IS6LH4Fu7pb1hL7+hkQcd9GvqVkpp8TsDhV99Xe8qP6ZbuVZ1f3wuF44XntO6R
+ 8yy7Ucf7J3vh1rmTTpUqdO0njxl9b7jOPdH+Wx++51Of9J9buSK/3t4H1qs/+cHt2jN+H0R7sJ/0
+ JVHb+sioTxQcAvc1MtuqffslfcX5cHhGvDWe7e9Ei679yVdxDjzEh68LKL+PWJ9QiOt6fZ1lK7/s
+ 8BfH6C9xnLtewtKH3gVlK0/lpV/bNQtHBo/Ft3B6E9AUDAvMKz/jDrpyq+ugWwHc7bHiybd06PxA
+ Ml3SnsSPOOVrdgYysuoMLnPt7/ixJ/tUXHgw91B/o+9ypE5XXvmpD35N/uVHIBzuu3QouxxzEg9T
+ VZlciSB5nWfpSSyH/OIdfZ7SQ3SqtONkKSF5te066FB+TQ9i+cVP1p+3chU3vt7eBxbr+dIX/TCE
+ +Op+0mPLmvISQHyIThV3M+O4P4b8iGieigvO8ebbnVnxaILD8clX+VmJ+OKv/dpWGe2v+n19xecy
+ dS2KH/HyF0/6SJ+w3Y/nrpe45NG7oGz1vXQzzPr4k5IMHjt9sLX6hSEY9Z75GXfQlVtdB90K4G6P
+ WX+fH7ydH2umsyozH/NH/5rJynxzlK08J/CPKY/oT/AXb+pk+vTlymOjTvwSrus0n0zJQ3/6WnGt
+ I9smvxShn9k4uFg6OM+yBSGKAcpTPLIXrvUQnTMxdu0njxl9b4TQBrxf/aFXvuCt9t6+19v/wGLv
+ X/aSb8InxDdYsjoiXha4/MnBJ79PQzNjaI/hfeNmXHCON5/9CO5F0cEmTpdC+WwLOOzFX3Gso/1F
+ W9ev6x224mWHvxrBJQt+zqPMlUe3s/Ki4kp/Y7xwZPBwfyuvbcdLj+Jd+Rl30JVb6Vdu8NMeY8Xv
+ z63zA2sdaq54xUWHzOQ98Me+CP+Y8oh+X7/7G32zBgzl82rYwOGXzyk8nlW25LnsXkWPEdd5XAfN
+ 2/ENCz9k/4aHX/kbvknl3OaXq/HAggjXf/rFX3t2vv29vsS8rNj3JwcOGzYvkWaKRnsM79s/44Jz
+ vPnsR3Avig42cTf/JJx1jTJUn23R1tux6x328jtftzH6S5zrOZGHfF0/+45pXTpeOAI9Ft/CuV/4
+ BTNRxwt20JVU6Zdrvitpj7HiybfOrfMzDL8ZpbniFRd8ZvIe+GNfhH9MeUS/r9/9jb5ZA4byeTVs
+ 4PBr9uHOqmzJc9m9ih6uY7VrhZL3yf6GdX5+/e89/NZP/Vo1ewVeqPHVGf/u/Blnb3/Lt+Pofrs+
+ IXFq+QTtNwXfHCeGP1HrUsC/iytbl5hvAtl46QXxvlzNo40BLPtGPzAc7S/avB25r8HZ/fgK2lZc
+ ITipbi+063ymrw0l2OMGb+XruEMdvb/DRWcV0Hm90KvqslzVz+i3Agw89cp3n/CeL3yYIFaq1Ln3
+ ec34W+A/9bAptaPGxXkmf+W9oV7uNxOWZa+HVkiIwyi4eeohiIJoi0d5TOM6eY+LlvGLoCz6zaON
+ vBx5YOu8lcf5zLuvv/srnNIx6/n5az/4y3/9Fz4e/4v5lPiRzlfmG5Ya+YKzR86vP/OLsNafhG9x
+ cdl1uJkJjrgK9GHw8Hfi500iOA4JhMuPzXErRNf+5Fu8ytf+2pfdBcy7hzP3pVDdgixbaetWLD/r
+ WZcw+7t6ycNCmZfxIgpvTNqpOzgCPRbfwj39DWvpFZ2s87hPQ79SUpPPCTj88nFYVyrPcev3qs5v
+ xpkBr+NeKM+yBVEe5OO98ISZi4Vb5046VarQtc96idc2/3Do6z/4rE/4oqv0sGJl6ya7zqvx+s0/
+ 8svOzh/9Pqj3qfomUeL7NE6X7G8O9cnDxvKw0uHY1uHwEOXHSy/or8OW359Myw+gAMXDS1N2K9j+
+ opWj4qSqL4nrBM8NfoHMyyX5NI16vaEEyh+76nE7h7hDns6/45/9rHjSK5wT8JNf9e3ac5xiji+H
+ 8+s3SdGLF+uegVe+MTsfESfGgT/35MPKM+mL9wYe7kdX4steD62QVL0FNw8fCuyPYe0QHU2qeJpH
+ ASK23zzJZIcJmkd5Fq77SF4p7jqUV/uwr5//xAef8azP3X7vr37C/qrjXd2Pwbha37BSOP5OaBzK
+ F+JE36lD5ZtFp7DEx0bQmnP4u0PxrVj+uiQ+HGz3godm2/HJ58Oct2nPX3HMoPp60qVjnOsSQAk6
+ flwWejXSJ4zENT7tJg/ju37miWlgxwtHoMfiW7inv2EtvaOTBfX5SeahXympyToDh18+DusKxvJj
+ gsO6130ouxxzEg8DdR/NIIJ5nqf+cVB+8Y57cLhPro90WpkdibzPelHf9e2d2513fuFVfFixYNZ6
+ dcffeeuLz+64/t2Q8eP7E1Zi31iyvwFQfF4engla07vYLe4+seXHi4EiW3DG8xOQ8QNYdudpv8J9
+ CYBf28xbG84g2/EU/ug3j/JySSJN6UdmJ9jjXK/LPcQd8nT+Hf+xX6Yxj8J3dWQf86692q8yd1PO
+ oWa/eUteAFX3nPkmQn7harawRJ4YB/6c+4eVZ9JfVC/3oyvxZa+HVkiq3oK7nvTFsHaITulIh19U
+ U25RDVz7zSN3XoqgebCIjoS0HiJWBkVmH3+I+77za3f+5g/93k/5oVBetflqfsOKSl/xGT90fn72
+ RfgbDR/R4eby0i/RA/Rh8NQjfvDB2Z6HhthxK3KGjq/LAL/iCCSgbIfFrhraX7T1NnQ8Mb505qdF
+ O/xaInBdwsQ13vBVB+NdCIIZF3PPqzzgzVh8C/f0N6zDOVAsCTruUwmYcwFAO7aBw6++F/aUHxMc
+ 1r3Ot+xyzEk8DGR6Dy4qDivnWXaBxO88oit74da5k06VKjT716+ff+Ds2h1fdJUfViz4aj+wWOGX
+ v+i1+Jr6ZVD5Ok9xffJT9DW8X58o/397VxtraVWdzzn3zgxQgQEKSP1ATSWhlaaJ/dNUrYxNTEYL
+ CRoxVRtpy4/GtJo2TfnRxFT7o/5ppLRNjKlaCtoMatKG4h/LxJg2/eE/EezwVQlWKylcYGBm7syc
+ t896nrX2Xvs97713mLngPTPvvtyz9seznrX22muv+56Zwx1Mc+xJZyiNJZkCcWZ+meMMDceksAkA
+ gxedMpZ+jN2Hsk4YckcGpE8P8JL843rwO0faX+jJH8P17DA7MccF43W7Diz6xJnHapVPhBpL3/YL
+ wrJPjghLfocjblDLDJQM+GuxT5z0xWfxpZVWZr8jDiGl6MwutsCXfQLe2BvSAyb8LUaS36FvaxVX
+ 48dzRdyIc37t0PBQwoL88Tj42BeyAEvwhiciCLv84QLCGBcU/ZW94keKn+yHP0RQ1ebn3WSOSnDz
+ 0ZvecH/w7VS58wuWRe53rrPPZ/2xnb5+omEOh5Gb5rXOI7Z1HqJwXMdC/EShuoCkibHW3Y7jQSR7
+ jb7xJzdor8CQUmY3+ZvGUPN1w8s/OpH2F/ONvwYKO8ZHorATQ/EVfeIMqFb5Km58wuqdg4WKcU75
+ lOLnkaRQnIHDl45DcQWBr0NgQXHfLK/8/LKeGEiQz1PnFXYIUh7RTsqDXj7JP6NjT+zMv+kfHnvf
+ m/5FTDv7dTkKlsXwlrd8Fn8i+Jnyk4VBr8HVvH7y2FFy7ElnKI0ltY7J0qlnaLjxCcvjxbhZ9Hpx
+ tSmLP+JnqR+X27rRdB7Og3PIY4YdwEb6eeqcHJ/0aC/ITW6B1znKvy3tkI47qRa4v7RvX4l9+M59
+ X8Dhi3bcr7oORSzIn9iXxr6QBXnMVKFhhF3P5mmnjg1rTXGTXvEjxa/Eg7UqIkLVPz/2/jf8LXtL
+ 8NI7pSXw+AsP/NlsNvn00CXhKfsh5UMrp8/D0uFqHfstHfSbdSUF6JQMyro2uZBVWve4MclT0iwk
+ lwyU5Cnm/Rio73bNHc/agndY7IfryX83X/QanLtoovKJsI4dVPaRL+GiXw1/EwiEKc4hS9DT3SEZ
+ +kPS3WqE47bFTkPsg8yPKfObzeZTK09OQPB0uc5egetYbd3yBfoCYh0dH5fpsFTMWEd6Yk08fT8q
+ ndvxCeDkp8yFfTB98uhNV38q0ez4rsVg+doXH7gVSfA5OF/816Xzw8SOdJktOQThOrKiuZzMhVj3
+ 3AE+P2FJ33ko+usePmalzJFWWVHsowNgsr+wLh76bd3sdx0WAy0u8fb1enbq/mPf/f3UeXqkYYpb
+ mbDthJupQ632hbcRei7L5bF94VvxSjLOKUkaMv2h5rzBH/K07GR+513goR/hORR8XN8WBon76xsU
+ jxcfhqMsMBA0Z3SWJ0FLqoQr6+Lhcrw4QeFBh+dtE2ixD3woFBOz3zt60+u/EKrLIi0uy9m++MAH
+ 4fyXcD/3lA2waPQuL0/Pt8kssLtll9TvWOnYBL4x1noctsa+kIUng/OYE7RfBOhSMbB1N1DsY0bm
+ wz/ISDJDG5/L7CbtxHpacPNFr8GRSS/F/mb8JQ7hRy+uRpX0o0iEGV4OrDcSi3R3SMY5DckgzdJx
+ DX/YG+LHHI/3dPidj+ZNPzXaj3PGvI3dElHVHOJnOE4QKJyPyzQjZOtU947HkaPEExCbB4EdR+Fh
+ xydiHRLF6hj8+MiRm66+J6kvTVc3Ymnc7Tn6Dw9cP+26r+MA/HfD+2ECxsseh+ZjHaoVEb9rpaOx
+ 4LZuh2+HnYA+1mXP68BYK+uupuvhhgjAi/RIu7BuGIP7kbiUvUwjv1pc4u3r9exUPtnROO+nzssh
+ vtIvhSP5h4lwt3aEb17jHFzyHABQHAYkcPQrSfKb/lBzXt1Wi48cOy07mX8jHvphcXB/fDz8ZOQw
+ uoV94asWF48A7bjbhBtOYwjr4dv3xVHlsdXS+jwYRxwNg3/M+NlusnLj0Zte962is2Qdi8tyty8+
+ dN10euKbSIQrIqmbS8lD9G2aYC4PFy1liYpGFC2pF0XX9xTCbc30sq9LrBRLxY9RFk/xT+4wqbSM
+ 9UgyTBjOWsFrKAOxTkOBq365ootQ5DDxhV6Kh0HMbhMnw3nyc9n5kn9tIGwb9bLQfxuTBfRDMgI5
+ JIFfaI7bFjsL5JjI/DYMjM2npqLo54x5G9cdFhrFAzvnOgPgONpxnOmHpWJGONnRehS95Ib4wet0
+ bscnjHc+f+rk6vT69Ruu5m/4zbrL1F+evyXcKKq3XPvdbmX2a7hkj9hx1ssdp2fZYXfQxpJMAZsu
+ HZvXWPriCbwr9vSDz9jRCr/T+rWkXQHwmvzjuvvFdVt2u+iGnvxx/wwXdky/+G96MbSNJH3iDKhW
+ +SpOxRnrhImo2je95DcDhanihy1DMeYNjlb1ja/dF90GppGuT73AhxShvda2Bb7sExpb2gEm/C0G
+ uL+0b1+ouBo/s8Ai0vDEOiYZnvTDy8e+kAV5zFQNp/Gk+AERP0wNF01xkx73m/3vJo/Op7NfXfZi
+ ZXtVVGPXyyzvfOSK6fwYnrQ6PHHZoXoS5suks2dyah2g0jEFjaXvyYp1JSk7bXKZHZ+GpgHb9YXk
+ koHin8w5f+inS2J8nI79cCg7Pp/9d/MtX+Bc1USxvxm/7xvC+Rb9wgJZGZ8mEAiDF5tGAk2+IRn6
+ Q5JWei+Oa/jhT30Sgd+naqdHzWHmd54yn/CyZ+eD+BjO9IplG1v8PB6WD5wgUDgfl2lGyNbxzSa+
+ ui+z4zwBgeQ67bh1N4z57xzZvWv/ZP9VTyX40naX/wkrQv/bP/+TrrvgHUiOgzxiZYmyBRhdKkmt
+ Y7J0HIaxLrMup61TTx3ite7zXHcHaE88pPWklb5hdH2KPsfB7xxIssBnST5TtxZ2TL8smF4MBSz6
+ xBlQrdg3BTSNpU8+5y36hMkvmZNeGNQyVpxPVuq+yNPbF3kAbGTyx255X6/PH+MGF3plX/DjVOw4
+ HqI2BjTt21ciLmKOfQKHryY+tGzrUGR4vKiRt+oRwPWIh+Lr4YCyCMKu7Hh8qregAS7zYIzfuvCt
+ Iyurv362FCvbrqKTNr703fse3jP7ybEv4QQ/GMmsrPHdluSxpMEcxsPrnqyOz9nguQE1+0lX1cnj
+ Y9IyiXNy1eTTepj3YyCx26Vbmqed5Gb4G0laxwN8iQddtsq3CT/3kS/hol/VLniaQNgw4pckrDf7
+ zuPQH5Jyu3113LbYaZk1yvyYMb/ZbD412o9zxryNkRHWI8ppFA/DcYJA4XxcpsNSMSM+2TFWxDN4
+ aEEvXI9jwBT+R+Z7jux+/Ucm+6fHEmzpu8rYpd9GbwP4P6Zn//jQ5/A3iLfmS6VDjUsIHeaCQuC1
+ AnBb1yWr6wASYKK/7rbLutNG0to8m0m/vOhZ0hFZ1gkiP3s+X4uL1rMfFZd4+3o9O5Uv9t3fT50X
+ v+z29aofyS/vLoh6axnHevksDhGvJIGnvSRlr9zi1kSPH1Xh9O1k5o14bL54bhvQuL4tDBL31+Ha
+ N+INvB2TSfKY5DhYsR60pEo4m7c8wpf0CdCL8+CjC589csPr/ghGTPGsahaXs7at3Png7+JQ70Dy
+ n89NMguQFMgWpoCPmT0GaNbj0gjP68QsszFH4rFcMz1r/XVLKkuuCgDIeQ3Okak5AfXTus83/kIn
+ DFKvbKSYb/kIDwdNWfakpvlBfu4r4iRPKy7563y8tLEPzOlS6lJRzy6pzRt+SPKyYWVIAr/QHLct
+ dhbIMZH5bRgYm0+N9uOcMW/jusNCo3gYjrwECufjMh2WihnxyY75YXF0HjBEk93JEfyDER8/euPr
+ Ph/zZ5v0zDvbtpX2c9dD181Odl+bzaZv5qHjUjEFyq1RCLxW8LKXIpOBBMRlV1ERkdsq60ruxSco
+ GSyXHklHpOmlZutsLgs+YMVOxm1S5Hp2Kp/0Nc77qfPyI9yJuCW7iE+4WzvCN6+8jdBzWS9fvdrG
+ qqtpMN9PkuQ3/aHmvMEf8rTsZH7nXeChH+ExFHysYuJD8ri/vjHxqNjwGIOHdqQnVuw/aAsPZoiH
+ wFe/aIH70RPT1RvXb7hqqT+2wO1u8nL2/KH7Rpv88LXfnZ+YvRWfQ/mqZUFJvno7lBIDSRV4XquS
+ VJFMUKg5VLLNYUwqA5g9NRko9nk95U9xHVkc+Cyb5A0/TL8smF4MZa/oE2dAtWLf/dJY+uRz3qJP
+ Ovklc+IPg1rGStlntWM98vT2RR6sNTL5Y8Wpr9fnj3GDCz232/CHvSE74SdkaYZzvuCxtYgLeoRq
+ DH/xRVyJQ6wDhgXDlSLjY1/IgjyyQ3q8GI/Hg6PEE5DJ5K4Xj+355bO9WNl2LcbnTLO3iEjCO7Dh
+ 85GLngsKgY2Vo558nAAmgD5efCLx8JX1SDE3QH3DaFyecHwsfucwVOBdFrzclD+8Sz5BHJIYkqnd
+ 1+vZGcZJ3wnoTPVDvvX16Df9cN/dro9a4ZffA+yXN+I0IO1y236SlD3b4UDr8Z+RnUzvvCo2yU+b
+ j7ga3se1aAWJ++vwXLQsXDYmD+2IRlliRclpSZVwNo8vK374g/Wj+F1Wt+It4F2EnQMvnvXnwE5j
+ i3iLuNp1X8ORv5l3jLngYVC2+OWPSxNFBEBmWRF+qTQmfX8dScXkoiFDyEC5/JiR+bAPieQt665X
+ x7RSDNq8E3ChDhOfWS32pV/5hKvjxM/t5iK46BfjEfy8dG4Xc7nYkN/2ZfOGH5KhPySBX2iO2xY7
+ C+SYyPw2DIzNp1aKEBDcF9djh4VG8QCCRUpAsKBDO0WAxfmLGet48UavFKv5/Jx4C5hCze7Z/5aw
+ v2O8RTyxPnsrcuBLzI1SFJgNXix0OW1dl50dyxZf93mfpgmvFqVoWDJCQfqGqGOoYWRj52EPL0je
+ wGdJvOCmID9MvyyYXgwFLPrEGVDN5qVWcSqqWCdMREWfMPmV9cKglrFiDqRW9Y2v3Rd53JxpZV7q
+ BT6k8fb4Y7wR3uZP2Q7pW/+1v7Rv8wEt9oVeGgOHr7yPug4YFuSPx8HHvpAFeWSH9HhRhMKuv628
+ 68WLzo23gBGFkL1TiulzQ67c/dBH8cth/w7JMPC3iJ6smyWXXQque7zsUvkYwpPYk5SQmnxaJ7xe
+ Auq7XdO3sUvi47TSvN8Sxw3wuT4B/qLLswU/95Ev4aJfcNDtQjaBsGHEL0mgm33ncegPSfe7EY7b
+ FjsNsQ8yP6bMbzabT+2VfMKCF0dPTrtbj7733HkLmELNblyB/vy5M7a3iJPJ13CJ8beIusTlbRxv
+ l4WICy7sEusSMosjgiw2pq/k1hOU65VQp0vvPznj0kfAo0jFfC0ujih23DCLRuLlOPYB2bNT+aSv
+ cd5PnS9uo9PXi426OW08NtGXfvmjqOmSR5wGJPC0l6Ts2YEMtB7/GdnJ9M674K/NR1wN72O9XfMh
+ edxfh5fiBjyPMXhMWt5IKL+C1nnw0cLvTbvpzYfP8r8F5HY3efGs3wRxLiz9/fcvXNkz/Wt8zu6j
+ VoTyJWYSeVFQluluMgcxr3UPErMwrSMLS3EjxMKdigtHsqdlZW2/ONQxUTKArs2bv/TLRfhFJP1O
+ /JzUOOMG+QGo8z2/nbfald8xNjO8nMA10ubxLbaejEAOSSPsN8c1/GEP2Jdkp89t48zvfITZfGql
+ CNm5Yt7GdWeFRnGwfCAvgcL5uEzTc1vHNxsz6K4XfuaqW/GvMB+N2XNVWozH5hFYvfP7+6az6R3d
+ tPuFthgge+ySUqQiFLlp+mVdudZ/sokkLkUgrq3ppUa7Nvb5gg9YseMTxKUi2Nfr2al80tdY+rwk
+ SZ9uFTO2b3OrTHg8iCr++qgVvI3Qc6lLHnEakMDRryQVX/NgoPX4z8hOpt/IX5uPuBrexy/DE9aD
+ k9nsDw7vf/WO/9dscthezr5n38tpYsm4D3aru/7nvz6Gvy7+FD5sepFy1sKEJLXLWoVfqnRX++v2
+ ExVf5ZJ7kqtI1JQv69TfpPjEaXnRoB7vjhbcfLWXcekYiv20nmjSPnORWvSL8QCv/PDAuB0WJfA3
+ UlEsV92jqrEXhyg2jUy+l67jG/6w91LtFNLUyfzOx1WbT03F188Z8zauJ6taZmEuOPISKJyPy7Ql
+ 2GTyHMafxFPV3+Cp6oRNjE0RiCswxqMfgQOPv3rX+vrtk9n0A7lK6bJ7EYrcNF1mZRG4hH4dbZ5N
+ 41IsFtYdFXiXBV9o0IFdFglTIW6xmBS9np0y3/Dn/chQ5a9+cbvZP/qhdfnh/b7gbTS/paDLy22E
+ d60Ejn4mSX7TH2rOG/whT8tO5nfeBR76YXFyf3y8HU9YID2A3wr68Rfec8WPsytjXxFQdo7R2DAC
+ q1/G28RudscEbxN1aVJRwuWNu0ICu8x+iS2VrWiNT1ip+CAmisuAjEAOyaHTcRyLCc/hDOxsxe9+
+ E2Z2U1Mx25YnrAeRX+PbvxTboe5YsIai0p+zt4n/+/DHupP+NhHXjk8gcfsMP1CsSvUin34ilyec
+ eKYwvdTqk43mCz5gxY5PUN8vK3hCv+j17JR5t6tx3k+1S7eKGbuUlT/vV7hwkKP2pVeEFp5YjBff
+ Ec6hIiR7bbEoRnr88VPktOwUUnMI9hCnBR4WrfDYcfHDKYbkcX99Y+JxPqjbGAaem8/n49s/xmvr
+ l02ybGvlcw5hbxNPrN+OS/4BphqTmTmtUFgRwIILpLD/5LUJNpOpuHBkeF+nYlr3+VpkxEID6FJP
+ jnCB6jFvM0mfAH+pfLJbxwXg+8hFatGvhj+KhlPwcsJ+I7FGd4dk6A9J52yE4xr+sDfEjzlG/3T4
+ nY/2TT812o9zxryN3RJR1RziZzhOENfBnzvn3cpt49u/FNAtun5TtkCNy00EVr/8yL7ZrLsDtw9v
+ E33Jq4ULJqff+gBApktPRSS3KaTGImRjn9+smFCNuMTb1+vZqXyyq7H0VU3qvPj5Cndy8XL/svtu
+ V+jea7213JcuOaMT3rUSeNpLkvEwnqHW40dVOH07mX8jHvphcXJ/fHyqf4aF/wfwwenK7Nbn333l
+ f2RzY3/rCLS3ZWv8iIgIHOhWdp98+Gbk6p/ioxC/xGm7tH6JLZXHJ6zxCctrnp6suul38fuqPvPC
+ 4Sv+afKB6clIpVGeegTGgnXqsdoQufKVR9+DJ67b8Psd3+Y1i8WqVC9q6idyeVKJZ4rek4mtCy5Z
+ 8HFSpShm3PiEVZ7AGJ+d9YTVzSf/PlmZ/eXz77783g2TaFw4pQjENTgl8AjaPAKrBx59G35Z4G3T
+ 2eQ94xPW4ts6SzbFZUDWRxG9Hc7jobD7Ot9eokg18qXa2Yrf+Qgzu6np7a29XcZ+MW9je7a2HaJ/
+ 34lu8ukj+1/9n0ll7J5BBMaCdQbB20h194FHr8P/VH0bPgpxM56QVoRTEpcnpvEJa9PixTjh8ocs
+ T1BDQffihQrRFDsVk4HiCI4oLsEfsqF3vgUem4/zMwUfW9HCf3ir1x2Yz1f/4vD+yx5s+MbBGUdg
+ LFhnHMJNCA789xt3zY//yXQ6uwUPAedFkvNyQE0lDNLexljztzNl3efrWDDiCIce7470qR7zBk36
+ NoxW+UIv/kDdEU7U4hbfdjb8USycgpccPI3EWr7qZr2MQ39IOmcjHNfwh73Mi/6WdhpiH2R+5+OK
+ zaemYmbxm78AS59fX9312aPvuuQHCTJ2tzECythtJBypBiLw5ceu3L3afQIrvz+bTi/WJfVr5EUl
+ tKxIsLmsRcMRNg+CFrdYTIpeXNcFPtkRTvqqHnVefoQ7UdSSf/Qj+eXdBdErQrrkqVhBwVgVF0jg
+ 6VeSLI69YlHs9PjjSeu07BRSc0gbXOChH/IY3afxj2rdMb1gz+3Pvn3vM1l97G9/BDz7tp94ZByI
+ wIFHL95tRWsy/QQ+hHNluaS5SOEGlGKzUGScM823RcYvfeaDivHldkr8XhRZRKi/6BeLSPD75Q47
+ Q0Wn7BegUpxM38ahPySDNEvHbYudzBv9zI8585fN5q11kyfR+6vn1n/2c5PfnL6oyfH15Y5Am8kv
+ t7WRv0Rg99efeMvsxPEP4dL/Fr5fHwuluKSixMsdJ2XzmGhxi8WkFKV4dlngE6Fw0lcVqfP0ye0W
+ PudhsaIf7nnMx0ay7BWhhScWYM0M92kSeNpLUvYMMdB6/Bs+GUF1SzuZ3nmTv0+A4CsnT87uOvzu
+ Sx/I0LH/ykTA0/GVMTZaGYgAfjPbeV99Yt98Mv8Q7vz7cCAX9YtDHbt+Kj5tkfFLH8Uj45Lpyqfj
+ r+PE70WRRYQ8i0WRRQQqph9FIswMFR2zVoqS6eXxBkWHvEGaZS4msN/Yy7zob2kn80Y/+OfdM/jL
+ k3tOzlbvPrxv77ex2Q2qZiiO8uWMgJ3l2HZKBA4+ft7up2fvXem6D+Et434Ugt2bFRO6vUExKXpx
+ XYlTcalFKMbjExbjZUUKDWIdJfAb+Fly97OvufSfJ784XefC+PJTj8BYsH7qR7CBA/f+4JLz1ifv
+ xyPJh/FJ+rcDZXdKLRWf8Qlr8e1j80RlgcN3ebLzJ6fyRBhFChDgvg22uyd7ZveMf4DuubbDRFyB
+ HebW6E4Tgft+dPn568fxa266fd1sdj3elOCfKPOjo1x8uzY+YfnbRASyFCv0c/HCb0l4ZDad3Y8P
+ Th2c7971b4ffceFTTdzHwY6LwFiwdtyRbO3Q+fc9+dru+ORd+Dfa9uE22q91fq1pWe3Kb/c4wfn2
+ mEsxS09q0nPbTtTiFotiwx9PLk7R/JkS+NIfXDdFoxST0B+SztkIx700O92T3by7fzJbOXhsNv/m
+ kesve7LhHAc7PgJtJu94d0cHhyKw51+fvGblJJ68Jp0Vr+uBubwUmygPC8VJRy+cipFXO5qweXVc
+ WNFBt85jHRMBqx3hm9deETqV4kW/oBeS/MYz1Hr88Xavm89/BP2D80l3cNKt3P/sb+x9bEh9nFue
+ CHhWLo/Do6dbRAB/Uvyqb/z4WvwPItdPZngCm3TvxCftL81aLAKYMGmtjjlUFWIxykVq5z9hwfGn
+ 8Ynzb+Fzbvcfn3YHD++77Hu+o1GcJREYC9ZZcpAbbgMF7Px7/+/nVqbHr5lMZ9fgf8y+Bk8g7OPh
+ 60345P0q/8ddK156hCJVFDM+oGGmFjVPGce/0k9YcPEE/Hwcny44hBJ6CNs7BPcOrc92HzryzvN/
+ CEc3eAzbMELjwhJFYCxYS3RY2+4qfvXzRcefeuNkPkUBQyHDNxLiGpSna3DrX4Nixtuv4vXKPWF1
+ MDvt5j/sJrNDkIcwftgK1ImTq4eeX7nwsfFfktn2TFgawrFgLc1RvcKOfqfbdeHzz188Pd5dMpkd
+ 3zudrO6dTE/unUxWICeXwJu9+EN/jPGNMYqa+jEnd9cg1vBEt4aiszaxD2HOpmvzebcG3TX8Mru1
+ ldnkGfwt3RrK4Rp41ubT2drhC161NvmV6XFRjK9jBGoE/h//xb5CiJqhJQAAAABJRU5ErkJggg==
+ installModes:
+ - supported: true
+ type: OwnNamespace
+ - supported: true
+ type: SingleNamespace
+ - supported: true
+ type: MultiNamespace
+ - supported: true
+ type: AllNamespaces
+ install:
+ strategy: deployment
+ spec:
+ deployments:
+ - name: clickhouse-operator
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: clickhouse-operator
+ template:
+ metadata:
+ labels:
+ app: clickhouse-operator
+ spec:
+ containers:
+ - env:
+ - name: OPERATOR_POD_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: OPERATOR_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: OPERATOR_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: OPERATOR_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: OPERATOR_POD_SERVICE_ACCOUNT
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.serviceAccountName
+ - name: OPERATOR_CONTAINER_CPU_REQUEST
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: requests.cpu
+ - name: OPERATOR_CONTAINER_CPU_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: limits.cpu
+ - name: OPERATOR_CONTAINER_MEM_REQUEST
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: requests.memory
+ - name: OPERATOR_CONTAINER_MEM_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: limits.memory
+ - name: WATCH_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ image: docker.io/altinity/clickhouse-operator:0.25.6
+ imagePullPolicy: Always
+ name: clickhouse-operator
+ - image: docker.io/altinity/metrics-exporter:0.25.6
+ imagePullPolicy: Always
+ name: metrics-exporter
+ serviceAccountName: clickhouse-operator
+ permissions:
+ - serviceAccountName: clickhouse-operator
+ rules:
+ #
+ # Core API group
+ #
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - services
+ - persistentvolumeclaims
+ - secrets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ #
+ # apps.* resources
+ #
+ - apiGroups:
+ - apps
+ resources:
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - apps
+ resources:
+ - replicasets
+ verbs:
+ - get
+ - patch
+ - update
+ - delete
+ # The operator deployment personally, identified by name
+ - apiGroups:
+ - apps
+ resources:
+ - deployments
+ resourceNames:
+ - clickhouse-operator
+ verbs:
+ - get
+ - patch
+ - update
+ - delete
+ #
+ # policy.* resources
+ #
+ - apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ #
+ # discovery.* resources
+ #
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+ #
+ # apiextensions
+ #
+ - apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ # clickhouse - related resources
+ - apiGroups:
+ - clickhouse.altinity.com
+ #
+ # The operator's specific Custom Resources
+ #
+
+ resources:
+ - clickhouseinstallations
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - delete
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallationtemplates
+ - clickhouseoperatorconfigurations
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallations/finalizers
+ - clickhouseinstallationtemplates/finalizers
+ - clickhouseoperatorconfigurations/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallations/status
+ - clickhouseinstallationtemplates/status
+ - clickhouseoperatorconfigurations/status
+ verbs:
+ - get
+ - update
+ - patch
+ - create
+ - delete
+ # clickhouse-keeper - related resources
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - delete
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations/status
+ verbs:
+ - get
+ - update
+ - patch
+ - create
+ - delete
diff --git a/deploy/operatorhub/0.25.6/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.6/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
new file mode 100644
index 000000000..1a42a88be
--- /dev/null
+++ b/deploy/operatorhub/0.25.6/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -0,0 +1,1453 @@
+# Template Parameters:
+#
+# KIND=ClickHouseInstallation
+# SINGULAR=clickhouseinstallation
+# PLURAL=clickhouseinstallations
+# SHORT=chi
+# OPERATOR_VERSION=0.25.6
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhouseinstallations.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.25.6
+spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseInstallation
+ singular: clickhouseinstallation
+ plural: clickhouseinstallations
+ shortNames:
+ - chi
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: version
+ type: string
+ description: Operator version
+ priority: 1 # show in wide view
+ jsonPath: .status.chop-version
+ - name: clusters
+ type: integer
+ description: Clusters count
+ jsonPath: .status.clusters
+ - name: shards
+ type: integer
+ description: Shards count
+ priority: 1 # show in wide view
+ jsonPath: .status.shards
+ - name: hosts
+ type: integer
+ description: Hosts count
+ jsonPath: .status.hosts
+ - name: taskID
+ type: string
+ description: TaskID
+ priority: 1 # show in wide view
+ jsonPath: .status.taskID
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
+ - name: hosts-completed
+ type: integer
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
+ - name: hosts-updated
+ type: integer
+ description: Updated hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUpdated
+ - name: hosts-added
+ type: integer
+ description: Added hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsAdded
+ - name: hosts-deleted
+ type: integer
+ description: Hosts deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDeleted
+ - name: endpoint
+ type: string
+ description: Client access endpoint
+ priority: 1 # show in wide view
+ jsonPath: .status.endpoint
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ - name: suspend
+ type: string
+ description: Suspend reconciliation
+ # Displayed in all priorities
+ jsonPath: .spec.suspend
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters"
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: |
+ APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |
+ Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: |
+ Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other
+ properties:
+ chop-version:
+ type: string
+ description: "Operator version"
+ chop-commit:
+ type: string
+ description: "Operator git commit SHA"
+ chop-date:
+ type: string
+ description: "Operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this resource"
+ clusters:
+ type: integer
+ minimum: 0
+ description: "Clusters count"
+ shards:
+ type: integer
+ minimum: 0
+ description: "Shards count"
+ replicas:
+ type: integer
+ minimum: 0
+ description: "Replicas count"
+ hosts:
+ type: integer
+ minimum: 0
+ description: "Hosts count"
+ status:
+ type: string
+ description: "Status"
+ taskID:
+ type: string
+ description: "Current task id"
+ taskIDsStarted:
+ type: array
+ description: "Started task ids"
+ nullable: true
+ items:
+ type: string
+ taskIDsCompleted:
+ type: array
+ description: "Completed task ids"
+ nullable: true
+ items:
+ type: string
+ action:
+ type: string
+ description: "Action"
+ actions:
+ type: array
+ description: "Actions"
+ nullable: true
+ items:
+ type: string
+ error:
+ type: string
+ description: "Last error"
+ errors:
+ type: array
+ description: "Errors"
+ nullable: true
+ items:
+ type: string
+ hostsUnchanged:
+ type: integer
+ minimum: 0
+ description: "Unchanged Hosts count"
+ hostsUpdated:
+ type: integer
+ minimum: 0
+ description: "Updated Hosts count"
+ hostsAdded:
+ type: integer
+ minimum: 0
+ description: "Added Hosts count"
+ hostsCompleted:
+ type: integer
+ minimum: 0
+ description: "Completed Hosts count"
+ hostsDeleted:
+ type: integer
+ minimum: 0
+ description: "Deleted Hosts count"
+ hostsDelete:
+ type: integer
+ minimum: 0
+ description: "About to delete Hosts count"
+ pods:
+ type: array
+ description: "Pods"
+ nullable: true
+ items:
+ type: string
+ pod-ips:
+ type: array
+ description: "Pod IPs"
+ nullable: true
+ items:
+ type: string
+ fqdns:
+ type: array
+ description: "Pods FQDNs"
+ nullable: true
+ items:
+ type: string
+ endpoint:
+ type: string
+ description: "Endpoint"
+ endpoints:
+ type: array
+ description: "All endpoints"
+ nullable: true
+ items:
+ type: string
+ generation:
+ type: integer
+ minimum: 0
+ description: "Generation"
+ normalized:
+ type: object
+ description: "Normalized resource requested"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized resource completed"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ actionPlan:
+ type: object
+ description: "Action Plan"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ hostsWithTablesCreated:
+ type: array
+ description: "List of hosts with tables created by the operator"
+ nullable: true
+ items:
+ type: string
+ hostsWithReplicaCaughtUp:
+ type: array
+ description: "List of hosts with replica caught up"
+ nullable: true
+ items:
+ type: string
+ usedTemplates:
+ type: array
+ description: "List of templates used to build this CHI"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ taskID:
+ type: string
+ description: |
+ Allows to define custom taskID for CHI update and watch status of this update execution.
+ Displayed in all .status.taskID* fields.
+ By default (if not filled) every update of CHI manifest will generate random taskID
+ stop: &TypeStringBool
+ type: string
+ description: |
+ Allows to stop all ClickHouse clusters defined in a CHI.
+ Works as the following:
+ - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
+ - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ restart:
+ type: string
+ description: |
+ In case 'RollingUpdate' specified, the operator will always restart ClickHouse pods during reconcile.
+ This option is used in rare cases when a force restart is required and is typically removed after use in order to avoid unneeded restarts.
+ enum:
+ - ""
+ - "RollingUpdate"
+ suspend:
+ !!merge <<: *TypeStringBool
+ description: |
+ Suspend reconciliation of resources managed by a ClickHouse Installation.
+ Works as the following:
+ - When `suspend` is `true` operator stops reconciling all resources.
+ - When `suspend` is `false` or not set, operator reconciles all resources.
+ troubleshoot:
+ !!merge <<: *TypeStringBool
+ description: |
+ Allows to troubleshoot Pods during CrashLoopBack state.
+ This may happen when wrong configuration applied, in this case `clickhouse-server` wouldn't start.
+ Command within ClickHouse container is modified with `sleep` in order to avoid quick restarts
+ and give time to troubleshoot via CLI.
+ Liveness and Readiness probes are disabled as well.
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ templating:
+ type: object
+ # nullable: true
+ description: |
+ Optional, applicable inside ClickHouseInstallationTemplate only.
+ Defines current ClickHouseInstallationTemplate application options to target ClickHouseInstallation(s)."
+ properties:
+ policy:
+ type: string
+ description: |
+ When defined as `auto` inside ClickhouseInstallationTemplate, this ClickhouseInstallationTemplate
+ will be auto-added into ClickHouseInstallation, selectable by `chiSelector`.
+ Default value is `manual`, meaning ClickHouseInstallation should request this ClickhouseInstallationTemplate explicitly.
+ enum:
+ - ""
+ - "auto"
+ - "manual"
+ chiSelector:
+ type: object
+ description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ reconciling: &TypeReconcile
+ type: object
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator,
+ but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+ Default behavior is `Delete`"
+ # nullable: true
+ properties:
+ statefulSet: &TypeObjectsCleanup
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile.
+ Default behavior is `Retain`"
+ # nullable: true
+ properties:
+ statefulSet:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed Service, `Retain` by default"
+ macros:
+ type: object
+ description: "macros parameters"
+ properties:
+ sections:
+ type: object
+ description: "sections behaviour for macros"
+ properties:
+ users:
+ type: object
+ description: "sections behaviour for macros on users"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ profiles:
+ type: object
+ description: "sections behaviour for macros on profiles"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ quotas:
+ type: object
+ description: "sections behaviour for macros on quotas"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ settings:
+ type: object
+ description: "sections behaviour for macros on settings"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ files:
+ type: object
+ description: "sections behaviour for macros on files"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectfully before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ !!merge <<: *TypeStringBool
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to do not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ drop:
+ type: object
+ properties:
+ replicas:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated
+ properties:
+ onDelete:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted
+ onLostVolume:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica volume is lost
+ active:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated
+ reconcile:
+ !!merge <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ defaults:
+ type: object
+ description: |
+ define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ !!merge <<: *TypeStringBool
+ description: |
+ define should replicas be specified by FQDN in ``.
+ In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup
+ "no" by default
+ distributedDDL:
+ type: object
+ description: |
+ allows change `` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner: &TypePVCProvisioner
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy: &TypePVCReclaimPolicy
+ type: string
+ description: |
+ defines behavior of `PVC` deletion.
+ `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates: &TypeTemplateNames
+ type: object
+ description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ serviceTemplates:
+ type: array
+ description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ nullable: true
+ items:
+ type: string
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ zookeeper: &TypeZookeeperConfig
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separately; see examples at https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/
+ currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl`
+ More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper
+ # nullable: true
+ properties:
+ nodes:
+ type: array
+ description: "describe every available zookeeper cluster node for interaction"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - host
+ properties:
+ host:
+ type: string
+ description: "dns name or ip address for Zookeeper node"
+ port:
+ type: integer
+ description: "TCP port which used to connect to Zookeeper node"
+ minimum: 0
+ maximum: 65535
+ secure:
+ !!merge <<: *TypeStringBool
+ description: "if a secure connection to Zookeeper is required"
+ availabilityZone:
+ type: string
+ description: "availability zone for Zookeeper node"
+ session_timeout_ms:
+ type: integer
+ description: "session timeout during connect to Zookeeper"
+ operation_timeout_ms:
+ type: integer
+ description: "one operation timeout during Zookeeper transactions"
+ root:
+ type: string
+ description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)"
+ identity:
+ type: string
+ description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
+ users:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure password hashed, authorization restrictions, database level security row filters etc.
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers
+
+ any key may contain `valueFrom` with `secretKeyRef`, which allows passing a password from kubernetes secrets
+ the secret value is passed in `pod.spec.containers.env` and generated with from_env=XXX in XML in /etc/clickhouse-server/users.d/chop-generated-users.xml
+ it is not updated automatically when the `secret` changes; change spec.taskID to manually trigger a reconcile cycle
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+ any key with prefix `k8s_secret_` shall have a value in the format namespace/secret/key or secret/key
+ in this case value from secret will write directly into XML tag during render *-usersd ConfigMap
+
+ any key with prefix `k8s_secret_env` shall have a value in the format namespace/secret/key or secret/key
+ in this case value from secret will write into environment variable and write to XML tag via from_env=XXX
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ profiles:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of settings profile
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of resource quotas
+ More details: https://clickhouse.tech/docs/en/operations/quotas/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ settings: &TypeSettings
+ type: object
+ description: |
+ allows configure `clickhouse-server` settings inside ... tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+ secret value will pass in `pod.spec.env`, and generate with from_env=XXX in XML in /etc/clickhouse-server/config.d/chop-generated-settings.xml
+ it not allow automatically updates when updates `secret`, change spec.taskID for manually trigger reconcile cycle
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files: &TypeFiles
+ type: object
+ description: |
+ allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ every key in this object is the file name
+ every value in this object is the file content
+ you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html
+ each key could contains prefix like {common}, {users}, {hosts} or config.d, users.d, conf.d, wrong prefixes will be ignored, subfolders also will be ignored
+ More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass values from kubernetes secrets
+ secrets will mounted into pod as separate volume in /etc/clickhouse-server/secrets.d/
+ and will automatically update when update secret
+ it useful for pass SSL certificates from cert-manager or similar tool
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ clusters:
+ type: array
+ description: |
+ describes clusters layout and allows change settings on cluster-level, shard-level and replica-level
+ every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server`
+ all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml`
+ Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ If `cluster` contains zookeeper settings (could be inherited from top `chi` level), when you can create *ReplicatedMergeTree tables
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zookeeper:
+ !!merge <<: *TypeZookeeperConfig
+ description: |
+ optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.zookeeper` settings
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
+ schemaPolicy:
+ type: object
+ description: |
+ describes how schema is propagated within replicas and shards
+ properties:
+ replica:
+ type: string
+ description: "how schema is propagated within a replica"
+ enum:
+ # List SchemaPolicyReplicaXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ shard:
+ type: string
+ description: "how schema is propagated between shards"
+ enum:
+ # List SchemaPolicyShardXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ - "DistributedTablesOnly"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: optional, open secure ports for cluster
+ secret:
+ type: object
+ description: "optional, shared secret value to secure cluster communications"
+ properties:
+ auto:
+ !!merge <<: *TypeStringBool
+ description: "Auto-generate shared secret value to secure cluster communications"
+ value:
+ description: "Cluster shared secret value in plain text"
+ type: string
+ valueFrom:
+ description: "Cluster shared secret source"
+ type: object
+ properties:
+ secretKeyRef:
+ description: |
+ Selects a key of a secret in the clickhouse installation namespace.
+ Should not be used if value is not empty.
+ type: object
+ properties:
+ name:
+ description: |
+ Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ key:
+ description: The key of the secret to select from. Must be a valid secret key.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - name
+ - key
+ pdbManaged:
+ !!merge <<: *TypeStringBool
+ description: |
+ Specifies whether the Pod Disruption Budget (PDB) should be managed.
+ During the next installation, if PDB management is enabled, the operator will
+ attempt to retrieve any existing PDB. If none is found, it will create a new one
+ and initiate a reconciliation loop. If PDB management is disabled, the existing PDB
+ will remain intact, and the reconciliation loop will not be executed. By default,
+ PDB management is enabled.
+ pdbMaxUnavailable:
+ type: integer
+ description: |
+ Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction,
+ i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions
+ by specifying 0. This is a mutually exclusive setting with "minAvailable".
+ minimum: 0
+ maximum: 65535
+ reconcile:
+ type: object
+ description: "allow tuning reconciling process"
+ properties:
+ runtime:
+ !!merge <<: *TypeReconcileRuntime
+ host:
+ !!merge <<: *TypeReconcileHost
+ layout:
+ type: object
+ description: |
+ describe current cluster layout, how much shards in cluster, how much replica in shard
+ allows override settings on each shard and replica separately
+ # nullable: true
+ properties:
+ shardsCount:
+ type: integer
+ description: |
+ how much shards for current ClickHouse cluster will run in Kubernetes,
+ each shard contains shared-nothing part of data and contains set of replicas,
+ cluster contains 1 shard by default
+ replicasCount:
+ type: integer
+ description: |
+ how much replicas in each shards for current cluster will run in Kubernetes,
+ each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance,
+ every shard contains 1 replica by default
+ shards:
+ type: array
+ description: |
+ optional, allows override top-level `chi.spec.configuration`, cluster-level
+ `chi.spec.configuration.clusters` settings for each shard separately,
+ use it only if you fully understand what you do
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ definitionType:
+ type: string
+ description: "DEPRECATED - to be removed soon"
+ weight:
+ type: integer
+ description: |
+ optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ internalReplication:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise
+ allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard
+ override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates`
+ replicasCount:
+ type: integer
+ description: |
+ optional, how much replicas in selected shard for selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance,
+ shard contains 1 replica by default
+ override cluster-level `chi.spec.configuration.clusters.layout.replicasCount`
+ minimum: 1
+ replicas:
+ type: array
+ description: |
+ optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards`
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates`
+ replicas:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`
+ shardsCount:
+ type: integer
+ description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`"
+ minimum: 1
+ shards:
+ type: array
+ description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents"
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ templates:
+ type: object
+ description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
+ # nullable: true
+ properties:
+ hostTemplates:
+ type: array
+ description: "hostTemplate will be used during apply to generate `clickhouse-server` config files"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ description: "template name, can be used to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`"
+ type: string
+ portDistribution:
+ type: array
+ description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemplates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network"
+ enum:
+ # List PortDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClusterScopeIndex"
+ spec:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]`
+ More info: https://clickhouse.tech/docs/en/interfaces/tcp/
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]`
+ More info: https://clickhouse.tech/docs/en/interfaces/http/
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]`
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do"
+ podTemplates:
+ type: array
+ description: |
+ podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "template name, can be used to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ generateName:
+ type: string
+ description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables"
+ zone:
+ type: object
+ description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ #required:
+ # - values
+ properties:
+ key:
+ type: string
+ description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`"
+ values:
+ type: array
+ description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`"
+ # nullable: true
+ items:
+ type: string
+ distribution:
+ type: string
+ description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ enum:
+ - ""
+ - "Unspecified"
+ - "OnePerHost"
+ podDistribution:
+ type: array
+ description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "you can define multiple affinity policy types"
+ enum:
+ # List PodDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClickHouseAntiAffinity"
+ - "ShardAntiAffinity"
+ - "ReplicaAntiAffinity"
+ - "AnotherNamespaceAntiAffinity"
+ - "AnotherClickHouseInstallationAntiAffinity"
+ - "AnotherClusterAntiAffinity"
+ - "MaxNumberPerNode"
+ - "NamespaceAffinity"
+ - "ClickHouseInstallationAffinity"
+ - "ClusterAffinity"
+ - "ShardAffinity"
+ - "ReplicaAffinity"
+ - "PreviousTailAffinity"
+ - "CircularReplication"
+ scope:
+ type: string
+ description: "scope for apply each podDistribution"
+ enum:
+ # list PodDistributionScopeXXX constants
+ - ""
+ - "Unspecified"
+ - "Shard"
+ - "Replica"
+ - "Cluster"
+ - "ClickHouseInstallation"
+ - "Namespace"
+ number:
+ type: integer
+ description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type"
+ minimum: 0
+ maximum: 65535
+ topologyKey:
+ type: string
+ description: |
+ use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`,
+ more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity"
+ metadata:
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+ description: "allows defining the whole Pod.spec inside StatefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ volumeClaimTemplates:
+ type: array
+ description: |
+ allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ provisioner: *TypePVCProvisioner
+ reclaimPolicy: *TypePVCReclaimPolicy
+ metadata:
+ type: object
+ description: |
+ allows to pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ serviceTemplates:
+ type: array
+ description: |
+ allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ generateName:
+ type: string
+ description: |
+ allows define format for generated `Service` name,
+ look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates
+ for details about available template variables
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Service
+ Can be used to define Cloud Provider specific metadata which impacts the behavior of the service
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ useTemplates:
+ type: array
+ description: |
+ list of `ClickHouseInstallationTemplate` (chit) resource names which will merge with current `CHI`
+ manifest during render Kubernetes resources to create related ClickHouse clusters
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "name of `ClickHouseInstallationTemplate` (chit) resource"
+ namespace:
+ type: string
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
+ useType:
+ type: string
+ description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
+ enum:
+ # List useTypeXXX constants from model
+ - ""
+ - "merge"
diff --git a/deploy/operatorhub/0.25.6/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.6/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
new file mode 100644
index 000000000..0779a3051
--- /dev/null
+++ b/deploy/operatorhub/0.25.6/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -0,0 +1,1453 @@
+# Template Parameters:
+#
+# KIND=ClickHouseInstallationTemplate
+# SINGULAR=clickhouseinstallationtemplate
+# PLURAL=clickhouseinstallationtemplates
+# SHORT=chit
+# OPERATOR_VERSION=0.25.6
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhouseinstallationtemplates.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.25.6
+spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseInstallationTemplate
+ singular: clickhouseinstallationtemplate
+ plural: clickhouseinstallationtemplates
+ shortNames:
+ - chit
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: version
+ type: string
+ description: Operator version
+ priority: 1 # show in wide view
+ jsonPath: .status.chop-version
+ - name: clusters
+ type: integer
+ description: Clusters count
+ jsonPath: .status.clusters
+ - name: shards
+ type: integer
+ description: Shards count
+ priority: 1 # show in wide view
+ jsonPath: .status.shards
+ - name: hosts
+ type: integer
+ description: Hosts count
+ jsonPath: .status.hosts
+ - name: taskID
+ type: string
+ description: TaskID
+ priority: 1 # show in wide view
+ jsonPath: .status.taskID
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
+ - name: hosts-completed
+ type: integer
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
+ - name: hosts-updated
+ type: integer
+ description: Updated hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUpdated
+ - name: hosts-added
+ type: integer
+ description: Added hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsAdded
+ - name: hosts-deleted
+ type: integer
+ description: Hosts deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDeleted
+ - name: endpoint
+ type: string
+ description: Client access endpoint
+ priority: 1 # show in wide view
+ jsonPath: .status.endpoint
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ - name: suspend
+ type: string
+ description: Suspend reconciliation
+ # Displayed in all priorities
+ jsonPath: .spec.suspend
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters"
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: |
+ APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |
+ Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: |
+ Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other
+ properties:
+ chop-version:
+ type: string
+ description: "Operator version"
+ chop-commit:
+ type: string
+ description: "Operator git commit SHA"
+ chop-date:
+ type: string
+ description: "Operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this resource"
+ clusters:
+ type: integer
+ minimum: 0
+ description: "Clusters count"
+ shards:
+ type: integer
+ minimum: 0
+ description: "Shards count"
+ replicas:
+ type: integer
+ minimum: 0
+ description: "Replicas count"
+ hosts:
+ type: integer
+ minimum: 0
+ description: "Hosts count"
+ status:
+ type: string
+ description: "Status"
+ taskID:
+ type: string
+ description: "Current task id"
+ taskIDsStarted:
+ type: array
+ description: "Started task ids"
+ nullable: true
+ items:
+ type: string
+ taskIDsCompleted:
+ type: array
+ description: "Completed task ids"
+ nullable: true
+ items:
+ type: string
+ action:
+ type: string
+ description: "Action"
+ actions:
+ type: array
+ description: "Actions"
+ nullable: true
+ items:
+ type: string
+ error:
+ type: string
+ description: "Last error"
+ errors:
+ type: array
+ description: "Errors"
+ nullable: true
+ items:
+ type: string
+ hostsUnchanged:
+ type: integer
+ minimum: 0
+ description: "Unchanged Hosts count"
+ hostsUpdated:
+ type: integer
+ minimum: 0
+ description: "Updated Hosts count"
+ hostsAdded:
+ type: integer
+ minimum: 0
+ description: "Added Hosts count"
+ hostsCompleted:
+ type: integer
+ minimum: 0
+ description: "Completed Hosts count"
+ hostsDeleted:
+ type: integer
+ minimum: 0
+ description: "Deleted Hosts count"
+ hostsDelete:
+ type: integer
+ minimum: 0
+ description: "About to delete Hosts count"
+ pods:
+ type: array
+ description: "Pods"
+ nullable: true
+ items:
+ type: string
+ pod-ips:
+ type: array
+ description: "Pod IPs"
+ nullable: true
+ items:
+ type: string
+ fqdns:
+ type: array
+ description: "Pods FQDNs"
+ nullable: true
+ items:
+ type: string
+ endpoint:
+ type: string
+ description: "Endpoint"
+ endpoints:
+ type: array
+ description: "All endpoints"
+ nullable: true
+ items:
+ type: string
+ generation:
+ type: integer
+ minimum: 0
+ description: "Generation"
+ normalized:
+ type: object
+ description: "Normalized resource requested"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized resource completed"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ actionPlan:
+ type: object
+ description: "Action Plan"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ hostsWithTablesCreated:
+ type: array
+ description: "List of hosts with tables created by the operator"
+ nullable: true
+ items:
+ type: string
+ hostsWithReplicaCaughtUp:
+ type: array
+ description: "List of hosts with replica caught up"
+ nullable: true
+ items:
+ type: string
+ usedTemplates:
+ type: array
+ description: "List of templates used to build this CHI"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ taskID:
+ type: string
+ description: |
+ Allows to define custom taskID for CHI update and watch status of this update execution.
+ Displayed in all .status.taskID* fields.
+ By default (if not filled) every update of CHI manifest will generate random taskID
+ stop: &TypeStringBool
+ type: string
+ description: |
+ Allows to stop all ClickHouse clusters defined in a CHI.
+ Works as the following:
+              - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
+              - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ restart:
+ type: string
+ description: |
+ In case 'RollingUpdate' specified, the operator will always restart ClickHouse pods during reconcile.
+ This options is used in rare cases when force restart is required and is typically removed after the use in order to avoid unneeded restarts.
+ enum:
+ - ""
+ - "RollingUpdate"
+ suspend:
+ !!merge <<: *TypeStringBool
+ description: |
+ Suspend reconciliation of resources managed by a ClickHouse Installation.
+ Works as the following:
+ - When `suspend` is `true` operator stops reconciling all resources.
+ - When `suspend` is `false` or not set, operator reconciles all resources.
+ troubleshoot:
+ !!merge <<: *TypeStringBool
+ description: |
+ Allows to troubleshoot Pods during CrashLoopBack state.
+ This may happen when wrong configuration applied, in this case `clickhouse-server` wouldn't start.
+ Command within ClickHouse container is modified with `sleep` in order to avoid quick restarts
+ and give time to troubleshoot via CLI.
+ Liveness and Readiness probes are disabled as well.
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ templating:
+ type: object
+ # nullable: true
+ description: |
+ Optional, applicable inside ClickHouseInstallationTemplate only.
+              Defines current ClickHouseInstallationTemplate application options to target ClickHouseInstallation(s).
+ properties:
+ policy:
+ type: string
+ description: |
+ When defined as `auto` inside ClickhouseInstallationTemplate, this ClickhouseInstallationTemplate
+ will be auto-added into ClickHouseInstallation, selectable by `chiSelector`.
+ Default value is `manual`, meaning ClickHouseInstallation should request this ClickhouseInstallationTemplate explicitly.
+ enum:
+ - ""
+ - "auto"
+ - "manual"
+ chiSelector:
+ type: object
+ description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ reconciling: &TypeReconcile
+ type: object
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator,
+ but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+                    Default behavior is `Delete`
+ # nullable: true
+ properties:
+ statefulSet: &TypeObjectsCleanup
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile.
+                    Default behavior is `Retain`
+ # nullable: true
+ properties:
+ statefulSet:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed Service, `Retain` by default"
+ macros:
+ type: object
+ description: "macros parameters"
+ properties:
+ sections:
+ type: object
+ description: "sections behaviour for macros"
+ properties:
+ users:
+ type: object
+ description: "sections behaviour for macros on users"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ profiles:
+ type: object
+ description: "sections behaviour for macros on profiles"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ quotas:
+ type: object
+ description: "sections behaviour for macros on quotas"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ settings:
+ type: object
+ description: "sections behaviour for macros on settings"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ files:
+ type: object
+ description: "sections behaviour for macros on files"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectfully before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ !!merge <<: *TypeStringBool
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+                          Default option value is to not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ drop:
+ type: object
+ properties:
+ replicas:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated
+ properties:
+ onDelete:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted
+ onLostVolume:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica volume is lost
+ active:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated
+ reconcile:
+ !!merge <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ defaults:
+ type: object
+ description: |
+ define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ !!merge <<: *TypeStringBool
+ description: |
+ define should replicas be specified by FQDN in ``.
+ In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup
+ "no" by default
+ distributedDDL:
+ type: object
+ description: |
+ allows change `` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner: &TypePVCProvisioner
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy: &TypePVCReclaimPolicy
+ type: string
+ description: |
+ defines behavior of `PVC` deletion.
+ `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates: &TypeTemplateNames
+ type: object
+ description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ serviceTemplates:
+ type: array
+ description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ nullable: true
+ items:
+ type: string
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ zookeeper: &TypeZookeeperConfig
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+                  `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separately, see examples at https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/
+ currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl`
+ More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper
+ # nullable: true
+ properties:
+ nodes:
+ type: array
+ description: "describe every available zookeeper cluster node for interaction"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - host
+ properties:
+ host:
+ type: string
+ description: "dns name or ip address for Zookeeper node"
+ port:
+ type: integer
+ description: "TCP port which used to connect to Zookeeper node"
+ minimum: 0
+ maximum: 65535
+ secure:
+ !!merge <<: *TypeStringBool
+ description: "if a secure connection to Zookeeper is required"
+ availabilityZone:
+ type: string
+ description: "availability zone for Zookeeper node"
+ session_timeout_ms:
+ type: integer
+ description: "session timeout during connect to Zookeeper"
+ operation_timeout_ms:
+ type: integer
+ description: "one operation timeout during Zookeeper transactions"
+ root:
+ type: string
+ description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)"
+ identity:
+ type: string
+ description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
+ users:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure password hashed, authorization restrictions, database level security row filters etc.
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets
+                secret value will be passed in `pod.spec.containers.env`, and generated with from_env=XXX in XML in /etc/clickhouse-server/users.d/chop-generated-users.xml
+                it does not allow automatic updates when the `secret` is updated, change spec.taskID to manually trigger a reconcile cycle
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+                any key with prefix `k8s_secret_` shall have a value in the format namespace/secret/key or secret/key
+ in this case value from secret will write directly into XML tag during render *-usersd ConfigMap
+
+                any key with prefix `k8s_secret_env` shall have a value in the format namespace/secret/key or secret/key
+ in this case value from secret will write into environment variable and write to XML tag via from_env=XXX
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ profiles:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of settings profile
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of resource quotas
+ More details: https://clickhouse.tech/docs/en/operations/quotas/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ settings: &TypeSettings
+ type: object
+ description: |
+ allows configure `clickhouse-server` settings inside ... tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+                secret value will be passed in `pod.spec.env`, and generated with from_env=XXX in XML in /etc/clickhouse-server/config.d/chop-generated-settings.xml
+                it does not allow automatic updates when the `secret` is updated, change spec.taskID to manually trigger a reconcile cycle
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files: &TypeFiles
+ type: object
+ description: |
+ allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ every key in this object is the file name
+ every value in this object is the file content
+ you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html
+ each key could contains prefix like {common}, {users}, {hosts} or config.d, users.d, conf.d, wrong prefixes will be ignored, subfolders also will be ignored
+ More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass values from kubernetes secrets
+                secrets will be mounted into the pod as a separate volume in /etc/clickhouse-server/secrets.d/
+                and will automatically update when the secret is updated
+                it is useful for passing SSL certificates from cert-manager or similar tool
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ clusters:
+ type: array
+ description: |
+ describes clusters layout and allows change settings on cluster-level, shard-level and replica-level
+ every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server`
+ all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml`
+ Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ If `cluster` contains zookeeper settings (could be inherited from top `chi` level), when you can create *ReplicatedMergeTree tables
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zookeeper:
+ !!merge <<: *TypeZookeeperConfig
+ description: |
+ optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.zookeeper` settings
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
+ schemaPolicy:
+ type: object
+ description: |
+ describes how schema is propagated within replicas and shards
+ properties:
+ replica:
+ type: string
+ description: "how schema is propagated within a replica"
+ enum:
+ # List SchemaPolicyReplicaXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ shard:
+ type: string
+ description: "how schema is propagated between shards"
+ enum:
+ # List SchemaPolicyShardXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ - "DistributedTablesOnly"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: optional, open secure ports for cluster
+ secret:
+ type: object
+ description: "optional, shared secret value to secure cluster communications"
+ properties:
+ auto:
+ !!merge <<: *TypeStringBool
+ description: "Auto-generate shared secret value to secure cluster communications"
+ value:
+ description: "Cluster shared secret value in plain text"
+ type: string
+ valueFrom:
+ description: "Cluster shared secret source"
+ type: object
+ properties:
+ secretKeyRef:
+ description: |
+ Selects a key of a secret in the clickhouse installation namespace.
+ Should not be used if value is not empty.
+ type: object
+ properties:
+ name:
+ description: |
+ Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ key:
+ description: The key of the secret to select from. Must be a valid secret key.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - name
+ - key
+ pdbManaged:
+ !!merge <<: *TypeStringBool
+ description: |
+ Specifies whether the Pod Disruption Budget (PDB) should be managed.
+ During the next installation, if PDB management is enabled, the operator will
+ attempt to retrieve any existing PDB. If none is found, it will create a new one
+ and initiate a reconciliation loop. If PDB management is disabled, the existing PDB
+ will remain intact, and the reconciliation loop will not be executed. By default,
+ PDB management is enabled.
+ pdbMaxUnavailable:
+ type: integer
+ description: |
+ Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction,
+ i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions
+ by specifying 0. This is a mutually exclusive setting with "minAvailable".
+ minimum: 0
+ maximum: 65535
+ reconcile:
+ type: object
+ description: "allow tuning reconciling process"
+ properties:
+ runtime:
+ !!merge <<: *TypeReconcileRuntime
+ host:
+ !!merge <<: *TypeReconcileHost
+ layout:
+ type: object
+ description: |
+ describe current cluster layout, how much shards in cluster, how much replica in shard
+ allows overriding settings on each shard and replica separately
+ # nullable: true
+ properties:
+ shardsCount:
+ type: integer
+ description: |
+ how many shards of the current ClickHouse cluster will run in Kubernetes,
+ each shard contains a shared-nothing part of data and a set of replicas,
+ cluster contains 1 shard by default
+ replicasCount:
+ type: integer
+ description: |
+ how many replicas in each shard of the current cluster will run in Kubernetes,
+ each replica is a separate `StatefulSet` which contains only one `Pod` with a `clickhouse-server` instance,
+ every shard contains 1 replica by default
+ shards:
+ type: array
+ description: |
+ optional, allows override top-level `chi.spec.configuration`, cluster-level
+ `chi.spec.configuration.clusters` settings for each shard separately,
+ use it only if you fully understand what you do
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ definitionType:
+ type: string
+ description: "DEPRECATED - to be removed soon"
+ weight:
+ type: integer
+ description: |
+ optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ internalReplication:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise
+ allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard
+ override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates`
+ replicasCount:
+ type: integer
+ description: |
+ optional, how much replicas in selected shard for selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance,
+ shard contains 1 replica by default
+ override cluster-level `chi.spec.configuration.clusters.layout.replicasCount`
+ minimum: 1
+ replicas:
+ type: array
+ description: |
+ optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards`
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates`
+ replicas:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`
+ shardsCount:
+ type: integer
+ description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`"
+ minimum: 1
+ shards:
+ type: array
+ description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents"
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ templates:
+ type: object
+ description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
+ # nullable: true
+ properties:
+ hostTemplates:
+ type: array
+ description: "hostTemplate will be used during apply to generate `clickhouse-server` config files"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ description: "template name, can be used to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`"
+ type: string
+ portDistribution:
+ type: array
+ description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemplates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network"
+ enum:
+ # List PortDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClusterScopeIndex"
+ spec:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]`
+ More info: https://clickhouse.tech/docs/en/interfaces/tcp/
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]`
+ More info: https://clickhouse.tech/docs/en/interfaces/http/
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]`
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do"
+ podTemplates:
+ type: array
+ description: |
+ podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "template name, can be used to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ generateName:
+ type: string
+ description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables"
+ zone:
+ type: object
+ description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ #required:
+ # - values
+ properties:
+ key:
+ type: string
+ description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`"
+ values:
+ type: array
+ description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`"
+ # nullable: true
+ items:
+ type: string
+ distribution:
+ type: string
+ description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ enum:
+ - ""
+ - "Unspecified"
+ - "OnePerHost"
+ podDistribution:
+ type: array
+ description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "you can define multiple affinity policy types"
+ enum:
+ # List PodDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClickHouseAntiAffinity"
+ - "ShardAntiAffinity"
+ - "ReplicaAntiAffinity"
+ - "AnotherNamespaceAntiAffinity"
+ - "AnotherClickHouseInstallationAntiAffinity"
+ - "AnotherClusterAntiAffinity"
+ - "MaxNumberPerNode"
+ - "NamespaceAffinity"
+ - "ClickHouseInstallationAffinity"
+ - "ClusterAffinity"
+ - "ShardAffinity"
+ - "ReplicaAffinity"
+ - "PreviousTailAffinity"
+ - "CircularReplication"
+ scope:
+ type: string
+ description: "scope for apply each podDistribution"
+ enum:
+ # list PodDistributionScopeXXX constants
+ - ""
+ - "Unspecified"
+ - "Shard"
+ - "Replica"
+ - "Cluster"
+ - "ClickHouseInstallation"
+ - "Namespace"
+ number:
+ type: integer
+ description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type"
+ minimum: 0
+ maximum: 65535
+ topologyKey:
+ type: string
+ description: |
+ use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`,
+ more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ metadata:
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+ description: "allows to define the whole Pod.spec inside StatefulSet.spec, see https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ volumeClaimTemplates:
+ type: array
+ description: |
+ allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ provisioner: *TypePVCProvisioner
+ reclaimPolicy: *TypePVCReclaimPolicy
+ metadata:
+ type: object
+ description: |
+ allows to pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ serviceTemplates:
+ type: array
+ description: |
+ allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ generateName:
+ type: string
+ description: |
+ allows define format for generated `Service` name,
+ look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates
+ for details about available template variables
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Service
+ Can be used to define Cloud Provider specific metadata which impacts the behavior of the service
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ useTemplates:
+ type: array
+ description: |
+ list of `ClickHouseInstallationTemplate` (chit) resource names which will merge with current `CHI`
+ manifest during rendering Kubernetes resources to create related ClickHouse clusters
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "name of `ClickHouseInstallationTemplate` (chit) resource"
+ namespace:
+ type: string
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
+ useType:
+ type: string
+ description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
+ enum:
+ # List useTypeXXX constants from model
+ - ""
+ - "merge"
diff --git a/deploy/operatorhub/0.25.6/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml b/deploy/operatorhub/0.25.6/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
new file mode 100644
index 000000000..c2604dee9
--- /dev/null
+++ b/deploy/operatorhub/0.25.6/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
@@ -0,0 +1,883 @@
+# Template Parameters:
+#
+# OPERATOR_VERSION=0.25.6
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
+ labels:
+ clickhouse-keeper.altinity.com/chop: 0.25.6
+spec:
+ group: clickhouse-keeper.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseKeeperInstallation
+ singular: clickhousekeeperinstallation
+ plural: clickhousekeeperinstallations
+ shortNames:
+ - chk
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: version
+ type: string
+ description: Operator version
+ priority: 1 # show in wide view
+ jsonPath: .status.chop-version
+ - name: clusters
+ type: integer
+ description: Clusters count
+ jsonPath: .status.clusters
+ - name: shards
+ type: integer
+ description: Shards count
+ priority: 1 # show in wide view
+ jsonPath: .status.shards
+ - name: hosts
+ type: integer
+ description: Hosts count
+ jsonPath: .status.hosts
+ - name: taskID
+ type: string
+ description: TaskID
+ priority: 1 # show in wide view
+ jsonPath: .status.taskID
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
+ - name: hosts-unchanged
+ type: integer
+ description: Unchanged hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUnchanged
+ - name: hosts-updated
+ type: integer
+ description: Updated hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUpdated
+ - name: hosts-added
+ type: integer
+ description: Added hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsAdded
+ - name: hosts-completed
+ type: integer
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
+ - name: hosts-deleted
+ type: integer
+ description: Hosts deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDeleted
+ - name: hosts-delete
+ type: integer
+ description: Hosts to be deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDelete
+ - name: endpoint
+ type: string
+ description: Client access endpoint
+ priority: 1 # show in wide view
+ jsonPath: .status.endpoint
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ - name: suspend
+ type: string
+ description: Suspend reconciliation
+ # Displayed in all priorities
+ jsonPath: .spec.suspend
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters"
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: |
+ APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |
+ Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: |
+ Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other
+ properties:
+ chop-version:
+ type: string
+ description: "Operator version"
+ chop-commit:
+ type: string
+ description: "Operator git commit SHA"
+ chop-date:
+ type: string
+ description: "Operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this resource"
+ clusters:
+ type: integer
+ minimum: 0
+ description: "Clusters count"
+ shards:
+ type: integer
+ minimum: 0
+ description: "Shards count"
+ replicas:
+ type: integer
+ minimum: 0
+ description: "Replicas count"
+ hosts:
+ type: integer
+ minimum: 0
+ description: "Hosts count"
+ status:
+ type: string
+ description: "Status"
+ taskID:
+ type: string
+ description: "Current task id"
+ taskIDsStarted:
+ type: array
+ description: "Started task ids"
+ nullable: true
+ items:
+ type: string
+ taskIDsCompleted:
+ type: array
+ description: "Completed task ids"
+ nullable: true
+ items:
+ type: string
+ action:
+ type: string
+ description: "Action"
+ actions:
+ type: array
+ description: "Actions"
+ nullable: true
+ items:
+ type: string
+ error:
+ type: string
+ description: "Last error"
+ errors:
+ type: array
+ description: "Errors"
+ nullable: true
+ items:
+ type: string
+ hostsUnchanged:
+ type: integer
+ minimum: 0
+ description: "Unchanged Hosts count"
+ hostsUpdated:
+ type: integer
+ minimum: 0
+ description: "Updated Hosts count"
+ hostsAdded:
+ type: integer
+ minimum: 0
+ description: "Added Hosts count"
+ hostsCompleted:
+ type: integer
+ minimum: 0
+ description: "Completed Hosts count"
+ hostsDeleted:
+ type: integer
+ minimum: 0
+ description: "Deleted Hosts count"
+ hostsDelete:
+ type: integer
+ minimum: 0
+ description: "About to delete Hosts count"
+ pods:
+ type: array
+ description: "Pods"
+ nullable: true
+ items:
+ type: string
+ pod-ips:
+ type: array
+ description: "Pod IPs"
+ nullable: true
+ items:
+ type: string
+ fqdns:
+ type: array
+ description: "Pods FQDNs"
+ nullable: true
+ items:
+ type: string
+ endpoint:
+ type: string
+ description: "Endpoint"
+ endpoints:
+ type: array
+ description: "All endpoints"
+ nullable: true
+ items:
+ type: string
+ generation:
+ type: integer
+ minimum: 0
+ description: "Generation"
+ normalized:
+ type: object
+ description: "Normalized resource requested"
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized resource completed"
+ x-kubernetes-preserve-unknown-fields: true
+ hostsWithTablesCreated:
+ type: array
+ description: "List of hosts with tables created by the operator"
+ nullable: true
+ items:
+ type: string
+ hostsWithReplicaCaughtUp:
+ type: array
+ description: "List of hosts with replica caught up"
+ nullable: true
+ items:
+ type: string
+ usedTemplates:
+ type: array
+ description: "List of templates used to build this CHI"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ taskID:
+ type: string
+ description: |
+ Allows to define custom taskID for CHI update and watch status of this update execution.
+ Displayed in all .status.taskID* fields.
+ By default (if not filled) every update of CHI manifest will generate random taskID
+ stop: &TypeStringBool
+ type: string
+ description: |
+ Allows to stop all ClickHouse clusters defined in a CHI.
+ Works as the following:
+            - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
+            - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ suspend:
+ !!merge <<: *TypeStringBool
+ description: |
+ Suspend reconciliation of resources managed by a ClickHouse Keeper.
+ Works as the following:
+ - When `suspend` is `true` operator stops reconciling all resources.
+ - When `suspend` is `false` or not set, operator reconciles all resources.
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ reconciling:
+ type: object
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator,
+ but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+                  Default behavior is `Delete`
+ # nullable: true
+ properties:
+ statefulSet: &TypeObjectsCleanup
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile.
+                  Default behavior is `Retain`
+ # nullable: true
+ properties:
+ statefulSet:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed Service, `Retain` by default"
+ defaults:
+ type: object
+ description: |
+              define default behavior for whole ClickHouseInstallation, some behavior can be re-defined on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ !!merge <<: *TypeStringBool
+ description: |
+                  defines whether replicas should be specified by FQDN in ``.
+ In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup
+ "no" by default
+ distributedDDL:
+ type: object
+ description: |
+                  allows changing `distributed_ddl` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner: &TypePVCProvisioner
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy: &TypePVCReclaimPolicy
+ type: string
+ description: |
+ defines behavior of `PVC` deletion.
+ `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates: &TypeTemplateNames
+ type: object
+ description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ serviceTemplates:
+ type: array
+ description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ nullable: true
+ items:
+ type: string
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ settings: &TypeSettings
+ type: object
+ description: |
+ allows configure multiple aspects and behavior for `clickhouse-keeper` instance
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files: &TypeFiles
+ type: object
+ description: |
+ allows define content of any setting
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ clusters:
+ type: array
+ description: |
+ describes clusters layout and allows change settings on cluster-level and replica-level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`
+ pdbManaged:
+ !!merge <<: *TypeStringBool
+ description: |
+ Specifies whether the Pod Disruption Budget (PDB) should be managed.
+ During the next installation, if PDB management is enabled, the operator will
+ attempt to retrieve any existing PDB. If none is found, it will create a new one
+ and initiate a reconciliation loop. If PDB management is disabled, the existing PDB
+ will remain intact, and the reconciliation loop will not be executed. By default,
+ PDB management is enabled.
+ pdbMaxUnavailable:
+ type: integer
+ description: |
+ Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction,
+ i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions
+ by specifying 0. This is a mutually exclusive setting with "minAvailable".
+ minimum: 0
+ maximum: 65535
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
+ layout:
+ type: object
+ description: |
+ describe current cluster layout, how much shards in cluster, how much replica in shard
+                      allows override settings on each shard and replica separately
+ # nullable: true
+ properties:
+ replicasCount:
+ type: integer
+ description: |
+ how much replicas in each shards for current cluster will run in Kubernetes,
+ each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance,
+                          every shard contains 1 replica by default
+ replicas:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`
+ shardsCount:
+ type: integer
+ description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`"
+ minimum: 1
+ shards:
+ type: array
+ description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents"
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zkPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ raftPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ templates:
+ type: object
+ description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
+ # nullable: true
+ properties:
+ hostTemplates:
+ type: array
+              description: "hostTemplate is used during apply to generate `clickhouse-server` config files"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+                      description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`"
+ type: string
+ portDistribution:
+ type: array
+ description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network"
+ enum:
+ # List PortDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClusterScopeIndex"
+ spec:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zkPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ raftPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do"
+ podTemplates:
+ type: array
+ description: |
+ podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+                      description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ generateName:
+ type: string
+ description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables"
+ zone:
+ type: object
+ description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ #required:
+ # - values
+ properties:
+ key:
+ type: string
+ description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`"
+ values:
+ type: array
+ description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`"
+ # nullable: true
+ items:
+ type: string
+ distribution:
+ type: string
+ description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ enum:
+ - ""
+ - "Unspecified"
+ - "OnePerHost"
+ podDistribution:
+ type: array
+ description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "you can define multiple affinity policy types"
+ enum:
+ # List PodDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClickHouseAntiAffinity"
+ - "ShardAntiAffinity"
+ - "ReplicaAntiAffinity"
+ - "AnotherNamespaceAntiAffinity"
+ - "AnotherClickHouseInstallationAntiAffinity"
+ - "AnotherClusterAntiAffinity"
+ - "MaxNumberPerNode"
+ - "NamespaceAffinity"
+ - "ClickHouseInstallationAffinity"
+ - "ClusterAffinity"
+ - "ShardAffinity"
+ - "ReplicaAffinity"
+ - "PreviousTailAffinity"
+ - "CircularReplication"
+ scope:
+ type: string
+ description: "scope for apply each podDistribution"
+ enum:
+ # list PodDistributionScopeXXX constants
+ - ""
+ - "Unspecified"
+ - "Shard"
+ - "Replica"
+ - "Cluster"
+ - "ClickHouseInstallation"
+ - "Namespace"
+ number:
+ type: integer
+ description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type"
+ minimum: 0
+ maximum: 65535
+ topologyKey:
+ type: string
+ description: |
+ use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`,
+                              more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ metadata:
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+              description: "allows define whole Pod.spec inside StatefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ volumeClaimTemplates:
+ type: array
+ description: |
+ allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+                      shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ provisioner: *TypePVCProvisioner
+ reclaimPolicy: *TypePVCReclaimPolicy
+ metadata:
+ type: object
+ description: |
+ allows to pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ serviceTemplates:
+ type: array
+ description: |
+ allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+                      shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ generateName:
+ type: string
+ description: |
+ allows define format for generated `Service` name,
+ look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates
+                      for details about available template variables
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Service
+                      Could be used to define Cloud-Provider-specific metadata which impacts the behavior of the service
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
diff --git a/deploy/operatorhub/0.25.6/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.6/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml
new file mode 100644
index 000000000..19434d6d5
--- /dev/null
+++ b/deploy/operatorhub/0.25.6/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml
@@ -0,0 +1,539 @@
+# Template Parameters:
+#
+# NONE
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhouseoperatorconfigurations.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.25.6
+spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseOperatorConfiguration
+ singular: clickhouseoperatorconfiguration
+ plural: clickhouseoperatorconfigurations
+ shortNames:
+ - chopconf
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: namespaces
+ type: string
+ description: Watch namespaces
+ jsonPath: .status
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ schema:
+ openAPIV3Schema:
+ type: object
+ description: "allows customize `clickhouse-operator` settings, need restart clickhouse-operator pod after adding, more details https://github.com/Altinity/clickhouse-operator/blob/master/docs/operator_configuration.md"
+ x-kubernetes-preserve-unknown-fields: true
+ properties:
+ status:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ Allows to define settings of the clickhouse-operator.
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/config/config.yaml
+ Check into etc-clickhouse-operator* ConfigMaps if you need more control
+ x-kubernetes-preserve-unknown-fields: true
+ properties:
+ watch:
+ type: object
+ description: "Parameters for watching Kubernetes resources used by the clickhouse-operator deployment"
+ properties:
+ namespaces:
+ type: object
+ description: "List of namespaces where clickhouse-operator watches for events."
+ x-kubernetes-preserve-unknown-fields: true
+ clickhouse:
+ type: object
+ description: "Clickhouse related parameters used by clickhouse-operator"
+ properties:
+ configuration:
+ type: object
+ properties:
+ file:
+ type: object
+ properties:
+ path:
+ type: object
+ description: |
+ Each 'path' can be either absolute or relative.
+ In case path is absolute - it is used as is.
+ In case path is relative - it is relative to the folder where configuration file you are reading right now is located.
+ properties:
+ common:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
+ Default value - config.d
+ host:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
+ Default value - conf.d
+ user:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files with users settings are located.
+ Files are common for all instances within a CHI.
+ Default value - users.d
+ user:
+ type: object
+ description: "Default parameters for any user which will be created"
+ properties:
+ default:
+ type: object
+ properties:
+ profile:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ quota:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ networksIP:
+ type: array
+ description: "ClickHouse server configuration `...` for any "
+ items:
+ type: string
+ password:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ network:
+ type: object
+ description: "Default network parameters for any user which will be created"
+ properties:
+ hostRegexpTemplate:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ configurationRestartPolicy:
+ type: object
+ description: "Configuration restart policy describes what configuration changes require ClickHouse restart"
+ properties:
+ rules:
+ type: array
+ description: "Array of set of rules per specified ClickHouse versions"
+ items:
+ type: object
+ properties:
+ version:
+ type: string
+ description: "ClickHouse version expression"
+ rules:
+ type: array
+ description: "Set of configuration rules for specified ClickHouse version"
+ items:
+ type: object
+ description: "setting: value pairs for configuration restart policy"
+ x-kubernetes-preserve-unknown-fields: true
+ access:
+ type: object
+ description: "parameters used to connect to ClickHouse from the clickhouse-operator deployment"
+ properties:
+ scheme:
+ type: string
+ description: "The scheme to use for connecting to ClickHouse. Possible values: http, https, auto"
+ username:
+ type: string
+ description: "ClickHouse username to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName"
+ password:
+ type: string
+ description: "ClickHouse password to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName"
+ rootCA:
+ type: string
+ description: "Root certificate authority that clients use when verifying server certificates. Used for https connection to ClickHouse"
+ secret:
+ type: object
+ properties:
+ namespace:
+ type: string
+ description: "Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances"
+ name:
+ type: string
+ description: "Name of k8s Secret with username and password to be used by operator to connect to ClickHouse instances"
+ port:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "Port to be used by operator to connect to ClickHouse instances"
+ timeouts:
+ type: object
+ description: "Timeouts used to limit connection and queries from the operator to ClickHouse instances, In seconds"
+ properties:
+ connect:
+ type: integer
+ minimum: 1
+ maximum: 10
+ description: "Timeout to set up a connection from the operator to ClickHouse instances. In seconds."
+ query:
+ type: integer
+ minimum: 1
+ maximum: 600
+ description: "Timeout to perform a SQL query from the operator to ClickHouse instances. In seconds."
+ addons:
+ type: object
+ description: "Configuration addons specifies additional settings"
+ properties:
+ rules:
+ type: array
+ description: "Array of set of rules per specified ClickHouse versions"
+ items:
+ type: object
+ properties:
+ version:
+ type: string
+ description: "ClickHouse version expression"
+ spec:
+ type: object
+ description: "spec"
+ properties:
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ properties:
+ users:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ profiles:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ settings:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ files:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ metrics:
+ type: object
+ description: "parameters used to fetch metrics from ClickHouse by the clickhouse-operator"
+ properties:
+ timeouts:
+ type: object
+ description: |
+ Timeouts used to limit connection and queries from the metrics exporter to ClickHouse instances
+ Specified in seconds.
+ properties:
+ collect:
+ type: integer
+ minimum: 1
+ maximum: 600
+ description: |
+ Timeout used to limit metrics collection request. In seconds.
+ Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
+ All collected metrics are returned.
+ template:
+ type: object
+ description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
+ properties:
+ chi:
+ type: object
+ properties:
+ policy:
+ type: string
+ description: |
+ CHI template updates handling policy
+ Possible policy values:
+ - ReadOnStart. Accept CHIT updates on the operators start only.
+ - ApplyOnNextReconcile. Accept CHIT updates at all times. Apply new CHITs on the next regular reconcile of the CHI
+ enum:
+ - ""
+ - "ReadOnStart"
+ - "ApplyOnNextReconcile"
+ path:
+ type: string
+ description: "Path to folder where ClickHouseInstallationTemplate .yaml manifests are located."
+ reconcile:
+ type: object
+ description: "allow tuning reconciling process"
+ properties:
+ runtime:
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileCHIsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "How many goroutines will be used to reconcile CHIs in parallel, 10 by default"
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet:
+ type: object
+ description: "Allow change default behavior for reconciling StatefulSet which generated by clickhouse-operator"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
+ Possible options:
+ 1. abort - do nothing, just break the process and wait for admin.
+ 2. delete - delete newly created problematic StatefulSet.
+ 3. ignore (default) - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for created/updated StatefulSet to be Ready"
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for created/updated StatefulSet status"
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
+ Possible options:
+ 1. abort - do nothing, just break the process and wait for admin.
+ 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
+ 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ host:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectfully before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude: &TypeStringBool
+ type: string
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be excluded from a ClickHouse cluster"
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ drop:
+ type: object
+ properties:
+ replicas:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated
+ properties:
+ onDelete:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted
+ onLostVolume:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica volume is lost
+ active:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated
+ annotation:
+ type: object
+ description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
+ properties:
+ include:
+ type: array
+ description: |
+ When propagating labels from the chi's `metadata.annotations` section to child objects' `metadata.annotations`,
+ include annotations with names from the following list
+ items:
+ type: string
+ exclude:
+ type: array
+ description: |
+ When propagating labels from the chi's `metadata.annotations` section to child objects' `metadata.annotations`,
+ exclude annotations with names from the following list
+ items:
+ type: string
+ label:
+ type: object
+ description: "defines which metadata.labels will include or exclude during render StatefulSet, Pod, PVC resources"
+ properties:
+ include:
+ type: array
+ description: |
+ When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
+ include labels from the following list
+ items:
+ type: string
+ exclude:
+ type: array
+ items:
+ type: string
+ description: |
+ When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
+ exclude labels from the following list
+ appendScope:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether to append *Scope* labels to StatefulSet and Pod
+ - "LabelShardScopeIndex"
+ - "LabelReplicaScopeIndex"
+ - "LabelCHIScopeIndex"
+ - "LabelCHIScopeCycleSize"
+ - "LabelCHIScopeCycleIndex"
+ - "LabelCHIScopeCycleOffset"
+ - "LabelClusterScopeIndex"
+ - "LabelClusterScopeCycleSize"
+ - "LabelClusterScopeCycleIndex"
+ - "LabelClusterScopeCycleOffset"
+ metrics:
+ type: object
+ description: "defines metrics exporter options"
+ properties:
+ labels:
+ type: object
+ description: "defines metric labels options"
+ properties:
+ exclude:
+ type: array
+ description: |
+ When adding labels to a metric exclude labels with names from the following list
+ items:
+ type: string
+ status:
+ type: object
+ description: "defines status options"
+ properties:
+ fields:
+ type: object
+ description: "defines status fields options"
+ properties:
+ action:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'action'"
+ actions:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'actions'"
+ error:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'error'"
+ errors:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'errors'"
+ statefulSet:
+ type: object
+ description: "define StatefulSet-specific parameters"
+ properties:
+ revisionHistoryLimit:
+ type: integer
+ description: "revisionHistoryLimit is the maximum number of revisions that will be\nmaintained in the StatefulSet's revision history. \nLook details in `statefulset.spec.revisionHistoryLimit`\n"
+ pod:
+ type: object
+ description: "define pod specific parameters"
+ properties:
+ terminationGracePeriod:
+ type: integer
+ description: "Optional duration in seconds the pod needs to terminate gracefully. \nLook details in `pod.spec.terminationGracePeriodSeconds`\n"
+ logger:
+ type: object
+ description: "allow setup clickhouse-operator logger behavior"
+ properties:
+ logtostderr:
+ type: string
+ description: "boolean, allows logs to stderr"
+ alsologtostderr:
+ type: string
+ description: "boolean allows logs to stderr and files both"
+ v:
+ type: string
+ description: "verbosity level of clickhouse-operator log, default - 1 max - 9"
+ stderrthreshold:
+ type: string
+ vmodule:
+ type: string
+ description: |
+ Comma-separated list of filename=N, where filename (can be a pattern) must have no .go ext, and N is a V level.
+ Ex.: file*=2 sets the 'V' to 2 in all files with names like file*.
+ log_backtrace_at:
+ type: string
+ description: |
+ It can be set to a file and line number with a logging line.
+ Ex.: file.go:123
+ Each time when this line is being executed, a stack trace will be written to the Info log.
diff --git a/deploy/operatorhub/0.26.0/clickhouse-operator.v0.26.0.clusterserviceversion.yaml b/deploy/operatorhub/0.26.0/clickhouse-operator.v0.26.0.clusterserviceversion.yaml
new file mode 100644
index 000000000..0de0cd65f
--- /dev/null
+++ b/deploy/operatorhub/0.26.0/clickhouse-operator.v0.26.0.clusterserviceversion.yaml
@@ -0,0 +1,1666 @@
+apiVersion: operators.coreos.com/v1alpha1
+kind: ClusterServiceVersion
+metadata:
+ name: clickhouse-operator.v0.26.0
+ namespace: placeholder
+ annotations:
+ capabilities: Full Lifecycle
+ categories: Database
+ containerImage: docker.io/altinity/clickhouse-operator:0.26.0
+ createdAt: '2026-02-20T18:07:52Z'
+ support: Altinity Ltd. https://altinity.com
+ description: The Altinity® Kubernetes Operator for ClickHouse® manages the full lifecycle of ClickHouse clusters.
+ repository: https://github.com/altinity/clickhouse-operator
+ certified: 'false'
+ alm-examples: |
+ [
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseInstallation",
+ "metadata": {
+ "name": "simple-01"
+ },
+ "spec": {
+ "configuration": {
+ "users": {
+ "test_user/password_sha256_hex": "10a6e6cc8311a3e2bcc09bf6c199adecd5dd59408c343e926b129c4914f3cb01",
+ "test_user/password": "test_password",
+ "test_user/networks/ip": [
+ "0.0.0.0/0"
+ ]
+ },
+ "clusters": [
+ {
+ "name": "simple"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseInstallation",
+ "metadata": {
+ "name": "use-templates-all",
+ "labels": {
+ "target-chi-label-manual": "target-chi-label-manual-value",
+ "target-chi-label-auto": "target-chi-label-auto-value"
+ }
+ },
+ "spec": {
+ "useTemplates": [
+ {
+ "name": "chit-01"
+ },
+ {
+ "name": "chit-02"
+ }
+ ],
+ "configuration": {
+ "clusters": [
+ {
+ "name": "c1"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseOperatorConfiguration",
+ "metadata": {
+ "name": "chop-config-01"
+ },
+ "spec": {
+ "watch": {
+ "namespaces": {
+ "include": [],
+ "exclude": []
+ }
+ },
+ "clickhouse": {
+ "configuration": {
+ "file": {
+ "path": {
+ "common": "config.d",
+ "host": "conf.d",
+ "user": "users.d"
+ }
+ },
+ "user": {
+ "default": {
+ "profile": "default",
+ "quota": "default",
+ "networksIP": [
+ "::1",
+ "127.0.0.1"
+ ],
+ "password": "default"
+ }
+ },
+ "network": {
+ "hostRegexpTemplate": "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$"
+ }
+ },
+ "access": {
+ "username": "clickhouse_operator",
+ "password": "clickhouse_operator_password",
+ "secret": {
+ "namespace": "",
+ "name": ""
+ },
+ "port": 8123
+ },
+ "metrics": {
+ "timeouts": {
+ "collect": 9
+ },
+ "tablesRegexp": "^(metrics|custom_metrics)$"
+ }
+ },
+ "template": {
+ "chi": {
+ "path": "templates.d"
+ }
+ },
+ "reconcile": {
+ "runtime": {
+ "reconcileCHIsThreadsNumber": 10,
+ "reconcileShardsThreadsNumber": 5,
+ "reconcileShardsMaxConcurrencyPercent": 50
+ },
+ "statefulSet": {
+ "create": {
+ "onFailure": "ignore"
+ },
+ "update": {
+ "timeout": 300,
+ "pollInterval": 5,
+ "onFailure": "abort"
+ }
+ },
+ "host": {
+ "wait": {
+ "exclude": "true",
+ "queries": "true",
+ "include": "false",
+ "replicas": {
+ "all": "no",
+ "new": "yes",
+ "delay": 10
+ },
+ "probes": {
+ "startup": "no",
+ "readiness": "yes"
+ }
+ }
+ }
+ },
+ "annotation": {
+ "include": [],
+ "exclude": []
+ },
+ "label": {
+ "include": [],
+ "exclude": [],
+ "appendScope": "no"
+ },
+ "statefulSet": {
+ "revisionHistoryLimit": 0
+ },
+ "pod": {
+ "terminationGracePeriod": 30
+ },
+ "logger": {
+ "logtostderr": "true",
+ "alsologtostderr": "false",
+ "v": "1",
+ "stderrthreshold": "",
+ "vmodule": "",
+ "log_backtrace_at": ""
+ }
+ }
+ }
+ ]
+spec:
+ version: 0.26.0
+ minKubeVersion: 1.12.6
+ maturity: alpha
+ replaces: clickhouse-operator.v0.25.6
+ maintainers:
+ - email: support@altinity.com
+ name: Altinity
+ provider:
+ name: Altinity
+ displayName: Altinity® Kubernetes Operator for ClickHouse®
+ keywords:
+ - "clickhouse"
+ - "database"
+ - "oltp"
+ - "timeseries"
+ - "time series"
+ - "altinity"
+ customresourcedefinitions:
+ owned:
+ - description: ClickHouse Installation - set of ClickHouse Clusters
+ displayName: ClickHouseInstallation
+ group: clickhouse.altinity.com
+ kind: ClickHouseInstallation
+ name: clickhouseinstallations.clickhouse.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ - description: ClickHouse Installation Template - template for ClickHouse Installation
+ displayName: ClickHouseInstallationTemplate
+ group: clickhouse.altinity.com
+ kind: ClickHouseInstallationTemplate
+ name: clickhouseinstallationtemplates.clickhouse.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ - description: ClickHouse Operator Configuration - configuration of ClickHouse operator
+ displayName: ClickHouseOperatorConfiguration
+ group: clickhouse.altinity.com
+ kind: ClickHouseOperatorConfiguration
+ name: clickhouseoperatorconfigurations.clickhouse.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ - description: ClickHouse Keeper Installation - ClickHouse Keeper cluster instance
+ displayName: ClickHouseKeeperInstallation
+ group: clickhouse-keeper.altinity.com
+ kind: ClickHouseKeeperInstallation
+ name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ description: |-
+ ## ClickHouse
+ [ClickHouse](https://clickhouse.yandex) is an open source column-oriented database management system capable of real time generation of analytical data reports.
+ Check [ClickHouse documentation](https://clickhouse.yandex/docs/en) for more complete details.
+ ## The Altinity Operator for ClickHouse
+ The [Altinity Operator for ClickHouse](https://github.com/altinity/clickhouse-operator) automates the creation, alteration, or deletion of nodes in your ClickHouse cluster environment.
+ Check [operator documentation](https://github.com/Altinity/clickhouse-operator/tree/master/docs) for complete details and examples.
+ links:
+ - name: Altinity
+ url: https://altinity.com/
+ - name: Operator homepage
+ url: https://www.altinity.com/kubernetes-operator
+ - name: Github
+ url: https://github.com/altinity/clickhouse-operator
+ - name: Documentation
+ url: https://github.com/Altinity/clickhouse-operator/tree/master/docs
+ icon:
+ - mediatype: image/png
+ base64data: |-
+ iVBORw0KGgoAAAANSUhEUgAAASwAAAEsCAYAAAB5fY51AAAAAXNSR0IArs4c6QAAQABJREFUeAHs
+ vQmgZ2lVH3j/r6p676abpSNLE2TrRlwSQBoVtVFHiUGFLKOEKCQTTUzGmTExhmhiSJBoTMZEs2KQ
+ BsWNREWdyCTOGOMEQZbI0i0NyCaIxAB203tXvfef33LO+c697/+qqqu3V1Xvvq773fOd3/md5Tvf
+ 9+57/erVajq4DiqwoQLnvf4PH3Pe4ekx29P0oNVqfTEgl6xWE8eLV6uVxgnjCjLHab0TI+ZW00PW
+ 6zWgq0+up/XNkG+edjiubt6Zppu31tOnd1brm7fwvL1e3by1Bf165+b1+vDN02r7Jhh+6MZnXfYh
+ jAfXQQVmFUC/HVxnbQV+aX3Bg875xJOOracrD21NV00705U4XK6aVtMTVqut81WX6BAcNpOOIIye
+ x4iJFMfDhmry8CIwRh5mZBHfppH61XT7znp672pavQeH3g07W1s3rLbX77npQQ+6YXra6rYNXg6m
+ zoIKRPedBZmerSmu16vzfvnGRx9a3XXlar2+CscEDqXVVTgSrtzaWj2SZRmHURRJh0uf9/EycNE2
+ cVpxPg+jLLMOJczPRiiPd0j1Q634eMgtr/X693CIvWe9OnQD+G/goXbXzs4Nt3/FZR8BxwaDJcGB
+ fLpW4ODAOl1Xbq+4cUBd+Mv/4/Om9bFrcF5cs1qvvgSvM5fpMIDN8tCxzDcaHjoCiFkyn+psur/f
+ sOaHneLfdHgxRs7zcNxZfwrjryOPX5u2pl+78VmXvgsyvgo9uM6UChwcWKf7Sq7XWxf+8u997s72
+ 1jWHpvU169X6S/Dl3GVKi4cQrjp8JO11aGnPGxGHlw+ztPeh5jOtTjHhfdj50AgX8zcrHib8Mg9K
+ 2W8a49DJw2c2Jmkf85Aib/LnGPxw9ik8/joyODjAeu1O4+eDA+t0WzwcUBf84keeslof4uF0DRbw
+ mTgJ8I1xHArIRYdHjD4c4piAntdm3JnxhjU75BaHF7PH98Q+hfHgAFMnnJ43d/HpGfvZEXUcUOtt
+ fHm3tb4Gp9Izp62tBzF5HU4+pVQLnkn90AIg5ufLvPnQIp/gfgDRHHf6vWExHR/abWxvcsgIB9hO
+ HGBbB19CxvLv5yFbdD/HeFbGdu7PffSJ07T9l7ZWqxegAI/YdJioMFLsPkzqsMkvxNrh1Q81486O
+ N6xNh5fyzy8rd3Y+tl5NP74znfPKm7/ikveelY23z5M+OLD20wL9Xx++7Nw7tl+wNW29EBvnadxM
+ vOrwydVq8/FKFbiDN6xT+l6Zqje/gectWINXr849/JM3ffGlfzjXHkgPVAVyCzxQ/g/8vnZ9zjmr
+ D/0JLMQ34bh5ztbW1jkqSjuU/EYUpeI8JvIw89dxB29YqkP7co/yyRxeszcs2vcLMip7Fwr+Szs7
+ 61fffM5DXz89a3WsQw6e798KHBxY92+9y9uRf//Bq7d2dl6IDfH1+HmoB0uhw+g4h0+uVjvMDt6w
+ ol44XOrwQTF3ffmHOZaPh9iuw03FX9wC1w89PP8BiH9ie3341bc++7J3LCwOxPuhArkF7gdXBy6m
+ 137wM86Zdl6EHfNN+N7Uk+JVaVaY+ZuT36S0+XKlDt6wZvWSsOkQupfesDa+qcEfjsd3Y0lefezI
+ zqtvfdblH98d1MHMfVGB3Ab3BfcBZ1bgtR958Dnru/43NP//ii9UHqI3AehYfB9GsQwHb1h8Bbr7
+ b0B5OOWYdd00ngp/8GBwfHrwbWe988nVtPXPp/U5//zTz34Qf+7r4LoPK3BwYN2HxZ1+9n2POnJs
+ 62+gyN+MQ+pCHk/jsIrjiodUuw7esHgmtC/v4hCqL+Narepx0yF0koeX1qP5K04+BG//slCrxvn6
+ dBO4abp1Z1r/yLHtwz94+1c/5KM0P7ju/QrMd8u9z39WMp772g9cubOz/bfwd9z+PPr6SB1C0eTj
+ 0OIR5i/7VCgeXrl52nzhc7XikBOvCYZ5s9Mm77JQ9tf9buQHYMxrmy5kEYvRccggPGw+dMwytvpM
+ jsMhD4nZWKztoR8meTjlCJjy2zRu8tNo67HzB490nG+XDzP+0C4OWczvrNdHkeGPT+vVD9z8Jx72
+ ngY9eLwXKsAaH1z3UgWO/NT78aMI67+Nln4uvkeF357SN7G3Zx0C+Rk6Dp8MQZufQjuUtPlypTgv
+ 2pgQrr25Le0Wfsr/DGd78na/iqnczH+SXrhZehmgrOa3xSGx642FbvFH7jkCrzjbaH9EbLgW/HnY
+ nZKfTh+8u3g4XxHjMeQ8tCjiz860Wv/89tbW99/2lQ97a6c9eD71Chyny06d9GyzPPxT7//y1Wr7
+ b+OM+vI8o9zS8Zk3Dods8jo0UCjhUs8RnV762aFSZ0k9EDc/ZFKMZW32fU1Oih+BzXG749IhAmLH
+ IYNys+nQYVSuy4aRuzzy3zUWa3sI/L3ip9HWY+fHJOPWxfl2+TAbb1iUkQj+GBc0/w9+LOL7bvnq
+ z/jVZnrweAoViM4+Bcuz3eQl661DV33guVvrHRxU09O8yWLPoTb4chB3NG0cGtnEdQjs0rug2vx8
+ bIeNtkCulDY11TGhcfdhspefmp/x296niXkH/4jLcTS/s/QyQONn99i19+jNR3kzgg3Xgv8e+en0
+ wetDqR2ynM/1Iz7k/oZlmog34Ov1zlumaev7bn725b+ABTz4LRIu0t26H6fL7hbPWQU+8tPv+Tz8
+ FpeX48u+q3OvqADaVD5r3KMHb1izNyAUqW91Nl/JWchN46buCtyMH/XfdbjA9oR+TsQfcQpGv+2y
+ vxO+YUVcjg8B/cb26tC33fbsh/23RnXweBIVODiwTqJIBcGPJxzZvvNl+Ez5Lfhhz63abQRsOKy0
+ HTmvy9um3nByG5U+UCnHWPiiwQP2zHgDWvAu7RZ+Bp8JLR+8YakOi8Nozzc14Vx3rVrIJ37DQp3x
+ wUMOF355xPrlq3Mu/Lv4e4uf9Oof3E9UAXftiVBnux5f/h258n3fggZ7GX4h3oN5JujNAA/svTgj
+ Nh5aauIBQCXbl2+SFocPCDcfKgs/sCUuAtEKDTGWNfwKJ4RvJ8Ufh2LmOYs78+n8s0IAnXn0Ee7F
+ t2lM+01ji70eA3ev+CnS9tD5Mc24dXG+Xaf4hsUCgQXrtLPzyenQ9F03P/vhr8CCHnyZ2Gq76TE6
+ e5PqYI4VOPJT770azfVyNBN+i6cPiTqEoudUqTgtYtBnUrV5bm42JwjqsAgZEzLPWx0uMV/4hIWD
+ Oa7xLu0WfgafCS3b3qfJmHdejmxpp7hVj4h8kUfmozE2f57u3uQ+BOgty1gj8PLXRvsjYsO14L9H
+ fjp98O6Kl/NZV+JDVl+kyHllFgMSrcMNeC1j8mDE7zb7b9s7h77t9uf8kd+Q6cFtYwXcnRtVZ/nk
+ j/3O5UcOb/9jvLd/I75XxXbjadWH2FSeVrWWejR1HW4G4N4OF0k+BId905MP1zgsJJbDDEtxCafw
+ hBey2YdlTDOu4Xcjv9LtuN1xDb+sS9QnHGlzwv9shE5+N41pv2kMztkQuBl/+tvEjzlWk3jF3ccZ
+ cQidn3aJ4Xy75D/XGfPib4fZcIP6EacJAXHLutFOpFCvf2xaHX7xrX/y4K/7qCKLm3fEYvKsFv/z
+ +vDWx977bfghqpegOJd4U2aTe5PXIcQmywrycBgwTFMREyqo5TocdulddR1CfGyHjdzs8hMTwu0+
+ TPbyU/Mzftv7NDHviGPE5Tia31l6GaDxs/vYtcqLm5Zo8W0aqUd8wsVYh8yMOIQFv3Z/2m/ix5z8
+ b/LT+YN3V7ycrwzowPI9ecMindyRblp/Gs9/79YLH/4vpoPfDtFXRFWfTZzNwuGffN+XTjvb/wab
+ Bf+qTG4a7rHYXhxjk6pFtSm0B122pR7lTZ4AYAhePAVr8HPCXavNKpEI+7c/ieVQcTVFuJ/zhX1Y
+ ajgpfuXJ+O1/Fjcd8YrRccjA87j3w0b+sAMrX+pp3kftVsxsGoHbdQXuXvGzixwTnZ9iYjjfLh9m
+ sc6YpzwyKxrXg/0gXgGNC7mm05Pd/Pb2tP7m25/zyIMvE1EtXtF5Fs7a+2vwd/5W2z+Ipvmz3sw+
+ VGK3oizonjgNduujaqV3cx+8YbVu0m4ch5E3edZpwwh8HXKoqzd52Dfaelzww0DrdUp+ihQPe/Fw
+ vo7bwPEwwgc3lNQYnVkMCp9656N2SR75CXeCDx54orODLxNRBF6s79l9/eT1Tz60fegX0ECPc7ex
+ 16P5tFksq2/UZZTdRd5UllXEpZ7NySbmvAG4W+4tX3rZN33YOZ6FHzDJTkTmD/fDX7O3f98HX9ox
+ zgU/Jua43XEBIELHIYNyo8MC+tkIrfxsGrVpwbdpLNb2ELgZf/rbxI85Rku84u5jo63Hzk+7VHC+
+ XfKf64x58WcjSU53qB9x4g0FcSHXdHoqN3yA3c76evxWiK+/5Wsffj0mztoL36o5e69DP/7ubzm0
+ c+gt6PPHae+hN7xJvTnZO9qMflDXzjax9FE/EoSMQc3JCdsTo+0S/KlP/uBA1ya+j+KjOa/0o2YP
+ WX7kfre9cGTwNYsfU5bpF4JgmYcdaj7ycBwRSMVBO2gMtJPgpaA8FnmJB7rZGPYzfLNb8qe8F955
+ wf/J+Mk4Mdal/LweGSd18idQ1scedBht1GNS5eEnhVjfkEPRB8SbvHJCRhGstlZPXk/bb7nglz72
+ zak5G0dX52zL/LXXX3ToztWPo/me610TZXBvqCmzubwXSzFvLjT1bK+qydnUgqn5ksclNs+uzUQD
+ XjJsmyTmCx8w4QRPR1aU386XOPLHNfjSjpvJ7gUJojlud1zzOLQL04XeJGQfh47fRLIuG8Ys5Kax
+ WNtD4MSLeGcjYMpn03gq/MEj77Rvl/OKwwjzlPOQIWy4Q/3wIT3LnjgBAsdpRa4H3HiZz36sB8/r
+ brnoyDdOz7r8FmPOnrs79uzJd5p+4t1P3dpe/wx+Uv1x7oXctNE0bH58cLMNPZpmtom7PopX+mwx
+ dWU/BQBsmz4+c+amzyWQXwrk08B4SpzFEQAMjXdpt/AzP4RItylfz5tf98D1edcn3DkuQ3ffx64V
+ bmw+iED3LS4ZeMXVRtWDPJuuBX+eEqfkp/MH7y4exZGRwyBk9U2K4ol4I0Hz+NBi3SirAvJjGrMi
+ /108MghW6d+/s9r6+tue84i3afIsuZ09XxLiL/9t/fi7//rWsZ034ueqHpe94marnlMT9c+Eu5sq
+ mwnNNnqoui16D5vQzWh7dtOQsyk1q0Ci23h4hNzHWfOGA/GXgnax+Zf29FunCsNs8TMqybbXLhHe
+ 846P99hkgedMOmRWmw6THj/1XXb+ES/NRScm4xKfI31EXnzU1fNMXI4AVJ54nvnbZBd48eaNuOBL
+ e6oyDzwJaRn54aPnMfSAQeF4og4hh6IP4rEf0eNGP61+8kMZ33Pd3n7j+b/40W9P5NkwssZn/vUT
+ H77s0M5tP41V/srxhoG01QsuAXqTTYAejebTBDAcCQx5tz7KV/pssWYniOXyn9tI/MGBgXrDPRY+
+ pnscA4fNAjuns7Bb+NmMs/28HhlHhjPnH3FYLzkedw2x+aPAGGJzA0gviruP1DOfNtofkRuuBf89
+ 8tPpg3dXvJyvyPEY8ji0kiTiDXg/tLjMlMUjP6ZxPaI+YV4VEp4S9a4PPUF+3W1bF//F6Tln/j9H
+ Fl2ZBT4Dxx+7/plb6+knsQGuyB4bm1arzVPCibtbvFnYFNo0VJdCvaNmg8XQR91CUXo2VfB0B+Uf
+ k2pZ8YsQE+FXouMqfIQJx6JTXCYIeQNf4xEo5O53Iz8AY975z2URidJxyCBdaDMKn/lwhFZ+N43c
+ jCrchrFY20Pg+6FW/jbxY07+N/lptPXY+YNPOs63qw4hrjPm6xBSplrOSIv6OGQMBBoP8lMDrIK/
+ 3PAhDm/y46N4IOeF37f1kdXhQ887079EPHO/JOSXgK+6/nu2ptWvoZGv8KbinuAhwhG3ehiy9WgK
+ bR7jBWyyzdhsYceuKX3QshnZXHIkQMmylz75qceF5k18H1uYww/tS0G7FOl38LK5tSk063mbDZwP
+ VQCoCN7hn3OOq9ulQ7HE5iYyr2Fv/10WD4CzMeokXNYhR5JWHcPDCfDkmfGnv012onc9gt3+wn/y
+ UJd5qE4loz74EK7izPoCBIXjifUNORR9EI/98M6LPGEniX6GTAQvfE/2iunYMX+JiN737Jl3PzMT
+ ++X3nbv1B3e+arW19Q3+DBZpeu2jedhEWFB3mVd2pndTeC+WYt5c3BRqvmgMEoYsWjVxby7z7NpM
+ 2eSyD7+gzM1ReJrzCrz0Lf5wX3YznC3DfL65NvIrj47bHdeMf1YIlCE3ex/h3XXZMKb9prHFXo+B
+ u1f8FGl76PyYZty6ON+u/fKG5Tqw7jzi1j9965ErXjR99erOFuoZ8Zhb4IxIRkn82DsuXK0P/RJ+
+ XdWz+mmiTYnVnG1O7R6XIM6K0MchU3p2AXAauIm7PkpX+tyM5A07QSyX//jMmZs+WOTf8IwrD41A
+ lB/rbd/zWtgt/JR/8uCy3PMZ8wboHjjjPQ/cLD3bGb24x+bP9fAmzzptGHOd2qg8ybPpWvDfIz+d
+ P3h3xas4mG/EE7K/XMNshZl6QFkuKPLLOS1j8nCUXgNYiVvyBEBeBw/Ecc15/vOt5x35mumrPuPW
+ ATj9n86sLwnxGxZWO4d+FV8GPovN0ZvccjZNNEPritE7rRnUI2y6aJaQbTaaTG0wbxY1He3k1wDx
+ uGnhHx+8hp7qgc/5whvuwBkH7R0IDbVJKq7Gaxw1vgbf8O9NBL1g5h3+aee4On86FIt3nx3EfdiT
+ b56XeMTKPOx2I77ZyV/3oAWL+iUuR+Aqz+TPcZNd4DGMS+vZ8g5NxunIwz/rg49ZfZQZ9TCEwvFE
+ HUIORR/EQ1cRJp9EkH7tZ9STWF7Si7fq+azzbz/6q9PPffxyI86MO2t8ZlyvfNcVq0MrHlaP1xKj
+ ebXqHHHpTQJdMN4wMGlg6BMezSd7GRoY8u43EpnTgfjKzO3reXsQT/nfpTcP9Ya3uDGR08NPx/W8
+ FnYLP+U/CHfnM+wdR4bDuriOngcOE7O4DN19j82f6+HNG+UHWrx9zHVqo/MmcsO14L9Hfjp98O6K
+ l/NZV+JDHodWkkS8ATePDxvWTYcMeeSnBtQjD7/OEzi6k373obWLx/H/znq99WW3P/eRH0m203k8
+ M96wrn3356AB3oA3hcdXc3mx3AVc5GqK1gzoAS1yDtFE+druXqpuU4/O+cvcD6P31FQ0cFOyRcxT
+ 9pJpFk1NCJJIuY8tzOGH9qWgXYrmK3vhCPTFeZsN3MEb1mIdWCoV1OuherX6RSU1uM7A4aPXFQSh
+ xwCF6x7rG3Io+iAeGo62IE/YcV5+hkwsL8UhXtGFrInHT9P2G8553Uc+x8jT+46MTvPr1dc/fbXe
+ +Y/4ntWltZhISW8q0XTMUG8S3Kw8FCTjVg/Uu0k2v3EAKEDwsGlCVpfagfjGNEsbdtS7nYf/kMUr
+ vW/iFdxLM4s350UbS0eHEY/TWdgt/Ay+juv5jHlFZHHELX/QRKIpLvOQbd5yHWKsQ5M0+KO4+5jr
+ 1Eb7I3LDteDXbkdgp+Sn0+8VL+crcjyGPA6tJIl4A94PLZUveeTHNK5HHn6dBxrhWS/qdx9azrvx
+ AK/1lt36xqPrra86+rwr3pysp+N4er9hvfpdz8Fh9atYvEv1GSYWUYuuJmiLLJmL2ZrBQK2b1lTw
+ 1gwhqzl32aMLB311ScDUVAQ4Lrpw15Z/yY6nGic2mdAKaBGvFfZL+4qfflKkn8HL5u6HSfmf8dte
+ fMGbcRvmPOzO/OlQknef/OZt2Nt/l8UD4Gxs8TBe4XMkaeiTP+UZruE5P+NPf5v8iD7ySgfEBV/y
+ UJV54ElIy4gXH8JVnKkHDArHk3kljxShz3o0O3sQQfrd67CSftA5zlk9pksPTzu/et7PfvRZoj1N
+ b6fvgXXtdd+wWk8/j8PqQtbebyZoGi5SytF0pYdi6DFrINXsTcnWm4f65BVgZs/uCLskCBkDetSE
+ thdADsq/9MlPPS7Em/g+io90vBgo/dC+FLRL0cCyF45AX+VfCdu/NgHNBDNR2YtuUVdSVRy0Y0D2
+ SxWvYU++eV4KG5jZ2OIpfLNb8qcsP4nLMfzP+NPfJj8tXjz6Un4t75rOPD06T+DwIX9Vh9TDEAri
+ XGfWI+ujh5CzHs1OPim3+snPkCMs13vQhTxwsR4XrraOvf78n/vgN6Td6TaengfWq971HVj+n0ST
+ HM6C12c6Ni0m/RmHzYFVLNnz1mOyHgImOO3BTruQ46Fkm6Ve9CYQPmjZlckjyJBlL73jCQbAwy8m
+ nI9H+wtUxqWuFxA32oXfni/VwpHBF3nNx3iS3/ZSBO/wT5Tj6nbpUCyxuYnMa9gzsHle4gFwNva4
+ E58jSUOf/CnLT+JyFDzyxPMJ/QS+uPlAf8GX9p523VjZISM/fAhXcaYeKChc96hDyKHog3jMK3rc
+ yNPqJz9DLpTijbBp1eInxv75sDoXP1L9k+f93IdPy19Tc9odWFuvetc/xv8J/Mf4jMH+qMsimoZN
+ hlnJsWgEWfZoPSbrgfOWbW8e6mXnB+Gt73xkx0UC4YM2tontBcCtxSd98lOPC/Emvo8tzOFHuwA2
+ EXi4320vHBl8zeLHlGX6hSBY5sGJoO9xCxgK5ktQqzNFXj3+ZV7KB5jZGLyyyzrkaELex3UCfOUJ
+ ixP6ASbjLQfkD/9pT93AZX04on74EC7r48pUXR1PrC+A5tGDApQ78nc7OpTc+gJyvalJ75v4Bp35
+ W/3sn37xBz8Jj+FHzvvZD31XozgtHk+rA2vr2uu+H2vwHaosHvqlzyhcTC4SFPkZhk3Hy7JH6zFZ
+ D5y3bHvzUC87PwhvfecjOy4SCB+0sU1sLwBuLT7pk596XGijxPexhTn80L4UtEuRiQzegzesqAvL
+ 2+uigu2uN2unq+llV9OuLyocMI7gwUfnH3qpta7Hf3PPOJM3HMpPi1N+hlyoZf+1+IkZfQuBxxV5
+ VtPLzvvZD34/9afLxchPj+va6/4P/FPL/9TF3h1yfQZT83BNkFosGtHSQ+6fadwLLsGAR/NpQoa4
+ oYlCLj+lJzuu0guNKNwUmjcA9+Z/l14gxWd4xsV4TF/zCsd68zdexiF82C38jPw7zvbdEXEmigGy
+ 48h5jFEW42Le8Pk916FtIqLFt2nMdWqj60uLDdeCP9e9NilMNvpr/KoLefoVvLt4hEtGGIQ8Dq0k
+ Cb5I1DyoI/Bql+ThCLoYUBfok1ZURRAS9eZJT1YseORn4CqP9KsV4D9Bvf72O//0Z/6zGdc+FViX
+ /X+96rrno6qv4T8P78XO1V2E7y7QptcSh+xNjTTVBdz8ufkwZ6BrMNN7E6uJyEOg+GowT0yLYKln
+ U+GjNn8EUP7LfeQh+/ALXdoVPmCKI/Ut/nBfdjOcAvRt8JlwyAGqPLJOzn/gWrwVRy8EN1/Wr43A
+ KtxNowtNwyhwGyOs2RC4e8XPjDiEzo8pxq2L8+3yIRDrjHnKSIBPQgWN68F+0ISAxoVc0+mp3JjP
+ fsiKeiaPPPgmPdwWjx5iQu5oZ/14oAz+Nf796Wn9wrv+zGNf0yj35SNz2N/Xte+6Bl9z/wr+IvNh
+ r8bmkL2ZYjGREeWOlx6LMzYdQOoF8w14O2RKj4fZJjaP7aN8pQ9ad4XtBKGf5n+X3jyKm4/k05CH
+ hsRZHAHA0HiXdgs/I//O3/MZ8+a336XdqIf1GW9I86FvHsQ3Nh/SAVJl7mOuUxvtj8gN14I/1/2U
+ /HT64N3Fw/mKHI8hb34zot5w80T+SJyyeOTHNK4H1iNpMTaCkKg3jybytuSBrHWTH/sz7zx+x7E+
+ Nq22vuqOP/WYX026/Tju7+9hXXvd52Oxf5GHlYoaza5CxiJkUXPxOdaicPO2xaJi6GFp4KArfTRD
+ yPOm6vxF7wfhs5XdFI6LLoYst5IdjwLgreWXdrN4iWE+9EN7ESVvipQHr3EE+hp8A+fmh14w8w7/
+ tGuHTNa94qCaAZmPaF7Dnnxjc5V/YBR+jmEvu8TnaELex3UC/N3yA9aMtxwov5Z3KAZu1E/1wXoo
+ n6pD6mGo8lB/vL7KejQ7+aTc6ic/Q46wHH8sQ8XR6lf1EJ0QZke8lEB7eL2z87ojr/3A05NzP46K
+ dT8GNr3ynY9Fo78Zv874IdoMKH6NGwL2G0A0BfSUOz4/0xhHPUC1eJbdo2gGNoXsRWRgyLbveqh5
+ lT5o1QZwIEcC4GY7t2AEUHpiCOe8HmJgPJ2GeS1xjTfsK89FHDU/w/V87H/EoTAUl+No8SkO60ee
+ IfchNn+uR20eYMgm3j5yEyE+4WIUP3k2XQv+e+Sn8wfvrngVR0YOg5BP5zcsZaM8pk+uDx2++s7n
+ Pfr9vRT75Xl/vmH96A2PQIF+BcE9hM3CZp2NrJ6LW3WUHu1fzRV2ibM9zbAZ0rwequdCn/6M17YS
+ X7dnXGGX8YQs2tiGjksA3Fp80ic/9bgyTzym3SxeYjIOnVohi1fhzOwEF44R+Rp83vyWGRf0gmVe
+ qaddi1tATFUcVEfihMbV41/m5fpEvMBLDl7ZZR1yJGf6Df6U98I7L5glf46b/Ije+Sa982t5hyLz
+ MjPDsodxWCWPx1ALt5/fsCqP9fSQ6ejR/zj9wu9yD+67Sy26r6L60Rsunrbuwj8UsfVkNV1uhj5u
+ CNhvDjyM0GTQ6w2B3UK7kJPPekzWg2GG0375xhE8GpZ60QdBDYpDDsI/NHLoOClZzviCxXFT6HEP
+ sRwov8LFG4lwLV/KCz/lf8a/zJduzCPz4HW5ch5jlIVhZLx6Xt5yHWLUOgAjvk0jcIqzjeKn/aYr
+ ePOQyfGU/HT+4N3FozhYh4gn5HFoJUnqIbNcwOWhxfJSDkUfwEoc9Z1HBpqw3odpIqyAQcBsP+pI
+ feWRfiP++TyBwl5357mXfOH0dQ+7Wdz75La/3rCu/eB509bRX8QPhvqwiiKzWbW4ObJ4YzVVylz8
+ WfHdFUOvxWzN4FUNPYbSpz8vcu+COX8Lg/HIXoOajk+Oiy7YfZbltjULtbpafmk3/AUm/dC+4idv
+ ivSTcdMrFQT6GnwD500EvWAmGv5p1+LOulccVMMw5wnHNeztv8sKG5jZGPbCZR1yNCHv4zoBnjwz
+ flhK3mQHXcZXDogL/8lD3cCN+pF5HDKeB1JUcgcCx+M6MhDz6EGByR35u50ZcA+70OehJ3XcxDfo
+ zN/qZ//0SzoAy0/uBymsJ+dq+uxz7/z06ybuyX107Z8D6yVr/JjVLf8OAV3jmrIJWFuvwmxkAVX0
+ UUm/EcRnlGaXONubb7ZmwZNrSJyajxMAJq94Qh5xtTAK71bINxvbK2DcWnzsWlxDT7X1fd7xbPCj
+ rhcBDTO84ktexaEuJav9Vfwl2575AqEEyl5htrhVKBkaRxPtNudDkdewJ988L/kHZjYGr+wSn6MJ
+ eR/XCfDkmfHDUvImO+gy3nJAXPhPHuoGzvlaRn746Pyqo/C4QeF4og4hh6IP4rEf3nnRT6uf/AxZ
+ EKIUL0fRhTxw9m+91ku8thtxE29G9+/0ZedcuP0zE/fmPrn2TSDTFe/8l1ur6TkqHoozPiNodSWz
+ mv7MJMCshJ63XkuMZvPqkdF8XKPBi0kDQ2/Z+vATeAHF1+3JT1nmfghZtG7fEW+Th958waB4M78+
+ Cr/0o10ASwXAeDMdA8teODL4cn7Dr2Xbu4dNVPaiW9SVVFkPPmuX2C9FXsOefFHPmHf+ES/mnJ/t
+ ZZf4HE3I+7ii8HvhOX/SfsCa8ZYD5dfyDsXAtXjh6Ux7w9KbHgqI/7723Cd94IerLg/wA9f0gb+u
+ fddfwcn5r9kC1WTRrP7M4ab3Jtkccn0GU/OAB/YdLz2bmLz0Mxwp/wGP5tNEA4ZcfkovcxNmmDTL
+ TIjTZYflf5c+UImPsfBFgwf5iQnhel6eL7uFn5qf8dt+FCbqx5DKTdatJiIOx+2CxvNyiM2f67Hn
+ YRLupEd8fRQ/eTZdC/575KfzB++ueBUH6xDxhDwOrSRJPWTAzZN5WQ5FH8Aah22lywcRiNh686Qn
+ K4ALmKJDXFpvxZf+m9+Iv/IL3EgrA8BPlq5X33r06x/3b2b+HgCBeT2w1yuv/yz8RsS3IJALajMh
+ olgij4vmJS6bchZ8zBfPEqdVxJqSD4ZUjwdOWLY+F9t4AcVnO9uzaYIHpt5UNYCOfuIwoD4clH/M
+ JI/Vzqv0CpB8LV7RMFDP9/gjPOEDMHB68m3wDZ6qByGVZ/p1YZZ2wgnuuFMmRT9sZMfNw3niN41Z
+ yE0j8LuuwN0rfnaRY6LzU0wM59tVhxAQykv6zLBoXA8giA8gWIALuabTU7nhg+3MSj/Bs4xj0IWf
+ mADOcdod3crvbH63H9ErH/71nem21aH10+/6M0+4XvMP0O2B/ZLw5R+7YDXt/Ds09O7Dqjapi86m
+ Z5E1sliU2+V561X6wCfO9jTLTQhjA8UiOsjWh5+QBRRf6j3SXnZkKH3QuivEJwdNllvJwWPALD/n
+ Y73wmW76oX0pGG+KBpa9cAT6GvkPnA9V6AUzUdkLtqgrqSoO2sGQcruGPfmintCXfzwr/BzDXnaJ
+ z5G8C/6U98LfLT+in8cv/vCfcTqMxHmUf2TiT07Ojzg8+c5B5WHfRR1CDkUfxCPrdCOeVj/IxSMP
+ vrkOLpPi1fo0O8ieZzx6kmHVSfUlPvjolzD5x797uJouwM/C/8z0Sx+7wIgH5v7AHlhHPvGvUBW8
+ YbGGKBZH/NGYMovLYvfRBrzXJT0siyfwXhzzk3joYWpH4iB86NNf2FnR9J0vQpA/zgdtZOK4iLGD
+ 8u9uUDzBAMPwS7QC8tjCLAds2hE/7VKkn2YvHBl8lf8Zv+3FF7zDP+0cl+MwfzqUxC4PvnAzi3+Z
+ l3jECrMcWzyFb/VY8qesOBOXIzgrz+TPcZOfwGMYF3HBl/FSmXVx5CmjPviY1UeZUQ8jlYd61zHl
+ UPRBPPbDOy8TpF/7CR4DjFK89ldxbKqH6EZGVacIVIO8Mn9S08C8+CHuJx+55Rbs2QfueuAOrGvf
+ +SLU44X+DIXasLioQ5ayZMxz1YXLkfXifLvMY5zXxHaJs33zMxyJRXRcI8WR/ixr0RRH6mNe+Aii
+ 9F5iNiefHBcxQ3aelJNfjyPPNu94iAtM+iGfE4WCflKc8yqO7MLgtdnA+TO2aIgQUcZtv4u6ApUO
+ xUL+CpDKaTrv0Gr6zsecO7356RdPz3vYYai7P3nZtd60Ey7XOUcreB9X49vUH1U3WLjeG+rT+DO+
+ ckD+0Kc9dQM38iHzmf6GpXIgz62trRce+an3vqjqdD8/uOr3s9OJ37daH3sL/o7gBfoUtKE5skk0
+ Qq/PBG0sux77kifk2kzMFoTVzCFv1scmDXwY9sE80kcQ8kd+waKJzWOEHZZ/hyMe6TfFD0Xhac6L
+ OA3pKOXwG/oZTha+Db60i08WFiuBOW6PQ6viGIX42ocdmf7BY8+drjhvfD78zZuOTX/zvbdP1926
+ wyNxfohQxuGgeDeNDnt+D9yyL+qNAeiT9jNnttT5MYPoxnw+Y7Q/1o+HVuRRnilnWtS7jwMINCwE
+ qEE8oh8OhRt5NR4BfXMdGs9wbDrIjk90BuKxeImP+Fy3CEDzc15obju6Pvzk6fmf+SEY3a8XY7t/
+ r5e/9ch05Jy34/Xys1zk2CyIwiWLEZsu9W3VvfobIvbmisWEnnLZhZx88uNVKb4Bj+bThAxxg0XI
+ 5af0UPMqfeYRDjivy3IdAm6f8h8gx00h7ApfNHhQODEhHPLG6Lw8X/LCT83P+G0fBI429DJXOHP+
+ nm8YTE84f2v6oSvPm65+UP3maqnyhm/cTq/5+F3TSz9wx/Spo9ziSBN/do3cXMynjfZH5IaLm4rx
+ LkbZA76LH3Py2/jT34w9+HbxcL4ipwPL49BKFs7jCrh5Mi+alUJ0cic4+zhoaT8IQqLePJrIWxAU
+ Dx56XpUHAS3++TxUGbZwJPdEjx/Pbz566OgXT//zk+8i4v66xqfA+8vjkSP89cY6rNhkLAKvKhqe
+ q5livnAN7yahpS/zRDNgSnI2ccnNT61Z+gdIPd+aIeTeTRUn45be/hVPyBiwxHbguIgZ8tBHnEFR
+ eRK9rIvDpMJ+ySei5E3RwLIXjkBfs/gxZZl1gyCYectedIu6kqrimKZLjmxNP/D4c6c3fP5Fex5W
+ NME3bqdvevg509uuvnj61ivOnY5ATrfDH/1HX+SY/jjm1eqzCV95As+0juuH+uBLeufX8g7FwKkw
+ YQccPuSneFIPQ+aJ+TpkQg5FH8RDV0UTkadf+4n6REzGs262qzha/ew/eLkvxEu5x01ek6p/CSsc
+ 6Wd+n37k2JEfMPr+uyuk+83dK9/5dXizep2KFMXUZwAEkCWsselVRRaZ1VSxd0dsnmgK8i3w0nNx
+ yCs9bvVgWtNTb9zQh18NS33EIn/BQ/7MqOJ1ZuV/l948ipuPYVd4mvMqPzEhXM/L82W38FPzM/5l
+ vnST/Ok265bzq4mf7f7CI8+d/ja+V/Vgnj5383r/bdvTd+DLxP9y43ZG6THXqY3Omwu24fLCjf4I
+ uTYjTBidlruPjV91oV2/9uIRLhlJaOZxaCVJ8IVjx4M6Aq9lTB6OoIvB/Ze0oiqCkGCPD/LMriUP
+ 5J5X1UN2I/75PBiDdvB7wrjmFzzb0/pPbT//yp+fxXEfCoz6/rmuffdjpvVdv4VCX1rN51WLTRjF
+ RTRZyhqBU7Ha6NVdhB98s02pRQxcEA49nKkXNulzsRleGQovN4xT8Th8FTEUpWdT4cP2RJin/GPG
+ 7tM/xmwyokkUYw8Tiprv8Zff0M9wsvCt/DeeXfyYmOPa+sDu6Zccmv7ZVRdMV114qDGf2uOvfPLo
+ 9F2/c/v0/tvhNNerj5toQ7/sC2+qqCvslFcfO68KRsSGq/NDXSjOt6s2MRBcFcq5zoQNd9THZjfQ
+ OAECR3x6KjfmG3k1HjqIS3rwBp3jaPmVfYVnB2N+tx9RK58FLxRph1+vfOOx1Tl//P76fpY7P7O+
+ r0Z+3+qcc9+A3xz6+doEKMJshF+tYR9RbC9CrkIbN8TpzRWLSZ5cLI4hJ5+WphymPhaFftl8spch
+ brAIufyUnuy4Si+0mrPsDMA98iY8M6Zdu+SXcszbX4nNT9gJ13iXdgs/gy/z3pQv3SS/g0u7R553
+ aPrex583Pffyc1rU9/wR39KafuSjd04/8KE7p5uP4Rvz8F/rpV2IeGLz7PLW9Yw75NxU3opajayG
+ R+B2+enke/FwvpjwGPLZ+IbFcu3srN987H1P/ILpJasdyvfldf98D4vft8JhxUTYRNyMszHmuUWq
+ FYCTvAfeTUJGX+KDRTVp2CXO/uzfvLCrh+q5sM/4Il4CxdftmUfYMYTSZys7E8clgBxWfMrUfNTq
+ yrpASLvCk45X+tEhGrLyznQMLHvhmKivwTdw/swPvWCYV16pp91qOhdzL/7M86e3PuPie/2wogd+
+ RfnX8H2tt+L7W9/4iHMjz1gHAph3v0JWnlm3HAWP/sEz06K1xk12gccwLtW59VNosq5mZFhmHodV
+ xukx1ML1N6y0q/UE3HE2O/mkPOpgP0POgF0Hl6ny3FQP0WVFHH/h5ceM9KOAVLnkHX6zDopna/X0
+ w0947/dkLPflyFjv2+tH3/W0abXzptVq6xAXZ/kZLT9zZwlrjGJT70Vt44aIzcMmRVGhn9mFzCKX
+ v+FIbMMN7Y0LIui5ePRv3rle5k0vmOIoO3sQT/lXNwRvUHBQ3H7QbOEZL68WR8kRb+UtWNRh4Wfw
+ mdDyMt8exzR9Hd6mXvqEC2Y/piDf9+Htulu2p2+/4bbpv92Cfx6Buz4XaJPPrk8cRq038KrLpjH7
+ oY0z+uDdxcP5rCsNQlZfpMh5eY6B7UM/+NAYsnjkxzRclc08MiBp6M2jibwteegv6iA7+XccPf7K
+ j/YCxpBy5NHjN854zuNp+9g0ffb05668wdb3zf1+eMNa/2sskg4rpsDk2HyzMea9WFFK4CTvgQfB
+ rCLiy2ZofhJnf/ZvXoDqgfOWicumoj55BQjZZswj7BiJ4qxBTUWA7QWQA/MD15qAWl1ZFwhpV/hM
+ N/3Q3oEATT8pGlj2whHoa/ANnPOFXjATEcfvT73+qRdPr/qci+7Xw4qRfvZFh6ZfedrF07990vnT
+ I85FmzLBfoWsPLNuOQJXeeJZZcpxk13gMYyLuOBLeyrlT6hRP3oYh0zGmXqAQeB4su+TR4rQaxCP
+ /cgJbuQZfWQ/Qy6U4iVv8LT4ial6iG5kNObTjxnVn4TJf/IOv1kH49hB06HD6+lf2Pq+uyuk+4z+
+ R/HT7KvpWvHjgdXME382AsBAXLIYs1nCjs3j1dgccr0pcHHJt8Dv8lcOzTfgtHecQeTIBDDvXA81
+ r9JnHuGA87osO07mu9QHKvExFr5o8IAElR9NhIu6SjSw7BZ+an7GP8/3ksOr6Xsef8H0Fx513oQf
+ WH/Ar9u319MPfeSu6Yc+fMd0J3+Ya3nF5qz+aJuV4c/6qssb+nFGvRcP57OuNAhZfZEi5+U5Bi4b
+ /eFDY8jikR/TOF7igpY0mYH8UBo8UudtyUN/WOc6XCiTTTz2RFPHlfOc4GyTY8K4wQfDBY5/SXr1
+ F7Zf8MRXSXEf3Bj1fXPxVx1Pd34ABXvoLgfcLEx2MdZmgkFviSx6H8u+kwdf8XR+4mKNhj4dRRlm
+ +lxshlkKBSZa0uEh0yC986kB7qDHh+0FwC14CZdkHmpt3/Tya73qEWEKJzgmmqLHVXyJ04RvI38T
+ pnwILzJ/8VHnT9/9uAumy07hxxSai/vk8aN37Ezf8/47ptf9AX5WMQqvTaR1yPXCCO+z/ulyLlgf
+ N0Xb+cNeMM63qzYx1xXzlMfKVpiaz8MqgMbJT+Bor8j1gBsv89mP9cVjgFHg0foDXnHkBFmo14hb
+ PfT53X6CuIbiFU/WOfJV3vpHWT+xfdH02OnrrrpPfrXyffcl4erOl2IjjMOKxcOlzaviQs4x5iN1
+ LxGb8Dj43LTk5CVeWOTmSz+Js2yceWFUD5y3bHvzUC87PwhvfcxLT++41Bw1oCdMaHsBcGvxSZ/8
+ 1ONa1INTwx8lXOmH9hU/eVOk38GrOKKZct5mA8dN8oxLD09veMal0z+56sJ9eVgx9kfhr/q88skX
+ TK9/ykXTlfHjFKpv1i3HyF954nk2aqGjPgs8fdSlgvb1sma+nlln4PDR6wqNDOQOCq+j+Qg0jx4U
+ YK1ftzMD7mGHJ/sZsiCcD4Li0cPA2T9xpFOkMh3zUlhPPsZBWIunf/J1/IkTUHz4OcuHHr5leqmE
+ ++Dmqt7bxPxG+7Ttb7Rv4o5iqjosXitulnI2Qq/PEG1U0WnXryVPyG0VojmyuWAsR8HDQeFQ78V2
+ eKUIfZoxLoevMOQv0sHE7uYyTzVJuU//GEFY+shvyPJSDtU0Lf5wL/uKh3EET1jP+B+BQ+D7rrxo
+ eu5n4P/MnUYXvzL88d+/c3rp+2+fPoXv9s76A3moLJvGXLA+bso79PVmkhjOt8v66BfMU+bKOwLK
+ Xq7CaUJA40KuaUVe5vHAPkNfSOJoGWJd0rt9jBNhTABV9hWe8xjzVpRccSQusiJv58tKt3mot4+t
+ VvfJN+DvozcsfKOd/1ewX23zcRW92WIErjYlnlU64HPchIdBZzcfLIqH+ly04Cfh0Jcj8Yiu9Bmf
+ 8YpIfN2e/JQjjNJH/NFeytMe5LD8S5/8wZF1gZh2hV/6ob0LFLwpGlj2whHoK/nOx9d/L378hdNv
+ ffGDT7vDipnwr/m8ED/+8LYveND0rY/CX/PhxKJ+Kg+wszEWTPVZ4F2huGs9Wz/VtOuLFdKM6wwc
+ PrwcSz1gULjux+urjDN5w6H8hB2mdn8SNM750E/wtPiJsH/rtS/E2+fpl36Cj3omVDiKLY4AMh7z
+ hZ3nDx3eWd8n34CP8OzsXrnnN9rbYbGLN4rp6mo1lXQVFQZa/ByB92eQMapIUbTiD97i6X4IYrbV
+ PLl4nI8yzPReHKdRirCP+GA3S1P+TOf42cRjkTOAis/hqJnwGIbhV6LjKnyEmfFy3oVKXIoBjLyE
+ kwPfnvvw86eXXXnhdMX5888pDXLaPfKv+fwN/jWfTx3NZd485oL1cVO2oR9vHAHifLusj3XGPOVc
+ Z8KGG6wrPqTn8iROgMBxWguqB9x4mW/E0XgMMAo8agfATc8HPMkPB9qFWA99frefIK6heMUTfBHf
+ bj/IZDXhG/BPepUI7qUbY7j3rpe/56HT4TvejQ0yvneV7FE8bT4V14snGRgVo4/Aq8hZ9D4mZxvN
+ E4tJngV+5ld63LRGLsGAR/NpogFDLj+lB4ZX6YM2M+K8Lmdoe+YbGZc+UCnHWPiiwYN6MSaEY7My
+ 7sg7Rrfg8HMVflTghz/7kukZl927P6XuyPfH/T/hr/m8+L23TR+6AxsmNynrgT+uRxuXfQh5dlFm
+ XZc8wiUjic2sT04piij4rA6e7GualUIBUjRrO1yKBxrhGX/0eciC8BYExYMH9UXgKg/JI/75PHnM
+ 6PhErAnjHH9M1Lwe4pZ8+Gs7n9i+c/XY6X+5974Bf+9+SXjkzu9FzD6sokiVSCtaNkGNAGWSsYRD
+ pl00TY0kXfC7uK25wi5x0mvNWzNozbw6oit9LErIvZsqTvG3MJos2tgejksB49bii64YeqpHM+T8
+ 8EcOXOmH9hU/7VLMfGIEjr9N4Z981sXTG5/50DP6sGJ5vvIhR6Y3Xf2g6e8/7rzpYvxMBqvg9dhQ
+ nw31JocuFbSvV067rliIgNnDOKyWesCij/obltdXitBnnMlrf/bT+gJ+iychGMU36EJudshHdVC4
+ epL16K/II8JnPjKoPCnO+UhgHPl8dT7MPnTrnJ179Rvww1N6PNXxJL/Rnif+bITPLGGN+ZkNo3dj
+ GzfE6DeMWEzydbuQVUzySsatHohPN9SDRxMNGHL5KT0wvEoftJkR53VxNK/c7tIHKvEx2p/pTcNA
+ Iz9OCNd4mx0f/9KjL5z+zhP5f/7u3c9NjnZ/3//grp3pe/G7t17zsTsVqOue64ORmxhF6uMsow2H
+ llaR87l+NAh5HFrJQhyugHszpz/7F4/8mMZdgrhoFuaNIOioN48m8rbkgZz5EWL/Hnv883kCTcj5
+ eNLQ44+JmjfO984Hhu3tra177Rvw92IX7+gb7ZVkJRuphCx9NAk3W+IrScCzKXIsXMO31ZQD80Qz
+ YCb9JM5yX7RyFPaWHUfEhQCSVzwhj7iol7kfpM9WoGLkB0DJso+uMH9wtPxy3vFs8EN7BxK8KTqg
+ Z1x2RG9U/+eTLz4rDytW9PJztqYfxm+U+LXPv2S6Gj+2cR6+Md/rWn0FbM7TThcXNtbD65XTrq/X
+ M+3Aiw8vx1IPOyi8jsfrq1i/6gv7s58Wt/wMuVCKl35aHyz6abTLyGj0F+Mmrxn95sRnT5h3+M16
+ GUc+X3M+zK2nQ4e2t++1b8APT+nxVMZXvPO5q63p512tPShRPOrzxJ+N8EkrlyzGbJawY/Mcj198
+ uZjkW+B3+SuHfCA+6bEo5NFEKBhZyOWn9DJv+swjHBCny7LtAc+MSx+olGMsfNHgQeHEhHBRV1D8
+ 0QsOTy97Ev6CMr6xfnDNK/CxO3emv48fg3gtfuupVmNDP84stEu9SYWHMu3iyXDioBmHVrJwHpfV
+ aF8easEHIm96PYhO7gQnjnoa8yqCkAaPJvIWBMWDB/VPEM0PE3kwH3F4cjx8MGHJMWF7xx+GMVSg
+ Jc/5WJlDz9v+xitfZ+ZTv9/zN6w1/l/A1vrvKjlsnkoyilShtaJx8y/xLkYtjfSV9AY8AEXNB/uN
+ ZkiZm3nm1zjzAlQPARO8NUPIvZsqTvJKT++4mizaaDLHJQBuLb7WBNTqyjwhpN3wF5j0Q3s5Yh1W
+ 0/l4e/h7V148ve2ahx0cVlGq5cC/k/jyz7oQP3h68XTlBWj9DfUuG9W5r5c1uS5YIU1YBg4fXg7P
+ Dz1g6pMT9VUsZ/FmJF7f9Gs/jisRHKWXn+Bp8ad+tIueZE47z6cfTSsfKVo8edgmn0bquc/imvMx
+ LiqgX2+/ODH3ZByeTpXl2nc8G78F5/UKOoq0kSoOjzzxZyMMVLQ+RjMR58OgjRsciA/FU1HJ0+1C
+ VjHJKxm3enDNHX40n+wbMOTyU3pgeJU+aDMjzutyhrZnvpb7YhOmuP1gK9i3MJsf8xL/9fjrNC99
+ 0iXTw/G7qg6uk6sA/nri9GO/d8f0vR/E75fH97q0Lt5dgyD6uTYhNFo14WL9iA55HFpJwZXDpQUk
+ bBxaapfk4Qi6GADPQ8TmjSDoBk8irFjw0B/7R37Sv8fIJMzSX4sXmrSz/7QffAp4hhOd7EadBMBt
+ NeHne581feOTfs2oU7vf8zesnenF3HRKLkfGEkWqsFrRNuG9mC5NJpvjJvyS38V1HN0ucY4vix7h
+ GagQFR5kx5H5WO7dVHHSQPjIsMmijS51XMSwGVp8kpM/OFr90m74C0z6gf0fu/TI9Otf/NDpFX/8
+ soPDKspzsgP/Ujf/cvfbnvGg6ZuvOA+/7jk2axKozn29rMh18Xrm+gGHD6077XR5lKg+aYdMyNVA
+ kiWJh+ZFo7iiHzkvP0OWK+Hhr/O0+ImZ9RH6jEzzecrk1bT8KKHCUTv8Zh0YD/dnXsNP8lMDO3wc
+ Wq9fkrhTHYenU2H4t++6Bv989X9Wlgw6irSRKvR54s9GGGQJawReyXfe4/CLD0U5G96wHoZvJr/s
+ yZdOf+6KC1S3jfU+mLxbFXjvrfz9W7dOv3Ej3gPyapu++hI6b9ac0QRu3pSajU2fhwL3NBfKmzn7
+ uvHID+XcB3n4wU5XEYTUDr9AWGGC4sFD7jPq54fJiH8+T6DYIk9ZaqLHHxM1r4e47eZz/JzfXq3u
+ 0VvWPXvD2tp5MYM4mTcgrQYS2gtfSQLjRc9FOzl+8bJpuEjNz9xvX7RyhAfOW7a9ebLJotti6Pxh
+ lwRqyog/utRxCSAHFV90xdBTHX7xmPOFR3z86yd/4wkXT9d9xcOnFxwcVizqvXY9EX+Z+j889ZLp
+ 1fm7v1pfj6091gVP8u11wrrho/rOmtBjUF9Qf7y+6n3T+kp+Wl/Iz5DlBDfF0fuvxU9M7yO/EY34
+ R9zkNaPfnGSpCdFl/MFnLQz4UhHX8JP8VICXeQB26B5+L2t4So8nO77yHU9BDG/FCe4s+5vQJo7Q
+ 54k/G5WSW0DFoyxaLHLnjUXYTO+inKlvWF/7iPOnf4i3qsdeuPnf/NtUk4O5U6sAf+fWD+N3b/3g
+ B2+f8APzY7ODTv2pXZ2dismQx6GVfr1puVdp6M3M7cK+thyKPmhz20/nkYEm7Mc8ibDCh4K2CSbs
+ Z+Dmh4k8hNnisM2wlRchnujxh2HZ6yFucz+YzPpgxP+iW++st54+fdNVb+02J/t86m9Y+NVESBm/
+ qp1FiqLkSO+VbIQS8l74ShJwL1YrYvLmuIFfvLAsnogr47DfWMQMz44UoMKDbPvMx3Lvpjl/S1P+
+ LIs2ulR+7QH3Fl9rAql5a/ml3RPwmf8/PvNh008//aEHh1UV6r59OBdvsn+Tv7/+Cy+dnodfD+31
+ tM9cF6y0JixjXfHhdordXnrATqqvBBMPiWO78EkE6Xevw0p6+Qme6P+yg+z4SKcnPNBPzqcfTTsO
+ wloe9YYYdtbCTnyUlnyUOev6MAAcGPj1w9t/h7Oncimku23It6tp9VY6V7BRHEXXgp/xcp7Fwagi
+ 9RFABuKSxdj0xZt+ZsQWxAuGM+UN65JzDk0veRK+IfzYi/fFb/3cUPKzZupNNx6dvv3dt07vxve5
+ 1KfahdmxKEPI49DK0mi3VmP7cMj+p1l0vPraNGbNQ6TzQCM86aLPQ06U90njgT73GzHLw4lMu+cJ
+ 1HTEJ4QmevwxUfN6iNvcDyYVp/NmAXiYnWQAAEAASURBVKHnW9ZV0wuf9N5udzLPp/aGtbP+Tnhl
+ bZ1UHC48vBisrhwtRdB74ytJ4LVoLDaexZe8OZJzwW+/0QzNLnHmMZ95AaqHoIPsOGbFNZD+Sm+c
+ Zah5lT4PX9ah1UNd0OJrzSJ73pAf/2/VX37cxdO7v/IR01/BuB9+RXHFd5Y+PAP/N5a/3PCf4pcb
+ 4gfm43KfV99h3dxO0f+1voBX3xyvr3rfRD/K07yP9jqsRn8Hj/px9J/7OnjRZ3gyO3AjbuI1DS0e
+ qCgcxTmftcQJKMPhJ/k5Dbvksz/8nPn2d8ngbt6Gp5M1vPa3nzBt3/Xbq62tw96koIji1LiJi0kx
+ WIxKqo/AMxCmWGPTF2/62cB/JrxhPfOh507/4ikPna68+MiGDA+m9kMFbsQ/oPh9H7htesVH7pi2
+ 2Y+8NHpTqn9j2h1NPf6o/Xk4ZP/TrBShz/7PQ4TkvBpO0uCROm/aH+QNHjzkfiNkfpjkTlvOE2hC
+ xydLTdje8cdEzeshbnM/mIw8xRcFwgF2bGd96Ml39y3r7r9hbR/9bhThcCbjICKJOGQii55DBO3i
+ 8ESe2SmnWCQ8O6dcNFZ/N95FGC4cj3FpT7vE2Z/9R83SkUiipoormyqbTMDoAvIM/qL3Q7iTPprM
+ cdGFm67soysoPxp/nea1X3D59Ctf+vCDw2os6b58uhS/6/4f4XeJvRG/OPCL8Lrl9UXf4aP6QpF7
+ 1598X0U7Vl9k+qNvOGM/sR8Swvnqz2zrtm9C7/ggcF+Unx43eaGSlvZ+0l3i8Ou8HY/5iHIcvQ7m
+ c32iQAGcDm9NO3/dwsnfFdJJw1/+2w+fDt31uwjwME9uZbdp3EQYuDzxZyPwSrKPwLMoG/1s4Bef
+ mobF4Zq0+EJOPusxWQ9eQ605/ZJH9jI0MOTyU3qoeZU+aDMjzutyhrZnvquJ/8PvxU+6bPq2J1wy
+ 8Ru9B9fpV4Ff+oM7p+96z63TR27Hv5/IvkEKuendCZzAHyjUf3hwH1oORR/24BGBCrTXoSXHAXMc
+ 7mP642X/za8CW84TKLjw8aShxx8TNW+c73M/mJN/590LtLPeuX197Mjj8fuyPtbtj/d8996wDh39
+ W9iY+EegMukYsSlVlBzpcaya/beicXMv8ZUk0Mmf4yb8kl982Qxyj6rzsJj5dbzmLUeKL2qquO7r
+ NywW/QWPuWi67tlXTN9x5YMODiutwOl5+5rLz53e8kWXTd/9+POnC/j7t6rv85BAXmpDHmbZ97Fv
+ rNh1WLESRROnXfLudVhJH+3u/nb/lx0IR9/rSQWnvvCKT9M6NKWoQ41hjvyKl3rus7jmfJkH7IQD
+ KBIjbmvaOn9ra/s70/ZkxuHpROiXv/XIdPjIR2FwOZdi9gbDIPJwaMHPKEOvNwwWCbKS40g+/JmN
+ Ta8kw74Xp/P7zSWagnwL/C5/5dAlGHAX1/YicmQCmFdNE7IXNXBZBoqZEXG67PApl+H7VE996MTx
+ 4DqzKvAx/DNkf/e9t07//vfviMTY0biisb2Zs++5d0uhDaBtJHgeIrLuBJrY69DyPvGZoG5r+4yG
+ 9t/8KrDlfLgLPO2A8J18+HDcnB7zAsRt7idxYefAhBTPev3fd+6884rpLz/taOfY6/nk37AOHfqT
+ 8OXDSjG0YLEp5TxHeotkynFPLnE5Bp+3tNZOfFl0HlIn4pc+itntMg7b98WBUwMVosKDTFwtSsi9
+ m6w3jvaVJh9CxoAlZn3G4n4G/nWaa59++fSGr3jkwWGlip95N/4LRD/6uRdP/+/Vl+JfzfbWOvm+
+ inaswyHrM++jvQ6r0d/Bo34c/Tf6lm3pDqWHMZ9+7Ff9S1iLp/ZF2FnLvheQ4oKPMmcRB3nkVhOB
+ 48Tqj0znXPAniTqZ6+QPrGn1DXQlnxwjyHyTmY303JKgmPIMh2w6z5JfMnkSl2PjE7foHRn5ut3c
+ r+O2Hkb1EOFBtn3EFbKAiiP1HrUGdMur9EEblToP/zrN38L3qa7/E4+env9HL9KsDQ7uZ2oFnoYf
+ g3jjFz1k+iH8WuoH4+99uk+8ad3/0T/ZQOqz3jduJ9dn9DVlHRqwy32TNRy8wdP3De1qX0DQKeLG
+ HfPpx4z0w/DoUXeJw2/6N07AwLX9R2uZwy75PBHxkA//AtK0/Q0yPombozkR8F9ef9F0ztHfX22t
+ LmJoNJqNLEYcJjmqKBHcLnrOs2iLsYp3qvzBVzydn0FE4EOfjqIMM70Xx2GWQomLlnR4yDRI73xq
+ mJ73qIum7/+8B0+PufDgxxRUn7PwdhN+DOIfvv/W6Uc+fCv/GXftE/dh7CA1UGwH1Mdv5nqIahlX
+ b0I8rPBBuV/ed40nGzNwZR9u1bh0A726W7jYx5zXDhdAbkQXeE6UnXBg2OXH8ZXdLr6RBzA3r3cO
+ P/Jk/rGKk3vDOm/7z+w6rLhrcXHTMtjZaIX0dTsBvg4RGKimPAzwPONNPyQNPj7yEg4WxRNxJc48
+ xpkXRvUQdJBtn/kMXvGUPuYly70JQr7qkiPTr1zziOmnv/CPHBxWUZ6zdXgQfwziqoum38Qb1xde
+ dlj9xb7Lfh19Fe2o48Pt5JrpOAk85nlYRZ/3mo7+zrZu+xJA93Xw8hQpP22fiTe90p7P9G+77tfx
+ U2s/AgnX+WxHouO9YWGrXjxtbf/p5DjeeHIH1rR+0ThRnQJlXhp5uFDO0Qrp63YC/CZ+lmrGexx+
+ 4VCW4ol4MKEQzGM+82K6Hjhv2faZT/jPRdHasPgZV9jRAwgefO6h6Yfwg59v+6pHT19y+cGvKGZZ
+ Di5X4IkXHZ5ef/WDp9f88UumKy44pD5VA6pP3UfqK58S2bYwdmO6vyl586ec9R39nW0934/ua/vR
+ IVl+Wj9r/5hRb1gMqHDkjX3B2dxX1GNf5jX8MO7wRzvhakL25hMMf8dw56S+LByebLf7/mPvuHw6
+ On0cwJVL5xR8okewKjoQOe5mcVLQ66RfjsAzkCW/kmcxkjfHDfz+DBKLSb5uF3LyyU855EOF5/gQ
+ ie1DoWIzDvPWZ5oMOAg+9DV/dHr4+fV3Nzh7Vl//7HfvnF7z8aPT33z0OdPXf8aZ+28h3t1F/oWP
+ 3zF909tvik2f/R1tDjIfSpbNHY3G/i/9ODyMoYINWoP4c7/JDnp2uw8bPcl0eciwz3kZp6eS+6El
+ fzOcYPbb5xX3eAnodo5Hvo6uj01XTN/8Of/dLJvvJ37DunPnz4FUv5UhU7STKJ6KlMG0IirI5jRk
+ FSHflHKM5Jb85SdxOZJ2wZ+LMCs+8ImzXy+CecFRDwETPA495WV874I5f9Hr4ZIjB7+imEvznz55
+ bHrKb948vfSDd04fwL/I/Fffc8f0rLfePL395m2qz/prHCLRn2PQYcUCjfbmPhv7yofZkLOYo7+z
+ rXl4DdzoW9Kp8WU65tOPGeuNKE4vb4c5H5HGkc/XnC/zgB155JZ+OJ/7zHZQ4cemVs+3tPf9xAfW
+ 1upFSoXJgydT1RsI5TgUZiP9qSh8iCvkGY5Bt/klf/rl6s3sSLngN49x3S5xtne81oOjHoIOMnFq
+ CvKHHA8l2yz18/xCOiuHD+GnvZ/3jlun51932/Rh/iIpXLlu77x1PX3F226ZvvWG26eP41+vOduv
+ 7Ff1p/os2lE7LPpRRfKOMx7z+Kj+bEV0nW03+nPD/hKdEGZXv8c6gTm3Ff2w/+lRd4lzPmupEDBw
+ cU4EkQfYJV/NZx4yU+B4K3pRSHsOxz+wrr3+jyHoz2PIdXLiOWWycp5ZzkYreB9XJDXDpR1Qm/jL
+ T+JyJGsrkkUvQvFEXImz3+bH8OIRnWrv4na8MhZft2feZd4eGM3Zdd2Cf9HhJfgHS5/xllumX7/R
+ b1GqH8rgOro/WPKfwT+x9bQ33zL9U/yCvLP53HJ9ooHGoMOI3TPae/S15nmo4CPryzlerrPtQDer
+ e+o9Dwn7CAhOC1d48WracVBROLb78Jv+7603LATCsD5vuvadVzmCzffjH1jbR7+BJ7dKlmOkUCc+
+ k5ezNtIX5/sVsj8TMLg5vvzAZuav49IPeRf8jse8ac+4Emd/FDOfckQ2wwSnPuMzXhEpjm7P+MOu
+ CER11txY55/GAfRUfPn3zz9y14T/g1+X1yPq19aN87fhgPtefLn49DffPP2H/3FSP+BcvGfKg+sT
+ DTQGHQrMcbQ391H0I+d5aDSZWF7i6zzq12YHmcsjXu4L8PCinefTj6YdBxWFYxRzPmu5DwSU4Zwv
+ /NGOPHbUcOSTmED8TNbquN983/vA8i/n+7M8SZVKjuBPma58siNpBs3kM/gcI55MaoZreM6Lt/GX
+ n8TlaMfJrNF+7b/bzf06XuvLUdhbdhyZT+SnYjO/bp+yzKmIh7NjePvNx6Yvw/el/iq+xPsEz5xF
+ /l6PqF9bt77OH8VfZfnG62+bvua3bpn4j0CcTVf2q+qmvop9pV3dy8m+GvtKh0aTs2bi6zxcj011
+ F512gExrPbR+9GNG+mG/I5LAUWxxBNA4AQM3zgtZyxx2yZd2GM0ns0p4Z71+AYIYhKHOYe8D6xXX
+ fSkMH9tPTPrOVHWiU0ZRmOVsJDvn+xXyDJd2gjOpOb/kk+R3PI6j22Uc9ut4rYezeohwIRPH4na8
+ gIoj9R5pH2m1h570mff8P/Bv+H3bDbdNX/62W6d33Ix/z48psgmrEM7Z65F1inpC5fpipBll/HnD
+ TdvTM996y/Sd77tj+sP+mgbdmXq5PlG3MaAe3jejnK5U1RP66s9WHOk7j/p1Q91Fl5Vv6yGHxJu0
+ 3ohaPN1vj0frH7FwXuxB5AG85LFCSOOGv3S8tVo9fnrVO69uqc0e9z6wVseeS5J+Art00WxsUlw+
+ 2Y1LfCg01O0E+PIDA9eUSZ48v+KARfHkJpr5NZ95y5FCFAwK22c+4V/FRr6lz7g4yrw9hHyGDTxH
+ /tXv3jE95Y2fnn7i9++KdUfazJNdWYVw4l4PTrNuUU+oan1oRjlG/Da36RW/d+f0tN/8tEb+Q6dn
+ 8uX6RN3GgHo48VFOV6jqCb3fTOYFcp29DKrrXnUXXVa+rYcccp1cdcVBWIun++3xaP1jsThf/mkt
+ PvCSxwohjRv+0nHkv+eXhcc5sFbXkKSfmMo1UqgTNppVcuAVEef7FfIM1/DlBzbywybH8174XiS6
+ EQ4WxRNxJc48xpm3HNHcMChsbx7VWHFLocDm/GFXBKI6427/5VNHpy94003Td//O7dOt+T/4VF/k
+ z2zZlarTSF31hui6Rz1DVv1pRjnHsOc/C/id7719+qK33Dy9of8bgcCdSZfr0/sq6+F9M8rpClU9
+ UTFu6pSzJq6zl0F11foMHPWehwXXyysX/R7rJF4z1htR4RjfnI9I48jna/jpecCOPBWA/ZmvDPUg
+ 3A7Onj2uzQfWtb91Kcg/l03YT0yXzqnWCRvNKjnw8qWiNK8hz3ANX35gIj/A55hx1EjaBb/jmcdL
+ fOLsl2LyliOyGSa4i9vxioj+Sm8eyzIvPyGdEcMHb9+env+OW6bn4ntM78fPU9V6MLusB59bnSny
+ 8npknbwuOS8eCLMx1tN1X+l7Wl/z9lunb7ru1unD+HGJM+1yfdRQvb10KLhOmbEPl6onDw18pFyo
+ XA+3qfWb9lcdGnzw+ox1Ja8Z9aZDhU4Z4hjm8Jv+jRNQhpwffLajZfGFA+OGv3QcuM+ZXnH9gx3J
+ /L75wNqersFJKbZ+YirXSIHzvDQyyGjanFcTCxG3E+Bpt+SX3HnTjx13dvvnYiZP2GUcjs/xmhfm
+ 9cB5y7bPfCI/AsXX7WEQ0wpEBLOQTluB/xfvH7z/9unq37hp+r8/cZcKM6srM8t68JlNuMhf9RaM
+ dYp6hqyy04xyjmHvdcr6r6f/8Ilj0zPe/Gn9X0XGdaZcrk/UbQyoByvSy+kKVT1j86ec9XDdalmw
+ HHvUXXRZeeJj36n+rLsZFYfK7QnRYcXSb42Ml+sf15wv84CdcACFA+OGv5o3cms6tP0lydnHzQfW
+ tHWNTlCQ9xNTudIn/kifY+DoNOfxAG27Qu68HV9+YJL8ORbuOPz2O49XRZj5ddzmLUcKUjAoHEfk
+ EbIiIqD0kb/kyDH8hHTaDq/9/Tunp/zGjdMP4h9a4PetnNairswu68FnrMtyvb0eWafRF7vWmeai
+ c7/ILtc5Rv681g9+6PbpqW+6WT/HtegsRnDaXa5P1G0MOBKyDpkS5VY/HhpNLlSuB+BVz6wjQFV3
+ 0Qkh0zGffsxYb0Qtnu7X8YOXeq5/XHO+bAvELxxAsU+MY15lqIeGuyY0s2GPA2vnGp2gYOsnplNi
+ kIzRQXYcvec8HmaOUt4Lz/klf/lJ3hzJvOC333m8qsYsTsdtXnDUQ9BBdhyRR8gCkqf0xlmONBfx
+ xOxpM7wLP6bw5b954/Qt190yfRw/buB6Rl2QaK1P5pn1YIZYl83rkXUafVE8NMMfdonG4JXfXOcc
+ gSH/f8f/ofzWd982fRm+v8UfqzidL9c36jYGHUaRbqTnChnPennzp5w1cN1UplHPVj/qXWdYcL1U
+ eeJzPv2Ysd6ICsd1mq8jkcaRz9ecz/HQsvjaOpuvDPVQuGnz97F2H1j8/tVq63N1gjIZJg2qGvGc
+ Mj10HJtKshW8j0tF2hu/ib/8JG+OZA2+dGC/83i1iWZ+Wx65ZqUHExw6jsgj5FA0febRwljEk3Ht
+ 9/ETOAT+99++ZfriN944ve3TPARQmMo781vUlUkxX+L4jHXZvB6cJm70xa51prnoxDTHN7vO/45b
+ tqcvx6H113B4/QHiPx2v7FflFeVTHVzRli7r0uoXm9/2I3PXuZZlXkfAqu6isydaj/n0Y85602nx
+ 8DBJvzVSz/WPa87neBS/cADVfvMhVtsm5ws3bfw+1u4Da/vQNSDFQYkgQNJPTKeEafiVPsfAJV6x
+ tyS63Hk7vvwAnPw5Fi79kHDB73jm8dIucfbruM1bjshmmOBYFHx0vCJSPbo961P07UF0+/52DLH/
+ 6w/fNj3lv/7h9KqP3o74ETILw4eogyTORz2odp31YJzUMuBTXYlzHaOe0FIWD55nY6znDH+c9UaH
+ Tj+Fn7J/Cr5M/CH8NZ/T7dxyfaJuY0D1VXD3o6rphTGeq9P6s6od69J51K8b6i66rHxbD9WfeJO2
+ Nx1HgfnaF5jp8WifRSycF3sQeQAv87Ii+DKPMvR84PAvreJs2v19rN0H1rR9DaPWCRqjc6TT1mQ8
+ DCjHoTAbrZC+bifA18kMg5m/k+SXf1gWT9hlMR2f4zV/OVKICg8K22f+kZ+KiHxLn3lzjAzrIeR9
+ PPzXT901Xf1fPzm9+IZbp5uO8oc/mS8CZmHYVZGnJOW3qGsqiOMzu3KRv9eD0+SLegLq+tqO7miv
+ Mexn+Ga35E/51mM709/H32O8+jdvml6vH7kH4WlwuT5RtzGgHqpopodMXCHjKXnzp5ypum7Qs9y0
+ 2qvuohNCpsQVHk+xDPIjhfwn73wdSaB4uf5xzflsx4iMo0Hml3mUoR4GDuLW+prQ1rD7wFqNn78i
+ eT8xXToGyR50kBoDl3ixtyS6vBe+/ACc/DkWb/oh4YLf8czjVXFmcTpu85YjspkOCsdhHgaSvAKU
+ PuYlyzwI4nmfDr+LH1P487910/TVb75x+p3b/KXUyBdBIx8kotF5Z1qLugoWOD6zCTeuB6eJi3qS
+ HTLdsHtm42ydsv7DbsmfcvLzt0O84J34EYy3nx5/zcf1jbqNAXVhZXo5XSnjWTdv/pQFFp51tp3q
+ ulfdRSeETMlTeDzFMjgOKlo8Oiz7OklLvwJu4Ms8wEseO2q44S8dDxxg6xMdWIufvyJJPzGVq4Ik
+ l4PUGLjEK6KWRJf3wpefxu/aws9J8DueebwqwixOx23ecjTCY02BV1PQLuR4KHnERbzM20PI+2i4
+ HT8O8LL33To99dc/Of0ifnlcxY8YR74QqFDXe16S8lvUNRWsD5/ZxFUITnR71tH2OS//EGZj2Gsd
+ E5+jDXkf1x54/pDrF+HHIF78vtunG/l17z69sl9Vtyif6+GYRzkpt/rxUGlypue6eRnEQ4JWP69z
+ LBPm8STTMZ9+zFhvOoVjFC2OrD/14gs7zJd/epEb2AlXE/O+o+mMjzInt3Z9H2v+hrX4+SuS9BPY
+ KZkrT3iNgUs8XfUkurwXvvwALD8stmiQ/knwO555vCpCFNN+GVbylqMRHhxabx7VWPZSKLBhz7ha
+ muFHZPvo9rP4N/Ke8l8+Mf2j37kFv85lUR/EOfKFwIKz3aIOkth9mJjlnQripO6F4IR5a8z1i3m6
+ od1snK1T1j9GE/E+ruPg+eNaP/JR/HjGG2+arsVf99mPP77Fekah+4C6qKKod6ZqnPGsmzd/yoWi
+ QSyD6ip51G+sH91m5b1Ohdc6m1FxUNHiub/fsFar9a7vY80PrGm6xic1isKkkHQ/gV06pyC9ch+4
+ xCtlFUVPvoXceTu+/AAtP8DnWLiIZxO/45nHSzsvDgfG6dG85WjQlT7zDzsbLuyTT+blJ6QHfHj3
+ zUen/+mNn5xe9Fs3Tr/XfvFU1bmthz9zImQWRl2feWdai7oKFvnzudWZIi+vR4xt3co/MFqHHFs8
+ J7Pe83XN9Yox/N+IHyT76++5bXrmm2+a3rTP/pqP66OGi77KeqCuil8Dn/Cn5cVDpcmFqv4OHsnN
+ DrLqLbqsvNfH8+nHjPVGJP/EkXfOR6RxZPBV60sDXB5gR54KIP2Sz3b5MHCcl8E1gdCw68DSyQ2W
+ PpKzTmg8p0yGjqNTyVbwPi42Na698Jv4y0/y5mgi8eXNfu2/28GhIPbb8lAtoCo9nmHoODL/iNeK
+ po954SMC8dDzA3+95Q/xTfX/75PTm/7wqJosm4GRVZ0rbzfTCB+FiToYz/uirqkgTupeCE5k3bJO
+ oy/KPzCs1lgGMSm+6qPjrPdYN8ab67XZzw34C5DPx/e39sulTZ0F5xjlcz2yDhkt5ZYXKsZNrX5O
+ CEbJnUe8A1d1F11W3naSIh4N5OPKUKEVIo7inM9aKgSkqDgGn+0Uf/KFA8dDPpklcPjVPJj4PfV2
+ jQOL37+aVrOfvyJbPzHJoWA4RpAaA5d48bckurwXvvwALD9swvRzEvyOZx6vqjGL03GbtxyN8KBw
+ HOZRjWUvhQKrODkf0zMCCQ/s7SZ878b14Hpp0SugWfyYHflCYGGE97wkLjgUSztMOH+peyE40e3t
+ v+KBHd2QdjaqzmF3Eust/+lnA77ibX7wuC8ubVblG3Ubg9cLUUY5+IQ/0Y+SWL8hY0qX6tt5SJB1
+ AaLqIbqsfJ9PP8EXfWD/xDGK4bfWUzjyhR2AYo8EPMAu+Wo+8yhDPQwcRSU0+z7WOLCOHf5jRLCY
+ jK6PzpFOReGROBvswodCQ91OgNcikg9/Zv42xCPO4Et+xQvL4gk7Lhov5+PR/JisB85btn3mH3YE
+ iq/bwyCmyZ9+9LwPbq4HwvKiV0Sz+ihsN1PmTwvnlXWj6aKunMp6SN0LwQmq0558Uc+YV9nxPBs3
+ 4ZvdrvqeAF95hh8M++pyfaJuY/B6IdJIj0/40+oHmZs665tJSe48JGj1q3qILivvdZIkh+Q14355
+ w8K/h7o1rY89MfMcB9bWscdw0ic1ioJkGf04mZEM9fjjnPm0GR8KDXUjH67Om/w5v+QvPxFHx4NI
+ fHkTLxeTi9T8JM5+7d96gOqB85Ztn/lHvAQSoKHzhx2DWMTDqQfycj0QFlcsuxABzepTMvOFwHoI
+ b5wkzkOxtJMB6yF11IfPcZV/1S3qCV3x4Jnu0u1GfK47OZf1DVl2ictR8Hm/kmI/Xc436jYG1EMV
+ bem6QlUf6PubTubkOrhMqutedRddVr6th+oZfQDSetNp8XS/PZ7j9xcjBC95HJhCpr35JNb6DlzY
+ Ma6tVf2e93FgrTzpkxpkbHKSsgnoMkc8pyzKhks853sSXe68Hb+Jv/xEHB2/5BcvIiueiCtx9tvy
+ yDUjLsOFQ9tn/paVsfhSH/PCy5wT8bA/BtcDYbFLuOhxzeqDuZEvBMGIj/ykp+GirpzKekjdC8GJ
+ bm//FQ/s6IbVmo1RP+FOYr2z3nvhndfww5j20+V6RN3G4PVCoKOdXCnjmY83f8qZk+TOo/WJPhZf
+ 7mMI6gfy0k/Opx9NOw4uEJ50x8DDJP3WSP1x+4vWsBMOj22dzUc9rpwvHCfD32pnw4G1vdakT2ok
+ wSCYDJuHpjmSO2QMM1ziOQ+FhrqF3Hk7fhN/+Yk4On7JL15EVjz0xyLM/LY8GJ4dKETBBHdxHWfk
+ R6D4uj35PT0IKtsH/MH1QHz4UB0ioll9MGeZdYPAegjveUmch2JpV/WQuheCE93e/iseOKKbdFej
+ Agi7k1hvB7w3vuKFL6XFoPbR5XpE3cbg9UKcUQ4+4Y/rz/C1yZvMOV7i6zwkyDqGnnUQL/eFeG3n
+ +fQDlbS095PuElscuV7kER9RS77wx3iFE6DhyCcxgQ3H+fC3s+kNa2v1GEHoHCw+sT2Ss05iPKe8
+ F57zPYkud97yIziTYoiDv/ws4ul8eqZdLELFGXlkHPbb8hiOBh0c2j7zH7ziKX3MS44I2qJlTA/k
+ 6HogTla0uiLzy7xSZr6IlgUXvus5N+8DzlQ9+Ez+Rf7lP9ahy3ST7moMe+FOYr3T3154znc/DHM/
+ Xa5H1G0MXi8EOsrpChnPujGv6M+WkOtgO+W9V91Fl5UhPuokh9EH9I8PFZAjZYnDb4/n+P1Fa9gl
+ n/yk3+EvEx64sCN+tX4MJV7+kvC160PTzvQ4TvikRhLRhBwZco14TnkvPOd7El3uvAxSsuDhB8/J
+ n2PhGn7Jbx7zdbvE2W/LI9eMecq/HROnplD+UQ9G1OTB39IMHpHtg1vVlV0XTcKwnF/mlTLrBiUT
+ E77rObeoq2DkFdr8i/zLv+p2nHUWe/fneGV/nPWer2vwN3zlGfwMeT9drk/vK5dfbyIIdJTTC1P1
+ jM2fcuYkedBpnWvfiC/3MQT1A3lddy57xpN+642I/SA945uvo+ZP2F9EwU64INKQ+4x6XOF44DhZ
+ /j5z4hmFywfWrW+/At/YukgQJgNjn9geVTI2gyig5qikY1zgyeOi6Mm3E+DrpAc6+XNcxiPC4EsP
+ jmcer4ow8+t4zVuOBh0UjiPzj/xUbNYl9TEvOSJYxJNxPVBjrQ8WPZuBsVSdW13YJBJZGOEjP+E5
+ t6grp2jA/KXuheBEt7f/igd2dEO72djiOZn1dsDhZ0P/VZ7hB8O+ulyPqNsYUBdVNNNDzK5U1Q+y
+ 18u4TEr6zqP1iT4mS9ZddFn5Pp9+zFhvOi2e7rfHc/z+Ih/iII/cOm7HE31HSK5/4cJO86tLJp5R
+ uHxgbW9dlU59UsfJxiTZDADWiOeUSbAJz/nk03OT98Jv4i8/EQeTkn3jS37Pz+NVERC/4VzNlkeu
+ WemBKn3mH/lZ0fSZd0szeDKeB3rMOvkzFivpq+pcebuZJAo26kQLwxZ1TQXrxWc21SL/8s/5tm7l
+ n2b4M5ZBTF7fxOdIHwv+lOUncTkKHn2LZ6VFjn10uT5RtzGgHlmHDNYVqnpC3990CqU6u0yq6151
+ F50QMq31UH25zmZ03/B5xNP99ni0/jbT+pV/WsscvOSxQkj7Hf7S8cARNvpuOjY9hjM+sNb4LjwW
+ m5dPahSFMrz5JPQ8fctn4nIMXOIxTQMNdTsOv+FMas4vucVxPH7FC4Yer4ow89vyGIkoRMFYUzyw
+ aOILWYumOFLvkQEHfXuojB/QB9cDYXHFsgtZX+XnkQGOfCGw4MJ3PecWdRWMvEKbvwpBZbe3/4on
+ /QNDd2MZ+BR290E/iXwf3VyPaKAxeL1UhwzWFar6AVH9mRDhWWfWL+qqh+jj0KveotOTrL3+uV7E
+ m7TeiLRCyTvnI/LE/UUU7MhTAdif86AeVzgeOE7aH83yRxt8YK2mx2RT6+SEcR+dI52GT46xCTqO
+ TnM++ehL1wnwtFvyp9/iPQ6//dp/t8s4HKfjth5R1QPnLTuOzD/zBJAADRFnyU4v/YT0gA+uB8Jm
+ l6BueTm/zMsjmyTzp0XmSRvNS9/yTgXrwWfyG0hJV/lXnUZflH+gGBXtNYa97HKdcyTjgj/lvfBL
+ P6TYT5fidqGj3lkPVTTTQ8iukPGUuA6jnpmT6+AyVT1b/aoeosvKE9/XNfpAXgEkTP6Td/jt8Ry/
+ v8gBO/I4ME6E3+EvEx44ouzPYfinGOINC//bkE1HCEcm0UbnSKfhM3E5LvCYJpGGuh2H3/Dd/Ol3
+ GY84F/z+DOS4u13G4Xycn/VgqYcIFzJxagryhxwPJdss9ZHhIp6YfcAG1wP5sEvaZnd+zpPBjXwh
+ MDHhu55zi7oKFvlLrULxqa7yrzpGP0Fb/vGsOuYY9ZPdfdBPFdg+eXB9om5j8HohxtFOqHPUn6Hr
+ sGoy53i5brZTXfequ+iy8rYrvHiDL/qAHs3PKObrqHnhyOCr1rfWk/OwS76az31WhnoYuLADXuwr
+ fNsKV7xh4fSKpvZJDRBlgtk8ANaI55RJ0HGJ5zwUGup2HH7Dww+E5M+xeCMecS74FQcsK07qWZyZ
+ 35YHw7ODQSe4i+u8Ij8Cxdftye/pQaCnfXFzPRAfl5t1iGtWH8xZZt0gCJZ5cSLzW9Q1Fcyfz63O
+ FHmV/1iHLtNNuqtxtk72V+tuQt7HdQJ85QmLkf0wf6CfXA8WEBUYg9cLwUV6fMKfqIek1p+Q8xJf
+ 5xFvs4PMOohX/UBeyjmffjTtOGgg/7bTYSkC21nL+AWU4ZzPdoqfPBWA7c0nswQOvwpv9N3Eb1vh
+ 2pr+5fX8v4OPSqebPsMpFQSlEeCUSbAJz/nk03OT98LXyQxs8ufIKs/sGl/ySw/L4slNFMW0veM1
+ bzkShWBQ2D79RX6MSHypj/mYHgQZzQM/uh6Ik10STcaoZvUpmflCYGGEj/yk59yirpzKekjdC8GJ
+ bm//FQ/s6Cbd1agAwu4k1tsB742vPOFLaTGofXS5HlG3MXi9EGeUg0/4E/0oifUbMqZ0ia/zaH0G
+ ruohOlaED66fJDkkXtOOg4rCMYo5n7UwOG5/EQU78tgRJ+An85DICc8XjqL9OYzVo/ijDVvTkbse
+ D/ShdOqTGmQMgqRsHprmiOeURdlwied88um5yZ234zfxl5+Io+OX/OJFZMUTcSXOflseKt6Ik3DV
+ Snlm/saHouljHgHKruXHx/1wuR6Ij10SzcC4ZvUpmflCYMGFN04S55d1TQXzl7oXghPd3v4rHtXX
+ dnSXbru+1jnX3YS8jysKL7vE5QhU5YlnpTUs98WT8426jQH1UEVHX0WFqj6Q+5tOJuM6MG/nazn6
+ GCDKqrcKrieZjnmvRJTVcRDW4ul+xS8tHQq4gc/xqH/IUwFkPNF3ckP/9JY4So7//2fvXYB2za6y
+ wPc/nU5CEjAoXoay8IIZCAm5QBDRGhFrlKmxykJriDrKCASJiKMU4zhT5eCoU17GKRwZLEdrnBKm
+ RDOiI1rlBS1jB+SWEGIIBJAAIUAg5Nb3TtLd55/nsp611/ue7z//6aS7z59071Pn23vt9axnrfXs
+ /b3f13+fPl1l3LG9/y0vwP+b4tqvoitJ/aT2k43dryczgkXhFoRTGOgK1/Pg41KjmjrFT3/nwVp5
+ ePlqv3mTxwF87eF69vUybt/XyCPxEN51YS0486Z/48sx/LW/6JunC7rNiz4f3hLqUKN17r7TLwCC
+ EZ++09ZBV3Ixnjiuh840OTq/cKVn7TMN43bzqOdWznudG+vIeV2cB+mu1Mh9bR0tJ3SRomkPNVup
+ 1hN238/Rkfw6t9L1It1FF+V9TrKkP/UzaX8jGvXMvLOem98v8oGXPE6kBIw3n/Ml8cJVnHB1n+44
+ /7X4Gdb1ZyukLvXuyUwwLwMAPWMdW5R1WXdxdvB1jZvwE3SKv/NUHWxKeRywuCuelTVP1YUN4Vzf
+ yJMzaz9g1BQ2RZv4cgy/ccZXGcVT1m2fVD+q4KXIZWBR7q/qb5v9wqDgwk8/9w66CkZeoc1/6L/z
+ c3+cW+cXq+P7nLGnuOAzJx/njMp3Ef6YJ2FXZVbduUC6d5LT54Uil5w+GOOp17ifoxnr4LjWc+jX
+ eohOCEWv/eQxqe8N19wPb70vZNc+/ciTsedzHDtrvnFufp9VZPbFxwTcP96768/GA+uaH1iV1E9q
+ iEIbJH4SsiYmJYW55Kc9cMFjmw5N/VL2RfhT/Mp3i/yuZ1+vTn2Xd/SxGlGJgkl7i+s6qz+JSD1m
+ fOzqsPJ0v7d5YT1QL09s3f51jkMXvQmih/DpmzMbOejKLTqoh9wShqsenV+4uk/wch9oxe1mJ5K/
+ 71HdP5GWfySobdZR/JnJf8jTcVdkYX1KtzVBFykqeV0q7aEf/D4v49KO+CYP9Tqlh+ii/NBJ+jJP
+ sjKea2+IbtZRwMvvFznASx6lDV/6oB9jx0ebm+7bYdy4hgfW9XP8TaP0qTpMmBE8Z0L7yYl1bIfd
+ iOd++LQe9uTtPIKzKZa4+DvPoR7SHfnFC4aus/oIznlHHyvRokNCx6d/26pIfPHXvvAK50YtrsZk
+ PVAWFa3LwMp2+rTNfmFQcOGNk6W2DrrGwf65Jv+h/84v3UpPQDs/w2hnrnjF3cJ5J99F+GMepLlS
+ Q3VTAenTE/SQomkPNVsh42n5zR87TcledNL55PtLdFGeeet9J/3rHigrgISNevjQSd6e6b/p/SIH
+ 4oQjHQtI3pWv9xtHlPOpWsbhWXUNe35gVVI/qf1kI0l/UvESicIt+BOCtVql3axc6pYrj5vwE9B5
+ sFae5LtFftezr1ci7PKOPFJBiZnemiOx60j/tlWR6oi/9oVXODdqcTUm64Gy8CuXgZW1zkMXvQmi
+ h/DVn/CMOujKregh9xSCGzPe+bsexOl8gdnNo57cu55NyNc1LsF3n4hgnqs2rEfptiafF4qt9rjC
+ 77qPsqjfsrGlIb7JQ4J6/xLQeoiOinAx95NH266DsMaxipXX9dPrPI468tGmB3HCkW7mJV9FZr9x
+ FYd9VUs/nlXrgTWefGTxE9szOftJzJxli5JxBzz3KdZu3ITfcDbF1hZ/57kFfj/x9/VKjV3e0cdK
+ pDIFQ0L3mf5tqyIC2l/7shXOjVpcjcl6oCwqysOu0ec4dPEnJwCCrT4ZYthB1zjYP9fkP/Tf+bmf
+ 8xPf4ZwZXvuYpH/jR9yRP7byBJe5eMSLNeerNqxP6bYmnxeKXXJS4bqP3Mcvn5eU77asg+PU90W6
+ i04IxTKu8cpjSt8brp1HdLOOKvDy+0UO1E8eJ+KGzrnvnTe83ziah3unB9b29DcsSVNn6E8iHCIu
+ v7Tj7EXbPvP4GY0hnJdX4fXWPwF9mdKmbhXvVvXjyXp03+m37qDeXYf+Vzx1Kj0lE/Mpy37ufAM/
+ 4m7Q9xI88888V+FMZg3WhwKue+R6YWNUe1zh99Cv3vzRl1gO2Yuu7BEXPUQXZRwnSwmJLz7mpUP5
+ iaO557OXDgEV2LoXkSfEha/3x70TkRMvHDedb9THH7qfP/1vCSmNVOFscdcnVjt0drtPpHlW49Ao
+ 9e0eqh9FXP4JmH4B1p1Bv+qLfZcu2Nj1HQdxXOs2G0+To/NTF/inzTRE7+bST7jgM5uQr2tcgu96
+ EcE8V21YDwpIfXryeaHYao8r/B76wX7KfsPazvDAOj97+t8S8krUu8efEH6y8674k0wL3Z3dJ0ht
+ I7wItLoSL66bV73eDVXVrn6VPT7p+N4QPn2nLetBd3glGPtniN50WtHSCE4z/NMWD1C7WQdQ/MFn
+ JmP5RT7sHf/Ac3/yd9wVWVgPCsjz6cnnhRpXu9R16AfE/KaTdqzD4BHviIseoosyxJdOSki8Gfub
+ jk+4ytzzEXn5/SIKceRRWidw3pUviReu4mZ95/wZVv6REIctCGeCxuwembRycr4Jvog09csl+OMn
+ Ytujjq6LpMUXftfjulOvVZ59uW77EdmLooPtvOnftoCqI/7aF74qONSTum7XbD1QJ29JbiGK2ena
+ NvuFQT2Er/7k595BV25FD7mnENyY8eM+1T7TJF3PpZ/qPtw/8rlArfxyCb77BFptjdCrsPT5lG5r
+ 8nmhwGqPK/yu+yjLb37HY6OGdXOc9NX5jDjY3keA7gN5ic9+8mjbdTBA+cO757MXcTe9X0Qhjjxd
+ QPKSj36MWiwcN52v+8Gzav3QvZL6SY0maLMZzGolM7lJdRM8U80mpj15w2945YER/syNq3omn9Z4
+ cT37eiXCrk7jzNuJFh0c7jf9L1710/7al10VVJ6ybvvU58Nb0rci/aWv2OwXJVMY4aefewddBSOv
+ 0OY/9N/5uT/Ozfo6jumS9iR+xLlAJq5R+RQXXGZAjnkSdlVm91sXaE3Qg4qw/lRqhVof+J+y37B2
+ P3THYXP4Se0nG1WbT+C+XMFlLlzw5Jlvkmmf4jccbwqG4bdmXj7arOsW+IXjYR7iUod5zGfeToRF
+ lQuH49N/5WclqiP+2q/tRaDVlXixHqhTTxV27LHTB1urXxiCUe/qT37GHXTlVvSQewrBjRk/zq/2
+ mabPGWvZ5OO+eKN/zXbI3y+X4LtPBJD/qg31aaFL79JBylhe12yljKdufF8MXaox6+a41jPvG2Ba
+ D9HlBOZ+8iQrbAnH/fCuvLOe8XQdeVYcidY3p+ynD+fTfWIe/FJewZyv+9E3LPxzoULQHMepTyzG
+ Hj+x+onPOFyeXZyJ+LrGTfgJOsWfvLfC73pcx4wDsWpwfSOPVFDi8mNCoOtIP7bLMfy1L7zCuVGL
+ qzFZD5TF0683Nytzf1V/2+wXBoUTfvq5d9BVMPIKbf5D/52f+7kfZIfNNEnXc8UrLvjMycc54xL8
+ MU/CrspsfeoCrcnnhSKrPa7wu+6jLL/5HY+NGtbNcdKXBEO/1kN0OQHi6zyUsO4BOPtnScof3lFH
+ FXj5/WKBiCOPC1PFzrvypeGFq7h9fU//W8K8mXG2PCWYFnd9YrVj+I0zXvpzoxZXY7r1T8D06/51
+ q0oHduK2bviks6PuoC7bof/Oz31euvJzRtiND63hb/yIu0HfS/DHPOzlKg3rIaHrXmmCLr5H1R5K
+ 9v1r/WDzTR07PcledPYP/eiX7qLLCfAYs588ZuxvOqOemTf5jSNfxe34fE3YWfNVY87LPjpQi4Wj
+ 6T5dN4D4F4T4ofvT/5ZQ0tQZric/NqAR7Vq0raPlvvyMxhDOy6vw6rp55KyTFXu4v/TlmZckbTJi
+ 9U0/4+jnpVtxcrB/uacQ3Fg41cFLXPo0DzDiyzz8rPcYp3wkzrgEf8yTsKsyW4/SbU0+LxRZ7XGF
+ 30M/2D4vKd/tWC/HSVcSREfx5fxIF+V9To1XHlP2Nx2fsOqZeV0/q3OeFNK69/nQg/qFw7L300dF
+ Zr9xFYf9VZ//LaEj1AR7gZugMUsy2JqZE7/7CTtwHadcTDMGcRiTd+K5f+TvPId6ikhTXlyP655x
+ SFhw9uX89mO7F9y37TrSf/pU4CE+fFVB5Snrtk/WA/XzuOsysKjWeejCy5T+GRGdjOfrQdc4KIvc
+ pQ/XNTo/iXN+8HV+rCV/5lFP40ecCyxyTpfgj3lG5JVYWp95r6KHFE17bBS/6z7K8ps/+qYZ2YtO
+ OreOjINe0lt0UX7uJ48ZfW+4XvX4nsSuWfeFfB4rT3DcR/3CYTnOre8dIdlvHDdvvHf8Yw1305VL
+ 7Sc1muMlZ5O8NHLXjHVshy1c8NwPn9bDnrwT33mADX/mxlU94mR9Y4gXkc1T9acO52VZ6QfBTiAW
+ 0cG2P/3bFlB88de+8FWECEZBt3mpflGDP9nYqMfq3/qtfuEXjOeZvjkz7qArt6KH3FMIbsx48pWe
+ tc80pN3NpZ/qDj4zsFUIVx6X4LtPoJnnqg31aaFVYMnp80Kx1R5X+D30gz2/6aQv6+Y46SrCEQfb
+ +6SL8sRnP3nM6HvDNffDu+fTPv3iE+zA5zjVL1wRaUofjkvDnVdpna/7wbMKf6zh/AMKqaR+UoOM
+ NpvBrFYyMxl+y5+5cMFPPq35chN+uysPjPBnbt7kGXxccriefb0SYZfXOPMiqBdVHmz3m/4Xr+pv
+ f+3LVvoiqPUVmPp88qboMtd5cmv1C4N6CF/9yc+9g67coq7sX+4pBDdm/LpH2Wcaxu3m3TlF/5od
+ yNc1LsG7r5VnBV6Nlc+ndFsTdJGikteVWql5nv5mYly6kX/y6HyWfq2H6KK8z0mW9CQ+WbGgY9Qz
+ 88569D6rQlYeE5kPvORxIiGNW/mSeOEIc/1dH55V/BmWH1h4GAjCGVn8xPbM1P0kxjr2RXjuzyam
+ PXk7j+BsiiUu/s5zqGfyac041b+vVyLs+hp9rESLDgndZ/pfvOqn/bUvuyqoPKnnds/WA3VS0dxC
+ FOX+0lds9gsnBRd++rl30FUw8gpt/kP/nZ/7OT+yw2aapOu54hUXfObk45xxCf6YJ2FXZbY+dYHW
+ 5PNCkdUeV/hd91EW9Vs2tjSsm+OkLwmGfq2H6HICxNd5KGHdAzD2Nx3lD+/K6/qDI5/Hns9xqp88
+ LkxA41a+NNx5WWf12f3gWbX+kRDNCcKZTYzZPYJcFCySWlyMLyJN/XIJvp/MCAh/5mM9p/hdj+ue
+ cTw0Dvfj2X5s9oL7tl1H+k+fABKgqXRom+wYlcfG7X+1HiiLt0SX0TW5v/Tl2Z+c8FMP4aefewdd
+ BSs95C59uK7R+aVT6Qlf58ea6Up27TNUcYf7x/0b9C29L8If84jjCr2obl8oCVHXyee1a9cKGU+9
+ eP+WnmnJOlgm6XqR7qKL8ta78eI1Y3/T0QmFd+Wd9dz8fpEPceRxIiVgfN877uQ8G8dN51v18R8J
+ z5/+GZakqTNcT35sUOP65NGhlO0zj5/RGOOh4I3b++q6eeSskxV7uL/05VlvguqfEatv+hmHy8WH
+ CFbhlYN6yM2FVrQ0gtPMy1l+zuIBajcPP+s9xh35Y+9wiQP3MY+rujqvqpsKsO81+bxUf2qlrkM/
+ IHxeJ/SePOIdcdFddFF+6CT9iXde3xuuvSG6WUcBL79f5AAveZQ2fOmDfowdH21uun6HKR4PrO3p
+ n2FJmjrD9eTHBjXSm12Ltqml9mub8fOhIPs2v7hulMVbkluoMnl50pdnXqa0yYjVN/1shP59nBzs
+ X+4pBDfM2zPydz3hgVN1ZHYi44LPbCK+rnEJvutFBPNctWE9Src1+bxQbLXHFX4P/WD7vKR8tyW+
+ yUOCoV/rIToq4vi1L0fn7W9EjWMVo47oTz/yZOz50gfihGPamZd8FZn9xnHf+VSt/XxgPf0zLElT
+ Z+hPZj/ZfRbt0Bn3JzcPaZ7VOLQ6gts6+RPcl2zdCt4tXh7P7tuXSeXr8qQv9k0cX63HjJOD/cvN
+ hfE0OTo/93HZpi0eYHZzxQsXfGYT8nWNS/DdJyKY56oN61G6rUkPBda65KSuQz++iYedvqyb46Tr
+ RbqLTgiFtk5KyDxm7G9EPmHtz7yuH/no98PkBF/6AK9wgFQC5135er9xpHPf3Y9+6H5W/0hYSf2k
+ 9pONJPOJaelYJGskTc2FC74cmvrlEnznQUD4Mzdv8pB0iGTTh9A89FOcXV7Xa95OxHDDBLe46q9s
+ VSS+GU/+ph8L0d32lz4fHLp0qIp2+qjv9AuDwgjvPmVRVjiOcRKM/cs9heDGjHf+rgc6Mg3jdvPu
+ nPb3j3w5R62HLd7ci8yC7/N03BVZWI/SbU3QRYqOdq1U6wf//KaTdqyDZZKuuq+l49RDdFHe59R4
+ nbMZ+xvRqGfmnfXc/H6RD3WQx4mUgPHmc740vHAVl/ui+zF/6M43N4af1CCrN7ufhN63dHXZboIv
+ Ik39cgm+8yDAmrLJ0/WIs/jCr3oR0TxVf8SUn5ph37ydaNG1P/1XfkaIb8ZDr9reEaSg2zi/9BPu
+ 3H7nr6i/5oy3RIftglb/67x5SSQnhRE+fbttNnqMaz0YQn4R0PCQ3lhq5qUrf/PAx3SsQvPwk+8Y
+ d+SPvcMlrvLu+LF3VYberBGcc8nnenMuqdYKtX5QzOdlXKOOPLJP6C66KOPzkVX1aAJpfyPSCRHH
+ Mvd8zG0cGTz6fIvIE+LI40QCGke+DvR+42g636pv/tCdl44QzmCZs3tk0soZXOYDHtsk0tQvN+E3
+ /Eb+5D3WI84Dv+pFhboMAKT+1GHb++YFqBdVLmzHp//ikYjUJf7al61quFGEZd/G6Zc/69r2jz/n
+ E7dvfcUnbr/mOfynfjbqsdMHW6tfGIKtPhlhmQ+6xsH+uSb/yfMwf5+f+A7nzPDax6R6Gp97ZQdf
+ 16h8PtecV81AdZ9Yi39F3vaV3qy5L5xLPtcpRYectEdffBMPO81YBx9D6zn0az1EtxRZ+8ljxv6m
+ 4xNWPTOv8gFqHPk89nyuR/WTx4UJaBz76kDvN46m++5+8C8I1w/d61LrSQ6WObtHBIuCRYLqJnim
+ 4qXbjUvwOkSG4Xf4M7OrWY94D/yux7gZlzoc77rt70SLDg7XkXy2VRHztb/2ZSucG7W4OtN/8Sue
+ tb3xt37S9uf+0+duH3eH63N/6cuzPzlRN4XhCVSfshR20DUO4rjWm878NDl8HjXn/GqfaYjezZfc
+ jxv0vQTffVYeTFdqWB8JXXpHD+u4rpOVaj2h3Pymk6bkX3TW/5TuoovyPh9ZSshzNmN/I/IJa3/m
+ nfWsp86Rjzb5wEseJ1ICxve9404lXjhuHu6dHljn9UN3XjpC6vLN2T0yaeUMLjOTzzjszyZoxt7h
+ Eif3jfzJy2Z2cYOPSw75USHnGbfPa5z9COpFlQfb8cm3eMXT/tqXrfRFUOsrND3z2tn2tZ/6vO1N
+ v/WXbV/8yc/e64M6V78wqAdvVfUpS9fioGscxHHNy1b3hyaHz6PmnF/tMw3jdnPFKy74zMAe+WNf
+ hHdfKw8prtKwPqXbmqCLFE17KNlKGU+L97vu52jIOlgm6Xp4P7YeoovyxI/3i3hNqjoIG/XMvLOe
+ PGyE3vG5Hp50841zNp/zpeGF4/7h3t3BP+l+7ezp/5aQ0tQZric/NnC4/iTRom2fefwluAhqfQWn
+ T372Hdv//dJfsr32837p9mnPu6P6cn96E1T/vCSr79IFG9YlepRglIW91ptjtm3dCs9LXPo0D8Pw
+ O2mnn3yyM5P4qO/gO4U/5iHFVRrud96r6CFFR7tWqPXhm7jOY/ZjvSzTup8ndBddlPf5NF68Zu1v
+ Oj5h1TPzznrysGEk9xdfjg11kMcOJTCO9TlfFgvHfdfffOfXnv6T7hGb7zlpyjcJfq1PrHYMP6Rk
+ AM+Abo5e2Lyqr694/p3b9/6WX7Z9/Quft33infhzw90vKtblSV9uzG0dPunYHB11B32bI4Q7709g
+ 4UpPhVFfhOP3bnYi1UM+67vilM/Ufr0E775Wnhl6FdbWhwKWjp6gi3Ws9lCqlWo9Yff9HI1Yr6a7
+ Qb/WQ3RRnvg6DyWk3iZVHYSNembeWU8eNkLv+FwPT7r5KoHzrnxJvHBk8/mrWsfhZ1jXr/8CXUnq
+ JzWa4KOPyccT09K5BfkVtnDBTz6t+VKP0sk78Z0HUGvKJhl2a/yuZ1+vRNjlNZ95OxEWVR4criP9
+ V35Wojrir/3aXgRaXfkX/FPi9hWf8nHbm/6zT8T8HLTGflE2heFjpHSQxX1s9PmUnq2H3FMIbpQ+
+ meseZZ9pkq7n3TlF/5odyNc1LsF3vYhQWyvySqxyX1tHyw5dJLjPQ5VaIeOpm9/8sdOM7DoG6Ut9
+ TukuupyAz6nxOmczqg46Rj18mCRvz/T7YaJA7i8+8nMbccKRThviMZ/C1n7jKm7y4Vl1bXvO834S
+ hI8mqZ6cBLGImt0jkzI1tjlXkRMXPFOFT+thX4Tn/pG/8xzqEecQyfSurHmq/tThvK7bvIjqRZUL
+ 2/Hp37aA4ou/9oVXNdyoxUfPxG9YX//C527f/XmfuH3WJzzDevCESwd24rash+WqPqMHQTifY//S
+ W/HforHqAABAAElEQVTkKz3LFg/DaGcu/XxO0X/FHfljX4T3OS5+pLlSw/qUbmuCHtZ3XScrZDz7
+ 8Zs/dpqSPXl0Pks/+qW36LRS6NpPHjOqDsJGPXpYznOSF3E8/xp7PkSTFpmbb8SbrwO1WLiK67qv
+ P7o9/My3Xdv+0Avuhetnk9RPajRXl3A+Md1SXYIqcuJYnWzlWk2okkvwnQdg5QE+c/PehN95nX/G
+ 7fuituHtRKs8BNqf/m2rItbf/tqXrXBu1OKjb3rh856xvfZzn7/9nRd//ParnnVH98lO3NZB1zjY
+ P9e8hIf+fR7cpm7rXrT+DMNvxmuu+B1+xB35Y1+EP+ZBmis1VLcvlASQTKhQ30Q493WyQsbbr4fW
+ Aqgv6+C41nPo13qILsoTn/dD8lim/kbkE1Y9M++sR+fvsANf+sD5k8eFdb3m60DvN47mundg+tnt
+ q190P/5Yg/bfnqR+UqOJuoTziemWEMoQ+jOz6YEvh6Z+uQTfeRAQ/sxU6zJ+12PcjENglYMZjs4j
+ 8eBqvxPbn3zpU4GH+PCJvnnK+qic/qv/5FnbD/yW529f++s/bnuGblfaOujK7nTemLjmm6d0pMnh
+ 86g551f7Oh+sd3OfA3WN/jWbkK9rXILvc0YE81y1YX1KtzVBTyk65KS9dNCbfNjpS3yTR+cz4mBT
+ B8nG8+o82U8eM6oOwhrHKvZ89vq8HOXzdp7ZB+LI0wUER76KrMXCcd/5XMb527njB9bZ2Y/q0hFS
+ l2/O5OwnMdaxsTyJ5374tB725GW1sgVnUwjD7/BnbtzAH/nNs57IyROc7dHHSsTqDENC4nQppINt
+ VTTsVVfFNYGoPqpfnoM/r/VnP/U52/f95udvX/hJzyz5DrqmX+rFNS8b9RnD51H6jXOzvo6TjgzH
+ 75P4EXfkj6244DIX3+QfpV2Jpfst3dYEPa3jktMXtfWBv+/n6MQ6+Bhaz1N6iC7KWPfGgzl5+xvR
+ qGfmnfWsp86Rz/XwhJuvEjDefNVE9pmvy1v3DoX9KJH1Dev8R5PUT2qQoVlWz9k91oyg2CSYuOC5
+ Hz6th30RvvMAG/7MzVv1iJP1jSFeRDZP1Z86nNf1mhfBvahyYTs+/dsWUHzx177wVcShnlHaR+Xy
+ 1z/nju01L3ve9m0v//jt1+CPROx0ZUfRg2tetkP/0luwdY8IbR6G0c5c8YrLOWcG5sgf+yL8MQ8p
+ rtJQ3VQgOlImFMg3MceS0woZb//8piOw8CYoOsTTrntcfvGLLsozz3p/s4LkVR2EjXpm3lmPzp9Q
+ jD0fbe6ClzxdQHArXxIvXMVVffCPB9bZtae/YdUZric/NnTm7WjbZx4/hcXgm/ZjcHz+L71ze/3n
+ /ZLtz/+G5+hPy+eT1Q8p30FdtkP/wWnmpSs/Z+kHrXbz8JPvGHeDvpfgj3mu2tFYD12wulfRw/eo
+ 2kPZvn/G0/KbP3b6kr3obtCPfuktuijPa5v95DFjfyNSfuJY3/4ciTSOfB57PscxsvmqMePI14Fa
+ LBxN5xM7n1EY/oZ1x/Wnv2HVGfoTAofIjwYeEmcv2tbRtp8yYgjn5cfa6zNxS/7Er3n29sbf9Anb
+ F//KO91e+qel26xr1a1bt9IP/mlLP4bhd6m785PP+q+4G/QtvXe4xJEX/snfhV2RhfVAhdHR1w16
+ WMdqrxUynnr5zR877chedDfoR7/0kOBRZuikhNTbjP1NZ9TDh0ny9kw/z7/GyjP7QJxwAFUC41a+
+ 3m8cCZ1P7HxGYfiB9dyX/Qx8jwrC5GxuzO6RScclqyInruNMxNc1LsH3kxkRu3yjjpvxqw5ENk/F
+ RUzXSW2rj5xZ1+XE9qd/41WR+GY8daJdLfZitfyxtvpV+I+q//ZnPHf715/9vO1F+NPy6p9N8hIe
+ +pfecFn30rPs3T1ieHCZD/cP2zfwJ9+OP3HFM/OI4wq9WJ/SbU14G/pCLTl9UVtPvonxK3Zasg6W
+ qfU8pYfooozPp/HiNWN/0xn1zLzJbxwZKg6FL74cG+oljx0CMt58Hej9xtF0n2jj/o3PKAw/sF55
+ hj+Htb1NEHh5GfQErdk9Mmnl5Exc5gO+HJr65RJ8P5kRsMt3op5T/K7HdSdel3qX13Xb34kWHRyu
+ I/2nTzlUWNepuuhX+FiU/TE8vQJ/Zut1r/j47a992sdtz78T2uD8lxBu3OdR+uV+wNX6YY0o3+Ha
+ xyT/8f5x/8gfW3nCn7l4Jr84rtCL9Snd1qQ3Mcvse1UKtZ6w+eaPnZasg+PUt+7nwtHvfUTwvMRL
+ fPb1NOm8/Y2ocTyvPR9zG0c+jz2f62Fk81VjxpGvA7VYOJqV7+z8JzY+ozD8wOJq80/h/aRGE2yK
+ zfAS0JsZ69iKGrjguW9RtPILcRiTd+JP8XeeqmPij/ziRWXNU3UF57yjj5xZ14XikNDx6b/qtWP4
+ 0wdntTUWZX+MT/zT8l/6yc/c3vi5H7+9CvM1bozh84hOpSf8fT5Y63wz9zmA5xbOO8L7XHNeF+cZ
+ pV2JpfXRhat7FT2sY98rvGnpaT3rzR87zVgHX8Mb3jcA0e990kX5uZ88ZuxvRMof3lFHzot+8VVc
+ 55l9IE44FSKg6yGf47JYOO5XvvOztxdqPrD8U3g++Rg8Z7XCSyQKuDlXkRPXcWQfTdCMfRGe+0f+
+ znOoZ/JpLXofQvMwP9XY1em6zYugXhRMcNaR/o0XUHwznvwVxyIqD5dPpfH8Z5xtf/UFz96+87Of
+ u/3GT8A/JtbQOWO9O++yJTvWu3l3TtG/ZnIe9b0E3/eg8pDiKg3rUxdoTXiL8h7Pdte91j7fxLmf
+ QvrFOjtOulKfvG/EV+8v0UV5n0/jxVt8rIOOUc/M6/rpdR5HHflo04NzFI502tC9MF9FZr9xFcf9
+ a342cWd9w6qfwvtJjebqzb6ezEw6Lhn9tAeOxci2Q/5+uQTfeRCgPMBnbt6b8Duv8884HhqH6/Rs
+ fycqv23Xkf4rTiKCB4Fdp/qmrfCxKPspNn36c+/Y/uXLn7v9nRd+3PYrn4mruNN93YvWz3LrPdHn
+ hT3F5ZwzU8sWmgbGKf6BP+Zx0NV5tT66UHWv6t77KZH22Ch+D/3qze/41Y91syyt5yk9RCeEglsn
+ 6ck85uTDRG/AUQ8fOsnbs3DkqzgQdH5smQ9x4asEzrvyJfHCka/ynfvfEHJnPbCuP/p2bvhJjaRo
+ liSc3WPNwMS+CM99PmR2o+zJG37Db+TvPFXHxB/5xYvKZr0SYZfX/ZkXWXtR5cJ2fPq3LSB52l/7
+ sqvLY7+75p86xu/5FXfiHxOft33tpzxrw8/opec8tz6fyJ95d07Rv2bKd9T3EvwxDymu0mB9+3tV
+ 19FPidGuccZDBr6J657PfuSv+4jpYt1FJ4TCW6eqJzL3N6JRz8w769H7rIrZ8+XYcI7kcWEjL8+3
+ A73fOJp1/tdPfcN6xjP1rw315ATLnN0jk0ZUzONJyawTr8zl15ovl+AZf+RP3lvhdz2uY8bt87pu
+ +1FTL6o82K4j/dgWkPW3v/ZlV4fw11/qWRtP3enj8POsP/PrnrV962fyb4KgbqUnJLnhnLHnY+Bt
+ jq7Rf8XlHAUyUMsd/yV5OvY2L/Smpi6+UHPSw4jlya06jVOf3OebGL9iCyI8dXZc63lKD9EJYXYk
+ arx4zdjfdJCPQ8c48ia/cWTw4P7iSx+olzx2CGgc++hA7zeOZvX5DP+RBu6sb1hf9iL8NTPnb9aT
+ k0nRLNnmE1O9Ikgz/RgTF3w5NPXLJfjOg4DwZ27e1EXS4gu/6kBk81T9wblO12veTiQK0cHh+PRf
+ /UlE6hF/7cuuCkDwqCVJSU/Z+QEI8b/81Ae3L37Lg9Kzzw+K9PlgrXPIXOfpc4r+NVPJ8nOpcQn+
+ VJ6E3u5Zb1bVXxdoTXiL5n2VKmkvHfSwGnajyDd5ZI842NJbdFoptHWqejTB09+IRj16WBZA59Q4
+ 8nns+XJsqIM8XUDuAevrQC0Wjib919+8fdnL/ZeMYmc9sATf7tKTEyxzdo9MWjk516Nx4pg9+1iY
+ Ma+X4Bl35E/e5r0Jv/M6/4xLHa7TdduPwnrBfduuI/2nTwAJ0FR1tl0NiiDNPjVnyvkP3/Xw9jmv
+ v3/76+/44PbB66XfOLcbzhkxPgbfF59T9H/87tNVOxH16QtV96p00Lu67qOK9sU0Hvt8E+NX7PRl
+ 3fqa2n9Kd9FJcbPj3i79yWvG/kY06pl5k984MlTcji99gJc8TiQg483Xgd5vHE0EnJ3dJUe93PDA
+ 0pOTZGiW1c8npqXDNqnoz3zAl0NTv1yC7zwICH/m1NEzSYsv/K5nXy/xwckvk+LVdi+W7TrSP/el
+ sgG7eOpTcSfqSV1PlfmH7n90+50/8MD2VT/60PauD+Eqnjpv6V76Yy35M5/C515RxMN5x/a55rxq
+ FnyfhxRXadx4r6JH3lep1vev9cSbWA+tgx7Woa+p9R/60d/Xne8LPT2Iz37yOC8fJgpoHM29vkQa
+ Rz6PPZ/rYWTzVd3Gka8DtVg4muC9frMH1p3P/k6ArpPFT2zPaoXNm8Kzmgaa8wGvzOXXmi+X4P3E
+ VYnNn7y3wq86ENk8Vdc+r+s1L2rqRZUH2/Hp37aA4ou/9oWvDo/91vbH+vQePJz+5I89tP22Nz6w
+ /cB9+rN9atnnEZ1KT3j6fLCW/JkvuR85R5Hz5RL8MU/HXZGF9akLtCY9FEZ7bBS/h3715o++aUf2
+ 5NF9HXGwpbfotFJo6yQ9iTdjfyNSfsvNh0ny9kx/P3XG+RaRJ8QJB+7e90Ms+Xq/cewaxHc8epcr
+ 8uv+G9YffuF78Uh7C4P5BMzsHpl0XLIqcuKCF/VoYtoX4f3E3fMnb/OmLhIe+MWLCpun6g/OeRlW
+ feTMikeTtLe4Ew8hnK/95sn27E/rp8DLw9Dvb/7MB7fP+r77tr/3Cw9L19m29MOGdaz7VLbOFevd
+ 3Oew7l2fO4nLz6XGJfg+Z4CZ56oN66MLNa8X36QqdbXri9p68qGBX7HTl+xFd7HuoovyPh9ZSkhe
+ M/Y3nVHPzJv8xpGh4kCw+HJs4CWPHQIy3nwd6P3GwTw/+8H58ysC9g8shVy7i1X7ie3ZPTLpuGTj
+ SXnEi6YfnbIQ6KYmb8eRlw+jA3/yNi51kfLA7yf+vl7GBee8I4/EWzyiE9ziTjyUM0/7zZNtlpM8
+ Wn+Mv7zufQ9vn/f6+7b/6W0f2PgD9qlzWvd5RKe6T5LpcM7YA4POn7HWfX//uH+Dvo/xPonjCr1Y
+ n3mvSge9q2e7vqitJ/x888dOS9bNca3neL/Q731E8H3RebKfPGbsb0SNY30rb/IbR76K6zx5v3Mf
+ ceTpAlhn+uhALRYO5tn5XeXt6cQD6/pdvBx+YntWK2weYWm1n7D1UJh4sUuUzoNAN7XDJQ95T/An
+ 77GeU/yuZ18v4/Z5R57VyKIT3OK6TuN1uOpzxoNg0Xee0fHH3PInH3p0+wM/eP/2e978wPaTD+If
+ /9g/uxw6p2mfR+l3s3NmOH6fxI+4nGP4Y/uc9veVGO6LF2vOV224Xwq47pHrlaJpD2XTrv5kjfs5
+ mrIOTaf++33DuOghuigz95PHpP2NyCdcZY46WDd56ef511h5yq8JccIxIPvpowO1WDiYh59fEXDj
+ A4s/x7p+ft1PbJDy0gDYM9axSTBxLEa2HXxdo5q6CH+Kv/OENzNZh0g2fQjNQz/F2eUdfeTM2g8W
+ wS2u66z+7Bj+2l/0nYe1fKwNfov68z/x0Pabvvfe7dvf+4jbk75om9bQOb1LPxjWcd2LPh+G0Z+5
+ z8HndozLOQLucQn+mCdhV2VWf7t7FT2k6GjXChlPvcb9HM1YL+pdPFqc0F10Ud7nI0t6El/y8mTo
+ 0AmFd89nL3ECKrB1LyJPiAtf76cPhTmBsjnvOd2Hn18ReeMDiz/Hura9xU9sRPEhAWDPIrVNgolj
+ t7Lt4Osa1dRF+FP8ydu8N+F33n29Pj2L6byjD50Syuu6sJb2FnfiyzH86bvDx2K1/NG+okSv+fkP
+ bi//7nu3v/72h7ZHsGGd4aBuvluYJdyu3eCs47oXtHWuQO/mPgfyFj4zmcvfSS7BH/N03BVZWJ/S
+ bU14l1L12S7toR/8emgd9LDOfSw+p6Ff6yG6KE/8en87j9K7DsJGPTOv66fX5+WoIx9telC/cKTT
+ RuVlXxWZ/eCubzf8/IrIGx9Y3D0/v8tPbDTDprHVM91lY9I+s04897GhqV/K3uESJ3jlwTr8mW+F
+ X7yI7DqZjyLs8o4+cmbtd2LHp5/qjxWJb8aT39vqsXi0/hh4+Q/3PrJ9wffds33VWx/Y3v2hR/e6
+ sr/owfXQmSaHz6Pmm50zsH3OiQs+swn5ukafm89Z+Qa+7wEiyH/VhvWZ96p00GN83CvZdR/RxFPl
+ Gxb+g+e7Tp3Z6QfWdsddfmLzCQixENmzRLNNwonjJZZtB1/XAA/HRfhT/MnbvDfhd959vX5Tzbyj
+ D247QdVl23VUH3ovFJD1l73qIp/Cx6Lsj9LpFz90fftqPKT4sHoz/piC+zvoyt6iB9d6eEUIbljn
+ nse53XDOALWeiQs+s4n4ukYJr3MPLjNQxzwr8GqsVLcvVN2r0qEer32v/DhXP6yc31TmN510Yx36
+ WIw/pYeusxRXaOukhDxnM/Y3olHPzOv6XY/OvwrZ87kedtZ849zM14Guh/lY3qP7P39VqAu+Yd15
+ 5+7PY7lHJo2omCEGx+6TDcVkfzZRwJviGXfkT16quMvjxOLLi/MaN+NSh+Ndr/2I7AX3bbuO5Euf
+ ABKgqepsuyoQQar56Jv5xxS+8e0Pbi/793dv3/JzHyhBMh10ZXvpn2tewkP/Pg9uU7fSU2GHc2Z4
+ 7WPa40fckT/2jn/gfY7gK35yX6VhfUq3NelhxDqXnO7AePZD/Zae6ck6OK71PKWH6IRQaOukhOQ1
+ I/PoYDhj6BhH3llPP+WEG++PiiNR81UC5135krgeYid/fsU6Tn/DOvx5LPfIpHW5OEMMDs0oYjfb
+ IX+/XILvJzMCdvkYdwv8rsd1JN4qzzpdr/2dSCWqPDhcR/pJn3KosK5TddFfHfai7I+i6XXv/dD2
+ G7/rfdvX/fiD24OP8r+nSd/p76Are0v/XOs2RwhuzPhxfrUv/bHezaWfzvEWzjvCX4Tvc6o8mK7U
+ UN0WuvSOHrmvKZd23UesnhLfsLbTP7+iIqcfWPRs689jSTJeIuz2JeMlpV2XdTfbIX+/XIL3E3fP
+ n7y8nJfxy1+fADNOb6au0/Xaj81ecN+260i+6o9A9TnjEVDb6rH60/qj5OUn8EcTft+b7t1+9xvv
+ 2X7qITyoqIfeROnbbdPR55M+owdD9PDyfaDJ4fOoOedX+5Id691cvIoLPrMJ+brGJfiuFxFqa0Ve
+ iZX1QWXR0dcJ6lvHag+10q77KIvvw2WnGevWdNZ/6Nd6iC7K+3xkKSF5zdjfiEY9M6/rZ3UIQJ6M
+ lcdE5gOvcEBVAuNWvt4nbjv98yvmuPiBdX7920jSn1RsnlT4rbmKvOgTbjbBRLEvwneewd95qo7U
+ M/m0Fr0rax7WR3F2ddKsPlYji05wi+s6jVfH4pvx5G/6sUhFV3e+H/+67+t+7IHtc//9+7Z/9Ysf
+ RBs8Z9RLwXnCpYMs7ssf3bThfomTewrBDbqNs46+R9lnGnp38yl8zt2BfF3jEnyfMyKY56oN61O6
+ rQm6RLdUbKVaT/h9XsY1inpMHtkndBcdFXF86yQ96x7Ia76FI/2ej7lVL99nNfZ88CsN4phPaWfe
+ la+Ahbv2beE7zhc/sL7iZa/bzq+/bT4xmSqt9hO2HgqyUV329bCY2aqpHW7guX/kl32L/M7r/DMu
+ dTgv6k+e1YiqVHkItL/6KFuHpjri96wzyFmJYDZ89dbU5e//3EPbS7/jvdv/gZ9X8edWq184CeAJ
+ d9/0c++gq2CFk1sBXPWQ3rA03+ycgWHak/gRV4U0f+wd/8BzX7zFvwKvxsr9lm5rgvq+UOs60V7v
+ Kz00hp1urIPPq/U8pYfooox1b7x4zag66Bj1PBnfsJDwp7YvffHrXMWNrxc/sM7OzrdrZ/94PjEt
+ nVvoJz5E4eWRnZl5uD9H2TvcwHcexFhTHBLWF+GP/MIhonmqruDMYz7zdiJVqfLgcHz6qfysRHzx
+ 135tLwKtruTLm+55ePv873rv9kd/8N7tPfg3gdYr/bBflE1h+DYvHWTpGA+6xkEc13x3iYCGx+Tv
+ +wGX9XUc0yXtSfy4H0f+2IoLLvOJPK7q6ry639JtTdBDiqa9Vqj1gX9+00lH1sHHIF11X+sekwW2
+ 92HwvDpP9n0SOcb+RtQ4Xos9H3MbRz6PlWf2gTjydAGph3wdqAVw/w/qy24513TxA4uYR85f059U
+ vAzYSqv+hGDv2KEYc2asROGiRtk7XOIA4f6RX/bkHfgjv3jB0DwVF5zzjjyrERWo8pDQ8enHtg5X
+ fPHXvvD7/sq6MtO78JdS/dEfvGf7bd/9vo1/tmqnD6pc/cKg4Dzh0kEWdTrqGgdxck8huFH6ZB7n
+ 1vnhY7p1DGJSPU/EfUKqKzWoQwk9J+gRHVKuccZTL7/5Yzeq72fpKrvuMUCtu+ii/NxPHjOqDsJG
+ PXzoJG/P9ON8M1Ye8pGfr4gTrjeqHvLRj7EWr/HG6debP7C+8uX/gX/jHzn7yUnusknpJzuflCga
+ STXbwdc1qqkdbuC5L15EhD9z8w48Ei1urJzX+WdccM47+mC4geIRHWzXkX4Wr3jaX/uyq4xDPbV7
+ 26YPXT/f/vpP3L+95K534x8DP6CHA4tpnave1S+c1IPI6lOWZD7oGgdxXPOyHfqX3nBpHufW+RlG
+ f+ZRz62cd/Lt+G+SB2mu1LA+pduaoIcUTXuo2QoZT8tv/thpyjpQ79JVi/V+pF96iy7KE5/95DGj
+ 6iBs1KOHJXm5m5l+nn+NPZ/rYUXNlzjlZX0dyDX+dtGX6K9qD99xvvkDi+iza9+kVngZaOJ3bLlZ
+ LJPP2Q6+rlFN7XCJA6qfzFiHP/Ot8IsXkc1T9URM5x15ViOqUeUhoePTj21VJL74a1/4ajEEZd6u
+ 6UH8d39/9x0Pbq/4jvfgB+v3b/jvlXU+qWenDzZXv8QRxfOs/uTn3kFXbkUPuacQ3Jjx437UPtO0
+ /FjLln4Vl3uRGZicI5cal+C7T4DJfy/+RcOrfviB7R0fwL8Nvc1Db+rcl+ho2aELlZntWin2o334
+ 9dAqW5vCm6DodK79vim/dYYBXX0C1nvpz3OGS17zLRx1rPcF/aMe8ylM+4uPOO4jjn11Acm78hF4
+ fnb2TWa5+PXyB9b1a/+AGfvJCYM10ObQzGS0M9shf79cgj/F33nCm/kEv+txHTMOhakE1+d67cd2
+ L7hv23Wkn/QJoPqb8ey34roeEt6ewfK/5Wce3F702ndtf+It+GMKD/g/UvYn26qrdR66+JMTBIKl
+ LzKmv4OucbB/rnEu0Zkmh8+j5nFunR8YpmO85lFP36MRd+SPrTzBZa78O37s/ZN34c+bfc8921/4
+ yfprcbB3O4YepuvCSYC6XtBj6s7qrFDrCXt+00n91oF6Dz1P6SG6KOPzWfrznM3oe5P84a33Basq
+ 4OX3ixyIYx9OxA3F972jff06/lK1Z32LnDd5ufyB9Uc+812I/47jJ1Y/YSEKu5SdmQm5P0fZO9zA
+ n+K3trfG73pcx4xLHc7LsiheldeLZduffoxHgwZoSjzrqjj2eex39v4Er99094e23/qd795e/ea7
+ 8QN1NpW6MfOW1OXKPhHWyzMvk8pXaPrCLBxfD7rGwf7l5sJ4mhyTv+9H7Ss/1ru54hWXe5HZhHxd
+ 4xK8z9H1zTyU53/Hf8j9Wd9z7/YP8B9203c7hvVBdvaxJp8XClpyUte6j9zHL5/XCb0nj3hHHGz2
+ Kl7dB8e3TnZ0Xt8bBPiEq8w9n72sn8wee770gTjydAHcTx8JvPba7ctf8O7wXDRf/sBS5Nk38YnK
+ FpWTcxXpJzuS02YRKT5zMl+CZ9yRXzbjwpuZnAd+53X+GRec63Pd9oOjF0UH23WkH9sCqo74a1/4
+ avBQT+0+odMvfODR7Svf9H48rN6zvenuh6sdnlDqxswTg24ZrXPVu/oFQjDiR7zoDrqSLHpwTf5D
+ /9JbMOefNtOQdjePem7lvJNPvLkXmSvvjj/5Ks8vfvDR7Y/9yIPbF7z+nu3776m/MgeYJ2tYj9Jt
+ TT4vFLHktFKtHxB6aC2ASrYOfSyIv0B30UUZ4ut9Jz6esxXob0Q6qfDW+0L1GXj5/SIf4sijtBWn
+ vDPf9Zv+sN1V3ewPjgah+Zn/GJfoPqZKq/6EgF2XdTczhvtzlL3Dseixf+SXfYv85jHfjEsdzut6
+ 7UdxvahyYRNHcSdeQNURv2edQdqsPmbLT9SaP1D/az9+3/bSf/uu7e//7ENVb9pxQdYDdfLEcgtR
+ kPur+ttmvzCoh/DTz72DroKRV2jzH/rv/NKt9CQ7bMkuVsfLrnjF5V5kTj7OGZfgbzXPD95/ffsd
+ 33/v9pX4D77fyf/Nz5M0rA8FLB09+bxQQ7XHFX4P/WD3/Ry1Wremk848d+fhfukuupzA3E8ek/Y3
+ IuUP756PyMvvF1GII48PmhtVD/m4ff7Adsf5t8pxycutfcN61affh7/U71+4JRaJJPUm2D3JKUrt
+ zzeJargE3096gMOfmV3t8pAweUSeeoybccE53jj7O5EYRAeH60i+xSue9te+7C6gFk/s9C9+Af84
+ g59Tfd2P3LM9gAeX+2O9SzdWoH4585bwVtRwf8OPQF6m9M8IHkDHw+TGMU4BxMk9hXCiFe/802Y1
+ jNvNKqDy3sJ5u+CL8V3vreQB5lvxj4evwM+3/upPPbR9gLo+wcN6lG5rgi7OXXKgCivV+sH2ee1r
+ lH/ykCA6kgW29BadVupw7SePG/e94dp5RFf3QLtV4OX3i2jcH/J0Aamn7u352T/a/puXPkDkZePW
+ HlhkObv2GrdUEtabwE92PimtVp7o802iIi7B9ycAwMpDsZl28lL04jnye9/+GRececxnfyda5cFB
+ HMWdeFWkOuJPXZwVPhZlP87T2+5/ePtd3/WL2xe//r36gfr+E4v1Lt2Y2npg5i2py5X91qdw7hcG
+ HcKPePV30FUwJ7RbwnG3R+dXYaUnvNZXWXy+2DvWw3qt/4pTg80uIlk7XOLkrvuDtfgz14Ht4gr/
+ EP4N61/+yYe2z/7ue7b/Dz+gfyKH8rOyOrg+P72rve38VHjpoIfVsFOj+2m6G/Sj3zqTLooQn/3k
+ SVafLxDacH2jDm5gXH6/iEIcebqA5CUf/ibk7eyW/nGQTLf+wHr0Q/8cCd+VVvuJz+bZ9JxVI5Fj
+ SCRqdRrfT3qEUIq2L8Bb9MUvXkQe44KTn5qBz/ydSCQqr/3pp+qV2Kx7xseuGkSw6nm8Vvc+fH37
+ H95y9/ZZ//YXtrve80EW4Dp4+qOR2lZ/zG09MAtHoMfqH/EYttkvDMGKv/rxdNDVgVUH4yQMd3t0
+ fhVWesLb+bFW+Zk7H/NH/xXnApueRDKU5wT+MeUBU+olKf/R8FVvuW/7z99wz/aW+56Yn285X+m2
+ Jp+X6mElHD6Y1KcPFygX25iqf/JcpLvoorzjZElP8prR9yb5ieN5rbzJf/n9IgfidA+xrASMJ9/1
+ 6+fv3j700L8l6lbGrT+wXv2Kh0H4mrTqTwjeLau0m5mZ+3OUvcOx6LHvo0FPCmeTt85vHvPNuNTh
+ vOazH+S9qHJhE6dLwbrKrkXbDou/mqw+yvqIJ/5Tyd99+/3bi//Nz2/f+BP3bf1//EtdOOxVP/uO
+ iQVG6yocK/Zwf8OPQPcLv2DpKzyMO+jKra6DbgTSHqPzC3eTc2Y4fp/Ej/tx5I+tuOAyF594w5+5
+ 6tzFjfxYegD3Rvww/vNff+/2x/HzLf7Fho/ncL+l25r0JmaeJSd1HfoB0fdzFOR++lis5yk9RBdl
+ rLssJfQ9Uv7cL84YdM+8rh/7wpHBg/uLz3GqP3zVmHHId+3a39/8bAnFTedbf2CR5vr538ST8RH3
+ 7CL1pGWREIddaSaW9hxl73ADz33xIib8mZt34I/8zuv8My4452VZlUeqIlnX5cT2px/jVRFxIF7x
+ savJ4inrI5re8L4Pbr/pte/cvvpN79veg3+b5X5GHtbBa9EO1hsTCwz1y1k4Aj129WNr9QtDsPQV
+ HsYddOVW9JCbBRlPk6PzC1d61r7Kxno3V7zics6ZTcjXNS7Bd5+IuDQPMKm3E1Td1zF/yzs/sL3s
+ u+7evgF/HOLxem45X+m2Jp+X6kkl1HXoB8T8ptMo1dvH4n6Gfq2H6KKI+5YlPX2PyNn3i/eHNqaZ
+ N3pdfr8YDV7yOBE3ovcj16+d/a/auMWXx/bA+sqX/Udk/Rb3zOywIAq72c12yN8vxGHscImrffFi
+ Hf7Mt8IvXkRynnHYYNrK69n+TlR+245PPxUnsdnnjI+tcDpq8eFPP/fQI9uXvuE92+ff9QvbD+G/
+ ++NY/RQv87AOnv5opLbdZ8UpXjgCPRaf6139wi9Y8bdujDvoyq2ug24WZD66OMjb883OGSC3cQI/
+ 4o78sZUnuMyVX7zhz3yTugBZQ/2tvvnzrf/5bQ9un/s9d2//4hc/8p9vWZ/SbU0+V1Sx5KQudR+5
+ j19880ffFGwdHNd6ntJDdFHG59R48Zqx7xfvD4bkGHmT3zgyeHB/8TlO9ZPHDgEr/lu2L/mMn0/s
+ rcyP7YFFxuvnfwmveMC6SM0sknZm4srPpcYleMb7aECDgLYn7034lR+Rx7jUIT81Sx6Jp0SrvPan
+ H+NVkeqY8eyXtsLHouzHMH2AP+z9kbu3l/zrd27/7zvur3pMvKuXntTB07dQ2GS9MVec4MIR6LH4
+ Fk5vApqCmUh6Ycv9HXQlVdfBuCkEnXQv/nkvOj8wKj/zKfxNzrsKc57gMlf+Hf/N8hQe0xrqb/Rd
+ np968JHtv37zvdvvesPd23+s/5pgBd36yvqUbmvCaUW3cPlgWk/4fV7GNSrngW31PeonpnUXXZSZ
+ +8ljxv5GNOqZeWc9Ov8qZOVxfT5W6EgeFyYkvrniD7df+4sVdsvTY39g8VvW+fbP+glbl1U2qsv+
+ bELVEIexww089y0ZRCxcZl7OXZyJ+NrDeY2bcanD8c5vP0J7wX3briP5ql4C1eeMR0Btq4jqrwu6
+ xcW3/dwD20v/9c9tf+Gtd28PPoJ/X1I8cx5lrjp4+u1gvTHZSOrGLByBHu5v+BHoT2z4BUtf4WHc
+ QVduVUKheCurbro4Zv19frWvsrHezRWvuFs47+S7CN993kqeUS+WHupv9N3b1uW77n50+zx82/pT
+ P/rA9j78i5HHOqxP6bYmnxfIlpzMV/eR+/jl83IdyWsdHCddR/3EtB6ii/JzP3nM6HujSG2IbtZR
+ BV5+vxiO+vELkwvEdO3s7J9tX/YZP07vYxmP/YFF9vPtr/QTti6rbDSRfV7S3Sh7hxt47lsy9MQU
+ sRkXXGYSF19yOK/zJ16nXjjnnbyINFAUgsF23uqjbAFVR/yedQZp81BP6rpo/rF7P7T9jrt+fvv9
+ 3/OL2zvwqa0x+nM/M18xpQ6eftfPemO6oI4XjkAP91f1Y2v1C0MwE3W86A66kqrrYJyE4m6PFU++
+ 0hPezo+1ys9c+iku+MxkLT+XGpfgH1MeEKbeYq/+Rt/lWLjzDV+Mt/8L/6H5y77z/dvf+mn/fxs7
+ /pKFeUq3NeG0JPhol/bQD349tA56iG/y0D/0o196i04rVbj2k8eF9zeiUc/M6/ohE/3Ik7Hny7Gh
+ fuGAqrofffTsryTmscwf3gPrj7z0e1Hk65jIT3aIwaIpSorPnGrK3uEGnvuWjCKYN3PzDjwShVmz
+ 8zr/jAvOeScvwgyseNuuI/1UfxKb/c342AqnoxY3n96Pn9p+zX947/bZ/+ad279/zweWXgwb/bmf
+ ma94mYd18Pp1/aw3puvoeOEI9HB/5uXO6heGYMVf/Xg66OrAqoNxLMh56eLo/Cqs9Kx9lY31bu58
+ zB/9V9yRP7bynMB3n7eSZ9SLpceoO3XSkb6wEo72vfhm/D/ir5z+HPyPPPg/9LiVYZ7SbU0+V+UJ
+ C/MsHfTQGHajVK+PQfWO+olpPUS3Olr7yWPGvl/dJ6sYdZCfvPTz/Gvs+VyP6heOAYp7Hb5dfW9i
+ Hsv84T2wmOH6uZ6QetKiiN1M/2iCZuwdLnFyQwzO+K2Zl7D22eQuDvvh45JDfkRwnnHBOd44+xHU
+ i6KD7fjkW7ziaX/ty1b6Iqj1iYmfxn/rbfduL/qX79j+NuZHSh/XXQHpE2b2Xc+gZxzzUinM7o/1
+ xsRixgtHoMfiWzi9CWgKZqKVn3EHXbnVddDNgsxHF8eKJ1/pWfsqG+vdXPGKCz6zCfm6xiX47hMR
+ l+YBJvV2AvU3+i7Hwrlf28Dh10/hf+rxu7//7u2VP3DP9rYH+g+iNOVcJK51tOziIW7JyTxDPyB8
+ Xs4fTvHVMajfUb/58r4gXRRx341XHjP2/UI+x7OKUUf0p198FYf9xZc+rE858L8cvPbnjH7srx/+
+ A+tVL/l2lPr9+qRgkSw6M+sYTaissne4ge8nM8CUqO3JO/BHfvHyMIFPvC8D5TOftY0fmwaW37bj
+ 00/FOVD4Pf9o89ivWP3yHe9+aHv5t//s9jVves/2fvxF6kqr0wt/gUd/7sf+UaYTYsOfbIhTXtab
+ dka/dDMPeDN29Svcl6loFBGdGOO2DrrGoTpgkN9AejRm/fNedH6gWBWr1VzxiosOmcl44I99Ef4x
+ 5RG9dWMqDear/KmT+8pnQMHcgd7M5f/2d39o+1x82/oz+PvI+PdwnRrmgU95evJ5iSdRxR99gOAN
+ WnUYJ3vR2T/0o199iE4rBa795Ck+3RuuuZ/6Vt7kv/x+MRpxi+/7ti954V3c/XDGh//A4t+7fL59
+ vZ/sEAPiUHx/cqAU2nOUvcMNPPctmSVqe/IO/JHfeZ3fZ+J6gnNellV5cmZdF4pFoP3px3Y5hr/2
+ ha8mi2e2/NP4t0i//7vftf1O/KzqP97Hf1QY9en6hL+iRn/uJ/VwHnmYl/FutHhjGtjxwhHosfpf
+ OL3ZaApm3o4XbNSdQjirDsZxIWBlcd00xHPoi2mI3s0Vv8OPuCN/7Ivw3N/xJ9+pPKkTcw/iKn94
+ 6FM+gdyvbeiDX8IVP79B/w38jz74P/z4pp95CP9A0syOFg4RytOTeAgoGq7wu+6jLOZZNrY0rMPg
+ Ee/CtR6iWx2t/eQpPuYlTPnDu+ezlzgBFbjnc5zqL75Hz7YP62dXIsfLh//AIsOnvuQf4RX/1hAi
+ sujM9I0maMbe4Qa+n/SAWlOIo7DBO/DhIzWHn/iuY8YF57zG2Y+gXlR5sF1H+lm84ml/7ctW+iLw
+ mn/r51/4ofdvL8E//vHfAnrwUEd9vg1Vd0FGf+4n9Qx66Qyb8V0/eWMyj+M0C0egh/sbfgT6Ext+
+ wUy08jNu1J1z7TronkIQv+ef96LzA6PyMxev8kaHzCbk6xqX4B9THrCm306g/kbf5Vi4qTNw+KV+
+ qi4wKoL/BvFP/vB92+fhfwDyve9fP98yT+m2JvEwsGnE4zq0rzzLVhLhc26l66hfcbBdHyyeV9XX
+ OikheYmm13wLR96VNzoYRz6PPV/6QBx+nV/ffnz7Qy/8p8F+OPNH9sD6gjP8662zv8wu/YSvmZVI
+ lFFS2Ttc4gRnUyX2tBkXXGbSHvjFS1GwL56KC855GRY/OAwkm2E8I/nTj20BxRd/7QuvcG6QZfuH
+ +HNUn/HP37H9xbe+b+PfVqK8gviSdH5dn+kHaPSXuMbnTqQOxnf9rDemgR0vHIEei2/hdJloCmai
+ jhfsoCupug7GTSHoXH2J59CXygZmN0u/igs+swn5usYl+O4TEZfmASb9dgL1N/oux8It/Zjh+A0L
+ jIpQmSjgR+5/ZPvC77t7+5I33bP9NP6tsHngUJ6exMPAao8r/HYd2oft8zI/9zjEt+jKHnEglA6i
+ 06rjvJ882nYddCi/65l5owP71vk7THkXn+Nan7OzvwTsvvCKu9VJJd0q+CTu350/Y/vJN//02dm1
+ T5bKvLynBvcpWl3C3Qy8mpzzwDWvDvc0v5/sdZjkqXwR84Z8ndB8C16XTxsiwksdiqajH24O4D/n
+ lz57+/73+X/4wMPtOANku072e/QL5Lq5ZH5NzNemF1VHATCVroXn/kV5en/H7/iZiDiNTLBdR28c
+ 2qt9R+1fc24187ITLb5TM/3MN2bpwfhTg/us9zB/WHkmf/HdwKM60gECyl4PrZBUvdWoedIXw9qB
+ +qt80uGX2CucOw1ov3mSSbPqHTywoyP93Ufyive4T6DYqj5FamPWXxu9r0W97PNgE/lQyc8/+qs/
+ /VM2fcmZ6Me2/si+YTGXv2X9bxK/LplKkCijmLJ3uIHvJhHiw8qhQb3gMpP2wC9eRDYP/cAH57w0
+ w9uJyGaY4PTXZSi7L0v7zcNCuwws3sCHFdOST6c+LxVvwaivboXrZgUYo7/s7+olphKIX4nCW3mr
+ oI5nHupQY/Exzn24XxiCEe99+/k66i7+VQfdCiCwR+dXvUuHzg8k07n6mc/1Kn7osYSuFLPP4DKT
+ F/4df/Kdiit8MXsadYeHjvTlymOjP/wSrviXH0FwuJ7SoexyzEk8zsNXDiuUvM6z9DSm6hBv6Trq
+ Fwts1weL5yXe1FXxQKT8vl+NI+/KO+u5+f1idsX91Y/0YUWmj/yBRZYHnvN/oqgfY7frk5mijCGR
+ qJVV3c2A6ZOAM37riHj5ar95b8LvvM4/4yKm8408KxGycB8vCHQd1UfZ5Rh+44xXuAmEr/qrE9el
+ DEpgfsDlL56i6D5hJ67xrI9D+lW8G8Um6628amTEM09uocKHrm07XoILP+KV96Ar4pLQ7mqc+zVm
+ /ce+VDZwu3nWnXPOTM7yF33byhNcZsGrT6wvzVP45uZCgo6+y5m+ANCObeDwS3m6zvgBg4M4PWzE
+ a7sccxIPiZtGeVyH9pVn2dzjsA6O6zpO6cGysA8kw6quikeFyav7SVjjWObKGx2ME5DgAx9t7p7/
+ 2PVPuhPPiI98PD4PrD/xAvxt/udfw+ry5LUoo0CJRK3QXHCZAeO+tMTampZ9Af7I77zOn3iptcs7
+ 8uTM2u/ErqP6AFF4la/sxU9/9ag6bcuPw2Unjidm2csf/uI46KEo8DpfYZKHfO1gnpjMs3j9SUmg
+ h/sbfvE7XnzFm7rJmz6czvxJaDc8BlaWPX+fN7ydX6ywM1e88kaHzGQ98Me+CP+Y8oi++mIuDuar
+ /Orbu6rfS+OVn+eMXzt91FmVLXnor/tQtk5MeSod0844JbJCzmN/81RNnKzD4Bn1x+/6YKEvMu33
+ k0fbroOwxrHaqp+7dR6X3y9yXPua7b/EM+JxGI/PA4uFfMVL/9X59fN/mievRRkVSiRqpdPaz4Bx
+ 35JZorYvwB/5nReXIjwVF5z8SLD8SKqEOhXD2m8e+sMrQPvTB+fqUflsixaHywSOJ2bZy188RcE3
+ SfBzFv6Yh3ztYFxMAzteOAI9uO+whdObjaZgJup4wVzXjEtCu+FpIVYersRz6Es8lS5pVz7mLx0y
+ m4iva1S+Hf/Ac/+W84A1+TsB+YsvPPQtnDovG/Xil3CtQ/wIgsP1pK/wyFF+TeJxHr5ykKfiZDHP
+ songUF2LruyFc37iAEZf5nWcLDvslxdAOpTfcTNvdGDf5iP2yMeN83/6yJd8+r+S83F4efweWCzm
+ 2rX/Fk/e+1SXRBkVlu1PAohOGyL1kxq2jwbbCON+5sYN/BSJWcxjvhkXnPNNXgQZyHDDYDtv6lu8
+ ArS/9mUr3ARlY9KlYgLXpQx4GfX5Ngw/3QufONdjerJ0HYxXIl23bDdfxwtHoMfi020U3p+c8Atm
+ 3o4XbNRd55iEdiMw+yMPl+I59KWyKx3jZVf8Dj/ijvyxL8Jz/5bzpE7MPVhP5Q8PfconkDovG/rg
+ 1+wDSKOqQddT5wugebSQAEpH/hnXeca9UJ5lC8K4ImieUT8xzs8ZBvpiJo6170Lllxc2YY2jufK6
+ fnqJE5DgHR989z+63fnH5XicXh7fB9aXf+bPoK4/q9pGE9P2JwEOl36o009q2JbMEnFf9sQN/BSJ
+ /OYx34wLzvmMsx9BvSjNYTtv6lu84ml/7ctWdyYoG5MOlwlcFzG+JOaPn2HrsI96KKp1oIUhPSpe
+ icKbdswX3ss+AV0P6wQ3+Vin+ggP9w66ClY4uRXAVY/Or3qXDs6nLEqXtCfxNzlvF0w5WEfxZ0YV
+ jylP4bt4LgavZC5n6nQHlZ/64JdwElIEipBZeuqhJt4V5zyVjmmpP2dP2on+2leepSf3OKzD4FGe
+ hWs9JPjqaO3L0Xl9b8Rc/Kxiz2cv4qB7xuTD7tdtf+gFPxvf4zE/vg8sVvTxL/lG1K8fwO8KrKb8
+ SVBPaojaT2peNgREyt2TP7jMJB4i2XTkMS4452VY5VmJGG6YtKc/9RnPayRA+2u/tptgwXS4jFNe
+ A/BqG7Dyh18A3bbg5yw86+Vg38xDpdpB3pgGdrxwBHpw32EL537hF8xEHS/YqJuJOLoOrOvNof16
+ WfHkWzp0fobhN9mO9TR+xClfcWuqOpQnuMzkTZ/hz3wqrvCY1lB/o+/ypC9X7jzsQG9mYG70YxMN
+ up7SoexyzEk8TFVlciWC8DrP0pNYDvnFK7qyF875ixc6mddxspSQeNG5DjqU33H9vuBuAX0PBVTg
+ ynP+xuvPfOE3avNxfHn8H1ivPMN/9Xntq/pRnWIlEsTkjGZ3MzD9ZMZaR8TLV/un8Ed+8SGieSpP
+ cM438uiUlAAv3McLEjo+9dkux/DXvvAKN0HZqrvejq5LGZSg65M//MURXWAmrvG5E+oLfr0LBCze
+ pRvZOl44VuSx+Exom/3CLxgW6iN+xh105VbXQbcCuNuj8wtXesLb+bFmuqQ9iR96uMCmd/7iO3U/
+ HlOe8Ax69zf6Ll/qdOXuR/qgE/UjIQke+pWeethEN+FKtzUhasQppxVKXj006jyqJKOat3SVfUJ3
+ 0anSjpNV9WiCp+/XqKfrp7+AxpHBo3U/21696VkQz+MzP/4PLNb1qhf/O/wA/pt3JfJSY/iToJ78
+ aLqf1Lyc9OO3NS273gy7OBPxtYd5zJf4XDqCHO/Zfmz2gvu2idOlUN6KI3DYi7/inEB8BUMfJnRd
+ AiiB+REnf/jpxzjowa3GWz4nZDmM7/rZd0wDk9c4Aj0W38K5X/gFM1HHC3bQlVTRg2teXtpjrHjy
+ 3eScGY7fJ/Ej7sgfW3HBZS4+8YY/c9W5ixv5sfRQf6Pv3k6fnl03cPg1++AJcSid5LnsXpUOM84M
+ eB36Kc+yBVEe6ux8XccpPVgW9ld9s27ymrHv16in3xfJJxbmJZ8H9Tg7P//mh//gC9+YvcdzfmIe
+ WKzw0Wf/KRT/ni62mtKTmU3RzgxQP5mxtqa8BN5v3MBPkZhDfDxM8M644Jxv5MmZdV1O7PjUt3jF
+ o7OZ/PQzO4b66Ql3xwlclwB4GfXJH376MUZ/iXM9J/LodiJGBZB36UaqjheOingsPhdu2/ESrng7
+ XrBRdxpOv6RF3a6jkmBa8SzM8fR2fqx1TpmLV3HBZ3YgX9e4BP+Y8oA19XYC9Tf6LsfCLf3Yid7M
+ O574sdn3pnQouxxzEg9TVXtciSB5nWfpSSyH/HUM0nXUH7/3YfG8xOu4xrMPl+066Ggcy1x5Zz3m
+ I1bjvQ8/uv13MR7v+Yl7YL360/CwuvZ1XbBEolZQAarsZoD0ZOaM3zoiXtbaP4U/iGQ+RDRP5QnO
+ +UaelQhZuI8XJHR86rNdjuGvfeEVboKyVXd1oryCOEHXJ3/4iyO6wExc4xnOob4wMV6JwhvTwI4X
+ jkCPxbdw/uSEXzDzdrxgB11J1XUwDoESkA6PFU++0hOuzo+1ys9c8YoLPjMpD/yxL8I/pjyitx5M
+ paH+Rt+9HZxn5UcnT3/DKtm28z+1/eEXvrfketyndZMfd2oQ/sPzO7b73vK9uHevyKXmk1mXac6A
+ 7i4v7eHX5YTd84lahce12X3S1aUjfPLxqpGO72UvPBlel0/54gew7M7TfmA42l+06Yj7Gk7oeMBv
+ 8Bcq+Job3zRYqJzaEM6ffG7H+x13yNP7O/765NzpEf5V1+Sf/bq9whu+f805nHgIMEq8cwZOdY7Z
+ +Yg8MQ78uScXPrRAobyDP/l27MV7Aw/3oysDyl4PrbBUvQU3T+4/w9ohOqUjHX65vsmDHeHjN08Q
+ mougebCYfXUf4onyriN6mMesro9r9zHrN27tl/3Gh//gp79C6yfo5Yn7hsWC9UO3s6/CATzaYkvN
+ EhtvmojSYiIs4mXmZRUuM7klOhce5jFuxgXn+Hk4iDNQBDlD15F8xs/bZH/tI77LUF+2RVtvQ9fF
+ FL6cHT8ugQrgy+gvcY333egEvNSrftYb08COF44VeSy+hXv6GxZltR7RyYKO+zT0KyU1OQ44/PJ1
+ Ck/0BUz3hP6b3as6P54XxiqHdsVxX3mWTSyH6lCecQ8O98n1AYx9MnHs7wN5ta08rHvhyLvyRi/f
+ Q+zjh0Db9oxXO/qJe31iH1is+1Wf+f14/RsWiVpZ1d0MgD4JOOO3johi1z5VPOLDB4iG/IhonsoT
+ nONHnpWo4jEhoeOTz3Y5hr/2hXd+5SlbdVcnrosYJ+z6ZIe/ONIn0bpUnsXHcA71hYnx7WC9MQ3s
+ eOEI9Oj8O37Hi694O150B11J1XVgzVtefM5Cc9Rx6EtlM4w0mU/hR9yRP7byBJeZvODb8d8sT+Ex
+ rcF6ii88dKYvVx4b+uCXcNXH8iMIDtdjHWOXY07icR6+clih5HWe4jHAKNXLPKJznaf0EN3qqHVS
+ 3eQ1ad8v5Q/vyjvr8VPu/G88/Ad/wxPyg/bR5uP0Hz9PxlPr5z3jT+N/QvZ6uvxJgMOFmFSnn9QU
+ l378tqZlT9zAWyQyepjHfIn36ZExeT3bj81ecN82eXQplLfiCBz24q84JxBfwdCHCdMfkEpgflqj
+ LsZzjP4S13jDVx2M7/rZd8w9bz4BnYDhQ1ds2na8+Ip35VdhN8QlobLxlrOAMVY8C7vJOSOm9cRa
+ ccFnJu+BP/ZFeO6LN/yZi2cXJ/p9/eKv/OEBzPVxsTs/9Idfs4/lBxQO11M6lF2OOYlH7F0OF0M/
+ 5Vk2sRzuh7Poyl641kN0q6O1nzzFhzxqiLP4ae75tM+Kz7Y3PHLfp/33Aj7BL0/8Nyw28MoXfWi7
+ 9szfh9bu1pOZlwmXger2k5qXA9BIyX3ZEzfwfBPMYR7zzbjgnA/8zYtoA0UjOtj2pz7bAqqO+Gtf
+ +Kqi/UVbnbguYtxZ55cd/uIY/SWu8Wk3eRjf9bPemAZ2vHAEeiy+hdObjaZgJup4wQ66kqrrYNwU
+ gs7Vl3gOfalsYHYz+RIXfGY75O+XS/DdJwIuzQNM+t3xV/7E07dwo15k8DefU34EgcD13Oxepc7w
+ phLaFcf8+OXzMq5ROQ9sq17ZIw6290m3OmqdpCfxZmQeBXDGEB020n9mPK3ufmS7/srt1WcPO/KJ
+ fX1yHljs4cte+Pbt/NqX+5MA4lE0iijxqCHEACxStj1xA2/RlzjmMZ94Ki44+XkGybMSiYRwnZH8
+ qc/4cgx/7YuvalA+7guGPkzouohZtvukHX4tEbjXI373U5jkIV87GBdzz6s6eNs6vHRmAIb1cLzv
+ polSt2Guy+kcl4SyyF98lUa84T/2JR44d/Oop/FDjyN/bNUZXObuK6qP+VSewrPeHsQVX+qkL7qY
+ MTb0wS/hWgfrJBOOpTN5V5z6kB09RpyKoW39lV95li0IXqwD5+IZ9SsOtuuDhb6A5HbVVfHKo231
+ o4DGkXfljQ7Xr13/0u0P4L39JI0n74HFhl71mf8ET+ZvoKp6QmeGnLO7BAAAKHpJREFUi7aPxlK2
+ TXGDy0wuic6Fh/jAcIwLTn4eZvLkzIpHU/tTn/GqiID2177sLmDCdLjccF3EOGHnlx3+4hj9Ja7x
+ DOdIHYxHfvfHPDEN7HjhCPRYfAvnT2z4BTNRxwt20JVUXQfjphB0rr7Ec+hLZQOzm8mXuOAz2yF/
+ v1yC7z4RcGkeYNLvjr/yJ56+hRv1IsNT8RsW5PmGR3//p39Ef0d7632Liyf3gYWirj/32p8+Pzt7
+ vT8R9k9sXoFcDn8i8b2AHVzO3czmuD+G/Lg2x7jgHG8+5VmJxCI6OByffJU/b/r2177sKkJ1ch/l
+ Yuvpb1g+H+t+k3OGVtKrznOHz7lT4vJzqXEJ3ufIc8h53CQPMMprZr/qIMd9Kt/Cjf547/Br9uHM
+ VbbuST3UxJt8cqiw2q57U3HK6Q6S13mWnilZ/kXnfoZ+9Ls+ROhhv+r3fvKYUfeXDinoeuY3rOvn
+ 19/w8H0veFJ+buWK/KqS5saTsv67P/Jrz7aH3wQtnn+zT0KJTHHr8vR8okjzHD7pEge8/Dw0HqJs
+ vPTCZ2h4XT7lHcCyO0/7geFof9H6etTlEAAvI/8NfmJcZy3aHmWOPHV0zDt5ZZvHV9B+X9Kxv8O5
+ rr0e4VcZQ7fsY0aCohkL43evOYea+80DUFW3n3NOY7a+7OjEOPDnnnxYeSb9RfVyvyvGsuz10ApJ
+ 1Vtw1+OHDXWjLR7lMY314D0uWlENHPd53vjleAH8cuSBnXtPQOuRvDrw4z6BoavFDneG/2XZ+d0P
+ nz/j5dsf+HVvN/LJe33Sv2GpNfw86/xR/wfSEb3FBMCHlUODaDhd4TKTRKKLTS/mqcsgt+OCc/w8
+ HIDGrcgZuo7kM35/qWZdowwSgK8mHDEPe16qZSvtuARqgC+jP/dzqJeY5GF81888MZnHcZqFI9DD
+ /Q0/An354RfMRCs/4w66cqvroBuBtMdY8eRbOnR+huE3ozRXvOKCz0zeA3/si/CPKY/o9/W7v9E3
+ a8BQPq+GDRx+zT7cWZUteaIz9QiPFmVHD9ex2rVCyXvRw8o6OF/XMfRrPUQnRNffeBSWvLq/dOiE
+ wms98D+Dws+tnvyHFau5PQ8sZv6KF78GIn4zPwE4/M0lhzZs+qGicJkdoLi8mMc4n4nj9GYqfmrf
+ eZi2D4/7tu1PPuMFVB0znvwVxyLaX7S+vspHNwD4PeqTbT65+TL6cz/2jzJXHsa3g7wxmWfx8mHU
+ t7D2HbZwehPQpKN4V37ujbolFGHkFdr82SccY8U7/7SVH5jdXPHCRYfMJuTrGpfgybPjR6TsU3Hw
+ pb5OQFzlDw99C0fBYkMf/Jr88JQfExyuxzrGLsecxGNeheOFPBUni3mW3SjVyzyic51Dv9ZDdKuj
+ tZ88ZvS94Zr74VVm/NzqBU/qz61UQL3cvgcWC3j4k/4Y/ln4h7nsTwCsrSkvgfd5Cv4EqdkBfO0h
+ PyKaB4fl0+PhhGfkyZkRJz9ekNDxyVdxdgz/5FM4N8qvSZeKK9elDErQ9QGh3covI33CSFzjDV95
+ GG+BgGaemHtef1IS6LH4Fu7pb1hL7+hkQcd9GvqVkpp8TsDhV99Xe8qP6ZbuVZ1f3wuF44XntO6R
+ 8yy7Ucf7J3vh1rmTTpUqdO0njxl9b7jOPdH+Wx++51Of9J9buSK/3t4H1qs/+cHt2jN+H0R7sJ/0
+ JVHb+sioTxQcAvc1MtuqffslfcX5cHhGvDWe7e9Ei679yVdxDjzEh68LKL+PWJ9QiOt6fZ1lK7/s
+ 8BfH6C9xnLtewtKH3gVlK0/lpV/bNQtHBo/Ft3B6E9AUDAvMKz/jDrpyq+ugWwHc7bHiybd06PxA
+ Ml3SnsSPOOVrdgYysuoMLnPt7/ixJ/tUXHgw91B/o+9ypE5XXvmpD35N/uVHIBzuu3QouxxzEg9T
+ VZlciSB5nWfpSSyH/OIdfZ7SQ3SqtONkKSF5te066FB+TQ9i+cVP1p+3chU3vt7eBxbr+dIX/TCE
+ +Op+0mPLmvISQHyIThV3M+O4P4b8iGieigvO8ebbnVnxaILD8clX+VmJ+OKv/dpWGe2v+n19xecy
+ dS2KH/HyF0/6SJ+w3Y/nrpe45NG7oGz1vXQzzPr4k5IMHjt9sLX6hSEY9Z75GXfQlVtdB90K4G6P
+ WX+fH7ydH2umsyozH/NH/5rJynxzlK08J/CPKY/oT/AXb+pk+vTlymOjTvwSrus0n0zJQ3/6WnGt
+ I9smvxShn9k4uFg6OM+yBSGKAcpTPLIXrvUQnTMxdu0njxl9b4TQBrxf/aFXvuCt9t6+19v/wGLv
+ X/aSb8InxDdYsjoiXha4/MnBJ79PQzNjaI/hfeNmXHCON5/9CO5F0cEmTpdC+WwLOOzFX3Gso/1F
+ W9ev6x224mWHvxrBJQt+zqPMlUe3s/Ki4kp/Y7xwZPBwfyuvbcdLj+Jd+Rl30JVb6Vdu8NMeY8Xv
+ z63zA2sdaq54xUWHzOQ98Me+CP+Y8oh+X7/7G32zBgzl82rYwOGXzyk8nlW25LnsXkWPEdd5XAfN
+ 2/ENCz9k/4aHX/kbvknl3OaXq/HAggjXf/rFX3t2vv29vsS8rNj3JwcOGzYvkWaKRnsM79s/44Jz
+ vPnsR3Avig42cTf/JJx1jTJUn23R1tux6x328jtftzH6S5zrOZGHfF0/+45pXTpeOAI9Ft/CuV/4
+ BTNRxwt20JVU6Zdrvitpj7HiybfOrfMzDL8ZpbniFRd8ZvIe+GNfhH9MeUS/r9/9jb5ZA4byeTVs
+ 4PBr9uHOqmzJc9m9ih6uY7VrhZL3yf6GdX5+/e89/NZP/Vo1ewVeqPHVGf/u/Blnb3/Lt+Pofrs+
+ IXFq+QTtNwXfHCeGP1HrUsC/iytbl5hvAtl46QXxvlzNo40BLPtGPzAc7S/avB25r8HZ/fgK2lZc
+ ITipbi+063ymrw0l2OMGb+XruEMdvb/DRWcV0Hm90KvqslzVz+i3Agw89cp3n/CeL3yYIFaq1Ln3
+ ec34W+A/9bAptaPGxXkmf+W9oV7uNxOWZa+HVkiIwyi4eeohiIJoi0d5TOM6eY+LlvGLoCz6zaON
+ vBx5YOu8lcf5zLuvv/srnNIx6/n5az/4y3/9Fz4e/4v5lPiRzlfmG5Ya+YKzR86vP/OLsNafhG9x
+ cdl1uJkJjrgK9GHw8Hfi500iOA4JhMuPzXErRNf+5Fu8ytf+2pfdBcy7hzP3pVDdgixbaetWLD/r
+ WZcw+7t6ycNCmZfxIgpvTNqpOzgCPRbfwj39DWvpFZ2s87hPQ79SUpPPCTj88nFYVyrPcev3qs5v
+ xpkBr+NeKM+yBVEe5OO98ISZi4Vb5046VarQtc96idc2/3Do6z/4rE/4oqv0sGJl6ya7zqvx+s0/
+ 8svOzh/9Pqj3qfomUeL7NE6X7G8O9cnDxvKw0uHY1uHwEOXHSy/or8OW359Myw+gAMXDS1N2K9j+
+ opWj4qSqL4nrBM8NfoHMyyX5NI16vaEEyh+76nE7h7hDns6/45/9rHjSK5wT8JNf9e3ac5xiji+H
+ 8+s3SdGLF+uegVe+MTsfESfGgT/35MPKM+mL9wYe7kdX4steD62QVL0FNw8fCuyPYe0QHU2qeJpH
+ ASK23zzJZIcJmkd5Fq77SF4p7jqUV/uwr5//xAef8azP3X7vr37C/qrjXd2Pwbha37BSOP5OaBzK
+ F+JE36lD5ZtFp7DEx0bQmnP4u0PxrVj+uiQ+HGz3godm2/HJ58Oct2nPX3HMoPp60qVjnOsSQAk6
+ flwWejXSJ4zENT7tJg/ju37miWlgxwtHoMfiW7inv2EtvaOTBfX5SeahXympyToDh18+DusKxvJj
+ gsO6130ouxxzEg8DdR/NIIJ5nqf+cVB+8Y57cLhPro90WpkdibzPelHf9e2d2513fuFVfFixYNZ6
+ dcffeeuLz+64/t2Q8eP7E1Zi31iyvwFQfF4engla07vYLe4+seXHi4EiW3DG8xOQ8QNYdudpv8J9
+ CYBf28xbG84g2/EU/ug3j/JySSJN6UdmJ9jjXK/LPcQd8nT+Hf+xX6Yxj8J3dWQf86692q8yd1PO
+ oWa/eUteAFX3nPkmQn7harawRJ4YB/6c+4eVZ9JfVC/3oyvxZa+HVkiq3oK7nvTFsHaITulIh19U
+ U25RDVz7zSN3XoqgebCIjoS0HiJWBkVmH3+I+77za3f+5g/93k/5oVBetflqfsOKSl/xGT90fn72
+ RfgbDR/R4eby0i/RA/Rh8NQjfvDB2Z6HhthxK3KGjq/LAL/iCCSgbIfFrhraX7T1NnQ8Mb505qdF
+ O/xaInBdwsQ13vBVB+NdCIIZF3PPqzzgzVh8C/f0N6zDOVAsCTruUwmYcwFAO7aBw6++F/aUHxMc
+ 1r3Ot+xyzEk8DGR6Dy4qDivnWXaBxO88oit74da5k06VKjT716+ff+Ds2h1fdJUfViz4aj+wWOGX
+ v+i1+Jr6ZVD5Ok9xffJT9DW8X58o/397VxtraVWdzzn3zgxQgQEKSP1ATSWhlaaJ/dNUrYxNTEYL
+ CRoxVRtpy4/GtJo2TfnRxFT7o/5ppLRNjKlaCtoMatKG4h/LxJg2/eE/EezwVQlWKylcYGBm7syc
+ t896nrX2Xvs97713mLngPTPvvtyz9seznrX22muv+56Zwx1Mc+xJZyiNJZkCcWZ+meMMDceksAkA
+ gxedMpZ+jN2Hsk4YckcGpE8P8JL843rwO0faX+jJH8P17DA7MccF43W7Diz6xJnHapVPhBpL3/YL
+ wrJPjghLfocjblDLDJQM+GuxT5z0xWfxpZVWZr8jDiGl6MwutsCXfQLe2BvSAyb8LUaS36FvaxVX
+ 48dzRdyIc37t0PBQwoL88Tj42BeyAEvwhiciCLv84QLCGBcU/ZW94keKn+yHP0RQ1ebn3WSOSnDz
+ 0ZvecH/w7VS58wuWRe53rrPPZ/2xnb5+omEOh5Gb5rXOI7Z1HqJwXMdC/EShuoCkibHW3Y7jQSR7
+ jb7xJzdor8CQUmY3+ZvGUPN1w8s/OpH2F/ONvwYKO8ZHorATQ/EVfeIMqFb5Km58wuqdg4WKcU75
+ lOLnkaRQnIHDl45DcQWBr0NgQXHfLK/8/LKeGEiQz1PnFXYIUh7RTsqDXj7JP6NjT+zMv+kfHnvf
+ m/5FTDv7dTkKlsXwlrd8Fn8i+Jnyk4VBr8HVvH7y2FFy7ElnKI0ltY7J0qlnaLjxCcvjxbhZ9Hpx
+ tSmLP+JnqR+X27rRdB7Og3PIY4YdwEb6eeqcHJ/0aC/ITW6B1znKvy3tkI47qRa4v7RvX4l9+M59
+ X8Dhi3bcr7oORSzIn9iXxr6QBXnMVKFhhF3P5mmnjg1rTXGTXvEjxa/Eg7UqIkLVPz/2/jf8LXtL
+ 8NI7pSXw+AsP/NlsNvn00CXhKfsh5UMrp8/D0uFqHfstHfSbdSUF6JQMyro2uZBVWve4MclT0iwk
+ lwyU5Cnm/Rio73bNHc/agndY7IfryX83X/QanLtoovKJsI4dVPaRL+GiXw1/EwiEKc4hS9DT3SEZ
+ +kPS3WqE47bFTkPsg8yPKfObzeZTK09OQPB0uc5egetYbd3yBfoCYh0dH5fpsFTMWEd6Yk08fT8q
+ ndvxCeDkp8yFfTB98uhNV38q0ez4rsVg+doXH7gVSfA5OF/816Xzw8SOdJktOQThOrKiuZzMhVj3
+ 3AE+P2FJ33ko+usePmalzJFWWVHsowNgsr+wLh76bd3sdx0WAy0u8fb1enbq/mPf/f3UeXqkYYpb
+ mbDthJupQ632hbcRei7L5bF94VvxSjLOKUkaMv2h5rzBH/K07GR+513goR/hORR8XN8WBon76xsU
+ jxcfhqMsMBA0Z3SWJ0FLqoQr6+Lhcrw4QeFBh+dtE2ixD3woFBOz3zt60+u/EKrLIi0uy9m++MAH
+ 4fyXcD/3lA2waPQuL0/Pt8kssLtll9TvWOnYBL4x1noctsa+kIUng/OYE7RfBOhSMbB1N1DsY0bm
+ wz/ISDJDG5/L7CbtxHpacPNFr8GRSS/F/mb8JQ7hRy+uRpX0o0iEGV4OrDcSi3R3SMY5DckgzdJx
+ DX/YG+LHHI/3dPidj+ZNPzXaj3PGvI3dElHVHOJnOE4QKJyPyzQjZOtU947HkaPEExCbB4EdR+Fh
+ xydiHRLF6hj8+MiRm66+J6kvTVc3Ymnc7Tn6Dw9cP+26r+MA/HfD+2ECxsseh+ZjHaoVEb9rpaOx
+ 4LZuh2+HnYA+1mXP68BYK+uupuvhhgjAi/RIu7BuGIP7kbiUvUwjv1pc4u3r9exUPtnROO+nzssh
+ vtIvhSP5h4lwt3aEb17jHFzyHABQHAYkcPQrSfKb/lBzXt1Wi48cOy07mX8jHvphcXB/fDz8ZOQw
+ uoV94asWF48A7bjbhBtOYwjr4dv3xVHlsdXS+jwYRxwNg3/M+NlusnLj0Zte962is2Qdi8tyty8+
+ dN10euKbSIQrIqmbS8lD9G2aYC4PFy1liYpGFC2pF0XX9xTCbc30sq9LrBRLxY9RFk/xT+4wqbSM
+ 9UgyTBjOWsFrKAOxTkOBq365ootQ5DDxhV6Kh0HMbhMnw3nyc9n5kn9tIGwb9bLQfxuTBfRDMgI5
+ JIFfaI7bFjsL5JjI/DYMjM2npqLo54x5G9cdFhrFAzvnOgPgONpxnOmHpWJGONnRehS95Ib4wet0
+ bscnjHc+f+rk6vT69Ruu5m/4zbrL1F+evyXcKKq3XPvdbmX2a7hkj9hx1ssdp2fZYXfQxpJMAZsu
+ HZvXWPriCbwr9vSDz9jRCr/T+rWkXQHwmvzjuvvFdVt2u+iGnvxx/wwXdky/+G96MbSNJH3iDKhW
+ +SpOxRnrhImo2je95DcDhanihy1DMeYNjlb1ja/dF90GppGuT73AhxShvda2Bb7sExpb2gEm/C0G
+ uL+0b1+ouBo/s8Ai0vDEOiYZnvTDy8e+kAV5zFQNp/Gk+AERP0wNF01xkx73m/3vJo/Op7NfXfZi
+ ZXtVVGPXyyzvfOSK6fwYnrQ6PHHZoXoS5suks2dyah2g0jEFjaXvyYp1JSk7bXKZHZ+GpgHb9YXk
+ koHin8w5f+inS2J8nI79cCg7Pp/9d/MtX+Bc1USxvxm/7xvC+Rb9wgJZGZ8mEAiDF5tGAk2+IRn6
+ Q5JWei+Oa/jhT30Sgd+naqdHzWHmd54yn/CyZ+eD+BjO9IplG1v8PB6WD5wgUDgfl2lGyNbxzSa+
+ ui+z4zwBgeQ67bh1N4z57xzZvWv/ZP9VTyX40naX/wkrQv/bP/+TrrvgHUiOgzxiZYmyBRhdKkmt
+ Y7J0HIaxLrMup61TTx3ite7zXHcHaE88pPWklb5hdH2KPsfB7xxIssBnST5TtxZ2TL8smF4MBSz6
+ xBlQrdg3BTSNpU8+5y36hMkvmZNeGNQyVpxPVuq+yNPbF3kAbGTyx255X6/PH+MGF3plX/DjVOw4
+ HqI2BjTt21ciLmKOfQKHryY+tGzrUGR4vKiRt+oRwPWIh+Lr4YCyCMKu7Hh8qregAS7zYIzfuvCt
+ Iyurv362FCvbrqKTNr703fse3jP7ybEv4QQ/GMmsrPHdluSxpMEcxsPrnqyOz9nguQE1+0lX1cnj
+ Y9IyiXNy1eTTepj3YyCx26Vbmqed5Gb4G0laxwN8iQddtsq3CT/3kS/hol/VLniaQNgw4pckrDf7
+ zuPQH5Jyu3113LbYaZk1yvyYMb/ZbD412o9zxryNkRHWI8ppFA/DcYJA4XxcpsNSMSM+2TFWxDN4
+ aEEvXI9jwBT+R+Z7jux+/Ucm+6fHEmzpu8rYpd9GbwP4P6Zn//jQ5/A3iLfmS6VDjUsIHeaCQuC1
+ AnBb1yWr6wASYKK/7rbLutNG0to8m0m/vOhZ0hFZ1gkiP3s+X4uL1rMfFZd4+3o9O5Uv9t3fT50X
+ v+z29aofyS/vLoh6axnHevksDhGvJIGnvSRlr9zi1kSPH1Xh9O1k5o14bL54bhvQuL4tDBL31+Ha
+ N+INvB2TSfKY5DhYsR60pEo4m7c8wpf0CdCL8+CjC589csPr/ghGTPGsahaXs7at3Png7+JQ70Dy
+ n89NMguQFMgWpoCPmT0GaNbj0gjP68QsszFH4rFcMz1r/XVLKkuuCgDIeQ3Okak5AfXTus83/kIn
+ DFKvbKSYb/kIDwdNWfakpvlBfu4r4iRPKy7563y8tLEPzOlS6lJRzy6pzRt+SPKyYWVIAr/QHLct
+ dhbIMZH5bRgYm0+N9uOcMW/jusNCo3gYjrwECufjMh2WihnxyY75YXF0HjBEk93JEfyDER8/euPr
+ Ph/zZ5v0zDvbtpX2c9dD181Odl+bzaZv5qHjUjEFyq1RCLxW8LKXIpOBBMRlV1ERkdsq60ruxSco
+ GSyXHklHpOmlZutsLgs+YMVOxm1S5Hp2Kp/0Nc77qfPyI9yJuCW7iE+4WzvCN6+8jdBzWS9fvdrG
+ qqtpMN9PkuQ3/aHmvMEf8rTsZH7nXeChH+ExFHysYuJD8ri/vjHxqNjwGIOHdqQnVuw/aAsPZoiH
+ wFe/aIH70RPT1RvXb7hqqT+2wO1u8nL2/KH7Rpv88LXfnZ+YvRWfQ/mqZUFJvno7lBIDSRV4XquS
+ VJFMUKg5VLLNYUwqA5g9NRko9nk95U9xHVkc+Cyb5A0/TL8smF4MZa/oE2dAtWLf/dJY+uRz3qJP
+ Ovklc+IPg1rGStlntWM98vT2RR6sNTL5Y8Wpr9fnj3GDCz232/CHvSE74SdkaYZzvuCxtYgLeoRq
+ DH/xRVyJQ6wDhgXDlSLjY1/IgjyyQ3q8GI/Hg6PEE5DJ5K4Xj+355bO9WNl2LcbnTLO3iEjCO7Dh
+ 85GLngsKgY2Vo558nAAmgD5efCLx8JX1SDE3QH3DaFyecHwsfucwVOBdFrzclD+8Sz5BHJIYkqnd
+ 1+vZGcZJ3wnoTPVDvvX16Df9cN/dro9a4ZffA+yXN+I0IO1y236SlD3b4UDr8Z+RnUzvvCo2yU+b
+ j7ga3se1aAWJ++vwXLQsXDYmD+2IRlliRclpSZVwNo8vK374g/Wj+F1Wt+It4F2EnQMvnvXnwE5j
+ i3iLuNp1X8ORv5l3jLngYVC2+OWPSxNFBEBmWRF+qTQmfX8dScXkoiFDyEC5/JiR+bAPieQt665X
+ x7RSDNq8E3ChDhOfWS32pV/5hKvjxM/t5iK46BfjEfy8dG4Xc7nYkN/2ZfOGH5KhPySBX2iO2xY7
+ C+SYyPw2DIzNp1aKEBDcF9djh4VG8QCCRUpAsKBDO0WAxfmLGet48UavFKv5/Jx4C5hCze7Z/5aw
+ v2O8RTyxPnsrcuBLzI1SFJgNXix0OW1dl50dyxZf93mfpgmvFqVoWDJCQfqGqGOoYWRj52EPL0je
+ wGdJvOCmID9MvyyYXgwFLPrEGVDN5qVWcSqqWCdMREWfMPmV9cKglrFiDqRW9Y2v3Rd53JxpZV7q
+ BT6k8fb4Y7wR3uZP2Q7pW/+1v7Rv8wEt9oVeGgOHr7yPug4YFuSPx8HHvpAFeWSH9HhRhMKuv628
+ 68WLzo23gBGFkL1TiulzQ67c/dBH8cth/w7JMPC3iJ6smyWXXQque7zsUvkYwpPYk5SQmnxaJ7xe
+ Auq7XdO3sUvi47TSvN8Sxw3wuT4B/qLLswU/95Ev4aJfcNDtQjaBsGHEL0mgm33ncegPSfe7EY7b
+ FjsNsQ8yP6bMbzabT+2VfMKCF0dPTrtbj7733HkLmELNblyB/vy5M7a3iJPJ13CJ8beIusTlbRxv
+ l4WICy7sEusSMosjgiw2pq/k1hOU65VQp0vvPznj0kfAo0jFfC0ujih23DCLRuLlOPYB2bNT+aSv
+ cd5PnS9uo9PXi426OW08NtGXfvmjqOmSR5wGJPC0l6Ts2YEMtB7/GdnJ9M674K/NR1wN72O9XfMh
+ edxfh5fiBjyPMXhMWt5IKL+C1nnw0cLvTbvpzYfP8r8F5HY3efGs3wRxLiz9/fcvXNkz/Wt8zu6j
+ VoTyJWYSeVFQluluMgcxr3UPErMwrSMLS3EjxMKdigtHsqdlZW2/ONQxUTKArs2bv/TLRfhFJP1O
+ /JzUOOMG+QGo8z2/nbfald8xNjO8nMA10ubxLbaejEAOSSPsN8c1/GEP2Jdkp89t48zvfITZfGql
+ CNm5Yt7GdWeFRnGwfCAvgcL5uEzTc1vHNxsz6K4XfuaqW/GvMB+N2XNVWozH5hFYvfP7+6az6R3d
+ tPuFthgge+ySUqQiFLlp+mVdudZ/sokkLkUgrq3ppUa7Nvb5gg9YseMTxKUi2Nfr2al80tdY+rwk
+ SZ9uFTO2b3OrTHg8iCr++qgVvI3Qc6lLHnEakMDRryQVX/NgoPX4z8hOpt/IX5uPuBrexy/DE9aD
+ k9nsDw7vf/WO/9dscthezr5n38tpYsm4D3aru/7nvz6Gvy7+FD5sepFy1sKEJLXLWoVfqnRX++v2
+ ExVf5ZJ7kqtI1JQv69TfpPjEaXnRoB7vjhbcfLWXcekYiv20nmjSPnORWvSL8QCv/PDAuB0WJfA3
+ UlEsV92jqrEXhyg2jUy+l67jG/6w91LtFNLUyfzOx1WbT03F188Z8zauJ6taZmEuOPISKJyPy7Ql
+ 2GTyHMafxFPV3+Cp6oRNjE0RiCswxqMfgQOPv3rX+vrtk9n0A7lK6bJ7EYrcNF1mZRG4hH4dbZ5N
+ 41IsFtYdFXiXBV9o0IFdFglTIW6xmBS9np0y3/Dn/chQ5a9+cbvZP/qhdfnh/b7gbTS/paDLy22E
+ d60Ejn4mSX7TH2rOG/whT8tO5nfeBR76YXFyf3y8HU9YID2A3wr68Rfec8WPsytjXxFQdo7R2DAC
+ q1/G28RudscEbxN1aVJRwuWNu0ICu8x+iS2VrWiNT1ip+CAmisuAjEAOyaHTcRyLCc/hDOxsxe9+
+ E2Z2U1Mx25YnrAeRX+PbvxTboe5YsIai0p+zt4n/+/DHupP+NhHXjk8gcfsMP1CsSvUin34ilyec
+ eKYwvdTqk43mCz5gxY5PUN8vK3hCv+j17JR5t6tx3k+1S7eKGbuUlT/vV7hwkKP2pVeEFp5YjBff
+ Ec6hIiR7bbEoRnr88VPktOwUUnMI9hCnBR4WrfDYcfHDKYbkcX99Y+JxPqjbGAaem8/n49s/xmvr
+ l02ybGvlcw5hbxNPrN+OS/4BphqTmTmtUFgRwIILpLD/5LUJNpOpuHBkeF+nYlr3+VpkxEID6FJP
+ jnCB6jFvM0mfAH+pfLJbxwXg+8hFatGvhj+KhlPwcsJ+I7FGd4dk6A9J52yE4xr+sDfEjzlG/3T4
+ nY/2TT812o9zxryN3RJR1RziZzhOENfBnzvn3cpt49u/FNAtun5TtkCNy00EVr/8yL7ZrLsDtw9v
+ E33Jq4ULJqff+gBApktPRSS3KaTGImRjn9+smFCNuMTb1+vZqXyyq7H0VU3qvPj5Cndy8XL/svtu
+ V+jea7213JcuOaMT3rUSeNpLkvEwnqHW40dVOH07mX8jHvphcXJ/fHyqf4aF/wfwwenK7Nbn333l
+ f2RzY3/rCLS3ZWv8iIgIHOhWdp98+Gbk6p/ioxC/xGm7tH6JLZXHJ6zxCctrnp6suul38fuqPvPC
+ 4Sv+afKB6clIpVGeegTGgnXqsdoQufKVR9+DJ67b8Psd3+Y1i8WqVC9q6idyeVKJZ4rek4mtCy5Z
+ 8HFSpShm3PiEVZ7AGJ+d9YTVzSf/PlmZ/eXz77783g2TaFw4pQjENTgl8AjaPAKrBx59G35Z4G3T
+ 2eQ94xPW4ts6SzbFZUDWRxG9Hc7jobD7Ot9eokg18qXa2Yrf+Qgzu6np7a29XcZ+MW9je7a2HaJ/
+ 34lu8ukj+1/9n0ll7J5BBMaCdQbB20h194FHr8P/VH0bPgpxM56QVoRTEpcnpvEJa9PixTjh8ocs
+ T1BDQffihQrRFDsVk4HiCI4oLsEfsqF3vgUem4/zMwUfW9HCf3ir1x2Yz1f/4vD+yx5s+MbBGUdg
+ LFhnHMJNCA789xt3zY//yXQ6uwUPAedFkvNyQE0lDNLexljztzNl3efrWDDiCIce7470qR7zBk36
+ NoxW+UIv/kDdEU7U4hbfdjb8USycgpccPI3EWr7qZr2MQ39IOmcjHNfwh73Mi/6WdhpiH2R+5+OK
+ zaemYmbxm78AS59fX9312aPvuuQHCTJ2tzECythtJBypBiLw5ceu3L3afQIrvz+bTi/WJfVr5EUl
+ tKxIsLmsRcMRNg+CFrdYTIpeXNcFPtkRTvqqHnVefoQ7UdSSf/Qj+eXdBdErQrrkqVhBwVgVF0jg
+ 6VeSLI69YlHs9PjjSeu07BRSc0gbXOChH/IY3afxj2rdMb1gz+3Pvn3vM1l97G9/BDz7tp94ZByI
+ wIFHL95tRWsy/QQ+hHNluaS5SOEGlGKzUGScM823RcYvfeaDivHldkr8XhRZRKi/6BeLSPD75Q47
+ Q0Wn7BegUpxM38ahPySDNEvHbYudzBv9zI8585fN5q11kyfR+6vn1n/2c5PfnL6oyfH15Y5Am8kv
+ t7WRv0Rg99efeMvsxPEP4dL/Fr5fHwuluKSixMsdJ2XzmGhxi8WkFKV4dlngE6Fw0lcVqfP0ye0W
+ PudhsaIf7nnMx0ay7BWhhScWYM0M92kSeNpLUvYMMdB6/Bs+GUF1SzuZ3nmTv0+A4CsnT87uOvzu
+ Sx/I0LH/ykTA0/GVMTZaGYgAfjPbeV99Yt98Mv8Q7vz7cCAX9YtDHbt+Kj5tkfFLH8Uj45Lpyqfj
+ r+PE70WRRYQ8i0WRRQQqph9FIswMFR2zVoqS6eXxBkWHvEGaZS4msN/Yy7zob2kn80Y/+OfdM/jL
+ k3tOzlbvPrxv77ex2Q2qZiiO8uWMgJ3l2HZKBA4+ft7up2fvXem6D+Et434Ugt2bFRO6vUExKXpx
+ XYlTcalFKMbjExbjZUUKDWIdJfAb+Fly97OvufSfJ784XefC+PJTj8BYsH7qR7CBA/f+4JLz1ifv
+ xyPJh/FJ+rcDZXdKLRWf8Qlr8e1j80RlgcN3ebLzJ6fyRBhFChDgvg22uyd7ZveMf4DuubbDRFyB
+ HebW6E4Tgft+dPn568fxa266fd1sdj3elOCfKPOjo1x8uzY+YfnbRASyFCv0c/HCb0l4ZDad3Y8P
+ Th2c7971b4ffceFTTdzHwY6LwFiwdtyRbO3Q+fc9+dru+ORd+Dfa9uE22q91fq1pWe3Kb/c4wfn2
+ mEsxS09q0nPbTtTiFotiwx9PLk7R/JkS+NIfXDdFoxST0B+SztkIx700O92T3by7fzJbOXhsNv/m
+ kesve7LhHAc7PgJtJu94d0cHhyKw51+fvGblJJ68Jp0Vr+uBubwUmygPC8VJRy+cipFXO5qweXVc
+ WNFBt85jHRMBqx3hm9deETqV4kW/oBeS/MYz1Hr88Xavm89/BP2D80l3cNKt3P/sb+x9bEh9nFue
+ CHhWLo/Do6dbRAB/Uvyqb/z4WvwPItdPZngCm3TvxCftL81aLAKYMGmtjjlUFWIxykVq5z9hwfGn
+ 8Ynzb+Fzbvcfn3YHD++77Hu+o1GcJREYC9ZZcpAbbgMF7Px7/+/nVqbHr5lMZ9fgf8y+Bk8g7OPh
+ 60345P0q/8ddK156hCJVFDM+oGGmFjVPGce/0k9YcPEE/Hwcny44hBJ6CNs7BPcOrc92HzryzvN/
+ CEc3eAzbMELjwhJFYCxYS3RY2+4qfvXzRcefeuNkPkUBQyHDNxLiGpSna3DrX4Nixtuv4vXKPWF1
+ MDvt5j/sJrNDkIcwftgK1ImTq4eeX7nwsfFfktn2TFgawrFgLc1RvcKOfqfbdeHzz188Pd5dMpkd
+ 3zudrO6dTE/unUxWICeXwJu9+EN/jPGNMYqa+jEnd9cg1vBEt4aiszaxD2HOpmvzebcG3TX8Mru1
+ ldnkGfwt3RrK4Rp41ubT2drhC161NvmV6XFRjK9jBGoE/h//xb5CiJqhJQAAAABJRU5ErkJggg==
+ installModes:
+ - supported: true
+ type: OwnNamespace
+ - supported: true
+ type: SingleNamespace
+ - supported: true
+ type: MultiNamespace
+ - supported: true
+ type: AllNamespaces
+ install:
+ strategy: deployment
+ spec:
+ deployments:
+ - name: clickhouse-operator
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: clickhouse-operator
+ template:
+ metadata:
+ labels:
+ app: clickhouse-operator
+ spec:
+ containers:
+ - env:
+ - name: OPERATOR_POD_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: OPERATOR_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: OPERATOR_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: OPERATOR_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: OPERATOR_POD_SERVICE_ACCOUNT
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.serviceAccountName
+ - name: OPERATOR_CONTAINER_CPU_REQUEST
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: requests.cpu
+ - name: OPERATOR_CONTAINER_CPU_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: limits.cpu
+ - name: OPERATOR_CONTAINER_MEM_REQUEST
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: requests.memory
+ - name: OPERATOR_CONTAINER_MEM_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: limits.memory
+ - name: WATCH_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ image: docker.io/altinity/clickhouse-operator:0.26.0
+ imagePullPolicy: Always
+ name: clickhouse-operator
+ - image: docker.io/altinity/metrics-exporter:0.26.0
+ imagePullPolicy: Always
+ name: metrics-exporter
+ serviceAccountName: clickhouse-operator
+ permissions:
+ - serviceAccountName: clickhouse-operator
+ rules:
+ #
+ # Core API group
+ #
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - services
+ - persistentvolumeclaims
+ - secrets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ #
+ # apps.* resources
+ #
+ - apiGroups:
+ - apps
+ resources:
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - apps
+ resources:
+ - replicasets
+ verbs:
+ - get
+ - patch
+ - update
+ - delete
+      # The operator deployment itself, identified by name
+ - apiGroups:
+ - apps
+ resources:
+ - deployments
+ resourceNames:
+ - clickhouse-operator
+ verbs:
+ - get
+ - patch
+ - update
+ - delete
+ #
+ # policy.* resources
+ #
+ - apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ #
+ # discovery.* resources
+ #
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+ #
+ # apiextensions
+ #
+ - apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ # clickhouse - related resources
+ - apiGroups:
+ - clickhouse.altinity.com
+ #
+ # The operator's specific Custom Resources
+ #
+
+ resources:
+ - clickhouseinstallations
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - delete
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallationtemplates
+ - clickhouseoperatorconfigurations
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallations/finalizers
+ - clickhouseinstallationtemplates/finalizers
+ - clickhouseoperatorconfigurations/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallations/status
+ - clickhouseinstallationtemplates/status
+ - clickhouseoperatorconfigurations/status
+ verbs:
+ - get
+ - update
+ - patch
+ - create
+ - delete
+ # clickhouse-keeper - related resources
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - delete
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations/status
+ verbs:
+ - get
+ - update
+ - patch
+ - create
+ - delete
diff --git a/deploy/operatorhub/0.26.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.26.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
new file mode 100644
index 000000000..03bb8e057
--- /dev/null
+++ b/deploy/operatorhub/0.26.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -0,0 +1,1527 @@
+# Template Parameters:
+#
+# KIND=ClickHouseInstallation
+# SINGULAR=clickhouseinstallation
+# PLURAL=clickhouseinstallations
+# SHORT=chi
+# OPERATOR_VERSION=0.26.0
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhouseinstallations.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.26.0
+spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseInstallation
+ singular: clickhouseinstallation
+ plural: clickhouseinstallations
+ shortNames:
+ - chi
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
+ - name: version
+ type: string
+ description: Operator version
+ priority: 1 # show in wide view
+ jsonPath: .status.chop-version
+ - name: clusters
+ type: integer
+ description: Clusters count
+ jsonPath: .status.clusters
+ - name: shards
+ type: integer
+ description: Shards count
+ priority: 1 # show in wide view
+ jsonPath: .status.shards
+ - name: hosts
+ type: integer
+ description: Hosts count
+ jsonPath: .status.hosts
+ - name: taskID
+ type: string
+ description: TaskID
+ priority: 1 # show in wide view
+ jsonPath: .status.taskID
+ - name: hosts-completed
+ type: integer
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
+ - name: hosts-updated
+ type: integer
+ description: Updated hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUpdated
+ - name: hosts-added
+ type: integer
+ description: Added hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsAdded
+ - name: hosts-deleted
+ type: integer
+ description: Hosts deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDeleted
+ - name: endpoint
+ type: string
+ description: Client access endpoint
+ priority: 1 # show in wide view
+ jsonPath: .status.endpoint
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ - name: suspend
+ type: string
+ description: Suspend reconciliation
+ # Displayed in all priorities
+ jsonPath: .spec.suspend
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters"
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: |
+ APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |
+ Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: |
+ Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other
+ properties:
+ chop-version:
+ type: string
+ description: "Operator version"
+ chop-commit:
+ type: string
+ description: "Operator git commit SHA"
+ chop-date:
+ type: string
+ description: "Operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this resource"
+ clusters:
+ type: integer
+ minimum: 0
+ description: "Clusters count"
+ shards:
+ type: integer
+ minimum: 0
+ description: "Shards count"
+ replicas:
+ type: integer
+ minimum: 0
+ description: "Replicas count"
+ hosts:
+ type: integer
+ minimum: 0
+ description: "Hosts count"
+ status:
+ type: string
+ description: "Status"
+ taskID:
+ type: string
+ description: "Current task id"
+ taskIDsStarted:
+ type: array
+ description: "Started task ids"
+ nullable: true
+ items:
+ type: string
+ taskIDsCompleted:
+ type: array
+ description: "Completed task ids"
+ nullable: true
+ items:
+ type: string
+ action:
+ type: string
+ description: "Action"
+ actions:
+ type: array
+ description: "Actions"
+ nullable: true
+ items:
+ type: string
+ error:
+ type: string
+ description: "Last error"
+ errors:
+ type: array
+ description: "Errors"
+ nullable: true
+ items:
+ type: string
+ hostsUnchanged:
+ type: integer
+ minimum: 0
+ description: "Unchanged Hosts count"
+ hostsUpdated:
+ type: integer
+ minimum: 0
+ description: "Updated Hosts count"
+ hostsAdded:
+ type: integer
+ minimum: 0
+ description: "Added Hosts count"
+ hostsCompleted:
+ type: integer
+ minimum: 0
+ description: "Completed Hosts count"
+ hostsDeleted:
+ type: integer
+ minimum: 0
+ description: "Deleted Hosts count"
+ hostsDelete:
+ type: integer
+ minimum: 0
+ description: "About to delete Hosts count"
+ pods:
+ type: array
+ description: "Pods"
+ nullable: true
+ items:
+ type: string
+ pod-ips:
+ type: array
+ description: "Pod IPs"
+ nullable: true
+ items:
+ type: string
+ fqdns:
+ type: array
+ description: "Pods FQDNs"
+ nullable: true
+ items:
+ type: string
+ endpoint:
+ type: string
+ description: "Endpoint"
+ endpoints:
+ type: array
+ description: "All endpoints"
+ nullable: true
+ items:
+ type: string
+ generation:
+ type: integer
+ minimum: 0
+ description: "Generation"
+ normalized:
+ type: object
+ description: "Normalized resource requested"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized resource completed"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ actionPlan:
+ type: object
+ description: "Action Plan"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ hostsWithTablesCreated:
+ type: array
+ description: "List of hosts with tables created by the operator"
+ nullable: true
+ items:
+ type: string
+ hostsWithReplicaCaughtUp:
+ type: array
+ description: "List of hosts with replica caught up"
+ nullable: true
+ items:
+ type: string
+ usedTemplates:
+ type: array
+ description: "List of templates used to build this CHI"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ taskID:
+ type: string
+ description: |
+ Allows to define custom taskID for CHI update and watch status of this update execution.
+ Displayed in all .status.taskID* fields.
+ By default (if not filled) every update of CHI manifest will generate random taskID
+ stop: &TypeStringBool
+ type: string
+ description: |
+ Allows to stop all ClickHouse clusters defined in a CHI.
+ Works as the following:
+                  - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
+                  - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ restart:
+ type: string
+ description: |
+ In case 'RollingUpdate' specified, the operator will always restart ClickHouse pods during reconcile.
+                  This option is used in rare cases when force restart is required and is typically removed after the use in order to avoid unneeded restarts.
+ enum:
+ - ""
+ - "RollingUpdate"
+ suspend:
+ !!merge <<: *TypeStringBool
+ description: |
+ Suspend reconciliation of resources managed by a ClickHouse Installation.
+ Works as the following:
+ - When `suspend` is `true` operator stops reconciling all resources.
+ - When `suspend` is `false` or not set, operator reconciles all resources.
+ troubleshoot:
+ !!merge <<: *TypeStringBool
+ description: |
+ Allows to troubleshoot Pods during CrashLoopBack state.
+ This may happen when wrong configuration applied, in this case `clickhouse-server` wouldn't start.
+ Command within ClickHouse container is modified with `sleep` in order to avoid quick restarts
+ and give time to troubleshoot via CLI.
+ Liveness and Readiness probes are disabled as well.
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ templating:
+ type: object
+ # nullable: true
+ description: |
+ Optional, applicable inside ClickHouseInstallationTemplate only.
+                  Defines current ClickHouseInstallationTemplate application options to target ClickHouseInstallation(s).
+ properties:
+ policy:
+ type: string
+ description: |
+ When defined as `auto` inside ClickhouseInstallationTemplate, this ClickhouseInstallationTemplate
+ will be auto-added into ClickHouseInstallation, selectable by `chiSelector`.
+ Default value is `manual`, meaning ClickHouseInstallation should request this ClickhouseInstallationTemplate explicitly.
+ enum:
+ - ""
+ - "auto"
+ - "manual"
+ chiSelector:
+ type: object
+ description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ reconciling: &TypeReconcile
+ type: object
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator,
+ but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+                    Default behavior is `Delete`
+ # nullable: true
+ properties:
+ statefulSet: &TypeObjectsCleanup
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile.
+                    Default behavior is `Retain`
+ # nullable: true
+ properties:
+ statefulSet:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed Service, `Retain` by default"
+ macros:
+ type: object
+ description: "macros parameters"
+ properties:
+ sections:
+ type: object
+ description: "sections behaviour for macros"
+ properties:
+ users:
+ type: object
+ description: "sections behaviour for macros on users"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ profiles:
+ type: object
+ description: "sections behaviour for macros on profiles"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ quotas:
+ type: object
+ description: "sections behaviour for macros on quotas"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ settings:
+ type: object
+ description: "sections behaviour for macros on settings"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ files:
+ type: object
+ description: "sections behaviour for macros on files"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectfully before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ !!merge <<: *TypeStringBool
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to do not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ drop:
+ type: object
+ properties:
+ replicas:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated
+ properties:
+ onDelete:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted
+ onLostVolume:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica volume is lost
+ active:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated
+ reconcile:
+ !!merge <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ defaults:
+ type: object
+ description: |
+ define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ !!merge <<: *TypeStringBool
+ description: |
+ defines whether replicas should be specified by FQDN in ``.
+ In case of "no" a short hostname will be used and clickhouse-server will rely on the kubernetes default DNS suffixes for lookup
+ "no" by default
+ distributedDDL:
+ type: object
+ description: |
+ allows change `` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner: &TypePVCProvisioner
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy: &TypePVCReclaimPolicy
+ type: string
+ description: |
+ defines behavior of `PVC` deletion.
+ `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates: &TypeTemplateNames
+ type: object
+ description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ serviceTemplates:
+ type: array
+ description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ nullable: true
+ items:
+ type: string
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ zookeeper: &TypeZookeeperConfig
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separately; see examples at https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/
+ currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl`
+ More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper
+ # nullable: true
+ properties:
+ nodes:
+ type: array
+ description: "describe every available zookeeper cluster node for interaction"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - host
+ properties:
+ host:
+ type: string
+ description: "dns name or ip address for Zookeeper node"
+ port:
+ type: integer
+ description: "TCP port which used to connect to Zookeeper node"
+ minimum: 0
+ maximum: 65535
+ secure:
+ !!merge <<: *TypeStringBool
+ description: "if a secure connection to Zookeeper is required"
+ availabilityZone:
+ type: string
+ description: "availability zone for Zookeeper node"
+ session_timeout_ms:
+ type: integer
+ description: "session timeout during connect to Zookeeper"
+ operation_timeout_ms:
+ type: integer
+ description: "one operation timeout during Zookeeper transactions"
+ root:
+ type: string
+ description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)"
+ identity:
+ type: string
+ description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
+ users:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure password hashed, authorization restrictions, database level security row filters etc.
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers
+
+ any key can contain `valueFrom` with `secretKeyRef`, which allows passing a password from kubernetes secrets
+ the secret value will be passed in `pod.spec.containers.env` and generated with from_env=XXX in XML in /etc/clickhouse-server/users.d/chop-generated-users.xml
+ it is not updated automatically when the `secret` changes; change spec.taskID to manually trigger a reconcile cycle
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+ any key with prefix `k8s_secret_` must have a value with format namespace/secret/key or secret/key
+ in this case value from secret will write directly into XML tag during render *-usersd ConfigMap
+
+ any key with prefix `k8s_secret_env` must have a value with format namespace/secret/key or secret/key
+ in this case value from secret will write into environment variable and write to XML tag via from_env=XXX
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ profiles:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of settings profile
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of resource quotas
+ More details: https://clickhouse.tech/docs/en/operations/quotas/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ settings: &TypeSettings
+ type: object
+ description: |
+ allows configure `clickhouse-server` settings inside ... tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+ the secret value will be passed in `pod.spec.env` and generated with from_env=XXX in XML in /etc/clickhouse-server/config.d/chop-generated-settings.xml
+ it is not updated automatically when the `secret` changes; change spec.taskID to manually trigger a reconcile cycle
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files: &TypeFiles
+ type: object
+ description: |
+ allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ every key in this object is the file name
+ every value in this object is the file content
+ you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html
+ each key can contain a prefix like {common}, {users}, {hosts} or config.d, users.d, conf.d; wrong prefixes will be ignored, subfolders also will be ignored
+ More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml
+
+ any key can contain `valueFrom` with `secretKeyRef`, which allows passing values from kubernetes secrets
+ secrets will be mounted into the pod as a separate volume in /etc/clickhouse-server/secrets.d/
+ and will be updated automatically when the secret is updated
+ this is useful for passing SSL certificates from cert-manager or a similar tool
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ clusters:
+ type: array
+ description: |
+ describes clusters layout and allows change settings on cluster-level, shard-level and replica-level
+ every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server`
+ all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml`
+ Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ If `cluster` contains zookeeper settings (could be inherited from top `chi` level), when you can create *ReplicatedMergeTree tables
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zookeeper:
+ !!merge <<: *TypeZookeeperConfig
+ description: |
+ optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.zookeeper` settings
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
+ schemaPolicy:
+ type: object
+ description: |
+ describes how schema is propagated within replicas and shards
+ properties:
+ replica:
+ type: string
+ description: "how schema is propagated within a replica"
+ enum:
+ # List SchemaPolicyReplicaXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ shard:
+ type: string
+ description: "how schema is propagated between shards"
+ enum:
+ # List SchemaPolicyShardXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ - "DistributedTablesOnly"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: optional, open secure ports for cluster
+ secret:
+ type: object
+ description: "optional, shared secret value to secure cluster communications"
+ properties:
+ auto:
+ !!merge <<: *TypeStringBool
+ description: "Auto-generate shared secret value to secure cluster communications"
+ value:
+ description: "Cluster shared secret value in plain text"
+ type: string
+ valueFrom:
+ description: "Cluster shared secret source"
+ type: object
+ properties:
+ secretKeyRef:
+ description: |
+ Selects a key of a secret in the clickhouse installation namespace.
+ Should not be used if value is not empty.
+ type: object
+ properties:
+ name:
+ description: |
+ Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ key:
+ description: The key of the secret to select from. Must be a valid secret key.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - name
+ - key
+ pdbManaged:
+ !!merge <<: *TypeStringBool
+ description: |
+ Specifies whether the Pod Disruption Budget (PDB) should be managed.
+ During the next installation, if PDB management is enabled, the operator will
+ attempt to retrieve any existing PDB. If none is found, it will create a new one
+ and initiate a reconciliation loop. If PDB management is disabled, the existing PDB
+ will remain intact, and the reconciliation loop will not be executed. By default,
+ PDB management is enabled.
+ pdbMaxUnavailable:
+ type: integer
+ description: |
+ Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction,
+ i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions
+ by specifying 0. This is a mutually exclusive setting with "minAvailable".
+ minimum: 0
+ maximum: 65535
+ reconcile:
+ type: object
+ description: "allow tuning reconciling process"
+ properties:
+ runtime:
+ !!merge <<: *TypeReconcileRuntime
+ host:
+ !!merge <<: *TypeReconcileHost
+ layout:
+ type: object
+ description: |
+ describe current cluster layout, how much shards in cluster, how much replica in shard
+ allows overriding settings on each shard and replica separately
+ # nullable: true
+ properties:
+ shardsCount:
+ type: integer
+ description: |
+ how much shards for current ClickHouse cluster will run in Kubernetes,
+ each shard contains shared-nothing part of data and contains set of replicas,
+ cluster contains 1 shard by default"
+ replicasCount:
+ type: integer
+ description: |
+ how much replicas in each shards for current cluster will run in Kubernetes,
+ each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance,
+ every shard contains 1 replica by default"
+ shards:
+ type: array
+ description: |
+ optional, allows override top-level `chi.spec.configuration`, cluster-level
+ `chi.spec.configuration.clusters` settings for each shard separately,
+ use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ definitionType:
+ type: string
+ description: "DEPRECATED - to be removed soon"
+ weight:
+ type: integer
+ description: |
+ optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ internalReplication:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise
+ allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard
+ override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates`
+ replicasCount:
+ type: integer
+ description: |
+ optional, how much replicas in selected shard for selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance,
+ shard contains 1 replica by default
+ override cluster-level `chi.spec.configuration.clusters.layout.replicasCount`
+ minimum: 1
+ replicas:
+ type: array
+ description: |
+ optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards`
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates`
+ replicas:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`
+ shardsCount:
+ type: integer
+ description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`"
+ minimum: 1
+ shards:
+ type: array
+ description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents"
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ templates:
+ type: object
+ description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
+ # nullable: true
+ properties:
+ hostTemplates:
+ type: array
+ description: "hostTemplate will use during apply to generate `clickhouse-server` config files"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`"
+ type: string
+ portDistribution:
+ type: array
+ description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network"
+ enum:
+ # List PortDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClusterScopeIndex"
+ spec:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]`
+ More info: https://clickhouse.tech/docs/en/interfaces/tcp/
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]`
+ More info: https://clickhouse.tech/docs/en/interfaces/http/
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]`
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do"
+ podTemplates:
+ type: array
+ description: |
+ podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ generateName:
+ type: string
+ description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables"
+ zone:
+ type: object
+ description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ #required:
+ # - values
+ properties:
+ key:
+ type: string
+ description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`"
+ values:
+ type: array
+ description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`"
+ # nullable: true
+ items:
+ type: string
+ distribution:
+ type: string
+ description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ enum:
+ - ""
+ - "Unspecified"
+ - "OnePerHost"
+ podDistribution:
+ type: array
+ description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "you can define multiple affinity policy types"
+ enum:
+ # List PodDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClickHouseAntiAffinity"
+ - "ShardAntiAffinity"
+ - "ReplicaAntiAffinity"
+ - "AnotherNamespaceAntiAffinity"
+ - "AnotherClickHouseInstallationAntiAffinity"
+ - "AnotherClusterAntiAffinity"
+ - "MaxNumberPerNode"
+ - "NamespaceAffinity"
+ - "ClickHouseInstallationAffinity"
+ - "ClusterAffinity"
+ - "ShardAffinity"
+ - "ReplicaAffinity"
+ - "PreviousTailAffinity"
+ - "CircularReplication"
+ scope:
+ type: string
+ description: "scope for apply each podDistribution"
+ enum:
+ # list PodDistributionScopeXXX constants
+ - ""
+ - "Unspecified"
+ - "Shard"
+ - "Replica"
+ - "Cluster"
+ - "ClickHouseInstallation"
+ - "Namespace"
+ number:
+ type: integer
+ description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type"
+ minimum: 0
+ maximum: 65535
+ topologyKey:
+ type: string
+ description: |
+ use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`,
+ more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity"
+ metadata:
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+ description: "allows define whole Pod.spec inside StatefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ volumeClaimTemplates:
+ type: array
+ description: |
+ allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ provisioner: *TypePVCProvisioner
+ reclaimPolicy: *TypePVCReclaimPolicy
+ metadata:
+ type: object
+ description: |
+ allows to pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ serviceTemplates:
+ type: array
+ description: |
+ allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ generateName:
+ type: string
+ description: |
+ allows define format for generated `Service` name,
+ look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates
+ for details about available template variables
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Service
+ Could be used to define Cloud Provider specific metadata which impacts the behavior of the service
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ useTemplates:
+ type: array
+ description: |
+ list of `ClickHouseInstallationTemplate` (chit) resource names which will merge with current `CHI`
+ manifest during render Kubernetes resources to create related ClickHouse clusters
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "name of `ClickHouseInstallationTemplate` (chit) resource"
+ namespace:
+ type: string
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
+ useType:
+ type: string
+ description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
+ enum:
+ # List useTypeXXX constants from model
+ - ""
+ - "merge"
diff --git a/deploy/operatorhub/0.26.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.26.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
new file mode 100644
index 000000000..4350b913e
--- /dev/null
+++ b/deploy/operatorhub/0.26.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -0,0 +1,1527 @@
+# Template Parameters:
+#
+# KIND=ClickHouseInstallationTemplate
+# SINGULAR=clickhouseinstallationtemplate
+# PLURAL=clickhouseinstallationtemplates
+# SHORT=chit
+# OPERATOR_VERSION=0.26.0
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhouseinstallationtemplates.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.26.0
+spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseInstallationTemplate
+ singular: clickhouseinstallationtemplate
+ plural: clickhouseinstallationtemplates
+ shortNames:
+ - chit
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
+ - name: version
+ type: string
+ description: Operator version
+ priority: 1 # show in wide view
+ jsonPath: .status.chop-version
+ - name: clusters
+ type: integer
+ description: Clusters count
+ jsonPath: .status.clusters
+ - name: shards
+ type: integer
+ description: Shards count
+ priority: 1 # show in wide view
+ jsonPath: .status.shards
+ - name: hosts
+ type: integer
+ description: Hosts count
+ jsonPath: .status.hosts
+ - name: taskID
+ type: string
+ description: TaskID
+ priority: 1 # show in wide view
+ jsonPath: .status.taskID
+ - name: hosts-completed
+ type: integer
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
+ - name: hosts-updated
+ type: integer
+ description: Updated hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUpdated
+ - name: hosts-added
+ type: integer
+ description: Added hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsAdded
+ - name: hosts-deleted
+ type: integer
+ description: Hosts deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDeleted
+ - name: endpoint
+ type: string
+ description: Client access endpoint
+ priority: 1 # show in wide view
+ jsonPath: .status.endpoint
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ - name: suspend
+ type: string
+ description: Suspend reconciliation
+ # Displayed in all priorities
+ jsonPath: .spec.suspend
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters"
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: |
+ APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |
+ Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: |
+ Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other
+ properties:
+ chop-version:
+ type: string
+ description: "Operator version"
+ chop-commit:
+ type: string
+ description: "Operator git commit SHA"
+ chop-date:
+ type: string
+ description: "Operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this resource"
+ clusters:
+ type: integer
+ minimum: 0
+ description: "Clusters count"
+ shards:
+ type: integer
+ minimum: 0
+ description: "Shards count"
+ replicas:
+ type: integer
+ minimum: 0
+ description: "Replicas count"
+ hosts:
+ type: integer
+ minimum: 0
+ description: "Hosts count"
+ status:
+ type: string
+ description: "Status"
+ taskID:
+ type: string
+ description: "Current task id"
+ taskIDsStarted:
+ type: array
+ description: "Started task ids"
+ nullable: true
+ items:
+ type: string
+ taskIDsCompleted:
+ type: array
+ description: "Completed task ids"
+ nullable: true
+ items:
+ type: string
+ action:
+ type: string
+ description: "Action"
+ actions:
+ type: array
+ description: "Actions"
+ nullable: true
+ items:
+ type: string
+ error:
+ type: string
+ description: "Last error"
+ errors:
+ type: array
+ description: "Errors"
+ nullable: true
+ items:
+ type: string
+ hostsUnchanged:
+ type: integer
+ minimum: 0
+ description: "Unchanged Hosts count"
+ hostsUpdated:
+ type: integer
+ minimum: 0
+ description: "Updated Hosts count"
+ hostsAdded:
+ type: integer
+ minimum: 0
+ description: "Added Hosts count"
+ hostsCompleted:
+ type: integer
+ minimum: 0
+ description: "Completed Hosts count"
+ hostsDeleted:
+ type: integer
+ minimum: 0
+ description: "Deleted Hosts count"
+ hostsDelete:
+ type: integer
+ minimum: 0
+ description: "About to delete Hosts count"
+ pods:
+ type: array
+ description: "Pods"
+ nullable: true
+ items:
+ type: string
+ pod-ips:
+ type: array
+ description: "Pod IPs"
+ nullable: true
+ items:
+ type: string
+ fqdns:
+ type: array
+ description: "Pods FQDNs"
+ nullable: true
+ items:
+ type: string
+ endpoint:
+ type: string
+ description: "Endpoint"
+ endpoints:
+ type: array
+ description: "All endpoints"
+ nullable: true
+ items:
+ type: string
+ generation:
+ type: integer
+ minimum: 0
+ description: "Generation"
+ normalized:
+ type: object
+ description: "Normalized resource requested"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized resource completed"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ actionPlan:
+ type: object
+ description: "Action Plan"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ hostsWithTablesCreated:
+ type: array
+ description: "List of hosts with tables created by the operator"
+ nullable: true
+ items:
+ type: string
+ hostsWithReplicaCaughtUp:
+ type: array
+ description: "List of hosts with replica caught up"
+ nullable: true
+ items:
+ type: string
+ usedTemplates:
+ type: array
+ description: "List of templates used to build this CHI"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ taskID:
+ type: string
+ description: |
+ Allows to define custom taskID for CHI update and watch status of this update execution.
+ Displayed in all .status.taskID* fields.
+ By default (if not filled) every update of CHI manifest will generate random taskID
+ stop: &TypeStringBool
+ type: string
+ description: |
+ Allows to stop all ClickHouse clusters defined in a CHI.
+ Works as the following:
+ - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
+ - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ restart:
+ type: string
+ description: |
+ In case 'RollingUpdate' specified, the operator will always restart ClickHouse pods during reconcile.
+ This options is used in rare cases when force restart is required and is typically removed after the use in order to avoid unneeded restarts.
+ enum:
+ - ""
+ - "RollingUpdate"
+ suspend:
+ !!merge <<: *TypeStringBool
+ description: |
+ Suspend reconciliation of resources managed by a ClickHouse Installation.
+ Works as the following:
+ - When `suspend` is `true` operator stops reconciling all resources.
+ - When `suspend` is `false` or not set, operator reconciles all resources.
+ troubleshoot:
+ !!merge <<: *TypeStringBool
+ description: |
+ Allows to troubleshoot Pods during CrashLoopBack state.
+ This may happen when wrong configuration applied, in this case `clickhouse-server` wouldn't start.
+ Command within ClickHouse container is modified with `sleep` in order to avoid quick restarts
+ and give time to troubleshoot via CLI.
+ Liveness and Readiness probes are disabled as well.
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ templating:
+ type: object
+ # nullable: true
+ description: |
+ Optional, applicable inside ClickHouseInstallationTemplate only.
+ Defines current ClickHouseInstallationTemplate application options to target ClickHouseInstallation(s).
+ properties:
+ policy:
+ type: string
+ description: |
+ When defined as `auto` inside ClickhouseInstallationTemplate, this ClickhouseInstallationTemplate
+ will be auto-added into ClickHouseInstallation, selectable by `chiSelector`.
+ Default value is `manual`, meaning ClickHouseInstallation should request this ClickhouseInstallationTemplate explicitly.
+ enum:
+ - ""
+ - "auto"
+ - "manual"
+ chiSelector:
+ type: object
+ description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ reconciling: &TypeReconcile
+ type: object
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator,
+ but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+ Default behavior is `Delete`"
+ # nullable: true
+ properties:
+ statefulSet: &TypeObjectsCleanup
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile.
+ Default behavior is `Retain`"
+ # nullable: true
+ properties:
+ statefulSet:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed Service, `Retain` by default"
+ macros:
+ type: object
+ description: "macros parameters"
+ properties:
+ sections:
+ type: object
+ description: "sections behaviour for macros"
+ properties:
+ users:
+ type: object
+ description: "sections behaviour for macros on users"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ profiles:
+ type: object
+ description: "sections behaviour for macros on profiles"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ quotas:
+ type: object
+ description: "sections behaviour for macros on quotas"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ settings:
+ type: object
+ description: "sections behaviour for macros on settings"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ files:
+ type: object
+ description: "sections behaviour for macros on files"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet: &TypeReconcileStatefulSet
+ type: object
+ description: "Optional, StatefulSet reconcile behavior tuning"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "delete"
+ - "ignore"
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for StatefulSet to be 'Ready' during update"
+ minimum: 0
+ maximum: 3600
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for StatefulSet status during update"
+ minimum: 1
+ maximum: 600
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is.
+ 2. rollback - delete Pod and rollback StatefulSet to previous Generation. Follow 'abort' path afterwards.
+ 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "rollback"
+ - "ignore"
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate - proceed and recreate StatefulSet.
+ enum:
+ - ""
+ - "abort"
+ - "recreate"
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectfully before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ !!merge <<: *TypeStringBool
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to do not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ drop:
+ type: object
+ properties:
+ replicas:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated
+ properties:
+ onDelete:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted
+ onLostVolume:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica volume is lost
+ active:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated
+ reconcile:
+ !!merge <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ defaults:
+ type: object
+ description: |
+ define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ !!merge <<: *TypeStringBool
+ description: |
+ define should replicas be specified by FQDN in ``.
+ In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup
+ "no" by default
+ distributedDDL:
+ type: object
+ description: |
+ allows change `` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner: &TypePVCProvisioner
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy: &TypePVCReclaimPolicy
+ type: string
+ description: |
+ defines behavior of `PVC` deletion.
+ `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates: &TypeTemplateNames
+ type: object
+ description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ serviceTemplates:
+ type: array
+ description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ nullable: true
+ items:
+ type: string
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ zookeeper: &TypeZookeeperConfig
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separately; see examples at https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/
+ currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl`
+ More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper
+ # nullable: true
+ properties:
+ nodes:
+ type: array
+ description: "describe every available zookeeper cluster node for interaction"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - host
+ properties:
+ host:
+ type: string
+ description: "dns name or ip address for Zookeeper node"
+ port:
+ type: integer
+ description: "TCP port which used to connect to Zookeeper node"
+ minimum: 0
+ maximum: 65535
+ secure:
+ !!merge <<: *TypeStringBool
+ description: "if a secure connection to Zookeeper is required"
+ availabilityZone:
+ type: string
+ description: "availability zone for Zookeeper node"
+ session_timeout_ms:
+ type: integer
+ description: "session timeout during connect to Zookeeper"
+ operation_timeout_ms:
+ type: integer
+ description: "one operation timeout during Zookeeper transactions"
+ root:
+ type: string
+ description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)"
+ identity:
+ type: string
+ description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
+ users:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure password hashed, authorization restrictions, database level security row filters etc.
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets
+ secret value will pass in `pod.spec.containers.evn`, and generate with from_env=XXX in XML in /etc/clickhouse-server/users.d/chop-generated-users.xml
+ it does not allow automatic updates when the `secret` is updated; change spec.taskID to manually trigger a reconcile cycle
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+ any key with prefix `k8s_secret_` shall have a value in the format namespace/secret/key or secret/key
+ in this case value from secret will write directly into XML tag during render *-usersd ConfigMap
+
+ any key with prefix `k8s_secret_env` shall have a value in the format namespace/secret/key or secret/key
+ in this case value from secret will write into environment variable and write to XML tag via from_env=XXX
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ profiles:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of settings profile
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of resource quotas
+ More details: https://clickhouse.tech/docs/en/operations/quotas/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ settings: &TypeSettings
+ type: object
+ description: |
+ allows configure `clickhouse-server` settings inside ... tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+ secret value will pass in `pod.spec.env`, and generate with from_env=XXX in XML in /etc/clickhouse-server/config.d/chop-generated-settings.xml
+ it does not allow automatic updates when the `secret` is updated; change spec.taskID to manually trigger a reconcile cycle
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files: &TypeFiles
+ type: object
+ description: |
+ allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ every key in this object is the file name
+ every value in this object is the file content
+ you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html
+ each key could contains prefix like {common}, {users}, {hosts} or config.d, users.d, conf.d, wrong prefixes will be ignored, subfolders also will be ignored
+ More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass values from kubernetes secrets
+ secrets will mounted into pod as separate volume in /etc/clickhouse-server/secrets.d/
+ and will automatically update when update secret
+ it useful for pass SSL certificates from cert-manager or similar tool
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ clusters:
+ type: array
+ description: |
+ describes clusters layout and allows change settings on cluster-level, shard-level and replica-level
+ every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server`
+ all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml`
+ Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ If `cluster` contains zookeeper settings (could be inherited from top `chi` level), when you can create *ReplicatedMergeTree tables
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zookeeper:
+ !!merge <<: *TypeZookeeperConfig
+ description: |
+ optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.zookeeper` settings
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
+ schemaPolicy:
+ type: object
+ description: |
+ describes how schema is propagated within replicas and shards
+ properties:
+ replica:
+ type: string
+ description: "how schema is propagated within a replica"
+ enum:
+ # List SchemaPolicyReplicaXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ shard:
+ type: string
+ description: "how schema is propagated between shards"
+ enum:
+ # List SchemaPolicyShardXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ - "DistributedTablesOnly"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: optional, open secure ports for cluster
+ secret:
+ type: object
+ description: "optional, shared secret value to secure cluster communications"
+ properties:
+ auto:
+ !!merge <<: *TypeStringBool
+ description: "Auto-generate shared secret value to secure cluster communications"
+ value:
+ description: "Cluster shared secret value in plain text"
+ type: string
+ valueFrom:
+ description: "Cluster shared secret source"
+ type: object
+ properties:
+ secretKeyRef:
+ description: |
+ Selects a key of a secret in the clickhouse installation namespace.
+ Should not be used if value is not empty.
+ type: object
+ properties:
+ name:
+ description: |
+ Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ key:
+ description: The key of the secret to select from. Must be a valid secret key.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - name
+ - key
+ pdbManaged:
+ !!merge <<: *TypeStringBool
+ description: |
+ Specifies whether the Pod Disruption Budget (PDB) should be managed.
+ During the next installation, if PDB management is enabled, the operator will
+ attempt to retrieve any existing PDB. If none is found, it will create a new one
+ and initiate a reconciliation loop. If PDB management is disabled, the existing PDB
+ will remain intact, and the reconciliation loop will not be executed. By default,
+ PDB management is enabled.
+ pdbMaxUnavailable:
+ type: integer
+ description: |
+ Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction,
+ i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions
+ by specifying 0. This is a mutually exclusive setting with "minAvailable".
+ minimum: 0
+ maximum: 65535
+ reconcile:
+ type: object
+ description: "allow tuning reconciling process"
+ properties:
+ runtime:
+ !!merge <<: *TypeReconcileRuntime
+ host:
+ !!merge <<: *TypeReconcileHost
+ layout:
+ type: object
+ description: |
+ describes current cluster layout: how many shards are in the cluster and how many replicas are in each shard
+ allows overriding settings on each shard and replica separately
+ # nullable: true
+ properties:
+ shardsCount:
+ type: integer
+ description: |
+ how many shards for the current ClickHouse cluster will run in Kubernetes,
+ each shard contains a shared-nothing part of the data and a set of replicas,
+ cluster contains 1 shard by default
+ replicasCount:
+ type: integer
+ description: |
+ how many replicas in each shard for the current cluster will run in Kubernetes,
+ each replica is a separate `StatefulSet` which contains only one `Pod` with a `clickhouse-server` instance,
+ every shard contains 1 replica by default
+ shards:
+ type: array
+ description: |
+ optional, allows override top-level `chi.spec.configuration`, cluster-level
+ `chi.spec.configuration.clusters` settings for each shard separately,
+ use it only if you fully understand what you do
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ definitionType:
+ type: string
+ description: "DEPRECATED - to be removed soon"
+ weight:
+ type: integer
+ description: |
+ optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ internalReplication:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise
+ allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard
+ override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates`
+ replicasCount:
+ type: integer
+ description: |
+ optional, how many replicas in the selected shard for the selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with a `clickhouse-server` instance,
+ shard contains 1 replica by default
+ override cluster-level `chi.spec.configuration.clusters.layout.replicasCount`
+ minimum: 1
+ replicas:
+ type: array
+ description: |
+ optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards`
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates`
+ replicas:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`
+ shardsCount:
+ type: integer
+ description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`"
+ minimum: 1
+ shards:
+ type: array
+ description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents"
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ templates:
+ type: object
+ description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
+ # nullable: true
+ properties:
+ hostTemplates:
+ type: array
+ description: "hostTemplate will be used during apply to generate `clickhouse-server` config files"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`"
+ type: string
+ portDistribution:
+ type: array
+ description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemplates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network"
+ enum:
+ # List PortDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClusterScopeIndex"
+ spec:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]`
+ More info: https://clickhouse.tech/docs/en/interfaces/tcp/
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]`
+ More info: https://clickhouse.tech/docs/en/interfaces/http/
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]`
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do"
+ podTemplates:
+ type: array
+ description: |
+ podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "template name, can be used to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ generateName:
+ type: string
+ description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables"
+ zone:
+ type: object
+ description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ #required:
+ # - values
+ properties:
+ key:
+ type: string
+ description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`"
+ values:
+ type: array
+ description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`"
+ # nullable: true
+ items:
+ type: string
+ distribution:
+ type: string
+ description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ enum:
+ - ""
+ - "Unspecified"
+ - "OnePerHost"
+ podDistribution:
+ type: array
+ description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "you can define multiple affinity policy types"
+ enum:
+ # List PodDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClickHouseAntiAffinity"
+ - "ShardAntiAffinity"
+ - "ReplicaAntiAffinity"
+ - "AnotherNamespaceAntiAffinity"
+ - "AnotherClickHouseInstallationAntiAffinity"
+ - "AnotherClusterAntiAffinity"
+ - "MaxNumberPerNode"
+ - "NamespaceAffinity"
+ - "ClickHouseInstallationAffinity"
+ - "ClusterAffinity"
+ - "ShardAffinity"
+ - "ReplicaAffinity"
+ - "PreviousTailAffinity"
+ - "CircularReplication"
+ scope:
+ type: string
+ description: "scope for apply each podDistribution"
+ enum:
+ # list PodDistributionScopeXXX constants
+ - ""
+ - "Unspecified"
+ - "Shard"
+ - "Replica"
+ - "Cluster"
+ - "ClickHouseInstallation"
+ - "Namespace"
+ number:
+ type: integer
+ description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type"
+ minimum: 0
+ maximum: 65535
+ topologyKey:
+ type: string
+ description: |
+ use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`,
+ more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity"
+ metadata:
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+ description: "allows define whole Pod.spec inside StatefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ volumeClaimTemplates:
+ type: array
+ description: |
+ allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ provisioner: *TypePVCProvisioner
+ reclaimPolicy: *TypePVCReclaimPolicy
+ metadata:
+ type: object
+ description: |
+ allows to pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ serviceTemplates:
+ type: array
+ description: |
+ allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ generateName:
+ type: string
+ description: |
+ allows define format for generated `Service` name,
+ look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates
+ for details about available template variables
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Service
+ Can be used to define Cloud-Provider-specific metadata which impacts the behavior of the service
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ useTemplates:
+ type: array
+ description: |
+ list of `ClickHouseInstallationTemplate` (chit) resource names which will be merged with the current `CHI`
+ manifest during rendering Kubernetes resources to create related ClickHouse clusters
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "name of `ClickHouseInstallationTemplate` (chit) resource"
+ namespace:
+ type: string
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
+ useType:
+ type: string
+ description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
+ enum:
+ # List useTypeXXX constants from model
+ - ""
+ - "merge"
diff --git a/deploy/operatorhub/0.26.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml b/deploy/operatorhub/0.26.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
new file mode 100644
index 000000000..405dac0a1
--- /dev/null
+++ b/deploy/operatorhub/0.26.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
@@ -0,0 +1,875 @@
+# Template Parameters:
+#
+# OPERATOR_VERSION=0.26.0
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
+ labels:
+ clickhouse-keeper.altinity.com/chop: 0.26.0
+spec:
+ group: clickhouse-keeper.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseKeeperInstallation
+ singular: clickhousekeeperinstallation
+ plural: clickhousekeeperinstallations
+ shortNames:
+ - chk
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
+ - name: version
+ type: string
+ description: Operator version
+ priority: 1 # show in wide view
+ jsonPath: .status.chop-version
+ - name: clusters
+ type: integer
+ description: Clusters count
+ jsonPath: .status.clusters
+ - name: shards
+ type: integer
+ description: Shards count
+ priority: 1 # show in wide view
+ jsonPath: .status.shards
+ - name: hosts
+ type: integer
+ description: Hosts count
+ jsonPath: .status.hosts
+ - name: taskID
+ type: string
+ description: TaskID
+ priority: 1 # show in wide view
+ jsonPath: .status.taskID
+ - name: hosts-completed
+ type: integer
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
+ - name: hosts-updated
+ type: integer
+ description: Updated hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUpdated
+ - name: hosts-added
+ type: integer
+ description: Added hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsAdded
+ - name: hosts-deleted
+ type: integer
+ description: Hosts deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDeleted
+ - name: endpoint
+ type: string
+ description: Client access endpoint
+ priority: 1 # show in wide view
+ jsonPath: .status.endpoint
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ - name: suspend
+ type: string
+ description: Suspend reconciliation
+ # Displayed in all priorities
+ jsonPath: .spec.suspend
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters"
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: |
+ APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |
+ Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: |
+ Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other
+ properties:
+ chop-version:
+ type: string
+ description: "Operator version"
+ chop-commit:
+ type: string
+ description: "Operator git commit SHA"
+ chop-date:
+ type: string
+ description: "Operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this resource"
+ clusters:
+ type: integer
+ minimum: 0
+ description: "Clusters count"
+ shards:
+ type: integer
+ minimum: 0
+ description: "Shards count"
+ replicas:
+ type: integer
+ minimum: 0
+ description: "Replicas count"
+ hosts:
+ type: integer
+ minimum: 0
+ description: "Hosts count"
+ status:
+ type: string
+ description: "Status"
+ taskID:
+ type: string
+ description: "Current task id"
+ taskIDsStarted:
+ type: array
+ description: "Started task ids"
+ nullable: true
+ items:
+ type: string
+ taskIDsCompleted:
+ type: array
+ description: "Completed task ids"
+ nullable: true
+ items:
+ type: string
+ action:
+ type: string
+ description: "Action"
+ actions:
+ type: array
+ description: "Actions"
+ nullable: true
+ items:
+ type: string
+ error:
+ type: string
+ description: "Last error"
+ errors:
+ type: array
+ description: "Errors"
+ nullable: true
+ items:
+ type: string
+ hostsUnchanged:
+ type: integer
+ minimum: 0
+ description: "Unchanged Hosts count"
+ hostsUpdated:
+ type: integer
+ minimum: 0
+ description: "Updated Hosts count"
+ hostsAdded:
+ type: integer
+ minimum: 0
+ description: "Added Hosts count"
+ hostsCompleted:
+ type: integer
+ minimum: 0
+ description: "Completed Hosts count"
+ hostsDeleted:
+ type: integer
+ minimum: 0
+ description: "Deleted Hosts count"
+ hostsDelete:
+ type: integer
+ minimum: 0
+ description: "About to delete Hosts count"
+ pods:
+ type: array
+ description: "Pods"
+ nullable: true
+ items:
+ type: string
+ pod-ips:
+ type: array
+ description: "Pod IPs"
+ nullable: true
+ items:
+ type: string
+ fqdns:
+ type: array
+ description: "Pods FQDNs"
+ nullable: true
+ items:
+ type: string
+ endpoint:
+ type: string
+ description: "Endpoint"
+ endpoints:
+ type: array
+ description: "All endpoints"
+ nullable: true
+ items:
+ type: string
+ generation:
+ type: integer
+ minimum: 0
+ description: "Generation"
+ normalized:
+ type: object
+ description: "Normalized resource requested"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized resource completed"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ hostsWithTablesCreated:
+ type: array
+ description: "List of hosts with tables created by the operator"
+ nullable: true
+ items:
+ type: string
+ hostsWithReplicaCaughtUp:
+ type: array
+ description: "List of hosts with replica caught up"
+ nullable: true
+ items:
+ type: string
+ usedTemplates:
+ type: array
+ description: "List of templates used to build this CHI"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ taskID:
+ type: string
+ description: |
+ Allows to define custom taskID for CHI update and watch status of this update execution.
+ Displayed in all .status.taskID* fields.
+ By default (if not filled) every update of CHI manifest will generate random taskID
+ stop: &TypeStringBool
+ type: string
+ description: |
+ Allows to stop all ClickHouse Keeper clusters defined in a CHK.
+ Works as the following:
+ - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
+ - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ suspend:
+ !!merge <<: *TypeStringBool
+ description: |
+ Suspend reconciliation of resources managed by a ClickHouse Keeper.
+ Works as the following:
+ - When `suspend` is `true` operator stops reconciling all resources.
+ - When `suspend` is `false` or not set, operator reconciles all resources.
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ reconciling:
+ type: object
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator,
+ but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+ Default behavior is `Delete`
+ # nullable: true
+ properties:
+ statefulSet: &TypeObjectsCleanup
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile.
+ Default behavior is `Retain`
+ # nullable: true
+ properties:
+ statefulSet:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed Service, `Retain` by default"
+ defaults:
+ type: object
+ description: |
+ define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ !!merge <<: *TypeStringBool
+ description: |
+ define should replicas be specified by FQDN in ``.
+ In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup
+ "no" by default
+ distributedDDL:
+ type: object
+ description: |
+ allows change `` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner: &TypePVCProvisioner
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy: &TypePVCReclaimPolicy
+ type: string
+ description: |
+ defines behavior of `PVC` deletion.
+ `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates: &TypeTemplateNames
+ type: object
+ description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ serviceTemplates:
+ type: array
+ description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ nullable: true
+ items:
+ type: string
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ settings: &TypeSettings
+ type: object
+ description: |
+ allows configure multiple aspects and behavior for `clickhouse-keeper` instance
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files: &TypeFiles
+ type: object
+ description: |
+ allows define content of any setting
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ clusters:
+ type: array
+ description: |
+ describes clusters layout and allows change settings on cluster-level and replica-level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
+ pdbManaged:
+ !!merge <<: *TypeStringBool
+ description: |
+ Specifies whether the Pod Disruption Budget (PDB) should be managed.
+ During the next installation, if PDB management is enabled, the operator will
+ attempt to retrieve any existing PDB. If none is found, it will create a new one
+ and initiate a reconciliation loop. If PDB management is disabled, the existing PDB
+ will remain intact, and the reconciliation loop will not be executed. By default,
+ PDB management is enabled.
+ pdbMaxUnavailable:
+ type: integer
+ description: |
+ Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction,
+ i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions
+ by specifying 0. This is a mutually exclusive setting with "minAvailable".
+ minimum: 0
+ maximum: 65535
+ layout:
+ type: object
+ description: |
+ describe current cluster layout, how much shards in cluster, how much replica in shard
+ allows override settings on each shard and replica separately
+ # nullable: true
+ properties:
+ replicasCount:
+ type: integer
+ description: |
+ how much replicas in each shards for current cluster will run in Kubernetes,
+ each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance,
+ every shard contains 1 replica by default
+ replicas:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`
+ shardsCount:
+ type: integer
+ description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`"
+ minimum: 1
+ shards:
+ type: array
+ description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents"
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zkPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ raftPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ templates:
+ type: object
+ description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
+ # nullable: true
+ properties:
+ hostTemplates:
+ type: array
+ description: "hostTemplate is used during apply to generate `clickhouse-server` config files"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`"
+ type: string
+ portDistribution:
+ type: array
+ description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network"
+ enum:
+ # List PortDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClusterScopeIndex"
+ spec:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zkPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ raftPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do"
+ podTemplates:
+ type: array
+ description: |
+ podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ generateName:
+ type: string
+ description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables"
+ zone:
+ type: object
+ description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ #required:
+ # - values
+ properties:
+ key:
+ type: string
+ description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`"
+ values:
+ type: array
+ description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`"
+ # nullable: true
+ items:
+ type: string
+ distribution:
+ type: string
+ description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ enum:
+ - ""
+ - "Unspecified"
+ - "OnePerHost"
+ podDistribution:
+ type: array
+ description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "you can define multiple affinity policy types"
+ enum:
+ # List PodDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClickHouseAntiAffinity"
+ - "ShardAntiAffinity"
+ - "ReplicaAntiAffinity"
+ - "AnotherNamespaceAntiAffinity"
+ - "AnotherClickHouseInstallationAntiAffinity"
+ - "AnotherClusterAntiAffinity"
+ - "MaxNumberPerNode"
+ - "NamespaceAffinity"
+ - "ClickHouseInstallationAffinity"
+ - "ClusterAffinity"
+ - "ShardAffinity"
+ - "ReplicaAffinity"
+ - "PreviousTailAffinity"
+ - "CircularReplication"
+ scope:
+ type: string
+ description: "scope for apply each podDistribution"
+ enum:
+ # list PodDistributionScopeXXX constants
+ - ""
+ - "Unspecified"
+ - "Shard"
+ - "Replica"
+ - "Cluster"
+ - "ClickHouseInstallation"
+ - "Namespace"
+ number:
+ type: integer
+ description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type"
+ minimum: 0
+ maximum: 65535
+ topologyKey:
+ type: string
+ description: |
+ use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`,
+ more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ metadata:
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+ description: "allows define whole Pod.spec inside StatefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ volumeClaimTemplates:
+ type: array
+ description: |
+ allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ provisioner: *TypePVCProvisioner
+ reclaimPolicy: *TypePVCReclaimPolicy
+ metadata:
+ type: object
+ description: |
+ allows to pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ serviceTemplates:
+ type: array
+ description: |
+ allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ generateName:
+ type: string
+ description: |
+ allows define format for generated `Service` name,
+ look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates
+ for details about available template variables
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Service
+ Could be used to define Cloud-Provider-specific metadata which impacts the behavior of the service
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
diff --git a/deploy/operatorhub/0.26.0/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.26.0/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml
new file mode 100644
index 000000000..fb41e3d13
--- /dev/null
+++ b/deploy/operatorhub/0.26.0/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml
@@ -0,0 +1,563 @@
+# Template Parameters:
+#
+# NONE
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhouseoperatorconfigurations.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.26.0
+spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseOperatorConfiguration
+ singular: clickhouseoperatorconfiguration
+ plural: clickhouseoperatorconfigurations
+ shortNames:
+ - chopconf
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: namespaces
+ type: string
+ description: Watch namespaces
+ jsonPath: .status
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ schema:
+ openAPIV3Schema:
+ type: object
+ description: "allows customize `clickhouse-operator` settings, need restart clickhouse-operator pod after adding, more details https://github.com/Altinity/clickhouse-operator/blob/master/docs/operator_configuration.md"
+ x-kubernetes-preserve-unknown-fields: true
+ properties:
+ status:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ Allows to define settings of the clickhouse-operator.
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/config/config.yaml
+ Check into etc-clickhouse-operator* ConfigMaps if you need more control
+ x-kubernetes-preserve-unknown-fields: true
+ properties:
+ watch:
+ type: object
+ description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
+ properties:
+ namespaces:
+ type: object
+ description: "List of namespaces where clickhouse-operator watches for events."
+ x-kubernetes-preserve-unknown-fields: true
+ clickhouse:
+ type: object
+ description: "Clickhouse related parameters used by clickhouse-operator"
+ properties:
+ configuration:
+ type: object
+ properties:
+ file:
+ type: object
+ properties:
+ path:
+ type: object
+ description: |
+ Each 'path' can be either absolute or relative.
+ In case path is absolute - it is used as is.
+ In case path is relative - it is relative to the folder where configuration file you are reading right now is located.
+ properties:
+ common:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
+ Default value - config.d
+ host:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
+ Default value - conf.d
+ user:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files with users settings are located.
+ Files are common for all instances within a CHI.
+ Default value - users.d
+ user:
+ type: object
+ description: "Default parameters for any user which will create"
+ properties:
+ default:
+ type: object
+ properties:
+ profile:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ quota:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ networksIP:
+ type: array
+ description: "ClickHouse server configuration `...` for any "
+ items:
+ type: string
+ password:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ network:
+ type: object
+ description: "Default network parameters for any user which will create"
+ properties:
+ hostRegexpTemplate:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ configurationRestartPolicy:
+ type: object
+ description: "Configuration restart policy describes what configuration changes require ClickHouse restart"
+ properties:
+ rules:
+ type: array
+ description: "Array of set of rules per specified ClickHouse versions"
+ items:
+ type: object
+ properties:
+ version:
+ type: string
+ description: "ClickHouse version expression"
+ rules:
+ type: array
+ description: "Set of configuration rules for specified ClickHouse version"
+ items:
+ type: object
+ description: "setting: value pairs for configuration restart policy"
+ x-kubernetes-preserve-unknown-fields: true
+ access:
+ type: object
+ description: "parameters which use for connect to clickhouse from clickhouse-operator deployment"
+ properties:
+ scheme:
+ type: string
+ description: "The scheme to user for connecting to ClickHouse. Possible values: http, https, auto"
+ username:
+ type: string
+ description: "ClickHouse username to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName"
+ password:
+ type: string
+ description: "ClickHouse password to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName"
+ rootCA:
+ type: string
+ description: "Root certificate authority that clients use when verifying server certificates. Used for https connection to ClickHouse"
+ secret:
+ type: object
+ properties:
+ namespace:
+ type: string
+ description: "Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances"
+ name:
+ type: string
+ description: "Name of k8s Secret with username and password to be used by operator to connect to ClickHouse instances"
+ port:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "Port to be used by operator to connect to ClickHouse instances"
+ timeouts:
+ type: object
+ description: "Timeouts used to limit connection and queries from the operator to ClickHouse instances, In seconds"
+ properties:
+ connect:
+ type: integer
+ minimum: 1
+ maximum: 10
+ description: "Timeout to set up a connection from the operator to ClickHouse instances. In seconds."
+ query:
+ type: integer
+ minimum: 1
+ maximum: 600
+ description: "Timeout to perform a SQL query from the operator to ClickHouse instances. In seconds."
+ addons:
+ type: object
+ description: "Configuration addons specifies additional settings"
+ properties:
+ rules:
+ type: array
+ description: "Array of set of rules per specified ClickHouse versions"
+ items:
+ type: object
+ properties:
+ version:
+ type: string
+ description: "ClickHouse version expression"
+ spec:
+ type: object
+ description: "spec"
+ properties:
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ properties:
+ users:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ profiles:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ settings:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ files:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ metrics:
+ type: object
+ description: "parameters which use for connect to fetch metrics from clickhouse by clickhouse-operator"
+ properties:
+ timeouts:
+ type: object
+ description: |
+ Timeouts used to limit connection and queries from the metrics exporter to ClickHouse instances
+ Specified in seconds.
+ properties:
+ collect:
+ type: integer
+ minimum: 1
+ maximum: 600
+ description: |
+ Timeout used to limit metrics collection request. In seconds.
+ Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
+ All collected metrics are returned.
+ tablesRegexp:
+ type: string
+ description: |
+ Regexp to match tables in system database to fetch metrics from.
+ Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ Default is "^(metrics|custom_metrics)$".
+ template:
+ type: object
+ description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
+ properties:
+ chi:
+ type: object
+ properties:
+ policy:
+ type: string
+ description: |
+ CHI template updates handling policy
+ Possible policy values:
+ - ReadOnStart. Accept CHIT updates on the operators start only.
+ - ApplyOnNextReconcile. Accept CHIT updates at all times. Apply new CHITs on the next regular reconcile of the CHI
+ enum:
+ - ""
+ - "ReadOnStart"
+ - "ApplyOnNextReconcile"
+ path:
+ type: string
+ description: "Path to folder where ClickHouseInstallationTemplate .yaml manifests are located."
+ reconcile:
+ type: object
+ description: "allow tuning reconciling process"
+ properties:
+ runtime:
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileCHIsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "How many goroutines will be used to reconcile CHIs in parallel, 10 by default"
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet:
+ type: object
+ description: "Allow change default behavior for reconciling StatefulSet which generated by clickhouse-operator"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
+ Possible options:
+ 1. abort - do nothing, just break the process and wait for admin.
+ 2. delete - delete newly created problematic StatefulSet.
+ 3. ignore (default) - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for created/updated StatefulSet to be Ready"
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for created/updated StatefulSet status"
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
+ Possible options:
+ 1. abort - do nothing, just break the process and wait for admin.
+ 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
+ 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ recreate:
+ type: object
+ description: "Behavior during recreate StatefulSet"
+ properties:
+ onDataLoss:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to PVC data loss or missing volumes.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
+ onUpdateFailure:
+ type: string
+ description: |
+ What to do in case operator needs to recreate StatefulSet due to update failure or StatefulSet not ready.
+ Possible options:
+ 1. abort - abort the process, do nothing with the problematic StatefulSet.
+ 2. recreate (default) - proceed and recreate StatefulSet.
+ host:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectfully before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude: &TypeStringBool
+ type: string
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be excluded from a ClickHouse cluster"
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to do not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ drop:
+ type: object
+ properties:
+ replicas:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated
+ properties:
+ onDelete:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted
+ onLostVolume:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica volume is lost
+ active:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated
+ annotation:
+ type: object
+ description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
+ properties:
+ include:
+ type: array
+ description: |
+ When propagating labels from the chi's `metadata.annotations` section to child objects' `metadata.annotations`,
+ include annotations with names from the following list
+ items:
+ type: string
+ exclude:
+ type: array
+ description: |
+ When propagating labels from the chi's `metadata.annotations` section to child objects' `metadata.annotations`,
+ exclude annotations with names from the following list
+ items:
+ type: string
+ label:
+ type: object
+ description: "defines which metadata.labels will include or exclude during render StatefulSet, Pod, PVC resources"
+ properties:
+ include:
+ type: array
+ description: |
+ When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
+ include labels from the following list
+ items:
+ type: string
+ exclude:
+ type: array
+ items:
+ type: string
+ description: |
+ When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
+ exclude labels from the following list
+ appendScope:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether to append *Scope* labels to StatefulSet and Pod
+ - "LabelShardScopeIndex"
+ - "LabelReplicaScopeIndex"
+ - "LabelCHIScopeIndex"
+ - "LabelCHIScopeCycleSize"
+ - "LabelCHIScopeCycleIndex"
+ - "LabelCHIScopeCycleOffset"
+ - "LabelClusterScopeIndex"
+ - "LabelClusterScopeCycleSize"
+ - "LabelClusterScopeCycleIndex"
+ - "LabelClusterScopeCycleOffset"
+ metrics:
+ type: object
+ description: "defines metrics exporter options"
+ properties:
+ labels:
+ type: object
+ description: "defines metric labels options"
+ properties:
+ exclude:
+ type: array
+ description: |
+ When adding labels to a metric exclude labels with names from the following list
+ items:
+ type: string
+ status:
+ type: object
+ description: "defines status options"
+ properties:
+ fields:
+ type: object
+ description: "defines status fields options"
+ properties:
+ action:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'action'"
+ actions:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'actions'"
+ error:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'error'"
+ errors:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'errors'"
+ statefulSet:
+ type: object
+ description: "define StatefulSet-specific parameters"
+ properties:
+ revisionHistoryLimit:
+ type: integer
+ description: "revisionHistoryLimit is the maximum number of revisions that will be\nmaintained in the StatefulSet's revision history. \nLook details in `statefulset.spec.revisionHistoryLimit`\n"
+ pod:
+ type: object
+ description: "define pod specific parameters"
+ properties:
+ terminationGracePeriod:
+ type: integer
+ description: "Optional duration in seconds the pod needs to terminate gracefully. \nLook details in `pod.spec.terminationGracePeriodSeconds`\n"
+ logger:
+ type: object
+ description: "allow setup clickhouse-operator logger behavior"
+ properties:
+ logtostderr:
+ type: string
+ description: "boolean, allows logs to stderr"
+ alsologtostderr:
+ type: string
+ description: "boolean allows logs to stderr and files both"
+ v:
+ type: string
+ description: "verbosity level of clickhouse-operator log, default - 1 max - 9"
+ stderrthreshold:
+ type: string
+ vmodule:
+ type: string
+ description: |
+ Comma-separated list of filename=N, where filename (can be a pattern) must have no .go ext, and N is a V level.
+ Ex.: file*=2 sets the 'V' to 2 in all files with names like file*.
+ log_backtrace_at:
+ type: string
+ description: |
+ It can be set to a file and line number with a logging line.
+ Ex.: file.go:123
+ Each time when this line is being executed, a stack trace will be written to the Info log.
diff --git a/deploy/prometheus/prometheus-alert-rules-chkeeper.yaml b/deploy/prometheus/prometheus-alert-rules-chkeeper.yaml
index 4cb3436ba..bfd86262d 100644
--- a/deploy/prometheus/prometheus-alert-rules-chkeeper.yaml
+++ b/deploy/prometheus/prometheus-alert-rules-chkeeper.yaml
@@ -93,7 +93,6 @@ spec:
echo "ClickHouseKeeper Write $((($writeEnd - $writeBegin) / 5)) b/s"
```
-
- alert: ClickHouseKeeperHighEphemeralNodes
expr: ClickHouseAsyncMetrics_KeeperEphemeralsCount{app=~'clickhouse-keeper.*'} > 100
for: 10m
@@ -196,7 +195,7 @@ spec:
description: |-
ClickHouse Keeper is using {{ with printf "(ClickHouseAsyncMetrics_KeeperOpenFileDescriptorCount{pod_name='%s',namespace='%s'} / ClickHouseAsyncMetrics_KeeperMaxFileDescriptorCount{pod_name='%s',namespace='%s'}) * 100" .Labels.pod_name .Labels.namespace .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.1f" }}{{ end }}% of available file descriptors.
- Current open FDs: {{ with printf "ClickHouseAsyncMetrics_KeeperOpenFileDescriptorCount{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }}
- Max FDs: {{ with printf "ClickHouseAsyncMetrics_KeeperMaxFileDescriptorCount{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }}
+ Current open FDs: {{ with printf "ClickHouseAsyncMetrics_KeeperOpenFileDescriptorCount{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }}
+ Max FDs: {{ with printf "ClickHouseAsyncMetrics_KeeperMaxFileDescriptorCount{pod_name='%s',namespace='%s'}" .Labels.pod_name .Labels.namespace | query }}{{ . | first | value | printf "%.0f" }}{{ end }}
If this continues to increase, the keeper may run out of file descriptors and become unresponsive.
diff --git a/deploy/prometheus/prometheus-alert-rules-clickhouse.yaml b/deploy/prometheus/prometheus-alert-rules-clickhouse.yaml
index d78349e30..2d4303da5 100644
--- a/deploy/prometheus/prometheus-alert-rules-clickhouse.yaml
+++ b/deploy/prometheus/prometheus-alert-rules-clickhouse.yaml
@@ -72,8 +72,8 @@ spec:
`increase(chi_clickhouse_event_DNSError[1m])` = {{ with printf "increase(chi_clickhouse_event_DNSError{hostname='%s',exported_namespace='%s'}[1m]) or increase(chi_clickhouse_event_NetworkErrors{hostname='%s',exported_namespace='%s'}[1m])" .Labels.hostname .Labels.exported_namespace .Labels.hostname .Labels.exported_namespace | query }}{{ . | first | value | printf "%.2f" }} errors{{ end }}
Please check DNS settings in `/etc/resolve.conf` and `` part of `/etc/clickhouse-server/`
See documentation:
- - https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#server-settings-remote-servers
- - https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#server-settings-disable-internal-dns-cache
+ - https://clickhouse.com/docs/operations/server-configuration-parameters/settings#remote_servers
+ - https://clickhouse.com/docs/operations/server-configuration-parameters/settings#disable_internal_dns_cache
- https://clickhouse.com/docs/en/sql-reference/statements/system/
- alert: ClickHouseDistributedFilesToInsertHigh
@@ -112,7 +112,7 @@ spec:
`increase(chi_clickhouse_event_DistributedConnectionFailAtAll[1m])` = {{ with printf "increase(chi_clickhouse_event_DistributedConnectionFailAtAll{hostname='%s',exported_namespace='%s'}[1m])" .Labels.hostname .Labels.exported_namespace | query }}{{ . | first | value | printf "%.2f" }} errors{{ end }}
Please, check communications between clickhouse server and host `remote_servers` in `/etc/clickhouse-server/`
- https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#server-settings-remote-servers
+ https://clickhouse.com/docs/operations/server-configuration-parameters/settings#remote_servers
Also, you can check logs:
```kubectl logs -n {{ $labels.exported_namespace }} $( echo {{ $labels.hostname }} | cut -d '.' -f 1)-0 -f```
@@ -178,7 +178,7 @@ spec:
Please use Buffer table
https://clickhouse.com/docs/en/engines/table-engines/special/buffer/
or
- https://clickhouse.com/docs/en/operations/settings/settings/#async-insert
+ https://clickhouse.com/docs/operations/settings/settings#async_insert
- alert: ClickHouseLongestRunningQuery
expr: chi_clickhouse_metric_LongestRunningQuery > 600
@@ -259,7 +259,7 @@ spec:
The ClickHouse is adapted to run not a very large number of parallel SQL requests, not every HTTP/TCP(Native)/MySQL protocol connection means a running SQL request, but a large number of open connections can cause a spike in sudden SQL requests, resulting in performance degradation.
Also read documentation:
- - https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#max-concurrent-queries
+ - https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries
- alert: ClickHouseTooManyRunningQueries
@@ -279,7 +279,7 @@ spec:
Look at following documentation parts:
- https://clickhouse.com/docs/en/operations/settings/query-complexity/
- https://clickhouse.com/docs/en/operations/quotas/
- - https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#max-concurrent-queries
+ - https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries
- https://clickhouse.com/docs/en/operations/system-tables/query_log/
- alert: ClickHouseSystemSettingsChanged
@@ -527,7 +527,7 @@ spec:
chi_clickhouse_metric_BackgroundMessageBrokerSchedulePoolTask = {{ with printf "chi_clickhouse_metric_BackgroundMessageBrokerSchedulePoolTask{exported_namespace='%s',chi='%s',hostname='%s'}" .Labels.exported_namespace .Labels.chi .Labels.hostname | query }}{{ . | first | value | printf "%.0f" }}{{ end }}
chi_clickhouse_metric_BackgroundMessageBrokerSchedulePoolSize = {{ with printf "chi_clickhouse_metric_BackgroundMessageBrokerSchedulePoolSize{exported_namespace='%s',chi='%s',hostname='%s'}" .Labels.exported_namespace .Labels.chi .Labels.hostname | query }}{{ . | first | value | printf "%.0f" }}{{ end }}
- https://kb.altinity.com/altinity-kb-integrations/altinity-kb-kafka/background_message_broker_schedule_pool_size/
- - https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#background_message_broker_schedule_pool_size
+ - https://clickhouse.com/docs/operations/server-configuration-parameters/settings#background_message_broker_schedule_pool_size
- https://clickhouse.com/docs/en/operations/system-tables/metrics#backgroundmessagebrokerschedulepoolsize
This pool is used for tasks related to message streaming from Apache Kafka or other message brokers.
You need to increase `background_message_broker_schedule_pool_size` to fix the problem.
diff --git a/dev/generate_helm_chart.sh b/dev/generate_helm_chart.sh
index 5a3ce88f9..c4812865d 100755
--- a/dev/generate_helm_chart.sh
+++ b/dev/generate_helm_chart.sh
@@ -225,14 +225,14 @@ function update_deployment_resource() {
done
yq e -i '.spec.template.spec.containers[0].name |= "{{ .Chart.Name }}"' "${file}"
- yq e -i '.spec.template.spec.containers[0].image |= "{{ .Values.operator.image.repository }}:{{ include \"altinity-clickhouse-operator.operator.tag\" . }}"' "${file}"
+ yq e -i '.spec.template.spec.containers[0].image |= "{{ if .Values.operator.image.registry }}{{ .Values.operator.image.registry | trimSuffix \"/\" }}/{{ end }}{{ .Values.operator.image.repository }}:{{ include \"altinity-clickhouse-operator.operator.tag\" . }}"' "${file}"
yq e -i '.spec.template.spec.containers[0].imagePullPolicy |= "{{ .Values.operator.image.pullPolicy }}"' "${file}"
yq e -i '.spec.template.spec.containers[0].resources |= "{{ toYaml .Values.operator.resources | nindent 12 }}"' "${file}"
yq e -i '.spec.template.spec.containers[0].securityContext |= "{{ toYaml .Values.operator.containerSecurityContext | nindent 12 }}"' "${file}"
yq e -i '(.spec.template.spec.containers[0].env[] | select(.valueFrom.resourceFieldRef.containerName == "clickhouse-operator") | .valueFrom.resourceFieldRef.containerName) = "{{ .Chart.Name }}"' "${file}"
yq e -i '.spec.template.spec.containers[0].env += ["{{ with .Values.operator.env }}{{ toYaml . | nindent 12 }}{{ end }}"]' "${file}"
- yq e -i '.spec.template.spec.containers[1].image |= "{{ .Values.metrics.image.repository }}:{{ include \"altinity-clickhouse-operator.metrics.tag\" . }}"' "${file}"
+ yq e -i '.spec.template.spec.containers[1].image |= "{{ if .Values.metrics.image.registry }}{{ .Values.metrics.image.registry | trimSuffix \"/\" }}/{{ end }}{{ .Values.metrics.image.repository }}:{{ include \"altinity-clickhouse-operator.metrics.tag\" . }}"' "${file}"
yq e -i '.spec.template.spec.containers[1].imagePullPolicy |= "{{ .Values.metrics.image.pullPolicy }}"' "${file}"
yq e -i '.spec.template.spec.containers[1].resources |= "{{ toYaml .Values.metrics.resources | nindent 12 }}"' "${file}"
yq e -i '.spec.template.spec.containers[1].securityContext |= "{{ toYaml .Values.metrics.containerSecurityContext | nindent 12 }}"' "${file}"
diff --git a/docs/chi-examples/70-chop-config.yaml b/docs/chi-examples/70-chop-config.yaml
index a00ca3ffe..05ee2e1e4 100644
--- a/docs/chi-examples/70-chop-config.yaml
+++ b/docs/chi-examples/70-chop-config.yaml
@@ -81,6 +81,22 @@ spec:
# Port where to connect to ClickHouse instances to
port: 8123
+ ################################################
+ ##
+ ## Metrics collection
+ ##
+ ################################################
+ metrics:
+ timeouts:
+ # Timeout used to limit metrics collection request. In seconds.
+ # Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
+ # All collected metrics are returned.
+ collect: 9
+ # Regexp to match tables in system database to fetch metrics from.
+ # Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ # Default is "^(metrics|custom_metrics)$".
+ tablesRegexp: "^(metrics|custom_metrics)$"
+
################################################
##
## Templates Section
@@ -154,16 +170,16 @@ spec:
# - to be included into a ClickHouse cluster
# respectfully before moving forward with host reconcile
wait:
- exclude: true
- queries: true
- include: false
+ exclude: "true"
+ queries: "true"
+ include: "false"
# The operator during reconcile procedure should wait for replicas to catch-up
# replication delay a.k.a replication lag for the following replicas
replicas:
# All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
- all: no
+ all: "no"
# New replicas only are requested to wait for replication to catch-up
- new: yes
+ new: "yes"
# Replication catch-up is considered to be completed as soon as replication delay
# a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
# is within this specified delay (in seconds)
@@ -172,11 +188,11 @@ spec:
# Whether the operator during host launch procedure should wait for startup probe to succeed.
# In case probe is unspecified wait is assumed to be completed successfully.
# Default option value is to do not wait.
- startup: no
+ startup: "no"
# Whether the operator during host launch procedure should wait for readiness probe to succeed.
# In case probe is unspecified wait is assumed to be completed successfully.
# Default option value is to wait.
- readiness: yes
+ readiness: "yes"
################################################
##
diff --git a/docs/chi-examples/99-clickhouseinstallation-max.yaml b/docs/chi-examples/99-clickhouseinstallation-max.yaml
index 7eea7110c..8dfa95ae2 100644
--- a/docs/chi-examples/99-clickhouseinstallation-max.yaml
+++ b/docs/chi-examples/99-clickhouseinstallation-max.yaml
@@ -104,6 +104,53 @@ spec:
# Max percentage of concurrent shard reconciles within one cluster in progress
reconcileShardsMaxConcurrencyPercent: 50
+ # Optional, overwrites reconcile.statefulSet from the operator's config
+ # Reconcile StatefulSet scenario
+ statefulSet:
+ # Create StatefulSet scenario
+ create:
+ # What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ onFailure: ignore
+
+ # Update StatefulSet scenario
+ update:
+ # How many seconds to wait for created/updated StatefulSet to be 'Ready'
+ timeout: 300
+ # How many seconds to wait between checks/polls for created/updated StatefulSet status
+ pollInterval: 5
+ # What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. rollback - delete Pod and rollback StatefulSet to previous Generation.
+ # Pod would be recreated by StatefulSet based on rollback-ed StatefulSet configuration.
+ # Follow 'abort' path afterwards.
+ # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ onFailure: abort
+
+ # Recreate StatefulSet scenario
+ recreate:
+ # What to do in case operator is in need to recreate StatefulSet?
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. recreate - proceed and recreate StatefulSet.
+
+ # Triggered when PVC data loss or missing volumes are detected
+ onDataLoss: recreate
+ # Triggered when StatefulSet update fails or StatefulSet is not ready
+ onUpdateFailure: recreate
+
+ # Optional, overwrites reconcile.host from the operator's config
+ # Reconcile Host scenario
host:
# Whether the operator during reconcile procedure should wait for a ClickHouse host:
# - to be excluded from a ClickHouse cluster
diff --git a/docs/operator_configuration.md b/docs/operator_configuration.md
index 73c11ec4b..c38efd0e5 100644
--- a/docs/operator_configuration.md
+++ b/docs/operator_configuration.md
@@ -18,7 +18,7 @@ Operator settings are initialized in-order from 3 sources:
* etc-clickhouse-operator-files configmap (also a part of default [clickhouse-operator-install-bundle.yaml][clickhouse-operator-install-bundle.yaml]
* `ClickHouseOperatorConfiguration` resource. See [example][70-chop-config.yaml] for details.
-Next sources merges with the previous one. Changes to `etc-clickhouse-operator-files` are not monitored, but picked up if operator is restarted. Changes to `ClickHouseOperatorConfiguration` are monitored by an operator and applied immediately.
+Next sources merge with the previous ones. Currently the operator does not self-reconcile its own configuration: changes to `etc-clickhouse-operator-files` or `ClickHouseOperatorConfiguration` are read only at startup and require an operator restart to apply.
`config.yaml` has following settings:
@@ -163,5 +163,31 @@ spec:
...
```
+#### Applying Changes from ClickHouseInstallationTemplates
+
+Changes applied to a ClickHouseInstallationTemplate do not automatically trigger a reconcile of the ClickHouseInstallations using the template. This is by design and intended to preserve user control and prevent undesirable rollouts to ClickHouseInstallations.
+
+To apply the changes to ClickHouseInstallations, update the spec.taskID:
+
+```
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+...
+spec:
+ taskID: "randomly-generated-string"
+...
+```
+
+> Note, ClickHouse settings applied to the ClickHouse server through `spec.configuration.settings` in a ClickHouseInstallationTemplate will not trigger a server restart whether or not the setting requires a server restart to be applied. To apply the settings and restart the server, you should also set `spec.restart` to `'RollingUpdate'`. RollingUpdate should be used sparingly. It is typically removed after usage to prevent unnecessary restarts:
+
+```
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+...
+spec:
+ restart: "RollingUpdate"
+...
+```
+
[clickhouse-operator-install-bundle.yaml]: ../deploy/operator/clickhouse-operator-install-bundle.yaml
[70-chop-config.yaml]: ./chi-examples/70-chop-config.yaml
diff --git a/docs/security_hardening.md b/docs/security_hardening.md
index 7bf5d4fe9..c4c92a99a 100644
--- a/docs/security_hardening.md
+++ b/docs/security_hardening.md
@@ -73,7 +73,7 @@ stringData:
We recommend that you do not include the **user** and **password** within the operator configuration without a **secret**, though it is also supported.
-To change '**clickhouse_operator**' user password you can modify `etc-clickhouse-operator-files` configmap or create `ClickHouseOperatorConfiguration` object.
+To change '**clickhouse_operator**' user password you can modify `etc-clickhouse-operator-files` configmap or create `ClickHouseOperatorConfiguration` object, then restart the operator to apply the change.
See [operator configuration](https://github.com/Altinity/clickhouse-operator/blob/master/docs/operator_configuration.md) for more information about operator configuration files.
diff --git a/go.mod b/go.mod
index 3f00e5c23..ebceb3187 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/altinity/clickhouse-operator
-go 1.25.4
+go 1.25.6
replace (
github.com/emicklei/go-restful/v3 => github.com/emicklei/go-restful/v3 v3.10.0
diff --git a/grafana-dashboard/Altinity_ClickHouse_Operator_dashboard.json b/grafana-dashboard/Altinity_ClickHouse_Operator_dashboard.json
index f9a9e24fa..68287e986 100644
--- a/grafana-dashboard/Altinity_ClickHouse_Operator_dashboard.json
+++ b/grafana-dashboard/Altinity_ClickHouse_Operator_dashboard.json
@@ -1282,7 +1282,7 @@
{
"targetBlank": true,
"title": "max_concurent_queries",
- "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max_concurrent_queries"
+ "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries"
},
{
"targetBlank": true,
@@ -1412,7 +1412,7 @@
{
"targetBlank": true,
"title": "max_concurent_queries",
- "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max-concurrent-queries"
+ "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries"
},
{
"targetBlank": true,
@@ -1727,7 +1727,7 @@
{
"targetBlank": true,
"title": "max_concurent_queries",
- "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max_concurrent_queries"
+ "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_concurrent_queries"
},
{
"targetBlank": true,
@@ -2253,7 +2253,7 @@
{
"targetBlank": true,
"title": "max_replica_delay_for_distributed_queries",
- "url": "https://clickhouse.com/docs/en/operations/settings/settings#settings-max_replica_delay_for_distributed_queries"
+ "url": "https://clickhouse.com/docs/operations/settings/settings#max_replica_delay_for_distributed_queries"
}
],
"options": {
@@ -4298,7 +4298,7 @@
{
"targetBlank": true,
"title": "mark_cache_size",
- "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#server-mark-cache-size"
+ "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#mark_cache_size"
},
{
"targetBlank": true,
@@ -5335,12 +5335,12 @@
{
"targetBlank": true,
"title": "max_connections",
- "url": "https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max-connections"
+ "url": "https://clickhouse.com/docs/operations/server-configuration-parameters/settings#max_connections"
},
{
"targetBlank": true,
"title": "max_distributed_connections",
- "url": "https://clickhouse.com/docs/en/operations/settings/settings#max-distributed-connections"
+ "url": "https://clickhouse.com/docs/operations/settings/settings#max_distributed_connections"
},
{
"targetBlank": true,
diff --git a/grafana-dashboard/ClickHouseKeeper_dashboard.json b/grafana-dashboard/ClickHouseKeeper_dashboard.json
index 2b47c9419..fbb298822 100644
--- a/grafana-dashboard/ClickHouseKeeper_dashboard.json
+++ b/grafana-dashboard/ClickHouseKeeper_dashboard.json
@@ -983,6 +983,24 @@
],
"templating": {
"list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "prometheus",
+ "value": "prometheus"
+ },
+ "hide": 2,
+ "includeAll": false,
+ "multi": false,
+ "name": "DS_PROMETHEUS",
+ "options": [],
+ "query": "prometheus",
+ "queryValue": "",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
{
"allValue": ".+",
"current": {},
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go
index 9f1c01e86..4c658b287 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go
@@ -41,10 +41,10 @@ func (cr *ClickHouseKeeperInstallation) GetSpecA() any {
}
func (cr *ClickHouseKeeperInstallation) GetRuntime() apiChi.ICustomResourceRuntime {
- return cr.ensureRuntime()
+ return cr.EnsureRuntime()
}
-func (cr *ClickHouseKeeperInstallation) ensureRuntime() *ClickHouseKeeperInstallationRuntime {
+func (cr *ClickHouseKeeperInstallation) EnsureRuntime() *ClickHouseKeeperInstallationRuntime {
if cr == nil {
return nil
}
@@ -172,7 +172,7 @@ func (cr *ClickHouseKeeperInstallation) FillStatus(endpoints util.Slice[string],
ClustersCount: cr.ClustersCount(),
ShardsCount: cr.ShardsCount(),
HostsCount: cr.HostsCount(),
- TaskID: "",
+ TaskID: cr.GetSpecT().GetTaskID().Value(),
HostsUpdatedCount: 0,
HostsAddedCount: 0,
HostsUnchangedCount: 0,
@@ -423,7 +423,10 @@ func (cr *ClickHouseKeeperInstallation) IsAuto() bool {
// IsStopped checks whether CR is stopped
func (cr *ClickHouseKeeperInstallation) IsStopped() bool {
- return false
+ if cr == nil {
+ return false
+ }
+ return cr.GetSpecT().GetStop().Value()
}
// IsRollingUpdate checks whether CHI should perform rolling update
@@ -454,20 +457,20 @@ func (cr *ClickHouseKeeperInstallation) Copy(opts types.CopyCROptions) *ClickHou
return nil
}
- var chi2 *ClickHouseKeeperInstallation
- if err := json.Unmarshal(jsonBytes, &chi2); err != nil {
+ var cr2 *ClickHouseKeeperInstallation
+ if err := json.Unmarshal(jsonBytes, &cr2); err != nil {
return nil
}
if opts.SkipStatus {
- chi2.Status = nil
+ cr2.Status = nil
}
if opts.SkipManagedFields {
- chi2.SetManagedFields(nil)
+ cr2.SetManagedFields(nil)
}
- return chi2
+ return cr2
}
// JSON returns JSON string
@@ -499,7 +502,7 @@ func (cr *ClickHouseKeeperInstallation) YAML(opts types.CopyCROptions) string {
return string(yamlBytes)
}
-// FirstHost returns first host of the CHI
+// FirstHost returns first host of the CR
func (cr *ClickHouseKeeperInstallation) FirstHost() *apiChi.Host {
var result *apiChi.Host
cr.WalkHosts(func(host *apiChi.Host) error {
@@ -651,7 +654,6 @@ func (cr *ClickHouseKeeperInstallation) WalkTillError(
ctx context.Context,
fCRPreliminary func(ctx context.Context, chi *ClickHouseKeeperInstallation) error,
fCluster func(ctx context.Context, cluster *Cluster) error,
- fShards func(ctx context.Context, shards []*ChkShard) error,
fCRFinal func(ctx context.Context, chi *ClickHouseKeeperInstallation) error,
) error {
if err := fCRPreliminary(ctx, cr); err != nil {
@@ -663,14 +665,6 @@ func (cr *ClickHouseKeeperInstallation) WalkTillError(
if err := fCluster(ctx, cluster); err != nil {
return err
}
-
- shards := make([]*ChkShard, 0, len(cluster.Layout.Shards))
- for shardIndex := range cluster.Layout.Shards {
- shards = append(shards, cluster.Layout.Shards[shardIndex])
- }
- if err := fShards(ctx, shards); err != nil {
- return err
- }
}
if err := fCRFinal(ctx, cr); err != nil {
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go
index 301287826..3cd12e3d4 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go
@@ -21,15 +21,14 @@ import (
// Cluster defines item of a clusters section of .configuration
type Cluster struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
-
- Settings *apiChi.Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
- Files *apiChi.Settings `json:"files,omitempty" yaml:"files,omitempty"`
- Templates *apiChi.TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"`
- Layout *ChkClusterLayout `json:"layout,omitempty" yaml:"layout,omitempty"`
- PDBManaged *types.StringBool `json:"pdbManaged,omitempty" yaml:"pdbManaged,omitempty"`
- PDBMaxUnavailable *types.Int32 `json:"pdbMaxUnavailable,omitempty" yaml:"pdbMaxUnavailable,omitempty"`
- Reconcile apiChi.ClusterReconcile `json:"reconcile" yaml:"reconcile"`
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ Settings *apiChi.Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
+ Files *apiChi.Settings `json:"files,omitempty" yaml:"files,omitempty"`
+ Templates *apiChi.TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"`
+ PDBManaged *types.StringBool `json:"pdbManaged,omitempty" yaml:"pdbManaged,omitempty"`
+ PDBMaxUnavailable *types.Int32 `json:"pdbMaxUnavailable,omitempty" yaml:"pdbMaxUnavailable,omitempty"`
+ Reconcile *apiChi.ClusterReconcile `json:"reconcile,omitempty" yaml:"reconcile,omitempty"`
+ Layout *ChkClusterLayout `json:"layout,omitempty" yaml:"layout,omitempty"`
Runtime ChkClusterRuntime `json:"-" yaml:"-"`
}
@@ -137,7 +136,8 @@ func (cluster *Cluster) GetPDBMaxUnavailable() *types.Int32 {
}
// GetReconcile is a getter
-func (cluster *Cluster) GetReconcile() apiChi.ClusterReconcile {
+func (cluster *Cluster) GetReconcile() *apiChi.ClusterReconcile {
+ cluster.Reconcile = cluster.Reconcile.Ensure()
return cluster.Reconcile
}
@@ -182,7 +182,7 @@ func (cluster *Cluster) SelectSettingsSourceFrom(shard apiChi.IShard, replica ap
return replica
}
-// InheritFilesFrom inherits files from CHI
+// InheritFilesFrom inherits files from CR
func (cluster *Cluster) InheritFilesFrom(chk *ClickHouseKeeperInstallation) {
if chk.GetSpecT().Configuration == nil {
return
@@ -203,6 +203,17 @@ func (cluster *Cluster) InheritFilesFrom(chk *ClickHouseKeeperInstallation) {
})
}
+// InheritClusterReconcileFrom inherits reconcile runtime from CHI
+func (cluster *Cluster) InheritClusterReconcileFrom(chk *ClickHouseKeeperInstallation) {
+ if chk.Spec.Reconcile == nil {
+ return
+ }
+ reconcile := cluster.GetReconcile()
+ reconcile.Runtime = reconcile.Runtime.MergeFrom(chk.Spec.Reconcile.Runtime, apiChi.MergeTypeFillEmptyValues)
+ reconcile.Host = reconcile.Host.MergeFrom(chk.Spec.Reconcile.Host)
+ cluster.Reconcile = reconcile
+}
+
// InheritTemplatesFrom inherits templates from CHI
func (cluster *Cluster) InheritTemplatesFrom(chk *ClickHouseKeeperInstallation) {
if chk.GetSpec().GetDefaults() == nil {
@@ -220,6 +231,11 @@ func (cluster *Cluster) GetServiceTemplate() (*apiChi.ServiceTemplate, bool) {
return nil, false
}
+// GetCR gets parent CR
+func (cluster *Cluster) GetCR() *ClickHouseKeeperInstallation {
+ return cluster.Runtime.CHK
+}
+
func (cluster *Cluster) GetAncestor() apiChi.ICluster {
return (*Cluster)(nil)
}
@@ -373,6 +389,11 @@ func (cluster *Cluster) IsNonZero() bool {
return cluster != nil
}
+// IsStopped checks whether host is stopped
+func (cluster *Cluster) IsStopped() bool {
+ return cluster.GetCR().IsStopped()
+}
+
func (cluster *Cluster) Ensure(create func() *Cluster) *Cluster {
if cluster == nil {
cluster = create()
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go
index 28d15e41b..5a7003691 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go
@@ -22,6 +22,7 @@ import (
// ChkSpec defines spec section of ClickHouseKeeper resource
type ChkSpec struct {
TaskID *types.Id `json:"taskID,omitempty" yaml:"taskID,omitempty"`
+ Stop *types.StringBool `json:"stop,omitempty" yaml:"stop,omitempty"`
NamespaceDomainPattern *types.String `json:"namespaceDomainPattern,omitempty" yaml:"namespaceDomainPattern,omitempty"`
Suspend *types.StringBool `json:"suspend,omitempty" yaml:"suspend,omitempty"`
Reconciling *apiChi.ChiReconcile `json:"reconciling,omitempty" yaml:"reconciling,omitempty"`
@@ -47,6 +48,13 @@ func (spec *ChkSpec) GetTaskID() *types.Id {
return spec.TaskID
}
+func (spec *ChkSpec) GetStop() *types.StringBool {
+ if spec == nil {
+ return (*types.StringBool)(nil)
+ }
+ return spec.Stop
+}
+
func (spec *ChkSpec) GetNamespaceDomainPattern() *types.String {
if spec == nil {
return (*types.String)(nil)
@@ -90,6 +98,9 @@ func (spec *ChkSpec) MergeFrom(from *ChkSpec, _type apiChi.MergeType) {
if !spec.HasTaskID() {
spec.TaskID = spec.TaskID.MergeFrom(from.TaskID)
}
+ if !spec.Stop.HasValue() {
+ spec.Stop = spec.Stop.MergeFrom(from.Stop)
+ }
if !spec.NamespaceDomainPattern.HasValue() {
spec.NamespaceDomainPattern = spec.NamespaceDomainPattern.MergeFrom(from.NamespaceDomainPattern)
}
@@ -100,6 +111,10 @@ func (spec *ChkSpec) MergeFrom(from *ChkSpec, _type apiChi.MergeType) {
if from.HasTaskID() {
spec.TaskID = spec.TaskID.MergeFrom(from.TaskID)
}
+ if from.Stop.HasValue() {
+ // Override by non-empty values only
+ spec.Stop = from.Stop
+ }
if from.NamespaceDomainPattern.HasValue() {
spec.NamespaceDomainPattern = spec.NamespaceDomainPattern.MergeFrom(from.NamespaceDomainPattern)
}
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go
index 4ce19b13c..5fa70cec3 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go
@@ -75,6 +75,7 @@ type Status struct {
Endpoints []string `json:"endpoints,omitempty" yaml:"endpoints,omitempty"`
NormalizedCR *ClickHouseKeeperInstallation `json:"normalized,omitempty" yaml:"normalized,omitempty"`
NormalizedCRCompleted *ClickHouseKeeperInstallation `json:"normalizedCompleted,omitempty" yaml:"normalizedCompleted,omitempty"`
+ ActionPlan *chi.ActionPlan `json:"actionPlan,omitempty" yaml:"actionPlan,omitempty"`
HostsWithTablesCreated []string `json:"hostsWithTablesCreated,omitempty" yaml:"hostsWithTablesCreated,omitempty"`
HostsWithReplicaCaughtUp []string `json:"hostsWithReplicaCaughtUp,omitempty" yaml:"hostsWithReplicaCaughtUp,omitempty"`
UsedTemplates []*chi.TemplateRef `json:"usedTemplates,omitempty" yaml:"usedTemplates,omitempty"`
@@ -281,7 +282,7 @@ func (s *Status) HostCompleted() {
}
// ReconcileStart marks reconcile start
-func (s *Status) ReconcileStart(deleteHostsCount int) {
+func (s *Status) ReconcileStart(ap chi.IActionPlan) {
doWithWriteLock(s, func(s *Status) {
if s == nil {
return
@@ -292,7 +293,8 @@ func (s *Status) ReconcileStart(deleteHostsCount int) {
s.HostsUnchangedCount = 0
s.HostsCompletedCount = 0
s.HostsDeletedCount = 0
- s.HostsDeleteCount = deleteHostsCount
+ s.HostsDeleteCount = ap.GetRemovedHostsNum()
+ s.ActionPlan = ap.(*chi.ActionPlan)
pushTaskIDStartedNoSync(s)
})
}
@@ -338,6 +340,13 @@ func (s *Status) DeleteStart() {
})
}
+// SetActionPlan sets action plan
+func (s *Status) SetActionPlan(ap chi.IActionPlan) {
+ doWithWriteLock(s, func(s *Status) {
+ s.ActionPlan = ap.(*chi.ActionPlan)
+ })
+}
+
func prepareOptions(opts types.CopyStatusOptions) types.CopyStatusOptions {
if opts.FieldGroupInheritable {
opts.Copy.TaskIDsStarted = true
@@ -531,6 +540,9 @@ func (s *Status) CopyFrom(f *Status, opts types.CopyStatusOptions) {
if opts.Copy.NormalizedCRCompleted {
s.NormalizedCRCompleted = from.NormalizedCRCompleted
}
+ if opts.Copy.ActionPlan {
+ s.ActionPlan = from.ActionPlan
+ }
if opts.Copy.HostsWithTablesCreated {
s.HostsWithTablesCreated = nil
if len(from.HostsWithTablesCreated) > 0 {
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go
index 69c676eea..3474ad439 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go
@@ -15,20 +15,20 @@
package v1
import (
- "github.com/altinity/clickhouse-operator/pkg/apis/swversion"
"sync"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/swversion"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClickHouseKeeperInstallation defines a ClickHouse Keeper ChkCluster
type ClickHouseKeeperInstallation struct {
- meta.TypeMeta `json:",inline" yaml:",inline"`
- meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
+ meta.TypeMeta `json:",inline" yaml:",inline"`
+ meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
Spec ChkSpec `json:"spec" yaml:"spec"`
Status *Status `json:"status,omitempty" yaml:"status,omitempty"`
@@ -43,6 +43,7 @@ type ClickHouseKeeperInstallationRuntime struct {
commonConfigMutex sync.Mutex `json:"-" yaml:"-"`
MinVersion *swversion.SoftWareVersion `json:"-" yaml:"-"`
MaxVersion *swversion.SoftWareVersion `json:"-" yaml:"-"`
+ ActionPlan apiChi.IActionPlan `json:"-" yaml:"-"`
}
func newClickHouseKeeperInstallationRuntime() *ClickHouseKeeperInstallationRuntime {
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go
index 645b11f29..ccd103091 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go
@@ -288,6 +288,11 @@ func (in *ChkSpec) DeepCopyInto(out *ChkSpec) {
*out = new(types.Id)
**out = **in
}
+ if in.Stop != nil {
+ in, out := &in.Stop, &out.Stop
+ *out = new(types.StringBool)
+ **out = **in
+ }
if in.NamespaceDomainPattern != nil {
in, out := &in.NamespaceDomainPattern, &out.NamespaceDomainPattern
*out = new(types.String)
@@ -471,7 +476,11 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
*out = new(types.Int32)
**out = **in
}
- in.Reconcile.DeepCopyInto(&out.Reconcile)
+ if in.Reconcile != nil {
+ in, out := &in.Reconcile, &out.Reconcile
+ *out = new(clickhousealtinitycomv1.ClusterReconcile)
+ (*in).DeepCopyInto(*out)
+ }
in.Runtime.DeepCopyInto(&out.Runtime)
return
}
@@ -612,6 +621,11 @@ func (in *Status) DeepCopyInto(out *Status) {
*out = new(ClickHouseKeeperInstallation)
(*in).DeepCopyInto(*out)
}
+ if in.ActionPlan != nil {
+ in, out := &in.ActionPlan, &out.ActionPlan
+ *out = new(clickhousealtinitycomv1.ActionPlan)
+ (*in).DeepCopyInto(*out)
+ }
if in.HostsWithTablesCreated != nil {
in, out := &in.HostsWithTablesCreated, &out.HostsWithTablesCreated
*out = make([]string, len(*in))
diff --git a/pkg/apis/clickhouse.altinity.com/v1/interface.go b/pkg/apis/clickhouse.altinity.com/v1/interface.go
index e1bba0ca0..2e91421e7 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/interface.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/interface.go
@@ -134,7 +134,7 @@ type ICluster interface {
SelectSettingsSourceFrom(shard IShard, replica IReplica) any
GetRuntime() IClusterRuntime
- GetReconcile() ClusterReconcile
+ GetReconcile() *ClusterReconcile
GetServiceTemplate() (*ServiceTemplate, bool)
GetAncestor() ICluster
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
index 0edef3cd0..2d0b31257 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
@@ -482,20 +482,20 @@ func (cr *ClickHouseInstallation) Copy(opts types.CopyCROptions) *ClickHouseInst
return nil
}
- var chi2 *ClickHouseInstallation
- if err := json.Unmarshal(jsonBytes, &chi2); err != nil {
+ var cr2 *ClickHouseInstallation
+ if err := json.Unmarshal(jsonBytes, &cr2); err != nil {
return nil
}
if opts.SkipStatus {
- chi2.Status = nil
+ cr2.Status = nil
}
if opts.SkipManagedFields {
- chi2.SetManagedFields(nil)
+ cr2.SetManagedFields(nil)
}
- return chi2
+ return cr2
}
// JSON returns JSON string
@@ -527,7 +527,7 @@ func (cr *ClickHouseInstallation) YAML(opts types.CopyCROptions) string {
return string(yamlBytes)
}
-// FirstHost returns first host of the CHI
+// FirstHost returns first host of the CR
func (cr *ClickHouseInstallation) FirstHost() *Host {
var result *Host
cr.WalkHosts(func(host *Host) error {
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
index cebf8a6ec..a379677f8 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
@@ -31,7 +31,7 @@ type Cluster struct {
Secret *ClusterSecret `json:"secret,omitempty" yaml:"secret,omitempty"`
PDBManaged *types.StringBool `json:"pdbManaged,omitempty" yaml:"pdbManaged,omitempty"`
PDBMaxUnavailable *types.Int32 `json:"pdbMaxUnavailable,omitempty" yaml:"pdbMaxUnavailable,omitempty"`
- Reconcile ClusterReconcile `json:"reconcile" yaml:"reconcile"`
+ Reconcile *ClusterReconcile `json:"reconcile,omitempty" yaml:"reconcile,omitempty"`
Layout *ChiClusterLayout `json:"layout,omitempty" yaml:"layout,omitempty"`
Runtime ChiClusterRuntime `json:"-" yaml:"-"`
@@ -146,7 +146,8 @@ func (cluster *Cluster) GetPDBMaxUnavailable() *types.Int32 {
}
// GetReconcile is a getter
-func (cluster *Cluster) GetReconcile() ClusterReconcile {
+func (cluster *Cluster) GetReconcile() *ClusterReconcile {
+ cluster.Reconcile = cluster.Reconcile.Ensure()
return cluster.Reconcile
}
@@ -207,7 +208,7 @@ func (cluster *Cluster) InheritZookeeperFrom(chi *ClickHouseInstallation) {
cluster.Zookeeper = cluster.Zookeeper.MergeFrom(chi.GetSpecT().Configuration.Zookeeper, MergeTypeFillEmptyValues)
}
-// InheritFilesFrom inherits files from CHI
+// InheritFilesFrom inherits files from CR
func (cluster *Cluster) InheritFilesFrom(chi *ClickHouseInstallation) {
if chi.GetSpecT().Configuration == nil {
return
@@ -233,8 +234,11 @@ func (cluster *Cluster) InheritClusterReconcileFrom(chi *ClickHouseInstallation)
if chi.Spec.Reconcile == nil {
return
}
- cluster.Reconcile.Runtime = cluster.Reconcile.Runtime.MergeFrom(chi.Spec.Reconcile.Runtime, MergeTypeFillEmptyValues)
- cluster.Reconcile.Host = cluster.Reconcile.Host.MergeFrom(chi.Spec.Reconcile.Host)
+ reconcile := cluster.GetReconcile()
+ reconcile.Runtime = reconcile.Runtime.MergeFrom(chi.Spec.Reconcile.Runtime, MergeTypeFillEmptyValues)
+ reconcile.StatefulSet = reconcile.StatefulSet.MergeFrom(chi.Spec.Reconcile.StatefulSet)
+ reconcile.Host = reconcile.Host.MergeFrom(chi.Spec.Reconcile.Host)
+ cluster.Reconcile = reconcile
}
// InheritTemplatesFrom inherits templates from CHI
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go
index be2403e65..82e685e4b 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go
@@ -39,7 +39,7 @@ import (
const (
// Default values for update timeout and polling period in seconds
defaultStatefulSetUpdateTimeout = 300
- defaultStatefulSetUpdatePollInterval = 15
+ defaultStatefulSetUpdatePollInterval = 5
// Default values for ClickHouse user configuration
// 1. user/profile
@@ -72,12 +72,15 @@ const (
// Timeouts used to limit connection and queries from the operator to ClickHouse instances. In seconds
// defaultTimeoutConnect specifies default timeout to connect to the ClickHouse instance. In seconds
- defaultTimeoutConnect = 2
+ defaultTimeoutConnect = 5
// defaultTimeoutQuery specifies default timeout to query the CLickHouse instance. In seconds
defaultTimeoutQuery = 5
// defaultTimeoutCollect specifies default timeout to collect metrics from the ClickHouse instance. In seconds
defaultTimeoutCollect = 8
+ // defaultMetricsTablesRegexp specifies default regexp to match tables in system database to fetch metrics from
+ defaultMetricsTablesRegexp = "^(metrics|custom_metrics)$"
+
// defaultReconcileCHIsThreadsNumber specifies default number of controller threads running concurrently.
// Used in case no other specified in config
defaultReconcileCHIsThreadsNumber = 1
@@ -136,6 +139,22 @@ const (
OnStatefulSetUpdateFailureActionIgnore = "ignore"
)
+const (
+ // What to do in case StatefulSet needs to be recreated due to PVC data loss or missing volumes
+ // Abort - Loss: abort CHI reconcile
+ OnStatefulSetRecreateOnDataLossActionAbort = "abort"
+
+ // Recreate - Loss: proceed and recreate StatefulSet
+ OnStatefulSetRecreateOnDataLossActionRecreate = "recreate"
+
+ // What to do in case StatefulSet needs to be recreated due to update failure or StatefulSet not ready
+ // Abort - Failure: abort CHI reconcile
+ OnStatefulSetRecreateOnUpdateFailureActionAbort = "abort"
+
+ // Recreate - Failure: proceed and recreate StatefulSet
+ OnStatefulSetRecreateOnUpdateFailureActionRecreate = "recreate"
+)
+
const (
defaultMaxReplicationDelay = 10
)
@@ -355,6 +374,10 @@ type OperatorConfigClickHouse struct {
Timeouts struct {
Collect time.Duration `json:"collect" yaml:"collect"`
} `json:"timeouts" yaml:"timeouts"`
+ // TablesRegexp specifies regexp to match tables in system database to fetch metrics from.
+ // Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
+ // Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics.
+ TablesRegexp string `json:"tablesRegexp" yaml:"tablesRegexp"`
} `json:"metrics" yaml:"metrics"`
}
@@ -422,10 +445,15 @@ type OperatorConfigReconcile struct {
} `json:"create" yaml:"create"`
Update struct {
- Timeout uint64 `json:"timeout" yaml:"timeout"`
+ Timeout uint64 `json:"timeout" yaml:"timeout"`
PollInterval uint64 `json:"pollInterval" yaml:"pollInterval"`
- OnFailure string `json:"onFailure" yaml:"onFailure"`
+ OnFailure string `json:"onFailure" yaml:"onFailure"`
} `json:"update" yaml:"update"`
+
+ Recreate struct {
+ OnDataLoss string `json:"onDataLoss" yaml:"onDataLoss"`
+ OnUpdateFailure string `json:"onUpdateFailure" yaml:"onUpdateFailure"`
+ } `json:"recreate" yaml:"recreate"`
} `json:"statefulSet" yaml:"statefulSet"`
Host ReconcileHost `json:"host" yaml:"host"`
@@ -446,8 +474,8 @@ type ReconcileHost struct {
Drop ReconcileHostDrop `json:"drop" yaml:"drop"`
}
-func (rh ReconcileHost) Normalize() ReconcileHost {
- rh.Wait = rh.Wait.Normalize()
+func (rh ReconcileHost) Normalize(readiness *types.StringBool, overwrite bool) ReconcileHost {
+ rh.Wait = rh.Wait.Normalize(readiness, overwrite)
rh.Drop = rh.Drop.Normalize()
return rh
}
@@ -467,7 +495,7 @@ type ReconcileHostWait struct {
Probes *ReconcileHostWaitProbes `json:"probes,omitempty" yaml:"probes,omitempty"`
}
-func (wait ReconcileHostWait) Normalize() ReconcileHostWait {
+func (wait ReconcileHostWait) Normalize(readiness *types.StringBool, overwrite bool) ReconcileHostWait {
if wait.Replicas == nil {
wait.Replicas = &ReconcileHostWaitReplicas{}
}
@@ -478,10 +506,13 @@ func (wait ReconcileHostWait) Normalize() ReconcileHostWait {
}
if wait.Probes == nil {
- // Default value
+ // Apply default when probes are not specified at all.
wait.Probes = &ReconcileHostWaitProbes{
- Readiness: types.NewStringBool(true),
+ Readiness: readiness,
}
+ } else if overwrite {
+ // Force override even when a value is already set.
+ wait.Probes.Readiness = readiness
}
return wait
@@ -1018,10 +1049,18 @@ func (c *OperatorConfig) normalizeSectionReconcileStatefulSet() {
if c.Reconcile.StatefulSet.Update.OnFailure == "" {
c.Reconcile.StatefulSet.Update.OnFailure = OnStatefulSetUpdateFailureActionRollback
}
+
+ // Default Recreate actions - recreate
+ if c.Reconcile.StatefulSet.Recreate.OnDataLoss == "" {
+ c.Reconcile.StatefulSet.Recreate.OnDataLoss = OnStatefulSetRecreateOnDataLossActionRecreate
+ }
+ if c.Reconcile.StatefulSet.Recreate.OnUpdateFailure == "" {
+ c.Reconcile.StatefulSet.Recreate.OnUpdateFailure = OnStatefulSetRecreateOnUpdateFailureActionRecreate
+ }
}
func (c *OperatorConfig) normalizeSectionReconcileHost() {
- c.Reconcile.Host = c.Reconcile.Host.Normalize()
+ c.Reconcile.Host = c.Reconcile.Host.Normalize(nil, false)
}
func (c *OperatorConfig) normalizeSectionClickHouseConfigurationUserDefault() {
@@ -1105,6 +1144,10 @@ func (c *OperatorConfig) normalizeSectionClickHouseMetrics() {
}
// Adjust seconds to time.Duration
c.ClickHouse.Metrics.Timeouts.Collect = c.ClickHouse.Metrics.Timeouts.Collect * time.Second
+
+ if c.ClickHouse.Metrics.TablesRegexp == "" {
+ c.ClickHouse.Metrics.TablesRegexp = defaultMetricsTablesRegexp
+ }
}
func (c *OperatorConfig) normalizeSectionLogger() {
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go b/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go
index 3cbbb0f91..83ceb1947 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go
@@ -33,6 +33,8 @@ type ChiReconcile struct {
// Runtime specifies runtime settings
Runtime ReconcileRuntime `json:"runtime,omitempty" yaml:"runtime,omitempty"`
+ // StatefulSet specifies StatefulSet reconcile settings
+ StatefulSet ReconcileStatefulSet `json:"statefulSet,omitempty" yaml:"statefulSet,omitempty"`
// Host specifies host-lever reconcile settings
Host ReconcileHost `json:"host" yaml:"host"`
}
@@ -40,10 +42,67 @@ type ChiReconcile struct {
type ClusterReconcile struct {
// Runtime specifies runtime settings
Runtime ReconcileRuntime `json:"runtime" yaml:"runtime"`
+ // StatefulSet specifies StatefulSet reconcile settings
+ StatefulSet ReconcileStatefulSet `json:"statefulSet,omitempty" yaml:"statefulSet,omitempty"`
// Host specifies host-lever reconcile settings
Host ReconcileHost `json:"host" yaml:"host"`
}
+// ReconcileStatefulSet defines StatefulSet reconcile settings
+type ReconcileStatefulSet struct {
+ Create ReconcileStatefulSetCreate `json:"create,omitempty" yaml:"create,omitempty"`
+ Update ReconcileStatefulSetUpdate `json:"update,omitempty" yaml:"update,omitempty"`
+ Recreate ReconcileStatefulSetRecreate `json:"recreate,omitempty" yaml:"recreate,omitempty"`
+}
+
+// ReconcileStatefulSetCreate defines StatefulSet create settings
+type ReconcileStatefulSetCreate struct {
+ OnFailure string `json:"onFailure,omitempty" yaml:"onFailure,omitempty"`
+}
+
+// ReconcileStatefulSetUpdate defines StatefulSet update settings
+type ReconcileStatefulSetUpdate struct {
+ Timeout uint64 `json:"timeout,omitempty" yaml:"timeout,omitempty"`
+ PollInterval uint64 `json:"pollInterval,omitempty" yaml:"pollInterval,omitempty"`
+ OnFailure string `json:"onFailure,omitempty" yaml:"onFailure,omitempty"`
+}
+
+// ReconcileStatefulSetRecreate defines StatefulSet recreate settings
+type ReconcileStatefulSetRecreate struct {
+ OnDataLoss string `json:"onDataLoss,omitempty" yaml:"onDataLoss,omitempty"`
+ OnUpdateFailure string `json:"onUpdateFailure,omitempty" yaml:"onUpdateFailure,omitempty"`
+}
+
+// MergeFrom merges from specified ReconcileStatefulSet
+func (s ReconcileStatefulSet) MergeFrom(from ReconcileStatefulSet) ReconcileStatefulSet {
+ if s.Create.OnFailure == "" {
+ s.Create.OnFailure = from.Create.OnFailure
+ }
+ if s.Update.Timeout == 0 {
+ s.Update.Timeout = from.Update.Timeout
+ }
+ if s.Update.PollInterval == 0 {
+ s.Update.PollInterval = from.Update.PollInterval
+ }
+ if s.Update.OnFailure == "" {
+ s.Update.OnFailure = from.Update.OnFailure
+ }
+ if s.Recreate.OnDataLoss == "" {
+ s.Recreate.OnDataLoss = from.Recreate.OnDataLoss
+ }
+ if s.Recreate.OnUpdateFailure == "" {
+ s.Recreate.OnUpdateFailure = from.Recreate.OnUpdateFailure
+ }
+ return s
+}
+
+func (reconcile *ClusterReconcile) Ensure() *ClusterReconcile {
+ if reconcile == nil {
+ reconcile = &ClusterReconcile{}
+ }
+ return reconcile
+}
+
// NewChiReconcile creates new reconcile
func NewChiReconcile() *ChiReconcile {
return new(ChiReconcile)
@@ -81,6 +140,7 @@ func (r *ChiReconcile) MergeFrom(from *ChiReconcile, _type MergeType) *ChiReconc
r.Cleanup = r.Cleanup.MergeFrom(from.Cleanup, _type)
r.Macros = r.Macros.MergeFrom(from.Macros, _type)
r.Runtime = r.Runtime.MergeFrom(from.Runtime, _type)
+ r.StatefulSet = r.StatefulSet.MergeFrom(from.StatefulSet)
r.Host = r.Host.MergeFrom(from.Host)
return r
@@ -187,6 +247,31 @@ func (r *ChiReconcile) InheritRuntimeFrom(from OperatorConfigReconcileRuntime) {
}
}
+func (r *ChiReconcile) InheritStatefulSetFrom(from OperatorConfigReconcile) {
+ if r == nil {
+ return
+ }
+
+ if r.StatefulSet.Create.OnFailure == "" {
+ r.StatefulSet.Create.OnFailure = from.StatefulSet.Create.OnFailure
+ }
+ if r.StatefulSet.Update.Timeout == 0 {
+ r.StatefulSet.Update.Timeout = from.StatefulSet.Update.Timeout
+ }
+ if r.StatefulSet.Update.PollInterval == 0 {
+ r.StatefulSet.Update.PollInterval = from.StatefulSet.Update.PollInterval
+ }
+ if r.StatefulSet.Update.OnFailure == "" {
+ r.StatefulSet.Update.OnFailure = from.StatefulSet.Update.OnFailure
+ }
+ if r.StatefulSet.Recreate.OnDataLoss == "" {
+ r.StatefulSet.Recreate.OnDataLoss = from.StatefulSet.Recreate.OnDataLoss
+ }
+ if r.StatefulSet.Recreate.OnUpdateFailure == "" {
+ r.StatefulSet.Recreate.OnUpdateFailure = from.StatefulSet.Recreate.OnUpdateFailure
+ }
+}
+
func (r *ChiReconcile) InheritHostFrom(from ReconcileHost) {
r.Host = r.Host.MergeFrom(from)
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_settings.go b/pkg/apis/clickhouse.altinity.com/v1/type_settings.go
index 0a3addd51..d30587887 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_settings.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_settings.go
@@ -300,11 +300,15 @@ func (s *Settings) SetScalarsFromMap(m map[string]string) *Settings {
return s
}
-// Keys gets keys of the settings
+// Keys gets keys of the settings in alphabetical order
func (s *Settings) Keys() (keys []string) {
s.WalkKeys(func(key string, setting *Setting) {
keys = append(keys, key)
})
+
+ // Sort keys to ensure deterministic ordering for Kubernetes manifest stability.
+ // Consistent ordering prevents unnecessary resource updates during reconciliation.
+ sort.Strings(keys)
return keys
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_spec.go b/pkg/apis/clickhouse.altinity.com/v1/type_spec.go
index 650a73849..3969e8aaf 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_spec.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_spec.go
@@ -157,8 +157,7 @@ func (spec *ChiSpec) MergeFrom(from *ChiSpec, _type MergeType) {
spec.NamespaceDomainPattern = spec.NamespaceDomainPattern.MergeFrom(from.NamespaceDomainPattern)
}
if from.Suspend.HasValue() {
- // Override by non-empty values only
- spec.Suspend = from.Suspend
+ spec.Suspend = spec.Suspend.MergeFrom(from.Suspend)
}
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_status_test.go b/pkg/apis/clickhouse.altinity.com/v1/type_status_test.go
index 1f1c3cc09..5d6f5b3e7 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_status_test.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_status_test.go
@@ -2,9 +2,12 @@
package v1
import (
- "github.com/stretchr/testify/require"
"sync"
"testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
)
var normalizedChiA = &ClickHouseInstallation{}
@@ -158,12 +161,14 @@ func Test_ChiStatus_BasicOperations_SingleStatus_ConcurrencyTest(t *testing.T) {
name: "CopyFrom",
goRoutineA: func(s *Status) {
s.PushAction("always-present-action") // CopyFrom preserves existing actions (does not clobber)
- s.CopyFrom(copyTestStatusFrom, CopyStatusOptions{
- Actions: true,
- Errors: true,
- MainFields: true,
- WholeStatus: true,
- InheritableFields: true,
+ s.CopyFrom(copyTestStatusFrom, types.CopyStatusOptions{
+ CopyStatusFieldGroup: types.CopyStatusFieldGroup{
+ FieldGroupActions: true,
+ FieldGroupErrors: true,
+ FieldGroupMain: true,
+ FieldGroupWholeStatus: true,
+ FieldGroupInheritable: true,
+ },
})
},
goRoutineB: func(s *Status) {
diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go
index 9206ef9ce..d25fbc157 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/types.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/types.go
@@ -39,8 +39,8 @@ type ClickHouseInstallation struct {
meta.TypeMeta `json:",inline" yaml:",inline"`
meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
- Spec ChiSpec `json:"spec" yaml:"spec"`
- Status *Status `json:"status,omitempty" yaml:"status,omitempty"`
+ Spec ChiSpec `json:"spec" yaml:"spec"`
+ Status *Status `json:"status,omitempty" yaml:"status,omitempty"`
runtime *ClickHouseInstallationRuntime `json:"-" yaml:"-"`
statusCreatorMutex sync.Mutex `json:"-" yaml:"-"`
@@ -73,20 +73,6 @@ func (runtime *ClickHouseInstallationRuntime) UnlockCommonConfig() {
runtime.commonConfigMutex.Unlock()
}
-func (runtime *ClickHouseInstallationRuntime) HasReferenceSoftwareVersion() bool {
- if runtime == nil {
- return false
- }
- return runtime.MinVersion != nil
-}
-
-func (runtime *ClickHouseInstallationRuntime) GetReferenceSoftwareVersion() *swversion.SoftWareVersion {
- if runtime.HasReferenceSoftwareVersion() {
- return runtime.MinVersion
- }
- return nil
-}
-
// +genclient
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
index bfae92b5b..3fa03f173 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
@@ -854,7 +854,11 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
*out = new(types.Int32)
**out = **in
}
- in.Reconcile.DeepCopyInto(&out.Reconcile)
+ if in.Reconcile != nil {
+ in, out := &in.Reconcile, &out.Reconcile
+ *out = new(ClusterReconcile)
+ (*in).DeepCopyInto(*out)
+ }
if in.Layout != nil {
in, out := &in.Layout, &out.Layout
*out = new(ChiClusterLayout)
diff --git a/pkg/apis/swversion/software_version.go b/pkg/apis/swversion/software_version.go
index 529618ad5..f40cd9497 100644
--- a/pkg/apis/swversion/software_version.go
+++ b/pkg/apis/swversion/software_version.go
@@ -55,7 +55,10 @@ func (in *SoftWareVersion) DeepCopyInto(out *SoftWareVersion) {
}
// NewSoftWareVersion creates new software version
-// version - specifies original software version, such as: 21 or 21.1 or 21.9.6.24-alpha
+// version - specifies original software version, such as:
+// a) 21 or
+// b) 21.1 or
+// c) 21.9.6.24-alpha
func NewSoftWareVersion(version string) *SoftWareVersion {
if strings.TrimSpace(version) == "" {
return nil
@@ -64,12 +67,12 @@ func NewSoftWareVersion(version string) *SoftWareVersion {
// Fetch comma-separated parts of the software version
parts := strings.Split(version, ".")
- // Need to have at least something to as a major version
+ // Need to have at least something to be treated as a major version
if len(parts) < 1 {
return nil
}
- // Need to have at least 3 parts in software version specification
+ // Pad to have 3 parts in software version specification
for len(parts) < 3 {
parts = append(parts, "0")
}
@@ -82,15 +85,16 @@ func NewSoftWareVersion(version string) *SoftWareVersion {
}
}
- // Normalized version of the original
+ // Build normalized version from the original/padded parts
normalized := strings.Join(parts, ".")
- // Build version
+ // Build semver version
_semver, err := semver.NewVersion(normalized)
if err != nil {
return nil
}
+ // So far so good, version is available
return &SoftWareVersion{
original: version,
normalized: normalized,
@@ -98,6 +102,8 @@ func NewSoftWareVersion(version string) *SoftWareVersion {
}
}
+// NewSoftWareVersionFromTag builds a SoftWareVersion from a docker image tag.
+// Tag 'latest' leads to default MaxVersion()
func NewSoftWareVersionFromTag(tag string) *SoftWareVersion {
if strings.ToLower(strings.TrimSpace(tag)) == "latest" {
return MaxVersion()
@@ -157,6 +163,7 @@ func (v *SoftWareVersion) IsKnown() bool {
return !v.IsUnknown()
}
+// SetDescription sets string description
func (v *SoftWareVersion) SetDescription(format string, args ...interface{}) *SoftWareVersion {
if v == nil {
return nil
diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go
index 3534ca622..13e481ab2 100644
--- a/pkg/controller/chi/controller.go
+++ b/pkg/controller/chi/controller.go
@@ -154,7 +154,7 @@ func (c *Controller) addEventHandlersCHI(
chopInformerFactory.Clickhouse().V1().ClickHouseInstallations().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
chi := obj.(*api.ClickHouseInstallation)
- if !shouldEnqueue(chi) {
+ if !ShouldEnqueue(chi) {
return
}
log.V(3).M(chi).Info("chiInformer.AddFunc")
@@ -163,7 +163,7 @@ func (c *Controller) addEventHandlersCHI(
UpdateFunc: func(old, new interface{}) {
oldChi := old.(*api.ClickHouseInstallation)
newChi := new.(*api.ClickHouseInstallation)
- if !shouldEnqueue(newChi) {
+ if !ShouldEnqueue(newChi) {
return
}
log.V(3).M(newChi).Info("chiInformer.UpdateFunc")
@@ -692,6 +692,13 @@ func (c *Controller) updateWatch(chi *api.ClickHouseInstallation) {
go c.updateWatchAsync(watched)
}
+// allocateWatch informs the metrics exporter that the CR is watched, without per-cluster details yet
+func (c *Controller) allocateWatch(chi *api.ClickHouseInstallation) {
+ watched := metrics.NewWatchedCR(chi)
+ watched.Clusters = nil
+ go c.updateWatchAsync(watched)
+}
+
// updateWatchAsync
func (c *Controller) updateWatchAsync(chi *metrics.WatchedCR) {
if err := clickhouse.InformMetricsExporterAboutWatchedCHI(chi); err != nil {
@@ -701,6 +708,33 @@ func (c *Controller) updateWatchAsync(chi *metrics.WatchedCR) {
}
}
+// addHostWatch adds a single host to monitoring
+func (c *Controller) addHostWatch(host *api.Host) {
+ req := &clickhouse.HostRequest{
+ CRNamespace: host.Runtime.Address.Namespace,
+ CRName: host.Runtime.Address.CHIName,
+ ClusterName: host.Runtime.Address.ClusterName,
+ Host: &metrics.WatchedHost{
+ Name: host.Name,
+ Hostname: host.Runtime.Address.FQDN,
+ TCPPort: host.TCPPort.Value(),
+ TLSPort: host.TLSPort.Value(),
+ HTTPPort: host.HTTPPort.Value(),
+ HTTPSPort: host.HTTPSPort.Value(),
+ },
+ }
+ go c.addHostWatchAsync(req)
+}
+
+// addHostWatchAsync informs the metrics exporter about a single watched host
+func (c *Controller) addHostWatchAsync(req *clickhouse.HostRequest) {
+ if err := clickhouse.InformMetricsExporterAboutWatchedHost(req); err != nil {
+ log.V(1).F().Info("FAIL add host watch (%s/%s/%s/%s): %q", req.CRNamespace, req.CRName, req.ClusterName, req.Host.Hostname, err)
+ } else {
+ log.V(1).Info("OK add host watch (%s/%s/%s/%s)", req.CRNamespace, req.CRName, req.ClusterName, req.Host.Hostname)
+ }
+}
+
// deleteWatch
func (c *Controller) deleteWatch(chi *api.ClickHouseInstallation) {
watched := metrics.NewWatchedCR(chi)
@@ -924,15 +958,10 @@ func (c *Controller) handleObject(obj interface{}) {
// TODO c.enqueueObject(chi.Namespace, chi.Name, chi)
}
-func shouldEnqueue(chi *api.ClickHouseInstallation) bool {
- if !chop.Config().IsNamespaceWatched(chi.Namespace) {
- log.V(2).M(chi).Info("chiInformer: skip enqueue, namespace '%s' is not watched or is in deny list", chi.Namespace)
- return false
- }
-
- // if CR is suspended, should skip reconciliation
- if chi.Spec.Suspend.Value() {
- log.V(5).M(chi).Info("chiInformer: skip enqueue, CHI suspended")
+func ShouldEnqueue(cr *api.ClickHouseInstallation) bool {
+ ns := cr.GetNamespace()
+ if !chop.Config().IsNamespaceWatched(ns) {
+ log.V(2).M(cr).Info("skip enqueue, namespace '%s' is not watched or is in deny list", ns)
return false
}
diff --git a/pkg/controller/chi/controller_test.go b/pkg/controller/chi/controller_test.go
index e93ed1d4f..54acd2f26 100644
--- a/pkg/controller/chi/controller_test.go
+++ b/pkg/controller/chi/controller_test.go
@@ -39,8 +39,8 @@ func Test_shouldEnqueue(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- if got := shouldEnqueue(tt.chi); got != tt.want {
- t.Errorf("shouldEnqueue() = %v, want %v", got, tt.want)
+ if got := ShouldEnqueue(tt.chi); got != tt.want {
+ t.Errorf("ShouldEnqueue() = %v, want %v", got, tt.want)
}
})
}
diff --git a/pkg/controller/chi/metrics/pkg.go b/pkg/controller/chi/metrics/pkg.go
index ddbc9255e..ed87c2e8a 100644
--- a/pkg/controller/chi/metrics/pkg.go
+++ b/pkg/controller/chi/metrics/pkg.go
@@ -21,20 +21,20 @@ import (
"github.com/altinity/clickhouse-operator/pkg/util"
)
-func CHIInitZeroValues(ctx context.Context, src labelsSource) {
+func CRInitZeroValues(ctx context.Context, src labelsSource) {
chiInitZeroValues(ctx, src)
}
-func CHIReconcilesStarted(ctx context.Context, src labelsSource) {
+func CRReconcilesStarted(ctx context.Context, src labelsSource) {
chiReconcilesStarted(ctx, src)
}
-func CHIReconcilesCompleted(ctx context.Context, src labelsSource) {
+func CRReconcilesCompleted(ctx context.Context, src labelsSource) {
chiReconcilesCompleted(ctx, src)
}
-func CHIReconcilesAborted(ctx context.Context, src labelsSource) {
+func CRReconcilesAborted(ctx context.Context, src labelsSource) {
chiReconcilesAborted(ctx, src)
}
-func CHIReconcilesTimings(ctx context.Context, src labelsSource, seconds float64) {
+func CRReconcilesTimings(ctx context.Context, src labelsSource, seconds float64) {
chiReconcilesTimings(ctx, src, seconds)
}
@@ -67,7 +67,7 @@ func PodDelete(ctx context.Context) {
var r = map[string]bool{}
var mx = sync.Mutex{}
-func CHIRegister(ctx context.Context, src labelsSource) {
+func CRRegister(ctx context.Context, src labelsSource) {
mx.Lock()
defer mx.Unlock()
@@ -81,7 +81,7 @@ func CHIRegister(ctx context.Context, src labelsSource) {
chiRegister(ctx, src)
}
-func CHIUnregister(ctx context.Context, src labelsSource) {
+func CRUnregister(ctx context.Context, src labelsSource) {
mx.Lock()
defer mx.Unlock()
diff --git a/pkg/controller/chi/worker-deleter.go b/pkg/controller/chi/worker-deleter.go
index 0e3fafc35..f070a22e4 100644
--- a/pkg/controller/chi/worker-deleter.go
+++ b/pkg/controller/chi/worker-deleter.go
@@ -61,8 +61,8 @@ func (w *worker) clean(ctx context.Context, cr api.ICustomResource) {
cr.(*api.ClickHouseInstallation).EnsureStatus().SyncHostTablesCreated()
}
-// dropReplicas cleans Zookeeper for replicas that are properly deleted - via Action Plan
-func (w *worker) dropReplicas(ctx context.Context, cr *api.ClickHouseInstallation) {
+// dropZKReplicas cleans Zookeeper for replicas that are properly deleted - via Action Plan
+func (w *worker) dropZKReplicas(ctx context.Context, cr *api.ClickHouseInstallation) {
// Iterate over Action Plan and drop all replicas that are properly removed as removed hosts
w.a.V(1).M(cr).F().S().Info("drop replicas based on AP")
cnt := 0
@@ -72,7 +72,7 @@ func (w *worker) dropReplicas(ctx context.Context, cr *api.ClickHouseInstallatio
func(shard api.IShard) {
},
func(host *api.Host) {
- _ = w.dropReplica(ctx, host, NewDropReplicaOptions().SetRegularDrop())
+ _ = w.dropZKReplica(ctx, host, NewDropReplicaOptions().SetRegularDrop())
cnt++
},
)
@@ -244,7 +244,7 @@ func (w *worker) discoveryAndDeleteCR(ctx context.Context, cr api.ICustomResourc
return nil
}
- metrics.CHIUnregister(ctx, cr)
+ metrics.CRUnregister(ctx, cr)
objs := w.c.discovery(ctx, cr)
if objs.NumStatefulSet() > 0 {
@@ -445,8 +445,8 @@ func (a dropReplicaOptionsArr) First() *dropReplicaOptions {
return nil
}
-// dropReplica drops replica's info from Zookeeper
-func (w *worker) dropReplica(ctx context.Context, hostToDrop *api.Host, opts *dropReplicaOptions) error {
+// dropZKReplica drops replica's info from Zookeeper
+func (w *worker) dropZKReplica(ctx context.Context, hostToDrop *api.Host, opts *dropReplicaOptions) error {
if hostToDrop == nil {
w.a.V(1).F().Error("FAILED to drop replica. Need to have host to drop. hostToDrop: %s", hostToDrop.GetName())
return nil
diff --git a/pkg/controller/chi/worker-migrator.go b/pkg/controller/chi/worker-migrator.go
index f3b44a620..00acb7f44 100644
--- a/pkg/controller/chi/worker-migrator.go
+++ b/pkg/controller/chi/worker-migrator.go
@@ -16,6 +16,7 @@ package chi
import (
"context"
+
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/chop"
a "github.com/altinity/clickhouse-operator/pkg/controller/common/announcer"
@@ -86,7 +87,7 @@ func (w *worker) migrateTables(ctx context.Context, host *api.Host, opts *migrat
Info(
"Need to drop replica on host %d to shard %d in cluster %s",
host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- w.dropReplica(ctx, host, NewDropReplicaOptions().SetForceDropUponStorageLoss())
+ w.dropZKReplica(ctx, host, NewDropReplicaOptions().SetForceDropUponStorageLoss())
}
w.a.V(1).
diff --git a/pkg/controller/chi/worker-monitoring.go b/pkg/controller/chi/worker-monitoring.go
index ef837d06e..a3f2efab7 100644
--- a/pkg/controller/chi/worker-monitoring.go
+++ b/pkg/controller/chi/worker-monitoring.go
@@ -19,34 +19,70 @@ import (
a "github.com/altinity/clickhouse-operator/pkg/controller/common/announcer"
)
-// excludeFromMonitoring excludes stopped CHI from monitoring
-func (w *worker) excludeFromMonitoring(chi *api.ClickHouseInstallation) {
- if !chi.IsStopped() {
- // No need to exclude non-stopped CHI
+// addHostToMonitoring adds a single host to monitoring.
+// Used during reconcile to enable monitoring for individual hosts as they become ready.
+func (w *worker) addHostToMonitoring(host *api.Host) {
+ if host.GetCR().IsStopped() {
return
}
- // CR is stopped, let's exclude it from monitoring
- // because it makes no sense to send SQL requests to stopped instances
- w.a.V(1).
- WithEvent(chi, a.EventActionReconcile, a.EventReasonReconcileInProgress).
- WithAction(chi).
- M(chi).F().
- Info("exclude CHI from monitoring")
- w.c.deleteWatch(chi)
+ w.a.V(1).M(host).F().Info("add host to monitoring: %s", host.Runtime.Address.FQDN)
+ w.c.addHostWatch(host)
}
-// addToMonitoring adds CHI to monitoring
-func (w *worker) addToMonitoring(chi *api.ClickHouseInstallation) {
- if chi.IsStopped() {
- // No need to add stopped CHI
+// prepareMonitoring prepares monitoring state before reconcile begins.
+// For stopped CR - excludes from monitoring.
+// For running CR with ancestor - preserves old topology in monitoring.
+// For new running CR - allocates an empty slot in monitoring index.
+func (w *worker) prepareMonitoring(cr *api.ClickHouseInstallation) {
+
+ if cr.IsStopped() {
+ // CR is stopped
+ // Exclude it from monitoring because it makes no sense to send SQL requests to stopped instances
+
+ w.a.V(1).
+ WithEvent(cr, a.EventActionReconcile, a.EventReasonReconcileInProgress).
+ WithAction(cr).
+ M(cr).F().
+ Info("exclude CHI from monitoring")
+ w.c.deleteWatch(cr)
+ } else {
+ // CR is NOT stopped, it is running
+ // Ensure CR is registered in monitoring
+ w.a.V(1).
+ WithEvent(cr, a.EventActionReconcile, a.EventReasonReconcileInProgress).
+ WithAction(cr).
+ M(cr).F().
+ Info("ensure CHI in monitoring")
+
+ if cr.HasAncestor() {
+ // Ensure CR is watched
+ w.c.updateWatch(cr.GetAncestorT())
+ } else {
+ // CR is a new one - allocate monitoring
+ w.c.allocateWatch(cr)
+ }
+ }
+}
+
+// addToMonitoring adds CR to monitoring
+func (w *worker) addToMonitoring(cr *api.ClickHouseInstallation) {
+ // Important
+ // Include into monitoring only a RUNNING CR
+ // A stopped CR is not touched
+
+ if cr.IsStopped() {
+ // No need to add stopped CR
return
}
+ // CR is running
+ // Include it into monitoring
+
w.a.V(1).
- WithEvent(chi, a.EventActionReconcile, a.EventReasonReconcileInProgress).
- WithAction(chi).
- M(chi).F().
+ WithEvent(cr, a.EventActionReconcile, a.EventReasonReconcileInProgress).
+ WithAction(cr).
+ M(cr).F().
Info("add CHI to monitoring")
- w.c.updateWatch(chi)
+ w.c.updateWatch(cr)
}
diff --git a/pkg/controller/chi/worker-reconciler-chi.go b/pkg/controller/chi/worker-reconciler-chi.go
index c08f621d3..22c9bcf8e 100644
--- a/pkg/controller/chi/worker-reconciler-chi.go
+++ b/pkg/controller/chi/worker-reconciler-chi.go
@@ -57,24 +57,48 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *api.ClickHouseInstal
w.a.M(new).S().P()
defer w.a.M(new).E().P()
- metrics.CHIInitZeroValues(ctx, new)
- metrics.CHIReconcilesStarted(ctx, new)
+ metrics.CRInitZeroValues(ctx, new)
+ metrics.CRReconcilesStarted(ctx, new)
startTime := time.Now()
new = w.buildCR(ctx, new)
switch {
+ case new.Spec.Suspend.Value():
+ // If the CR is suspended, skip reconciliation entirely
+ w.a.M(new).F().Info("Suspended CR")
+ if new.EnsureStatus().GetStatus() == api.StatusInProgress || new.EnsureRuntime().ActionPlan.HasActionsToDo() {
+ // Either was mid-reconcile when suspended, or has pending changes suppressed by suspend — mark as Aborted
+ new.EnsureStatus().ReconcileAbort()
+ _ = w.c.updateCRObjectStatus(ctx, new, types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
+ CopyStatusFieldGroup: types.CopyStatusFieldGroup{
+ FieldGroupMain: true,
+ },
+ },
+ })
+ w.a.V(1).
+ WithEvent(new, a.EventActionReconcile, a.EventReasonReconcileFailed).
+ WithAction(new).
+ M(new).F().
+ Warning("reconcile aborted due to suspend")
+ metrics.CRReconcilesAborted(ctx, new)
+ } else {
+ metrics.CRReconcilesCompleted(ctx, new)
+ }
+ return nil
case new.EnsureRuntime().ActionPlan.HasActionsToDo():
w.a.M(new).F().Info("ActionPlan has actions - continue reconcile")
case w.isAfterFinalizerInstalled(new.GetAncestorT(), new):
w.a.M(new).F().Info("isAfterFinalizerInstalled - continue reconcile-2")
default:
w.a.M(new).F().Info("ActionPlan has no actions - abort reconcile")
+ metrics.CRReconcilesCompleted(ctx, new)
return nil
}
w.markReconcileStart(ctx, new)
- w.excludeFromMonitoring(new)
+ w.prepareMonitoring(new)
w.setHostStatusesPreliminary(ctx, new)
if err := w.reconcile(ctx, new); err != nil {
@@ -86,7 +110,7 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *api.ClickHouseInstal
err = common.ErrCRUDAbort
w.markReconcileCompletedUnsuccessfully(ctx, new, err)
if errors.Is(err, common.ErrCRUDAbort) {
- metrics.CHIReconcilesAborted(ctx, new)
+ metrics.CRReconcilesAborted(ctx, new)
}
} else {
// Reconcile successful
@@ -97,13 +121,14 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *api.ClickHouseInstal
}
w.clean(ctx, new)
- w.dropReplicas(ctx, new)
w.addToMonitoring(new)
w.waitForIPAddresses(ctx, new)
w.finalizeReconcileAndMarkCompleted(ctx, new)
- metrics.CHIReconcilesCompleted(ctx, new)
- metrics.CHIReconcilesTimings(ctx, new, time.Since(startTime).Seconds())
+ w.dropZKReplicas(ctx, new)
+
+ metrics.CRReconcilesCompleted(ctx, new)
+ metrics.CRReconcilesTimings(ctx, new, time.Since(startTime).Seconds())
}
return nil
@@ -182,7 +207,7 @@ func (w *worker) logSWVersion(ctx context.Context, cr *api.ClickHouseInstallatio
l.M(host).Info("Host software version: %s %s", host.GetName(), host.Runtime.Version.Render())
return nil
})
- l.M(cr).Info("CR software versions [min, max]: %s %s", cr.GetMinVersion().Render(), cr.GetMaxVersion().Render())
+ l.M(cr).Info("CR software versions min=%s max=%s", cr.GetMinVersion().Render(), cr.GetMaxVersion().Render())
}
// reconcile reconciles Custom Resource
@@ -256,7 +281,7 @@ func (w *worker) reconcileCRServiceFinal(ctx context.Context, cr api.ICustomReso
defer log.V(2).F().E().Info("second stage")
if cr.IsStopped() {
- // Stopped CHI must have no entry point
+ // Stopped CR must have no entry point
return nil
}
@@ -405,6 +430,7 @@ func (w *worker) hostForceRestart(ctx context.Context, host *api.Host, opts *sta
}
metrics.HostReconcilesRestart(ctx, host.GetCR())
+
return nil
}
@@ -732,6 +758,9 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.Host) error {
},
})
+ // Host reconcile completed successfully - add it to monitoring
+ w.addHostToMonitoring(host)
+
metrics.HostReconcilesCompleted(ctx, host.GetCR())
metrics.HostReconcilesTimings(ctx, host.GetCR(), time.Since(startTime).Seconds())
@@ -770,15 +799,26 @@ func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error {
w.a.V(1).M(host).F().Info("Reconcile PVCs and data loss for host: %s", host.GetName())
- // In case data loss or volumes missing detected we may need to specify additional reconcile options
+ // In case data loss or missing volumes are detected we may
+ // 1. need to specify additional reconcile options
+ // 2. need to abort the reconcile completely
err := w.reconcileHostPVCs(ctx, host)
+ onDataLoss := host.GetCluster().GetReconcile().StatefulSet.Recreate.OnDataLoss
switch {
case storage.ErrIsDataLoss(err):
+ if onDataLoss == api.OnStatefulSetRecreateOnDataLossActionAbort {
+ w.a.V(1).M(host).F().Warning("Data loss detected for host: %s. Aborting reconcile as configured (onDataLoss: abort)", host.GetName())
+ return common.ErrCRUDAbort
+ }
stsReconcileOpts, migrateTableOpts = w.hostPVCsDataLossDetectedOptions(host)
w.a.V(1).
M(host).F().
Info("Data loss detected for host: %s.", host.GetName())
case storage.ErrIsVolumeMissed(err):
+ if onDataLoss == api.OnStatefulSetRecreateOnDataLossActionAbort {
+ w.a.V(1).M(host).F().Warning("Data volume missed for host: %s. Aborting reconcile as configured (onDataLoss: abort)", host.GetName())
+ return common.ErrCRUDAbort
+ }
// stsReconcileOpts, migrateTableOpts = w.hostPVCsDataVolumeMissedDetectedOptions(host)
stsReconcileOpts, migrateTableOpts = w.hostPVCsDataLossDetectedOptions(host)
w.a.V(1).
diff --git a/pkg/controller/chi/worker-reconciler-helper.go b/pkg/controller/chi/worker-reconciler-helper.go
index 4896ca123..25aaafee8 100644
--- a/pkg/controller/chi/worker-reconciler-helper.go
+++ b/pkg/controller/chi/worker-reconciler-helper.go
@@ -23,7 +23,6 @@ import (
"github.com/altinity/clickhouse-operator/pkg/apis/swversion"
"github.com/altinity/clickhouse-operator/pkg/controller/common"
"github.com/altinity/clickhouse-operator/pkg/controller/common/statefulset"
- "github.com/altinity/clickhouse-operator/pkg/util"
)
func (w *worker) getHostSoftwareVersion(ctx context.Context, host *api.Host) *swversion.SoftWareVersion {
@@ -70,8 +69,8 @@ func (w *worker) isHostSoftwareAbleToRespond(ctx context.Context, host *api.Host
// getReconcileShardsWorkersNum calculates how many workers are allowed to be used for concurrent shards reconcile
func (w *worker) getReconcileShardsWorkersNum(cluster *api.Cluster, opts *common.ReconcileShardsAndHostsOptions) int {
- availableWorkers := float64(cluster.Reconcile.Runtime.ReconcileShardsThreadsNumber)
- maxConcurrencyPercent := float64(cluster.Reconcile.Runtime.ReconcileShardsMaxConcurrencyPercent)
+ availableWorkers := float64(cluster.GetReconcile().Runtime.ReconcileShardsThreadsNumber)
+ maxConcurrencyPercent := float64(cluster.GetReconcile().Runtime.ReconcileShardsMaxConcurrencyPercent)
_100Percent := float64(100)
shardsNum := float64(len(cluster.Layout.Shards))
@@ -148,44 +147,6 @@ func (w *worker) runConcurrently(ctx context.Context, workersNum int, startShard
return err
}
-func (w *worker) runConcurrentlyInBatches(ctx context.Context, workersNum int, start int, shards []*api.ChiShard) error {
- for startShardIndex := 0; startShardIndex < len(shards); startShardIndex += workersNum {
- endShardIndex := util.IncTopped(startShardIndex, workersNum, len(shards))
- concurrentlyProcessedShards := shards[startShardIndex:endShardIndex]
- w.a.V(1).Info("Starting shards from index: %d on workers. Shards indexes [%d:%d)", start+startShardIndex, start+startShardIndex, start+endShardIndex)
-
- // Processing error protected with mutex
- var err error
- var errLock sync.Mutex
-
- wg := sync.WaitGroup{}
- wg.Add(len(concurrentlyProcessedShards))
- // Launch shard concurrent processing
- for j := range concurrentlyProcessedShards {
- shard := concurrentlyProcessedShards[j]
- w.a.V(1).Info("Starting shard on worker. Shard index: %d", start+startShardIndex+j)
- go func() {
- defer wg.Done()
- w.a.V(1).Info("Starting shard on goroutine. Shard index: %d", start+startShardIndex+j)
- if e := w.reconcileShardWithHosts(ctx, shard); e != nil {
- errLock.Lock()
- err = e
- errLock.Unlock()
- }
- w.a.V(1).Info("Finished shard on goroutine. Shard index: %d", start+startShardIndex+j)
- }()
- }
- w.a.V(1).Info("Starting to wait shards from index: %d on workers. Shards indexes [%d:%d)", start+startShardIndex, start+startShardIndex, start+endShardIndex)
- wg.Wait()
- w.a.V(1).Info("Finished to wait shards from index: %d on workers. Shards indexes [%d:%d)", start+startShardIndex, start+startShardIndex, start+endShardIndex)
- if err != nil {
- w.a.V(1).Warning("Skipping rest of shards due to an error: %v", err)
- return err
- }
- }
- return nil
-}
-
func (w *worker) hostPVCsDataLossDetectedOptions(host *api.Host) (*statefulset.ReconcileOptions, *migrateTableOptions) {
w.a.V(1).
M(host).F().
diff --git a/pkg/controller/chi/worker-wait-exclude-include-restart.go b/pkg/controller/chi/worker-wait-exclude-include-restart.go
index 743a06e65..370aa2867 100644
--- a/pkg/controller/chi/worker-wait-exclude-include-restart.go
+++ b/pkg/controller/chi/worker-wait-exclude-include-restart.go
@@ -22,6 +22,7 @@ import (
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/apis/common/types"
"github.com/altinity/clickhouse-operator/pkg/chop"
+ a "github.com/altinity/clickhouse-operator/pkg/controller/common/announcer"
"github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
"github.com/altinity/clickhouse-operator/pkg/controller/common/poller/domain"
"github.com/altinity/clickhouse-operator/pkg/interfaces"
@@ -29,25 +30,25 @@ import (
)
// waitForIPAddresses waits for all pods to get IP address assigned
-func (w *worker) waitForIPAddresses(ctx context.Context, chi *api.ClickHouseInstallation) {
+func (w *worker) waitForIPAddresses(ctx context.Context, cr *api.ClickHouseInstallation) {
if util.IsContextDone(ctx) {
- log.V(1).Info("Reconcile is aborted. CR polling IP: %s ", chi.GetName())
+ log.V(1).Info("Reconcile is aborted. CR polling IP: %s ", cr.GetName())
return
}
- if chi.IsStopped() {
+ if cr.IsStopped() {
// No need to wait for stopped CHI
return
}
- l := w.a.V(1).M(chi)
+ l := w.a.V(1).M(cr)
l.F().S().Info("wait for IP addresses to be assigned to all pods")
// Let's limit polling time
start := time.Now()
timeout := 1 * time.Minute
- w.c.poll(ctx, chi, func(c *api.ClickHouseInstallation, e error) bool {
+ w.c.poll(ctx, cr, func(c *api.ClickHouseInstallation, e error) bool {
// TODO fix later
// status IPs list can be empty
// Instead of doing in status:
@@ -55,7 +56,7 @@ func (w *worker) waitForIPAddresses(ctx context.Context, chi *api.ClickHouseInst
// cur.EnsureStatus().SetPodIPs(podIPs)
// and here
// c.Status.GetPodIPs()
- podIPs := w.c.getPodsIPs(ctx, chi)
+ podIPs := w.c.getPodsIPs(ctx, cr)
if len(podIPs) >= len(c.Status.GetPods()) {
l.Info("all IP addresses are in place")
// Stop polling
@@ -321,14 +322,19 @@ func (w *worker) catchReplicationLag(ctx context.Context, host *api.Host) error
w.a.V(1).
M(host).F().
+ WithEvent(host.GetCR(), a.EventActionReconcile, a.EventReasonReconcileInProgress).
Info("Wait for host to catch replication lag - START "+
"Host/shard/cluster: %d/%d/%s",
host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ // Host is alive but catching up - add to monitoring so metrics are collected during the wait
+ w.addHostToMonitoring(host)
+
err := w.waitHostHasNoReplicationDelay(ctx, host)
if err == nil {
w.a.V(1).
M(host).F().
+ WithEvent(host.GetCR(), a.EventActionReconcile, a.EventReasonReconcileCompleted).
Info("Wait for host to catch replication lag - COMPLETED "+
"Host/shard/cluster: %d/%d/%s",
host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName,
@@ -338,6 +344,7 @@ func (w *worker) catchReplicationLag(ctx context.Context, host *api.Host) error
} else {
w.a.V(1).
M(host).F().
+ WithEvent(host.GetCR(), a.EventActionReconcile, a.EventReasonReconcileFailed).
Info("Wait for host to catch replication lag - FAILED "+
"Host/shard/cluster: %d/%d/%s"+
"err: %v ",
diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go
index e13a17864..7d202baed 100644
--- a/pkg/controller/chi/worker.go
+++ b/pkg/controller/chi/worker.go
@@ -22,6 +22,8 @@ import (
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "github.com/altinity/queue"
+
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/apis/common/types"
@@ -43,7 +45,6 @@ import (
commonNormalizer "github.com/altinity/clickhouse-operator/pkg/model/common/normalizer"
"github.com/altinity/clickhouse-operator/pkg/model/managers"
"github.com/altinity/clickhouse-operator/pkg/util"
- "github.com/altinity/queue"
)
// FinalizerName specifies name of the finalizer to be used with CHI
@@ -302,11 +303,11 @@ func (w *worker) updateCHI(ctx context.Context, old, new *api.ClickHouseInstalla
new = n.(*api.ClickHouseInstallation)
}
- metrics.CHIRegister(ctx, new)
+ metrics.CRRegister(ctx, new)
if w.deleteCHI(ctx, old, new) {
// CHI is being deleted
- metrics.CHIUnregister(ctx, new)
+ metrics.CRUnregister(ctx, new)
return nil
}
@@ -363,7 +364,7 @@ func (w *worker) finalizeReconcileAndMarkCompleted(ctx context.Context, _cr *api
w.a.V(1).M(_cr).F().S().Info("finalize reconcile")
- // Update CHI object
+ // Update CR object
_ = w.finalizeCR(
ctx,
_cr,
@@ -528,19 +529,19 @@ func (w *worker) logHosts(cr api.ICustomResource) {
})
}
-func (w *worker) createTemplatedCR(_chi *api.ClickHouseInstallation, _opts ...*commonNormalizer.Options[api.ClickHouseInstallation]) *api.ClickHouseInstallation {
- l := w.a.V(1).M(_chi).F()
+func (w *worker) createTemplatedCR(_cr *api.ClickHouseInstallation, _opts ...*commonNormalizer.Options[api.ClickHouseInstallation]) *api.ClickHouseInstallation {
+ l := w.a.V(1).M(_cr).F()
- if _chi.HasAncestor() {
- l.Info("CR has an ancestor, use it as a base for reconcile. CR: %s", util.NamespaceNameString(_chi))
+ if _cr.HasAncestor() {
+ l.Info("CR has an ancestor, use it as a base for reconcile. CR: %s", util.NamespaceNameString(_cr))
} else {
- l.Info("CR has NO ancestor, use empty base for reconcile. CR: %s", util.NamespaceNameString(_chi))
+ l.Info("CR has NO ancestor, use empty base for reconcile. CR: %s", util.NamespaceNameString(_cr))
}
- chi := w.createTemplated(_chi, _opts...)
- chi.SetAncestor(w.createTemplated(_chi.GetAncestorT()))
+ cr := w.createTemplated(_cr, _opts...)
+ cr.SetAncestor(w.createTemplated(_cr.GetAncestorT()))
- return chi
+ return cr
}
func (w *worker) createTemplated(c *api.ClickHouseInstallation, _opts ...*commonNormalizer.Options[api.ClickHouseInstallation]) *api.ClickHouseInstallation {
@@ -548,6 +549,6 @@ func (w *worker) createTemplated(c *api.ClickHouseInstallation, _opts ...*common
if len(_opts) > 0 {
opts = _opts[0]
}
- chi, _ := w.normalizer.CreateTemplated(c, opts)
- return chi
+ cr, _ := w.normalizer.CreateTemplated(c, opts)
+ return cr
}
diff --git a/pkg/controller/chk/controller.go b/pkg/controller/chk/controller.go
index 2fc21653e..2456720b3 100644
--- a/pkg/controller/chk/controller.go
+++ b/pkg/controller/chk/controller.go
@@ -16,6 +16,7 @@ package chk
import (
"context"
+ "github.com/altinity/clickhouse-operator/pkg/chop"
"time"
log "github.com/altinity/clickhouse-operator/pkg/announcer"
@@ -105,3 +106,13 @@ func (c *Controller) poll(ctx context.Context, cr api.ICustomResource, f func(c
}
}
}
+
+func ShouldEnqueue(cr *apiChk.ClickHouseKeeperInstallation) bool {
+ ns := cr.GetNamespace()
+ if !chop.Config().IsNamespaceWatched(ns) {
+ log.V(2).M(cr).Info("skip enqueue, namespace '%s' is not watched or is in deny list", ns)
+ return false
+ }
+
+ return true
+}
diff --git a/pkg/controller/chk/metrics/interface.go b/pkg/controller/chk/metrics/interface.go
new file mode 100644
index 000000000..1fb2d5182
--- /dev/null
+++ b/pkg/controller/chk/metrics/interface.go
@@ -0,0 +1,22 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+type labelsSource interface {
+ GetName() string
+ GetNamespace() string
+ GetLabels() map[string]string
+ GetAnnotations() map[string]string
+}
diff --git a/pkg/controller/chk/metrics/pkg.go b/pkg/controller/chk/metrics/pkg.go
new file mode 100644
index 000000000..ba890c523
--- /dev/null
+++ b/pkg/controller/chk/metrics/pkg.go
@@ -0,0 +1,55 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "context"
+)
+
+func CRInitZeroValues(ctx context.Context, src labelsSource) {
+}
+
+func CRReconcilesStarted(ctx context.Context, src labelsSource) {
+}
+func CRReconcilesCompleted(ctx context.Context, src labelsSource) {
+}
+func CRReconcilesAborted(ctx context.Context, src labelsSource) {
+}
+func CRReconcilesTimings(ctx context.Context, src labelsSource, seconds float64) {
+}
+
+func HostReconcilesStarted(ctx context.Context, src labelsSource) {
+}
+func HostReconcilesCompleted(ctx context.Context, src labelsSource) {
+}
+func HostReconcilesRestart(ctx context.Context, src labelsSource) {
+}
+func HostReconcilesErrors(ctx context.Context, src labelsSource) {
+}
+func HostReconcilesTimings(ctx context.Context, src labelsSource, seconds float64) {
+}
+
+func PodAdd(ctx context.Context) {
+}
+func PodUpdate(ctx context.Context) {
+}
+func PodDelete(ctx context.Context) {
+}
+
+func CRRegister(ctx context.Context, src labelsSource) {
+}
+
+func CRUnregister(ctx context.Context, src labelsSource) {
+}
diff --git a/pkg/controller/chk/worker-app-version.go b/pkg/controller/chk/worker-app-version.go
new file mode 100644
index 000000000..3593be3ec
--- /dev/null
+++ b/pkg/controller/chk/worker-app-version.go
@@ -0,0 +1,29 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/swversion"
+)
+
+func (w *worker) getTagBasedVersion(host *api.Host) *swversion.SoftWareVersion {
+ // Fetch tag from the image
+ var tagBasedVersion *swversion.SoftWareVersion
+ if tag, tagFound := w.task.Creator().GetAppImageTag(host); tagFound {
+ tagBasedVersion = swversion.NewSoftWareVersionFromTag(tag)
+ }
+ return tagBasedVersion
+}
diff --git a/pkg/controller/chk/worker-config-map.go b/pkg/controller/chk/worker-config-map.go
index 515d8f313..c2dc6e05b 100644
--- a/pkg/controller/chk/worker-config-map.go
+++ b/pkg/controller/chk/worker-config-map.go
@@ -21,7 +21,6 @@ import (
core "k8s.io/api/core/v1"
apiErrors "k8s.io/apimachinery/pkg/api/errors"
- log "github.com/altinity/clickhouse-operator/pkg/announcer"
apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
a "github.com/altinity/clickhouse-operator/pkg/controller/common/announcer"
"github.com/altinity/clickhouse-operator/pkg/util"
@@ -33,11 +32,6 @@ func (w *worker) reconcileConfigMap(
cr apiChi.ICustomResource,
configMap *core.ConfigMap,
) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
w.a.V(2).M(cr).S().P()
defer w.a.V(2).M(cr).E().P()
@@ -67,11 +61,6 @@ func (w *worker) reconcileConfigMap(
// updateConfigMap
func (w *worker) updateConfigMap(ctx context.Context, cr apiChi.ICustomResource, configMap *core.ConfigMap) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
updatedConfigMap, err := w.c.updateConfigMap(ctx, configMap)
if err == nil {
w.a.V(1).
@@ -95,11 +84,6 @@ func (w *worker) updateConfigMap(ctx context.Context, cr apiChi.ICustomResource,
// createConfigMap
func (w *worker) createConfigMap(ctx context.Context, cr apiChi.ICustomResource, configMap *core.ConfigMap) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
err := w.c.createConfigMap(ctx, configMap)
if err == nil {
w.a.V(1).
diff --git a/pkg/controller/chk/worker-deleter.go b/pkg/controller/chk/worker-deleter.go
index df73fa53a..c44149a4e 100644
--- a/pkg/controller/chk/worker-deleter.go
+++ b/pkg/controller/chk/worker-deleter.go
@@ -30,7 +30,7 @@ import (
func (w *worker) clean(ctx context.Context, cr api.ICustomResource) {
if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
+ log.V(1).Info("Reconcile clean is aborted. CR: %s ", cr.GetName())
return
}
@@ -45,9 +45,9 @@ func (w *worker) clean(ctx context.Context, cr api.ICustomResource) {
w.a.V(1).M(cr).F().Info("List of successfully reconciled objects:\n%s", w.task.RegistryReconciled())
objs := w.c.discovery(ctx, cr)
need := w.task.RegistryReconciled()
- w.a.V(1).M(cr).F().Info("Existing objects:\n%s", objs)
+ w.a.V(1).M(cr).F().Info("List of existing objects:\n%s", objs)
objs.Subtract(need)
- w.a.V(1).M(cr).F().Info("Non-reconciled objects:\n%s", objs)
+ w.a.V(1).M(cr).F().Info("List of non-reconciled objects:\n%s", objs)
if w.purge(ctx, cr, objs, w.task.RegistryFailed()) > 0 {
util.WaitContextDoneOrTimeout(ctx, 1*time.Minute)
}
@@ -63,7 +63,7 @@ func (w *worker) purge(
reconcileFailedObjs *model.Registry,
) (cnt int) {
if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
+ log.V(1).Info("Purge is aborted. CR: %s ", cr.GetName())
return cnt
}
diff --git a/pkg/controller/chk/worker-monitoring.go b/pkg/controller/chk/worker-monitoring.go
new file mode 100644
index 000000000..a30cfc8b4
--- /dev/null
+++ b/pkg/controller/chk/worker-monitoring.go
@@ -0,0 +1,27 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
+)
+
+// prepareMonitoring prepares monitoring state before reconcile begins
+func (w *worker) prepareMonitoring(cr *api.ClickHouseKeeperInstallation) {
+}
+
+// addToMonitoring adds CR to monitoring
+func (w *worker) addToMonitoring(cr *api.ClickHouseKeeperInstallation) {
+}
diff --git a/pkg/controller/chk/worker-reconciler-chk.go b/pkg/controller/chk/worker-reconciler-chk.go
index c440dd23c..304002e80 100644
--- a/pkg/controller/chk/worker-reconciler-chk.go
+++ b/pkg/controller/chk/worker-reconciler-chk.go
@@ -17,7 +17,7 @@ package chk
import (
"context"
"errors"
- "sync"
+ "fmt"
"time"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -26,7 +26,7 @@ import (
apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/apis/common/types"
- "github.com/altinity/clickhouse-operator/pkg/controller/chi/metrics"
+ "github.com/altinity/clickhouse-operator/pkg/controller/chk/metrics"
"github.com/altinity/clickhouse-operator/pkg/controller/common"
a "github.com/altinity/clickhouse-operator/pkg/controller/common/announcer"
"github.com/altinity/clickhouse-operator/pkg/controller/common/statefulset"
@@ -46,24 +46,60 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *apiChk.ClickHouseKee
common.LogOldAndNew("non-normalized yet (native)", old, new)
+ switch {
+ case w.isAfterFinalizerInstalled(old, new):
+ w.a.M(new).F().Info("isAfterFinalizerInstalled - continue reconcile-1")
+ case w.isGenerationTheSame(old, new):
+ log.V(2).M(new).F().Info("isGenerationTheSame() - nothing to do here, exit")
+ return nil
+ }
+
w.a.M(new).S().P()
defer w.a.M(new).E().P()
- new = w.buildCR(ctx, new)
+ metrics.CRInitZeroValues(ctx, new)
+ metrics.CRReconcilesStarted(ctx, new)
+ startTime := time.Now()
- actionPlan := api.MakeActionPlan(new.GetAncestorT(), new)
- w.a.M(new).V(1).Info(actionPlan.Log("buildCR"))
+ new = w.buildCR(ctx, new)
switch {
- case actionPlan.HasActionsToDo():
+ case new.Spec.Suspend.Value():
+ // If the CR is suspended, skip reconciliation entirely
+ w.a.M(new).F().Info("Suspended CR")
+ if new.EnsureStatus().GetStatus() == api.StatusInProgress || new.EnsureRuntime().ActionPlan.HasActionsToDo() {
+ // Either was mid-reconcile when suspended, or has pending changes suppressed by suspend — mark as Aborted
+ new.EnsureStatus().ReconcileAbort()
+ _ = w.c.updateCRObjectStatus(ctx, new, types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
+ CopyStatusFieldGroup: types.CopyStatusFieldGroup{
+ FieldGroupMain: true,
+ },
+ },
+ })
+ w.a.V(1).
+ WithEvent(new, a.EventActionReconcile, a.EventReasonReconcileFailed).
+ WithAction(new).
+ M(new).F().
+ Warning("reconcile aborted due to suspend")
+ metrics.CRReconcilesAborted(ctx, new)
+ } else {
+ metrics.CRReconcilesCompleted(ctx, new)
+ }
+ return nil
+ case new.EnsureRuntime().ActionPlan.HasActionsToDo():
w.a.M(new).F().Info("ActionPlan has actions - continue reconcile")
+ case w.isAfterFinalizerInstalled(new.GetAncestorT(), new):
+ w.a.M(new).F().Info("isAfterFinalizerInstalled - continue reconcile-2")
default:
w.a.M(new).F().Info("ActionPlan has no actions - abort reconcile")
+ metrics.CRReconcilesCompleted(ctx, new)
return nil
}
- w.markReconcileStart(ctx, new, actionPlan)
- w.setHostStatusesPreliminary(ctx, new, actionPlan)
+ w.markReconcileStart(ctx, new)
+ w.prepareMonitoring(new)
+ w.setHostStatusesPreliminary(ctx, new)
if err := w.reconcile(ctx, new); err != nil {
// Something went wrong
@@ -74,6 +110,7 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *apiChk.ClickHouseKee
err = common.ErrCRUDAbort
w.markReconcileCompletedUnsuccessfully(ctx, new, err)
if errors.Is(err, common.ErrCRUDAbort) {
+ metrics.CRReconcilesAborted(ctx, new)
}
} else {
// Reconcile successful
@@ -84,8 +121,12 @@ func (w *worker) reconcileCR(ctx context.Context, old, new *apiChk.ClickHouseKee
}
w.clean(ctx, new)
+ w.addToMonitoring(new)
w.waitForIPAddresses(ctx, new)
w.finalizeReconcileAndMarkCompleted(ctx, new)
+
+ metrics.CRReconcilesCompleted(ctx, new)
+ metrics.CRReconcilesTimings(ctx, new, time.Since(startTime).Seconds())
}
return nil
@@ -114,6 +155,11 @@ func (w *worker) buildCR(ctx context.Context, _cr *apiChk.ClickHouseKeeperInstal
w.fillCurSTS(ctx, cr)
w.logSWVersion(ctx, cr)
+ actionPlan := api.MakeActionPlan(cr.GetAncestorT(), cr)
+ cr.EnsureRuntime().ActionPlan = actionPlan
+ cr.EnsureStatus().SetActionPlan(actionPlan)
+ w.a.V(1).M(cr).Info(actionPlan.Log("buildCR"))
+
return cr
}
@@ -154,7 +200,7 @@ func (w *worker) logSWVersion(ctx context.Context, cr *apiChk.ClickHouseKeeperIn
l.M(host).Info("Host software version: %s %s", host.GetName(), host.Runtime.Version.Render())
return nil
})
- l.M(cr).Info("CR software versions [min, max]: %s %s", cr.GetMinVersion().Render(), cr.GetMaxVersion().Render())
+ l.M(cr).Info("CR software versions min=%s max=%s", cr.GetMinVersion().Render(), cr.GetMaxVersion().Render())
}
// reconcile reconciles Custom Resource
@@ -180,7 +226,6 @@ func (w *worker) reconcile(ctx context.Context, cr *apiChk.ClickHouseKeeperInsta
ctx,
w.reconcileCRAuxObjectsPreliminary,
w.reconcileCluster,
- w.reconcileShardsAndHosts,
w.reconcileCRAuxObjectsFinal,
)
}
@@ -240,7 +285,7 @@ func (w *worker) reconcileCRServiceFinal(ctx context.Context, cr api.ICustomReso
defer log.V(2).F().E().Info("second stage")
if cr.IsStopped() {
- // Stopped CHI must have no entry point
+ // Stopped CR must have no entry point
return nil
}
@@ -280,7 +325,7 @@ func (w *worker) reconcileCRAuxObjectsFinal(ctx context.Context, cr *apiChk.Clic
}
func (w *worker) includeAllHostsIntoCluster(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation) {
- // Not appropriate
+ // Not applicable
}
// reconcileConfigMapCommon reconciles common ConfigMap
@@ -341,22 +386,16 @@ func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.Host, o
log.V(1).M(host).F().S().Info("reconcile StatefulSet start")
defer log.V(1).M(host).F().E().Info("reconcile StatefulSet end")
- version := w.getHostSoftwareVersion(ctx, host)
- host.Runtime.CurStatefulSet, _ = w.c.kube.STS().Get(ctx, host)
+ w.a.V(1).M(host).F().Info("Reconcile host STS: %s. App version: %s", host.GetName(), host.Runtime.Version.Render())
- w.a.V(1).M(host).F().Info("Reconcile host: %s. App version: %s", host.GetName(), version)
- // In case we have to force-restart host
- // We'll do it via replicas: 0 in StatefulSet.
+ // Start with force-restarting the host
if w.shouldForceRestartHost(ctx, host) {
- w.a.V(1).M(host).F().Info("Reconcile host: %s. Shutting host down due to force restart", host.GetName())
- w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, true)
- _ = w.stsReconciler.ReconcileStatefulSet(ctx, host, false, opts)
- metrics.HostReconcilesRestart(ctx, host.GetCR())
- // At this moment StatefulSet has 0 replicas.
- // First stage of RollingUpdate completed.
+ w.a.V(1).M(host).F().Info("Reconcile host STS force restart: %s", host.GetName())
+ _ = w.hostForceRestart(ctx, host, opts)
}
- w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, false)
+ w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, host.IsStopped())
+ opts = w.prepareStsReconcileOptsWaitSection(host, opts)
// We are in place, where we can reconcile StatefulSet to desired configuration.
w.a.V(1).M(host).F().Info("Reconcile host STS: %s. Reconcile StatefulSet", host.GetName())
@@ -381,6 +420,36 @@ func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.Host, o
return err
}
+func (w *worker) hostForceRestart(ctx context.Context, host *api.Host, opts *statefulset.ReconcileOptions) error {
+ w.a.V(1).M(host).F().Info("Reconcile host. Force restart: %s", host.GetName())
+
+ if host.IsStopped() || (w.hostSoftwareRestart(ctx, host) != nil) {
+ _ = w.hostScaleDown(ctx, host, opts)
+ }
+
+ metrics.HostReconcilesRestart(ctx, host.GetCR())
+
+ return nil
+}
+
+func (w *worker) hostSoftwareRestart(ctx context.Context, host *api.Host) error {
+ return fmt.Errorf("inapplicable so far")
+}
+
+func (w *worker) hostScaleDown(ctx context.Context, host *api.Host, opts *statefulset.ReconcileOptions) error {
+ w.a.V(1).M(host).F().Info("Reconcile host. Host shutdown via scale down: %s", host.GetName())
+
+ w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, true)
+ err := w.stsReconciler.ReconcileStatefulSet(ctx, host, false, opts)
+ if err != nil {
+ w.a.V(1).M(host).F().Info("Host shutdown abort 1. Host: %s err: %v", host.GetName(), err)
+ return err
+ }
+
+ w.a.V(1).M(host).F().Info("Host shutdown success. Host: %s", host.GetName())
+ return nil
+}
+
// reconcileHostService reconciles host's Service
func (w *worker) reconcileHostService(ctx context.Context, host *api.Host) error {
service := w.task.Creator().CreateService(interfaces.ServiceHost, host).First()
@@ -419,6 +488,9 @@ func (w *worker) reconcileCluster(ctx context.Context, cluster *apiChk.Cluster)
if err := w.reconcileClusterPodDisruptionBudget(ctx, cluster); err != nil {
return err
}
+ if err := w.reconcileClusterShardsAndHosts(ctx, cluster); err != nil {
+ return err
+ }
return nil
}
@@ -454,15 +526,17 @@ func (w *worker) reconcileClusterPodDisruptionBudget(ctx context.Context, cluste
return nil
}
-// reconcileShardsAndHosts reconciles shards and hosts of each shard
-func (w *worker) reconcileShardsAndHosts(ctx context.Context, shards []*apiChk.ChkShard) error {
+// reconcileClusterShardsAndHosts reconciles shards and hosts of each shard
+func (w *worker) reconcileClusterShardsAndHosts(ctx context.Context, cluster *apiChk.Cluster) error {
+ shards := cluster.Layout.Shards[:]
+
// Sanity check - has to have shard(s)
if len(shards) == 0 {
return nil
}
- log.V(1).F().S().Info("reconcileShardsAndHosts start")
- defer log.V(1).F().E().Info("reconcileShardsAndHosts end")
+ log.V(1).F().S().Info("reconcileClusterShardsAndHosts start")
+ defer log.V(1).F().E().Info("reconcileClusterShardsAndHosts end")
opts := w.reconcileShardsAndHostsFetchOpts(ctx)
@@ -482,45 +556,17 @@ func (w *worker) reconcileShardsAndHosts(ctx context.Context, shards []*apiChk.C
return err
}
- // Since shard with 0 index is already done, we'll proceed with the 1-st
+ // Since the shard with index 0 is already done, we'll proceed concurrently starting with the 1st
startShard = 1
}
// Process shards using specified concurrency level while maintaining specified max concurrency percentage.
// Loop over shards.
- workersNum := w.getReconcileShardsWorkersNum(shards, opts)
- w.a.V(1).Info("Starting rest of shards on workers: %d", workersNum)
- for startShardIndex := startShard; startShardIndex < len(shards); startShardIndex += workersNum {
- endShardIndex := startShardIndex + workersNum
- if endShardIndex > len(shards) {
- endShardIndex = len(shards)
- }
- concurrentlyProcessedShards := shards[startShardIndex:endShardIndex]
-
- // Processing error protected with mutex
- var err error
- var errLock sync.Mutex
-
- wg := sync.WaitGroup{}
- wg.Add(len(concurrentlyProcessedShards))
- // Launch shard concurrent processing
- for j := range concurrentlyProcessedShards {
- shard := concurrentlyProcessedShards[j]
- go func() {
- defer wg.Done()
- if e := w.reconcileShardWithHosts(ctx, shard); e != nil {
- errLock.Lock()
- err = e
- errLock.Unlock()
- return
- }
- }()
- }
- wg.Wait()
- if err != nil {
- w.a.V(1).Warning("Skipping rest of shards due to an error: %v", err)
- return err
- }
+ workersNum := w.getReconcileShardsWorkersNum(cluster, opts)
+ w.a.V(1).Info("Starting rest of shards on workers. Workers num: %d", workersNum)
+ if err := w.runConcurrently(ctx, workersNum, startShard, shards[startShard:]); err != nil {
+ w.a.V(1).Info("Finished with ERROR rest of shards on workers: %d, err: %v", workersNum, err)
+ return err
}
w.a.V(1).Info("Finished successfully rest of shards on workers: %d", workersNum)
return nil
@@ -564,13 +610,15 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.Host) error {
w.a.V(2).M(host).S().P()
defer w.a.V(2).M(host).E().P()
+ metrics.HostReconcilesStarted(ctx, host.GetCR())
+ startTime := time.Now()
+
if host.IsFirstInCR() {
_ = w.reconcileCRServicePreliminary(ctx, host.GetCR())
defer w.reconcileCRServiceFinal(ctx, host.GetCR())
}
- // Create artifacts
- w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, false)
+ w.a.V(1).M(host).F().Info("Reconcile host: %s. App version: %s", host.GetName(), host.Runtime.Version.Render())
if err := w.reconcileHostPrepare(ctx, host); err != nil {
return err
@@ -608,6 +656,10 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.Host) error {
},
},
})
+
+ metrics.HostReconcilesCompleted(ctx, host.GetCR())
+ metrics.HostReconcilesTimings(ctx, host.GetCR(), time.Since(startTime).Seconds())
+
return nil
}
@@ -629,12 +681,9 @@ func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error {
stsReconcileOpts *statefulset.ReconcileOptions
)
- //if !host.IsLast() {
- // stsReconcileOpts = stsReconcileOpts.SetDoNotWait()
- //}
-
// Reconcile ConfigMap
if err := w.reconcileConfigMapHost(ctx, host); err != nil {
+ metrics.HostReconcilesErrors(ctx, host.GetCR())
w.a.V(1).
M(host).F().
Warning("Reconcile Host Main - unable to reconcile ConfigMap. Host: %s Err: %v", host.GetName(), err)
@@ -645,15 +694,26 @@ func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error {
w.a.V(1).M(host).F().Info("Reconcile PVCs and data loss for host: %s", host.GetName())
- // In case data loss or volumes missing detected we may need to specify additional reconcile options
+ // In case data loss or missing volumes are detected we may
+ // 1. need to specify additional reconcile options
+ // 2. abort the reconcile completely
err := w.reconcileHostPVCs(ctx, host)
+ onDataLoss := host.GetCluster().GetReconcile().StatefulSet.Recreate.OnDataLoss
switch {
case storage.ErrIsDataLoss(err):
+ if onDataLoss == api.OnStatefulSetRecreateOnDataLossActionAbort {
+ w.a.V(1).M(host).F().Warning("Data loss detected for host: %s. Aborting reconcile as configured (onDataLoss: abort)", host.GetName())
+ return common.ErrCRUDAbort
+ }
stsReconcileOpts = w.hostPVCsDataLossDetectedOptions(host)
w.a.V(1).
M(host).F().
Info("Data loss detected for host: %s.", host.GetName())
case storage.ErrIsVolumeMissed(err):
+ if onDataLoss == api.OnStatefulSetRecreateOnDataLossActionAbort {
+ w.a.V(1).M(host).F().Warning("Data volume missed for host: %s. Aborting reconcile as configured (onDataLoss: abort)", host.GetName())
+ return common.ErrCRUDAbort
+ }
// stsReconcileOpts, migrateTableOpts = w.hostPVCsDataVolumeMissedDetectedOptions(host)
stsReconcileOpts = w.hostPVCsDataLossDetectedOptions(host)
w.a.V(1).
@@ -670,6 +730,7 @@ func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error {
// Reconcile StatefulSet
if err := w.reconcileHostStatefulSet(ctx, host, stsReconcileOpts); err != nil {
+ metrics.HostReconcilesErrors(ctx, host.GetCR())
w.a.V(1).
M(host).F().
Warning("Reconcile Host Main - unable to reconcile StatefulSet. Host: %s Err: %v", host.GetName(), err)
@@ -693,6 +754,22 @@ func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error {
return nil
}
+func (w *worker) prepareStsReconcileOptsWaitSection(host *api.Host, opts *statefulset.ReconcileOptions) *statefulset.ReconcileOptions {
+ if host.GetCluster().GetReconcile().Host.Wait.Probes.GetStartup().IsTrue() {
+ opts = opts.SetWaitUntilStarted()
+ w.a.V(1).
+ M(host).F().
+ Warning("Setting option SetWaitUntilStarted ")
+ }
+ if host.GetCluster().GetReconcile().Host.Wait.Probes.GetReadiness().IsTrue() {
+ opts = opts.SetWaitUntilReady()
+ w.a.V(1).
+ M(host).F().
+ Warning("Setting option SetWaitUntilReady")
+ }
+ return opts
+}
+
func (w *worker) reconcileHostPVCs(ctx context.Context, host *api.Host) storage.ErrorDataPersistence {
return storage.NewStorageReconciler(
w.task,
@@ -718,6 +795,14 @@ func (w *worker) reconcileHostMainDomain(ctx context.Context, host *api.Host) er
// reconcileHostIncludeIntoAllActivities includes specified ClickHouse host into all activities
func (w *worker) reconcileHostIncludeIntoAllActivities(ctx context.Context, host *api.Host) error {
+ if !w.shouldIncludeHost(host) {
+ w.a.V(1).
+ M(host).F().
+ Info("No need to include host into cluster. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return nil
+ }
+
// Include host back into all activities - such as cluster, service, etc
err := w.includeHost(ctx, host)
if err != nil {
@@ -728,5 +813,24 @@ func (w *worker) reconcileHostIncludeIntoAllActivities(ctx context.Context, host
return err
}
+ l := w.a.V(1).
+ WithEvent(host.GetCR(), a.EventActionReconcile, a.EventReasonReconcileCompleted).
+ WithAction(host.GetCR()).
+ M(host).F()
+
+ // In case the host is unable to report its version we are done with inclusion
+ switch {
+ case host.IsStopped():
+ l.Info("Reconcile Host completed. Host is stopped: %s", host.GetName())
+ return nil
+ case host.IsTroubleshoot():
+ l.Info("Reconcile Host completed. Host is in troubleshoot mode: %s", host.GetName())
+ return nil
+ }
+
+ // Report host software version
+ version := w.getHostSoftwareVersion(ctx, host)
+ l.Info("Reconcile Host completed. Host: %s ClickHouse version running: %s", host.GetName(), version.Render())
+
return nil
}
diff --git a/pkg/controller/chk/worker-reconciler-helper.go b/pkg/controller/chk/worker-reconciler-helper.go
index d912d2ae3..d3e289f5e 100644
--- a/pkg/controller/chk/worker-reconciler-helper.go
+++ b/pkg/controller/chk/worker-reconciler-helper.go
@@ -16,21 +16,28 @@ package chk
import (
"context"
- apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/controller/common"
- "github.com/altinity/clickhouse-operator/pkg/controller/common/statefulset"
+ "sync"
+ apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/apis/swversion"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/statefulset"
)
func (w *worker) getHostSoftwareVersion(ctx context.Context, host *api.Host) *swversion.SoftWareVersion {
+ // Try to report tag-based version
+ if tagBasedVersion := w.getTagBasedVersion(host); tagBasedVersion.IsKnown() {
+ // Able to report version from the tag
+ return tagBasedVersion.SetDescription("parsed from the tag: '%s'", tagBasedVersion.GetOriginal())
+ }
+
// Unable to acquire any version - report min one
- return swversion.MaxVersion().SetDescription("so far so")
+ return swversion.MinVersion().SetDescription("min - unable to acquire neither from the tag nor from the app")
}
// getReconcileShardsWorkersNum calculates how many workers are allowed to be used for concurrent shard reconcile
-func (w *worker) getReconcileShardsWorkersNum(shards []*apiChk.ChkShard, opts *common.ReconcileShardsAndHostsOptions) int {
+func (w *worker) getReconcileShardsWorkersNum(cluster *apiChk.Cluster, opts *common.ReconcileShardsAndHostsOptions) int {
return 1
}
@@ -45,6 +52,56 @@ func (w *worker) reconcileShardsAndHostsFetchOpts(ctx context.Context) *common.R
}
}
+func (w *worker) runConcurrently(ctx context.Context, workersNum int, startShardIndex int, shards []*apiChk.ChkShard) error {
+ if len(shards) == 0 {
+ return nil
+ }
+
+ type shardReconcile struct {
+ shard *apiChk.ChkShard
+ index int
+ }
+
+ ch := make(chan *shardReconcile)
+ wg := sync.WaitGroup{}
+
+ // Launch tasks feeder
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ defer close(ch)
+ for i, shard := range shards {
+ ch <- &shardReconcile{
+ shard,
+ startShardIndex + i,
+ }
+ }
+ }()
+
+ // Launch workers
+ var err error
+ var errLock sync.Mutex
+ for i := 0; i < workersNum; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for rq := range ch {
+ w.a.V(1).Info("Starting shard index: %d on worker", rq.index)
+ if e := w.reconcileShardWithHosts(ctx, rq.shard); e != nil {
+ errLock.Lock()
+ err = e
+ errLock.Unlock()
+ }
+ }
+ }()
+ }
+
+ w.a.V(1).Info("Starting to wait shards from index: %d on workers.", startShardIndex)
+ wg.Wait()
+ w.a.V(1).Info("Finished to wait shards from index: %d on workers.", startShardIndex)
+ return err
+}
+
func (w *worker) hostPVCsDataLossDetectedOptions(host *api.Host) *statefulset.ReconcileOptions {
w.a.V(1).
M(host).F().
@@ -53,6 +110,7 @@ func (w *worker) hostPVCsDataLossDetectedOptions(host *api.Host) *statefulset.Re
// In case of data loss detection on existing volumes, we need to:
// 1. recreate StatefulSet
// 2. run tables migration again
+
stsReconcileOpts := statefulset.NewReconcileStatefulSetOptions().SetForceRecreate()
return stsReconcileOpts
}
diff --git a/pkg/controller/chk/worker-service.go b/pkg/controller/chk/worker-service.go
index f0ae7b12c..602fb0c52 100644
--- a/pkg/controller/chk/worker-service.go
+++ b/pkg/controller/chk/worker-service.go
@@ -21,6 +21,7 @@ import (
core "k8s.io/api/core/v1"
apiErrors "k8s.io/apimachinery/pkg/api/errors"
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
chi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
a "github.com/altinity/clickhouse-operator/pkg/controller/common/announcer"
"github.com/altinity/clickhouse-operator/pkg/util"
@@ -91,15 +92,19 @@ func (w *worker) updateService(
// spec.resourceVersion is required in order to update an object
newService.ResourceVersion = curService.ResourceVersion
- //
- // Migrate ClusterIP to the new service
- //
- // spec.clusterIP field is immutable, need to use already assigned value
- // From https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
- // Kubernetes assigns this Service an IP address (sometimes called the “cluster IP”), which is used by the Service proxies
- // See also https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
- // You can specify your own cluster IP address as part of a Service creation request. To do this, set the .spec.clusterIP
- newService.Spec.ClusterIP = curService.Spec.ClusterIP
+ if newService.Spec.ClusterIP == core.ClusterIPNone {
+ // In case the new service has no ClusterIP requested, we'll keep it unassigned.
+ // Otherwise we need to migrate IP address assigned earlier to new service in order to reuse it
+ log.V(1).Info("switch service %s to IP-less mode. ClusterIP=None", util.NamespacedName(newService))
+ } else {
+ // Migrate assigned IP value - ClusterIP - to the new service
+ // spec.clusterIP field is immutable, need to use already assigned value
+ // From https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+ // Kubernetes assigns this Service an IP address (sometimes called the “cluster IP”), which is used by the Service proxies
+ // See also https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ // You can specify your own cluster IP address as part of a Service creation request. To do this, set the .spec.clusterIP
+ newService.Spec.ClusterIP = curService.Spec.ClusterIP
+ }
//
// Migrate existing ports to the new service for NodePort and LoadBalancer services
@@ -114,9 +119,13 @@ func (w *worker) updateService(
// No changes in service type is allowed.
// Already exposed port details can not be changed.
- serviceTypeIsNodePort := (curService.Spec.Type == core.ServiceTypeNodePort) && (newService.Spec.Type == core.ServiceTypeNodePort)
- serviceTypeIsLoadBalancer := (curService.Spec.Type == core.ServiceTypeLoadBalancer) && (newService.Spec.Type == core.ServiceTypeLoadBalancer)
- if serviceTypeIsNodePort || serviceTypeIsLoadBalancer {
+ // Service type of new and cur service is the same.
+ // In case it is not the same service has to be just recreated.
+ // So we can check for one type only - let's check for type of new service
+ typeIsNodePort := newService.Spec.Type == core.ServiceTypeNodePort
+ typeIsLoadBalancer := newService.Spec.Type == core.ServiceTypeLoadBalancer
+ if typeIsNodePort || typeIsLoadBalancer {
+ // Migrate cur ports to new service
for i := range newService.Spec.Ports {
newPort := &newService.Spec.Ports[i]
for j := range curService.Spec.Ports {
diff --git a/pkg/controller/chk/worker-status-helpers.go b/pkg/controller/chk/worker-status-helpers.go
index 90e2ba556..d80998b01 100644
--- a/pkg/controller/chk/worker-status-helpers.go
+++ b/pkg/controller/chk/worker-status-helpers.go
@@ -40,6 +40,11 @@ func (w *worker) areUsableOldAndNew(old, new *apiChk.ClickHouseKeeperInstallatio
return true
}
+// isAfterFinalizerInstalled checks whether the finalizer has just been installed
+func (w *worker) isAfterFinalizerInstalled(old, new *apiChk.ClickHouseKeeperInstallation) bool {
+ return false
+}
+
// isGenerationTheSame checks whether old and new CHI have the same generation
func (w *worker) isGenerationTheSame(old, new *apiChk.ClickHouseKeeperInstallation) bool {
if !w.areUsableOldAndNew(old, new) {
diff --git a/pkg/controller/chk/worker-exclude-include-wait.go b/pkg/controller/chk/worker-wait-exclude-include-restart.go
similarity index 76%
rename from pkg/controller/chk/worker-exclude-include-wait.go
rename to pkg/controller/chk/worker-wait-exclude-include-restart.go
index 5334d0c75..d2ff67bda 100644
--- a/pkg/controller/chk/worker-exclude-include-wait.go
+++ b/pkg/controller/chk/worker-wait-exclude-include-restart.go
@@ -24,18 +24,26 @@ import (
"github.com/altinity/clickhouse-operator/pkg/util"
)
-func (w *worker) waitForIPAddresses(ctx context.Context, chk *apiChk.ClickHouseKeeperInstallation) {
+// waitForIPAddresses waits for all pods to get an IP address assigned
+func (w *worker) waitForIPAddresses(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation) {
if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
+ log.V(1).Info("Reconcile is aborted. CR polling IP: %s ", cr.GetName())
return
}
- if chk.IsStopped() {
+
+ if cr.IsStopped() {
// No need to wait for stopped CHI
return
}
- w.a.V(1).M(chk).F().S().Info("wait for IP addresses to be assigned to all pods")
+
+ l := w.a.V(1).M(cr)
+ l.F().S().Info("wait for IP addresses to be assigned to all pods")
+
+ // Let's limit polling time
start := time.Now()
- w.c.poll(ctx, chk, func(c *apiChk.ClickHouseKeeperInstallation, e error) bool {
+ timeout := 1 * time.Minute
+
+ w.c.poll(ctx, cr, func(c *apiChk.ClickHouseKeeperInstallation, e error) bool {
// TODO fix later
// status IPs list can be empty
// Instead of doing in status:
@@ -43,19 +51,21 @@ func (w *worker) waitForIPAddresses(ctx context.Context, chk *apiChk.ClickHouseK
// cur.EnsureStatus().SetPodIPs(podIPs)
// and here
// c.Status.GetPodIPs()
- podIPs := w.c.getPodsIPs(ctx, chk)
+ podIPs := w.c.getPodsIPs(ctx, cr)
if len(podIPs) >= len(c.Status.GetPods()) {
+ l.Info("all IP addresses are in place")
// Stop polling
- w.a.V(1).M(c).Info("all IP addresses are in place")
return false
}
- if time.Since(start) > 1*time.Minute {
+ if time.Since(start) > timeout {
+ l.Warning("not all IP addresses are in place but time has elapsed")
// Stop polling
- w.a.V(1).M(c).Warning("not all IP addresses are in place but time has elapsed")
return false
}
+
+ l.Info("still waiting - not all IP addresses are in place yet")
+
// Continue polling
- w.a.V(1).M(c).Warning("still waiting - not all IP addresses are in place yet")
return true
})
}
@@ -70,13 +80,8 @@ func (w *worker) shouldIncludeHost(host *api.Host) bool {
return true
}
-// includeHost includes host back back into ClickHouse clusters
+// includeHost includes host back into ClickHouse clusters
func (w *worker) includeHost(ctx context.Context, host *api.Host) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
if !w.shouldIncludeHost(host) {
w.a.V(1).
M(host).F().
@@ -90,11 +95,6 @@ func (w *worker) includeHost(ctx context.Context, host *api.Host) error {
// includeHostIntoRaftCluster includes host into raft configuration
func (w *worker) includeHostIntoRaftCluster(ctx context.Context, host *api.Host) {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return
- }
-
w.a.V(1).
M(host).F().
Info("going to include host. Host/shard/cluster: %d/%d/%s",
diff --git a/pkg/controller/chk/worker.go b/pkg/controller/chk/worker.go
index dbe402caa..567b9f85b 100644
--- a/pkg/controller/chk/worker.go
+++ b/pkg/controller/chk/worker.go
@@ -166,14 +166,35 @@ func (w *worker) shouldForceRestartHost(ctx context.Context, host *api.Host) boo
}
}
-func (w *worker) markReconcileStart(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation, ap api.IActionPlan) {
+func (w *worker) finalizeCR(
+ ctx context.Context,
+ obj meta.Object,
+ updateStatusOpts types.UpdateStatusOptions,
+ f func(*apiChk.ClickHouseKeeperInstallation),
+) error {
+ chi, err := w.buildCRFromObj(ctx, obj)
+ if err != nil {
+ log.V(1).Error("Unable to finalize CR: %s err: %v", util.NamespacedName(obj), err)
+ return err
+ }
+
+ if f != nil {
+ f(chi)
+ }
+
+ _ = w.c.updateCRObjectStatus(ctx, chi, updateStatusOpts)
+
+ return nil
+}
+
+func (w *worker) markReconcileStart(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation) {
if util.IsContextDone(ctx) {
log.V(1).Info("Reconcile is aborted. cr: %s ", cr.GetName())
return
}
// Write desired normalized CHI with initialized .Status, so it would be possible to monitor progress
- cr.EnsureStatus().ReconcileStart(ap.GetRemovedHostsNum())
+ cr.EnsureStatus().ReconcileStart(cr.EnsureRuntime().ActionPlan)
_ = w.c.updateCRObjectStatus(ctx, cr, types.UpdateStatusOptions{
CopyStatusOptions: types.CopyStatusOptions{
CopyStatusFieldGroup: types.CopyStatusFieldGroup{
@@ -188,7 +209,7 @@ func (w *worker) markReconcileStart(ctx context.Context, cr *apiChk.ClickHouseKe
WithActions(cr).
M(cr).F().
Info("reconcile started, task id: %s", cr.GetSpecT().GetTaskID())
- w.a.V(2).M(cr).F().Info("action plan\n%s\n", ap.String())
+ w.a.V(2).M(cr).F().Info("action plan\n%s\n", cr.EnsureRuntime().ActionPlan.String())
}
func (w *worker) finalizeReconcileAndMarkCompleted(ctx context.Context, _cr *apiChk.ClickHouseKeeperInstallation) {
@@ -199,34 +220,23 @@ func (w *worker) finalizeReconcileAndMarkCompleted(ctx context.Context, _cr *api
w.a.V(1).M(_cr).F().S().Info("finalize reconcile")
- // Update CHI object
- if chi, err := w.createCRFromObjectMeta(_cr, true, commonNormalizer.NewOptions[apiChk.ClickHouseKeeperInstallation]()); err == nil {
- w.a.V(1).M(chi).Info("updating endpoints for CR-2 %s", chi.Name)
- ips := w.c.getPodsIPs(ctx, chi)
- w.a.V(1).M(chi).Info("IPs of the CR-2 finalize reconcile %s/%s: len: %d %v", chi.Namespace, chi.Name, len(ips), ips)
- opts := commonNormalizer.NewOptions[apiChk.ClickHouseKeeperInstallation]()
- opts.DefaultUserAdditionalIPs = ips
- if chi, err := w.createCRFromObjectMeta(_cr, true, opts); err == nil {
- w.a.V(1).M(chi).Info("Update users IPS-2")
- chi.SetAncestor(chi.GetTarget())
- chi.SetTarget(nil)
- chi.EnsureStatus().ReconcileComplete()
- // TODO unify with update endpoints
- w.newTask(chi, chi.GetAncestorT())
- //w.reconcileConfigMapCommonUsers(ctx, chi)
- w.c.updateCRObjectStatus(ctx, chi, types.UpdateStatusOptions{
- CopyStatusOptions: types.CopyStatusOptions{
- CopyStatusFieldGroup: types.CopyStatusFieldGroup{
- FieldGroupWholeStatus: true,
- },
+ // Update CR object
+ _ = w.finalizeCR(
+ ctx,
+ _cr,
+ types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
+ CopyStatusFieldGroup: types.CopyStatusFieldGroup{
+ FieldGroupWholeStatus: true,
},
- })
- } else {
- w.a.M(_cr).F().Error("internal unable to find CR by %v err: %v", _cr.GetLabels(), err)
- }
- } else {
- w.a.M(_cr).F().Error("external unable to find CR by %v err %v", _cr.GetLabels(), err)
- }
+ },
+ },
+ func(c *apiChk.ClickHouseKeeperInstallation) {
+ c.SetAncestor(c.GetTarget())
+ c.SetTarget(nil)
+ c.EnsureStatus().ReconcileComplete()
+ },
+ )
w.a.V(1).
WithEvent(_cr, a.EventActionReconcile, a.EventReasonReconcileCompleted).
@@ -264,14 +274,14 @@ func (w *worker) markReconcileCompletedUnsuccessfully(ctx context.Context, cr *a
Warning("reconcile completed UNSUCCESSFULLY, task id: %s", cr.GetSpecT().GetTaskID())
}
-func (w *worker) setHostStatusesPreliminary(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation, ap api.IActionPlan) {
+func (w *worker) setHostStatusesPreliminary(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation) {
if util.IsContextDone(ctx) {
log.V(1).Info("Reconcile is aborted. cr: %s ", cr.GetName())
return
}
existingObjects := w.c.discovery(ctx, cr)
- ap.WalkAdded(
+ cr.EnsureRuntime().ActionPlan.WalkAdded(
// Walk over added clusters
func(cluster api.ICluster) {
w.a.V(1).M(cr).Info("Walking over AP added clusters. Cluster: %s", cluster.GetName())
@@ -324,7 +334,7 @@ func (w *worker) setHostStatusesPreliminary(ctx context.Context, cr *apiChk.Clic
},
)
- ap.WalkModified(
+ cr.EnsureRuntime().ActionPlan.WalkModified(
func(cluster api.ICluster) {
w.a.V(1).M(cr).Info("Walking over AP modified clusters. Cluster: %s", cluster.GetName())
},
@@ -375,19 +385,19 @@ func (w *worker) logHosts(cr api.ICustomResource) {
})
}
-func (w *worker) createTemplatedCR(_chk *apiChk.ClickHouseKeeperInstallation, _opts ...*commonNormalizer.Options[apiChk.ClickHouseKeeperInstallation]) *apiChk.ClickHouseKeeperInstallation {
- l := w.a.V(1).M(_chk).F()
+func (w *worker) createTemplatedCR(_cr *apiChk.ClickHouseKeeperInstallation, _opts ...*commonNormalizer.Options[apiChk.ClickHouseKeeperInstallation]) *apiChk.ClickHouseKeeperInstallation {
+ l := w.a.V(1).M(_cr).F()
- if _chk.HasAncestor() {
- l.Info("CR has an ancestor, use it as a base for reconcile. CR: %s", util.NamespaceNameString(_chk))
+ if _cr.HasAncestor() {
+ l.Info("CR has an ancestor, use it as a base for reconcile. CR: %s", util.NamespaceNameString(_cr))
} else {
- l.Info("CR has NO ancestor, use empty base for reconcile. CR: %s", util.NamespaceNameString(_chk))
+ l.Info("CR has NO ancestor, use empty base for reconcile. CR: %s", util.NamespaceNameString(_cr))
}
- chk := w.createTemplated(_chk, _opts...)
- chk.SetAncestor(w.createTemplated(_chk.GetAncestorT()))
+ cr := w.createTemplated(_cr, _opts...)
+ cr.SetAncestor(w.createTemplated(_cr.GetAncestorT()))
- return chk
+ return cr
}
func (w *worker) createTemplated(c *apiChk.ClickHouseKeeperInstallation, _opts ...*commonNormalizer.Options[apiChk.ClickHouseKeeperInstallation]) *apiChk.ClickHouseKeeperInstallation {
@@ -395,8 +405,8 @@ func (w *worker) createTemplated(c *apiChk.ClickHouseKeeperInstallation, _opts .
if len(_opts) > 0 {
opts = _opts[0]
}
- chk, _ := w.normalizer.CreateTemplated(c, opts)
- return chk
+ cr, _ := w.normalizer.CreateTemplated(c, opts)
+ return cr
}
// getRaftGeneratorOptions build base set of RaftOptions
@@ -417,25 +427,3 @@ func (w *worker) options() *config.FilesGeneratorOptions {
w.a.Info("RaftOptions: %s", opts)
return config.NewFilesGeneratorOptions().SetRaftOptions(opts)
}
-
-// createCRFromObjectMeta
-func (w *worker) createCRFromObjectMeta(
- meta meta.Object,
- isCHI bool,
- options *commonNormalizer.Options[apiChk.ClickHouseKeeperInstallation],
-) (*apiChk.ClickHouseKeeperInstallation, error) {
- w.a.V(3).M(meta).S().P()
- defer w.a.V(3).M(meta).E().P()
-
- chi, err := w.c.GetCHIByObjectMeta(meta, isCHI)
- if err != nil {
- return nil, err
- }
-
- chi, err = w.normalizer.CreateTemplated(chi, options)
- if err != nil {
- return nil, err
- }
-
- return chi, nil
-}
diff --git a/pkg/controller/common/statefulset/statefulset-reconciler.go b/pkg/controller/common/statefulset/statefulset-reconciler.go
index 901cd4704..c2e571753 100644
--- a/pkg/controller/common/statefulset/statefulset-reconciler.go
+++ b/pkg/controller/common/statefulset/statefulset-reconciler.go
@@ -167,17 +167,15 @@ func (r *Reconciler) ReconcileStatefulSet(
switch {
case opts.ForceRecreate():
// Force recreate prevails over all other requests
- _ = r.recreateStatefulSet(ctx, host, register, opts)
+ err = r.recreateStatefulSet(ctx, host, register, opts)
+ case apiErrors.IsNotFound(err):
+ // StatefulSet not found in k8s — create it
+ err = r.createStatefulSet(ctx, host, register, opts)
default:
- // We have (or had in the past) StatefulSet - try to update|recreate it
+ // We have StatefulSet - try to update|recreate it
err = r.updateStatefulSet(ctx, host, register, opts)
}
- if apiErrors.IsNotFound(err) {
- // StatefulSet not found - even during Update process - try to create it
- err = r.createStatefulSet(ctx, host, register, opts)
- }
-
// Host has to know current StatefulSet and Pod
host.Runtime.CurStatefulSet, _ = r.sts.Get(ctx, newStatefulSet)
@@ -252,47 +250,81 @@ func (r *Reconciler) updateStatefulSet(ctx context.Context, host *api.Host, regi
action := common.ErrCRUDRecreate
if k8s.IsStatefulSetReady(curStatefulSet) {
- action = r.doUpdateStatefulSet(ctx, curStatefulSet, newStatefulSet, host, opts)
+ if action = r.doUpdateStatefulSet(ctx, curStatefulSet, newStatefulSet, host, opts); action == nil {
+ // Straightforward success
+ if register {
+ host.GetCR().IEnsureStatus().HostUpdated()
+ _ = r.cr.StatusUpdate(ctx, host.GetCR(), types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
+ CopyStatusFieldGroup: types.CopyStatusFieldGroup{
+ FieldGroupMain: true,
+ },
+ },
+ })
+ }
+ r.a.V(1).
+ WithEvent(host.GetCR(), a.EventActionUpdate, a.EventReasonUpdateCompleted).
+ WithAction(host.GetCR()).
+ M(host).F().
+ Info("Update StatefulSet(%s/%s) - completed", namespace, name)
+
+ // All is done here
+ return nil
+ }
}
+ // Something is incorrect - decide on the next steps
+
switch action {
- case nil:
- if register {
- host.GetCR().IEnsureStatus().HostUpdated()
- _ = r.cr.StatusUpdate(ctx, host.GetCR(), types.UpdateStatusOptions{
- CopyStatusOptions: types.CopyStatusOptions{
- CopyStatusFieldGroup: types.CopyStatusFieldGroup{
- FieldGroupMain: true,
- },
- },
- })
+ case common.ErrCRUDRecreate:
+ // Second attempt requested
+
+ onUpdateFailure := host.GetCluster().GetReconcile().StatefulSet.Recreate.OnUpdateFailure
+ if onUpdateFailure == api.OnStatefulSetRecreateOnUpdateFailureActionAbort {
+ r.a.V(1).M(host).Warning("Update StatefulSet(%s/%s) - would need recreate but aborting as configured (onUpdateFailure: abort)", namespace, name)
+ return common.ErrCRUDAbort
}
- r.a.V(1).
- WithEvent(host.GetCR(), a.EventActionUpdate, a.EventReasonUpdateCompleted).
+
+ // Continue second attempt
+ r.a.WithEvent(host.GetCR(), a.EventActionUpdate, a.EventReasonUpdateInProgress).
WithAction(host.GetCR()).
M(host).F().
- Info("Update StatefulSet(%s/%s) - completed", namespace, name)
- return nil
+ Info("Update StatefulSet(%s/%s) switch from Update to Recreate", namespace, name)
+ common.DumpStatefulSetDiff(host, curStatefulSet, newStatefulSet)
+ return r.recreateStatefulSet(ctx, host, register, opts)
+
+ default:
+ // Decide on other non-successful cases
+ return r.shouldAbortOrContinueUpdateStatefulSet(action, host)
+ }
+}
+
+func (r *Reconciler) shouldAbortOrContinueUpdateStatefulSet(action error, host *api.Host) error {
+ newStatefulSet := host.Runtime.DesiredStatefulSet
+ namespace := newStatefulSet.Namespace
+ name := newStatefulSet.Name
+
+ switch action {
case common.ErrCRUDAbort:
+ // Abort
r.a.V(1).M(host).Info("Update StatefulSet(%s/%s) - got abort. Abort", namespace, name)
return common.ErrCRUDAbort
+
case common.ErrCRUDIgnore:
+ // Continue
r.a.V(1).M(host).Info("Update StatefulSet(%s/%s) - got ignore. Ignore", namespace, name)
return nil
- case common.ErrCRUDRecreate:
- r.a.WithEvent(host.GetCR(), a.EventActionUpdate, a.EventReasonUpdateInProgress).
- WithAction(host.GetCR()).
- M(host).F().
- Info("Update StatefulSet(%s/%s) switch from Update to Recreate", namespace, name)
- common.DumpStatefulSetDiff(host, curStatefulSet, newStatefulSet)
- return r.recreateStatefulSet(ctx, host, register, opts)
+
case common.ErrCRUDUnexpectedFlow:
+ // Continue
r.a.V(1).M(host).Warning("Got unexpected flow action. Ignore and continue for now")
return nil
- }
- r.a.V(1).M(host).Warning("Got unexpected flow. This is strange. Ignore and continue for now")
- return nil
+ default:
+ // Continue
+ r.a.V(1).M(host).Warning("Got unexpected flow. This is strange. Ignore and continue for now")
+ return nil
+ }
}
// createStatefulSet
@@ -321,37 +353,53 @@ func (r *Reconciler) createStatefulSet(ctx context.Context, host *api.Host, regi
})
}
+ return r.shouldAbortOrContinueCreateStatefulSet(action, host)
+}
+
+func (r *Reconciler) shouldAbortOrContinueCreateStatefulSet(action error, host *api.Host) error {
+ statefulSet := host.Runtime.DesiredStatefulSet
switch action {
case nil:
+ // Continue
r.a.V(1).
WithEvent(host.GetCR(), a.EventActionCreate, a.EventReasonCreateCompleted).
WithAction(host.GetCR()).
M(host).F().
Info("Create StatefulSet: %s - completed", util.NamespaceNameString(statefulSet))
return nil
+
case common.ErrCRUDAbort:
+ // Abort
r.a.WithEvent(host.GetCR(), a.EventActionCreate, a.EventReasonCreateFailed).
WithAction(host.GetCR()).
WithError(host.GetCR()).
M(host).F().
Error("Create StatefulSet: %s - failed with error: %v", util.NamespaceNameString(statefulSet), action)
- return action
+ return common.ErrCRUDAbort
+
case common.ErrCRUDIgnore:
+ // Continue
r.a.WithEvent(host.GetCR(), a.EventActionCreate, a.EventReasonCreateFailed).
WithAction(host.GetCR()).
M(host).F().
Warning("Create StatefulSet: %s - error ignored", util.NamespaceNameString(statefulSet))
return nil
+
case common.ErrCRUDRecreate:
+ // Continue
r.a.V(1).M(host).Warning("Got recreate action. Ignore and continue for now")
return nil
+
case common.ErrCRUDUnexpectedFlow:
+ // Continue
r.a.V(1).M(host).Warning("Got unexpected flow action. Ignore and continue for now")
return nil
- }
- r.a.V(1).M(host).Warning("Got unexpected flow. This is strange. Ignore and continue for now")
- return nil
+ default:
+ // Continue
+ r.a.V(1).M(host).Warning("Got unexpected flow. This is strange. Ignore and continue for now")
+ return nil
+ }
}
// createStatefulSet is an internal function, used in reconcileStatefulSet only
@@ -459,6 +507,7 @@ func (r *Reconciler) doDeleteStatefulSet(ctx context.Context, host *api.Host) er
namespace := host.Runtime.Address.Namespace
log.V(1).M(host).F().Info("%s/%s", namespace, name)
+ // Fetch cur host's StatefulSet
var err error
host.Runtime.CurStatefulSet, err = r.sts.Get(ctx, host)
if err != nil {
@@ -471,7 +520,7 @@ func (r *Reconciler) doDeleteStatefulSet(ctx context.Context, host *api.Host) er
return err
}
- // Scale StatefulSet down to 0 pods count.
+ // Scale cur host's StatefulSet down to 0 pods count.
// This is the proper and graceful way to delete StatefulSet
var zero int32 = 0
host.Runtime.CurStatefulSet.Spec.Replicas = &zero
@@ -486,7 +535,6 @@ func (r *Reconciler) doDeleteStatefulSet(ctx context.Context, host *api.Host) er
// And now delete empty StatefulSet
if err := r.sts.Delete(ctx, namespace, name); err == nil {
log.V(1).M(host).Info("OK delete StatefulSet %s/%s", namespace, name)
- // r.hostSTSPoller.WaitHostStatefulSetDeleted(host)
} else if apiErrors.IsNotFound(err) {
log.V(1).M(host).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name)
} else {
diff --git a/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go
index 2fceb60f9..aea697f3a 100644
--- a/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go
+++ b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go
@@ -17,6 +17,8 @@ package clickhouse
import (
"context"
"database/sql"
+ "fmt"
+
"github.com/MakeNowJust/heredoc"
"github.com/altinity/clickhouse-operator/pkg/model/clickhouse"
@@ -28,17 +30,18 @@ const (
SELECT
database,
table,
- toString(is_session_expired) AS is_session_expired
+ '1' AS session_expired
FROM system.replicas
+ WHERE is_session_expired
`
- queryMetricsSQL = `
+ queryMetricsSQLTemplate = `
SELECT
concat('metric.', metric) AS metric,
toString(value) AS value,
'' AS description,
'gauge' AS type
- FROM merge('system','^(metrics|custom_metrics)$')
+ FROM %s
UNION ALL
SELECT
concat('metric.', metric) AS metric,
@@ -116,7 +119,7 @@ const (
toString(free_space) AS free_space,
toString(total_space) AS total_space
FROM system.disks
- WHERE type IN ('local','Local')
+ WHERE type IN ('local','Local')
`
queryDetachedPartsSQL = `
@@ -135,27 +138,43 @@ const (
`
)
-// ClickHouseMetricsFetcher specifies clickhouse fetcher object
-type ClickHouseMetricsFetcher struct {
+// MetricsFetcher specifies clickhouse fetcher object
+type MetricsFetcher struct {
connectionParams *clickhouse.EndpointConnectionParams
+ tablesRegexp string
}
-// NewClickHouseFetcher creates new clickhouse fetcher object
-func NewClickHouseFetcher(endpointConnectionParams *clickhouse.EndpointConnectionParams) *ClickHouseMetricsFetcher {
- return &ClickHouseMetricsFetcher{
+// NewMetricsFetcher creates new clickhouse fetcher object
+func NewMetricsFetcher(
+ endpointConnectionParams *clickhouse.EndpointConnectionParams,
+ tablesRegexp string,
+) *MetricsFetcher {
+ return &MetricsFetcher{
connectionParams: endpointConnectionParams,
+ tablesRegexp: tablesRegexp,
}
}
-func (f *ClickHouseMetricsFetcher) connection() *clickhouse.Connection {
+// connection is a connection getter
+func (f *MetricsFetcher) connection() *clickhouse.Connection {
return clickhouse.GetPooledDBConnection(f.connectionParams)
}
+// buildMetricsTableSource returns the merge()-based FROM clause for the metrics query.
+// When tablesRegexp is empty, the default regexp '^(metrics|custom_metrics)$' is used.
+func (f *MetricsFetcher) buildMetricsTableSource() string {
+ if f.tablesRegexp == "" {
+ return "merge('system','^(metrics|custom_metrics)$')"
+ }
+ return fmt.Sprintf("merge('system','%s')", f.tablesRegexp)
+}
+
// getClickHouseQueryMetrics requests metrics data from ClickHouse
-func (f *ClickHouseMetricsFetcher) getClickHouseQueryMetrics(ctx context.Context) (Table, error) {
+func (f *MetricsFetcher) getClickHouseQueryMetrics(ctx context.Context) (Table, error) {
+ metricsSQL := fmt.Sprintf(queryMetricsSQLTemplate, f.buildMetricsTableSource())
return f.clickHouseQueryScanRows(
ctx,
- queryMetricsSQL,
+ metricsSQL,
func(rows *sql.Rows, data *Table) error {
var metric, value, description, _type string
if err := rows.Scan(&metric, &value, &description, &_type); err == nil {
@@ -167,7 +186,7 @@ func (f *ClickHouseMetricsFetcher) getClickHouseQueryMetrics(ctx context.Context
}
// getClickHouseSystemParts requests data sizes from ClickHouse
-func (f *ClickHouseMetricsFetcher) getClickHouseSystemParts(ctx context.Context) (Table, error) {
+func (f *MetricsFetcher) getClickHouseSystemParts(ctx context.Context) (Table, error) {
return f.clickHouseQueryScanRows(
ctx,
querySystemPartsSQL,
@@ -189,7 +208,7 @@ func (f *ClickHouseMetricsFetcher) getClickHouseSystemParts(ctx context.Context)
}
// getClickHouseQuerySystemReplicas requests replica information from ClickHouse
-func (f *ClickHouseMetricsFetcher) getClickHouseQuerySystemReplicas(ctx context.Context) (Table, error) {
+func (f *MetricsFetcher) getClickHouseQuerySystemReplicas(ctx context.Context) (Table, error) {
return f.clickHouseQueryScanRows(
ctx,
querySystemReplicasSQL,
@@ -204,7 +223,7 @@ func (f *ClickHouseMetricsFetcher) getClickHouseQuerySystemReplicas(ctx context.
}
// getClickHouseQueryMutations requests mutations information from ClickHouse
-func (f *ClickHouseMetricsFetcher) getClickHouseQueryMutations(ctx context.Context) (Table, error) {
+func (f *MetricsFetcher) getClickHouseQueryMutations(ctx context.Context) (Table, error) {
return f.clickHouseQueryScanRows(
ctx,
queryMutationsSQL,
@@ -219,7 +238,7 @@ func (f *ClickHouseMetricsFetcher) getClickHouseQueryMutations(ctx context.Conte
}
// getClickHouseQuerySystemDisks requests used disks information from ClickHouse
-func (f *ClickHouseMetricsFetcher) getClickHouseQuerySystemDisks(ctx context.Context) (Table, error) {
+func (f *MetricsFetcher) getClickHouseQuerySystemDisks(ctx context.Context) (Table, error) {
return f.clickHouseQueryScanRows(
ctx,
querySystemDisksSQL,
@@ -234,7 +253,7 @@ func (f *ClickHouseMetricsFetcher) getClickHouseQuerySystemDisks(ctx context.Con
}
// getClickHouseQueryDetachedParts requests detached parts reasons from ClickHouse
-func (f *ClickHouseMetricsFetcher) getClickHouseQueryDetachedParts(ctx context.Context) (Table, error) {
+func (f *MetricsFetcher) getClickHouseQueryDetachedParts(ctx context.Context) (Table, error) {
return f.clickHouseQueryScanRows(
ctx,
queryDetachedPartsSQL,
@@ -249,7 +268,7 @@ func (f *ClickHouseMetricsFetcher) getClickHouseQueryDetachedParts(ctx context.C
}
// clickHouseQueryScanRows scan all rows by external scan function
-func (f *ClickHouseMetricsFetcher) clickHouseQueryScanRows(
+func (f *MetricsFetcher) clickHouseQueryScanRows(
ctx context.Context,
sql string,
scanner ScanFunction,
diff --git a/pkg/metrics/clickhouse/collector.go b/pkg/metrics/clickhouse/collector.go
new file mode 100644
index 000000000..604b3c803
--- /dev/null
+++ b/pkg/metrics/clickhouse/collector.go
@@ -0,0 +1,145 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clickhouse
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ log "github.com/golang/glog"
+
+ "github.com/altinity/clickhouse-operator/pkg/apis/metrics"
+)
+
+// Collector collects metrics from a single ClickHouse host
+type Collector struct {
+ fetcher *MetricsFetcher
+ writer *CHIPrometheusWriter
+}
+
+// NewCollector creates a new Collector instance
+func NewCollector(fetcher *MetricsFetcher, writer *CHIPrometheusWriter) *Collector {
+ return &Collector{
+ fetcher: fetcher,
+ writer: writer,
+ }
+}
+
+// CollectHostMetrics runs all metric collectors for a host in parallel
+func (c *Collector) CollectHostMetrics(ctx context.Context, host *metrics.WatchedHost) {
+ wg := sync.WaitGroup{}
+ wg.Add(6)
+ go func() { defer wg.Done(); c.collectSystemMetrics(ctx, host) }()
+ go func() { defer wg.Done(); c.collectSystemParts(ctx, host) }()
+ go func() { defer wg.Done(); c.collectSystemReplicas(ctx, host) }()
+ go func() { defer wg.Done(); c.collectMutations(ctx, host) }()
+ go func() { defer wg.Done(); c.collectSystemDisks(ctx, host) }()
+ go func() { defer wg.Done(); c.collectDetachedParts(ctx, host) }()
+ wg.Wait()
+}
+
+func (c *Collector) collectSystemMetrics(ctx context.Context, host *metrics.WatchedHost) {
+ log.V(1).Infof("Querying system metrics for host %s", host.Hostname)
+ start := time.Now()
+ metrics, err := c.fetcher.getClickHouseQueryMetrics(ctx)
+ elapsed := time.Since(start)
+ if err == nil {
+ log.V(1).Infof("Extracted [%s] %d system metrics for host %s", elapsed, len(metrics), host.Hostname)
+ c.writer.WriteMetrics(metrics)
+ c.writer.WriteOKFetch("system.metrics")
+ } else {
+ log.Warningf("Error [%s] querying system.metrics for host %s err: %s", elapsed, host.Hostname, err)
+ c.writer.WriteErrorFetch("system.metrics")
+ }
+}
+
+func (c *Collector) collectSystemParts(ctx context.Context, host *metrics.WatchedHost) {
+ log.V(1).Infof("Querying table sizes for host %s", host.Hostname)
+ start := time.Now()
+ systemPartsData, err := c.fetcher.getClickHouseSystemParts(ctx)
+ elapsed := time.Since(start)
+ if err == nil {
+ log.V(1).Infof("Extracted [%s] %d table sizes for host %s", elapsed, len(systemPartsData), host.Hostname)
+ c.writer.WriteTableSizes(systemPartsData)
+ c.writer.WriteOKFetch("table sizes")
+ c.writer.WriteSystemParts(systemPartsData)
+ c.writer.WriteOKFetch("system parts")
+ } else {
+ log.Warningf("Error [%s] querying system.parts for host %s err: %s", elapsed, host.Hostname, err)
+ c.writer.WriteErrorFetch("table sizes")
+ c.writer.WriteErrorFetch("system parts")
+ }
+}
+
+func (c *Collector) collectSystemReplicas(ctx context.Context, host *metrics.WatchedHost) {
+ log.V(1).Infof("Querying system replicas for host %s", host.Hostname)
+ start := time.Now()
+ systemReplicas, err := c.fetcher.getClickHouseQuerySystemReplicas(ctx)
+ elapsed := time.Since(start)
+ if err == nil {
+ log.V(1).Infof("Extracted [%s] %d system replicas for host %s", elapsed, len(systemReplicas), host.Hostname)
+ c.writer.WriteSystemReplicas(systemReplicas)
+ c.writer.WriteOKFetch("system.replicas")
+ } else {
+ log.Warningf("Error [%s] querying system.replicas for host %s err: %s", elapsed, host.Hostname, err)
+ c.writer.WriteErrorFetch("system.replicas")
+ }
+}
+
+func (c *Collector) collectMutations(ctx context.Context, host *metrics.WatchedHost) {
+ log.V(1).Infof("Querying mutations for host %s", host.Hostname)
+ start := time.Now()
+ mutations, err := c.fetcher.getClickHouseQueryMutations(ctx)
+ elapsed := time.Since(start)
+ if err == nil {
+ log.V(1).Infof("Extracted [%s] %d mutations for %s", elapsed, len(mutations), host.Hostname)
+ c.writer.WriteMutations(mutations)
+ c.writer.WriteOKFetch("system.mutations")
+ } else {
+ log.Warningf("Error [%s] querying system.mutations for host %s err: %s", elapsed, host.Hostname, err)
+ c.writer.WriteErrorFetch("system.mutations")
+ }
+}
+
+func (c *Collector) collectSystemDisks(ctx context.Context, host *metrics.WatchedHost) {
+ log.V(1).Infof("Querying disks for host %s", host.Hostname)
+ start := time.Now()
+ disks, err := c.fetcher.getClickHouseQuerySystemDisks(ctx)
+ elapsed := time.Since(start)
+ if err == nil {
+ log.V(1).Infof("Extracted [%s] %d disks for host %s", elapsed, len(disks), host.Hostname)
+ c.writer.WriteSystemDisks(disks)
+ c.writer.WriteOKFetch("system.disks")
+ } else {
+ log.Warningf("Error [%s] querying system.disks for host %s err: %s", elapsed, host.Hostname, err)
+ c.writer.WriteErrorFetch("system.disks")
+ }
+}
+
+func (c *Collector) collectDetachedParts(ctx context.Context, host *metrics.WatchedHost) {
+ log.V(1).Infof("Querying detached parts for host %s", host.Hostname)
+ start := time.Now()
+ detachedParts, err := c.fetcher.getClickHouseQueryDetachedParts(ctx)
+ elapsed := time.Since(start)
+ if err == nil {
+ log.V(1).Infof("Extracted [%s] %d detached parts info for host %s", elapsed, len(detachedParts), host.Hostname)
+ c.writer.WriteDetachedParts(detachedParts)
+ c.writer.WriteOKFetch("system.detached_parts")
+ } else {
+ log.Warningf("Error [%s] querying system.detached_parts for host %s err: %s", elapsed, host.Hostname, err)
+ c.writer.WriteErrorFetch("system.detached_parts")
+ }
+}
diff --git a/pkg/metrics/clickhouse/chi_index.go b/pkg/metrics/clickhouse/cr_index.go
similarity index 65%
rename from pkg/metrics/clickhouse/chi_index.go
rename to pkg/metrics/clickhouse/cr_index.go
index 29904d6db..5cb299850 100644
--- a/pkg/metrics/clickhouse/chi_index.go
+++ b/pkg/metrics/clickhouse/cr_index.go
@@ -16,17 +16,21 @@ package clickhouse
import "github.com/altinity/clickhouse-operator/pkg/apis/metrics"
-type chInstallationsIndex map[string]*metrics.WatchedCR
+type crInstallationsIndex map[string]*metrics.WatchedCR
-func (i chInstallationsIndex) slice() []*metrics.WatchedCR {
+func newCRInstallationsIndex() crInstallationsIndex {
+ return make(map[string]*metrics.WatchedCR)
+}
+
+func (i crInstallationsIndex) slice() []*metrics.WatchedCR {
res := make([]*metrics.WatchedCR, 0)
- for _, chi := range i {
- res = append(res, chi)
+ for _, cr := range i {
+ res = append(res, cr)
}
return res
}
-func (i chInstallationsIndex) get(key string) (*metrics.WatchedCR, bool) {
+func (i crInstallationsIndex) get(key string) (*metrics.WatchedCR, bool) {
if i == nil {
return nil, false
}
@@ -36,14 +40,14 @@ func (i chInstallationsIndex) get(key string) (*metrics.WatchedCR, bool) {
return nil, false
}
-func (i chInstallationsIndex) set(key string, value *metrics.WatchedCR) {
+func (i crInstallationsIndex) set(key string, value *metrics.WatchedCR) {
if i == nil {
return
}
i[key] = value
}
-func (i chInstallationsIndex) remove(key string) {
+func (i crInstallationsIndex) remove(key string) {
if i == nil {
return
}
@@ -52,9 +56,9 @@ func (i chInstallationsIndex) remove(key string) {
}
}
-func (i chInstallationsIndex) walk(f func(*metrics.WatchedCR, *metrics.WatchedCluster, *metrics.WatchedHost)) {
- // Loop over ClickHouseInstallations
- for _, chi := range i {
- chi.WalkHosts(f)
+func (i crInstallationsIndex) walk(f func(*metrics.WatchedCR, *metrics.WatchedCluster, *metrics.WatchedHost)) {
+ // Loop over Custom Resources
+ for _, cr := range i {
+ cr.WalkHosts(f)
}
}
diff --git a/pkg/metrics/clickhouse/exporter.go b/pkg/metrics/clickhouse/exporter.go
index b4eecac0d..2e2196dd1 100644
--- a/pkg/metrics/clickhouse/exporter.go
+++ b/pkg/metrics/clickhouse/exporter.go
@@ -16,9 +16,6 @@ package clickhouse
import (
"context"
- "encoding/json"
- "fmt"
- "net/http"
"sync"
"time"
@@ -42,35 +39,22 @@ import (
// Exporter implements prometheus.Collector interface
type Exporter struct {
collectorTimeout time.Duration
-
- // chInstallations maps CHI name to list of hostnames (of string type) of this installation
- chInstallations chInstallationsIndex
-
- mutex sync.RWMutex
- toRemoveFromWatched sync.Map
+ registry *CRRegistry
}
// Type compatibility
var _ prometheus.Collector = &Exporter{}
// NewExporter returns a new instance of Exporter type
-func NewExporter(collectorTimeout time.Duration) *Exporter {
+func NewExporter(registry *CRRegistry, collectorTimeout time.Duration) *Exporter {
return &Exporter{
- chInstallations: make(map[string]*metrics.WatchedCR),
+ registry: registry,
collectorTimeout: collectorTimeout,
}
}
-// getWatchedCHIs
-func (e *Exporter) getWatchedCHIs() []*metrics.WatchedCR {
- return e.chInstallations.slice()
-}
-
// Collect implements prometheus.Collector Collect method
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
- // Run cleanup on each collect
- e.cleanup()
-
if ch == nil {
log.Warning("Prometheus channel is closed. Unable to write metrics")
return
@@ -83,23 +67,19 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
log.V(1).Infof("Collect completed [%s]", time.Since(start))
}()
- // Collect should have timeout
+ // Collection process should have limited duration
ctx, cancel := context.WithTimeout(context.Background(), e.collectorTimeout)
defer cancel()
- // This method may be called concurrently and must therefore be implemented in a concurrency safe way
- e.mutex.Lock()
- defer e.mutex.Unlock()
-
log.V(1).Infof("Launching host collectors [%s]", time.Since(start))
var wg = sync.WaitGroup{}
- e.chInstallations.walk(func(chi *metrics.WatchedCR, _ *metrics.WatchedCluster, host *metrics.WatchedHost) {
+ e.registry.Walk(func(cr *metrics.WatchedCR, _ *metrics.WatchedCluster, host *metrics.WatchedHost) {
wg.Add(1)
- go func(ctx context.Context, chi *metrics.WatchedCR, host *metrics.WatchedHost, ch chan<- prometheus.Metric) {
+ go func(ctx context.Context, cr *metrics.WatchedCR, host *metrics.WatchedHost, ch chan<- prometheus.Metric) {
defer wg.Done()
- e.collectHostMetrics(ctx, chi, host, ch)
- }(ctx, chi, host, ch)
+ e.collectHostMetrics(ctx, cr, host, ch)
+ }(ctx, cr, host, ch)
})
wg.Wait()
}
@@ -109,45 +89,17 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
prometheus.DescribeByCollect(e, ch)
}
-// enqueueToRemoveFromWatched
-func (e *Exporter) enqueueToRemoveFromWatched(chi *metrics.WatchedCR) {
- e.toRemoveFromWatched.Store(chi, struct{}{})
-}
-
-// cleanup cleans all pending for cleaning
-func (e *Exporter) cleanup() {
- // Clean up all pending for cleaning CHIs
- log.V(2).Info("Starting cleanup")
- e.toRemoveFromWatched.Range(func(key, value interface{}) bool {
- switch key.(type) {
- case *metrics.WatchedCR:
- e.toRemoveFromWatched.Delete(key)
- e.removeFromWatched(key.(*metrics.WatchedCR))
- log.V(1).Infof("Removed ClickHouseInstallation (%s/%s) from Exporter", key.(*metrics.WatchedCR).Name, key.(*metrics.WatchedCR).Namespace)
- }
- return true
- })
- log.V(2).Info("Completed cleanup")
-}
-
-// removeFromWatched deletes record from Exporter.chInstallation map identified by chiName key
-func (e *Exporter) removeFromWatched(chi *metrics.WatchedCR) {
- e.mutex.Lock()
- defer e.mutex.Unlock()
- log.V(1).Infof("Remove ClickHouseInstallation (%s/%s)", chi.Namespace, chi.Name)
- e.chInstallations.remove(chi.IndexKey())
-}
-
-// updateWatched updates Exporter.chInstallation map with values from chInstances slice
-func (e *Exporter) updateWatched(chi *metrics.WatchedCR) {
- e.mutex.Lock()
- defer e.mutex.Unlock()
- log.V(1).Infof("Update ClickHouseInstallation (%s/%s): %s", chi.Namespace, chi.Name, chi)
- e.chInstallations.set(chi.IndexKey(), chi)
+// collectHostMetrics collects metrics from one host and writes them into chan
+func (e *Exporter) collectHostMetrics(ctx context.Context, chi *metrics.WatchedCR, host *metrics.WatchedHost, c chan<- prometheus.Metric) {
+ collector := NewCollector(
+ e.newHostFetcher(host),
+ NewCHIPrometheusWriter(c, chi, host),
+ )
+ collector.CollectHostMetrics(ctx, host)
}
-// newFetcher returns new Metrics Fetcher for specified host
-func (e *Exporter) newHostFetcher(host *metrics.WatchedHost) *ClickHouseMetricsFetcher {
+// newHostFetcher returns new Metrics Fetcher for specified host
+func (e *Exporter) newHostFetcher(host *metrics.WatchedHost) *MetricsFetcher {
// Make base cluster connection params
clusterConnectionParams := clickhouse.NewClusterConnectionParamsFromCHOpConfig(chop.Config())
// Adjust base cluster connection params with per-host props
@@ -167,208 +119,10 @@ func (e *Exporter) newHostFetcher(host *metrics.WatchedHost) *ClickHouseMetricsF
clusterConnectionParams.Port = int(host.HTTPSPort)
}
- return NewClickHouseFetcher(clusterConnectionParams.NewEndpointConnectionParams(host.Hostname))
-}
-
-// collectHostMetrics collects metrics from one host and writes them into chan
-func (e *Exporter) collectHostMetrics(ctx context.Context, chi *metrics.WatchedCR, host *metrics.WatchedHost, c chan<- prometheus.Metric) {
- fetcher := e.newHostFetcher(host)
- writer := NewCHIPrometheusWriter(c, chi, host)
-
- wg := sync.WaitGroup{}
- wg.Add(6)
- go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
- e.collectHostSystemMetrics(ctx, host, fetcher, writer)
- wg.Done()
- }(ctx, host, fetcher, writer)
- go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
- e.collectHostSystemPartsMetrics(ctx, host, fetcher, writer)
- wg.Done()
- }(ctx, host, fetcher, writer)
- go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
- e.collectHostSystemReplicasMetrics(ctx, host, fetcher, writer)
- wg.Done()
- }(ctx, host, fetcher, writer)
- go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
- e.collectHostMutationsMetrics(ctx, host, fetcher, writer)
- wg.Done()
- }(ctx, host, fetcher, writer)
- go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
- e.collectHostSystemDisksMetrics(ctx, host, fetcher, writer)
- wg.Done()
- }(ctx, host, fetcher, writer)
- go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
- e.collectHostDetachedPartsMetrics(ctx, host, fetcher, writer)
- wg.Done()
- }(ctx, host, fetcher, writer)
- wg.Wait()
-}
-
-func (e *Exporter) collectHostSystemMetrics(
- ctx context.Context,
- host *metrics.WatchedHost,
- fetcher *ClickHouseMetricsFetcher,
- writer *CHIPrometheusWriter,
-) {
- log.V(1).Infof("Querying system metrics for host %s", host.Hostname)
- start := time.Now()
- metrics, err := fetcher.getClickHouseQueryMetrics(ctx)
- elapsed := time.Since(start)
- if err == nil {
- log.V(1).Infof("Extracted [%s] %d system metrics for host %s", elapsed, len(metrics), host.Hostname)
- writer.WriteMetrics(metrics)
- writer.WriteOKFetch("system.metrics")
- } else {
- // In case of an error fetching data from clickhouse store CHI name in e.cleanup
- log.Warningf("Error [%s] querying system.metrics for host %s err: %s", elapsed, host.Hostname, err)
- writer.WriteErrorFetch("system.metrics")
- }
-}
-
-func (e *Exporter) collectHostSystemPartsMetrics(
- ctx context.Context,
- host *metrics.WatchedHost,
- fetcher *ClickHouseMetricsFetcher,
- writer *CHIPrometheusWriter,
-) {
- log.V(1).Infof("Querying table sizes for host %s", host.Hostname)
- start := time.Now()
- systemPartsData, err := fetcher.getClickHouseSystemParts(ctx)
- elapsed := time.Since(start)
- if err == nil {
- log.V(1).Infof("Extracted [%s] %d table sizes for host %s", elapsed, len(systemPartsData), host.Hostname)
- writer.WriteTableSizes(systemPartsData)
- writer.WriteOKFetch("table sizes")
- writer.WriteSystemParts(systemPartsData)
- writer.WriteOKFetch("system parts")
- } else {
- // In case of an error fetching data from clickhouse store CHI name in e.cleanup
- log.Warningf("Error [%s] querying system.parts for host %s err: %s", elapsed, host.Hostname, err)
- writer.WriteErrorFetch("table sizes")
- writer.WriteErrorFetch("system parts")
- }
-}
-
-func (e *Exporter) collectHostSystemReplicasMetrics(
- ctx context.Context,
- host *metrics.WatchedHost,
- fetcher *ClickHouseMetricsFetcher,
- writer *CHIPrometheusWriter,
-) {
- log.V(1).Infof("Querying system replicas for host %s", host.Hostname)
- start := time.Now()
- systemReplicas, err := fetcher.getClickHouseQuerySystemReplicas(ctx)
- elapsed := time.Since(start)
- if err == nil {
- log.V(1).Infof("Extracted [%s] %d system replicas for host %s", elapsed, len(systemReplicas), host.Hostname)
- writer.WriteSystemReplicas(systemReplicas)
- writer.WriteOKFetch("system.replicas")
- } else {
- // In case of an error fetching data from clickhouse store CHI name in e.cleanup
- log.Warningf("Error [%s] querying system.replicas for host %s err: %s", elapsed, host.Hostname, err)
- writer.WriteErrorFetch("system.replicas")
- }
-}
-
-func (e *Exporter) collectHostMutationsMetrics(
- ctx context.Context,
- host *metrics.WatchedHost,
- fetcher *ClickHouseMetricsFetcher,
- writer *CHIPrometheusWriter,
-) {
- log.V(1).Infof("Querying mutations for host %s", host.Hostname)
- start := time.Now()
- mutations, err := fetcher.getClickHouseQueryMutations(ctx)
- elapsed := time.Since(start)
- if err == nil {
- log.V(1).Infof("Extracted [%s] %d mutations for %s", elapsed, len(mutations), host.Hostname)
- writer.WriteMutations(mutations)
- writer.WriteOKFetch("system.mutations")
- } else {
- // In case of an error fetching data from clickhouse store CHI name in e.cleanup
- log.Warningf("Error [%s] querying system.mutations for host %s err: %s", elapsed, host.Hostname, err)
- writer.WriteErrorFetch("system.mutations")
- }
-}
-
-func (e *Exporter) collectHostSystemDisksMetrics(
- ctx context.Context,
- host *metrics.WatchedHost,
- fetcher *ClickHouseMetricsFetcher,
- writer *CHIPrometheusWriter,
-) {
- log.V(1).Infof("Querying disks for host %s", host.Hostname)
- start := time.Now()
- disks, err := fetcher.getClickHouseQuerySystemDisks(ctx)
- elapsed := time.Since(start)
- if err == nil {
- log.V(1).Infof("Extracted [%s] %d disks for host %s", elapsed, len(disks), host.Hostname)
- writer.WriteSystemDisks(disks)
- writer.WriteOKFetch("system.disks")
- } else {
- // In case of an error fetching data from clickhouse store CHI name in e.cleanup
- log.Warningf("Error [%s] querying system.disks for host %s err: %s", elapsed, host.Hostname, err)
- writer.WriteErrorFetch("system.disks")
- }
-}
-
-func (e *Exporter) collectHostDetachedPartsMetrics(
- ctx context.Context,
- host *metrics.WatchedHost,
- fetcher *ClickHouseMetricsFetcher,
- writer *CHIPrometheusWriter,
-) {
- log.V(1).Infof("Querying detached parts for host %s", host.Hostname)
- start := time.Now()
- detachedParts, err := fetcher.getClickHouseQueryDetachedParts(ctx)
- elapsed := time.Since(start)
- if err == nil {
- log.V(1).Infof("Extracted [%s] %d detached parts info for host %s", elapsed, len(detachedParts), host.Hostname)
- writer.WriteDetachedParts(detachedParts)
- writer.WriteOKFetch("system.detached_parts")
- } else {
- // In case of an error fetching data from clickhouse store CHI name in e.cleanup
- log.Warningf("Error [%s] querying system.detached_parts for host %s err: %s", elapsed, host.Hostname, err)
- writer.WriteErrorFetch("system.detached_parts")
- }
-}
-
-// getWatchedCHI serves HTTP request to get list of watched CHIs
-func (e *Exporter) getWatchedCHI(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "application/json")
- _ = json.NewEncoder(w).Encode(e.getWatchedCHIs())
-}
-
-// fetchCHI decodes chi from the request
-func (e *Exporter) fetchCHI(r *http.Request) (*metrics.WatchedCR, error) {
- chi := &metrics.WatchedCR{}
- if err := json.NewDecoder(r.Body).Decode(chi); err == nil {
- if chi.IsValid() {
- return chi, nil
- }
- }
-
- return nil, fmt.Errorf("unable to parse CHI from request")
-}
-
-// updateWatchedCHI serves HTTPS request to add CHI to the list of watched CHIs
-func (e *Exporter) updateWatchedCHI(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "application/json")
- if chi, err := e.fetchCHI(r); err == nil {
- e.updateWatched(chi)
- } else {
- http.Error(w, err.Error(), http.StatusNotAcceptable)
- }
-}
-
-// deleteWatchedCHI serves HTTP request to delete CHI from the list of watched CHIs
-func (e *Exporter) deleteWatchedCHI(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "application/json")
- if chi, err := e.fetchCHI(r); err == nil {
- e.enqueueToRemoveFromWatched(chi)
- } else {
- http.Error(w, err.Error(), http.StatusNotAcceptable)
- }
+ return NewMetricsFetcher(
+ clusterConnectionParams.NewEndpointConnectionParams(host.Hostname),
+ chop.Config().ClickHouse.Metrics.TablesRegexp,
+ )
}
// DiscoveryWatchedCHIs discovers all ClickHouseInstallation objects available for monitoring and adds them to watched list
@@ -391,7 +145,7 @@ func (e *Exporter) DiscoveryWatchedCHIs(kubeClient kube.Interface, chopClient *c
}
func (e *Exporter) processDiscoveredCR(kubeClient kube.Interface, chi *api.ClickHouseInstallation) {
- if e.shouldSkipDiscoveredCR(chi) {
+ if !e.shouldWatchCR(chi) {
log.V(1).Infof("Skip discovered CHI: %s/%s", chi.Namespace, chi.Name)
return
}
@@ -403,15 +157,15 @@ func (e *Exporter) processDiscoveredCR(kubeClient kube.Interface, chi *api.Click
normalized, _ := normalizer.CreateTemplated(chi, normalizerCommon.NewOptions[api.ClickHouseInstallation]())
- watchedCHI := metrics.NewWatchedCR(normalized)
- e.updateWatched(watchedCHI)
+ watchedCR := metrics.NewWatchedCR(normalized)
+ e.registry.AddCR(watchedCR)
}
-func (e *Exporter) shouldSkipDiscoveredCR(chi *api.ClickHouseInstallation) bool {
+func (e *Exporter) shouldWatchCR(chi *api.ClickHouseInstallation) bool {
if chi.IsStopped() {
- log.V(1).Infof("CHI %s/%s is stopped, skip it", chi.Namespace, chi.Name)
- return true
+ log.V(1).Infof("CHI %s/%s is stopped, unable to watch it", chi.Namespace, chi.Name)
+ return false
}
- return false
+ return true
}
diff --git a/pkg/metrics/clickhouse/registry.go b/pkg/metrics/clickhouse/registry.go
new file mode 100644
index 000000000..ef7e69257
--- /dev/null
+++ b/pkg/metrics/clickhouse/registry.go
@@ -0,0 +1,133 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clickhouse
+
+import (
+ "fmt"
+ "sync"
+
+ log "github.com/golang/glog"
+
+ "github.com/altinity/clickhouse-operator/pkg/apis/metrics"
+)
+
+// CRRegistry is a thread-safe storage for watched Custom Resources
+type CRRegistry struct {
+ index crInstallationsIndex
+ mutex sync.RWMutex
+}
+
+// NewCRRegistry creates a new CRRegistry instance
+func NewCRRegistry() *CRRegistry {
+ return &CRRegistry{
+ index: newCRInstallationsIndex(),
+ }
+}
+
+// AddCR adds or updates a CR in the registry
+func (r *CRRegistry) AddCR(cr *metrics.WatchedCR) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ log.V(1).Infof("Registry: Add CR (%s/%s): %s", cr.Namespace, cr.Name, cr)
+ r.index.set(cr.IndexKey(), cr)
+}
+
+// AddHost adds a host to an existing CR in the registry
+func (r *CRRegistry) AddHost(req *HostRequest) error {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+
+ crKey := (&metrics.WatchedCR{Namespace: req.CRNamespace, Name: req.CRName}).IndexKey()
+ cr, ok := r.index.get(crKey)
+ if !ok || cr == nil {
+ return fmt.Errorf("CR not found: %s", crKey)
+ }
+
+ // Find or create cluster
+ var cluster *metrics.WatchedCluster
+ for _, c := range cr.Clusters {
+ if c.Name == req.ClusterName {
+ cluster = c
+ break
+ }
+ }
+ if cluster == nil {
+ cluster = &metrics.WatchedCluster{Name: req.ClusterName}
+ cr.Clusters = append(cr.Clusters, cluster)
+ }
+
+ // Add or update host
+ found := false
+ for i, h := range cluster.Hosts {
+ if h.Hostname == req.Host.Hostname {
+ cluster.Hosts[i] = req.Host
+ found = true
+ break
+ }
+ }
+ if !found {
+ cluster.Hosts = append(cluster.Hosts, req.Host)
+ }
+
+ log.V(1).Infof("Registry: Add Host %s to CR (%s/%s) cluster %s", req.Host.Hostname, req.CRNamespace, req.CRName, req.ClusterName)
+ return nil
+}
+
+// RemoveCR removes a CR from the registry
+func (r *CRRegistry) RemoveCR(cr *metrics.WatchedCR) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ log.V(1).Infof("Registry: Remove CR (%s/%s)", cr.Namespace, cr.Name)
+ r.index.remove(cr.IndexKey())
+}
+
+// RemoveHost removes a host from a CR in the registry
+func (r *CRRegistry) RemoveHost(req *HostRequest) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+
+ crKey := (&metrics.WatchedCR{Namespace: req.CRNamespace, Name: req.CRName}).IndexKey()
+ cr, ok := r.index.get(crKey)
+ if !ok || cr == nil {
+ log.V(1).Infof("Registry: Cannot remove host, CR not found: %s", crKey)
+ return
+ }
+
+ for _, cluster := range cr.Clusters {
+ if cluster.Name == req.ClusterName {
+ for i, h := range cluster.Hosts {
+ if h.Hostname == req.Host.Hostname {
+ cluster.Hosts = append(cluster.Hosts[:i], cluster.Hosts[i+1:]...)
+ log.V(1).Infof("Registry: Remove Host %s from CR (%s/%s) cluster %s", req.Host.Hostname, req.CRNamespace, req.CRName, req.ClusterName)
+ return
+ }
+ }
+ }
+ }
+}
+
+// List returns all watched CRs as a slice
+func (r *CRRegistry) List() []*metrics.WatchedCR {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ return r.index.slice()
+}
+
+// Walk iterates over all hosts while holding an exclusive lock
+func (r *CRRegistry) Walk(fn func(*metrics.WatchedCR, *metrics.WatchedCluster, *metrics.WatchedHost)) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ r.index.walk(fn)
+}
diff --git a/pkg/metrics/clickhouse/rest_client.go b/pkg/metrics/clickhouse/rest_client.go
index 55510d3a7..87c467d2c 100644
--- a/pkg/metrics/clickhouse/rest_client.go
+++ b/pkg/metrics/clickhouse/rest_client.go
@@ -14,14 +14,28 @@
package clickhouse
-import "github.com/altinity/clickhouse-operator/pkg/apis/metrics"
+import (
+ "net/http"
+
+ "github.com/altinity/clickhouse-operator/pkg/apis/metrics"
+)
// InformMetricsExporterAboutWatchedCHI informs exporter about new watched CHI
func InformMetricsExporterAboutWatchedCHI(chi *metrics.WatchedCR) error {
- return makeRESTCall(chi, "POST")
+ return makeRESTCall(&RESTRequest{Type: RequestTypeCR, CR: chi}, http.MethodPost)
}
// InformMetricsExporterToDeleteWatchedCHI informs exporter to delete/forget watched CHI
func InformMetricsExporterToDeleteWatchedCHI(chi *metrics.WatchedCR) error {
- return makeRESTCall(chi, "DELETE")
+ return makeRESTCall(&RESTRequest{Type: RequestTypeCR, CR: chi}, http.MethodDelete)
+}
+
+// InformMetricsExporterAboutWatchedHost informs exporter about new watched host
+func InformMetricsExporterAboutWatchedHost(host *HostRequest) error {
+ return makeRESTCall(&RESTRequest{Type: RequestTypeHost, Host: host}, http.MethodPost)
+}
+
+// InformMetricsExporterToDeleteWatchedHost informs exporter to delete/forget watched host
+func InformMetricsExporterToDeleteWatchedHost(host *HostRequest) error {
+ return makeRESTCall(&RESTRequest{Type: RequestTypeHost, Host: host}, http.MethodDelete)
}
diff --git a/pkg/metrics/clickhouse/rest_machinery.go b/pkg/metrics/clickhouse/rest_machinery.go
index 767a9ce7e..3fb59e1ca 100644
--- a/pkg/metrics/clickhouse/rest_machinery.go
+++ b/pkg/metrics/clickhouse/rest_machinery.go
@@ -18,25 +18,23 @@ import (
"bytes"
"encoding/json"
"fmt"
- "github.com/altinity/clickhouse-operator/pkg/apis/metrics"
"io"
"net/http"
)
-func makeRESTCall(chi *metrics.WatchedCR, method string) error {
+func makeRESTCall(restReq *RESTRequest, method string) error {
url := "http://127.0.0.1:8888/chi"
- json, err := json.Marshal(chi)
+ payload, err := json.Marshal(restReq)
if err != nil {
return err
}
- req, err := http.NewRequest(method, url, bytes.NewBuffer(json))
+ httpReq, err := http.NewRequest(method, url, bytes.NewBuffer(payload))
if err != nil {
return err
}
- //req.SetBasicAuth(s.Username, s.Password)
- _, err = doRequest(req)
+ _, err = doRequest(httpReq)
return err
}
diff --git a/pkg/metrics/clickhouse/rest_server.go b/pkg/metrics/clickhouse/rest_server.go
index 12027507e..8fe7be886 100644
--- a/pkg/metrics/clickhouse/rest_server.go
+++ b/pkg/metrics/clickhouse/rest_server.go
@@ -15,33 +15,170 @@
package clickhouse
import (
+ "encoding/json"
"fmt"
"net/http"
"time"
log "github.com/golang/glog"
- // log "k8s.io/klog"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
+
+ "github.com/altinity/clickhouse-operator/pkg/apis/metrics"
+)
+
+// Request type constants
+const (
+ RequestTypeCR = "cr"
+ RequestTypeHost = "host"
)
-// StartMetricsREST start Prometheus metrics exporter in background
+// RESTRequest wraps different request types for POST/DELETE operations
+type RESTRequest struct {
+ Type string `json:"type"` // "cr" or "host"
+ CR *metrics.WatchedCR `json:"cr,omitempty"`
+ Host *HostRequest `json:"host,omitempty"`
+}
+
+// HostRequest contains host details with parent context
+type HostRequest struct {
+ CRNamespace string `json:"crNamespace"`
+ CRName string `json:"crName"`
+ ClusterName string `json:"clusterName"`
+ Host *metrics.WatchedHost `json:"host"`
+}
+
+// IsValid checks if HostRequest has all required fields
+func (r *HostRequest) IsValid() bool {
+ return r.CRNamespace != "" && r.CRName != "" && r.ClusterName != "" && r.Host != nil && r.Host.Hostname != ""
+}
+
+// RESTServer provides HTTP API for managing watched CRs and Hosts
+type RESTServer struct {
+ registry *CRRegistry
+}
+
+// NewRESTServer creates a new RESTServer instance
+func NewRESTServer(registry *CRRegistry) *RESTServer {
+ return &RESTServer{
+ registry: registry,
+ }
+}
+
+// ServeHTTP implements http.Handler interface
+func (s *RESTServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != "/chi" {
+ http.Error(w, "404 not found.", http.StatusNotFound)
+ return
+ }
+
+ switch r.Method {
+ case http.MethodGet:
+ s.handleGet(w, r)
+ case http.MethodPost:
+ s.handlePost(w, r)
+ case http.MethodDelete:
+ s.handleDelete(w, r)
+ default:
+ _, _ = fmt.Fprintf(w, "Sorry, only GET, POST and DELETE methods are supported.")
+ }
+}
+
+// handleGet serves HTTP GET request to get list of watched CRs
+func (s *RESTServer) handleGet(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ _ = json.NewEncoder(w).Encode(s.registry.List())
+}
+
+// handlePost serves HTTP POST request to add CR or Host
+func (s *RESTServer) handlePost(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ req, err := s.decodeRequest(r)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotAcceptable)
+ return
+ }
+
+ switch req.Type {
+ case RequestTypeCR:
+ s.registry.AddCR(req.CR)
+ case RequestTypeHost:
+ if err := s.registry.AddHost(req.Host); err != nil {
+ http.Error(w, err.Error(), http.StatusNotAcceptable)
+ return
+ }
+ default:
+ http.Error(w, fmt.Sprintf("unknown request type: %s", req.Type), http.StatusNotAcceptable)
+ }
+}
+
+// handleDelete serves HTTP DELETE request to delete CR or Host
+func (s *RESTServer) handleDelete(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ req, err := s.decodeRequest(r)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotAcceptable)
+ return
+ }
+
+ switch req.Type {
+ case RequestTypeCR:
+ s.registry.RemoveCR(req.CR)
+ case RequestTypeHost:
+ s.registry.RemoveHost(req.Host)
+ default:
+ http.Error(w, fmt.Sprintf("unknown request type: %s", req.Type), http.StatusNotAcceptable)
+ }
+}
+
+// decodeRequest decodes RESTRequest from the HTTP request body
+func (s *RESTServer) decodeRequest(r *http.Request) (*RESTRequest, error) {
+ req := &RESTRequest{}
+ if err := json.NewDecoder(r.Body).Decode(req); err != nil {
+ return nil, fmt.Errorf("unable to parse request: %w", err)
+ }
+
+ switch req.Type {
+ case RequestTypeCR:
+ if req.CR == nil || !req.CR.IsValid() {
+ return nil, fmt.Errorf("invalid CR in request")
+ }
+ case RequestTypeHost:
+ if req.Host == nil || !req.Host.IsValid() {
+ return nil, fmt.Errorf("invalid Host in request")
+ }
+ default:
+ return nil, fmt.Errorf("unknown request type: %s", req.Type)
+ }
+
+ return req, nil
+}
+
+// StartMetricsREST starts Prometheus metrics exporter and REST API server
func StartMetricsREST(
metricsAddress string,
metricsPath string,
collectorTimeout time.Duration,
-
chiListAddress string,
chiListPath string,
) *Exporter {
log.V(1).Infof("Starting metrics exporter at '%s%s'\n", metricsAddress, metricsPath)
- exporter := NewExporter(collectorTimeout)
+ // Create shared registry
+ registry := NewCRRegistry()
+
+ // Create and register Prometheus exporter
+ exporter := NewExporter(registry, collectorTimeout)
prometheus.MustRegister(exporter)
+ // Create REST server
+ restServer := NewRESTServer(registry)
+
+ // Setup HTTP handlers
http.Handle(metricsPath, promhttp.Handler())
- http.Handle(chiListPath, exporter)
+ http.Handle(chiListPath, restServer)
+ // Start HTTP servers
go http.ListenAndServe(metricsAddress, nil)
if metricsAddress != chiListAddress {
go http.ListenAndServe(chiListAddress, nil)
@@ -49,22 +186,3 @@ func StartMetricsREST(
return exporter
}
-
-// ServeHTTP is an interface method to serve HTTP requests
-func (e *Exporter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if r.URL.Path != "/chi" {
- http.Error(w, "404 not found.", http.StatusNotFound)
- return
- }
-
- switch r.Method {
- case "GET":
- e.getWatchedCHI(w, r)
- case "POST":
- e.updateWatchedCHI(w, r)
- case "DELETE":
- e.deleteWatchedCHI(w, r)
- default:
- _, _ = fmt.Fprintf(w, "Sorry, only GET, POST and DELETE methods are supported.")
- }
-}
diff --git a/pkg/model/chi/config/generator.go b/pkg/model/chi/config/generator.go
index 34dcd9b12..163d2a5fa 100644
--- a/pkg/model/chi/config/generator.go
+++ b/pkg/model/chi/config/generator.go
@@ -33,6 +33,9 @@ const (
// Pattern for string path used in XXX
DistributedDDLPathPattern = "/clickhouse/%s/task_queue/ddl"
+	// Pattern for string path to the distributed DDL replicas task queue, used in distributed_ddl config section
+ DistributedDDLReplicasPathPattern = "/clickhouse/%s/task_queue/replicas"
+
// Special auto-generated clusters. Each of these clusters lay over all replicas in CHI
// 1. Cluster with one shard and all replicas. Used to duplicate data over all replicas.
// 2. Cluster with all shards (1 replica). Used to gather/scatter data over all replicas.
@@ -178,6 +181,7 @@ func (c *Generator) getHostZookeeper(host *chi.Host) string {
// X
util.Iline(b, 4, "")
util.Iline(b, 4, " %s", c.getDistributedDDLPath())
+ util.Iline(b, 4, " %s", c.getDistributedDDLReplicasPath())
if c.opts.DistributedDDL.HasProfile() {
util.Iline(b, 4, " %s", c.opts.DistributedDDL.GetProfile())
}
@@ -545,6 +549,11 @@ func (c *Generator) getDistributedDDLPath() string {
return fmt.Sprintf(DistributedDDLPathPattern, c.cr.GetName())
}
+// getDistributedDDLReplicasPath returns string path used in the distributed_ddl config section
+func (c *Generator) getDistributedDDLReplicasPath() string {
+ return fmt.Sprintf(DistributedDDLReplicasPathPattern, c.cr.GetName())
+}
+
// getRemoteServersReplicaHostname returns hostname (podhostname + service or FQDN) for "remote_servers.xml"
// based on .Spec.Defaults.ReplicasUseFQDN
func (c *Generator) getRemoteServersReplicaHostname(host *chi.Host) string {
diff --git a/pkg/model/chi/namer/patterns.go b/pkg/model/chi/namer/patterns.go
index 621398ff4..c9a74d5a3 100644
--- a/pkg/model/chi/namer/patterns.go
+++ b/pkg/model/chi/namer/patterns.go
@@ -62,8 +62,9 @@ const (
const (
// patternNamespaceDomain presents Domain Name pattern of a namespace
// In this pattern "%s" is substituted namespace name's value
- // Ex.: my-dev-namespace.svc.cluster.local
- patternNamespaceDomain = "%s.svc.cluster.local"
+ // Trailing dot forces absolute DNS lookup, avoiding slow search-suffix resolution with ndots:5
+ // Ex.: my-dev-namespace.svc.cluster.local.
+ patternNamespaceDomain = "%s.svc.cluster.local."
// ServiceName.domain.name
patternServiceFQDN = "%s" + "." + patternNamespaceDomain
diff --git a/pkg/model/chi/normalizer/const.go b/pkg/model/chi/normalizer/const.go
index 599967f18..82df80377 100644
--- a/pkg/model/chi/normalizer/const.go
+++ b/pkg/model/chi/normalizer/const.go
@@ -26,4 +26,10 @@ const (
// reconciled concurrently, if the number of shard reconciliation threads is greater than or equal to the number
// of shards in the cluster.
defaultReconcileShardsMaxConcurrencyPercent = 50
+
+ // defaultStatefulSetUpdateTimeout specifies the default timeout in seconds for StatefulSet update
+ defaultStatefulSetUpdateTimeout = 300
+
+ // defaultStatefulSetUpdatePollInterval specifies the default poll interval in seconds for StatefulSet update
+ defaultStatefulSetUpdatePollInterval = 5
)
diff --git a/pkg/model/chi/normalizer/normalizer.go b/pkg/model/chi/normalizer/normalizer.go
index ec4d51709..37e3bd25b 100644
--- a/pkg/model/chi/normalizer/normalizer.go
+++ b/pkg/model/chi/normalizer/normalizer.go
@@ -155,6 +155,7 @@ func (n *Normalizer) normalizeSpec() {
n.req.GetTarget().GetSpecT().Stop = n.normalizeStop(n.req.GetTarget().GetSpecT().Stop)
n.req.GetTarget().GetSpecT().Restart = n.normalizeRestart(n.req.GetTarget().GetSpecT().Restart)
n.req.GetTarget().GetSpecT().Troubleshoot = n.normalizeTroubleshoot(n.req.GetTarget().GetSpecT().Troubleshoot)
+ n.req.GetTarget().GetSpecT().Suspend = n.normalizeSuspend(n.req.GetTarget().GetSpecT().Suspend)
n.req.GetTarget().GetSpecT().NamespaceDomainPattern = n.normalizeNamespaceDomainPattern(n.req.GetTarget().GetSpecT().NamespaceDomainPattern)
n.req.GetTarget().GetSpecT().Templating = n.normalizeTemplating(n.req.GetTarget().GetSpecT().Templating)
n.normalizeReconciling()
@@ -255,6 +256,17 @@ func (n *Normalizer) normalizeTroubleshoot(troubleshoot *types.StringBool) *type
return types.NewStringBool(false)
}
+// normalizeSuspend normalizes .spec.suspend
+func (n *Normalizer) normalizeSuspend(suspend *types.StringBool) *types.StringBool {
+ if suspend.IsValid() {
+ // It is bool, use as it is
+ return suspend
+ }
+
+	// In case it is an unknown value - just set it to false
+ return types.NewStringBool(false)
+}
+
func isNamespaceDomainPatternValid(namespaceDomainPattern *types.String) bool {
if strings.Count(namespaceDomainPattern.Value(), "%s") > 1 {
return false
@@ -411,6 +423,11 @@ func (n *Normalizer) normalizeReconcile(reconcile *chi.ChiReconcile) *chi.ChiRec
reconcile.InheritRuntimeFrom(chop.Config().Reconcile.Runtime)
reconcile.Runtime = n.normalizeReconcileRuntime(reconcile.Runtime)
+ // StatefulSet
+ // Inherit from chop Config
+ reconcile.InheritStatefulSetFrom(chop.Config().Reconcile)
+ reconcile.StatefulSet = n.normalizeReconcileStatefulSet(reconcile.StatefulSet)
+
// Host
// Inherit from chop Config
reconcile.InheritHostFrom(chop.Config().Reconcile.Host)
@@ -429,9 +446,34 @@ func (n *Normalizer) normalizeReconcileRuntime(runtime chi.ReconcileRuntime) chi
return runtime
}
+func (n *Normalizer) normalizeReconcileStatefulSet(sts chi.ReconcileStatefulSet) chi.ReconcileStatefulSet {
+ // Create
+ if sts.Create.OnFailure == "" {
+ sts.Create.OnFailure = chi.OnStatefulSetCreateFailureActionDelete
+ }
+ // Update
+ if sts.Update.Timeout == 0 {
+ sts.Update.Timeout = defaultStatefulSetUpdateTimeout
+ }
+ if sts.Update.PollInterval == 0 {
+ sts.Update.PollInterval = defaultStatefulSetUpdatePollInterval
+ }
+ if sts.Update.OnFailure == "" {
+ sts.Update.OnFailure = chi.OnStatefulSetUpdateFailureActionRollback
+ }
+ // Recreate
+ if sts.Recreate.OnDataLoss == "" {
+ sts.Recreate.OnDataLoss = chi.OnStatefulSetRecreateOnDataLossActionRecreate
+ }
+ if sts.Recreate.OnUpdateFailure == "" {
+ sts.Recreate.OnUpdateFailure = chi.OnStatefulSetRecreateOnUpdateFailureActionRecreate
+ }
+ return sts
+}
+
func (n *Normalizer) normalizeReconcileHost(rh chi.ReconcileHost) chi.ReconcileHost {
// Normalize
- rh = rh.Normalize()
+ rh = rh.Normalize(types.NewStringBool(true), false)
return rh
}
@@ -993,8 +1035,18 @@ func (n *Normalizer) normalizeClusterLayoutShardsCountAndReplicasCount(clusterLa
return clusterLayout
}
-func (n *Normalizer) normalizeClusterReconcile(reconcile chi.ClusterReconcile) chi.ClusterReconcile {
+func (n *Normalizer) normalizeClusterReconcile(reconcile *chi.ClusterReconcile) *chi.ClusterReconcile {
+ reconcile = reconcile.Ensure()
+
+ // Inherit from CHI-level reconcile settings (fill empty values only)
+ if chiReconcile := n.req.GetTarget().GetSpecT().Reconcile; chiReconcile != nil {
+ reconcile.Runtime = reconcile.Runtime.MergeFrom(chiReconcile.Runtime, chi.MergeTypeFillEmptyValues)
+ reconcile.StatefulSet = reconcile.StatefulSet.MergeFrom(chiReconcile.StatefulSet)
+ reconcile.Host = reconcile.Host.MergeFrom(chiReconcile.Host)
+ }
+
reconcile.Runtime = n.normalizeReconcileRuntime(reconcile.Runtime)
+ reconcile.StatefulSet = n.normalizeReconcileStatefulSet(reconcile.StatefulSet)
reconcile.Host = n.normalizeReconcileHost(reconcile.Host)
return reconcile
}
diff --git a/pkg/model/chk/creator/probe.go b/pkg/model/chk/creator/probe.go
index 046af8eb8..ed61f0579 100644
--- a/pkg/model/chk/creator/probe.go
+++ b/pkg/model/chk/creator/probe.go
@@ -34,7 +34,7 @@ func NewProbeManager() *ProbeManager {
func (m *ProbeManager) CreateProbe(what interfaces.ProbeType, host *api.Host) *core.Probe {
switch what {
case interfaces.ProbeDefaultStartup:
- return nil
+ return m.createDefaultLivenessProbe(host)
case interfaces.ProbeDefaultLiveness:
return m.createDefaultLivenessProbe(host)
case interfaces.ProbeDefaultReadiness:
diff --git a/pkg/model/chk/namer/patterns.go b/pkg/model/chk/namer/patterns.go
index dc2bf682a..701e74864 100644
--- a/pkg/model/chk/namer/patterns.go
+++ b/pkg/model/chk/namer/patterns.go
@@ -62,8 +62,9 @@ const (
const (
// patternNamespaceDomain presents Domain Name pattern of a namespace
// In this pattern "%s" is substituted namespace name's value
- // Ex.: my-dev-namespace.svc.cluster.local
- patternNamespaceDomain = "%s.svc.cluster.local"
+ // Trailing dot forces absolute DNS lookup, avoiding slow search-suffix resolution with ndots:5
+ // Ex.: my-dev-namespace.svc.cluster.local.
+ patternNamespaceDomain = "%s.svc.cluster.local."
// ServiceName.domain.name
patternServiceFQDN = "%s" + "." + patternNamespaceDomain
diff --git a/pkg/model/chk/normalizer/const.go b/pkg/model/chk/normalizer/const.go
new file mode 100644
index 000000000..82df80377
--- /dev/null
+++ b/pkg/model/chk/normalizer/const.go
@@ -0,0 +1,35 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package normalizer
+
+const (
+ // defaultReconcileShardsThreadsNumber specifies the default number of threads usable for concurrent shard reconciliation
+ // within a single cluster reconciliation. Defaults to 1, which means strictly sequential shard reconciliation.
+ defaultReconcileShardsThreadsNumber = 1
+
+ // defaultReconcileShardsMaxConcurrencyPercent specifies the maximum integer percentage of shards that may be reconciled
+ // concurrently during cluster reconciliation. This counterbalances the fact that this is an operator setting,
+ // that different clusters will have different shard counts, and that the shard concurrency capacity is specified
+ // above in terms of a number of threads to use (up to). Example: overriding to 100 means all shards may be
+ // reconciled concurrently, if the number of shard reconciliation threads is greater than or equal to the number
+ // of shards in the cluster.
+ defaultReconcileShardsMaxConcurrencyPercent = 50
+
+ // defaultStatefulSetUpdateTimeout specifies the default timeout in seconds for StatefulSet update
+ defaultStatefulSetUpdateTimeout = 300
+
+ // defaultStatefulSetUpdatePollInterval specifies the default poll interval in seconds for StatefulSet update
+ defaultStatefulSetUpdatePollInterval = 5
+)
diff --git a/pkg/model/chk/normalizer/normalizer.go b/pkg/model/chk/normalizer/normalizer.go
index 881b060d3..02314f3b8 100644
--- a/pkg/model/chk/normalizer/normalizer.go
+++ b/pkg/model/chk/normalizer/normalizer.go
@@ -143,6 +143,8 @@ func (n *Normalizer) normalizeTarget() (*chk.ClickHouseKeeperInstallation, error
func (n *Normalizer) normalizeSpec() {
// Walk over Spec datatype fields
n.req.GetTarget().GetSpecT().TaskID = n.normalizeTaskID(n.req.GetTarget().GetSpecT().TaskID)
+ n.req.GetTarget().GetSpecT().Stop = n.normalizeStop(n.req.GetTarget().GetSpecT().Stop)
+ n.req.GetTarget().GetSpecT().Suspend = n.normalizeSuspend(n.req.GetTarget().GetSpecT().Suspend)
n.req.GetTarget().GetSpecT().NamespaceDomainPattern = n.normalizeNamespaceDomainPattern(n.req.GetTarget().GetSpecT().NamespaceDomainPattern)
n.normalizeReconciling()
n.req.GetTarget().GetSpecT().Reconcile = n.normalizeReconcile(n.req.GetTarget().GetSpecT().Reconcile)
@@ -208,6 +210,28 @@ func (n *Normalizer) normalizeTaskID(taskID *types.Id) *types.Id {
return types.NewAutoId()
}
+// normalizeStop normalizes .spec.stop
+func (n *Normalizer) normalizeStop(stop *types.StringBool) *types.StringBool {
+ if stop.IsValid() {
+ // It is bool, use as it is
+ return stop
+ }
+
+	// In case it is an unknown value - just set it to false
+ return types.NewStringBool(false)
+}
+
+// normalizeSuspend normalizes .spec.suspend
+func (n *Normalizer) normalizeSuspend(suspend *types.StringBool) *types.StringBool {
+ if suspend.IsValid() {
+ // It is bool, use as it is
+ return suspend
+ }
+
+	// In case it is an unknown value - just set it to false
+ return types.NewStringBool(false)
+}
+
func isNamespaceDomainPatternValid(namespaceDomainPattern *types.String) bool {
if strings.Count(namespaceDomainPattern.Value(), "%s") > 1 {
return false
@@ -337,13 +361,73 @@ func (n *Normalizer) normalizeReconcile(reconcile *chi.ChiReconcile) *chi.ChiRec
// No normalization yet
// Runtime
- // No normalization yet
+ // Inherit from chop Config
+ reconcile.InheritRuntimeFrom(chop.Config().Reconcile.Runtime)
+ reconcile.Runtime = n.normalizeReconcileRuntime(reconcile.Runtime)
+
+ // StatefulSet
+ // Inherit from chop Config
+ reconcile.InheritStatefulSetFrom(chop.Config().Reconcile)
+ reconcile.StatefulSet = n.normalizeReconcileStatefulSet(reconcile.StatefulSet)
// Host
- // No normalization yet
+ // Inherit from chop Config
+ reconcile.InheritHostFrom(chop.Config().Reconcile.Host)
+ reconcile.Host = n.normalizeReconcileHost(reconcile.Host)
+
return reconcile
}
+func (n *Normalizer) normalizeReconcileRuntime(runtime chi.ReconcileRuntime) chi.ReconcileRuntime {
+ if runtime.ReconcileShardsThreadsNumber == 0 {
+ runtime.ReconcileShardsThreadsNumber = defaultReconcileShardsThreadsNumber
+ }
+ if runtime.ReconcileShardsMaxConcurrencyPercent == 0 {
+ runtime.ReconcileShardsMaxConcurrencyPercent = defaultReconcileShardsMaxConcurrencyPercent
+ }
+ return runtime
+}
+
+func (n *Normalizer) normalizeReconcileStatefulSet(sts chi.ReconcileStatefulSet) chi.ReconcileStatefulSet {
+ // Create
+ if sts.Create.OnFailure == "" {
+ sts.Create.OnFailure = chi.OnStatefulSetCreateFailureActionDelete
+ }
+ // Update
+ if sts.Update.Timeout == 0 {
+ sts.Update.Timeout = defaultStatefulSetUpdateTimeout
+ }
+ if sts.Update.PollInterval == 0 {
+ sts.Update.PollInterval = defaultStatefulSetUpdatePollInterval
+ }
+ if sts.Update.OnFailure == "" {
+ sts.Update.OnFailure = chi.OnStatefulSetUpdateFailureActionRollback
+ }
+ // Recreate
+ if sts.Recreate.OnDataLoss == "" {
+ sts.Recreate.OnDataLoss = chi.OnStatefulSetRecreateOnDataLossActionRecreate
+ }
+ if sts.Recreate.OnUpdateFailure == "" {
+ sts.Recreate.OnUpdateFailure = chi.OnStatefulSetRecreateOnUpdateFailureActionRecreate
+ }
+ return sts
+}
+
+func (n *Normalizer) normalizeReconcileHost(rh chi.ReconcileHost) chi.ReconcileHost {
+ // Normalize
+ rh = rh.Normalize(types.NewStringBool(false), true)
+ // Enable startup probe wait so operator waits for each pod's Keeper process
+ // to start (ruok/imok) before proceeding to the next host.
+ // This prevents simultaneous pod restarts that would cause quorum loss.
+ // Readiness wait stays false to avoid deadlock on fresh clusters
+ // where Raft quorum doesn't exist yet.
+ if rh.Wait.Probes == nil {
+ rh.Wait.Probes = &chi.ReconcileHostWaitProbes{}
+ }
+ rh.Wait.Probes.Startup = types.NewStringBool(true)
+ return rh
+}
+
func (n *Normalizer) normalizeReconcileCleanup(cleanup *chi.Cleanup) *chi.Cleanup {
if cleanup == nil {
cleanup = chi.NewCleanup()
@@ -545,6 +629,8 @@ func (n *Normalizer) normalizeClusterStage1(cluster *chk.Cluster) *chk.Cluster {
func (n *Normalizer) normalizeClusterStage2(cluster *chk.Cluster) *chk.Cluster {
// Inherit from .spec.configuration.files
cluster.InheritFilesFrom(n.req.GetTarget())
+ // Inherit from .spec.reconciling
+ cluster.InheritClusterReconcileFrom(n.req.GetTarget())
// Inherit from .spec.defaults
cluster.InheritTemplatesFrom(n.req.GetTarget())
@@ -553,6 +639,7 @@ func (n *Normalizer) normalizeClusterStage2(cluster *chk.Cluster) *chk.Cluster {
cluster.PDBManaged = n.normalizePDBManaged(cluster.PDBManaged)
cluster.PDBMaxUnavailable = n.normalizePDBMaxUnavailable(cluster.PDBMaxUnavailable)
+ cluster.Reconcile = n.normalizeClusterReconcile(cluster.Reconcile)
n.appendClusterSecretEnvVar(cluster)
@@ -668,6 +755,15 @@ func (n *Normalizer) normalizeClusterLayoutShardsCountAndReplicasCount(clusterLa
return clusterLayout
}
+func (n *Normalizer) normalizeClusterReconcile(reconcile *chi.ClusterReconcile) *chi.ClusterReconcile {
+ reconcile = reconcile.Ensure()
+
+ reconcile.Runtime = n.normalizeReconcileRuntime(reconcile.Runtime)
+ reconcile.StatefulSet = n.normalizeReconcileStatefulSet(reconcile.StatefulSet)
+ reconcile.Host = n.normalizeReconcileHost(reconcile.Host)
+ return reconcile
+}
+
// ensureClusterLayoutShards ensures slice layout.Shards is in place
func (n *Normalizer) ensureClusterLayoutShards(layout *chk.ChkClusterLayout) {
// Disposition of shards in slice would be
@@ -709,7 +805,7 @@ func (n *Normalizer) normalizeShardStage2(shard *chk.ChkShard, cluster *chk.Clus
shard.Files = n.normalizeConfigurationFiles(shard.Files)
shard.InheritTemplatesFrom(cluster)
// Internal replication uses ReplicasCount thus it has to be normalized after shard ReplicaCount normalized
- //n.normalizeShardInternalReplication(shard)
+ n.normalizeShardInternalReplication(shard)
}
// normalizeReplicaStage1 normalizes a replica - walks over all fields
@@ -825,3 +921,14 @@ func (n *Normalizer) normalizeReplicaHosts(replica *chk.ChkReplica, cluster *chk
replica.Hosts = append(replica.Hosts, host)
}
}
+
+// normalizeShardInternalReplication ensures reasonable values in
+// .spec.configuration.clusters.layout.shards.internalReplication
+func (n *Normalizer) normalizeShardInternalReplication(shard *chk.ChkShard) {
+ // Shards with replicas are expected to have internal replication on by default
+ //defaultInternalReplication := false
+ //if shard.ReplicasCount > 1 {
+ // defaultInternalReplication = true
+ //}
+ //shard.InternalReplication = shard.InternalReplication.Normalize(defaultInternalReplication)
+}
diff --git a/pkg/util/dump.go b/pkg/util/dump.go
index 38de501e7..2125e2603 100644
--- a/pkg/util/dump.go
+++ b/pkg/util/dump.go
@@ -15,6 +15,8 @@
package util
import (
+ "reflect"
+
dumper "github.com/sanity-io/litter"
)
@@ -27,14 +29,18 @@ func Dump(obj interface{}) (out string) {
}()
d := dumper.Options{
- Separator: " ",
- StrictGo: true,
- //Compact :true,
- //StripPackageNames :true,
+ Separator: " ",
+ StrictGo: false,
+ Compact: true,
+ StripPackageNames: true,
//HidePrivateFields: true,
//FieldExclusions: regexp.MustCompile(`^(XXX_.*)$`), // XXX_ is a prefix of fields generated by protoc-gen-go
//HideZeroValues :true,
//DisablePointerReplacement : true,
+ // Skip fields tagged testdiff:"ignore" — consistent with messagediff behavior
+ FieldFilter: func(f reflect.StructField, _ reflect.Value) bool {
+ return f.Tag.Get("testdiff") != "ignore"
+ },
}
return d.Sdump(obj)
}
diff --git a/pkg/util/messagediff.go b/pkg/util/messagediff.go
index dec3b87d6..7c0374349 100644
--- a/pkg/util/messagediff.go
+++ b/pkg/util/messagediff.go
@@ -64,14 +64,12 @@ func PrintPath(path *messagediff.Path, defaultPath string) (res string) {
}
func PrintTrimmedValue(value any) string {
- valueFull := fmt.Sprintf("%s", Dump(value))
- ln := len(valueFull)
- if (0 < ln) && (ln < 300) {
- return valueFull
- } else {
- valueShort := fmt.Sprintf("%+v", value)
- return valueShort
+ const maxLen = 256
+ str := Dump(value)
+ if len(str) <= maxLen {
+ return str
}
+ return str[:maxLen] + "...(value truncated for brevity)"
}
// MessageDiffItemString stringifies one map[*messagediff.Path]interface{} item
diff --git a/release b/release
index 3f44db947..4e8f395fa 100644
--- a/release
+++ b/release
@@ -1 +1 @@
-0.25.6
+0.26.0
diff --git a/releases b/releases
index 76765ebc9..6c88ef957 100644
--- a/releases
+++ b/releases
@@ -1,3 +1,4 @@
+0.25.6
0.25.5
0.25.4
0.25.3
diff --git a/tests/e2e/kubectl.py b/tests/e2e/kubectl.py
index 3906bed34..4cde2cd89 100644
--- a/tests/e2e/kubectl.py
+++ b/tests/e2e/kubectl.py
@@ -7,7 +7,7 @@
from testflows.asserts import error
# from testflows.connect import Shell
-# import e2e.settings as settings
+import e2e.settings as settings
import e2e.yaml_manifest as yaml_manifest
import e2e.util as util
@@ -73,6 +73,9 @@ def delete_kind(kind, name, ns=None, ok_to_fail=False, shell=None):
def delete_chi(chi, ns=None, wait=True, ok_undeleted = False, ok_to_fail=False, shell=None):
+ if settings.no_cleanup:
+ print(f"NO_CLEANUP is set, skipping delete_chi: {chi}")
+ return
delete_kind("chi", chi, ns=ns, ok_to_fail=ok_to_fail, shell=shell)
if wait:
wait_objects(
@@ -98,6 +101,9 @@ def delete_chi(chi, ns=None, wait=True, ok_undeleted = False, ok_to_fail=False,
def delete_chk(chk, ns=None, wait=True, ok_to_fail=False, shell=None):
+ if settings.no_cleanup:
+ print(f"NO_CLEANUP is set, skipping delete_chk: {chk}")
+ return
delete_kind("chk", chk, ns=ns, ok_to_fail=ok_to_fail, shell=shell)
@@ -583,6 +589,12 @@ def get_pod_ports(chi_name, pod_name="", ns=None, shell=None):
ports.append(p["containerPort"])
return ports
+def get_operator_pod(ns=None, shell=None):
+ out = launch(f"get pod -l app=clickhouse-operator -o=custom-columns=field:.metadata.name", ns=ns, ok_to_fail=True, shell=shell).splitlines()
+ if len(out) > 1:
+ return out[1]
+ else:
+ return ""
def check_pod_ports(chi_name, ports, ns=None, shell=None):
pod_ports = get_pod_ports(chi_name, ns=ns, shell=shell)
diff --git a/tests/e2e/manifests/chi/test-003-complex-layout.yaml b/tests/e2e/manifests/chi/test-003-complex-layout.yaml
index 590c36258..dfcfcfb62 100644
--- a/tests/e2e/manifests/chi/test-003-complex-layout.yaml
+++ b/tests/e2e/manifests/chi/test-003-complex-layout.yaml
@@ -15,10 +15,10 @@ spec:
replicas:
- name: replica0-0
settings:
- display_name: replica0
+ default_replica_name: myreplica0
- name: replica0-1
settings:
- display_name: replica1
+ default_replica_name: myreplica1
- name: shard1
replicas:
- name: replica1-0
diff --git a/tests/e2e/manifests/chi/test-042-abort-1.yaml b/tests/e2e/manifests/chi/test-042-abort-1.yaml
new file mode 100644
index 000000000..780b12d30
--- /dev/null
+++ b/tests/e2e/manifests/chi/test-042-abort-1.yaml
@@ -0,0 +1,31 @@
+apiVersion: clickhouse.altinity.com/v1
+kind: ClickHouseInstallation
+metadata:
+ name: test-010042-2
+spec:
+ reconcile:
+ statefulSet:
+ recreate:
+ onUpdateFailure: abort
+ templates:
+ podTemplates:
+ - name: default
+ spec:
+ containers:
+ - name: clickhouse-pod
+ image: clickhouse/clickhouse-server:24.8
+ volumeClaimTemplates:
+ - name: data-volume-claim
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 100Mi
+ defaults:
+ templates:
+ podTemplate: default
+ dataVolumeClaimTemplate: data-volume-claim
+ configuration:
+ clusters:
+ - name: default
diff --git a/tests/e2e/manifests/chi/test-042-abort-2.yaml b/tests/e2e/manifests/chi/test-042-abort-2.yaml
new file mode 100644
index 000000000..fec9dbee7
--- /dev/null
+++ b/tests/e2e/manifests/chi/test-042-abort-2.yaml
@@ -0,0 +1,31 @@
+apiVersion: clickhouse.altinity.com/v1
+kind: ClickHouseInstallation
+metadata:
+ name: test-010042-2
+spec:
+ reconcile:
+ statefulSet:
+ recreate:
+ onUpdateFailure: abort
+ templates:
+ podTemplates:
+ - name: default
+ spec:
+ containers:
+ - name: clickhouse-pod
+ image: clickhouse/clickhouse-server:25.3
+ volumeClaimTemplates:
+ - name: data-volume-claim
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 100Mi
+ defaults:
+ templates:
+ podTemplate: default
+ dataVolumeClaimTemplate: data-volume-claim
+ configuration:
+ clusters:
+ - name: default
diff --git a/tests/e2e/manifests/chi/test-042-abort-3.yaml b/tests/e2e/manifests/chi/test-042-abort-3.yaml
new file mode 100644
index 000000000..9352411c8
--- /dev/null
+++ b/tests/e2e/manifests/chi/test-042-abort-3.yaml
@@ -0,0 +1,31 @@
+apiVersion: clickhouse.altinity.com/v1
+kind: ClickHouseInstallation
+metadata:
+ name: test-010042-2
+spec:
+ reconcile:
+ statefulSet:
+ recreate:
+ onUpdateFailure: abort
+ templates:
+ podTemplates:
+ - name: default
+ spec:
+ containers:
+ - name: clickhouse-pod
+ image: clickhouse/clickhouse-server:25.8
+ volumeClaimTemplates:
+ - name: data-volume-claim
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Mi
+ defaults:
+ templates:
+ podTemplate: default
+ dataVolumeClaimTemplate: data-volume-claim
+ configuration:
+ clusters:
+ - name: default
diff --git a/tests/e2e/manifests/chit/tpl-clickhouse-stable.yaml b/tests/e2e/manifests/chit/tpl-clickhouse-stable.yaml
index f741412a9..84f7fce5c 100644
--- a/tests/e2e/manifests/chit/tpl-clickhouse-stable.yaml
+++ b/tests/e2e/manifests/chit/tpl-clickhouse-stable.yaml
@@ -13,6 +13,6 @@ spec:
spec:
containers:
- name: clickhouse-pod
- image: altinity/clickhouse-server:25.3.6.10034.altinitystable
+ image: altinity/clickhouse-server:25.8.16.10001.altinitystable
imagePullPolicy: IfNotPresent
diff --git a/tests/e2e/manifests/chk/test-020000-chk-sa.yaml b/tests/e2e/manifests/chk/test-020000-chk-sa.yaml
new file mode 100644
index 000000000..8db86ffa6
--- /dev/null
+++ b/tests/e2e/manifests/chk/test-020000-chk-sa.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: test-020000-chk-sa
diff --git a/tests/e2e/manifests/chk/test-020000-chk.yaml b/tests/e2e/manifests/chk/test-020000-chk.yaml
index fd3d413ad..ba6c00750 100644
--- a/tests/e2e/manifests/chk/test-020000-chk.yaml
+++ b/tests/e2e/manifests/chk/test-020000-chk.yaml
@@ -25,6 +25,7 @@ spec:
- name: clickhouse-keeper
imagePullPolicy: IfNotPresent
image: "clickhouse/clickhouse-keeper:25.3"
+ serviceAccountName: test-020000-chk-sa
volumeClaimTemplates:
- name: default
spec:
diff --git a/tests/e2e/manifests/chi/test-049-clickhouse-keeper-upgrade.yaml b/tests/e2e/manifests/chk/test-020003-chi-chk-upgrade.yaml
similarity index 71%
rename from tests/e2e/manifests/chi/test-049-clickhouse-keeper-upgrade.yaml
rename to tests/e2e/manifests/chk/test-020003-chi-chk-upgrade.yaml
index 59ae8e336..e26252b3b 100644
--- a/tests/e2e/manifests/chi/test-049-clickhouse-keeper-upgrade.yaml
+++ b/tests/e2e/manifests/chk/test-020003-chi-chk-upgrade.yaml
@@ -1,17 +1,16 @@
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallation"
metadata:
- name: test-049-clickhouse-keeper-upgrade
+ name: test-020003-chi-chk-upgrade
spec:
useTemplates:
- name: clickhouse-version
configuration:
zookeeper:
nodes:
- - host: keeper-clickhouse-keeper
+ - host: keeper-test-020003-chk
port: 2181
clusters:
- name: default
layout:
- shardsCount: 1
replicasCount: 2
\ No newline at end of file
diff --git a/tests/e2e/manifests/chk/test-020003-chk-2.yaml b/tests/e2e/manifests/chk/test-020003-chk-2.yaml
new file mode 100644
index 000000000..e667080ce
--- /dev/null
+++ b/tests/e2e/manifests/chk/test-020003-chk-2.yaml
@@ -0,0 +1,20 @@
+apiVersion: "clickhouse-keeper.altinity.com/v1"
+kind: "ClickHouseKeeperInstallation"
+metadata:
+ name: test-020003-chk
+spec:
+ defaults:
+ templates:
+ podTemplate: default
+ configuration:
+ clusters:
+ - name: keeper
+ layout:
+ replicasCount: 3
+ templates:
+ podTemplates:
+ - name: default
+ spec:
+ containers:
+ - name: clickhouse-keeper
+ image: "clickhouse/clickhouse-keeper:25.8"
diff --git a/tests/e2e/manifests/chk/test-020003-chk.yaml b/tests/e2e/manifests/chk/test-020003-chk.yaml
new file mode 100644
index 000000000..000ce9736
--- /dev/null
+++ b/tests/e2e/manifests/chk/test-020003-chk.yaml
@@ -0,0 +1,20 @@
+apiVersion: "clickhouse-keeper.altinity.com/v1"
+kind: "ClickHouseKeeperInstallation"
+metadata:
+ name: test-020003-chk
+spec:
+ defaults:
+ templates:
+ podTemplate: default
+ configuration:
+ clusters:
+ - name: keeper
+ layout:
+ replicasCount: 3
+ templates:
+ podTemplates:
+ - name: default
+ spec:
+ containers:
+ - name: clickhouse-keeper
+ image: "clickhouse/clickhouse-keeper:25.3"
diff --git a/tests/e2e/manifests/chk/test-020008-chi-fips.yaml b/tests/e2e/manifests/chk/test-020008-chi-fips.yaml
new file mode 100644
index 000000000..27cc1a7b2
--- /dev/null
+++ b/tests/e2e/manifests/chk/test-020008-chi-fips.yaml
@@ -0,0 +1,24 @@
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+ name: test-020008-chi-fips
+spec:
+ defaults:
+ templates:
+ podTemplate: fips
+ configuration:
+ zookeeper:
+ nodes:
+ - host: keeper-test-020008-chk-fips
+ port: 2181
+ clusters:
+ - name: default
+ layout:
+ replicasCount: 2
+ templates:
+ podTemplates:
+ - name: fips
+ spec:
+ containers:
+ - name: clickhouse-pod
+ image: altinity/clickhouse-server:24.3.5.48.altinityfips
\ No newline at end of file
diff --git a/tests/e2e/manifests/chk/test-020008-chk-fips.yaml b/tests/e2e/manifests/chk/test-020008-chk-fips.yaml
new file mode 100644
index 000000000..7851ee61f
--- /dev/null
+++ b/tests/e2e/manifests/chk/test-020008-chk-fips.yaml
@@ -0,0 +1,20 @@
+apiVersion: "clickhouse-keeper.altinity.com/v1"
+kind: "ClickHouseKeeperInstallation"
+metadata:
+ name: test-020008-chk-fips
+spec:
+ defaults:
+ templates:
+ podTemplate: fips
+ configuration:
+ clusters:
+ - name: keeper
+ layout:
+ replicasCount: 1
+ templates:
+ podTemplates:
+ - name: fips
+ spec:
+ containers:
+ - name: clickhouse-keeper
+ image: altinity/clickhouse-keeper:24.3.5.48.altinityfips
diff --git a/tests/e2e/run_tests_keeper.sh b/tests/e2e/run_tests_keeper.sh
index 68318a3ff..3597b30bb 100755
--- a/tests/e2e/run_tests_keeper.sh
+++ b/tests/e2e/run_tests_keeper.sh
@@ -1,10 +1,18 @@
#!/bin/bash
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-pip3 install -r "$CUR_DIR/../image/requirements.txt"
+source "${CUR_DIR}/test_common.sh"
-export OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"test"}"
-export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}"
-export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}"
+IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}"
-ONLY="${ONLY:-"*"}"
-python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_keeper/${ONLY}" --native
+common_install_pip_requirements
+common_export_test_env
+
+RUN_ALL_FLAG=$(common_convert_run_all)
+
+python3 "${COMMON_DIR}/../regression.py" \
+ --only="/regression/e2e.test_keeper/${ONLY}" \
+ ${RUN_ALL_FLAG} \
+ -o short \
+ --trim-results on \
+ --debug \
+ --native
diff --git a/tests/e2e/run_tests_keeper_local.sh b/tests/e2e/run_tests_keeper_local.sh
new file mode 100755
index 000000000..ac41f284d
--- /dev/null
+++ b/tests/e2e/run_tests_keeper_local.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+source "${CUR_DIR}/test_common.sh"
+
+IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"IfNotPresent"}"
+
+common_minikube_reset
+common_preload_images "${PRELOAD_IMAGES_KEEPER[@]}"
+common_build_and_load_images && \
+common_run_test_script "run_tests_keeper.sh"
diff --git a/tests/e2e/run_tests_local.sh b/tests/e2e/run_tests_local.sh
index 7a98760a1..a37a24413 100755
--- a/tests/e2e/run_tests_local.sh
+++ b/tests/e2e/run_tests_local.sh
@@ -1,165 +1,118 @@
#!/bin/bash
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+source "${CUR_DIR}/test_common.sh"
-OPERATOR_VERSION="${OPERATOR_VERSION:-"dev"}"
-OPERATOR_DOCKER_REPO="${OPERATOR_DOCKER_REPO:-"altinity/clickhouse-operator"}"
-OPERATOR_IMAGE="${OPERATOR_IMAGE:-"${OPERATOR_DOCKER_REPO}:${OPERATOR_VERSION}"}"
-METRICS_EXPORTER_DOCKER_REPO="${METRICS_EXPORTER_DOCKER_REPO:-"altinity/metrics-exporter"}"
-METRICS_EXPORTER_IMAGE="${METRICS_EXPORTER_IMAGE:-"${METRICS_EXPORTER_DOCKER_REPO}:${OPERATOR_VERSION}"}"
-IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"IfNotPresent"}"
-OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"test"}"
-OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}"
-ONLY="${ONLY:-"*"}"
-MINIKUBE_RESET="${MINIKUBE_RESET:-""}"
-VERBOSITY="${VERBOSITY:-"2"}"
-# We may want run all tests to the end ignoring failed tests in the process
-RUN_ALL="${RUN_ALL:-""}"
-
-# Possible options are:
-# 1. operator
-# 2. keeper
-# 3. metrics
+# Test component select options:
+# - operator
+# - keeper
+# - metrics
+# Can be set via env var for non-interactive use: WHAT=metrics ./run_tests_local.sh
WHAT="${WHAT}"
-# Possible options are:
-# 1. replace
-# 2. apply
-KUBECTL_MODE="${KUBECTL_MODE:-"apply"}"
+# Repeat mode options:
+# - success = repeat until success
+# - failure = repeat until failure
+# - not specified / empty = single run
+# Usage: REPEAT_UNTIL=success ./run_tests_local.sh
+REPEAT_UNTIL="${REPEAT_UNTIL:-""}"
#
-#
+# Interactive menu (or non-interactive if WHAT is already set)
#
function select_test_goal() {
local specified_goal="${1}"
- if [[ ! -z "${specified_goal}" ]]; then
- echo "Having specified explicitly: ${specified_goal}"
- return 0
- else
- echo "What would you like to start? Possible options:"
- echo " 1 - test operator"
- echo " 2 - test keeper"
- echo " 3 - test metrics"
- echo -n "Enter your choice (1, 2, 3): "
- read COMMAND
- # Trim EOL from the command received
- COMMAND=$(echo "${COMMAND}" | tr -d '\n\t\r ')
- case "${COMMAND}" in
- "1")
- echo "picking operator"
- return 1
- ;;
- "2")
- echo "piking keeper"
- return 2
- ;;
- "3")
- echo "picking metrics"
- return 3
- ;;
- *)
- echo "don't know what '${COMMAND}' is, so picking operator"
- return 1
- ;;
- esac
+ if [[ -n "${specified_goal}" ]]; then
+ echo "Having specified explicitly: ${specified_goal}" >&2
+ echo "${specified_goal}"
+ return
fi
-}
-#
-#
-#
-function goal_name() {
- local goal_code=${1}
- case "${goal_code}" in
- "0")
- echo "${WHAT}"
- ;;
- "1")
- echo "operator"
- ;;
- "2")
- echo "keeper"
- ;;
- "3")
- echo "metrics"
- ;;
+ echo "What would you like to start? Possible options:" >&2
+ echo " 1 - test operator" >&2
+ echo " 2 - test keeper" >&2
+ echo " 3 - test metrics" >&2
+ echo -n "Enter your choice (1, 2, 3): " >&2
+ read COMMAND
+ COMMAND=$(echo "${COMMAND}" | tr -d '\n\t\r ')
+ case "${COMMAND}" in
+ "1") echo "operator" ;;
+ "2") echo "keeper" ;;
+ "3") echo "metrics" ;;
*)
+ echo "don't know what '${COMMAND}' is, so picking operator" >&2
echo "operator"
;;
esac
}
-select_test_goal "${WHAT}"
-WHAT=$(goal_name $?)
+WHAT=$(select_test_goal "${WHAT}")
-echo "Provided command is: ${WHAT}"
-echo -n "Which means we are going to "
+# Map test goal to dedicated local script
case "${WHAT}" in
"operator")
- DEFAULT_EXECUTABLE="run_tests_operator.sh"
- echo "test OPERATOR"
+ LOCAL_SCRIPT="run_tests_operator_local.sh"
+ echo "Selected: test OPERATOR"
;;
"keeper")
- DEFAULT_EXECUTABLE="run_tests_keeper.sh"
- echo "test KEEPER"
+ LOCAL_SCRIPT="run_tests_keeper_local.sh"
+ echo "Selected: test KEEPER"
;;
"metrics")
- DEFAULT_EXECUTABLE="run_tests_metrics.sh"
- echo "test METRICS"
+ LOCAL_SCRIPT="run_tests_metrics_local.sh"
+ echo "Selected: test METRICS"
;;
*)
- echo "exit because I do not know what '${WHAT}' is"
+ echo "Unknown test type: '${WHAT}', exiting"
exit 1
;;
esac
-TIMEOUT=30
-echo "Press to start test immediately (if you agree with specified options)"
-echo "In case no input provided tests would start in ${TIMEOUT} seconds automatically"
-read -t ${TIMEOUT}
-
-EXECUTABLE="${EXECUTABLE:-"${DEFAULT_EXECUTABLE}"}"
-MINIKUBE_PRELOAD_IMAGES="${MINIKUBE_PRELOAD_IMAGES:-""}"
-
-if [[ ! -z "${MINIKUBE_RESET}" ]]; then
- SKIP_K9S="yes" ./run_minikube_reset.sh
+# Only wait for confirmation when running interactively (stdin is a terminal)
+if [ -t 0 ]; then
+ TIMEOUT=30
+    echo "Press Enter to start the test immediately (if you agree with the specified options)"
+ echo "In case no input provided tests would start in ${TIMEOUT} seconds automatically"
+ read -t ${TIMEOUT}
fi
-if [[ ! -z "${MINIKUBE_PRELOAD_IMAGES}" ]]; then
- echo "pre-load images into minikube"
- IMAGES="
- clickhouse/clickhouse-server:23.3
- clickhouse/clickhouse-server:23.8
- clickhouse/clickhouse-server:24.3
- clickhouse/clickhouse-server:24.8
- clickhouse/clickhouse-server:25.3
- clickhouse/clickhouse-server:latest
- altinity/clickhouse-server:24.8.14.10459.altinitystable
- docker.io/zookeeper:3.8.4
- "
- for image in ${IMAGES}; do
- docker pull -q ${image} && \
- echo "pushing to minikube" && \
- minikube image load ${image} --overwrite=false --daemon=true
- done
- echo "images pre-loaded"
-fi
-
-#
-# Build images and run tests
-#
-echo "Build" && \
-VERBOSITY="${VERBOSITY}" ${CUR_DIR}/../../dev/image_build_all_dev.sh && \
-echo "Load images" && \
-minikube image load "${OPERATOR_IMAGE}" && \
-minikube image load "${METRICS_EXPORTER_IMAGE}" && \
-echo "Images prepared" && \
-OPERATOR_DOCKER_REPO="${OPERATOR_DOCKER_REPO}" \
-METRICS_EXPORTER_DOCKER_REPO="${METRICS_EXPORTER_DOCKER_REPO}" \
-OPERATOR_VERSION="${OPERATOR_VERSION}" \
-IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY}" \
-OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE}" \
-OPERATOR_INSTALL="${OPERATOR_INSTALL}" \
-ONLY="${ONLY}" \
-KUBECTL_MODE="${KUBECTL_MODE}" \
-RUN_ALL="${RUN_ALL}" \
-"${CUR_DIR}/${EXECUTABLE}"
+# Dispatch to the dedicated local script, with optional repeat mode
+case "${REPEAT_UNTIL}" in
+ "success")
+ # Repeat until tests pass
+ start=$(date)
+ run=1
+ echo "start run ${run}"
+ until "${CUR_DIR}/${LOCAL_SCRIPT}"; do
+ echo "run number ${run} failed"
+ echo "-------------------------------------------"
+ run=$((run+1))
+ echo "start run ${run}"
+ done
+ end=$(date)
+ echo "============================================="
+ echo "Run number ${run} succeeded"
+ echo "start time: ${start}"
+ echo "end time: ${end}"
+ ;;
+ "failure")
+ # Repeat until tests fail
+ start=$(date)
+ run=1
+ echo "start run ${run}"
+ while "${CUR_DIR}/${LOCAL_SCRIPT}"; do
+ echo "run number ${run} completed successfully"
+ echo "-------------------------------------------"
+ run=$((run+1))
+ echo "start run ${run}"
+ done
+ end=$(date)
+ echo "============================================="
+ echo "Run number ${run} failed"
+ echo "start time: ${start}"
+ echo "end time: ${end}"
+ ;;
+ *)
+ # Single run
+ "${CUR_DIR}/${LOCAL_SCRIPT}"
+ ;;
+esac
diff --git a/tests/e2e/run_tests_local_apply.sh b/tests/e2e/run_tests_local_apply.sh
deleted file mode 100755
index 9860aff25..000000000
--- a/tests/e2e/run_tests_local_apply.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-
-KUBECTL_MODE="apply" \
-"${CUR_DIR}/run_tests_local.sh"
diff --git a/tests/e2e/run_tests_local_replace.sh b/tests/e2e/run_tests_local_replace.sh
deleted file mode 100755
index cb109aad3..000000000
--- a/tests/e2e/run_tests_local_replace.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-
-KUBECTL_MODE="replace" \
-"${CUR_DIR}/run_tests_local.sh"
diff --git a/tests/e2e/run_tests_local_until_failed.sh b/tests/e2e/run_tests_local_until_failed.sh
deleted file mode 100755
index 75fae2ef3..000000000
--- a/tests/e2e/run_tests_local_until_failed.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-function start_run {
- local run_to_start=${1}
- echo "start run ${run_to_start}"
-}
-
-start=$(date)
-run=1
-start_run ${run}
-while ./run_tests_local.sh; do
- echo "run number ${run} completed successfully"
- echo "-------------------------------------------"
- echo "-------------------------------------------"
- echo "-------------------------------------------"
-
- run=$((run+1))
- start_run ${run}
-done
-end=$(date)
-
-echo "============================================="
-echo "Run number ${run} failed"
-echo "start time: ${start}"
-echo "end time: ${end}"
diff --git a/tests/e2e/run_tests_local_until_success.sh b/tests/e2e/run_tests_local_until_success.sh
deleted file mode 100755
index 5b1bd31d3..000000000
--- a/tests/e2e/run_tests_local_until_success.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-function start_run {
- local run_to_start=${1}
- echo "start run ${run_to_start}"
-}
-
-start=$(date)
-run=1
-start_run ${run}
-until ./run_tests_local.sh; do
- echo "run number ${run} failed"
- echo "-------------------------------------------"
- echo "-------------------------------------------"
- echo "-------------------------------------------"
-
- run=$((run+1))
- start_run ${run}
-done
-end=$(date)
-
-echo "============================================="
-echo "Run number ${run} succeeded"
-echo "start time: ${start}"
-echo "end time: ${end}"
diff --git a/tests/e2e/run_tests_metrics.sh b/tests/e2e/run_tests_metrics.sh
index 127cec49c..71a07234a 100755
--- a/tests/e2e/run_tests_metrics.sh
+++ b/tests/e2e/run_tests_metrics.sh
@@ -1,10 +1,19 @@
#!/bin/bash
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-pip3 install -r "$CUR_DIR/../image/requirements.txt"
+source "${CUR_DIR}/test_common.sh"
-export OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"test"}"
-export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}"
-export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}"
+IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}"
-ONLY="${ONLY:-"*"}"
-python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_metrics_exporter/${ONLY}" --native
+common_install_pip_requirements
+common_export_test_env
+
+RUN_ALL_FLAG=$(common_convert_run_all)
+
+python3 "${COMMON_DIR}/../regression.py" \
+ --only="/regression/e2e.test_metrics_exporter/${ONLY}" \
+ ${RUN_ALL_FLAG} \
+ --parallel off \
+ -o short \
+ --trim-results on \
+ --debug \
+ --native
diff --git a/tests/e2e/run_tests_metrics_local.sh b/tests/e2e/run_tests_metrics_local.sh
new file mode 100755
index 000000000..6cc08fbc4
--- /dev/null
+++ b/tests/e2e/run_tests_metrics_local.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+source "${CUR_DIR}/test_common.sh"
+
+IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"IfNotPresent"}"
+
+common_minikube_reset
+common_preload_images "${PRELOAD_IMAGES_METRICS[@]}"
+common_build_and_load_images && \
+common_run_test_script "run_tests_metrics.sh"
diff --git a/tests/e2e/run_tests_operator.sh b/tests/e2e/run_tests_operator.sh
index 2af4c1354..a7a8d9bfc 100755
--- a/tests/e2e/run_tests_operator.sh
+++ b/tests/e2e/run_tests_operator.sh
@@ -1,23 +1,18 @@
#!/bin/bash
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-pip3 install -r "$CUR_DIR/../image/requirements.txt"
+source "${CUR_DIR}/test_common.sh"
-export OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"test"}"
-export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}"
-export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}"
-RUN_ALL="${RUN_ALL:-""}"
-ONLY="${ONLY:-"*"}"
+IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}"
-# We may want run all tests to the end ignoring failed tests in the process
-if [[ ! -z "${RUN_ALL}" ]]; then
- RUN_ALL="--test-to-end"
-fi
+common_install_pip_requirements
+common_export_test_env
-python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_operator/${ONLY}" ${RUN_ALL} -o short --trim-results on --debug --native
-#python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_operator/${ONLY}" --test-to-end -o short --trim-results on --debug --native
-#python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_operator/${ONLY}" --parallel-pool ${MAX_PARALLEL} -o short --trim-results on --debug --native
-#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/* -o short --trim-results on --debug --native --native
-#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/* --trim-results on --debug --native --native
-#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/test_008_2* --trim-results on --debug --native --native
-#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/test_008_2* --trim-results on --debug --native -o short --native
-#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/*32* --trim-results on --debug --native -o short --native
+RUN_ALL_FLAG=$(common_convert_run_all)
+
+python3 "${COMMON_DIR}/../regression.py" \
+ --only="/regression/e2e.test_operator/${ONLY}" \
+ ${RUN_ALL_FLAG} \
+ -o short \
+ --trim-results on \
+ --debug \
+ --native
diff --git a/tests/e2e/run_tests_operator_local.sh b/tests/e2e/run_tests_operator_local.sh
new file mode 100755
index 000000000..45a00dca3
--- /dev/null
+++ b/tests/e2e/run_tests_operator_local.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+source "${CUR_DIR}/test_common.sh"
+
+IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"IfNotPresent"}"
+
+common_minikube_reset
+common_preload_images "${PRELOAD_IMAGES_OPERATOR[@]}"
+common_build_and_load_images && \
+common_run_test_script "run_tests_operator.sh"
diff --git a/tests/e2e/run_tests_parallel.sh b/tests/e2e/run_tests_parallel.sh
index cebc337cf..90c50239f 100755
--- a/tests/e2e/run_tests_parallel.sh
+++ b/tests/e2e/run_tests_parallel.sh
@@ -1,14 +1,17 @@
#!/bin/bash
set -e
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-pip3 install -r "$CUR_DIR/../image/requirements.txt"
+source "${CUR_DIR}/test_common.sh"
+
+IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}"
+
+common_install_pip_requirements
+common_export_test_env
+
rm -rfv /tmp/test*.log
pad="000"
MAX_PARALLEL=${MAX_PARALLEL:-5}
-export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}"
-export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}"
-
function run_test_parallel() {
test_names=("$@")
run_test_cmd=""
diff --git a/tests/e2e/settings.py b/tests/e2e/settings.py
index e3569edea..c1aa19bfe 100644
--- a/tests/e2e/settings.py
+++ b/tests/e2e/settings.py
@@ -80,4 +80,6 @@ def get_docker_compose_path():
minio_version = "latest"
+no_cleanup = os.environ.get("NO_CLEANUP", "").lower() in ("1", "true", "yes")
+
step_by_step = True if "STEP" in os.environ else False
diff --git a/tests/e2e/steps.py b/tests/e2e/steps.py
index 22881fa1d..7e4ea4ed0 100644
--- a/tests/e2e/steps.py
+++ b/tests/e2e/steps.py
@@ -13,6 +13,7 @@
from testflows.asserts import error
import e2e.kubectl as kubectl
+import e2e.settings as settings
@TestStep(Given)
@@ -45,6 +46,9 @@ def create_test_namespace(self, force=False):
@TestStep(Finally)
def delete_test_namespace(self):
+ if settings.no_cleanup:
+ print(f"NO_CLEANUP is set, skipping namespace deletion: {self.context.test_namespace}")
+ return
shell = get_shell()
self.context.shell = shell
util.delete_namespace(namespace=self.context.test_namespace, delete_chi=True)
@@ -167,7 +171,7 @@ def check_metrics_monitoring(
ns=operator_namespace,
)
if expect_metric != "":
- lines = [m for m in out.splitlines() if m.startswith(expect_metric)]
+ lines = [m for m in out.splitlines() if m.startswith(expect_metric) and expect_labels in m]
if len(lines) > 0:
metric = lines[0]
print(f"have: {metric}")
diff --git a/tests/e2e/test_common.sh b/tests/e2e/test_common.sh
new file mode 100755
index 000000000..6f52d2abf
--- /dev/null
+++ b/tests/e2e/test_common.sh
@@ -0,0 +1,139 @@
+#!/bin/bash
+
+# Common library for test scripts. Source this file, do not execute it.
+# Usage: source "${CUR_DIR}/test_common.sh"
+
+COMMON_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+
+# =============================================================================
+# Variable defaults (all overridable via environment)
+# =============================================================================
+
+# Operator versioning
+OPERATOR_VERSION="${OPERATOR_VERSION:-"dev"}"
+OPERATOR_DOCKER_REPO="${OPERATOR_DOCKER_REPO:-"altinity/clickhouse-operator"}"
+OPERATOR_IMAGE="${OPERATOR_IMAGE:-"${OPERATOR_DOCKER_REPO}:${OPERATOR_VERSION}"}"
+METRICS_EXPORTER_DOCKER_REPO="${METRICS_EXPORTER_DOCKER_REPO:-"altinity/metrics-exporter"}"
+METRICS_EXPORTER_IMAGE="${METRICS_EXPORTER_IMAGE:-"${METRICS_EXPORTER_DOCKER_REPO}:${OPERATOR_VERSION}"}"
+
+# NOTE: IMAGE_PULL_POLICY is intentionally NOT set here.
+# Test runners default to "Always" (CI), local scripts default to "IfNotPresent" (minikube).
+
+# Test execution
+OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-"test"}"
+OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}"
+ONLY="${ONLY:-"*"}"
+VERBOSITY="${VERBOSITY:-"2"}"
+RUN_ALL="${RUN_ALL:-""}"
+KUBECTL_MODE="${KUBECTL_MODE:-"apply"}"
+NO_CLEANUP="${NO_CLEANUP:-""}"
+
+# Minikube control
+MINIKUBE_RESET="${MINIKUBE_RESET:-""}"
+MINIKUBE_PRELOAD_IMAGES="${MINIKUBE_PRELOAD_IMAGES:-""}"
+
+# =============================================================================
+# Image lists for preloading into minikube
+# =============================================================================
+
+PRELOAD_IMAGES_OPERATOR=(
+ "clickhouse/clickhouse-server:23.3"
+ "clickhouse/clickhouse-server:23.8"
+ "clickhouse/clickhouse-server:24.3"
+ "clickhouse/clickhouse-server:24.8"
+ "clickhouse/clickhouse-server:25.3"
+ "clickhouse/clickhouse-server:latest"
+ "altinity/clickhouse-server:24.8.14.10459.altinitystable"
+ "docker.io/zookeeper:3.8.4"
+)
+
+PRELOAD_IMAGES_KEEPER=(
+ "clickhouse/clickhouse-server:23.3"
+ "clickhouse/clickhouse-server:23.8"
+ "clickhouse/clickhouse-server:24.3"
+ "clickhouse/clickhouse-server:24.8"
+ "clickhouse/clickhouse-server:25.3"
+ "clickhouse/clickhouse-server:latest"
+ "altinity/clickhouse-server:24.8.14.10459.altinitystable"
+ "docker.io/zookeeper:3.8.4"
+)
+
+PRELOAD_IMAGES_METRICS=(
+ "clickhouse/clickhouse-server:23.3"
+ "clickhouse/clickhouse-server:25.3"
+ "clickhouse/clickhouse-server:latest"
+)
+
+# =============================================================================
+# Functions
+# =============================================================================
+
+# Install Python dependencies needed by TestFlows
+function common_install_pip_requirements() {
+ pip3 install -r "${COMMON_DIR}/../image/requirements.txt"
+}
+
+# Convert RUN_ALL env var to --test-to-end flag.
+# Usage: RUN_ALL_FLAG=$(common_convert_run_all)
+function common_convert_run_all() {
+ if [[ -n "${RUN_ALL}" ]]; then
+ echo "--test-to-end"
+ fi
+}
+
+# Export the standard set of env vars that regression.py / settings.py expects
+function common_export_test_env() {
+ export OPERATOR_NAMESPACE
+ export OPERATOR_INSTALL
+ export IMAGE_PULL_POLICY
+ export NO_CLEANUP
+}
+
+# Reset minikube cluster if MINIKUBE_RESET is set
+function common_minikube_reset() {
+ if [[ -n "${MINIKUBE_RESET}" ]]; then
+ SKIP_K9S="yes" "${COMMON_DIR}/run_minikube_reset.sh"
+ fi
+}
+
+# Pull images and load them into minikube.
+# Only runs if MINIKUBE_PRELOAD_IMAGES is set.
+# Usage: common_preload_images "${PRELOAD_IMAGES_OPERATOR[@]}"
+function common_preload_images() {
+ if [[ -n "${MINIKUBE_PRELOAD_IMAGES}" ]]; then
+ echo "pre-load images into minikube"
+ for image in "$@"; do
+ docker pull -q "${image}" && \
+ echo "pushing ${image} to minikube" && \
+ minikube image load "${image}" --overwrite=false --daemon=true
+ done
+ echo "images pre-loaded"
+ fi
+}
+
+# Build operator + metrics-exporter docker images and load them into minikube
+function common_build_and_load_images() {
+ echo "Build" && \
+ VERBOSITY="${VERBOSITY}" "${COMMON_DIR}/../../dev/image_build_all_dev.sh" && \
+ echo "Load images" && \
+ minikube image load "${OPERATOR_IMAGE}" && \
+ minikube image load "${METRICS_EXPORTER_IMAGE}" && \
+ echo "Images prepared"
+}
+
+# Run a test runner script with all env vars forwarded.
+# Usage: common_run_test_script "run_tests_operator.sh"
+function common_run_test_script() {
+ local script="${1}"
+ OPERATOR_DOCKER_REPO="${OPERATOR_DOCKER_REPO}" \
+ METRICS_EXPORTER_DOCKER_REPO="${METRICS_EXPORTER_DOCKER_REPO}" \
+ OPERATOR_VERSION="${OPERATOR_VERSION}" \
+ IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY}" \
+ OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE}" \
+ OPERATOR_INSTALL="${OPERATOR_INSTALL}" \
+ ONLY="${ONLY}" \
+ KUBECTL_MODE="${KUBECTL_MODE}" \
+ RUN_ALL="${RUN_ALL}" \
+ NO_CLEANUP="${NO_CLEANUP}" \
+ "${COMMON_DIR}/${script}"
+}
diff --git a/tests/e2e/test_metrics_exporter.py b/tests/e2e/test_metrics_exporter.py
index 923abd08a..9b60d6ec7 100644
--- a/tests/e2e/test_metrics_exporter.py
+++ b/tests/e2e/test_metrics_exporter.py
@@ -108,13 +108,13 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma
"hosts": [
{
"name": "0-0",
- "hostname": "chi-test-017-multi-version-default-0-0.test.svc.cluster.local",
+ "hostname": "chi-test-017-multi-version-default-0-0.test.svc.cluster.local.",
"tcpPort": 9000,
"httpPort": 8123
},
{
"name": "1-0",
- "hostname": "chi-test-017-multi-version-default-1-0.test.svc.cluster.local",
+ "hostname": "chi-test-017-multi-version-default-1-0.test.svc.cluster.local.",
"tcpPort": 9000,
"httpPort": 8123
}
diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py
index 9f02c5cf7..11f6daab5 100644
--- a/tests/e2e/test_operator.py
+++ b/tests/e2e/test_operator.py
@@ -98,8 +98,21 @@ def test_010003(self):
"service": 5,
},
"pdb": {"cluster1": 0, "cluster2": 1},
+ "do_not_delete": 1
},
)
+
+ chi = "test-003-complex-layout"
+ cluster = "cluster1"
+ with Then('Cluster settings should be different on replicas'):
+ replica0 = clickhouse.query(chi, "select value from system.server_settings where name = 'default_replica_name'",
+ host=f"chi-{chi}-{cluster}-replica0-0-0")
+ replica1 = clickhouse.query(chi, "select value from system.server_settings where name = 'default_replica_name'",
+ host=f"chi-{chi}-{cluster}-replica0-1-0")
+ print(replica0)
+ print(replica1)
+ assert replica0 == "myreplica0" and replica1 == "myreplica1"
+
with Finally("I clean up"):
delete_test_namespace()
@@ -124,8 +137,7 @@ def test_010004(self):
},
)
with Finally("I clean up"):
- with By("deleting test namespace"):
- delete_test_namespace()
+ delete_test_namespace()
@TestScenario
@@ -455,6 +467,7 @@ def test_010008_3(self):
with Finally("I clean up"):
delete_test_namespace()
+
@TestCheck
def test_operator_upgrade(self, manifest, service, version_from, version_to=None, shell=None):
if version_to is None:
@@ -556,7 +569,7 @@ def test_operator_upgrade(self, manifest, service, version_from, version_to=None
@Name("test_010009_1. Test operator upgrade")
@Requirements(RQ_SRS_026_ClickHouseOperator_Managing_UpgradingOperator("1.0"))
@Tags("NO_PARALLEL")
-def test_010009_1(self, version_from="0.25.2", version_to=None):
+def test_010009_1(self, version_from="0.25.6", version_to=None):
if version_to is None:
version_to = self.context.operator_version
@@ -572,7 +585,7 @@ def test_010009_1(self, version_from="0.25.2", version_to=None):
@TestScenario
@Name("test_010009_2. Test operator upgrade")
@Tags("NO_PARALLEL")
-def test_010009_2(self, version_from="0.25.2", version_to=None):
+def test_010009_2(self, version_from="0.25.6", version_to=None):
if version_to is None:
version_to = self.context.operator_version
@@ -1711,12 +1724,12 @@ def check_schema_propagation(replicas):
"do_not_delete": 1,
},
)
- with Then(f"Tables are deleted in {self.context.keeper_type}"):
+ with Then(f"Tables are deleted in {self.context.keeper_type}", flags=XFAIL):
out = clickhouse.query_with_error(
chi_name,
f"SELECT count() FROM system.zookeeper WHERE path ='/clickhouse/{cluster}/tables/0/default'",
)
- print(f"Found {out} replicated tables in {self.context.keeper_type}")
+ note(f"Found {out} replicated tables in {self.context.keeper_type}")
assert "DB::Exception: No node" in out or out == "0"
with Finally("I clean up"):
@@ -2917,7 +2930,7 @@ def test_010025(self):
cnt_local = clickhouse.query_with_error(
chi,
"SELECT count() FROM test_local_025",
- "chi-test-025-rescaling-default-0-1.test.svc.cluster.local",
+ "chi-test-025-rescaling-default-0-1.test.svc.cluster.local.",
)
cnt_lb = clickhouse.query_with_error(chi, "SELECT count() FROM test_local_025")
cnt_distr_lb = clickhouse.query_with_error(chi, "SELECT count() FROM test_distr_025")
@@ -3607,11 +3620,9 @@ def test_010034(self):
)
with Then("check for `chi_clickhouse_metric_fetch_errors` is zero [1]"):
- out = kubectl.launch("get pods -l app=clickhouse-operator", ns=operator_namespace).splitlines()[1]
- operator_pod = re.split(r"[\t\r\n\s]+", out)[0]
check_metrics_monitoring(
operator_namespace=operator_namespace,
- operator_pod=operator_pod,
+ operator_pod=kubectl.get_operator_pod(),
expect_pattern="^chi_clickhouse_metric_fetch_errors{(.*?)} 0$",
)
@@ -3622,13 +3633,10 @@ def test_010034(self):
util.restart_operator()
kubectl.wait_chi_status(chi, "Completed")
- out = kubectl.launch("get pods -l app=clickhouse-operator", ns=current().context.operator_namespace).splitlines()[1]
- operator_pod = re.split(r"[\t\r\n\s]+", out)[0]
-
with Then("check for `chi_clickhouse_metric_fetch_errors` is not zero"):
check_metrics_monitoring(
operator_namespace=operator_namespace,
- operator_pod=operator_pod,
+ operator_pod=kubectl.get_operator_pod(),
expect_pattern="^chi_clickhouse_metric_fetch_errors{(.*?)} 1$",
)
@@ -3637,13 +3645,11 @@ def test_010034(self):
with And("Re-create operator pod in order to restart metrics exporter to update the configuration [2]"):
util.restart_operator()
- out = kubectl.launch("get pods -l app=clickhouse-operator", ns=current().context.operator_namespace).splitlines()[1]
- operator_pod = re.split(r"[\t\r\n\s]+", out)[0]
with Then("check for `chi_clickhouse_metric_fetch_errors` is zero [2]"):
check_metrics_monitoring(
operator_namespace=operator_namespace,
- operator_pod=operator_pod,
+ operator_pod=kubectl.get_operator_pod(),
expect_pattern="^chi_clickhouse_metric_fetch_errors{(.*?)} 0$",
)
@@ -3699,13 +3705,11 @@ def test_010034(self):
with And("Re-create operator pod in order to restart metrics exporter to update the configuration [3]"):
util.restart_operator()
- out = kubectl.launch("get pods -l app=clickhouse-operator", ns=current().context.operator_namespace).splitlines()[1]
- operator_pod = re.split(r"[\t\r\n\s]+", out)[0]
with Then("check for `chi_clickhouse_metric_fetch_errors` is zero [3]"):
check_metrics_monitoring(
operator_namespace=operator_namespace,
- operator_pod=operator_pod,
+ operator_pod=kubectl.get_operator_pod(),
expect_pattern="^chi_clickhouse_metric_fetch_errors{(.*?)} 0$",
)
@@ -3714,14 +3718,12 @@ def test_010034(self):
with And("Re-create operator pod in order to restart metrics exporter to update the configuration [4]"):
util.restart_operator()
- out = kubectl.launch("get pods -l app=clickhouse-operator", ns=current().context.operator_namespace).splitlines()[1]
- operator_pod = re.split(r"[\t\r\n\s]+", out)[0]
# 0.21.2+
with Then("check for `chi_clickhouse_metric_fetch_errors` is zero [4]"):
check_metrics_monitoring(
operator_namespace=operator_namespace,
- operator_pod=operator_pod,
+ operator_pod=kubectl.get_operator_pod(),
expect_pattern="^chi_clickhouse_metric_fetch_errors{(.*?)} 0$",
)
@@ -3735,6 +3737,7 @@ def test_010034(self):
@TestScenario
@Requirements(RQ_SRS_026_ClickHouseOperator_Managing_ReprovisioningVolume("1.0"))
@Name("test_010036. Check operator volume re-provisioning")
+@Tags("NO_PARALLEL")
def test_010036(self):
"""Check clickhouse operator recreates volumes and schema if volume is broken."""
create_shell_namespace_clickhouse_template()
@@ -4417,6 +4420,91 @@ def test_010042(self):
with Finally("I clean up"):
delete_test_namespace()
+@TestScenario
+@Name("test_010042_2. Test aborting changes that may recreate STS")
+def test_010042_2(self):
+ create_shell_namespace_clickhouse_template()
+
+ cluster = "default"
+ manifest = f"manifests/chi/test-042-abort-1.yaml"
+ chi = yaml_manifest.get_name(util.get_full_path(manifest))
+
+ with Given("CHI is created"):
+ kubectl.create_and_check(
+ manifest = "manifests/chi/test-042-abort-1.yaml",
+ check={
+ "pod_count": 1,
+ "do_not_delete": 1,
+ },
+ )
+
+ version_1 = "24.8"
+ version_2 = "25.3"
+ version_3 = "25.8"
+
+ with Then("CHI version is " + version_1):
+ ver = clickhouse.query(chi, "select version()")
+ assert version_1 in ver
+
+ with When("OnUpdateFailure is aborted"):
+ onUpdateFailure = kubectl.get_field("chi", chi, ".spec.reconcile.statefulSet.recreate.onUpdateFailure")
+ if onUpdateFailure != 'abort':
+ cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"replace","path":"/spec/reconcile/statefulSet/recreate/onUpdateFailure","value":"abort"}}]\''
+ kubectl.launch(cmd)
+ kubectl.wait_chi_status(chi, "InProgress")
+ kubectl.wait_chi_status(chi, "Completed")
+
+ with Then("Upgrade podTemplate.image to a different version should be allowed"):
+ kubectl.create_and_check(
+ manifest = "manifests/chi/test-042-abort-2.yaml",
+ check={
+ "pod_count": 1,
+ "do_not_delete": 1
+ },
+ )
+
+ with And("CHI version is changed to " + version_2):
+ ver = clickhouse.query(chi, "select version()")
+ assert version_2 in ver
+
+ with When("OnUpdateFailure is aborted"):
+ onUpdateFailure = kubectl.get_field("chi", chi, ".spec.reconcile.statefulSet.recreate.onUpdateFailure")
+ if onUpdateFailure != 'abort':
+ cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"replace","path":"/spec/reconcile/statefulSet/recreate/onUpdateFailure","value":"abort"}}]\''
+ kubectl.launch(cmd)
+ kubectl.wait_chi_status(chi, "InProgress")
+ kubectl.wait_chi_status(chi, "Completed")
+
+ with Then("Upgrade podTemplate.volumeClaimTemplate should fail"):
+ kubectl.create_and_check(
+ manifest = "manifests/chi/test-042-abort-3.yaml",
+ check={
+ "pod_count": 1,
+ "do_not_delete": 1,
+ "chi_status": "Aborted"
+ },
+ )
+
+ with And("CHI version is unchanged " + version_2):
+ ver = clickhouse.query(chi, "select version()")
+ assert version_2 in ver
+
+ with When("OnUpdateFailure is changed to recreate"):
+ onUpdateFailure = kubectl.get_field("chi", chi, ".spec.reconcile.statefulSet.recreate.onUpdateFailure")
+ if onUpdateFailure != 'recreate':
+ cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"replace","path":"/spec/reconcile/statefulSet/recreate/onUpdateFailure","value":"recreate"}}]\''
+ kubectl.launch(cmd)
+ kubectl.wait_chi_status(chi, "InProgress")
+ kubectl.wait_chi_status(chi, "Completed")
+
+ with Then("CHI reconcile should proceed, and CHI version is updated to " + version_3):
+ ver = clickhouse.query(chi, "select version()")
+ assert version_3 in ver
+
+
+ with Finally("I clean up"):
+ delete_test_namespace()
+
@TestCheck
@Name("test_043. Logs container customizing")
@@ -4621,8 +4709,7 @@ def test_010046(self):
manifest = f"manifests/chi/test-046-0-clickhouse-operator-metrics.yaml"
chi = yaml_manifest.get_name(util.get_full_path(manifest))
operator_namespace = current().context.operator_namespace
- out = kubectl.launch("get pods -l app=clickhouse-operator", ns=current().context.operator_namespace).splitlines()[1]
- operator_pod = re.split(r"[\t\r\n\s]+", out)[0]
+ operator_pod = kubectl.get_operator_pod()
with Given("CHI with 1 replica is installed"):
kubectl.create_and_check(
@@ -4842,11 +4929,10 @@ def test_labels(chi, type, key, value):
with Then("Check that exposed metrics do not have labels and annotations that are excluded"):
operator_namespace = current().context.operator_namespace
- out = kubectl.launch("get pods -l app=clickhouse-operator", ns=operator_namespace).splitlines()[1]
- operator_pod = re.split(r"[\t\r\n\s]+", out)[0]
+ operator_pod = kubectl.get_operator_pod()
- # chi_clickhouse_metric_VersionInteger{chi="test-050",exclude_this_annotation="test-050-annotation",hostname="chi-test-050-default-0-0.test-050-e1884706-9a94-11ef-a786-367ddacfe5fd.svc.cluster.local",include_this_annotation="test-050-annotation",include_this_label="test-050-label",namespace="test-050-e1884706-9a94-11ef-a786-367ddacfe5fd"}
- expect_labels = f"chi=\"test-050\",hostname=\"chi-test-050-default-0-0.{operator_namespace}.svc.cluster.local\",include_this_annotation=\"test-050-annotation\",include_this_label=\"test-050-label\""
+ # chi_clickhouse_metric_VersionInteger{chi="test-050",exclude_this_annotation="test-050-annotation",hostname="chi-test-050-default-0-0.test-050-e1884706-9a94-11ef-a786-367ddacfe5fd.svc.cluster.local.",include_this_annotation="test-050-annotation",include_this_label="test-050-label",namespace="test-050-e1884706-9a94-11ef-a786-367ddacfe5fd"}
+ expect_labels = f"chi=\"test-050\",hostname=\"chi-test-050-default-0-0.{operator_namespace}.svc.cluster.local.\",include_this_annotation=\"test-050-annotation\",include_this_label=\"test-050-label\""
check_metrics_monitoring(
operator_namespace=operator_namespace,
operator_pod=operator_pod,
@@ -4965,34 +5051,64 @@ def test_010054(self):
},
)
- with Then("Add suspend attribute to CHI"):
+ with When("Add suspend attribute to CHI"):
cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"add","path":"/spec/suspend","value":"yes"}}]\''
kubectl.launch(cmd)
- with Then(f"Update podTemplate to {new_version} and confirm that pod image is NOT updated"):
+ with Then(f"Update podTemplate to {new_version} and confirm that pod image is NOT updated"):
+ kubectl.create_and_check(
+ manifest="manifests/chi/test-006-ch-upgrade-2.yaml",
+ check={
+ "pod_count": 1,
+ "pod_image": old_version,
+ "chi_status": "Aborted",
+ "do_not_delete": 1,
+ },
+ )
+
+ with When("Remove suspend attribute from CHI"):
+ cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"remove","path":"/spec/suspend"}}]\''
+ kubectl.launch(cmd)
+
+ kubectl.wait_chi_status(chi, "InProgress")
+ kubectl.wait_chi_status(chi, "Completed")
+
+ with Then(f"Confirm that pod image is updated to {new_version}"):
+ kubectl.check_pod_image(chi, new_version)
+
+ with When(f"Update podTemplate to {old_version} back but do not wait for completion"):
kubectl.create_and_check(
- manifest="manifests/chi/test-006-ch-upgrade-2.yaml",
+ manifest="manifests/chi/test-006-ch-upgrade-1.yaml",
check={
- "pod_count": 1,
- "pod_image": old_version,
+ "chi_status": "InProgress",
"do_not_delete": 1,
},
)
- with Then("Remove suspend attribute from CHI"):
+ with And("Add suspend attribute to CHI"):
+ cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"add","path":"/spec/suspend","value":"yes"}}]\''
+ kubectl.launch(cmd)
+
+ with Then(f"Reconcile should be interrupted and pod image should remain at {new_version}"):
+ kubectl.wait_chi_status(chi, "Aborted", retries=5)
+ kubectl.check_pod_image(chi, new_version)
+
+ with When("Remove suspend attribute from CHI"):
cmd = f'patch chi {chi} --type=\'json\' --patch=\'[{{"op":"remove","path":"/spec/suspend"}}]\''
kubectl.launch(cmd)
- kubectl.wait_chi_status(chi, "InProgress")
- kubectl.wait_chi_status(chi, "Completed")
+ with Then("Reconcile should be resumed"):
+ kubectl.wait_chi_status(chi, "InProgress")
+ kubectl.wait_chi_status(chi, "Completed")
- with Then(f"Confirm that pod image is updated to {new_version}"):
- kubectl.check_pod_image(chi, new_version)
+ with And(f"Pod image should be reverted back to {old_version}"):
+ kubectl.check_pod_image(chi, old_version)
with Finally("I clean up"):
delete_test_namespace()
+
@TestScenario
@Name("test_010055. Test that restart rules can be merged from CHOP configuration")
def test_010055(self):
@@ -5128,16 +5244,35 @@ def test_010056(self):
assert out != "0"
with And("Replica still should be unready after reconcile timeout"):
- pod = kubectl.get("pod", f"chi-{chi}-{cluster}-0-1-0")
- ready = pod["metadata"]["labels"]["clickhouse.altinity.com/ready"]
+ ready = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-1-0", ".metadata.labels.clickhouse\.altinity\.com\/ready")
print(f"ready label={ready}")
assert ready != "yes", error("Replica should be unready")
+ with And("Replica should be included in the monitoring"): # as of 0.26.0
+ operator_namespace=current().context.operator_namespace
+ check_metrics_monitoring(
+ operator_namespace = current().context.operator_namespace,
+ operator_pod = kubectl.get_operator_pod(),
+ expect_metric = "chi_clickhouse_metric_VersionInteger",
+ expect_labels = f"chi-{chi}-{cluster}-0-1"
+ )
+ with And("Replica should report a replication queue"): # as of 0.26.0
+ operator_namespace=current().context.operator_namespace
+ check_metrics_monitoring(
+ operator_namespace = current().context.operator_namespace,
+ operator_pod = kubectl.get_operator_pod(),
+ expect_metric = "chi_clickhouse_metric_ReplicasSumQueueSize",
+ expect_labels = f"chi-{chi}-{cluster}-0-1"
+ )
+
with When("START REPLICATED SENDS"):
clickhouse.query(chi, "SYSTEM START REPLICATED SENDS", host=f"chi-{chi}-{cluster}-0-0-0")
- time.sleep(10)
- with Then("Replication delay should be zero"):
+ with Then("Replica should become ready"):
+ kubectl.wait_field("pod", f"chi-{chi}-{cluster}-0-1-0",
+ ".metadata.labels.clickhouse\.altinity\.com\/ready", value="yes")
+
+ with And("Replication delay should be zero"):
out = clickhouse.query(chi, "select max(absolute_delay) from system.replicas", host=f"chi-{chi}-{cluster}-0-1-0")
print(f"max(absolute_delay)={out}")
assert out == "0"
@@ -5193,8 +5328,7 @@ def test_010058(self): # Can be merged with test_034 potentially
with Given("Add rootCA to operator configuration"):
util.apply_operator_config("manifests/chopconf/test-058-chopconf.yaml")
- out = kubectl.launch("get pods -l app=clickhouse-operator", ns=current().context.operator_namespace).splitlines()[1]
- operator_pod = re.split(r"[\t\r\n\s]+", out)[0]
+ operator_pod = kubectl.get_operator_pod()
with Given("test-058-root-ca secret is installed"):
kubectl.apply(
@@ -5436,8 +5570,10 @@ def test_020000(self):
chk = yaml_manifest.get_name(util.get_full_path(chk_manifest))
with Given("Install CHK"):
+ kubectl.apply(util.get_full_path("manifests/chk/test-020000-chk-sa.yaml"))
kubectl.create_and_check(
- manifest=chk_manifest, kind="chk",
+ manifest=chk_manifest,
+ kind="chk",
check={
"pod_count": 1,
"pdb": {"keeper": 0},
@@ -5450,16 +5586,65 @@ def test_020000(self):
for o in chk_objects:
print(o)
+ with Then("Service account should be set"):
+ chk_pod_spec = kubectl.get_chk_pod_spec(chk)
+ assert chk_pod_spec["serviceAccountName"] == "test-020000-chk-sa"
+
with And("There should be a service for cluster a cluster"):
- kubectl.check_service(f"keeper-{chk}-service", "ClusterIP", headless = True)
+ kubectl.check_service(f"keeper-{chk}-service", "ClusterIP", headless=True)
with And("There should be a service for first replica"):
- kubectl.check_service(f"keeper-{chk}-0", "ClusterIP", headless = True)
+ kubectl.check_service(f"keeper-{chk}-0", "ClusterIP", headless=True)
with And("There should be a PVC"):
- assert kubectl.get_count("pvc", label = f"-l clickhouse-keeper.altinity.com/chk={chk}") == 1
+ assert kubectl.get_count("pvc", label=f"-l clickhouse-keeper.altinity.com/chk={chk}") == 1
+
+ with When("Stop CHK"):
+ cmd = f'patch chk {chk} --type=\'json\' --patch=\'[{{"op":"add","path":"/spec/stop","value":"yes"}}]\''
+ kubectl.launch(cmd)
+ kubectl.wait_chk_status(chk, "InProgress")
+ kubectl.wait_chk_status(chk, "Completed")
+ with Then("STS should be there but no running pods"):
+ label = f"-l clickhouse-keeper.altinity.com/chk={chk}"
+ assert kubectl.get_count('sts', label = label) == 1
+ assert kubectl.get_count('pod', label = label) == 0
+
+ with When("Resume CHK"):
+ cmd = f'patch chk {chk} --type=\'json\' --patch=\'[{{"op":"replace","path":"/spec/stop","value":"no"}}]\''
+ kubectl.launch(cmd)
+ kubectl.wait_chk_status(chk, "InProgress")
+ kubectl.wait_chk_status(chk, "Completed")
+ with Then("Both STS and Pod should be up"):
+ label = f"-l clickhouse-keeper.altinity.com/chk={chk}"
+ assert kubectl.get_count('sts', label = label) == 1
+ assert kubectl.get_count('pod', label = label) == 1
+
+ with When("Suspend CHK"):
+ cmd = f'patch chk {chk} --type=\'json\' --patch=\'[{{"op":"add","path":"/spec/suspend","value":"yes"}}]\''
+ kubectl.launch(cmd)
+
+ with Then("Stop CHK one more time"):
+ cmd = f'patch chk {chk} --type=\'json\' --patch=\'[{{"op":"replace","path":"/spec/stop","value":"yes"}}]\''
+ kubectl.launch(cmd)
+ time.sleep(15) # wait in case there was some sync issue
+ kubectl.wait_chk_status(chk, "Completed")
+ with Then("Stop should be ignored. Both STS and Pod should be up"):
+ label = f"-l clickhouse-keeper.altinity.com/chk={chk}"
+ assert kubectl.get_count('sts', label = label) == 1
+ assert kubectl.get_count('pod', label = label) == 1
+
+ with When("Unsuspend CHK"):
+ cmd = f'patch chk {chk} --type=\'json\' --patch=\'[{{"op":"remove","path":"/spec/suspend"}}]\''
+ kubectl.launch(cmd)
+
+ with Then("Reconcile should trigger"):
+ kubectl.wait_chk_status(chk, "InProgress")
+ kubectl.wait_chk_status(chk, "Completed")
- kubectl.delete_chk(chk)
+ with Then("CHK should be stopped"):
+ label = f"-l clickhouse-keeper.altinity.com/chk={chk}"
+ assert kubectl.get_count('sts', label = label) == 1
+ assert kubectl.get_count('pod', label = label) == 0
with Finally("I clean up"):
delete_test_namespace()
@@ -5488,18 +5673,19 @@ def test_020001(self):
objects[ch_kind] = kubectl.get_obj_names_grepped("pod,service,sts,pvc,cm,pdb,secret", grep=ch_name)
print(*objects[ch_kind], sep='\n')
- if ch_kind == 'chi':
- kubectl.delete_chi(ch_name)
- else:
- kubectl.delete_chk(ch_name)
+ with When(f"Delete {ch_kind}"):
+ if ch_kind == 'chi':
+ kubectl.delete_chi(ch_name)
+ else:
+ kubectl.delete_chk(ch_name)
- with Then("There should not objects with overallped names"):
+ with Then("There should not be objects with overlapped names"):
overlap = list(set(objects['chi']) & set(objects['chk']))
- if len(overlap)>0:
+ if len(overlap) > 0:
print("Overlapped objects:")
print(*overlap, sep='\n')
- assert len(overlap) == 0
+ assert len(overlap) == 0, f"{len(overlap)} overlapping resource(s):\n" + "\n".join(f" {o}" for o in overlap)
with Finally("I clean up"):
delete_test_namespace()
@@ -5534,49 +5720,63 @@ def test_020002(self):
@TestScenario
@Name("test_020003. Clickhouse-keeper upgrade")
+@Tags("NO_PARALLEL")
def test_020003(self):
"""Check that clickhouse-operator support upgrading clickhouse-keeper version
when clickhouse-keeper defined with ClickHouseKeeperInstallation."""
create_shell_namespace_clickhouse_template()
- util.require_keeper(keeper_type="chk",
- keeper_manifest="clickhouse-keeper-3-node-for-test-only.yaml")
- manifest = f"manifests/chi/test-049-clickhouse-keeper-upgrade.yaml"
- chi = yaml_manifest.get_name(util.get_full_path(manifest))
+
+ chk_manifest = f"manifests/chk/test-020003-chk.yaml"
+ chk_manifest_upgraded = f"manifests/chk/test-020003-chk-2.yaml"
+ chi_manifest = f"manifests/chk/test-020003-chi-chk-upgrade.yaml"
+ chi = yaml_manifest.get_name(util.get_full_path(chi_manifest))
+ chk = yaml_manifest.get_name(util.get_full_path(chk_manifest))
+
cluster = "default"
keeper_version_from = "25.3"
keeper_version_to = "25.8"
- with Given("CHI with 2 replicas"):
+
+ with Given("CHK with 3 replicas"):
kubectl.create_and_check(
- manifest=manifest,
+ manifest=chk_manifest,
+ kind = "chk",
check={
- "pod_count": 2,
+ "pod_count": 3,
"do_not_delete": 1,
},
)
- with And("Make sure Keeper is ready"):
- kubectl.wait_chk_status('clickhouse-keeper', 'Completed')
+
+ with And("CHI with 2 replicas"):
+ kubectl.create_and_check(
+ manifest=chi_manifest,
+ check={
+ "pod_count": 2,
+ "do_not_delete": 1,
+ },
+ )
check_replication(chi, {0, 1}, 1)
with When(f"I check clickhouse-keeper version is {keeper_version_from}"):
assert keeper_version_from in \
- kubectl.get_field('pod', 'chk-clickhouse-keeper-test-0-0-0', '.spec.containers[0].image'), error()
+ kubectl.get_field('pod', 'chk-test-020003-chk-keeper-0-0-0', '.spec.containers[0].image'), error()
with Then(f"I change keeper version to {keeper_version_to}"):
- cmd = f"""patch chk clickhouse-keeper --type='json' --patch='[{{"op":"replace","path":"/spec/templates/podTemplates/0/spec/containers/0/image","value":"clickhouse/clickhouse-keeper:{keeper_version_to}"}}]'"""
- kubectl.launch(cmd)
-
- with Then("I wait CHK status 1"):
- kubectl.wait_chk_status('clickhouse-keeper', 'InProgress')
- with Then("I wait CHK status 2"):
- kubectl.wait_chk_status('clickhouse-keeper', 'Completed')
+ kubectl.create_and_check(
+ manifest=chk_manifest_upgraded,
+ kind = "chk",
+ check={
+ "pod_count": 3,
+ "do_not_delete": 1,
+ },
+ )
with When(f"I check clickhouse-keeper version is changed to {keeper_version_to}"):
- kubectl.wait_field('pod', 'chk-clickhouse-keeper-test-0-0-0', '.spec.containers[0].image', f'clickhouse/clickhouse-keeper:{keeper_version_to}', retries=5)
- kubectl.wait_field('pod', 'chk-clickhouse-keeper-test-0-1-0', '.spec.containers[0].image', f'clickhouse/clickhouse-keeper:{keeper_version_to}', retries=5)
- kubectl.wait_field('pod', 'chk-clickhouse-keeper-test-0-2-0', '.spec.containers[0].image', f'clickhouse/clickhouse-keeper:{keeper_version_to}', retries=5)
+ kubectl.wait_field('pod', 'chk-test-020003-chk-keeper-0-0-0', '.spec.containers[0].image', f'clickhouse/clickhouse-keeper:{keeper_version_to}', retries=1)
+ kubectl.wait_field('pod', 'chk-test-020003-chk-keeper-0-1-0', '.spec.containers[0].image', f'clickhouse/clickhouse-keeper:{keeper_version_to}', retries=1)
+ kubectl.wait_field('pod', 'chk-test-020003-chk-keeper-0-2-0', '.spec.containers[0].image', f'clickhouse/clickhouse-keeper:{keeper_version_to}', retries=1)
with Then("Wait for ClickHouse to connect to Keeper properly"):
for attempt in retries(timeout=180, delay=5):
@@ -5781,6 +5981,7 @@ def test_020004_1(self):
@TestScenario
@Name("test_020005. Clickhouse-keeper scale-up/scale-down")
+@Tags("NO_PARALLEL")
def test_020005(self):
"""Check that clickhouse-operator support scale-up/scale-down without service interruption"""
@@ -5866,6 +6067,7 @@ def test_020005(self):
with Finally("I clean up"):
delete_test_namespace()
+
@TestScenario
@Name("test_020006. Test https://github.com/Altinity/clickhouse-operator/issues/1863")
def test_020006(self):
@@ -5883,11 +6085,10 @@ def test_020006(self):
}
)
- kubectl.delete_chk(chk)
-
with Finally("I clean up"):
delete_test_namespace()
+
@TestScenario
@Name("test_020007. Test fractional CPU requests/limits handling for CHK")
def test_020007(self):
@@ -5918,7 +6119,47 @@ def test_020007(self):
kubectl.force_chk_reconcile(chk, "reconcile2")
- kubectl.delete_chk(chk)
+ with Finally("I clean up"):
+ delete_test_namespace()
+
+@TestScenario
+@Name("test_020008. Test FIPS versions are properly supported by both in CHI and CHK")
+def test_020008(self):
+ create_shell_namespace_clickhouse_template()
+
+ chk_manifest = f"manifests/chk/test-020008-chk-fips.yaml"
+ chi_manifest = f"manifests/chk/test-020008-chi-fips.yaml"
+ chi = yaml_manifest.get_name(util.get_full_path(chi_manifest))
+ chk = yaml_manifest.get_name(util.get_full_path(chk_manifest))
+
+ cluster = "default"
+
+ with Given("CHK with FIPS versions"):
+ kubectl.create_and_check(
+ manifest=chk_manifest,
+ kind = "chk",
+ check={
+ "pod_count": 1,
+ "do_not_delete": 1,
+ },
+ )
+
+
+ with And("CHI with FIPS version"):
+ kubectl.create_and_check(
+ manifest=chi_manifest,
+ check={
+ "pod_count": 2,
+ "do_not_delete": 1,
+ },
+ )
+
+ with Then("Clickhouse version is a FIPS one"):
+ ver = clickhouse.query(chi, 'select version()')
+ print(ver)
+ assert "fips" in ver
+
+ check_replication(chi, {0, 1}, 1)
with Finally("I clean up"):
delete_test_namespace()
diff --git a/tests/e2e/util.py b/tests/e2e/util.py
index 8b4142cdb..76685cb9e 100644
--- a/tests/e2e/util.py
+++ b/tests/e2e/util.py
@@ -146,7 +146,8 @@ def wait_clickhouse_cluster_ready(chi):
pod=pod,
)
for host in chi["status"]["fqdns"]:
- svc_short_name = host.replace(f".{current().context.test_namespace}.svc.cluster.local", "")
+ svc_short_name = host.replace(f".{current().context.test_namespace}.svc.cluster.local.", "")
+ svc_short_name = svc_short_name.replace(f".{current().context.test_namespace}.svc.cluster.local", "")
if svc_short_name not in cluster_response:
with Then("Not ready, sleep 5 seconds"):
all_pods_ready = False
diff --git a/tests/regression.py b/tests/regression.py
index 6f4542b6d..496747970 100755
--- a/tests/regression.py
+++ b/tests/regression.py
@@ -7,8 +7,7 @@
xfails = {
# test_operator.py
- "/regression/e2e.test_operator/test_010021*": [(Fail, "Storage test are flaky on github")],
- "/regression/e2e.test_operator/test_020003*": [(Fail, "Keeper upgrade is flaky")],
+ "/regression/e2e.test_operator/test_010021*": [(Fail, "Storage test is flaky on github")],
"/regression/e2e.test_operator/test_020005*": [(Fail, "Keeper scale-up/scale-down is flaky")],
# test_clickhouse.py
"/regression/e2e.test_clickhouse/test_ch_001*": [(Fail, "Insert Quorum test need to refactoring")],