diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml index a1cd34ffb..9377dabdb 100644 --- a/.github/workflows/run_tests.yaml +++ b/.github/workflows/run_tests.yaml @@ -108,12 +108,22 @@ jobs: test_mode="--test-to-end" fi - ~/venv/qa/bin/python3 ./tests/regression.py --only=/regression/e2e.test_operator/${ONLY} $test_mode --trim-results on -o short --no-colors --native --log ./tests/raw.log + for test_file in ./tests/e2e/test_operator*.py; do + name=$(basename "$test_file" .py | sed 's/^test_//') + run_cmd="~/venv/qa/bin/python3 ./tests/regression.py --only=/regression/e2e?test_${name}/${ONLY} $test_mode --no-colors --trim-results on --debug --native --log ./tests/raw_${name}.log && " + run_cmd+="~/venv/qa/bin/tfs --no-colors transform compact ./tests/raw_${name}.log ./tests/compact_${name}.log.txt && " + run_cmd+="~/venv/qa/bin/tfs --no-colors transform nice ./tests/raw_${name}.log ./tests/nice_${name}.log.txt && " + run_cmd+="~/venv/qa/bin/tfs --no-colors transform short ./tests/raw_${name}.log ./tests/short_${name}.log.txt && " + run_cmd+="bash -xec '~/venv/qa/bin/tfs --no-colors report results -a '${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}/' ./tests/raw_${name}.log - --confidential --copyright 'Altinity Inc.' --logo ./tests/altinity.png | ~/venv/qa/bin/tfs --debug --no-colors document convert > ./tests/report_${name}.html'" + + run_tests+=( + "${run_cmd}" + ) + done + printf "%s\n" "${run_tests[@]}" | xargs -P 2 -I {} bash -xec '{}' test_result=$? + + ls -la ./tests/*.html - ~/venv/qa/bin/tfs --no-colors transform compact ./tests/raw.log ./tests/compact.log - ~/venv/qa/bin/tfs --no-colors transform nice ./tests/raw.log ./tests/nice.log.txt - ~/venv/qa/bin/tfs --no-colors transform short ./tests/raw.log ./tests/short.log.txt
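Note: the `test_result=$?` context line must follow the `xargs` pipeline directly, before the added `ls -la` line, so it captures the test runner's exit status rather than `ls`'s; the hunk above is ordered accordingly. As a side note on the pattern itself: each array element packs a module's whole run/transform/report sequence into a single string, so `xargs -P` schedules complete pipelines rather than individual commands. A minimal, self-contained sketch of that pattern (placeholder commands stand in for the real pipeline):

run_tests=()
for test_file in ./tests/e2e/test_operator*.py; do
    name=$(basename "$test_file" .py | sed 's/^test_//')
    # one complete shell pipeline per module
    run_tests+=("echo start ${name} && sleep 1 && echo done ${name}")
done
# one element per line; -P 2 keeps at most two pipelines in flight
printf "%s\n" "${run_tests[@]}" | xargs -P 2 -I {} bash -xec '{}'

- ~/venv/qa/bin/tfs --no-colors report results -a "${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}/" ./tests/raw.log - --confidential --copyright "Altinity Inc."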
--logo ./tests/altinity.png | ~/venv/qa/bin/tfs --debug --no-colors document convert > ./tests/report.html echo "test_result=$test_result" >> $GITHUB_OUTPUT exit "$test_result" diff --git a/.gitignore b/.gitignore index 4942d09b9..68e76c489 100644 --- a/.gitignore +++ b/.gitignore @@ -43,6 +43,8 @@ venv # Tests cached files tests/image/cache +tests/*.log.txt +tests/*.html # Skip tmp folder /tmp/ diff --git a/Vagrantfile b/Vagrantfile index a5e28b618..3570b934b 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -121,15 +121,13 @@ Vagrant.configure(2) do |config| # docker curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) test" - apt-get install --no-install-recommends -y docker-ce pigz + apt-get install --no-install-recommends -y docker-ce docker-ce-cli containerd.io docker-compose-plugin pigz - # docker compose apt-get install -y --no-install-recommends python3-distutils curl -sL https://bootstrap.pypa.io/get-pip.py -o /tmp/get-pip.py python3 /tmp/get-pip.py - pip3 install -U setuptools - pip3 install -U docker-compose + pip3 install -U -r ./tests/image/requirements.txt # k9s CLI K9S_VERSION=$(curl -sL https://github.com/derailed/k9s/releases/latest -H "Accept: application/json" | jq -r .tag_name) @@ -170,7 +168,8 @@ Vagrant.configure(2) do |config| # K8S_VERSION=${K8S_VERSION:-1.23.1} # K8S_VERSION=${K8S_VERSION:-1.24.8} # K8S_VERSION=${K8S_VERSION:-1.25.4} - K8S_VERSION=${K8S_VERSION:-1.31.1} +# K8S_VERSION=${K8S_VERSION:-1.31.1} + K8S_VERSION=${K8S_VERSION:-1.31.2} export VALIDATE_YAML=true killall kubectl || true diff --git a/config/chi/templates.d/001-templates.json.example b/config/chi/templates.d/001-templates.json.example index 34468ecd2..bcab13d1a 100644 --- a/config/chi/templates.d/001-templates.json.example +++ b/config/chi/templates.d/001-templates.json.example @@ -29,7 +29,7 @@ "containers" : [ { "name": "clickhouse", - "image": "clickhouse/clickhouse-server:23.8", + "image": "clickhouse/clickhouse-server:24.8", "ports": [ { "name": "http", diff --git a/deploy/builder/templates-config/chi/templates.d/001-templates.json.example b/deploy/builder/templates-config/chi/templates.d/001-templates.json.example index 34468ecd2..bcab13d1a 100644 --- a/deploy/builder/templates-config/chi/templates.d/001-templates.json.example +++ b/deploy/builder/templates-config/chi/templates.d/001-templates.json.example @@ -29,7 +29,7 @@ "containers" : [ { "name": "clickhouse", - "image": "clickhouse/clickhouse-server:23.8", + "image": "clickhouse/clickhouse-server:24.8", "ports": [ { "name": "http", diff --git a/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-1-node-256M-for-test-only.yaml b/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-1-node-256M-for-test-only.yaml index ac148e098..81627f02d 100644 --- a/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-1-node-256M-for-test-only.yaml +++ b/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-1-node-256M-for-test-only.yaml @@ -382,13 +382,13 @@ spec: containers: - name: clickhouse-keeper imagePullPolicy: Always - image: "clickhouse/clickhouse-keeper:latest-alpine" + image: "clickhouse/clickhouse-keeper:24.8" resources: requests: memory: "256M" cpu: "1" limits: - memory: "4Gi" + memory: "1Gi" cpu: "2" volumeMounts: - name: clickhouse-keeper-settings diff --git a/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-3-nodes-256M-for-test-only.yaml 
b/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-3-nodes-256M-for-test-only.yaml index 84fe373d1..db16591c8 100644 --- a/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-3-nodes-256M-for-test-only.yaml +++ b/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-3-nodes-256M-for-test-only.yaml @@ -382,13 +382,13 @@ spec: containers: - name: clickhouse-keeper imagePullPolicy: Always - image: "clickhouse/clickhouse-keeper:latest-alpine" + image: "clickhouse/clickhouse-keeper:24.8" resources: requests: memory: "256M" cpu: "1" limits: - memory: "4Gi" + memory: "1Gi" cpu: "2" volumeMounts: - name: clickhouse-keeper-settings diff --git a/deploy/helm/clickhouse-operator/values.yaml b/deploy/helm/clickhouse-operator/values.yaml index 4c3ac44ec..ddd09efdf 100644 --- a/deploy/helm/clickhouse-operator/values.yaml +++ b/deploy/helm/clickhouse-operator/values.yaml @@ -699,7 +699,7 @@ configs: "containers" : [ { "name": "clickhouse", - "image": "clickhouse/clickhouse-server:23.8", + "image": "clickhouse/clickhouse-server:24.8", "ports": [ { "name": "http", diff --git a/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-1-node-1GB-for-tests-only.yaml b/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-1-node-1GB-for-tests-only.yaml index ba5b2e919..e1939a0ae 100644 --- a/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-1-node-1GB-for-tests-only.yaml +++ b/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-1-node-1GB-for-tests-only.yaml @@ -82,7 +82,7 @@ spec: containers: - name: kubernetes-zookeeper imagePullPolicy: IfNotPresent - image: "docker.io/zookeeper:3.8.4" + image: "docker.io/zookeeper:latest" ports: - containerPort: 2181 name: client diff --git a/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-3-nodes-1GB-for-tests-only.yaml b/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-3-nodes-1GB-for-tests-only.yaml index dad2d73b6..fbbc4b733 100644 --- a/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-3-nodes-1GB-for-tests-only.yaml +++ b/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-3-nodes-1GB-for-tests-only.yaml @@ -82,7 +82,7 @@ spec: containers: - name: kubernetes-zookeeper imagePullPolicy: IfNotPresent - image: "docker.io/zookeeper:3.8.4" + image: "docker.io/zookeeper:latest" ports: - containerPort: 2181 name: client diff --git a/docs/chi-examples/99-clickhouseinstallation-max.yaml b/docs/chi-examples/99-clickhouseinstallation-max.yaml index 7eea7110c..e8b90beac 100644 --- a/docs/chi-examples/99-clickhouseinstallation-max.yaml +++ b/docs/chi-examples/99-clickhouseinstallation-max.yaml @@ -674,7 +674,7 @@ spec: spec: containers: - name: clickhouse - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: default-volume-claim mountPath: /var/lib/clickhouse @@ -686,7 +686,7 @@ spec: memory: "64Mi" cpu: "100m" - name: clickhouse-log - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 command: - "/bin/sh" - "-c" @@ -704,7 +704,7 @@ spec: spec: containers: - name: clickhouse - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: default-volume-claim mountPath: /var/lib/clickhouse @@ -716,7 +716,7 @@ spec: memory: "64Mi" cpu: "100m" - name: clickhouse-log - image: clickhouse/clickhouse-server:23.8 + 
image: clickhouse/clickhouse-server:24.8 command: - "/bin/sh" - "-c" diff --git a/docs/security_hardening.md b/docs/security_hardening.md index 770b8400d..7bf5d4fe9 100644 --- a/docs/security_hardening.md +++ b/docs/security_hardening.md @@ -10,7 +10,17 @@ With the default settings, the ClickHouse operator deploys ClickHouse with two u The '**default**' user is used to connect to ClickHouse instance from a pod where it is running, and also for distributed queries. It is deployed with an **empty password** that was a long-time default for ClickHouse out-of-the-box installation. -To secure it, the operator applies network security rules that restrict connections to the pods running the ClickHouse cluster, and nothing else. +For security purposes, we recommend that you disable the `default` user altogether. As an example, create a file named `remove_default_user.xml` and place it in the `users.d` directory. This markup does the trick: + +```xml +<clickhouse> + <users> + <default remove="1"></default> + </users> +</clickhouse> +``` + +However, if you do use the `default` user, the operator applies network security rules that restrict connections to the pods running the ClickHouse cluster, and nothing else. Before version **0.19.0** `hostRegexp` was applied that captured pod names. This did not work correctly in some Kubernetes distributions, such as GKE. In later versions, the operator additionally applies a restrictive set of pod IP addresses and rebuilds this set if the IP address of a pod changes for whatever reason. diff --git a/go.mod b/go.mod index a519eb44a..32f26b6d7 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/altinity/clickhouse-operator -go 1.25.1 +go 1.25 replace ( github.com/emicklei/go-restful/v3 => github.com/emicklei/go-restful/v3 v3.10.0 @@ -40,7 +40,7 @@ require ( go.opentelemetry.io/otel/sdk v1.24.0 go.opentelemetry.io/otel/sdk/metric v1.24.0 golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e - golang.org/x/sync v0.12.0 + golang.org/x/sync v0.17.0 gopkg.in/d4l3k/messagediff.v1 v1.2.1 gopkg.in/yaml.v3 v3.0.1 sigs.k8s.io/controller-runtime v0.15.1 @@ -88,14 +88,14 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.38.0 // indirect + golang.org/x/mod v0.28.0 // indirect + golang.org/x/net v0.44.0 // indirect golang.org/x/oauth2 v0.28.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect + golang.org/x/sys v0.36.0 // indirect + golang.org/x/term v0.35.0 // indirect + golang.org/x/text v0.29.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + golang.org/x/tools v0.37.0 // indirect gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index cfaad41cb..0ead42583 100644 --- a/go.sum +++ b/go.sum @@ -544,8 +544,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.28.0
h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= +golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -583,8 +583,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= +golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -603,8 +603,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -653,20 +653,20 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.30.0 
h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -726,8 +726,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= +golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/tests/README.md b/tests/README.md index 62b071d22..a5dd1fb46 100644 --- a/tests/README.md +++ b/tests/README.md @@ -10,7 +10,7 @@ To execute tests, you will need: * TestFlows Python library (`pip3 install -r ./tests/image/requirements.txt`) * To run tests in docker container (approximately 2 times slower, but does not require any additional configuration): - `docker` - - `docker-compose` + - `docker compose` - `python3` * To run tests natively on your machine: - `kubectl` @@ -37,7 +37,7 @@ To execute the test suite (that currently involves only operator tests, not test ```bash pip3 install -U -r ./tests/image/requirements.txt docker pull registry.gitlab.com/altinity-public/container-images/clickhouse-operator-test-runner:latest -COMPOSE_HTTP_TIMEOUT=1800 python3 ./tests/regression.py --only "/regression/e2e.test_operator/*" +COMPOSE_HTTP_TIMEOUT=1800 python3 ./tests/regression.py --only "/regression/e2e?test_operator/*" ``` To execute tests natively (not in docker), you need to 
add `--native` parameter. @@ -47,7 +47,7 @@ Tests running in parallel by default, to run it consistently, add `--parallel of If you need only one certain test, you may execute ```bash -COMPOSE_HTTP_TIMEOUT=1800 python3 ./tests/regression.py --only "/regression/e2e.test_operator/test_009*" +COMPOSE_HTTP_TIMEOUT=1800 python3 ./tests/regression.py --only "/regression/e2e?test_operator/test_009*" ``` where `009` may be substituted by the number of the test you need. Tests --- numbers and names correspondence may be found in `tests/regression.py` and `tests/test_*.py` source code files. diff --git a/tests/docker-compose/docker-compose.yml b/tests/docker-compose/docker-compose.yml index dbd3ba276..578565660 100644 --- a/tests/docker-compose/docker-compose.yml +++ b/tests/docker-compose/docker-compose.yml @@ -21,7 +21,7 @@ services: - NET_ADMIN # dummy service which does nothing, but allows to postpone - # 'docker-compose up -d' till all dependecies will go healthy + # 'docker compose up -d' till all dependencies go healthy all_services_ready: image: hello-world privileged: true diff --git a/tests/e2e/clickhouse.py b/tests/e2e/clickhouse.py index 27bd949ad..40487d6a8 100644 --- a/tests/e2e/clickhouse.py +++ b/tests/e2e/clickhouse.py @@ -27,12 +27,12 @@ def query( pwd_str = "" if pwd == "" else f"--password={pwd}" user_str = "" if user == "" else f"--user={user}" - for i in [1,2,3]: # re-tries for "Unknown stream id" error + for _ in [1,2,3]: # re-tries for "Unknown stream id" error if with_error: res = kubectl.launch( f"exec {pod_name} -n {current().context.test_namespace} -c {container}" f" --" - f" clickhouse-client -mn -h {host} --port={port} {user_str} {pwd_str} {advanced_params}" + f" clickhouse-client --receive_timeout={timeout} -mn -h {host} --port={port} {user_str} {pwd_str} {advanced_params}" f' --query="{sql}"' f" 2>&1", timeout=timeout, @@ -43,7 +43,7 @@ res = kubectl.launch( f"exec {pod_name} -n {current().context.test_namespace} -c {container}" f" -- " - f"clickhouse-client -mn -h {host} --port={port} {user_str} {pwd_str} {advanced_params}" + f"clickhouse-client --receive_timeout={timeout} -mn -h {host} --port={port} {user_str} {pwd_str} {advanced_params}" f'--query="{sql}"', timeout=timeout, ns=current().context.test_namespace, diff --git a/tests/e2e/manifests/chi/test-011-secured-default-2.yaml b/tests/e2e/manifests/chi/test-011-secured-default-2.yaml index 47682f6c0..a150e3693 100644 --- a/tests/e2e/manifests/chi/test-011-secured-default-2.yaml +++ b/tests/e2e/manifests/chi/test-011-secured-default-2.yaml @@ -6,6 +6,8 @@ spec: useTemplates: - name: clickhouse-version configuration: + profiles: + readonly/receive_timeout: 60 users: default/profile: readonly clusters: diff --git a/tests/e2e/manifests/chi/test-022-broken-image.yaml b/tests/e2e/manifests/chi/test-022-broken-image.yaml index 9c59f7c48..9a4d03b97 100644 --- a/tests/e2e/manifests/chi/test-022-broken-image.yaml +++ b/tests/e2e/manifests/chi/test-022-broken-image.yaml @@ -5,14 +5,14 @@ spec: defaults: templates: - podTemplate: v20.3 + podTemplate: broken-image templates: podTemplates: - - name: v20.3 + - name: broken-image spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3-broken + image: clickhouse/clickhouse-server:24.8-broken configuration: clusters: - name: default
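Context for the `--receive_timeout` addition in `clickhouse.py` above: the flag bounds how long `clickhouse-client` waits for data from the server, so a stuck query fails within the test's own timeout instead of hanging the retry loop. Assembled by hand, the command the harness builds looks roughly like this (pod, namespace, container and query are illustrative placeholders):

kubectl exec chi-test-operator-default-0-0-0 -n test-operator -c clickhouse -- \
    clickhouse-client --receive_timeout=600 -mn -h 127.0.0.1 --port=9000 --query="SELECT 1"

diff --git a/tests/e2e/manifests/chi/test-046-2-clickhouse-operator-metrics.yaml b/tests/e2e/manifests/chi/test-046-2-clickhouse-operator-metrics.yaml index 05adbe3db..848b1b6f9 100644 ---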
a/tests/e2e/manifests/chi/test-046-2-clickhouse-operator-metrics.yaml +++ b/tests/e2e/manifests/chi/test-046-2-clickhouse-operator-metrics.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3-broken + image: clickhouse/clickhouse-server:24.8-broken defaults: templates: podTemplate: clickhouse-new \ No newline at end of file diff --git a/tests/e2e/manifests/chi/test-cluster-for-alerts.yaml b/tests/e2e/manifests/chi/test-cluster-for-alerts.yaml index 02f64aa1b..0aa3e130a 100644 --- a/tests/e2e/manifests/chi/test-cluster-for-alerts.yaml +++ b/tests/e2e/manifests/chi/test-cluster-for-alerts.yaml @@ -18,6 +18,14 @@ spec: prometheus/metrics: true prometheus/events: true prometheus/asynchronous_metrics: true + # tune for low memory + mark_cache_size: 67108864 + merge_tree/parts_to_throw_insert: 300 + merge_tree/parts_to_delay_insert: 150 +# merge_tree/merge_max_block_size: 1024 +# merge_tree/max_bytes_to_merge_at_max_space_in_pool: 1073741824 +# merge_tree/number_of_free_entries_in_pool_to_lower_max_size_of_merge: 0 +# background_schedule_pool_size: 128 zookeeper: nodes: diff --git a/tests/e2e/manifests/chit/tpl-clickhouse-alerts.yaml b/tests/e2e/manifests/chit/tpl-clickhouse-alerts.yaml index 15875565f..645548681 100644 --- a/tests/e2e/manifests/chit/tpl-clickhouse-alerts.yaml +++ b/tests/e2e/manifests/chit/tpl-clickhouse-alerts.yaml @@ -17,5 +17,5 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:latest imagePullPolicy: Always diff --git a/tests/e2e/manifests/chit/tpl-clickhouse-backups-fake.yaml b/tests/e2e/manifests/chit/tpl-clickhouse-backups-fake.yaml index dff9aa0d8..35cb84a80 100644 --- a/tests/e2e/manifests/chit/tpl-clickhouse-backups-fake.yaml +++ b/tests/e2e/manifests/chit/tpl-clickhouse-backups-fake.yaml @@ -21,7 +21,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:latest - name: clickhouse-backup image: nginx:latest diff --git a/tests/e2e/manifests/chit/tpl-clickhouse-backups.yaml b/tests/e2e/manifests/chit/tpl-clickhouse-backups.yaml index 1e89ac90a..7e9aec01e 100644 --- a/tests/e2e/manifests/chit/tpl-clickhouse-backups.yaml +++ b/tests/e2e/manifests/chit/tpl-clickhouse-backups.yaml @@ -25,14 +25,15 @@ spec: fsGroup: 101 containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:latest + imagePullPolicy: Always command: - clickhouse-server - --config-file=/etc/clickhouse-server/config.xml - name: clickhouse-backup - image: altinity/clickhouse-backup:2.4.15 - imagePullPolicy: IfNotPresent + image: altinity/clickhouse-backup:latest + imagePullPolicy: Always command: - bash - -xc @@ -49,7 +50,7 @@ spec: - name: BACKUPS_TO_KEEP_REMOTE value: "3" - name: S3_ENDPOINT - value: https://minio.minio + value: http://minio.minio - name: S3_BUCKET value: clickhouse-backup - name: S3_PATH diff --git a/tests/e2e/manifests/chit/tpl-test-031.yaml b/tests/e2e/manifests/chit/tpl-test-031.yaml index d248ba8aa..1f3f4c6da 100644 --- a/tests/e2e/manifests/chit/tpl-test-031.yaml +++ b/tests/e2e/manifests/chit/tpl-test-031.yaml @@ -16,4 +16,4 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 diff --git a/tests/e2e/run_tests_keeper.sh b/tests/e2e/run_tests_keeper.sh index 68318a3ff..1a4ec361e 100755 --- a/tests/e2e/run_tests_keeper.sh +++ 
b/tests/e2e/run_tests_keeper.sh @@ -7,4 +7,4 @@ export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" ONLY="${ONLY:-"*"}" -python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_keeper/${ONLY}" --native +python3 "$CUR_DIR/../regression.py" --only="/regression/e2e?test_keeper/${ONLY}" --native diff --git a/tests/e2e/run_tests_metrics.sh b/tests/e2e/run_tests_metrics.sh index 127cec49c..a167f3c17 100755 --- a/tests/e2e/run_tests_metrics.sh +++ b/tests/e2e/run_tests_metrics.sh @@ -7,4 +7,4 @@ export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" ONLY="${ONLY:-"*"}" -python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_metrics_exporter/${ONLY}" --native +python3 "$CUR_DIR/../regression.py" --only="/regression/e2e?test_metrics_exporter/${ONLY}" --native diff --git a/tests/e2e/run_tests_operator.sh b/tests/e2e/run_tests_operator.sh index 37230bf53..083642a7f 100755 --- a/tests/e2e/run_tests_operator.sh +++ b/tests/e2e/run_tests_operator.sh @@ -13,11 +13,11 @@ if [[ ! -z "${RUN_ALL_TESTS}" ]]; then RUN_ALL_TESTS="--test-to-end" fi -python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_operator/${ONLY}" ${RUN_ALL_TESTS} -o short --trim-results on --debug --native -#python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_operator/${ONLY}" --test-to-end -o short --trim-results on --debug --native -#python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_operator/${ONLY}" --parallel-pool ${MAX_PARALLEL} -o short --trim-results on --debug --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/* -o short --trim-results on --debug --native --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/* --trim-results on --debug --native --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/test_008_2* --trim-results on --debug --native --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/test_008_2* --trim-results on --debug --native -o short --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/*32* --trim-results on --debug --native -o short --native +python3 "$CUR_DIR/../regression.py" --only="/regression/e2e?test_operator/${ONLY}" ${RUN_ALL_TESTS} -o short --trim-results on --debug --native +#python3 "$CUR_DIR/../regression.py" --only="/regression/e2e?test_operator/${ONLY}" --test-to-end -o short --trim-results on --debug --native +#python3 "$CUR_DIR/../regression.py" --only="/regression/e2e?test_operator/${ONLY}" --parallel-pool ${MAX_PARALLEL} -o short --trim-results on --debug --native +#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e?test_operator/* -o short --trim-results on --debug --native --native +#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e?test_operator/* --trim-results on --debug --native --native +#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e?test_operator/test_008_2* --trim-results on --debug --native --native +#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e?test_operator/test_008_2* --trim-results on --debug --native -o short --native +#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e?test_operator/*32* --trim-results on --debug --native -o short --native
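The `e2e.test_*` → `e2e?test_*` rewrite in these scripts keeps the patterns matching while avoiding a literal `.` in the TestFlows `--only` test path: `?` is a single-character wildcard, so it still matches the dot in the module name (presumably the motivation for the change). Typical invocations then look like this (the test number is illustrative):

# run one module natively
python3 ./tests/regression.py --only="/regression/e2e?test_keeper/*" --native
# run a single scenario by its numeric prefix
python3 ./tests/regression.py --only="/regression/e2e?test_operator/test_009*" --native

diff --git a/tests/e2e/run_tests_parallel.sh b/tests/e2e/run_tests_parallel.sh index cebc337cf..a7020b219 100755 --- a/tests/e2e/run_tests_parallel.sh +++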
b/tests/e2e/run_tests_parallel.sh @@ -2,69 +2,29 @@ set -e CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" pip3 install -r "$CUR_DIR/../image/requirements.txt" -rm -rfv /tmp/test*.log -pad="000" -MAX_PARALLEL=${MAX_PARALLEL:-5} -export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" -export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" - -function run_test_parallel() { - test_names=("$@") - run_test_cmd="" - delete_ns_cmd="" - create_ns_cmd="" - for test_name in "${test_names[@]}"; do - ns=$(echo ${test_name} | tr '_' '-') - delete_ns_cmd+="(kubectl delete ns $ns --ignore-not-found --now --timeout=600s);" - create_ns_cmd+="kubectl create ns $ns;" - # TODO randomization, currently need to avoid same 'No such file or directory: '/tmp/testflows.x.x.x.x.log' - # sleep $(echo "scale=2; $((1 + $RANDOM % 100)) / 100" | bc -l) && - run_test_cmd+="( OPERATOR_NAMESPACE=${ns} TEST_NAMESPACE=${ns} python3 $CUR_DIR/../regression.py --only=/regression/e2e.test_operator/${test_name}* --no-color --native &>/tmp/${test_name}.log && date && echo ${test_name} PASS && kubectl delete ns $ns --timeout=600s) || (echo \"TEST ${test_name} FAILED EXIT_CODE=\$?\" && cat /tmp/${test_name}.log && exit 255);" - done - echo "${delete_ns_cmd}" | xargs -P 0 -r --verbose -d ";" -n 1 bash -ce - echo "${create_ns_cmd}" | xargs -P 0 -r --verbose -d ";" -n 1 bash -ce - set +e - echo "${run_test_cmd}" | xargs -P ${MAX_PARALLEL} -r --verbose -d ";" -n 1 bash -ce - if [[ "0" != "$?" ]]; then - echo "TEST FAILED LOOK TO LOGS ABOVE" - pkill -e -f "python.+regression" - exit 1 - fi - set -e -} - -is_crd_present=$(kubectl get crd -o name | grep clickhouse.altinity.com | wc -l) -delete_chi_cmd="" -if [[ "0" != "${is_crd_present}" && "0" != $(kubectl get chi --all-namespaces -o name | wc -l ) ]]; then - while read chi; do - delete_chi_cmd+="kubectl delete chi -n ${chi};" - done < <(kubectl get chi --all-namespaces -o custom-columns=name:.metadata.namespace,name:.metadata.name | tail -n +2) - echo "${delete_chi_cmd}" | xargs -P 0 -r --verbose -d ";" -n 1 bash -ce -fi -if [[ "0" != "${is_crd_present}" ]]; then - kubectl delete crd clickhouseinstallations.clickhouse.altinity.com clickhouseinstallationtemplates.clickhouse.altinity.com clickhouseoperatorconfigurations.clickhouse.altinity.com -fi -kubectl apply -f "${CUR_DIR}/../../deploy/operator/parts/crd.yaml" - -test_list=() -test_ids=(34 6 35 11 32 1 2 3 4 5 7 10 12 13 15 17 18 22 24 25 26 27 29 33 16 23) -for i in "${test_ids[@]}"; do - test_list+=( "test_${pad:${#i}}${i}" ) +export NO_WAIT=1 +"${CUR_DIR}/../../deploy/prometheus/create-prometheus.sh" +"${CUR_DIR}/../../deploy/minio/create-minio.sh" +ONLY="*" +for test_file in ${CUR_DIR}/test_*.py; do + name=$(basename "$test_file" .py | sed 's/^test_//') + run_cmd="python3 ./tests/regression.py --only=/regression/e2e?test_${name}/${ONLY} --trim-results on -o short --native --log ./tests/raw_${name}.log && " + run_cmd+="tfs --no-colors transform compact ./tests/raw_${name}.log ./tests/compact_${name}.log.txt && " + run_cmd+="tfs --no-colors transform nice ./tests/raw_${name}.log ./tests/nice_${name}.log.txt && " + run_cmd+="tfs --no-colors transform short ./tests/raw_${name}.log ./tests/short_${name}.log.txt && " + run_cmd+="bash -xec 'tfs --no-colors report results -a 'local run' ./tests/raw_${name}.log - --confidential --copyright 'Altinity Inc.'
--logo ./tests/altinity.png | tfs --debug --no-colors document convert > ./tests/report_${name}.html'" + + run_tests+=( + "${run_cmd}" + ) done -MAX_PARALLEL=5 -run_test_parallel "${test_list[@]}" - -# allow parallel long test_XXX_X -test_list=("test_019" "test_014" "test_008" "test_020" "test_021" "test_028") -MAX_PARALLEL=5 -run_test_parallel "${test_list[@]}" - -# following tests require sequenced execution (test_009 upgrades operator, test_030 deletes crd) -test_list=("test_009" "test_030" "test_031") -MAX_PARALLEL=1 -run_test_parallel "${test_list[@]}" +test_result=0 +printf "%s\n" "${run_tests[@]}" | xargs -P 2 -I {} bash -xec '{}' || test_result=$? date -echo "ALL TESTS PASSED" - +if [[ "$test_result" == "0" ]]; then + echo "ALL TESTS PASSED" +else + echo "TESTS FAILED, SEE ./tests/*.log" +fi diff --git a/tests/e2e/settings.py b/tests/e2e/settings.py index e3569edea..c7ff64375 100644 --- a/tests/e2e/settings.py +++ b/tests/e2e/settings.py @@ -25,7 +25,7 @@ def get_docker_compose_path(): kubectl_cmd = ( "kubectl" if current().context.native else - f"docker-compose -f {get_docker_compose_path()[0]} exec -T runner kubectl" + f"docker compose -f {get_docker_compose_path()[0]} exec -T runner kubectl" ) kubectl_cmd = os.getenv("KUBECTL_CMD") if "KUBECTL_CMD" in os.environ else kubectl_cmd diff --git a/tests/e2e/steps.py b/tests/e2e/steps.py index 22881fa1d..37ece526a 100644 --- a/tests/e2e/steps.py +++ b/tests/e2e/steps.py @@ -31,7 +31,7 @@ def get_shell(self, timeout=600): def create_test_namespace(self, force=False): """Create unique test namespace for test.""" - random_namespace = self.name[self.name.find('test_0'):self.name.find('. ')].replace("_", "-") + "-" + str(uuid.uuid1()) + random_namespace = self.name[self.name.find('test_0'):self.name.find('# ')].replace("_", "-") + "-" + str(uuid.uuid1()) if not force: # (self.cflags & PARALLEL) and not force: self.context.test_namespace = random_namespace @@ -76,7 +76,7 @@ def set_settings(self): self.context.kubectl_cmd = ( "kubectl" if current().context.native - else f"docker-compose -f {get_docker_compose_path()[0]} exec -T runner kubectl" + else f"docker compose -f {get_docker_compose_path()[0]} exec -T runner kubectl" ) self.context.kubectl_cmd = define("kubectl_cmd", os.getenv("KUBECTL_CMD") if "KUBECTL_CMD" in os.environ else self.context.kubectl_cmd) diff --git a/tests/e2e/test_backup_alerts.py b/tests/e2e/test_backup_alerts.py index 490bc5536..b3618cd52 100644 --- a/tests/e2e/test_backup_alerts.py +++ b/tests/e2e/test_backup_alerts.py @@ -1,3 +1,6 @@ +import os +os.environ["TEST_NAMESPACE"]="test-backup-alerts" + import json import random import time @@ -25,7 +28,7 @@ def get_minio_spec(): def exec_on_backup_container( backup_pod, cmd, - ns=settings.test_namespace, + ns, ok_to_fail=False, timeout=60, container="clickhouse-backup", @@ -60,12 +63,13 @@ def is_expected_backup_status(command_name, command_is_done, st, expected_status return False, command_is_done -def wait_backup_command_status(backup_pod, command_name, expected_status="success", err_status="error"): +def wait_backup_command_status(backup_pod, command_name, ns, expected_status="success", err_status="error"): command_is_done = False with Then(f'wait "{command_name}" with status "{expected_status}"'): while command_is_done is False: status_lines = exec_on_backup_container( - backup_pod, f'curl -sL "http://127.0.0.1:7171/backup/status"' + backup_pod, f'curl -sL "http://127.0.0.1:7171/backup/status"', + ns=ns ).splitlines() for line in status_lines: st = json.loads(line) @@
-161,7 +165,7 @@ def test_minio_setup(self, chi, minio_spec): @TestScenario -@Name("test_backup_is_success. Basic backup scenario") +@Name("test_backup_is_success# Basic backup scenario") def test_backup_is_success(self, chi, minio_spec): _, _, backup_pod, _ = alerts.random_pod_choice_for_callbacks(chi) backup_name = prepare_table_for_backup(backup_pod, chi) @@ -173,21 +177,26 @@ def test_backup_is_success(self, chi, minio_spec): "clickhouse_backup_successful_backups|clickhouse_backup_successful_creates", ns=self.context.test_namespace ) - list_before = exec_on_backup_container(backup_pod, "curl -sL http://127.0.0.1:7171/backup/list") + list_before = exec_on_backup_container(backup_pod, "curl -sL http://127.0.0.1:7171/backup/list", self.context.test_namespace) exec_on_backup_container( backup_pod, f'curl -X POST -sL "http://127.0.0.1:7171/backup/create?name={backup_name}"', + ns=self.context.test_namespace ) - wait_backup_command_status(backup_pod, f"create {backup_name}", expected_status="success") + wait_backup_command_status(backup_pod, f"create {backup_name}", expected_status="success", ns=self.context.test_namespace,) exec_on_backup_container( backup_pod, f'curl -X POST -sL "http://127.0.0.1:7171/backup/upload/{backup_name}"', + ns=self.context.test_namespace ) - wait_backup_command_status(backup_pod, f"upload {backup_name}", expected_status="success") + wait_backup_command_status(backup_pod, f"upload {backup_name}", expected_status="success", ns=self.context.test_namespace) with Then("list of backups shall changed"): - list_after = exec_on_backup_container(backup_pod, "curl -sL http://127.0.0.1:7171/backup/list") + list_after = exec_on_backup_container( + backup_pod, "curl -sL http://127.0.0.1:7171/backup/list", + ns=self.context.test_namespace + ) assert list_before != list_after, error("backup is not created") with Then("successful backup count shall increased"): @@ -202,13 +211,13 @@ def test_backup_is_success(self, chi, minio_spec): @TestScenario -@Name("test_backup_is_down. ClickHouseBackupDown and ClickHouseBackupRecentlyRestart alerts") +@Name("test_backup_is_down# ClickHouseBackupDown and ClickHouseBackupRecentlyRestart alerts") def test_backup_is_down(self, chi, minio_spec): reboot_pod, _, _, _ = alerts.random_pod_choice_for_callbacks(chi) def reboot_backup_container(): kubectl.launch( - f"exec -n {settings.test_namespace} {reboot_pod} -c clickhouse-backup -- kill 1", + f"exec -n {self.context.test_namespace} {reboot_pod} -c clickhouse-backup -- kill 1", ok_to_fail=True, ) @@ -257,7 +266,7 @@ def reboot_backup_container(): @TestScenario -@Name("test_backup_failed. 
Check ClickHouseBackupFailed alerts") +@Name("test_backup_failed# Check ClickHouseBackupFailed alerts") def test_backup_failed(self, chi, minio_spec): backup_pod, _, _, _ = alerts.random_pod_choice_for_callbacks(chi) backup_prefix = prepare_table_for_backup(backup_pod, chi) @@ -268,20 +277,20 @@ def create_fail_backup(): backup_name = backup_prefix + "-" + str(random.randint(1, 4096)) backup_dir = f"/var/lib/clickhouse/backup/{backup_name}/shadow/default/test_backup" kubectl.launch( - f"exec -n {settings.test_namespace} {backup_pod} -c clickhouse-backup -- bash -c 'mkdir -v -m 0400 -p {backup_dir}'", + f"exec -n {self.context.test_namespace} {backup_pod} -c clickhouse-backup -- bash -c 'mkdir -v -m 0400 -p {backup_dir}'", ) kubectl.launch( - f"exec -n {settings.test_namespace} {backup_pod} -c clickhouse-backup -- curl -X POST -sL http://127.0.0.1:7171/backup/create?name={backup_name}", + f"exec -n {self.context.test_namespace} {backup_pod} -c clickhouse-backup -- curl -X POST -sL http://127.0.0.1:7171/backup/create?name={backup_name}", ) - wait_backup_command_status(backup_pod, command_name=f"create {backup_name}", expected_status="error") + wait_backup_command_status(backup_pod, command_name=f"create {backup_name}", expected_status="error", ns=self.context.test_namespace) def create_success_backup(): backup_name = backup_prefix + "-" + str(random.randint(1, 4096)) kubectl.launch( - f"exec -n {settings.test_namespace} {backup_pod} -c clickhouse-backup -- curl -X POST -sL http://127.0.0.1:7171/backup/create?name={backup_name}", + f"exec -n {self.context.test_namespace} {backup_pod} -c clickhouse-backup -- curl -X POST -sL http://127.0.0.1:7171/backup/create?name={backup_name}", ) - wait_backup_command_status(backup_pod, command_name=f"create {backup_name}", expected_status="success") + wait_backup_command_status(backup_pod, command_name=f"create {backup_name}", expected_status="success", ns=self.context.test_namespace) with When("clickhouse-backup create failed"): fired = alerts.wait_alert_state( @@ -307,7 +316,7 @@ def create_success_backup(): @TestScenario -@Name("test_backup_duration. Check ClickHouseBackupTooShort and ClickHouseBackupTooLong alerts") +@Name("test_backup_duration# Check ClickHouseBackupTooShort and ClickHouseBackupTooLong alerts") def test_backup_duration(self, chi, minio_spec): short_pod, _, long_pod, _ = alerts.random_pod_choice_for_callbacks(chi) apply_fake_backup("prepare fake backup duration metric") @@ -374,7 +383,7 @@ def test_backup_duration(self, chi, minio_spec): @TestScenario -@Name("test_backup_size. Check ClickHouseBackupSizeChanged alerts") +@Name("test_backup_size# Check ClickHouseBackupSizeChanged alerts") def test_backup_size(self, chi, minio_spec): decrease_pod, _, increase_pod, _ = alerts.random_pod_choice_for_callbacks(chi) @@ -395,8 +404,9 @@ def test_backup_size(self, chi, minio_spec): exec_on_backup_container( backup_pod, f'curl -X POST -sL "http://127.0.0.1:7171/backup/create?name={backup_name}"', + ns=self.context.test_namespace ) - wait_backup_command_status(backup_pod, f"create {backup_name}", expected_status="success") + wait_backup_command_status(backup_pod, f"create {backup_name}", expected_status="success", ns=self.context.test_namespace) if decrease: clickhouse.query( chi["metadata"]["name"], @@ -425,7 +435,7 @@ def test_backup_size(self, chi, minio_spec):
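For orientation, every scenario in this file drives clickhouse-backup through its local REST API on port 7171. Outside the harness, the same round-trip can be reproduced with kubectl and curl; the endpoints below are the ones the tests call, while the namespace, pod and backup names are illustrative placeholders:

NS=test-backup-alerts POD=chi-test-cluster-for-backups-0-0-0 NAME=backup-manual-1
kubectl exec -n "$NS" "$POD" -c clickhouse-backup -- curl -X POST -sL "http://127.0.0.1:7171/backup/create?name=$NAME"
# poll /backup/status until the create command reports success or error
kubectl exec -n "$NS" "$POD" -c clickhouse-backup -- curl -sL "http://127.0.0.1:7171/backup/status"
kubectl exec -n "$NS" "$POD" -c clickhouse-backup -- curl -X POST -sL "http://127.0.0.1:7171/backup/upload/$NAME"
kubectl exec -n "$NS" "$POD" -c clickhouse-backup -- curl -sL "http://127.0.0.1:7171/backup/list"

@TestScenario -@Name("test_backup_not_run.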
Check ClickhouseBackupDoesntRunTooLong alert") +@Name("test_backup_not_run# Check ClickhouseBackupDoesntRunTooLong alert") def test_backup_not_run(self, chi, minio_spec): not_run_pod, _, _, _ = alerts.random_pod_choice_for_callbacks(chi) apply_fake_backup("prepare fake backup for time metric") @@ -462,14 +472,16 @@ def test_backup_not_run(self, chi, minio_spec): exec_on_backup_container( not_run_pod, f'curl -X POST -sL "http://127.0.0.1:7171/backup/create?name={backup_name}"', + ns=self.context.test_namespace ) - wait_backup_command_status(not_run_pod, f"create {backup_name}", expected_status="success") + wait_backup_command_status(not_run_pod, f"create {backup_name}", expected_status="success", ns=self.context.test_namespace) exec_on_backup_container( not_run_pod, f'curl -X POST -sL "http://127.0.0.1:7171/backup/upload/{backup_name}"', + ns=self.context.test_namespace ) - wait_backup_command_status(not_run_pod, f"upload {backup_name}", expected_status="success") + wait_backup_command_status(not_run_pod, f"upload {backup_name}", expected_status="success", ns=self.context.test_namespace) with Then("check ClickhouseBackupDoesntRunTooLong gone away"): resolved = alerts.wait_alert_state( @@ -502,7 +514,7 @@ def test(self): minio_spec = get_minio_spec() with Module("backup_alerts"): - test_cases = [ + all_tests = [ test_backup_is_success, test_backup_is_down, test_backup_failed, @@ -510,5 +522,7 @@ def test(self): test_backup_size, test_backup_not_run, ] - for t in test_cases: + for t in all_tests: Scenario(test=t)(chi=chi, minio_spec=minio_spec) + + util.clean_namespace(delete_chi=True, delete_keeper=True, namespace=self.context.test_namespace) diff --git a/tests/e2e/test_clickhouse.py b/tests/e2e/test_clickhouse.py index 7ede862b1..181df4f24 100644 --- a/tests/e2e/test_clickhouse.py +++ b/tests/e2e/test_clickhouse.py @@ -1,17 +1,20 @@ +import os +os.environ["TEST_NAMESPACE"]="test-clickhouse" + import time import e2e.clickhouse as clickhouse import e2e.kubectl as kubectl import e2e.yaml_manifest as yaml_manifest -import e2e.settings as settings import e2e.util as util +import e2e.steps as steps from testflows.core import * from testflows.asserts import error @TestScenario -@Name("test_ch_001. Insert quorum") +@Name("test_ch_001: Insert quorum") def test_ch_001(self): util.require_keeper(keeper_type=self.context.keeper_type) quorum_template = "manifests/chit/tpl-clickhouse-stable.yaml" @@ -19,7 +22,7 @@ def test_ch_001(self): kubectl.launch( f"delete chit {chit_data['metadata']['name']}", - ns=settings.test_namespace, + ns=self.context.test_namespace, ok_to_fail=True, ) kubectl.create_and_check( @@ -32,7 +35,7 @@ def test_ch_001(self): ) chi = yaml_manifest.get_name(util.get_full_path("manifests/chi/test-ch-001-insert-quorum.yaml")) - chi_data = kubectl.get("chi", ns=settings.test_namespace, name=chi) + chi_data = kubectl.get("chi", ns=self.context.test_namespace, name=chi) util.wait_clickhouse_cluster_ready(chi_data) host0 = "chi-test-ch-001-insert-quorum-default-0-0" @@ -112,9 +115,7 @@ def test_ch_001(self): with When("Resume fetches for t2 at replica1"): clickhouse.query(chi, "system start fetches default.t2", host=host1) i = 0 - while ( - "2" - != clickhouse.query( + while ("2" != clickhouse.query( chi, "select active_replicas from system.replicas where database='default' and table='t1'", pod=host0, @@ -156,7 +157,7 @@ def test_ch_001(self): @TestScenario -@Name("test_ch_002. 
Row-level security") +@Name("test_ch_002: Row-level security") def test_ch_002(self): kubectl.create_and_check( "manifests/chi/test-ch-002-row-level.yaml", @@ -198,16 +199,23 @@ def test_ch_002(self): @TestFeature @Name("e2e.test_clickhouse") def test(self): - util.clean_namespace(delete_chi=False) + with Given("set settings"): + steps.set_settings() + with Given("I create shell"): + shell = steps.get_shell() + self.context.shell = shell + + util.clean_namespace(delete_chi=True, delete_keeper=True, namespace=self.context.test_namespace) + util.install_operator_if_not_exist() all_tests = [ test_ch_001, test_ch_002, ] - run_test = all_tests - # placeholder for selective test running - # run_test = [test_ch_002] + # all_tests = [test_ch_002] - for t in run_test: + for t in all_tests: Scenario(test=t)() + + util.clean_namespace(delete_chi=True, delete_keeper=True, namespace=self.context.test_namespace) diff --git a/tests/e2e/test_examples.py b/tests/e2e/test_examples.py index 1c8d07d19..839f11fd7 100644 --- a/tests/e2e/test_examples.py +++ b/tests/e2e/test_examples.py @@ -1,10 +1,14 @@ +import os +os.environ["TEST_NAMESPACE"]="test-examples" + from testflows.core import * import e2e.kubectl as kubectl import e2e.util as util +import e2e.steps as steps @TestScenario -@Name("test_examples01_1. Empty installation, creates 1 node") +@Name("test_examples01_1: Empty installation, creates 1 node") def test_examples01_1(self): kubectl.create_and_check( manifest="../../docs/chi-examples/01-simple-layout-01-1shard-1repl.yaml", @@ -19,7 +23,7 @@ def test_examples01_1(self): @TestScenario -@Name("test_examples01_2. 1 shard 2 replicas") +@Name("test_examples01_2: 1 shard 2 replicas") def test_examples01_2(self): kubectl.create_and_check( manifest="../../docs/chi-examples/01-simple-layout-02-1shard-2repl.yaml", @@ -34,7 +38,7 @@ def test_examples01_2(self): @TestScenario -@Name("test_examples02_1. Persistent volume mapping via defaults") +@Name("test_examples02_1: Persistent volume mapping via defaults") def test_examples02_1(self): kubectl.create_and_check( manifest="../../docs/chi-examples/03-persistent-volume-01-default-volume.yaml", @@ -49,13 +53,13 @@ def test_examples02_1(self): @TestScenario -@Name("test_examples02_2. Persistent volume mapping via podTemplate") +@Name("test_examples02_2: Persistent volume mapping via podTemplate") def test_examples02_2(self): kubectl.create_and_check( manifest="../../docs/chi-examples/03-persistent-volume-02-pod-template.yaml", check={ "pod_count": 1, - "pod_image": "clickhouse/clickhouse-server:23.8", + "pod_image": "clickhouse/clickhouse-server:24.8", "pod_volumes": { "/var/lib/clickhouse", "/var/log/clickhouse-server", @@ -67,7 +71,17 @@ def test_examples02_2(self): @TestFeature @Name("e2e.test_examples") def test(self): + with Given("set settings"): + steps.set_settings() + self.context.test_namespace = "test-examples" + self.context.operator_namespace = "test-examples" + with Given("I create shell"): + shell = steps.get_shell() + self.context.shell = shell + util.clean_namespace(delete_chi=False) + util.install_operator_if_not_exist() + examples = [ test_examples01_1, test_examples01_2, @@ -76,3 +90,5 @@ def test(self): ] for t in examples: Scenario(test=t)() + + util.clean_namespace(delete_chi=False) diff --git a/tests/e2e/test_keeper.py b/tests/e2e/test_keeper.py index 71d3251a2..3f3c54e65 100644 --- a/tests/e2e/test_keeper.py +++ b/tests/e2e/test_keeper.py @@ -336,7 +336,7 @@ def test_keeper_rescale_outline( @TestScenario -@Name("test_zookeeper_rescale. 
Check ZK scale-up / scale-down cases") +@Name("test_zookeeper_rescale# Check ZK scale-up / scale-down cases") def test_zookeeper_rescale(self): test_keeper_rescale_outline( keeper_type="zookeeper", @@ -347,7 +347,7 @@ def test_zookeeper_rescale(self): @TestScenario -@Name("test_clickhouse_keeper_rescale. Check KEEPER scale-up / scale-down cases") +@Name("test_clickhouse_keeper_rescale# Check KEEPER scale-up / scale-down cases") def test_clickhouse_keeper_rescale(self): test_keeper_rescale_outline( keeper_type="clickhouse-keeper", @@ -358,7 +358,7 @@ def test_clickhouse_keeper_rescale(self): @TestScenario -@Name("test_clickhouse_keeper_rescale_chk. Using ClickHouseKeeperInstallation. Check KEEPER scale-up / scale-down cases") +@Name("test_clickhouse_keeper_rescale_chk# Using ClickHouseKeeperInstallation. Check KEEPER scale-up / scale-down cases") @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Kind_ClickHouseKeeperInstallation("1.0")) def test_clickhouse_keeper_rescale_chk(self): test_keeper_rescale_outline( @@ -370,7 +370,7 @@ def test_clickhouse_keeper_rescale_chk(self): # @TestScenario -# @Name("test_zookeeper_operator_rescale. Check Zookeeper OPERATOR scale-up / scale-down cases") +# @Name("test_zookeeper_operator_rescale# Check Zookeeper OPERATOR scale-up / scale-down cases") # def test_zookeeper_operator_rescale(self): # test_keeper_rescale_outline( # keeper_type="zookeeper-operator", @@ -381,7 +381,7 @@ def test_clickhouse_keeper_rescale_chk(self): # @TestScenario -# @Name("test_zookeeper_pvc_scaleout_rescale. Check ZK+PVC scale-up / scale-down cases") +# @Name("test_zookeeper_pvc_scaleout_rescale# Check ZK+PVC scale-up / scale-down cases") # def test_zookeeper_pvc_scaleout_rescale(self): # test_keeper_rescale_outline( # keeper_type="zookeeper", @@ -392,7 +392,7 @@ def test_clickhouse_keeper_rescale_chk(self): @TestScenario -@Name("test_zookeeper_manual_teardown_rescale. Check ZK+Manual TEARDOWN scale-up / scale-down cases") +@Name("test_zookeeper_manual_teardown_rescale# Check ZK+Manual TEARDOWN scale-up / scale-down cases") def test_zookeeper_manual_teardown_rescale(self): test_keeper_rescale_outline( keeper_type="zookeeper", @@ -480,7 +480,7 @@ def test_keeper_probes_outline( @TestScenario @Name( - "test_zookeeper_probes_workload. Liveness + Readiness probes shall works fine " + "test_zookeeper_probes_workload# Liveness + Readiness probes shall work fine " "under workload in multi-datacenter installation" ) def test_zookeeper_probes_workload(self): @@ -493,7 +493,7 @@ def test_zookeeper_probes_workload(self): # @TestScenario # @Name( -# "test_zookeeper_pvc_probes_workload. Liveness + Readiness probes shall works fine " +# "test_zookeeper_pvc_probes_workload# Liveness + Readiness probes shall work fine " # "under workload in multi-datacenter installation" # ) # def test_zookeeper_pvc_probes_workload(self): @@ -506,7 +506,7 @@ def test_zookeeper_probes_workload(self): # @TestScenario # @Name( -# "test_zookeeper_operator_probes_workload. Liveness + Readiness probes shall works fine " +# "test_zookeeper_operator_probes_workload# Liveness + Readiness probes shall work fine " # "under workload in multi-datacenter installation" # ) # def test_zookeeper_operator_probes_workload(self): @@ -523,7 +523,7 @@ def test_zookeeper_probes_workload(self): @TestScenario @Name( - "test_clickhouse_keeper_probes_workload.
Liveness + Readiness probes shall works fine " + "test_clickhouse_keeper_probes_workload# Liveness + Readiness probes shall work fine " "under workload in multi-datacenter installation" ) def test_clickhouse_keeper_probes_workload(self): @@ -536,7 +536,7 @@ def test_clickhouse_keeper_probes_workload(self): @TestScenario @Name( - "test_clickhouse_keeper_probes_workload_with_chk. Liveness + Readiness probes shall works fine " + "test_clickhouse_keeper_probes_workload_with_chk# Liveness + Readiness probes shall work fine " "under workload in multi-datacenter installation" ) @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Kind_ClickHouseKeeperInstallation("1.0")) diff --git a/tests/e2e/test_metrics_alerts.py b/tests/e2e/test_metrics_alerts.py index cb31ad6da..236f6258f 100644 --- a/tests/e2e/test_metrics_alerts.py +++ b/tests/e2e/test_metrics_alerts.py @@ -1,3 +1,7 @@ +import json +import os +os.environ["TEST_NAMESPACE"]="test-metrics-alerts" + import re import time import random @@ -15,13 +19,13 @@ @TestScenario -@Name("test_prometheus_setup. Check clickhouse-operator/prometheus/alertmanager setup") +@Name("test_prometheus_setup# Check clickhouse-operator/prometheus/alertmanager setup") def test_prometheus_setup(self, prometheus_operator_spec, clickhouse_operator_spec, chi): with Given("clickhouse-operator is installed"): assert ( kubectl.get_count( "pod", - ns=settings.operator_namespace, + ns=self.context.operator_namespace, label="-l app=clickhouse-operator", ) > 0 @@ -64,12 +68,12 @@ def test_prometheus_setup(self, prometheus_operator_spec, clickhouse_operator_sp @TestScenario -@Name("test_metrics_exporter_down. Check ClickHouseMetricsExporterDown") +@Name("test_metrics_exporter_down# Check ClickHouseMetricsExporterDown") def test_metrics_exporter_down(self, prometheus_operator_spec, clickhouse_operator_spec, chi): def reboot_metrics_exporter(): clickhouse_operator_pod = clickhouse_operator_spec["items"][0]["metadata"]["name"] kubectl.launch( - f"exec -n {settings.operator_namespace} {clickhouse_operator_pod} -c metrics-exporter -- sh -c 'kill 1'", + f"exec -n {self.context.operator_namespace} {clickhouse_operator_pod} -c metrics-exporter -- sh -c 'kill 1'", ok_to_fail=True, ) @@ -89,7 +93,7 @@ def reboot_metrics_exporter(): @TestScenario -@Name("test_clickhouse_server_reboot. Check ClickHouseServerDown, ClickHouseServerRestartRecently") +@Name("test_clickhouse_server_reboot# Check ClickHouseServerDown, ClickHouseServerRestartRecently") def test_clickhouse_server_reboot(self, prometheus_operator_spec, clickhouse_operator_spec, chi): random_idx = random.randint(0, 1) clickhouse_pod = chi["status"]["pods"][random_idx] @@ -149,7 +153,7 @@ def reboot_clickhouse_server(): @TestScenario -@Name("test_clickhouse_dns_errors. Check ClickHouseDNSErrors") +@Name("test_clickhouse_dns_errors# Check ClickHouseDNSErrors") def test_clickhouse_dns_errors(self, prometheus_operator_spec, clickhouse_operator_spec, chi): random_idx = random.randint(0, 1) clickhouse_pod = chi["status"]["pods"][random_idx] @@ -196,7 +200,7 @@ def rewrite_dns_on_clickhouse_server(write_new=True):
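A note on the merge-tree thresholds used further down: test_insert_related_alerts (below) now reads parts_to_delay_insert and parts_to_throw_insert from system.merge_tree_settings instead of hard-coding them. Run by hand, the same lookup is just the following (namespace, pod and container names are illustrative placeholders):

kubectl exec -n test-metrics-alerts chi-test-cluster-for-alerts-default-0-0-0 -c clickhouse-pod -- \
    clickhouse-client --query="SELECT name, value FROM system.merge_tree_settings WHERE name IN ('parts_to_delay_insert','parts_to_throw_insert') FORMAT JSONCompact"

@TestScenario -@Name("test_distributed_files_to_insert.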
Check ClickHouseDistributedFilesToInsertHigh") +@Name("test_distributed_files_to_insert# Check ClickHouseDistributedFilesToInsertHigh") def test_distributed_files_to_insert(self, prometheus_operator_spec, clickhouse_operator_spec, chi): ( delayed_pod, @@ -276,7 +280,7 @@ def test_distributed_files_to_insert(self, prometheus_operator_spec, clickhouse_ @TestScenario -@Name("test_distributed_connection_exceptions. Check ClickHouseDistributedConnectionExceptions") +@Name("test_distributed_connection_exceptions# Check ClickHouseDistributedConnectionExceptions") def test_distributed_connection_exceptions(self, prometheus_operator_spec, clickhouse_operator_spec, chi): ( delayed_pod, @@ -340,12 +344,14 @@ def reboot_clickhouse_and_distributed_exection(): "true", ns=self.context.test_namespace, ) + # why connection refused if /ping return ok ? ;( + time.sleep(5) clickhouse.drop_distributed_table_on_cluster(chi) @TestScenario @Name( - "test_insert_related_alerts. Check ClickHouseRejectedInsert, ClickHouseDelayedInsertThrottling, ClickHouseMaxPartCountForPartition, ClickHouseLowInsertedRowsPerQuery" + "test_insert_related_alerts# Check ClickHouseRejectedInsert, ClickHouseDelayedInsertThrottling, ClickHouseMaxPartCountForPartition, ClickHouseLowInsertedRowsPerQuery" ) def test_insert_related_alerts(self, prometheus_operator_spec, clickhouse_operator_spec, chi): clickhouse.create_table_on_cluster(chi) @@ -357,10 +363,16 @@ def test_insert_related_alerts(self, prometheus_operator_spec, clickhouse_operat ) = alerts.random_pod_choice_for_callbacks(chi) prometheus_scrape_interval = settings.prometheus_scrape_interval + chi_name = chi["metadata"]["name"] # default values in system.merge_tree_settings + settings_json = json.loads(clickhouse.query(chi_name, "SELECT name, value FROM system.merge_tree_settings WHERE name IN ('parts_to_delay_insert','parts_to_throw_insert') FORMAT JSONCompact")) parts_to_throw_insert = 300 parts_to_delay_insert = 150 - chi_name = chi["metadata"]["name"] + for row in settings_json["data"]: + if row[0] == "parts_to_throw_insert": + parts_to_throw_insert = row[1] + if row[0] == "parts_to_delay_insert": + parts_to_delay_insert = row[1] parts_limits = parts_to_delay_insert selected_svc = delayed_svc @@ -375,13 +387,13 @@ def insert_many_parts_to_clickhouse(): + min_block + f"INSERT INTO default.test(event_time, test) SELECT now(),number FROM system.numbers LIMIT {r};" ) - clickhouse.query(chi_name, sql, host=selected_svc, ns=self.context.test_namespace) + clickhouse.query(chi_name, sql, host=selected_svc, ns=self.context.test_namespace, timeout=600) sql = ( min_block + "INSERT INTO default.test(event_time, test) SELECT now(), number FROM system.numbers LIMIT 1;" ) - clickhouse.query_with_error(chi_name, sql, host=selected_svc, ns=self.context.test_namespace) + clickhouse.query_with_error(chi_name, sql, host=selected_svc, ns=self.context.test_namespace, timeout=300) with Then(f"wait prometheus_scrape_interval={prometheus_scrape_interval}*2 sec"): time.sleep(prometheus_scrape_interval * 2) @@ -454,6 +466,13 @@ def insert_many_parts_to_clickhouse(): ) assert resolved, error("can't check ClickHouseLowInsertedRowsPerQuery alert is gone away") + clickhouse.query( + chi_name, + "OPTIMIZE TABLE default.test FINAL;SELECT count() FROM system.parts WHERE active AND database='default' AND table='test'", + host=selected_svc, + ns=self.context.test_namespace, + ) + parts_limits = parts_to_throw_insert selected_svc = rejected_svc insert_many_parts_to_clickhouse() @@ -487,7 +506,7 @@ def 
insert_many_parts_to_clickhouse(): @TestScenario -@Name("test_longest_running_query. Check ClickHouseLongestRunningQuery") +@Name("test_longest_running_query# Check ClickHouseLongestRunningQuery") def test_longest_running_query(self, prometheus_operator_spec, clickhouse_operator_spec, chi): long_running_pod, long_running_svc, _, _ = alerts.random_pod_choice_for_callbacks(chi) # 600s trigger + 2*30s - double prometheus scraping interval @@ -517,7 +536,7 @@ def test_longest_running_query(self, prometheus_operator_spec, clickhouse_operat @TestScenario -@Name("test_query_preempted. Check ClickHouseQueryPreempted") +@Name("test_query_preempted# Check ClickHouseQueryPreempted") def test_query_preempted(self, prometheus_operator_spec, clickhouse_operator_spec, chi): priority_pod, priority_svc, _, _ = alerts.random_pod_choice_for_callbacks(chi) @@ -555,7 +574,7 @@ def run_queries_with_priority(): @TestScenario -@Name("test_read_only_replica. Check ClickHouseReadonlyReplica") +@Name("test_read_only_replica# Check ClickHouseReadonlyReplica") def test_read_only_replica(self, prometheus_operator_spec, clickhouse_operator_spec, chi): ( read_only_pod, @@ -573,8 +592,9 @@ def test_read_only_replica(self, prometheus_operator_spec, clickhouse_operator_s ) def restart_keeper(): + kill_cmd = "ps -ef | grep keeper | grep -v grep | awk -F'[ \\t]+' '{print \$2}' | xargs kill" kubectl.launch( - f'exec -n {self.context.test_namespace} {self.context.keeper_type}-0 -- sh -c "kill 1"', + f'exec -n {self.context.test_namespace} {self.context.keeper_type}-0 -- sh -c "{kill_cmd}"', ok_to_fail=True, ) clickhouse.query_with_error( @@ -641,7 +661,7 @@ def restart_keeper(): @TestScenario -@Name("test_replicas_max_absolute_delay. Check ClickHouseReplicasMaxAbsoluteDelay") +@Name("test_replicas_max_absolute_delay# Check ClickHouseReplicasMaxAbsoluteDelay") def test_replicas_max_absolute_delay(self, prometheus_operator_spec, clickhouse_operator_spec, chi): ( stop_replica_pod, @@ -702,7 +722,7 @@ def restart_clickhouse_and_insert_to_replicated_table(): @TestScenario -@Name("test_too_many_connections. 
Check ClickHouseTooManyConnections") +@Name("test_too_many_connections# Check ClickHouseTooManyConnections") def test_too_many_connections(self, prometheus_operator_spec, clickhouse_operator_spec, chi): ( too_many_connection_pod, @@ -724,12 +744,12 @@ def make_too_many_connection(): # HTTPConnection metric increase after full parsing of HTTP Request, we can't provide pause between CONNECT and QUERY running # long_cmd += f"nc -vv 127.0.0.1 {port} <( printf \"POST / HTTP/1.1\\r\\nHost: 127.0.0.1:8123\\r\\nContent-Length: 34\\r\\n\\r\\nTEST\\r\\nTEST\\r\\nTEST\\r\\nTEST\\r\\nTEST\");" long_cmd += ( - 'wget -qO- "http://127.0.0.1:8123?query=SELECT sleepEachRow(1),number,now() FROM numbers(30) SETTINGS function_sleep_max_microseconds_per_block=0";' + 'wget -qO- "http://127.0.0.1:8123?query=SELECT sleepEachRow(1),number,now() FROM numbers(30) SETTINGS function_sleep_max_microseconds_per_block=0, max_threads=1";' ) elif port == "9000": - long_cmd += 'clickhouse-client --send_logs_level trace --idle_connection_timeout 70 --receive_timeout 70 -q "SELECT sleepEachRow(1),number,now() FROM numbers(30) SETTINGS function_sleep_max_microseconds_per_block=0";' + long_cmd += 'clickhouse-client --send_logs_level trace --idle_connection_timeout 70 --receive_timeout 70 -q "SELECT sleepEachRow(1),number,now() FROM numbers(30) SETTINGS function_sleep_max_microseconds_per_block=0,max_threads=1";' # elif port == "3306": - # long_cmd += 'mysql -u default -h 127.0.0.1 -e "SELECT sleepEachRow(1),number, now() FROM numbers(30) SETTINGS function_sleep_max_microseconds_per_block=0";' + # long_cmd += 'mysql -u default -h 127.0.0.1 -e "SELECT sleepEachRow(1),number, now() FROM numbers(30) SETTINGS function_sleep_max_microseconds_per_block=0,max_threads=1";' else: long_cmd += f'printf "1\\n1" | nc -q 5 -i 30 -vv 127.0.0.1 {port};' @@ -766,7 +786,7 @@ def make_too_many_connection(): @TestScenario -@Name("test_too_much_running_queries. Check ClickHouseTooManyRunningQueries") +@Name("test_too_much_running_queries# Check ClickHouseTooManyRunningQueries") def test_too_much_running_queries(self, prometheus_operator_spec, clickhouse_operator_spec, chi): ( _, @@ -828,7 +848,7 @@ def make_too_many_queries(): @TestScenario -@Name("test_system_settings_changed. Check ClickHouseSystemSettingsChanged") +@Name("test_system_settings_changed# Check ClickHouseSystemSettingsChanged") def test_system_settings_changed(self, prometheus_operator_spec, clickhouse_operator_spec, chi): changed_pod, changed_svc, _, _ = alerts.random_pod_choice_for_callbacks(chi) @@ -889,7 +909,7 @@ def test_system_settings_changed(self, prometheus_operator_spec, clickhouse_oper @TestScenario -@Name("test_version_changed. Check ClickHouseVersionChanged") +@Name("test_version_changed# Check ClickHouseVersionChanged") def test_version_changed(self, prometheus_operator_spec, clickhouse_operator_spec, chi): changed_pod, changed_svc, _, _ = alerts.random_pod_choice_for_callbacks(chi) @@ -954,14 +974,15 @@ def test_version_changed(self, prometheus_operator_spec, clickhouse_operator_spe @TestScenario -@Name("test_zookeeper_hardware_exceptions. 
Check ClickHouseZooKeeperHardwareExceptions") +@Name("test_zookeeper_hardware_exceptions# Check ClickHouseZooKeeperHardwareExceptions") def test_zookeeper_hardware_exceptions(self, prometheus_operator_spec, clickhouse_operator_spec, chi): pod1, svc1, pod2, svc2 = alerts.random_pod_choice_for_callbacks(chi) chi_name = chi["metadata"]["name"] def restart_keeper(): + kill_cmd = "ps -ef | grep keeper | grep -v grep | awk -F'[ \\t]+' '{print \$2}' | xargs kill" kubectl.launch( - f'exec -n {self.context.test_namespace} {self.context.keeper_type}-0 -- sh -c "kill 1"', + f'exec -n {self.context.test_namespace} {self.context.keeper_type}-0 -- sh -c "{kill_cmd}"', ok_to_fail=True, ) clickhouse.query_with_error( @@ -1009,7 +1030,7 @@ def restart_keeper(): @TestScenario -@Name("test_distributed_sync_insertion_timeout. Check ClickHouseDistributedSyncInsertionTimeoutExceeded") +@Name("test_distributed_sync_insertion_timeout# Check ClickHouseDistributedSyncInsertionTimeoutExceeded") def test_distributed_sync_insertion_timeout(self, prometheus_operator_spec, clickhouse_operator_spec, chi): ( sync_pod, @@ -1059,7 +1080,7 @@ def insert_distributed_sync(): @TestScenario -@Name("test_detached_parts. Check ClickHouseDetachedParts") +@Name("test_detached_parts# Check ClickHouseDetachedParts") def test_detached_parts(self, prometheus_operator_spec, clickhouse_operator_spec, chi): clickhouse.create_table_on_cluster(chi) detached_pod, detached_svc, _, _ = alerts.random_pod_choice_for_callbacks(chi) @@ -1118,14 +1139,27 @@ def attach_all_parts(): @TestScenario -@Name("test_clickhouse_keeper_alerts. Check ClickHouseKeeperDown") -def test_clickhouse_keeper_alerts(self, prometheus_operator_spec, clickhouse_operator_spec, chi): +@Name("test_clickhouse_keeper_alerts# Check ClickHouseKeeperDown") +def test_clickhouse_keeper_alerts(self): + alerts.initialize( + chi_file="manifests/chi/test-cluster-for-alerts.yaml", + chi_template_file="manifests/chit/tpl-clickhouse-alerts.yaml", + chi_name="test-cluster-for-alerts", + keeper_type="clickhouse-keeper", + ) test_keeper_alerts_outline(keeper_type="clickhouse-keeper") @TestScenario -@Name("test_zookeeper_alerts. 
Check ZookeeperDown, ZookeeperRestartRecently") -def test_zookeeper_alerts(self, prometheus_operator_spec, clickhouse_operator_spec, chi): +@Name("test_zookeeper_alerts# Check ZookeeperDown, ZookeeperRestartRecently") +def test_zookeeper_alerts(self): + alerts.initialize( + chi_file="manifests/chi/test-cluster-for-alerts.yaml", + chi_template_file="manifests/chit/tpl-clickhouse-alerts.yaml", + chi_name="test-cluster-for-alerts", + keeper_type="zookeeper", + ) + test_keeper_alerts_outline(keeper_type="zookeeper") @@ -1145,8 +1179,9 @@ def test_keeper_alerts_outline(self, keeper_type): } def restart_keeper(): + kill_cmd = "ps -ef | grep keeper | grep -v grep | awk -F'[ \\t]+' '{print \$2}' | xargs kill" kubectl.launch( - f'exec -n {self.context.test_namespace} {keeper_spec} -- sh -c "kill 1"', + f'exec -n {self.context.test_namespace} {keeper_spec} -- sh -c "{kill_cmd}"', ok_to_fail=True, ) @@ -1167,7 +1202,7 @@ def wait_when_keeper_up(): "firing", True, labels={"pod_name": keeper_spec}, - time_range="1m", + time_range="3m", sleep_time=settings.prometheus_scrape_interval, callback=restart_keeper, ) @@ -1194,7 +1229,7 @@ def wait_when_keeper_up(): "firing", True, labels={"pod_name": keeper_spec}, - time_range="30s", + time_range="3m", ) assert fired, error(f"can't get {expected_alerts[keeper_type]['restart']} alert in firing state") @@ -1217,35 +1252,16 @@ def wait_when_keeper_up(): def test(self): with Given("I setup settings"): steps.set_settings() + self.context.test_namespace = "test-metrics-alerts" + self.context.operator_namespace = "test-metrics-alerts" with Given("I create shell"): shell = steps.get_shell() self.context.shell = shell util.clean_namespace(delete_chi=True) util.install_operator_if_not_exist() - (prometheus_operator_spec, prometheus_spec, alertmanager_spec, clickhouse_operator_spec, chi,) = alerts.initialize( - chi_file="manifests/chi/test-cluster-for-alerts.yaml", - chi_template_file="manifests/chit/tpl-clickhouse-alerts.yaml", - chi_name="test-cluster-for-alerts", - keeper_type="clickhouse-keeper", - ) - Scenario(test=test_clickhouse_keeper_alerts)( - prometheus_operator_spec=prometheus_operator_spec, - clickhouse_operator_spec=clickhouse_operator_spec, - chi=chi, - ) - - (prometheus_operator_spec, prometheus_spec, alertmanager_spec, clickhouse_operator_spec, chi,) = alerts.initialize( - chi_file="manifests/chi/test-cluster-for-alerts.yaml", - chi_template_file="manifests/chit/tpl-clickhouse-alerts.yaml", - chi_name="test-cluster-for-alerts", - keeper_type="zookeeper", - ) - Scenario(test=test_zookeeper_alerts)( - prometheus_operator_spec=prometheus_operator_spec, - clickhouse_operator_spec=clickhouse_operator_spec, - chi=chi, - ) + Scenario(test=test_zookeeper_alerts) + Scenario(test=test_clickhouse_keeper_alerts) (prometheus_operator_spec, prometheus_spec, alertmanager_spec, clickhouse_operator_spec, chi,) = alerts.initialize( chi_file="manifests/chi/test-cluster-for-alerts.yaml", @@ -1254,14 +1270,13 @@ def test(self): keeper_type=self.context.keeper_type, ) - test_cases = [ + all_tests = [ test_prometheus_setup, + test_insert_related_alerts, + test_distributed_connection_exceptions, test_read_only_replica, - test_replicas_max_absolute_delay, test_metrics_exporter_down, test_clickhouse_dns_errors, - test_distributed_connection_exceptions, - test_insert_related_alerts, test_too_many_connections, test_too_much_running_queries, test_longest_running_query, @@ -1272,10 +1287,13 @@ def test(self): test_distributed_files_to_insert, test_detached_parts, 
test_clickhouse_server_reboot, + test_replicas_max_absolute_delay, ] - for t in test_cases: + for t in all_tests: Scenario(test=t)( prometheus_operator_spec=prometheus_operator_spec, clickhouse_operator_spec=clickhouse_operator_spec, chi=chi, ) + + util.clean_namespace(delete_chi=True, delete_keeper=True) diff --git a/tests/e2e/test_metrics_exporter.py b/tests/e2e/test_metrics_exporter.py index 923abd08a..61ac56db6 100644 --- a/tests/e2e/test_metrics_exporter.py +++ b/tests/e2e/test_metrics_exporter.py @@ -1,5 +1,6 @@ -import time -import re +import json +import os +os.environ["TEST_NAMESPACE"]="test-metrics-exporter" import json from e2e.steps import * @@ -14,16 +15,16 @@ @TestScenario -@Name("Check metrics server setup and version") +@Name("test_metrics_exporter_setup: Check metrics server setup and version") def test_metrics_exporter_setup(self): with Given("clickhouse-operator is installed"): - assert kubectl.get_count("pod", ns="--all-namespaces", label=util.operator_label) > 0, error() + assert kubectl.get_count("pod", ns=self.context.operator_namespace, label=util.operator_label) > 0, error() with Then(f"Set metrics-exporter version {settings.operator_version}"): util.set_metrics_exporter_version(settings.operator_version) @TestScenario -@Name("Test basic metrics exporter functionality") +@Name("test_metrics_exporter_chi: Test basic metrics exporter functionality") def test_metrics_exporter_chi(self): def check_monitoring_chi(operator_namespace, operator_pod, expect_result, max_retries=10): with Then(f"metrics-exporter /chi endpoint result should return {expect_result}"): @@ -77,7 +78,7 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma "true,true", ns=self.context.operator_namespace, ) - assert kubectl.get_count("pod", ns="--all-namespaces", label=util.operator_label) > 0, error() + assert kubectl.get_count("pod", ns=self.context.operator_namespace, label=util.operator_label) > 0, error() out = kubectl.launch("get pods -l app=clickhouse-operator", ns=self.context.operator_namespace).splitlines()[1] operator_pod = re.split(r"[\t\r\n\s]+", out)[0] @@ -98,7 +99,7 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma ) expected_chi = [ { - "namespace": "test", + "namespace": self.context.test_namespace, "name": "test-017-multi-version", "labels": {"clickhouse.altinity.com/chi": "test-017-multi-version"}, "annotations": {"clickhouse.altinity.com/email": "myname@mydomain.com, yourname@yourdoman.com"}, @@ -108,13 +109,13 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma "hosts": [ { "name": "0-0", - "hostname": "chi-test-017-multi-version-default-0-0.test.svc.cluster.local", + "hostname": f"chi-test-017-multi-version-default-0-0.{self.context.test_namespace}.svc.cluster.local", "tcpPort": 9000, "httpPort": 8123 }, { "name": "1-0", - "hostname": "chi-test-017-multi-version-default-1-0.test.svc.cluster.local", + "hostname": f"chi-test-017-multi-version-default-1-0.{self.context.test_namespace}.svc.cluster.local", "tcpPort": 9000, "httpPort": 8123 } @@ -186,8 +187,8 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma def test(self): with Given("set settings"): set_settings() - self.context.test_namespace = "test" - self.context.operator_namespace = "test" + self.context.test_namespace = "test-metrics-exporter" + self.context.operator_namespace = "test-metrics-exporter" with Given("I create shell"): shell = get_shell() self.context.shell = shell diff --git 
a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index b5e66bb20..15a0fa155 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -19,7 +19,7 @@ @TestScenario -@Name("test_010001. 1 node") +@Name("test_010001# 1 node") @Requirements(RQ_SRS_026_ClickHouseOperator_Create("1.0")) def test_010001(self): create_shell_namespace_clickhouse_template() @@ -50,7 +50,7 @@ def test_010001(self): @TestScenario -@Name("test_010002. useTemplates for pod, volume templates, and distribution") +@Name("test_010002# useTemplates for pod, volume templates, and distribution") @Requirements( RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_UseTemplates("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_UseTemplates_Name("1.0"), @@ -79,7 +79,7 @@ def test_010002(self): @TestScenario -@Name("test_010003. 4 nodes with custom layout definition") +@Name("test_010003# 4 nodes with custom layout definition") @Requirements( RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_Clusters("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_Clusters_Cluster_Layout("1.0"), @@ -105,7 +105,7 @@ def test_010003(self): @TestScenario -@Name("test_010004. Compatibility test if old syntax with volumeClaimTemplate is still supported") +@Name("test_010004# Compatibility test if old syntax with volumeClaimTemplate is still supported") @Requirements( RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Templates_VolumeClaimTemplates("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Templates_VolumeClaimTemplates_Name("1.0"), @@ -129,7 +129,7 @@ def test_010004(self): @TestScenario -@Name("test_010005. Test manifest created by ACM") +@Name("test_010005# Test manifest created by ACM") @Requirements(RQ_SRS_026_ClickHouseOperator_ACM("1.0")) def test_010005(self): create_shell_namespace_clickhouse_template() @@ -150,7 +150,7 @@ def test_010005(self): @TestScenario -@Name("test_010006. Test clickhouse version upgrade from one version to another using podTemplate change") +@Name("test_010006# Test clickhouse version upgrade from one version to another using podTemplate change") @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_VersionUpgrades("1.0")) def test_010006(self): create_shell_namespace_clickhouse_template() @@ -192,7 +192,7 @@ def test_010006(self): @TestScenario -@Name("test_010007. Test template with custom clickhouse ports") +@Name("test_010007# Test template with custom clickhouse ports") @Requirements( RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Templates_HostTemplates_Spec_InterServerHttpPort("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Templates_HostTemplates_Spec_TcpPort("1.0"), @@ -379,7 +379,7 @@ def check_remote_servers(self, chi, shards, trigger_event, shell=None, cluster=" @TestScenario -@Name("test_010008_1. Test operator restart") +@Name("test_010008_1# Test operator restart") @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_RestartingOperator("1.0")) def test_010008_1(self): create_shell_namespace_clickhouse_template() @@ -395,7 +395,7 @@ def test_010008_1(self): @TestScenario -@Name("test_010008_2. Test operator restart") +@Name("test_010008_2# Test operator restart") def test_010008_2(self): create_shell_namespace_clickhouse_template() @@ -410,7 +410,7 @@ def test_010008_2(self): @TestScenario -@Name("test_010008_3. 
Test operator restart in the middle of reconcile") +@Name("test_010008_3# Test operator restart in the middle of reconcile") def test_010008_3(self): create_shell_namespace_clickhouse_template() @@ -579,7 +579,7 @@ def test_operator_upgrade(self, manifest, service, version_from, version_to=None @TestScenario -@Name("test_010009_1. Test operator upgrade") +@Name("test_010009_1# Test operator upgrade") @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_UpgradingOperator("1.0")) @Tags("NO_PARALLEL") def test_010009_1(self, version_from="0.24.5", version_to=None): @@ -596,7 +596,7 @@ def test_010009_1(self, version_from="0.24.5", version_to=None): @TestScenario -@Name("test_010009_2. Test operator upgrade") +@Name("test_010009_2# Test operator upgrade") @Tags("NO_PARALLEL") def test_010009_2(self, version_from="0.24.5", version_to=None): if version_to is None: @@ -612,7 +612,7 @@ def test_010009_2(self, version_from="0.24.5", version_to=None): @TestScenario -@Name("test_010010. Test zookeeper initialization") +@Name("test_010010# Test zookeeper initialization") @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_ZooKeeper("1.0")) def test_010010(self): create_shell_namespace_clickhouse_template() @@ -643,7 +643,7 @@ def test_010010(self): @TestScenario -@Name("test_010010_1. Test zookeeper initialization AFTER starting a cluster") +@Name("test_010010_1# Test zookeeper initialization AFTER starting a cluster") def test_010010_1(self): create_shell_namespace_clickhouse_template() chi = "test-010-zk-init" @@ -685,7 +685,7 @@ def get_user_xml_from_configmap(chi, user): @TestScenario -@Name("test_010011_1. Test user security and network isolation") +@Name("test_010011_1# Test user security and network isolation") @Requirements(RQ_SRS_026_ClickHouseOperator_DefaultUsers("1.0")) def test_010011_1(self): create_shell_namespace_clickhouse_template() @@ -859,7 +859,7 @@ def test_default_user(): @TestScenario -@Name("test_010011_2. Test default user security") +@Name("test_010011_2# Test default user security") @Requirements(RQ_SRS_026_ClickHouseOperator_DefaultUsers("1.0")) def test_010011_2(self): create_shell_namespace_clickhouse_template() @@ -925,7 +925,7 @@ def test_010011_2(self): @TestScenario -@Name("test_010011_3. Test k8s secrets usage") +@Name("test_010011_3# Test k8s secrets usage") @Requirements(RQ_SRS_026_ClickHouseOperator_Secrets("1.0")) def test_010011_3(self): create_shell_namespace_clickhouse_template() @@ -1022,7 +1022,7 @@ def test_010011_3(self): @TestScenario -@Name("test_010012. Test service templates") +@Name("test_010012# Test service templates") @Requirements( RQ_SRS_026_ClickHouseOperator_ServiceTemplates("1.0"), RQ_SRS_026_ClickHouseOperator_ServiceTemplates_NameGeneration("1.0"), @@ -1093,7 +1093,7 @@ def test_010012(self): RQ_SRS_026_ClickHouseOperator_Managing_ClusterScaling_AddingShards("1.0"), RQ_SRS_026_ClickHouseOperator_Managing_ClusterScaling_SchemaPropagation("1.0"), ) -@Name("test_010013_1. Automatic schema propagation for shards") +@Name("test_010013_1# Automatic schema propagation for shards") def test_010013_1(self): """Check clickhouse operator supports automatic schema propagation for shards.""" create_shell_namespace_clickhouse_template() @@ -1394,7 +1394,7 @@ def wait_for_cluster(chi, cluster, num_shards, num_replicas=0, pwd="", force_wai @TestScenario -@Name("test_010014_0. 
Test that schema is correctly propagated on replicas") +@Name("test_010014_0# Test that schema is correctly propagated on replicas") @Requirements( RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_Clusters_Cluster_ZooKeeper("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_Clusters("1.0"), @@ -1750,7 +1750,7 @@ def check_schema_propagation(replicas): @TestScenario -@Name("test_010014_1. Test replicasUseFQDN") +@Name("test_010014_1# Test replicasUseFQDN") def test_010014_1(self): create_shell_namespace_clickhouse_template() @@ -1779,7 +1779,7 @@ def test_010014_1(self): with Given("Create schema objects"): for replica in replicas: - clickhouse.query(chi, create_table, host=f"chi-{chi}-{cluster}-0-{replica}") + clickhouse.query(chi, create_table, host=f"chi-{chi}-{cluster}-0-{replica}", timeout=120) def check_data_is_replicated(replicas, v): with When("Data is inserted on two replicas"): @@ -1902,7 +1902,7 @@ def check_host_network(manifest, replica1_port="9000", replica2_port="9000"): @TestScenario -@Name("test_010015. hostNetwork") +@Name("test_010015# hostNetwork") @Requirements(RQ_SRS_026_ClickHouseOperator_Deployments_CircularReplication("1.0")) def test_010015(self): create_shell_namespace_clickhouse_template() @@ -1920,7 +1920,7 @@ def test_010015(self): @TestScenario -@Name("test_010016. Test advanced settings options") +@Name("test_010016# Test advanced settings options") @Requirements( RQ_SRS_026_ClickHouseOperator_ConfigurationFileControl_EmbeddedXML("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_Clusters("1.0"), @@ -2093,7 +2093,7 @@ def test_010016(self): @TestScenario -@Name("test_010017. Test deployment of multiple versions in a cluster") +@Name("test_010017# Test deployment of multiple versions in a cluster") @Requirements(RQ_SRS_026_ClickHouseOperator_Deployments_DifferentClickHouseVersionsOnReplicasAndShards("1.0")) def test_010017(self): create_shell_namespace_clickhouse_template() @@ -2135,7 +2135,7 @@ def test_010017(self): @TestScenario -@Name("test_010018. Test that server settings are applied before StatefulSet is started") +@Name("test_010018# Test that server settings are applied before StatefulSet is started") # Obsolete, covered by test_016 def test_010018(self): create_shell_namespace_clickhouse_template() @@ -2322,7 +2322,7 @@ def test_019(self, step=1): @TestScenario -@Name("test_010019_1. Test that volume is correctly retained and can be re-attached. Provisioner: StatefulSet") +@Name("test_010019_1# Test that volume is correctly retained and can be re-attached. Provisioner: StatefulSet") @Requirements(RQ_SRS_026_ClickHouseOperator_RetainingVolumeClaimTemplates("1.0")) def test_010019_1(self): create_shell_namespace_clickhouse_template() @@ -2331,7 +2331,7 @@ def test_010019_1(self): @TestScenario -@Name("test_010019_2. Test that volume is correctly retained and can be re-attached. Provisioner: Operator") +@Name("test_010019_2# Test that volume is correctly retained and can be re-attached. Provisioner: Operator") @Requirements(RQ_SRS_026_ClickHouseOperator_RetainingVolumeClaimTemplates("1.0")) def test_010019_2(self): create_shell_namespace_clickhouse_template() @@ -2384,7 +2384,7 @@ def test_020(self, step=1): @TestScenario -@Name("test_010020_1. 
Test multi-volume configuration, step=1") +@Name("test_010020_1# Test multi-volume configuration, step=1") @Requirements(RQ_SRS_026_ClickHouseOperator_Deployments_MultipleStorageVolumes("1.0")) def test_010020_1(self): create_shell_namespace_clickhouse_template() @@ -2393,7 +2393,7 @@ def test_010020_1(self): @TestScenario -@Name("test_010020_2. Test multi-volume configuration, step=2") +@Name("test_010020_2# Test multi-volume configuration, step=2") @Requirements(RQ_SRS_026_ClickHouseOperator_Deployments_MultipleStorageVolumes("1.0")) def test_010020_2(self): create_shell_namespace_clickhouse_template() @@ -2613,7 +2613,7 @@ def test_021(self, step=1): @TestScenario -@Name("test_010021_1. Test rescaling storage. Provisioner: StatefulSet") +@Name("test_010021_1# Test rescaling storage. Provisioner: StatefulSet") @Requirements(RQ_SRS_026_ClickHouseOperator_StorageProvisioning("1.0")) def test_010021_1(self): create_shell_namespace_clickhouse_template() @@ -2622,7 +2622,7 @@ def test_010021_1(self): @TestScenario -@Name("test_010021_2. Test rescaling storage. Provisioner: Operator") +@Name("test_010021_2# Test rescaling storage. Provisioner: Operator") @Requirements(RQ_SRS_026_ClickHouseOperator_StorageProvisioning("1.0")) def test_010021_2(self): create_shell_namespace_clickhouse_template() @@ -2631,7 +2631,7 @@ def test_010021_2(self): @TestScenario -@Name("test_010022. Test that chi with broken image can be deleted") +@Name("test_010022# Test that chi with broken image can be deleted") @Requirements(RQ_SRS_026_ClickHouseOperator_DeleteBroken("1.0")) def test_010022(self): create_shell_namespace_clickhouse_template() @@ -2662,7 +2662,7 @@ def test_010022(self): @TestScenario -@Name("test_010023. Test auto templates") +@Name("test_010023# Test auto templates") @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Templating("1.0")) def test_010023(self): create_shell_namespace_clickhouse_template() @@ -2768,7 +2768,7 @@ def checkEnv(pos, env_name, env_value): @TestScenario -@Name("test_010024. Test annotations for various template types") +@Name("test_010024# Test annotations for various template types") @Requirements(RQ_SRS_026_ClickHouseOperator_AnnotationsInTemplates("1.0")) def test_010024(self): create_shell_namespace_clickhouse_template() @@ -2869,7 +2869,7 @@ def check_annotations(annotation, value, allow_to_fail_for_pvc=False): @TestScenario -@Name("test_010025. Test that service is available during re-scaling, upgrades etc.") +@Name("test_010025# Test that service is available during re-scaling, upgrades etc.") @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_ClusterScaling_AddingReplicas("1.0")) def test_010025(self): create_shell_namespace_clickhouse_template() @@ -2907,6 +2907,14 @@ def test_010025(self): timeout=600, ) + kubectl.wait_jsonpath( + "pod", + "chi-test-025-rescaling-default-0-0-0", + "{.status.containerStatuses[0].ready}", + "true", + ns=self.context.test_namespace, + ) + numbers = "100000000" with Given("Create replicated table and populate it"): @@ -2979,7 +2987,7 @@ def test_010025(self): @TestScenario -@Name("test_010026. Test mixed single and multi-volume configuration in one cluster") +@Name("test_010026# Test mixed single and multi-volume configuration in one cluster") @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_Clusters_Cluster_Layout("1.0")) def test_010026(self): create_shell_namespace_clickhouse_template() @@ -3059,7 +3067,7 @@ def test_010026(self): @TestScenario -@Name("test_010027. 
Test troubleshooting mode") +@Name("test_010027# Test troubleshooting mode") @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Troubleshoot("1.0")) def test_010027(self): # TODO: Add a case for a custom endpoint @@ -3111,7 +3119,7 @@ def test_010027(self): @TestScenario -@Name("test_010028. Test restart scenarios") +@Name("test_010028# Test restart scenarios") @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_RestartingOperator("1.0")) def test_010028(self): create_shell_namespace_clickhouse_template() @@ -3165,7 +3173,8 @@ def test_010028(self): sql, pod="chi-test-028-replication-default-0-0-0", host="chi-test-028-replication-default-0-0", - advanced_params="--connect_timeout=1 --send_timeout=10 --receive_timeout=10", + timeout=10, + advanced_params="--connect_timeout=1 --send_timeout=10", ) ch2 = clickhouse.query_with_error( chi, @@ -3247,7 +3256,7 @@ def test_010028(self): @TestScenario -@Name("test_010029. Test different distribution settings") +@Name("test_010029# Test different distribution settings") @Requirements( RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Templates_PodTemplates_podDistribution("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Templates_PodTemplates_podDistribution_Type("1.0"), @@ -3291,7 +3300,7 @@ def test_010029(self): @TestScenario -@Name("test_090099. Test CRD deletion. Should be executed at the end") +@Name("test_090099# Test CRD deletion. Should be executed at the end") @Tags("NO_PARALLEL") def test_090099(self): create_shell_namespace_clickhouse_template() @@ -3340,7 +3349,7 @@ def test_090099(self): @TestScenario -@Name("test_010031. Test excludeFromPropagationAnnotations work") +@Name("test_010031# Test excludeFromPropagationAnnotations work") def test_010031(self): create_shell_namespace_clickhouse_template() @@ -3496,7 +3505,7 @@ def run_insert_query(self, host, user, password, query, trigger_event, shell=Non @TestScenario -@Name("test_010032. Test rolling update logic") +@Name("test_010032# Test rolling update logic") # @Tags("NO_PARALLEL") def test_010032(self): """Test rolling update logic.""" @@ -3605,7 +3614,7 @@ def test_010032(self): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_EnableHttps("1.0")) -@Name("test_010034. Check HTTPS support for health check") +@Name("test_010034# Check HTTPS support for health check") def test_010034(self): """Check ClickHouse-Operator HTTPS support by switching configuration to HTTPS using the chopconf file and creating a ClickHouse-Installation with HTTPS enabled and confirming the secure connectivity between them by @@ -3765,7 +3774,7 @@ def test_010034(self): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_ReprovisioningVolume("1.0")) -@Name("test_010036. Check operator volume re-provisioning") +@Name("test_010036# Check operator volume re-provisioning") def test_010036(self): """Check clickhouse operator recreates volumes and schema if volume is broken.""" create_shell_namespace_clickhouse_template() @@ -3968,7 +3977,7 @@ def check_data_is_recovered(): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_StorageManagementSwitch("1.0")) -@Name("test_010037. StorageManagement switch") +@Name("test_010037# StorageManagement switch") def test_010037(self): """Check clickhouse-operator supports switching storageManagement config option from default (StatefulSet) to Operator""" @@ -4074,7 +4083,7 @@ def test_010037(self): @TestCheck -@Name("test_039. 
Inter-cluster communications with secret") +@Name("test_039# Inter-cluster communications with secret") def test_039(self, step=0, delete_chi=0): """Check clickhouse-operator support inter-cluster communications with secrets.""" cluster = "default" @@ -4153,7 +4162,7 @@ def test_039(self, step=0, delete_chi=0): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_InterClusterCommunicationWithSecret("1.0")) -@Name("test_010039_0. Inter-cluster communications with no secret defined") +@Name("test_010039_0# Inter-cluster communications with no secret defined") def test_010039_0(self): create_shell_namespace_clickhouse_template() @@ -4162,7 +4171,7 @@ def test_010039_0(self): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_InterClusterCommunicationWithSecret("1.0")) -@Name("test_010039_1. Inter-cluster communications with 'auto' secret") +@Name("test_010039_1# Inter-cluster communications with 'auto' secret") def test_010039_1(self): """Check clickhouse-operator support inter-cluster communications with 'auto' secret.""" create_shell_namespace_clickhouse_template() @@ -4172,7 +4181,7 @@ def test_010039_1(self): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_InterClusterCommunicationWithSecret("1.0")) -@Name("test_010039_2. Inter-cluster communications with plane text secret") +@Name("test_010039_2# Inter-cluster communications with plane text secret") def test_010039_2(self): """Check clickhouse-operator support inter-cluster communications with plan text secret.""" create_shell_namespace_clickhouse_template() @@ -4182,7 +4191,7 @@ def test_010039_2(self): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_InterClusterCommunicationWithSecret("1.0")) -@Name("test_010039_3. Inter-cluster communications with k8s secret") +@Name("test_010039_3# Inter-cluster communications with k8s secret") def test_010039_3(self): """Check clickhouse-operator support inter-cluster communications with k8s secret.""" create_shell_namespace_clickhouse_template() @@ -4192,7 +4201,7 @@ def test_010039_3(self): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_InterClusterCommunicationWithSecret("1.0")) -@Name("test_010039_4. Inter-cluster communications over HTTPS") +@Name("test_010039_4# Inter-cluster communications over HTTPS") def test_010039_4(self): """Check clickhouse-operator support inter-cluster communications over HTTPS.""" create_shell_namespace_clickhouse_template() @@ -4201,7 +4210,7 @@ def test_010039_4(self): @TestScenario -@Name("test_010040. Inject a startup probe using an auto template") +@Name("test_010040# Inject a startup probe using an auto template") def test_010040(self): create_shell_namespace_clickhouse_template() @@ -4239,7 +4248,7 @@ def test_010040(self): @TestScenario -@Name("test_010040_1. Inject a startup probe using a reconcile setting") +@Name("test_010040_1# Inject a startup probe using a reconcile setting") def test_010040_1(self): create_shell_namespace_clickhouse_template() @@ -4250,10 +4259,6 @@ def test_010040_1(self): kubectl.create_and_check( manifest=manifest, check={ - "apply_templates": { - current().context.clickhouse_template, - }, - "pod_image": current().context.clickhouse_version, "pod_count": 1, "do_not_delete": 1, }, @@ -4280,7 +4285,7 @@ def test_010040_1(self): @TestScenario -@Name("test_010041. 
Secure zookeeper") +@Name("test_010041# Secure zookeeper") def test_010041(self): """Check clickhouse operator support secure zookeeper.""" @@ -4300,10 +4305,6 @@ def test_010041(self): kubectl.create_and_check( manifest=manifest, check={ - "apply_templates": { - current().context.clickhouse_template, - }, - "pod_image": current().context.clickhouse_version, "pod_count": 2, "do_not_delete": 1, }, @@ -4348,7 +4349,7 @@ def test_010041(self): @TestScenario -@Name("test_010042. Test configuration rollback") +@Name("test_010042# Test configuration rollback") def test_010042(self): create_shell_namespace_clickhouse_template() with Given("I change operator statefullSet timeout"): @@ -4445,12 +4446,10 @@ def test_010042(self): @TestCheck -@Name("test_043. Logs container customizing") +@Name("test_043# Logs container customizing") def test_043(self, manifest): """Check that clickhouse-operator support logs container customizing.""" - create_shell_namespace_clickhouse_template() - cluster = "cluster" chi = yaml_manifest.get_name(util.get_full_path(manifest)) @@ -4458,10 +4457,6 @@ def test_043(self, manifest): kubectl.create_and_check( manifest=manifest, check={ - "apply_templates": { - current().context.clickhouse_template, - }, - "pod_image": current().context.clickhouse_version, "pod_count": 1, "do_not_delete": 1, }, @@ -4492,18 +4487,20 @@ def test_043(self, manifest): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Defaults_Templates_logVolumeClaimTemplate("1.0")) -@Name("test_010043_0. Logs container customizing using PodTemplate") +@Name("test_010043_0# Logs container customizing using PodTemplate") def test_010043_0(self): """Check that clickhouse-operator support manual logs container customizing.""" + create_shell_namespace_clickhouse_template() test_043(manifest="manifests/chi/test-043-0-logs-container-customizing.yaml") @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Defaults_Templates_logVolumeClaimTemplate("1.0")) -@Name("test_010043_1. Default clickhouse-log container") +@Name("test_010043_1# Default clickhouse-log container") def test_010043_1(self): """Check that clickhouse-operator sets up default logs container if it is not specified in Pod.""" + create_shell_namespace_clickhouse_template() test_043(manifest="manifests/chi/test-043-1-logs-container-customizing.yaml") @@ -4511,7 +4508,7 @@ def test_010043_1(self): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_ReconcilingCycle("1.0"), RQ_SRS_026_ClickHouseOperator_Managing_ClusterScaling_SchemaPropagation("1.0")) -@Name("test_010044. Schema and data propagation with slow replica") +@Name("test_010044# Schema and data propagation with slow replica") def test_010044(self): """Check that schema and data can be propagated on other replica if replica start takes a lot of time.""" create_shell_namespace_clickhouse_template() @@ -4575,7 +4572,7 @@ def test_010044(self): @TestCheck -@Name("test_045. Restart operator without waiting for queries to finish") +@Name("test_045# Restart operator without waiting for queries to finish") def test_045(self, manifest): """Check that operator support does not wait for the query to finish before operator commences restart.""" @@ -4611,7 +4608,7 @@ def test_045(self, manifest): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Reconciling_Policy("1.0")) -@Name("test_010045_1. 
Reconcile wait queries property specified by CHI") +@Name("test_010045_1# Reconcile wait queries property specified by CHI") def test_010045_1(self): """Check that operator supports spec.reconciling.policy property in CHI that forces the operator not to wait for the queries to finish before restart.""" @@ -4623,7 +4620,7 @@ def test_010045_1(self): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_Configuration_Spec_ReconcileWaitQueries("1.0")) -@Name("test_010045_2. Reconcile wait queries property specified by clickhouse-operator config") +@Name("test_010045_2# Reconcile wait queries property specified by clickhouse-operator config") def test_010045_2(self): """Check that operator supports spec.reconcile.host.wait.queries property in clickhouse-operator config that forces the operator not to wait for the queries to finish before restart.""" @@ -4636,7 +4633,7 @@ def test_010045_2(self): @TestScenario -@Name("test_010046. Metrics for clickhouse-operator") +@Name("test_010046# Metrics for clickhouse-operator") def test_010046(self): """Check that clickhouse-operator creates metrics for reconcile and other clickhouse-operator events.""" create_shell_namespace_clickhouse_template() @@ -4767,7 +4764,7 @@ def check_metrics(metric_names): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_Clusters_Cluster_Layout_Shards_Weight("1.0")) -@Name("test_010047. Zero weighted shard") +@Name("test_010047# Zero weighted shard") def test_010047(self): """Check that clickhouse-operator supports specifying shard weight as 0 and check that data not inserted into zero-weighted shard in distributed table.""" @@ -4828,7 +4825,7 @@ def test_010047(self): @TestScenario -@Name("test_010050. Test metrics exclusion in operator config") +@Name("test_010050# Test metrics exclusion in operator config") def test_010050(self): create_shell_namespace_clickhouse_template() with Given("Operator configuration is installed"): @@ -4906,7 +4903,7 @@ def check_replication(chi, replicas, token, table = ''): @TestScenario -@Name("test_010053. Check that standard Kubernetes annotations are ignored if set to StatefulSet externally") +@Name("test_010053# Check that standard Kubernetes annotations are ignored if set to StatefulSet externally") @Tags("NO_PARALLEL") def test_010053(self): version_from = "0.23.7" @@ -4973,7 +4970,7 @@ def check_restart(): @TestScenario -@Name("test_010054. Test that 'suspend' mode delays any changes until unsuspended") +@Name("test_010054# Test that 'suspend' mode delays any changes until unsuspended") @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_VersionUpgrades("1.0")) def test_010054(self): create_shell_namespace_clickhouse_template() @@ -5020,7 +5017,7 @@ def test_010054(self): @TestScenario -@Name("test_010055. Test that restart rules can be merged from CHOP configuration") +@Name("test_010055# Test that restart rules can be merged from CHOP configuration") def test_010055(self): create_shell_namespace_clickhouse_template() with Given("Operator configuration is installed"): @@ -5082,7 +5079,7 @@ def test_010055(self): @TestScenario -@Name("test_010056. Test replica delay") +@Name("test_010056# Test replica delay") def test_010056(self): create_shell_namespace_clickhouse_template() with Given("I change operator StatefulSet timeout"): @@ -5177,7 +5174,7 @@ def test_010056(self): @TestScenario -@Name("test_010057. 
Test reconcile concurrency settings on CHI level") +@Name("test_010057# Test reconcile concurrency settings on CHI level") def test_010057(self): create_shell_namespace_clickhouse_template() @@ -5211,7 +5208,7 @@ def test_010057(self): @TestScenario -@Name("test_010058. Check ClickHouse with rootCA") +@Name("test_010058# Check ClickHouse with rootCA") def test_010058(self): # Can be merged with test_034 potentially create_shell_namespace_clickhouse_template() operator_namespace = current().context.operator_namespace @@ -5268,7 +5265,7 @@ def test_010058(self): # Can be merged with test_034 potentially @TestScenario -@Name("test_010059. Test macro substitutions in settings") +@Name("test_010059# Test macro substitutions in settings") def test_010059(self): create_shell_namespace_clickhouse_template() @@ -5354,7 +5351,7 @@ def test_010059(self): @TestScenario -@Name("test_010060. pdb management disabled") +@Name("test_010060# pdb management disabled") @Requirements(RQ_SRS_026_ClickHouseOperator_Create("1.0")) def test_010060(self): create_shell_namespace_clickhouse_template() @@ -5390,7 +5387,7 @@ def test_010060(self): # @TestScenario -@Name("test_020000. Test Basic CHK functions") +@Name("test_020000# Test Basic CHK functions") def test_020000(self): create_shell_namespace_clickhouse_template() @@ -5427,7 +5424,7 @@ def test_020000(self): delete_test_namespace() @TestScenario -@Name("test_020001. Test that Kubernetes objects between CHI and CHK does not overlap") +@Name("test_020001# Test that Kubernetes objects between CHI and CHK does not overlap") def test_020001(self): create_shell_namespace_clickhouse_template() @@ -5466,7 +5463,7 @@ def test_020001(self): delete_test_namespace() @TestScenario -@Name("test_020002. Test CHI with CHK") +@Name("test_020002# Test CHI with CHK") @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Kind_ClickHouseKeeperInstallation("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_ClickHouseKeeperInstallation_volumeClaimTemplates("1.0")) def test_020002(self): @@ -5493,7 +5490,7 @@ def test_020002(self): @TestScenario -@Name("test_020003. Clickhouse-keeper upgrade") +@Name("test_020003# Clickhouse-keeper upgrade") def test_020003(self): """Check that clickhouse-operator support upgrading clickhouse-keeper version when clickhouse-keeper defined with ClickHouseKeeperInstallation.""" @@ -5546,13 +5543,12 @@ def test_020003(self): clickhouse.query(chi, "select * from system.zookeeper_connection") check_replication(chi, {0, 1}, 2) - with Finally("I clean up"): delete_test_namespace() @TestScenario -@Name("test_020004. Test CHK upgrade from 0.23.x operator version") +@Name("test_020004# Test CHK upgrade from 0.23.x operator version") @Tags("NO_PARALLEL") def test_020004(self): with Then("Skip it. test_051_1 does a better job"): @@ -5639,7 +5635,7 @@ def test_020004(self): @TestScenario -@Name("test_020004_1. Test CHK upgrade from 0.23.x operator version") +@Name("test_020004_1# Test CHK upgrade from 0.23.x operator version") @Tags("NO_PARALLEL") def test_020004_1(self): version_from = "0.23.7" @@ -5740,7 +5736,7 @@ def test_020004_1(self): @TestScenario -@Name("test_020005. 
Clickhouse-keeper scale-up/scale-down") +@Name("test_020005# Clickhouse-keeper scale-up/scale-down") def test_020005(self): """Check that clickhouse-operator support scale-up/scale-down without service interruption""" diff --git a/tests/helpers/argparser.py b/tests/helpers/argparser.py index 7390fef3f..2d23ca60f 100644 --- a/tests/helpers/argparser.py +++ b/tests/helpers/argparser.py @@ -3,7 +3,7 @@ def argparser(parser): parser.add_argument( "--native", action="store_true", - help="run tests without docker-compose, require only working kubectl + python", + help="run tests without docker compose, require only working kubectl + python", default=False, ) parser.add_argument( diff --git a/tests/helpers/cluster.py b/tests/helpers/cluster.py index 71a131410..a29064a20 100644 --- a/tests/helpers/cluster.py +++ b/tests/helpers/cluster.py @@ -18,7 +18,7 @@ def __exit__(self, type, value, traceback): # to terminate any open shell commands. # This is needed for example # to solve a problem with - # 'docker-compose exec {name} bash --noediting' + # 'docker compose exec {name} bash --noediting' # that does not clean up open bash processes # if not exited normally for i in range(10): @@ -32,13 +32,13 @@ def __exit__(self, type, value, traceback): class Cluster(object): - """Simple object around docker-compose cluster.""" + """Simple object around docker compose cluster.""" def __init__(self, configs_dir=None): self.environ = {} self.configs_dir = configs_dir - self.docker_compose = "docker-compose" + self.docker_compose = "docker compose" self.shell = Shell() frame = inspect.currentframe().f_back @@ -62,7 +62,7 @@ def __init__(self, configs_dir=None): ) def __enter__(self): - with Given("docker-compose cluster"): + with Given("docker compose cluster"): self.up() return self @@ -71,11 +71,11 @@ def __exit__(self, type, value, traceback): self.down() def down(self, timeout=3600): - """Bring cluster down by executing docker-compose down.""" + """Bring cluster down by executing docker compose down.""" return self.shell(f"{self.docker_compose} down --timeout {timeout} -v --remove-orphans") def up(self, timeout=3600): - with Given("docker-compose"): + with Given("docker compose"): max_attempts = 5 max_up_attempts = 1 @@ -84,7 +84,7 @@ def up(self, timeout=3600): with By("checking if any containers are already running"): self.shell(f"set -o pipefail && {self.docker_compose} ps | tee") - with And("executing docker-compose down just in case it is up"): + with And("executing docker compose down just in case it is up"): cmd = self.shell( f"set -o pipefail && {self.docker_compose} down --timeout={timeout} -v --remove-orphans 2>&1 | tee" ) @@ -94,7 +94,7 @@ def up(self, timeout=3600): with And("checking if any containers are still left running"): self.shell(f"set -o pipefail && {self.docker_compose} ps | tee") - with And("executing docker-compose up"): + with And("executing docker compose up"): for up_attempt in range(max_up_attempts): with By(f"attempt {up_attempt}/{max_up_attempts}"): cmd = self.shell( @@ -113,4 +113,4 @@ def up(self, timeout=3600): break if cmd.exitcode != 0 or "is unhealthy" in cmd.output or "Exit" in ps_cmd.output: - fail("could not bring up docker-compose cluster") + fail("could not bring up docker compose cluster") diff --git a/tests/image/build_docker.sh b/tests/image/build_docker.sh index 43aa020d9..bd8d0832a 100755 --- a/tests/image/build_docker.sh +++ b/tests/image/build_docker.sh @@ -7,22 +7,22 @@ OPERATOR_IMAGE="altinity/clickhouse-operator:${OPERATOR_VERSION}" 
OPERATOR_IMAGE_OLD="altinity/clickhouse-operator:${OPERATOR_VERSION_OLD}" METRICS_EXPORTER_IMAGE="altinity/metrics-exporter:${OPERATOR_VERSION}" METRICS_EXPORTER_IMAGE_OLD="altinity/metrics-exporter:${OPERATOR_VERSION_OLD}" -CLICKHOUSE_BACKUP_IMAGE="altinity/clickhouse-backup:2.4.15" -CLICKHOUSE_IMAGE=${CLICKHOUSE_IMAGE:="clickhouse/clickhouse-server:23.8"} -CLICKHOUSE_IMAGE_OLD=${CLICKHOUSE_IMAGE_OLD:="clickhouse/clickhouse-server:23.3"} +CLICKHOUSE_BACKUP_IMAGE="altinity/clickhouse-backup:latest" +CLICKHOUSE_IMAGE=${CLICKHOUSE_IMAGE:="clickhouse/clickhouse-server:24.8"} +CLICKHOUSE_IMAGE_OLD=${CLICKHOUSE_IMAGE_OLD:="clickhouse/clickhouse-server:24.3"} CLICKHOUSE_IMAGE_LATEST=${CLICKHOUSE_IMAGE_LATEST:="clickhouse/clickhouse-server:latest"} CLICKHOUSE_OPERATOR_TESTS_IMAGE=${CLICKHOUSE_OPERATOR_TESTS_IMAGE:="registry.gitlab.com/altinity-public/container-images/clickhouse-operator-test-runner:latest"} ZOOKEEPER_IMAGE=${ZOOKEEPER_IMAGE:="zookeeper:3.8.4"} -K8S_VERSION=${K8S_VERSION:=1.28.5} +K8S_VERSION=${K8S_VERSION:=1.30.1} MINIKUBE_PRELOADED_TARBALL="preloaded-images-k8s-v18-v${K8S_VERSION}-docker-overlay2-amd64.tar.lz4" -MINIKUBE_KICBASE_IMAGE=${MINIKUBE_KICBASE_IMAGE:-"gcr.io/k8s-minikube/kicbase:v0.0.42"} -MINIKUBE_STORAGE_IMAGE=${MINIKUBE_STORAGE_IMAGE:="gcr.io/k8s-minikube/storage-provisioner:v20210514"} +MINIKUBE_KICBASE_IMAGE=${MINIKUBE_KICBASE_IMAGE:-"gcr.io/k8s-minikube/kicbase:v0.0.45"} +MINIKUBE_STORAGE_IMAGE=${MINIKUBE_STORAGE_IMAGE:="gcr.io/k8s-minikube/storage-provisioner:latest"} -MINIO_IMAGE=${MINIO_IMAGE:="minio/minio:RELEASE.2021-06-17T00-10-46Z"} +MINIO_IMAGE=${MINIO_IMAGE:="minio/minio:latest"} MINIO_CONSOLE_IMAGE=${MINIO_CONSOLE_IMAGE:="minio/console:latest"} MINIO_CLIENT_IMAGE=${MINIO_CLIENT_IMAGE:="minio/mc:latest"} -MINIO_OPERATOR_IMAGE=${MINIO_OPERATOR_IMAGE:="minio/operator:v4.1.3"} +MINIO_OPERATOR_IMAGE=${MINIO_OPERATOR_IMAGE:="minio/operator:latest"} PROMETHEUS_RELOADER_IMAGE=${PROMETHEUS_RELOADER_IMAGE:="quay.io/prometheus-operator/prometheus-config-reloader:v0.68.0"} PROMETHEUS_OPERATOR_IMAGE=${PROMETHEUS_OPERATOR_IMAGE:="quay.io/prometheus-operator/prometheus-operator:v0.68.0"} diff --git a/tests/image/requirements.txt b/tests/image/requirements.txt index 48921211c..eec827f15 100644 --- a/tests/image/requirements.txt +++ b/tests/image/requirements.txt @@ -1,4 +1,4 @@ -testflows==2.2.8 +testflows==2.4.19 requests PyYAML setuptools diff --git a/tests/regression.py b/tests/regression.py index ec0cdff0c..1ed05b133 100755 --- a/tests/regression.py +++ b/tests/regression.py @@ -7,29 +7,29 @@ xfails = { # test_operator.py - "/regression/e2e.test_operator/test_020005*": [(Fail, "Keeper scale-up/scale-down is flaky")], + "/regression/e2e?test_operator/test_020005*": [(Fail, "Keeper scale-up/scale-down is flaky")], # test_clickhouse.py - "/regression/e2e.test_clickhouse/test_ch_001*": [(Fail, "Insert Quorum test need to refactoring")], + "/regression/e2e?test_clickhouse/test_ch_001*": [(Fail, "Insert Quorum test need to refactoring")], # test_metrics_alerts.py - # "/regression/e2e.test_metrics_alerts/test_clickhouse_keeper_alerts*": [ + # "/regression/e2e?test_metrics_alerts/test_clickhouse_keeper_alerts*": [ # (Fail, "clickhouse-keeper wrong prometheus endpoint format, look https://github.com/ClickHouse/ClickHouse/issues/46136") # ], # test_keeper.py - # "/regression/e2e.test_keeper/test_clickhouse_keeper_rescale*": [ - # (Fail, "need `ruok` before quorum https://github.com/ClickHouse/ClickHouse/issues/35464, need apply file config instead use commited data for 
quorum https://github.com/ClickHouse/ClickHouse/issues/35465. --force-recovery useless https://github.com/ClickHouse/ClickHouse/issues/37434"), - # ], - # "/regression/e2e.test_metrics_alerts/test_clickhouse_dns_errors*": [ + "/regression/e2e?test_keeper/test_*_chk*": [ + (Fail, "need proper ClickHouseKeeperInstallation scale up and scale down implementation"), + ], + # "/regression/e2e?test_metrics_alerts/test_clickhouse_dns_errors*": [ # (Fail, "DNSError behavior changed on 21.9, look https://github.com/ClickHouse/ClickHouse/issues/29624") # ], # test_keeper.py - "/regression/e2e.test_keeper/test_zookeeper_operator_probes_workload*": [ - ( - Fail, - "zookeeper liveness probe doesn't work, wait when https://github.com/pravega/zookeeper-operator/pull/476 will merge", - ) - ], - # "/regression/e2e.test_keeper/test_clickhouse_keeper_probes_workload*": [ + # "/regression/e2e?test_keeper/test_zookeeper_operator_probes_workload*": [ + # ( + # Fail, + # "zookeeper liveness probe doesn't work, wait when https://github.com/pravega/zookeeper-operator/pull/476 will merge", + # ) + # ], + # "/regression/e2e?test_keeper/test_clickhouse_keeper_probes_workload*": [ # (Fail, "clickhouse-keeper fail after insert 10000 parts, look https://github.com/ClickHouse/ClickHouse/issues/35712") # ], }