diff --git a/.github/workflows/grype_scan.yml b/.github/workflows/grype_scan.yml
index 6d640e552898..a92fec5f9879 100644
--- a/.github/workflows/grype_scan.yml
+++ b/.github/workflows/grype_scan.yml
@@ -113,8 +113,8 @@ jobs:
           ' result.json >> $GITHUB_STEP_SUMMARY
           fi
 
-          HIGH_COUNT=$(jq -r '.matches | map(.vulnerability.severity) | map(select(. == "High")) | length' result.json)
-          CRITICAL_COUNT=$(jq -r '.matches | map(.vulnerability.severity) | map(select(. == "Critical")) | length' result.json)
+          HIGH_COUNT=$(jq -r '.matches | map(.vulnerability) | unique_by(.id) | map(.severity) | map(select(. == "High")) | length' result.json)
+          CRITICAL_COUNT=$(jq -r '.matches | map(.vulnerability) | unique_by(.id) | map(.severity) | map(select(. == "Critical")) | length' result.json)
           TOTAL_HIGH_CRITICAL=$((HIGH_COUNT + CRITICAL_COUNT))
 
           echo "total_high_critical=$TOTAL_HIGH_CRITICAL" >> $GITHUB_OUTPUT
diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml
index ef4796300e46..2e0d55b26268 100644
--- a/.github/workflows/regression.yml
+++ b/.github/workflows/regression.yml
@@ -149,7 +149,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        SUITE: [aes_encryption, atomic_insert, base_58, data_types, datetime64_extended_range, disk_level_encryption, dns, engines, example, extended_precision_data_types, functions, kafka, kerberos, key_value, lightweight_delete, memory, part_moves_between_shards, selects, session_timezone, swarms, tiered_storage, version, window_functions]
+        SUITE: [aes_encryption, atomic_insert, base_58, data_types, datetime64_extended_range, disk_level_encryption, dns, engines, example, extended_precision_data_types, functions, kafka, kerberos, key_value, lightweight_delete, memory, part_moves_between_shards, selects, session_timezone, swarms, version, window_functions]
     needs: [runner_labels_setup]
     runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
     timeout-minutes: ${{ inputs.timeout_minutes }}
@@ -299,7 +299,7 @@ jobs:
           REPORTS_PATH=${{ runner.temp }}/reports_dir
           SUITE=alter
           STORAGE=/${{ matrix.ONLY }}_partition
-          PART='${{ matrix.PART }}'
+          PART=${{ matrix.PART }}
           EOF
       - name: Setup
         run: .github/setup.sh
@@ -971,7 +971,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        STORAGE: [minio, s3amazon, s3gcs]
+        STORAGE: [local, minio, s3amazon, s3gcs]
     needs: [runner_labels_setup]
     runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
     timeout-minutes: ${{ inputs.timeout_minutes }}
@@ -1014,7 +1014,7 @@ jobs:
           --gcs-key-id ${{ secrets.REGRESSION_GCS_KEY_ID }}
           --gcs-key-secret ${{ secrets.REGRESSION_GCS_KEY_SECRET }}
           --gcs-uri ${{ secrets.REGRESSION_GCS_URI }}
-          --with-${{ matrix.STORAGE }}
+          ${{ matrix.STORAGE == 'local' && '' || format('--with-{0}', matrix.STORAGE) }}
           --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.STORAGE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
           ${{ env.args }} || EXITCODE=$?;
           .github/add_link_to_logs.sh;
diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py
index 28dfe89c8ae7..ecb357ea1b40 100644
--- a/ci/workflows/pull_request.py
+++ b/ci/workflows/pull_request.py
@@ -101,7 +101,7 @@
         job.set_dependency(FUNCTIONAL_TESTS_PARALLEL_BLOCKING_JOB_NAMES)
         for job in JobConfigs.buzz_fuzzer_jobs
     ],
-    #*[
+    # *[
     #    job.set_dependency(FUNCTIONAL_TESTS_PARALLEL_BLOCKING_JOB_NAMES)
     #    for job in JobConfigs.performance_comparison_with_master_head_jobs
     # ],  # NOTE (strtgbb): failed previously due to GH secrets not being handled properly, try again later
@@ -136,7 +136,7 @@
     workflow_filter_hooks=[should_skip_job],
     post_hooks=[
         # "python3 ./ci/jobs/scripts/workflow_hooks/feature_docs.py",  # NOTE (strtgbb): we don't build docs
-        "python3 ./ci/jobs/scripts/workflow_hooks/new_tests_check.py",
+        # "python3 ./ci/jobs/scripts/workflow_hooks/new_tests_check.py",  # NOTE (strtgbb): we don't use this
        # "python3 ./ci/jobs/scripts/workflow_hooks/can_be_merged.py",  # NOTE (strtgbb): relies on labels we don't use
     ],
 )
diff --git a/tests/broken_tests.json b/tests/broken_tests.json
index e237f039d1a4..bee81a0a19b2 100644
--- a/tests/broken_tests.json
+++ b/tests/broken_tests.json
@@ -1,4 +1,11 @@
 {
+    "02844_max_backup_bandwidth_s3": {
+        "reason": "INVESTIGATE - Fails on debug",
+        "message": "result differs with reference",
+        "check_types": [
+            "debug"
+        ]
+    },
     "03455_direct_io_read_array_values": {
         "reason": "INVESTIGATE - Unstable on tsan",
         "check_types": [
@@ -26,6 +33,13 @@
             "tsan"
         ]
     },
+    "02435_rollback_cancelled_queries": {
+        "reason": "INVESTIGATE - Unstable on tsan",
+        "message": "DB::Exception: There is no current transaction. (INVALID_TRANSACTION)",
+        "check_types": [
+            "tsan"
+        ]
+    },
     "02443_detach_attach_partition": {
         "reason": "INVESTIGATE - Unstable",
         "message": "Cannot rename parts to perform operation on them: Code: 521. DB::ErrnoException: Cannot rename"
@@ -37,10 +51,18 @@
             "tsan"
         ]
     },
+    "03100_lwu_03_join": {
+        "reason": "INVESTIGATE - Unstable on tsan",
+        "message": "DB::Exception: Timeout exceeded (180 s) while flushing system log",
+        "check_types": [
+            "tsan"
+        ]
+    },
     "02479_race_condition_between_insert_and_droppin_mv": {
         "reason": "INVESTIGATE - Timeout in ClickHouse query",
         "check_types": [
-            "tsan"
+            "tsan",
+            "msan"
         ]
     },
     "01037_polygon_dicts_correctness_fast": {
@@ -61,7 +83,8 @@
     "03357_join_pk_sharding": {
         "reason": "INVESTIGATE - Timeout. Unstable?",
         "check_types": [
-            "tsan"
+            "tsan",
+            "msan"
         ]
     },
     "01630_simple_aggregate_all_functions_in_summing_merge_tree": {
@@ -179,7 +202,6 @@
     },
     "02581_share_big_sets_between_mutation_tasks_with_storage_set": {
         "reason": "INVESTIGATE - Timeout on tsan",
-        "message": "Timeout! Processes left in process group",
         "check_types": [
             "tsan"
         ]
@@ -214,17 +236,26 @@
         ]
     },
     "00172_hits_joins": {
-        "reason": "INVESTIGATE - Timeout on tsan",
+        "reason": "INVESTIGATE - Timeout on tsan, msan",
         "message": "Timeout! Processes left in process group",
         "check_types": [
-            "tsan"
+            "tsan",
+            "msan"
         ]
     },
     "00084_external_aggregation": {
-        "reason": "INVESTIGATE - Timeout on tsan",
+        "reason": "INVESTIGATE - Timeout on tsan, msan",
         "message": "Timeout! Processes left in process group",
         "check_types": [
-            "tsan"
+            "tsan",
+            "msan"
+        ]
+    },
+    "03127_system_unload_primary_key_table": {
+        "reason": "INVESTIGATE - Timeout on msan",
+        "message": "Timeout! Processes left in process group",
+        "check_types": [
+            "msan"
         ]
     },
     "00184_parallel_hash_returns_same_res_as_hash": {
@@ -242,6 +273,13 @@
             "msan"
         ]
     },
+    "00159_parallel_formatting_json_and_friends_1": {
+        "reason": "INVESTIGATE - Timeout on msan",
+        "message": "Timeout! Processes left in process group",
+        "check_types": [
+            "msan"
+        ]
+    },
     "02177_issue_31009": {
         "reason": "INVESTIGATE - Timeout on ubsan",
         "message": "Timeout! Processes left in process group",
@@ -257,11 +295,13 @@
         ]
     },
     "00024_random_counters": {
-        "reason": "INVESTIGATE - Timeout on tsan, coverage",
+        "reason": "INVESTIGATE - Timeout on tsan, coverage, asan, msan",
         "message": "Timeout! Processes left in process group",
         "check_types": [
             "tsan",
-            "coverage"
+            "coverage",
+            "asan",
+            "msan"
         ]
     },
     "00159_parallel_formatting_json_and_friends_2": {
@@ -273,7 +313,7 @@
     },
     "01782_field_oom": {
         "reason": "INVESTIGATE - Timeout on tsan",
-        "message": "Timeout! Processes left in process group",
+        "message": "Timeout",
         "check_types": [
             "tsan"
         ]
@@ -284,9 +324,30 @@
             "msan"
         ]
     },
+    "03262_column_sizes_with_dynamic_structure": {
+        "reason": "INVESTIGATE - Unstable on tsan",
+        "message": "DB::Exception: Estimated query execution time",
+        "check_types": [
+            "tsan"
+        ]
+    },
+    "03096_order_by_system_tables": {
+        "reason": "INVESTIGATE - Unstable on tsan",
+        "message": "Timeout",
+        "check_types": [
+            "tsan"
+        ]
+    },
+    "02210_processors_profile_log": {
+        "reason": "INVESTIGATE - Unstable on tsan",
+        "message": "Timeout",
+        "check_types": [
+            "tsan"
+        ]
+    },
     "03555_json_shared_data_advanced_paths_indexes": {
         "reason": "INVESTIGATE - Timeout on tsan, msan",
-        "message": "Timeout! Processes left in process group",
+        "message": "Timeout",
         "check_types": [
             "tsan",
             "msan"
@@ -299,13 +360,42 @@
             "tsan"
         ]
     },
+    "03100_lwu_23_apply_patches": {
+        "reason": "INVESTIGATE - Timeout on tsan",
+        "message": "Timeout exceeded (180 s) while flushing system log",
+        "check_types": [
+            "tsan"
+        ]
+    },
     "00284_external_aggregation": {
-        "reason": "INVESTIGATE - Timeout in query on tsan",
+        "reason": "INVESTIGATE - Timeout in query on tsan, msan",
         "message": "DB::Exception: Timeout exceeded",
+        "check_types": [
+            "tsan",
+            "msan"
+        ]
+    },
+    "03550_analyzer_remote_view_columns": {
+        "reason": "INVESTIGATE - Timeout on tsan",
+        "message": "DB::Exception: Timeout exceeded (180 s) while flushing system log",
+        "check_types": [
+            "tsan"
+        ]
+    },
+    "01630_simple_aggregate_all_functions_in_aggregating_merge_tree": {
+        "reason": "INVESTIGATE - Timeout on tsan",
+        "message": "Timeout exceeded",
         "check_types": [
             "tsan"
         ]
     },
+    "03593_backup_with_broken_projection": {
+        "reason": "INVESTIGATE - Timeout on msan",
+        "message": "Timeout",
+        "check_types": [
+            "msan"
+        ]
+    },
     "00159_parallel_formatting_http": {
         "reason": "INVESTIGATE - Timeout in curl on tsan",
         "message": "curl: (28) Operation timed out",
@@ -320,11 +410,19 @@
             "tsan"
         ]
     },
+    "03221_refreshable_matview_progress": {
+        "reason": "INVESTIGATE - Unstable on msan",
+        "message": "result differs with reference",
+        "check_types": [
+            "msan"
+        ]
+    },
     "test_scheduler_cpu_preemptive/test.py::test_downscaling[cpu-slot-preemption-timeout-1ms]": {
-        "reason": "INVESTIGATE - Unstable on tsan",
+        "reason": "INVESTIGATE - Unstable on tsan, asan",
         "message": "Failed: Timeout >900.0s",
         "check_types": [
-            "tsan"
+            "tsan",
+            "asan"
         ]
     },
     "test_grpc_protocol/test.py::test_ipv6_select_one": {
@@ -343,9 +441,15 @@
         "reason": "INVESTIGATE - NETLINK_ERROR",
         "message": "NETLINK_ERROR"
     },
+    "test_overcommit_tracker/test.py::test_user_overcommit": {
+        "reason": "INVESTIGATE - NETLINK_ERROR on tsan",
+        "message": "NETLINK_ERROR",
+        "check_types": [
+            "tsan"
+        ]
+    },
     "test_storage_delta/test.py::test_concurrent_queries[False]": {
-        "reason": "INVESTIGATE - NETLINK_ERROR",
-        "message": "NETLINK_ERROR"
+        "reason": "INVESTIGATE - Unstable test"
     },
     "test_storage_s3_queue/test_0.py::test_streaming_to_many_views[unordered]": {
         "reason": "INVESTIGATE: Broken test?"
@@ -353,6 +457,9 @@
     "test_storage_s3_queue/test_0.py::test_streaming_to_many_views[ordered]": {
         "reason": "INVESTIGATE: Broken test?"
     },
+    "test_storage_s3_queue/test_2.py::test_shards_distributed[unordered-1]": {
+        "reason": "INVESTIGATE: Unstable test"
+    },
     "test_storage_s3_queue/test_4.py::test_list_and_delete_race": {
         "reason": "KNOWN: Unstable test",
         "message": "AssertionError"
@@ -416,5 +523,25 @@
         "check_types": [
             "tsan"
         ]
+    },
+    "test_database_hms/test.py::test_list_tables": {
+        "reason": "KNOWN: Occasional timeout",
+        "message": "docker_compose_iceberg_hms_catalog.yml', '--verbose', 'up', '-d']' timed out after 300 seconds"
+    },
+    "test_database_hms/test.py::test_many_namespaces": {
+        "reason": "KNOWN: Occasional timeout",
+        "message": "docker_compose_iceberg_hms_catalog.yml', '--verbose', 'up', '-d']' timed out after 300 seconds"
+    },
+    "test_database_hms/test.py::test_select": {
+        "reason": "KNOWN: Occasional timeout",
+        "message": "docker_compose_iceberg_hms_catalog.yml', '--verbose', 'up', '-d']' timed out after 300 seconds"
+    },
+    "test_database_hms/test.py::test_tables_with_same_location": {
+        "reason": "KNOWN: Occasional timeout",
+        "message": "docker_compose_iceberg_hms_catalog.yml', '--verbose', 'up', '-d']' timed out after 300 seconds"
+    },
+    "test_database_hms/test.py::test_hide_sensitive_info": {
+        "reason": "KNOWN: Occasional timeout",
+        "message": "docker_compose_iceberg_hms_catalog.yml', '--verbose', 'up', '-d']' timed out after 300 seconds"
     }
 }
diff --git a/tests/integration/compose/docker_compose_iceberg_hms_catalog.yml b/tests/integration/compose/docker_compose_iceberg_hms_catalog.yml
index 56c1cd9941d9..073e69a3cedb 100644
--- a/tests/integration/compose/docker_compose_iceberg_hms_catalog.yml
+++ b/tests/integration/compose/docker_compose_iceberg_hms_catalog.yml
@@ -17,7 +17,7 @@ services:
       - 10001:10001
     stop_grace_period: 5s
   hive:
-    image: clickhouse/integration-test-with-hms
+    image: altinityinfra/integration-test-with-hms
     restart: unless-stopped
     depends_on:
       minio:
diff --git a/tests/integration/compose/docker_compose_iceberg_lakekeeper_catalog.yml b/tests/integration/compose/docker_compose_iceberg_lakekeeper_catalog.yml
index 4f6a997a0d42..8fa7b7e0a4ac 100644
--- a/tests/integration/compose/docker_compose_iceberg_lakekeeper_catalog.yml
+++ b/tests/integration/compose/docker_compose_iceberg_lakekeeper_catalog.yml
@@ -64,7 +64,7 @@ services:
 
   db:
-    image: bitnami/postgresql:16.3.0
+    image: bitnamilegacy/postgresql:16.3.0
     environment:
       - POSTGRESQL_USERNAME=postgres
       - POSTGRESQL_PASSWORD=postgres
@@ -78,7 +78,7 @@ services:
 
   # TODO: can we simply use with_minio=True instead?
   minio:
-    image: bitnami/minio:2025.4.22
+    image: bitnamilegacy/minio:2025.4.22
     environment:
       - MINIO_ROOT_USER=minio
       - MINIO_ROOT_PASSWORD=ClickHouse_Minio_P@ssw0rd
diff --git a/tests/integration/compose/docker_compose_ldap.yml b/tests/integration/compose/docker_compose_ldap.yml
index 70b38ed9e824..0f7620d60236 100644
--- a/tests/integration/compose/docker_compose_ldap.yml
+++ b/tests/integration/compose/docker_compose_ldap.yml
@@ -1,6 +1,6 @@
 services:
   openldap:
-    image: bitnami/openldap:2.6.8
+    image: bitnamilegacy/openldap:2.6.8
     stop_grace_period: 5s
     restart: always
     environment:
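
Note on the grype_scan.yml hunk at the top of this patch: the old jq pipeline counted one High/Critical per match, so a vulnerability matched against several packages was counted once per package; the new pipeline deduplicates on the vulnerability id before counting. Below is a minimal Python sketch of the same counting logic, useful for checking the numbers locally; it assumes a grype JSON report saved as result.json (the file the workflow parses), with id and severity nested under each match's vulnerability object as in grype's standard output.

#!/usr/bin/env python3
# Sketch of the workflow's jq pipeline:
#   .matches | map(.vulnerability) | unique_by(.id) | map(.severity)
#   | map(select(. == "High")) | length   (and likewise for "Critical")
import json

with open("result.json") as f:
    report = json.load(f)

# Keep one vulnerability object per id, mirroring jq's unique_by(.id);
# duplicate matches share the same severity, so which copy survives is irrelevant.
unique_vulns = {m["vulnerability"]["id"]: m["vulnerability"] for m in report["matches"]}

high = sum(v["severity"] == "High" for v in unique_vulns.values())
critical = sum(v["severity"] == "Critical" for v in unique_vulns.values())
print(f"total_high_critical={high + critical}")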