import logging
import pytest

from helpers.cluster import ClickHouseCluster


# Build and start a single-node cluster; with_minio controls whether a MinIO (S3)
# backend is started alongside the ClickHouse node.
def get_cluster(with_minio):
    cluster = ClickHouseCluster(__file__)
    cluster.add_instance(
        "node",
        main_configs=["configs/storage_conf.xml"],
        user_configs=["configs/users.xml"],
        with_minio=with_minio,
        stay_alive=True,
        # remote database disk adds MinIO implicitly
        # FIXME: disable with_remote_database_disk if with_minio set to False explicitly
    )
    logging.info("Starting cluster...")
    cluster.start()
    logging.info("Cluster started")

    return cluster


# ClickHouse checks for extra (AKA orphaned) parts on other disks, so that data parts
# residing on disks outside the table's storage policy are not silently missed.
# The test verifies how the search for orphaned parts behaves when there is no connection to MinIO.
# The expected behavior is:
# * search_orphaned_parts_disks is `none`  - does not search s3, the query succeeds
# * search_orphaned_parts_disks is `local` - does not search s3, the query succeeds
# * search_orphaned_parts_disks is `any`   - searches s3, the query throws if there is no MinIO
# Note that disk_s3_plain is a configured disk that is used by neither the no_s3 nor the
# local_cache policy (see the config sketch below).
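# For reference, a minimal sketch of what configs/storage_conf.xml could look like for
# this setup. This is an illustrative assumption, not the actual file: only the names
# no_s3, local_cache, and disk_s3_plain come from the test; disk names, endpoints,
# paths, and credentials (omitted here) are hypothetical.
#
#   <clickhouse>
#       <storage_configuration>
#           <disks>
#               <local_disk>
#                   <type>local</type>
#                   <path>/var/lib/clickhouse/local/</path>
#               </local_disk>
#               <s3_disk>
#                   <type>s3</type>
#                   <endpoint>http://minio1:9001/root/data/</endpoint>
#               </s3_disk>
#               <s3_cache>
#                   <type>cache</type>
#                   <disk>s3_disk</disk>
#                   <path>/var/lib/clickhouse/s3_cache/</path>
#                   <max_size>1Gi</max_size>
#               </s3_cache>
#               <!-- configured, but referenced by no policy below -->
#               <disk_s3_plain>
#                   <type>s3_plain</type>
#                   <endpoint>http://minio1:9001/root/plain/</endpoint>
#               </disk_s3_plain>
#           </disks>
#           <policies>
#               <no_s3>
#                   <volumes><main><disk>local_disk</disk></main></volumes>
#               </no_s3>
#               <local_cache>
#                   <volumes><main><disk>s3_cache</disk></main></volumes>
#               </local_cache>
#           </policies>
#       </storage_configuration>
#   </clickhouse>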
@pytest.mark.parametrize("with_minio", [True, False])
def test_search_orphaned_parts(with_minio):
    table_name = "t1"

    cluster = None
    try:
        cluster = get_cluster(with_minio)

        node = cluster.instances["node"]

        for search_mode in ["any", "local", "none"]:
            for storage_policy in ["no_s3", "local_cache"]:
                node.query(f"DROP TABLE IF EXISTS {table_name} SYNC")

                create_query = f"""
                    CREATE TABLE {table_name} (
                        id Int64,
                        data String
                    ) ENGINE=MergeTree()
                    PARTITION BY id % 10
                    ORDER BY id
                    SETTINGS storage_policy='{storage_policy}', search_orphaned_parts_disks='{search_mode}'
                """

                if search_mode == "any" and not with_minio:
                    # 'any' also searches S3-backed disks; without MinIO the CREATE must fail.
                    assert "Code: 499. DB::Exception" in node.query_and_get_error(
                        create_query
                    )
                else:
                    node.query(create_query)
                    node.query(f"DROP TABLE IF EXISTS {table_name} SYNC")

    finally:
        # get_cluster() may fail before cluster is assigned; guard the shutdown.
        if cluster:
            cluster.shutdown()
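
# A typical invocation through the integration-test harness (the path is an
# assumption -- adjust it to wherever this test file actually lives):
#   python -m pytest tests/integration/test_search_orphaned_parts/test.py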