diff --git a/.github/docker/admin.json b/.github/docker/admin.json new file mode 100644 index 0000000000..5076a4a5e1 --- /dev/null +++ b/.github/docker/admin.json @@ -0,0 +1,4 @@ +{ + "accessKey": "D4IT2AWSB588GO5J9T00", + "secretKeyValue": "UEEu8tYlsOGGrgf4DAiSZD6apVNPUWqRiPG0nTB6" +} diff --git a/.github/docker/docker-compose.yaml b/.github/docker/docker-compose.yaml index 8c70341d6a..849448ba5a 100644 --- a/.github/docker/docker-compose.yaml +++ b/.github/docker/docker-compose.yaml @@ -8,7 +8,12 @@ services: - /tmp/ssl-kmip:/ssl-kmip - ${HOME}/.aws/credentials:/root/.aws/credentials - /tmp/artifacts/${JOB_NAME}:/artifacts + # using artesca container, with persistent volumes for sse migration + - ../../localData:/usr/src/app/localData + - ../../localMetadata:/usr/src/app/localMetadata + - ../../tests/functional/sse-kms-migration/config.${SSE_CONF}.json:/conf/config.json environment: + - S3_CONFIG_FILE=/conf/config.json - CI=true - ENABLE_LOCAL_CACHE=true - REDIS_HOST=0.0.0.0 @@ -19,7 +24,9 @@ services: - DATA_HOST=0.0.0.0 - METADATA_HOST=0.0.0.0 - S3BACKEND + - S3VAULT=scality - S3DATA + - S3METADATA - MPU_TESTING - S3VAULT - S3_LOCATION_FILE @@ -42,6 +49,36 @@ services: extra_hosts: - "bucketwebsitetester.s3-website-us-east-1.amazonaws.com:127.0.0.1" - "pykmip.local:127.0.0.1" + cloudserver-sse-migration: + extends: cloudserver + profiles: [sse-migration] + volumes: + # using artesca container + - ../../tests/functional/sse-kms-migration/config.${SSE_CONF}.json:/conf/config.json + - ../../localData:/usr/src/app/localData + - ../../localMetadata:/usr/src/app/localMetadata + environment: + - S3_CONFIG_FILE=/conf/config.json + - S3KMS=aws + - S3VAULT=scality + vault: + # image: ${VAULT_IMAGE_BEFORE_SSE_MIGRATION} + image: ${VAULT_IMAGE} + command: sh -c "chmod 400 tests/utils/keyfile && yarn start > /artifacts/vault.log" + network_mode: "host" + volumes: + - /tmp/artifacts/${JOB_NAME}:/artifacts + - ./vault-config.json:/conf/config.json:ro + - ./vault-db:/data + environment: + - VAULT_DB_BACKEND=LEVELDB + - CI=true + - ENABLE_LOCAL_CACHE=true + - REDIS_HOST=0.0.0.0 + - REDIS_PORT=6379 + - KMS_BACKEND=aws + depends_on: + - redis redis: image: redis:alpine network_mode: "host" diff --git a/.github/docker/local.sh b/.github/docker/local.sh new file mode 100755 index 0000000000..a9ae87bb4f --- /dev/null +++ b/.github/docker/local.sh @@ -0,0 +1,110 @@ +#!/bin/bash +set -e -o pipefail +#in .github/docker + +export S3BACKEND=file +export S3METADATA=scality +export S3VAULT=scality +export CLOUDSERVER_IMAGE_BEFORE_SSE_MIGRATION=ghcr.io/scality/cloudserver:7.70.21-11 +export CLOUDSERVER_IMAGE_ORIGINAL=ghcr.io/scality/cloudserver:50db1ada69a394cf877bd3486d4d0e318158e338 +export MPU_TESTING="yes" +export JOB_NAME=sse-kms-migration-tests-show-arn +export kmsHideScalityArn=showArn + +export VAULT_IMAGE_BEFORE_SSE_MIGRATION=ghcr.io/scality/vault:7.70.15-5 +export VAULT_IMAGE_ORIGINAL=ghcr.io/scality/vault:e8c0fa2890c131581efd13ad3fd1ade7dcbd0968 +export KMS_IMAGE=nsmithuk/local-kms:3.11.7 + +# IMAGE IS HARDCODED FOR OKMS TO HIDE +export JOB_NAME=sse-kms-migration-tests-hide-arn +export kmsHideScalityArn=hideArn +# export JOB_NAME=sse-kms-migration-tests-show-arn +# export kmsHideScalityArn=showArn + +mkdir -p /tmp/artifacts/$JOB_NAME + +export CLOUDSERVER_IMAGE=$CLOUDSERVER_IMAGE_BEFORE_SSE_MIGRATION +export VAULT_IMAGE=$VAULT_IMAGE_BEFORE_SSE_MIGRATION +export SSE_CONF=before + +export KMS_AWS_SECRET_ACCESS_KEY=123 +export KMS_AWS_ACCESS_KEY_ID=456 + +# START KMS +docker run -d -p 8080:8080 $KMS_IMAGE 
|| true
+
+echo "waiting for local AWS KMS service on port 8080 to be available."
+
+timeout 300 bash -c 'until curl -sS 0:8080 > /dev/null; do
+    echo "service not ready on port 8080. Retrying in 2 seconds."
+    sleep 2
+done'
+echo "local AWS KMS service is up and running on port 8080."
+
+AWS_ENDPOINT_URL=http://0:8080 AWS_DEFAULT_REGION=us-east-1 AWS_ACCESS_KEY_ID=456 AWS_SECRET_ACCESS_KEY=123 aws kms list-keys --max-items 1
+# END KMS
+
+# Start everything pre-migration
+docker compose up -d
+bash ../../wait_for_local_port.bash 8500 40
+bash ../../wait_for_local_port.bash 8000 40
+# Have the vaultclient bin in your PATH or an alias
+# (the alias only takes effect if this script is sourced; the PATH export covers
+# direct runs, and ~ does not expand inside quotes, hence $HOME)
+alias vaultclient="$HOME/scality/vaultclient/bin/vaultclient"
+export PATH="$PATH:$HOME/scality/vaultclient/bin"
+vaultclient --config admin.json delete-account --name mick || true
+vaultclient --config admin.json create-account --name mick --email mick@mick.mick
+vaultclient --config admin.json generate-account-access-key --name mick --accesskey SCUBAINTERNAL0000000 --secretkey SCUBAINTERNAL000000000000000000000000000
+vaultclient --config admin.json get-account --account-name mick
+
+cd ../..
+
+echo ===== RUN BEFORE MIGRATION =====
+export S3_CONFIG_FILE=config.before.json
+
+set -o pipefail
+
+echo "Ensure the expected cloudserver version is the old one:"
+VERSION=$(docker compose -f .github/docker/docker-compose.yaml \
+    exec cloudserver cat package.json | jq -r .version)
+if [[ "$VERSION" != "7.70.21-11" ]]; then
+    echo "bad version of container. Should be 7.70.21-11. Was $VERSION" >&2
+    exit 1
+else
+    echo OK $VERSION
+fi
+
+yarn run ft_sse_before_migration | tee /tmp/artifacts/$JOB_NAME/beforeMigration.log
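+
+# Phase 2 below swaps in the current cloudserver/vault images, restarts vault,
+# runs cloudserver from the working tree via start_migration, then re-reads the
+# data seeded above (ft_sse_migration) and checks ARN-prefix handling (ft_sse_arn).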
+# RUN latest images
+cd .github/docker
+export SSE_CONF=sseMigration.$kmsHideScalityArn
+export CLOUDSERVER_IMAGE=$CLOUDSERVER_IMAGE_ORIGINAL
+export VAULT_IMAGE=$VAULT_IMAGE_ORIGINAL
+
+docker compose down cloudserver vault && docker compose up -d vault # cloudserver-sse-migration
+
+echo ==== RUN MIGRATION ====
+cd ../..
+yarn start_migration > s3.log &
+export S3_CONFIG_FILE=config.sseMigration.$kmsHideScalityArn.json
+export S3KMS=aws
+
+set -o pipefail
+bash wait_for_local_port.bash 8500 40
+bash wait_for_local_port.bash 8000 40
+
+# echo "Ensure the expected cloudserver version is NOT the old one"
+# VERSION=$(docker compose -f .github/docker/docker-compose.yaml \
+#     exec cloudserver-sse-migration cat package.json | jq -r .version)
+# if [[ "$VERSION" == "7.70.21-11" ]]; then
+#     echo "bad version of container. Should NOT be 7.70.21-11. Was $VERSION" >&2
+#     exit 1
+# else
+#     echo OK $VERSION
+# fi
+
+yarn run ft_sse_migration # | tee /tmp/artifacts/$JOB_NAME/migration.log
+sleep 10
+yarn run ft_sse_arn # | tee /tmp/artifacts/$JOB_NAME/arnPrefix.log
diff --git a/.github/docker/vault-config.json b/.github/docker/vault-config.json
new file mode 100644
index 0000000000..14b86162be
--- /dev/null
+++ b/.github/docker/vault-config.json
@@ -0,0 +1,76 @@
+{
+    "clusters": 1,
+    "healthChecks": {
+        "allowFrom": ["127.0.0.1/8", "::1"]
+    },
+    "interfaces": {
+        "S3": {
+            "address": "0.0.0.0",
+            "port": 8500,
+            "allowFrom": ["0.0.0.0/8", "::1"]
+        },
+        "administration": {
+            "address": "0.0.0.0",
+            "port": 8600
+        },
+        "sts": {
+            "address": "127.0.0.1",
+            "port": 8800
+        }
+    },
+    "map": ["127.0.0.1:4300", "127.0.0.2:4301", "127.0.0.3:4302", "127.0.0.4:4303", "127.0.0.5:4304"],
+    "keyFilePath": "./tests/utils/keyfile",
+    "adminCredentialsFilePath": "./tests/utils/admincredentials.json.encrypted",
+    "log": {
+        "level": "info",
+        "dump": "error"
+    },
+    "accountSeeds": [
+        {
+            "role": {
+                "roleName": "scality-role1",
+                "trustPolicy": {
+                    "Version": "2012-10-17",
+                    "Statement": [
+                        {
+                            "Effect": "Allow",
+                            "Principal": { "AWS": "arn:aws:iam::000000000000:user/root" },
+                            "Action": "sts:AssumeRole",
+                            "Condition": {}
+                        }
+                    ]
+                }
+            },
+            "permissionPolicy": {
+                "policyName": "scality-policy1",
+                "policyDocument": {
+                    "Version": "2012-10-17",
+                    "Statement": [
+                        {
+                            "Sid": "FullAccess",
+                            "Effect": "Allow",
+                            "Action": ["s3:*"],
+                            "Resource": ["*"]
+                        }
+                    ]
+                }
+            }
+        }
+    ],
+    "utapi": {
+        "host": "127.0.0.1",
+        "port": 8100
+    },
+    "scuba": {
+        "host": "127.0.0.1",
+        "port": 8100
+    },
+    "kmsAWS": {
+        "noAwsArn": true,
+        "providerName": "local",
+        "region": "us-east-1",
+        "endpoint": "http://0:8080",
+        "ak": "456",
+        "sk": "123"
+    }
+}
diff --git a/.github/docker/vault-db/.gitignore b/.github/docker/vault-db/.gitignore
new file mode 100644
index 0000000000..5e7d2734cf
--- /dev/null
+++ b/.github/docker/vault-db/.gitignore
@@ -0,0 +1,4 @@
+# Ignore everything in this directory
+*
+# Except this file
+!.gitignore
diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index d232cf27e6..dc747eab50 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -218,6 +218,8 @@ jobs:
           source: /tmp/artifacts
         if: always()
 
+  # All tests use non-federation images
+
   file-ft-tests:
     strategy:
       matrix:
@@ -345,3 +347,171 @@
         password: ${{ secrets.ARTIFACTS_PASSWORD }}
         source: /tmp/artifacts
       if: always()
+
+  sse-kms-migration-tests:
+    strategy:
+      matrix:
+        include:
+          - kmsHideScalityArn: 'showArn'
+            job-name: sse-kms-migration-tests-show-arn
+          # To use this one, the tests need to stop expecting an ARN in response
+          # headers; instead the check should query the metadata directly to
+          # ensure the MD has the ARN
+          # - kmsHideScalityArn: 'hideArn'
+          #   job-name: sse-kms-migration-tests-hide-arn
+    name: ${{ matrix.job-name }}
+    runs-on: ubuntu-latest
+    needs: build
+    env:
+      S3BACKEND: file
+      S3VAULT: scality
+      CLOUDSERVER_IMAGE_BEFORE_SSE_MIGRATION: ghcr.io/${{ github.repository }}:7.70.21-11
+      VAULT_IMAGE_BEFORE_SSE_MIGRATION: ghcr.io/scality/vault:7.70.15-5
+      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
+      VAULT_IMAGE: ghcr.io/scality/vault:e8c0fa2890c131581efd13ad3fd1ade7dcbd0968
+      KMS_IMAGE: nsmithuk/local-kms:3.11.7
+      MPU_TESTING: "yes"
+      JOB_NAME: ${{ matrix.job-name }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: 3.9
+      - name: Setup CI environment
+        uses: ./.github/actions/setup-ci
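+      # Job outline: seed SSE buckets/objects on the pinned pre-migration images,
+      # swap to the current images, then verify the seeded data still decrypts
+      # and KMS key ARNs are reported as expected.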
+      - name: Setup matrix job artifacts directory
+        shell: bash
+        run: |
+          set -exu
+          mkdir -p /tmp/artifacts/${{ matrix.job-name }}/
+      - name: Setup CI services (with the old cloudserver image, before SSE migration)
+        run: docker compose up -d
+        working-directory: .github/docker
+        env:
+          CLOUDSERVER_IMAGE: ${{ env.CLOUDSERVER_IMAGE_BEFORE_SSE_MIGRATION }}
+          SSE_CONF: before
+      - name: Run SSE before-migration tests (set up buckets and objects)
+        env:
+          # yarn run does a cd into the test folder
+          S3_CONFIG_FILE: config.before.json
+        run: |-
+          set -o pipefail;
+          bash wait_for_local_port.bash 8000 40
+
+          echo "Ensure the expected cloudserver version is the old one:"
+          VERSION=$(docker compose -f .github/docker/docker-compose.yaml \
+            exec cloudserver cat package.json | jq -r .version)
+          if [[ "$VERSION" != "7.70.21-11" ]]; then
+            echo "bad version of container. Should be 7.70.21-11. Was $VERSION" >&2
+            exit 1
+          else
+            echo OK $VERSION
+          fi
+
+          yarn run ft_sse_before_migration | tee /tmp/artifacts/${{ matrix.job-name }}/beforeMigration.log
+      - name: Replace the old cloudserver image with the current one
+        run: |-
+          docker compose down cloudserver
+          mv /tmp/artifacts/${{ matrix.job-name }}/s3.log /tmp/artifacts/${{ matrix.job-name }}/s3.old.log
+          docker compose up -d cloudserver-sse-migration
+        working-directory: .github/docker
+        env:
+          SSE_CONF: sseMigration.${{ matrix.kmsHideScalityArn }}
+      - name: Run SSE migration tests
+        env:
+          # yarn run does a cd into the test folder
+          S3_CONFIG_FILE: config.sseMigration.${{ matrix.kmsHideScalityArn }}.json
+        run: |-
+          set -o pipefail;
+          bash wait_for_local_port.bash 8000 40
+
+          echo "Ensure the expected cloudserver version is NOT the old one"
+          VERSION=$(docker compose -f .github/docker/docker-compose.yaml \
+            exec cloudserver-sse-migration cat package.json | jq -r .version)
+          if [[ "$VERSION" == "7.70.21-11" ]]; then
+            echo "bad version of container. Should NOT be 7.70.21-11.
Was $VERSION" >&2 + exit 1 + else + echo OK $VERSION + fi + + yarn run ft_sse_migration | tee /tmp/artifacts/${{ matrix.job-name }}/migration.log + - name: Run SSE arnPrefix tests + env: + S3_CONFIG_FILE: config.sseMigration.${{ matrix.kmsHideScalityArn }}.json + run: |- + set -o pipefail; + yarn run ft_sse_arn | tee /tmp/artifacts/${{ matrix.job-name }}/arnPrefix.log + - name: Print docker compose logs + run: docker compose logs cloudserver cloudserver-sse-migration + working-directory: .github/docker + if: failure() + - name: Upload logs to artifacts + uses: scality/action-artifacts@v4 + with: + method: upload + url: https://artifacts.scality.net + user: ${{ secrets.ARTIFACTS_USER }} + password: ${{ secrets.ARTIFACTS_PASSWORD }} + source: /tmp/artifacts + if: always() + + # Temporary until I have a good test taking into account kmsHideScalityArn + tmp-kmsHideScalityArn-file-ft-tests: + strategy: + matrix: + include: + - enable-null-compat: '' + job-name: tmp-kmsHideScalityArn-file-ft-tests + name: ${{ matrix.job-name }} + runs-on: ubuntu-latest + needs: build + env: + S3BACKEND: file + S3VAULT: mem + CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} + MPU_TESTING: "yes" + ENABLE_NULL_VERSION_COMPAT_MODE: "${{ matrix.enable-null-compat }}" + JOB_NAME: ${{ matrix.job-name }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.9 + - name: Setup CI environment + uses: ./.github/actions/setup-ci + - name: Setup matrix job artifacts directory + shell: bash + run: | + set -exu + mkdir -p /tmp/artifacts/${{ matrix.job-name }}/ + - name: Setup python test environment + run: | + sudo apt-get install -y libdigest-hmac-perl + pip install 's3cmd==2.3.0' + - name: Setup CI services + run: |- + docker compose up -d + docker compose down cloudserver + docker compose up -d cloudserver-sse-migration + working-directory: .github/docker + env: + SSE_CONF: sseMigration.hideArn + - name: Run file ft tests + env: + # need absolute path as tests move into folders + S3_CONFIG_FILE: ${{ github.workspace }}/tests/functional/sse-kms-migration/config.sseMigration.hideArn.json + run: |- + set -o pipefail; + bash wait_for_local_port.bash 8000 40 + yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log + - name: Upload logs to artifacts + uses: scality/action-artifacts@v4 + with: + method: upload + url: https://artifacts.scality.net + user: ${{ secrets.ARTIFACTS_USER }} + password: ${{ secrets.ARTIFACTS_PASSWORD }} + source: /tmp/artifacts + if: always() diff --git a/.gitignore b/.gitignore index e0a1474ee0..ddd1184c00 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,6 @@ node_modules # Junit directory junit + + +.github/docker/vault-db diff --git a/lib/kms/wrapper.js b/lib/kms/wrapper.js index d8f161ff82..14b97fadcf 100644 --- a/lib/kms/wrapper.js +++ b/lib/kms/wrapper.js @@ -155,6 +155,11 @@ function getClientForKey(key, log) { } class KMS { + /** Access to client for tests (Integration needs to turn off _supportsDefaultKeyPerAccount) */ + static get client() { + return client; + } + /** Used for keys from current client */ static get arnPrefix() { return client.backend.arnPrefix; diff --git a/package.json b/package.json index d9af9fd662..d1cdf3751e 100644 --- a/package.json +++ b/package.json @@ -74,6 +74,11 @@ "ft_s3curl": "cd tests/functional/s3curl && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 40000 *.js", "ft_test": "npm-run-all -s ft_awssdk 
ft_s3cmd ft_s3curl ft_node ft_healthchecks ft_management",
     "ft_kmip": "cd tests/functional/kmip && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 40000 *.js",
+    "ft_sse_cleanup": "cd tests/functional/sse-kms-migration && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 10000 cleanup.js",
+    "ft_sse_before_migration": "cd tests/functional/sse-kms-migration && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 10000 cleanup.js beforeMigration.js",
+    "ft_sse_migration": "cd tests/functional/sse-kms-migration && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 10000 migration.js",
+    "ft_sse_arn": "cd tests/functional/sse-kms-migration && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 10000 cleanup.js arnPrefix.js",
+    "start_migration": "S3DATA=file S3METADATA=scality S3KMS=aws S3VAULT=scality S3_CONFIG_FILE=./tests/functional/sse-kms-migration/config.sseMigration.hideArn.json yarn start",
     "install_ft_deps": "yarn install aws-sdk@2.28.0 bluebird@3.3.1 mocha@2.3.4 mocha-junit-reporter@1.23.1 tv4@1.2.7",
     "lint": "eslint $(git ls-files '*.js')",
     "lint_md": "mdlint $(git ls-files '*.md')",
diff --git a/tests/functional/aws-node-sdk/lib/json/mem_credentials.json b/tests/functional/aws-node-sdk/lib/json/mem_credentials.json
index bfc574cece..f6f992bf73 100644
--- a/tests/functional/aws-node-sdk/lib/json/mem_credentials.json
+++ b/tests/functional/aws-node-sdk/lib/json/mem_credentials.json
@@ -6,5 +6,9 @@
     "lisa": {
         "accessKey": "accessKey2",
         "secretKey": "verySecretKey2"
+    },
+    "vault": {
+        "accessKey": "SCUBAINTERNAL0000000",
+        "secretKey": "SCUBAINTERNAL000000000000000000000000000"
     }
 }
diff --git a/tests/functional/sse-kms-migration/arnPrefix.js b/tests/functional/sse-kms-migration/arnPrefix.js
new file mode 100644
index 0000000000..005bda7585
--- /dev/null
+++ b/tests/functional/sse-kms-migration/arnPrefix.js
@@ -0,0 +1,695 @@
+/* eslint-disable */
+const getConfig = require('../aws-node-sdk/test/support/config');
+const { S3 } = require('aws-sdk');
+const kms = require('../../../lib/kms/wrapper');
+const { promisify } = require('util');
+const BucketInfo = require('arsenal').models.BucketInfo;
+const { DummyRequestLogger } = require('../../unit/helpers');
+const BucketUtility = require('../aws-node-sdk/lib/utility/bucket-util');
+const assert = require('assert');
+const metadata = require('../../../lib/metadata/wrapper');
+const crypto = require('crypto');
+const constants = require('../../../constants');
+const log = new DummyRequestLogger();
+const { makeRequest } = require('../raw-node/utils/makeRequest');
+const { config } = require('../../../lib/Config');
+const { getKeyIdFromArn } = require('arsenal/build/lib/network/KMSInterface');
+
+// Use the file KMS backend to get keys with an ARN prefix; without a prefix, the mem backend is in use.
+
+// Copies part of aws-node-sdk/test/object/encryptionHeaders.js and adds more tests
+// around the SSE key prefix and migration.
+// Always GetObject as well, to ensure decryption works.
+
+function getKey(key) {
+    return config.kmsHideScalityArn ? getKeyIdFromArn(key) : key;
+}
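+// Illustration (hypothetical values): with kmsHideScalityArn set, getKey() reduces
+// a stored ARN such as 'arn:scality:kms:::key/8f3e...' to the bare id '8f3e...';
+// otherwise the full ARN is expected in responses as-is.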
+
+const testCases = [
+    {
+        name: 'algo-none',
+        // since init inserts objects with each encryption,
+        // this bucket ends up with a non-mandatory AES256 SSE
+    },
+    {
+        name: 'algo-none-del-sse',
+        /** flag to remove the non-mandatory AES256 SSE from bucket MD beforeEach test */
+        deleteSSE: true,
+    },
+    {
+        name: 'algo-aes256',
+        algo: 'AES256',
+    },
+    {
+        name: 'algo-awskms',
+        algo: 'aws:kms',
+    },
+    {
+        name: 'algo-awskms-key',
+        algo: 'aws:kms',
+        masterKeyId: true,
+    },
+    {
+        name: 'algo-awskms-key-arnprefix',
+        algo: 'aws:kms',
+        masterKeyId: true,
+        arnPrefix: true,
+    },
+];
+const testCasesObj = testCases.filter(tc => !tc.deleteSSE);
+
+const s3config = getConfig('vault', { signatureVersion: 'v4' });
+const s3 = new S3(s3config);
+const bucketUtil = new BucketUtility('vault');
+
+kms.client._supportsDefaultKeyPerAccount = false; // to generate keys without vault account side effects
+
+function hydrateSSEConfig({ algo: SSEAlgorithm, masterKeyId: KMSMasterKeyID }) {
+    // stringify and parse to strip undefined values
+    return JSON.parse(JSON.stringify({ Rules: [{
+        ApplyServerSideEncryptionByDefault: {
+            SSEAlgorithm,
+            KMSMasterKeyID,
+        },
+    }] }));
+}
+
+function putObjParams(Bucket, Key, sseConfig, kmsKeyId) {
+    return {
+        Bucket,
+        Key,
+        ...(sseConfig.algo && {
+            ServerSideEncryption: sseConfig.algo,
+            ...(sseConfig.masterKeyId && {
+                SSEKMSKeyId: kmsKeyId,
+            }),
+        }),
+    };
+}
+
+const getBucketMD = promisify(metadata.getBucket.bind(metadata));
+const getObjectMD = promisify(metadata.getObjectMD.bind(metadata));
+const updateBucketMD = promisify(metadata.updateBucket.bind(metadata));
+
+async function putEncryptedObject(Bucket, Key, sseConfig, kmsKeyId, Body) {
+    return s3.putObject({
+        ...putObjParams(Bucket, Key, sseConfig, kmsKeyId),
+        Body,
+    }).promise();
+}
+
+async function assertObjectSSE(Bucket, Key, objConf, obj, bktConf, bkt, VersionId, Body) {
+    const head = await s3.headObject({ Bucket, Key, VersionId }).promise();
+
+    // obj takes precedence over bkt
+    assert.strictEqual(head.ServerSideEncryption, (objConf.algo || bktConf.algo));
+    if (obj.kmsKey) {
+        assert.strictEqual(head.SSEKMSKeyId, getKey(obj.kmsKeyInfo.masterKeyArn));
+    } else if (objConf.algo !== 'AES256' && bkt.kmsKey) {
+        assert.strictEqual(head.SSEKMSKeyId, getKey(bkt.kmsKeyInfo.masterKeyArn));
+    } else if (head.ServerSideEncryption === 'aws:kms') {
+        // We differ from AWS behavior and always return a
+        // masterKeyId, even when not explicitly configured.
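+        // kmsHideScalityArn decides whether responses expose the bare key id
+        // (hideArn) or the ARN-prefixed key (showArn); metadata keeps the ARN either way.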
+        if (config.kmsHideScalityArn) {
+            assert.doesNotMatch(head.SSEKMSKeyId, new RegExp(kms.arnPrefix));
+        } else {
+            assert.match(head.SSEKMSKeyId, new RegExp(kms.arnPrefix));
+        }
+    } else {
+        assert.strictEqual(head.SSEKMSKeyId, undefined);
+        if (head.ServerSideEncryption === 'AES256') {
+            // verify metadata, since the KMS key is not returned
+            const objMD = await getObjectMD(Bucket, Key, {}, log);
+            const objSseMD = objMD['x-amz-server-side-encryption-aws-kms-key-id'];
+            assert.match(objSseMD, new RegExp(kms.arnPrefix));
+        }
+    }
+
+    // always verify GetObject as well, to ensure accurate decryption
+    const get = await s3.getObject({ Bucket, Key, ...(VersionId && { VersionId }) }).promise();
+    assert.strictEqual(get.Body.toString(), Body);
+}
+
+async function cleanup(Bucket) {
+    void await bucketUtil.empty(Bucket);
+    void await s3.deleteBucket({ Bucket }).promise();
+}
+
+/** for kms createBucketKey */
+const bucketInfo = new BucketInfo('enc-bucket-test', 'OwnerId',
+    'OwnerDisplayName', new Date().toJSON());
+
+describe('SSE KMS arnPrefix', () => {
+    /** Bucket to test CopyObject from and to */
+    const copyBkt = 'enc-bkt-copy';
+    const copyObj = 'copy-obj';
+    let copyKmsKey;
+    const bkts = {};
+    const mpuCopyBkt = 'enc-bkt-mpu-copy';
+
+    this.initBucket = async function initBucket(bktConf) {
+        const bkt = {
+            name: `enc-bkt-${bktConf.name}`,
+            /** versioned bucket name */
+            vname: `versioned-enc-bkt-${bktConf.name}`,
+            kmsKeyInfo: null,
+            kmsKey: null,
+            /** as copy sources, include an object with each encryption */
+            objs: {},
+        };
+        bkts[bktConf.name] = bkt;
+        if (bktConf.algo && bktConf.masterKeyId) {
+            bkt.kmsKeyInfo = await promisify(kms.createBucketKey)(bucketInfo, log);
+            bkt.kmsKey = bktConf.arnPrefix
+                ? bkt.kmsKeyInfo.masterKeyArn
+                : bkt.kmsKeyInfo.masterKeyId;
+        }
+        void await s3.createBucket(({ Bucket: bkt.name })).promise();
+        void await s3.createBucket(({ Bucket: bkt.vname })).promise();
+        if (bktConf.algo) {
+            // bucket encryption will be asserted in the bucket test
+            void await s3.putBucketEncryption({
+                Bucket: bkt.name,
+                ServerSideEncryptionConfiguration: hydrateSSEConfig({
+                    algo: bktConf.algo, masterKeyId: bkt.kmsKey }),
+            }).promise();
+            void await s3.putBucketEncryption({
+                Bucket: bkt.vname,
+                ServerSideEncryptionConfiguration: hydrateSSEConfig({
+                    algo: bktConf.algo, masterKeyId: bkt.kmsKey }),
+            }).promise();
+        }
+
+        // put an object for each SSE conf in each bucket
+        void await Promise.all(testCases.map(async objConf => {
+            const obj = {
+                name: `for-copy-enc-obj-${objConf.name}`,
+                kmsKeyInfo: null,
+                kmsKey: null,
+                body: `BODY(for-copy-enc-obj-${objConf.name})`,
+            };
+            bkt.objs[objConf.name] = obj;
+            if (objConf.algo && objConf.masterKeyId) {
+                obj.kmsKeyInfo = await promisify(kms.createBucketKey)(bucketInfo, log);
+                obj.kmsKey = objConf.arnPrefix
+                    ?
obj.kmsKeyInfo.masterKeyArn + : obj.kmsKeyInfo.masterKeyId; + } + try { + return await putEncryptedObject(bkt.name, obj.name, objConf, obj.kmsKey, obj.body); + } catch (err) { + console.log('ERR', err, err && err.toString(), bktConf.name, obj.name, objConf.algo, obj.kmsKeyInfo) + throw err; + } + })); + }; + + before('setup', async () => { + void await promisify(metadata.setup.bind(metadata))(); + copyKmsKey = (await promisify(kms.createBucketKey)(bucketInfo, log)).masterKeyArn; + try { + // pre cleanup + void await cleanup(copyBkt); + void await cleanup(mpuCopyBkt); + void await Promise.all(Object.values(bkts).map(async bkt => { + void await cleanup(bkt.name); + return await cleanup(bkt.vname); + })); + } catch (e) { void e; } + + // init copy bucket + void await s3.createBucket(({ Bucket: copyBkt })).promise(); + void await s3.createBucket(({ Bucket: mpuCopyBkt })).promise(); + void await s3.putBucketEncryption({ + Bucket: copyBkt, + ServerSideEncryptionConfiguration: hydrateSSEConfig({ algo: 'aws:kms', masterKeyId: copyKmsKey }), + }).promise(); + void await s3.putObject({ Bucket: copyBkt, Key: copyObj, Body: 'BODY(copy)' }).promise(); + + // Prepare every buckets with 1 object (for copy) + void await Promise.all(testCases.map(async bktConf => this.initBucket(bktConf))); + }); + + after(async () => { + void await cleanup(copyBkt); + void await cleanup(mpuCopyBkt); + // Clean every bucket + void await Promise.all(Object.values(bkts).map(async bkt => { + void await cleanup(bkt.name); + return await cleanup(bkt.vname); + })); + }); + + testCases.forEach(bktConf => describe(`bucket enc-bkt-${bktConf.name}`, () => { + let bkt = bkts[bktConf.name]; + + before(() => { + bkt = bkts[bktConf.name]; + }); + + if (bktConf.deleteSSE) { + beforeEach(async () => { + const bucketMD = await getBucketMD(bkt.name, log); + if (bucketMD.getServerSideEncryption()) { + bucketMD.setServerSideEncryption(null); + void await updateBucketMD(bucketMD.getName(), bucketMD, log); + } + }); + } + + if (!bktConf.algo) { + it('GetBucketEncryption should return ServerSideEncryptionConfigurationNotFoundError', async () => { + void await assert.rejects(s3.getBucketEncryption({ Bucket: bkt.name }).promise(), err => { + assert.strictEqual(err.code, 'ServerSideEncryptionConfigurationNotFoundError'); + return true; + }); + }); + + if (!bktConf.deleteSSE) { + it('should have non mandatory SSE in bucket MD as test init put an object with AES256', async () => { + const bucketMD = await getBucketMD(bkt.name, log); + const sseMD = bucketMD.getServerSideEncryption(); + assert.strictEqual(sseMD.mandatory, false); + assert.strictEqual(sseMD.algorithm, 'AES256'); + assert.match(sseMD.masterKeyId, new RegExp(kms.arnPrefix)); + }); + } + } else { + it('GetBucketEncryption should return SSE with arnPrefix to key', async () => { + // bucket already has SSE from initBucket function + const sseS3 = await s3.getBucketEncryption({ Bucket: bkt.name }).promise(); + + const { SSEAlgorithm, KMSMasterKeyID } = sseS3 + .ServerSideEncryptionConfiguration.Rules[0].ApplyServerSideEncryptionByDefault; + + // Compare bucketMD as well to make sure key is stored with arn + const bucketMD = await getBucketMD(bkt.name, log); + const sseMD = bucketMD.getServerSideEncryption(); + + assert.strictEqual(SSEAlgorithm, bktConf.algo); + assert.strictEqual(sseMD.algorithm, bktConf.algo); + if (!bktConf.masterKeyId) { + // AES256 or aws:kms without keyId + assert.match(sseMD.masterKeyId, new RegExp(kms.arnPrefix)); + } + if (bktConf.masterKeyId) { + // arn prefixed 
even if not prefixed in input + assert.strictEqual(sseMD.configuredMasterKeyId, bkt.kmsKeyInfo.masterKeyArn); + assert.strictEqual(KMSMasterKeyID, getKey(bkt.kmsKeyInfo.masterKeyArn)); + } + }); + } + + testCasesObj.forEach(objConf => it(`should have pre uploaded object with SSE ${objConf.name}`, async () => { + const obj = bkt.objs[objConf.name]; + void await assertObjectSSE(bkt.name, obj.name, objConf, obj, bktConf, bkt, null, obj.body); + })); + + testCasesObj.forEach(objConf => describe(`object enc-obj-${objConf.name}`, () => { + const obj = { + name: `enc-obj-${objConf.name}`, + kmsKeyInfo: null, + kmsKey: null, + body: `BODY(enc-obj-${objConf.name})`, + }; + /** to be used as source of copy */ + let objForCopy; + + before(async () => { + if (objConf.algo && objConf.masterKeyId) { + obj.kmsKeyInfo = await promisify(kms.createBucketKey)(bucketInfo, log); + obj.kmsKey = objConf.arnPrefix + ? obj.kmsKeyInfo.masterKeyArn + : obj.kmsKeyInfo.masterKeyId; + } + objForCopy = bkt.objs[objConf.name]; + }); + + it(`should PutObject ${obj.name} overriding bucket SSE`, async () => { + void await putEncryptedObject(bkt.name, obj.name, objConf, obj.kmsKey, obj.body); + void await assertObjectSSE(bkt.name, obj.name, objConf, obj, bktConf, bkt, null, obj.body); + }); + + // TODO S3C-9996 Fix MPU & SSE to unskip this + // Should as well fix the output of CreateMPU and UploadPart to include the SSE + // and validate the SSE output here and check with listPart & listMultipartUploads as well + const optionalSkip = objConf.algo || bktConf.masterKeyId || (!bktConf.algo && !bktConf.deleteSSE) + ? it // .skip + : it; + optionalSkip('should encrypt MPU and put 2 encrypted parts', async () => { + const mpuKey = `${obj.name}-mpu`; + const mpu = await s3.createMultipartUpload( + putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); + const part1 = await s3.uploadPart({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Body: `${obj.body}-MPU1`, + Key: mpuKey, + PartNumber: 1, + }).promise(); + const part2 = await s3.uploadPart({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Body: `${obj.body}-MPU2`, + Key: mpuKey, + PartNumber: 2, + }).promise(); + void await s3.completeMultipartUpload({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + MultipartUpload: { + Parts: [ + { PartNumber: 1, ETag: part1.ETag }, + { PartNumber: 2, ETag: part2.ETag }, + ], + }, + }).promise(); + const fullBody = `${obj.body}-MPU1${obj.body}-MPU2`; + void await assertObjectSSE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody); + }); + + optionalSkip('should encrypt MPU and copy an encrypted parts from encrypted bucket', async () => { + const mpuKey = `${obj.name}-mpucopy`; + const mpu = await s3.createMultipartUpload( + putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); + const part1 = await s3.uploadPartCopy({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + PartNumber: 1, + CopySource: `${copyBkt}/${copyObj}`, + }).promise(); + const part2 = await s3.uploadPart({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Body: `${obj.body}-MPU2`, + Key: mpuKey, + PartNumber: 2, + }).promise(); + + void await s3.completeMultipartUpload({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + MultipartUpload: { + Parts: [ + { PartNumber: 1, ETag: part1.ETag }, + { PartNumber: 2, ETag: part2.ETag }, + ], + }, + }).promise(); + const fullBody = `BODY(copy)${obj.body}-MPU2`; + void await assertObjectSSE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody); + }); + + 
optionalSkip('should encrypt MPU and copy encrypted range parts from encrypted bucket', async () => {
+                const mpuKey = `${obj.name}-mpucopyrange`;
+                const mpu = await s3.createMultipartUpload(
+                    putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise();
+                // source body is "BODY(copy)"
+                const part1 = await s3.uploadPartCopy({
+                    UploadId: mpu.UploadId,
+                    Bucket: bkt.name,
+                    Key: mpuKey,
+                    PartNumber: 1,
+                    CopySource: `${copyBkt}/${copyObj}`,
+                    CopySourceRange: 'bytes=5-8', // copy
+                }).promise();
+                const part2 = await s3.uploadPartCopy({
+                    UploadId: mpu.UploadId,
+                    Bucket: bkt.name,
+                    Key: mpuKey,
+                    PartNumber: 2,
+                    CopySource: `${copyBkt}/${copyObj}`,
+                    CopySourceRange: 'bytes=0-3', // BODY
+                }).promise();
+
+                void await s3.completeMultipartUpload({
+                    UploadId: mpu.UploadId,
+                    Bucket: bkt.name,
+                    Key: mpuKey,
+                    MultipartUpload: {
+                        Parts: [
+                            { PartNumber: 1, ETag: part1.ETag },
+                            { PartNumber: 2, ETag: part2.ETag },
+                        ],
+                    },
+                }).promise();
+                const fullBody = 'copyBODY';
+                void await assertObjectSSE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody);
+            });
+
+            it(`should CopyObject ${obj.name} into encrypted destination bucket`, async () => {
+                // if SSE is not provided, the bucket SSE is used
+                const source = `${bkt.name}/${objForCopy.name}`;
+                const copied = await s3.copyObject({
+                    Bucket: copyBkt,
+                    Key: source,
+                    CopySource: source,
+                }).promise();
+
+                assert.strictEqual(copied.ServerSideEncryption, 'aws:kms');
+                // TODO FIX: return SSEKMSKeyId on CopyObject and test it
+
+                const head = await s3.headObject({ Bucket: copyBkt, Key: source }).promise();
+                // hardcoded SSE for the copy bucket
+                assert.strictEqual(head.ServerSideEncryption, 'aws:kms');
+                assert.strictEqual(head.SSEKMSKeyId, getKey(copyKmsKey));
+
+                const get = await s3.getObject({ Bucket: copyBkt, Key: source }).promise();
+                assert.strictEqual(get.Body.toString(), objForCopy.body);
+            });
+
+            it(`should CopyObject ${obj.name} into same bucket with SSE config`, async () => {
+                // if SSE is not provided, the bucket SSE is used
+                const copyKey = `${obj.name}-copy`;
+                const copied = await s3.copyObject({
+                    ...putObjParams(bkt.name, copyKey, objConf, obj.kmsKey),
+                    CopySource: `${bkt.name}/${obj.name}`,
+                }).promise();
+
+                assert.strictEqual(copied.ServerSideEncryption, (objConf.algo || bktConf.algo));
+                // TODO FIX: return SSEKMSKeyId on CopyObject and test it
+
+                void await assertObjectSSE(bkt.name, copyKey, objConf, obj, bktConf, bkt, null, obj.body);
+            });
+
+            it(`should CopyObject from encrypted source into ${obj.name}`, async () => {
+                // if SSE is not provided, the bucket SSE is used
+                const source = `${copyBkt}/${copyObj}`;
+                const copyKey = `${obj.name}-copy-from`;
+                const copied = await s3.copyObject({
+                    ...putObjParams(bkt.name, copyKey, objConf, obj.kmsKey),
+                    CopySource: source,
+                }).promise();
+
+                assert.strictEqual(copied.ServerSideEncryption, (objConf.algo || bktConf.algo));
+                // TODO FIX: return SSEKMSKeyId on CopyObject and test it
+
+                void await assertObjectSSE(bkt.name, copyKey, objConf, obj, bktConf, bkt, null, 'BODY(copy)');
+            });
+
+            it(`should PutObject versioned with SSE ${obj.name}`, async () => {
+                // ensure the versioned bucket is empty
+                void await bucketUtil.empty(bkt.vname);
+                let { Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise();
+                // regularly count versioned objects
+                assert.strictEqual(Versions.length, 0);
+
+                const bodyBase = `BODY(${obj.name})-base`;
+                void await putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyBase);
+                void await assertObjectSSE(bkt.vname,
obj.name, objConf, obj, bktConf, bkt, null, bodyBase); + ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 1); + + void await s3.putBucketVersioning({ Bucket: bkt.vname, + VersioningConfiguration: { Status: 'Enabled' }, + }).promise(); + + const bodyV1 = `BODY(${obj.name})-v1`; + const v1 = await putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyV1); + const bodyV2 = `BODY(${obj.name})-v2`; + const v2 = await putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyV2); + ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + + const current = await s3.headObject({ Bucket: bkt.vname, Key: obj.name }).promise(); + assert.strictEqual(current.VersionId, v2.VersionId); // ensure versioning as expected + ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, null, bodyV2); // v2 + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, 'null', bodyBase); + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, v1.VersionId, bodyV1); + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, v2.VersionId, bodyV2); + ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + + void await s3.putBucketVersioning({ Bucket: bkt.vname, + VersioningConfiguration: { Status: 'Suspended' }, + }).promise(); + + // should be fine after version suspension + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, null, bodyV2); // v2 + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, 'null', bodyBase); + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, v1.VersionId, bodyV1); + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, v2.VersionId, bodyV2); + ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + + // put a new null version + const bodyFinal = `BODY(${obj.name})-final`; + void await putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyFinal); + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, null, bodyFinal); // null + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, 'null', bodyFinal); + ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + }); + })); + })); + + it('should encrypt MPU and copy parts from every buckets and objects matrice', async () => { + void await s3.putBucketEncryption({ + Bucket: mpuCopyBkt, + // AES256 because input key is broken for now + ServerSideEncryptionConfiguration: hydrateSSEConfig({ algo: 'AES256' }), + }).promise(); + const mpuKey = 'mpucopy'; + const mpu = await s3.createMultipartUpload( + putObjParams(mpuCopyBkt, mpuKey, {}, null)).promise(); + const copyPartArg = { + UploadId: mpu.UploadId, + Bucket: mpuCopyBkt, + Key: mpuKey, + }; + // For each test Case bucket and object copy a part + const uploadPromises = testCases.reduce((acc, bktConf, bktIdx) => { + const bkt = bkts[bktConf.name]; + + return acc.concat(testCasesObj.map(async (objConf, objIdx) => { + const obj = bkt.objs[objConf.name]; + + const partNumber = bktIdx * testCasesObj.length + 
objIdx + 1; + const res = await s3.uploadPartCopy({ + ...copyPartArg, + PartNumber: partNumber, + CopySource: `${bkt.name}/${obj.name}`, + }).promise(); + + return { partNumber, body: obj.body, res: res.CopyPartResult }; + })); + }, []); + + const parts = await Promise.all(uploadPromises); + + void await s3.completeMultipartUpload({ + UploadId: mpu.UploadId, + Bucket: mpuCopyBkt, + Key: mpuKey, + MultipartUpload: { + Parts: parts.map(part => ({ PartNumber: part.partNumber, ETag: part.res.ETag })), + }, + }).promise(); + const fullBody = parts.reduce((acc, part) => `${acc}${part.body}`, ''); + void await assertObjectSSE(mpuCopyBkt, mpuKey, {}, {}, { algo: 'AES256' }, {}, null, fullBody); + }); +}); + +describe('ensure MPU use good SSE', () => { + const mpuKmsBkt = 'bkt-mpu-kms'; + // const mpuKmsBktKey = 'bkt-mpu-kms-key'; + let kmsKeympuKmsBkt; + + + before(async () => { + kmsKeympuKmsBkt = (await promisify(kms.createBucketKey)(bucketInfo, log)).masterKeyArn; + void await promisify(metadata.setup.bind(metadata))(); + void await s3.createBucket({ Bucket: mpuKmsBkt }).promise(); + // void await s3.createBucket({ Bucket: mpuKmsBktKey }).promise(); + // void await s3.putBucketEncryption({ + // Bucket: mpuKmsBktKey, + // ServerSideEncryptionConfiguration: hydrateSSEConfig({ algo: 'AES256' }) }).promise(); + // void await s3.putBucketEncryption({ + // Bucket: mpuKmsBktKey, + // ServerSideEncryptionConfiguration: + // hydrateSSEConfig({ algo: 'aws:kms', masterKeyId: kmsKeympuKmsBkt }) }).promise(); + void await s3.putBucketEncryption({ + Bucket: mpuKmsBkt, + ServerSideEncryptionConfiguration: + hydrateSSEConfig({ algo: 'aws:kms', masterKeyId: kmsKeympuKmsBkt }) }).promise(); + }); + + after(async () => { + void await bucketUtil.empty(mpuKmsBkt); + void await s3.deleteBucket({ Bucket: mpuKmsBkt }).promise(); + // void await s3.deleteBucket({ Bucket: mpuKmsBktKey }).promise(); + }); + + it('mpu upload part should fail with sse header', async () => { + const key = 'mpuKeyBadUpload'; + const mpu = await s3.createMultipartUpload({ + Bucket: mpuKmsBkt, Key: key }).promise(); + void await assert.rejects(promisify(makeRequest)({ + method: 'PUT', + hostname: s3.endpoint.hostname, + port: s3.endpoint.port, + path: `/${mpuKmsBkt}/${key}`, + headers: { + 'content-length': 4, + // not allowed on UploadPart + 'x-amz-server-side-encryption': 'aws:kms', + 'x-amz-server-side-encryption-aws-kms-key-id': 'makeRequest', + }, + queryObj: { + uploadId: mpu.UploadId, + partNumber: '2', + }, + authCredentials: { + accessKey: s3.config.credentials.accessKeyId, + secretKey: s3.config.credentials.secretAccessKey, + }, + requestBody: 'hello', + }), err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return true; + }); + }); + + it('mpu should use encryption from createMPU', async () => { + const key = 'mpuKey'; + const mpuKms = (await promisify(kms.createBucketKey)(bucketInfo, log)).masterKeyArn; + const mpu = await s3.createMultipartUpload({ + Bucket: mpuKmsBkt, Key: key, ServerSideEncryption: 'aws:kms', SSEKMSKeyId: mpuKms }).promise(); + assert.strictEqual(mpu.ServerSideEncryption, 'aws:kms'); + assert.strictEqual(mpu.SSEKMSKeyId, getKey(mpuKms)); + + const part1 = await s3.uploadPart({ + UploadId: mpu.UploadId, + Bucket: mpuKmsBkt, + Body: 'Scality', + Key: key, + PartNumber: 1, + }).promise(); + assert.strictEqual(part1.ServerSideEncryption, 'aws:kms'); + assert.strictEqual(part1.SSEKMSKeyId, getKey(mpuKms)); + const complete = await s3.completeMultipartUpload({ + UploadId: mpu.UploadId, + Bucket: mpuKmsBkt, 
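+            // completing the MPU should echo the SSE negotiated at
+            // createMultipartUpload time; asserted on `complete` below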
+            Key: key,
+            MultipartUpload: {
+                Parts: [
+                    { PartNumber: 1, ETag: part1.ETag },
+                ],
+            },
+        }).promise();
+        assert.strictEqual(complete.ServerSideEncryption, 'aws:kms');
+        assert.strictEqual(complete.SSEKMSKeyId, getKey(mpuKms));
+        void await assertObjectSSE(mpuKmsBkt, key,
+            { algo: 'aws:kms', masterKeyId: true },
+            { kmsKey: mpuKms, kmsKeyInfo: { masterKeyId: mpuKms, masterKeyArn: mpuKms } },
+            { algo: 'aws:kms', masterKeyId: true },
+            { kmsKey: kmsKeympuKmsBkt, kmsKeyInfo: { masterKeyId: kmsKeympuKmsBkt, masterKeyArn: kmsKeympuKmsBkt } },
+            null, 'Scality');
+    });
+});
diff --git a/tests/functional/sse-kms-migration/beforeMigration.js b/tests/functional/sse-kms-migration/beforeMigration.js
new file mode 100644
index 0000000000..17d158de72
--- /dev/null
+++ b/tests/functional/sse-kms-migration/beforeMigration.js
@@ -0,0 +1,696 @@
+/* eslint-disable */
+const getConfig = require('../aws-node-sdk/test/support/config');
+const { S3 } = require('aws-sdk');
+const kms = require('../../../lib/kms/wrapper');
+const { promisify } = require('util');
+const BucketInfo = require('arsenal').models.BucketInfo;
+const { DummyRequestLogger } = require('../../unit/helpers');
+const BucketUtility = require('../aws-node-sdk/lib/utility/bucket-util');
+const assert = require('assert');
+const metadata = require('../../../lib/metadata/wrapper');
+const crypto = require('crypto');
+const log = new DummyRequestLogger();
+
+// Use the file KMS backend to get keys with an ARN prefix; without a prefix, the mem backend is in use.
+
+// Copies part of aws-node-sdk/test/object/encryptionHeaders.js and adds more tests
+// around the SSE key prefix and migration.
+// Always GetObject as well, to ensure decryption works.
+
+const testCases = [
+    {
+        name: 'algo-none',
+        // since init inserts objects with each encryption,
+        // this bucket ends up with a non-mandatory AES256 SSE
+    },
+    {
+        name: 'algo-none-del-sse',
+        /** flag to remove the non-mandatory AES256 SSE from bucket MD beforeEach test */
+        deleteSSE: true,
+    },
+    {
+        name: 'algo-aes256',
+        algo: 'AES256',
+    },
+    {
+        name: 'algo-awskms',
+        algo: 'aws:kms',
+    },
+    {
+        name: 'algo-awskms-key',
+        algo: 'aws:kms',
+        masterKeyId: true,
+    },
+    {
+        name: 'algo-awskms-key-arnprefix',
+        algo: 'aws:kms',
+        masterKeyId: true,
+        arnPrefix: true,
+    },
+];
+const testCasesObj = testCases.filter(tc => !tc.deleteSSE);
+
+const config = getConfig('vault', { signatureVersion: 'v4' });
+const s3 = new S3(config);
+const bucketUtil = new BucketUtility('vault');
+
+// Fix for the before-migration run: the old cloudserver does not prefix keys
+// with an ARN, so force an empty prefix for all assertions in this file.
+Object.defineProperty(kms, 'arnPrefix', { get() { return ''; } });
+console.log('PREFIX', kms.arnPrefix);
+
+function hydrateSSEConfig({ algo: SSEAlgorithm, masterKeyId: KMSMasterKeyID }) {
+    // stringify and parse to strip undefined values
+    return JSON.parse(JSON.stringify({ Rules: [{
+        ApplyServerSideEncryptionByDefault: {
+            SSEAlgorithm,
+            KMSMasterKeyID,
+        },
+    }] }));
+}
+
+function putObjParams(Bucket, Key, sseConfig, kmsKeyId) {
+    return {
+        Bucket,
+        Key,
+        ...(sseConfig.algo && {
+            ServerSideEncryption: sseConfig.algo,
+            ...(sseConfig.masterKeyId && {
+                SSEKMSKeyId: kmsKeyId,
+            }),
+        }),
+    };
+}
+
+const getBucketMD = promisify(metadata.getBucket.bind(metadata));
+const getObjectMD = promisify(metadata.getObjectMD.bind(metadata));
+const updateBucketMD = promisify(metadata.updateBucket.bind(metadata));
+
+async function putEncryptedObject(Bucket, Key, sseConfig, kmsKeyId, Body) {
+    return s3.putObject({
+        ...putObjParams(Bucket, Key, sseConfig, kmsKeyId),
+        Body,
+    }).promise();
+}
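+
+// assertObjectSSE below mirrors the arnPrefix.js helper but compares raw
+// masterKeyArn values: with arnPrefix forced empty above, pre-migration
+// keys carry no prefix.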
+async function assertObjectSSE(Bucket, Key, objConf, obj, bktConf, bkt, VersionId, Body) {
+    const head = await s3.headObject({ Bucket, Key, VersionId }).promise();
+
+    // obj takes precedence over bkt
+    assert.strictEqual(head.ServerSideEncryption, (objConf.algo || bktConf.algo));
+    if (obj.kmsKey) {
+        assert.strictEqual(head.SSEKMSKeyId, obj.kmsKeyInfo.masterKeyArn);
+    } else if (objConf.algo !== 'AES256' && bkt.kmsKey) {
+        assert.strictEqual(head.SSEKMSKeyId, bkt.kmsKeyInfo.masterKeyArn);
+    } else if (head.ServerSideEncryption === 'aws:kms') {
+        // We differ from AWS behavior and always return a
+        // masterKeyId, even when not explicitly configured.
+        assert.match(head.SSEKMSKeyId, new RegExp(kms.arnPrefix));
+    } else {
+        assert.strictEqual(head.SSEKMSKeyId, undefined);
+        if (head.ServerSideEncryption === 'AES256') {
+            // verify metadata, since the KMS key is not returned
+            const objMD = await getObjectMD(Bucket, Key, {}, log);
+            const objSseMD = objMD['x-amz-server-side-encryption-aws-kms-key-id'];
+            assert.match(objSseMD, new RegExp(kms.arnPrefix));
+        }
+    }
+
+    // always verify GetObject as well, to ensure accurate decryption
+    const get = await s3.getObject({ Bucket, Key, ...(VersionId && { VersionId }) }).promise();
+    assert.strictEqual(get.Body.toString(), Body);
+}
+
+/** for kms createBucketKey */
+const bucketInfo = new BucketInfo('enc-bucket-test', 'OwnerId',
+    'OwnerDisplayName', new Date().toJSON());
+
+describe('SSE KMS before migration', () => {
+    /** Bucket to test CopyObject from and to */
+    const copyBkt = 'enc-bkt-copy';
+    const copyObj = 'copy-obj';
+    const copyKmsKey = `${kms.arnPrefix}${crypto.randomBytes(32).toString('hex')}`;
+    const bkts = {};
+    const mpuCopyBkt = 'enc-bkt-mpu-copy';
+
+    this.initBucket = async function initBucket(bktConf) {
+        const bkt = {
+            name: `enc-bkt-${bktConf.name}`,
+            /** versioned bucket name */
+            vname: `versioned-enc-bkt-${bktConf.name}`,
+            kmsKeyInfo: null,
+            kmsKey: null,
+            /** as copy sources, include an object with each encryption */
+            objs: {},
+        };
+        bkts[bktConf.name] = bkt;
+        if (bktConf.algo && bktConf.masterKeyId) {
+            // bkt.kmsKeyInfo = await promisify(kms.createBucketKey)(bucketInfo, log);
+            // bkt.kmsKeyInfo.masterKeyArn = bkt.kmsKeyInfo.masterKeyId;
+            const key = crypto.randomBytes(32).toString('hex');
+            bkt.kmsKeyInfo = { masterKeyId: key, masterKeyArn: `${kms.arnPrefix}${key}` };
+            bkt.kmsKey = bktConf.arnPrefix
+                ?
bkt.kmsKeyInfo.masterKeyArn + : bkt.kmsKeyInfo.masterKeyId; + } + void await s3.createBucket(({ Bucket: bkt.name })).promise(); + // test reading website + void await s3.putBucketWebsite({ Bucket: bkt.name, WebsiteConfiguration: { IndexDocument: { Suffix: 'index.html' }} }).promise(); + + const bpolicy = { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "PublicReadGetObject", + "Effect": "Allow", + "Principal": "*", + "Action": [ + "s3:GetObject" + ], + "Resource": [ + `arn:aws:s3:::${bkt.name}/*` + ] + } + ] + } + void await s3.putBucketPolicy({ + Bucket: bkt.name, + Policy: JSON.stringify(bpolicy), + }).promise(); + void await s3.createBucket(({ Bucket: bkt.vname })).promise(); + if (bktConf.algo) { + // bucket encryption will be asserted in bucket test + void await s3.putBucketEncryption({ + Bucket: bkt.name, + ServerSideEncryptionConfiguration: hydrateSSEConfig({ + algo: bktConf.algo, masterKeyId: bkt.kmsKey }), + }).promise(); + void await s3.putBucketEncryption({ + Bucket: bkt.vname, + ServerSideEncryptionConfiguration: hydrateSSEConfig({ + algo: bktConf.algo, masterKeyId: bkt.kmsKey }), + }).promise(); + } + + // Put an object for each SSE conf in each bucket + void await Promise.all(testCases.map(async objConf => { + const obj = { + name: `for-copy-enc-obj-${objConf.name}`, + kmsKeyInfo: null, + kmsKey: null, + body: `BODY(for-copy-enc-obj-${objConf.name})`, + }; + bkt.objs[objConf.name] = obj; + if (objConf.algo && objConf.masterKeyId) { + const key = crypto.randomBytes(32).toString('hex'); + obj.kmsKeyInfo = { masterKeyId: key, masterKeyArn: `${kms.arnPrefix}${key}` }; + obj.kmsKey = objConf.arnPrefix + ? obj.kmsKeyInfo.masterKeyArn + : obj.kmsKeyInfo.masterKeyId; + } + void await putEncryptedObject(bkt.name, `${objConf.name}/index.html`, objConf, obj.kmsKey, obj.body) + return await putEncryptedObject(bkt.name, obj.name, objConf, obj.kmsKey, obj.body); + })); + }; + + before(async () => { + void await promisify(metadata.setup.bind(metadata))(); + + // init copy bucket + void await s3.createBucket(({ Bucket: copyBkt })).promise(); + void await s3.createBucket(({ Bucket: mpuCopyBkt })).promise(); + void await s3.putBucketEncryption({ + Bucket: copyBkt, + ServerSideEncryptionConfiguration: hydrateSSEConfig({ algo: 'aws:kms', masterKeyId: copyKmsKey }), + }).promise(); + void await s3.putObject({ Bucket: copyBkt, Key: copyObj, Body: 'BODY(copy)' }).promise(); + + // Prepare every buckets with 1 object (for copy) + void await Promise.all(testCases.map(async bktConf => this.initBucket(bktConf))); + }); + + testCases.forEach(bktConf => describe(`bucket enc-bkt-${bktConf.name}`, () => { + let bkt = bkts[bktConf.name]; + + before(() => { + bkt = bkts[bktConf.name]; + }); + + if (bktConf.deleteSSE) { + beforeEach(async () => { + const bucketMD = await getBucketMD(bkt.name, log); + if (bucketMD.getServerSideEncryption()) { + bucketMD.setServerSideEncryption(null); + void await updateBucketMD(bucketMD.getName(), bucketMD, log); + } + }); + } + + if (!bktConf.algo) { + it('GetBucketEncryption should return ServerSideEncryptionConfigurationNotFoundError', async () => { + void await assert.rejects(s3.getBucketEncryption({ Bucket: bkt.name }).promise(), err => { + assert.strictEqual(err.code, 'ServerSideEncryptionConfigurationNotFoundError'); + return true; + }); + }); + + if (!bktConf.deleteSSE) { + it('should have non mandatory SSE in bucket MD as test init put an object with AES256', async () => { + const bucketMD = await getBucketMD(bkt.name, log); + const sseMD = 
bucketMD.getServerSideEncryption(); + assert.strictEqual(sseMD.mandatory, false); + assert.strictEqual(sseMD.algorithm, 'AES256'); + assert.match(sseMD.masterKeyId, new RegExp(kms.arnPrefix)); + }); + } + } else { + it('GetBucketEncryption should return SSE with arnPrefix to key', async () => { + // bucket already has SSE from initBucket function + const sseS3 = await s3.getBucketEncryption({ Bucket: bkt.name }).promise(); + + const { SSEAlgorithm, KMSMasterKeyID } = sseS3 + .ServerSideEncryptionConfiguration.Rules[0].ApplyServerSideEncryptionByDefault; + + // Compare bucketMD as well to make sure key is stored with arn + const bucketMD = await getBucketMD(bkt.name, log); + const sseMD = bucketMD.getServerSideEncryption(); + + assert.strictEqual(SSEAlgorithm, bktConf.algo); + assert.strictEqual(sseMD.algorithm, bktConf.algo); + if (!bktConf.masterKeyId) { + // AES256 or aws:kms without keyId + assert.match(sseMD.masterKeyId, new RegExp(kms.arnPrefix)); + } + if (bktConf.masterKeyId) { + // arn prefixed even if not prefixed in input + assert.strictEqual(sseMD.configuredMasterKeyId, bkt.kmsKeyInfo.masterKeyArn); + assert.strictEqual(KMSMasterKeyID, bkt.kmsKeyInfo.masterKeyArn); + } + }); + } + + testCasesObj.forEach(objConf => it(`should have pre uploaded object with SSE ${objConf.name}`, async () => { + const obj = bkt.objs[objConf.name]; + void await assertObjectSSE(bkt.name, obj.name, objConf, obj, bktConf, bkt, null, obj.body); + })); + + testCasesObj.forEach(objConf => describe(`object enc-obj-${objConf.name}`, () => { + const obj = { + name: `enc-obj-${objConf.name}`, + kmsKeyInfo: null, + kmsKey: null, + body: `BODY(enc-obj-${objConf.name})`, + }; + /** to be used as source of copy */ + let objForCopy; + + before(async () => { + if (objConf.algo && objConf.masterKeyId) { + const key = crypto.randomBytes(32).toString('hex'); + obj.kmsKeyInfo = { masterKeyId: key, masterKeyArn: `${kms.arnPrefix}${key}` }; + obj.kmsKey = objConf.arnPrefix + ? obj.kmsKeyInfo.masterKeyArn + : obj.kmsKeyInfo.masterKeyId; + } + objForCopy = bkt.objs[objConf.name]; + }); + + it(`should PutObject ${obj.name} overriding bucket SSE`, async () => { + void await putEncryptedObject(bkt.name, obj.name, objConf, obj.kmsKey, obj.body); + void await assertObjectSSE(bkt.name, obj.name, objConf, obj, bktConf, bkt, null, obj.body); + }); + + // TODO S3C-9996 Fix MPU & SSE to unskip this + // Should as well fix the output of CreateMPU and UploadPart to include the SSE + // and validate the SSE output here and check with listPart & listMultipartUploads as well + const optionalSkip = objConf.algo || bktConf.masterKeyId || (!bktConf.algo && !bktConf.deleteSSE) + ? 
it.skip + : it; + optionalSkip('should encrypt MPU and put 2 encrypted parts', async () => { + const mpuKey = `${obj.name}-mpu`; + const mpu = await s3.createMultipartUpload( + putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); + const part1 = await s3.uploadPart({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Body: `${obj.body}-MPU1`, + Key: mpuKey, + PartNumber: 1, + }).promise(); + const part2 = await s3.uploadPart({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Body: `${obj.body}-MPU2`, + Key: mpuKey, + PartNumber: 2, + }).promise(); + void await s3.completeMultipartUpload({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + MultipartUpload: { + Parts: [ + { PartNumber: 1, ETag: part1.ETag }, + { PartNumber: 2, ETag: part2.ETag }, + ], + }, + }).promise(); + const fullBody = `${obj.body}-MPU1${obj.body}-MPU2`; + void await assertObjectSSE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody); + }); + + optionalSkip('should encrypt MPU and copy an encrypted parts from encrypted bucket', async () => { + const mpuKey = `${obj.name}-mpucopy`; + const mpu = await s3.createMultipartUpload( + putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); + const part1 = await s3.uploadPartCopy({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + PartNumber: 1, + CopySource: `${copyBkt}/${copyObj}`, + }).promise(); + const part2 = await s3.uploadPart({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Body: `${obj.body}-MPU2`, + Key: mpuKey, + PartNumber: 2, + }).promise(); + + void await s3.completeMultipartUpload({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + MultipartUpload: { + Parts: [ + { PartNumber: 1, ETag: part1.ETag }, + { PartNumber: 2, ETag: part2.ETag }, + ], + }, + }).promise(); + const fullBody = `BODY(copy)${obj.body}-MPU2`; + void await assertObjectSSE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody); + }); + + optionalSkip('should encrypt MPU and copy an encrypted range parts from encrypted bucket', async () => { + const mpuKey = `${obj.name}-mpucopyrange`; + const mpu = await s3.createMultipartUpload( + putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); + // source body is "BODY(copy)" + const part1 = await s3.uploadPartCopy({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + PartNumber: 1, + CopySource: `${copyBkt}/${copyObj}`, + CopySourceRange: 'bytes=5-8', // copy + }).promise(); + const part2 = await s3.uploadPartCopy({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + PartNumber: 2, + CopySource: `${copyBkt}/${copyObj}`, + CopySourceRange: 'bytes=0-3', // BODY + }).promise(); + + void await s3.completeMultipartUpload({ + UploadId: mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + MultipartUpload: { + Parts: [ + { PartNumber: 1, ETag: part1.ETag }, + { PartNumber: 2, ETag: part2.ETag }, + ], + }, + }).promise(); + const fullBody = 'copyBODY'; + void await assertObjectSSE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody); + }); + + optionalSkip('should prepare empty encrypted MPU without completion', async () => { + const mpuKey = `${obj.name}-migration-mpu-empty`; + const mpu = await s3.createMultipartUpload( + putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); + }); + + optionalSkip('should prepare encrypte MPU and put 2 encrypted parts without completion', async () => { + const mpuKey = `${obj.name}-migration-mpu`; + const mpu = await s3.createMultipartUpload( + putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); + 
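+                // parts are uploaded but the MPU is deliberately left incomplete;
+                // the commented-out completion below documents the flow expected
+                // from the post-migration suite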
const part1 = await s3.uploadPart({
+                    UploadId: mpu.UploadId,
+                    Bucket: bkt.name,
+                    Body: `${obj.body}-MPU1`,
+                    Key: mpuKey,
+                    PartNumber: 1,
+                }).promise();
+                const part2 = await s3.uploadPart({
+                    UploadId: mpu.UploadId,
+                    Bucket: bkt.name,
+                    Body: `${obj.body}-MPU2`,
+                    Key: mpuKey,
+                    PartNumber: 2,
+                }).promise();
+                // void await s3.completeMultipartUpload({
+                //     UploadId: mpu.UploadId,
+                //     Bucket: bkt.name,
+                //     Key: mpuKey,
+                //     MultipartUpload: {
+                //         Parts: [
+                //             { PartNumber: 1, ETag: part1.ETag },
+                //             { PartNumber: 2, ETag: part2.ETag },
+                //         ],
+                //     },
+                // }).promise();
+                // const fullBody = `${obj.body}-MPU1${obj.body}-MPU2`;
+                // void await assertObjectSSE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody);
+            });
+
+            optionalSkip('should prepare encrypted MPU and copy an encrypted part from encrypted bucket without completion', async () => {
+                const mpuKey = `${obj.name}-migration-mpucopy`;
+                const mpu = await s3.createMultipartUpload(
+                    putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise();
+                const part1 = await s3.uploadPartCopy({
+                    UploadId: mpu.UploadId,
+                    Bucket: bkt.name,
+                    Key: mpuKey,
+                    PartNumber: 1,
+                    CopySource: `${copyBkt}/${copyObj}`,
+                }).promise();
+                const part2 = await s3.uploadPart({
+                    UploadId: mpu.UploadId,
+                    Bucket: bkt.name,
+                    Body: `${obj.body}-MPU2`,
+                    Key: mpuKey,
+                    PartNumber: 2,
+                }).promise();
+
+                // void await s3.completeMultipartUpload({
+                //     UploadId: mpu.UploadId,
+                //     Bucket: bkt.name,
+                //     Key: mpuKey,
+                //     MultipartUpload: {
+                //         Parts: [
+                //             { PartNumber: 1, ETag: part1.ETag },
+                //             { PartNumber: 2, ETag: part2.ETag },
+                //         ],
+                //     },
+                // }).promise();
+                // const fullBody = `BODY(copy)${obj.body}-MPU2`;
+                // void await assertObjectSSE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody);
+            });
+
+            optionalSkip('should prepare encrypted MPU and copy encrypted range parts from encrypted bucket without completion', async () => {
+                const mpuKey = `${obj.name}-migration-mpucopyrange`;
+                const mpu = await s3.createMultipartUpload(
+                    putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise();
+                // source body is "BODY(copy)"
+                const part1 = await s3.uploadPartCopy({
+                    UploadId: mpu.UploadId,
+                    Bucket: bkt.name,
+                    Key: mpuKey,
+                    PartNumber: 1,
+                    CopySource: `${copyBkt}/${copyObj}`,
+                    CopySourceRange: 'bytes=5-8', // copy
+                }).promise();
+                const part2 = await s3.uploadPartCopy({
+                    UploadId: mpu.UploadId,
+                    Bucket: bkt.name,
+                    Key: mpuKey,
+                    PartNumber: 2,
+                    CopySource: `${copyBkt}/${copyObj}`,
+                    CopySourceRange: 'bytes=0-3', // BODY
+                }).promise();
+
+                // void await s3.completeMultipartUpload({
+                //     UploadId: mpu.UploadId,
+                //     Bucket: bkt.name,
+                //     Key: mpuKey,
+                //     MultipartUpload: {
+                //         Parts: [
+                //             { PartNumber: 1, ETag: part1.ETag },
+                //             { PartNumber: 2, ETag: part2.ETag },
+                //         ],
+                //     },
+                // }).promise();
+                // const fullBody = 'copyBODY';
+                // void await assertObjectSSE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody);
+            });
+
+            it(`should CopyObject ${obj.name} into encrypted destination bucket`, async () => {
+                // if SSE not provided it uses bucket SSE
+                const source = `${bkt.name}/${objForCopy.name}`;
+                const copied = await s3.copyObject({
+                    Bucket: copyBkt,
+                    Key: source,
+                    CopySource: source,
+                }).promise();
+
+                assert.strictEqual(copied.ServerSideEncryption, 'aws:kms');
+                // TODO FIX return SSEKMSKeyId on CopyObject and test it
+
+                const head = await s3.headObject({ Bucket: copyBkt, Key: source }).promise();
+                // hardcoded SSE for copy bucket
+                
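// (copyBkt is configured with aws:kms and copyKmsKey in the suite setup)
+                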
assert.strictEqual(head.ServerSideEncryption, 'aws:kms'); + assert.strictEqual(head.SSEKMSKeyId, copyKmsKey); + + const get = await s3.getObject({ Bucket: copyBkt, Key: source }).promise(); + assert.strictEqual(get.Body.toString(), objForCopy.body); + }); + + it(`should CopyObject ${obj.name} into same bucket with SSE config`, async () => { + // if SSE not provided it uses bucket SSE + const copyKey = `${obj.name}-copy`; + const copied = await s3.copyObject({ + ...putObjParams(bkt.name, copyKey, objConf, obj.kmsKey), + CopySource: `${bkt.name}/${obj.name}`, + }).promise(); + + assert.strictEqual(copied.ServerSideEncryption, (objConf.algo || bktConf.algo)); + // TODO FIX return SSEKMSKeyId on CopyObject and test it + + void await assertObjectSSE(bkt.name, copyKey, objConf, obj, bktConf, bkt, null, obj.body); + }); + + it(`should CopyObject from encrypted destination into ${obj.name}`, async () => { + // if SSE not provided it uses bucket SSE + const source = `${copyBkt}/${copyObj}`; + const copyKey = `${obj.name}-copy-from`; + const copied = await s3.copyObject({ + ...putObjParams(bkt.name, copyKey, objConf, obj.kmsKey), + CopySource: source, + }).promise(); + + assert.strictEqual(copied.ServerSideEncryption, (objConf.algo || bktConf.algo)); + // TODO FIX return SSEKMSKeyId on CopyObject and test it + + void await assertObjectSSE(bkt.name, copyKey, objConf, obj, bktConf, bkt, null, 'BODY(copy)'); + }); + + it(`should PutObject versioned with SSE ${obj.name}`, async () => { + // ensure versioned bucket is empty + void await bucketUtil.empty(bkt.vname); + let { Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise(); + // regularly count versioned objects + assert.strictEqual(Versions.length, 0); + + const bodyBase = `BODY(${obj.name})-base`; + void await putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyBase); + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, null, bodyBase); + ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 1); + + void await s3.putBucketVersioning({ Bucket: bkt.vname, + VersioningConfiguration: { Status: 'Enabled' }, + }).promise(); + + const bodyV1 = `BODY(${obj.name})-v1`; + const v1 = await putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyV1); + const bodyV2 = `BODY(${obj.name})-v2`; + const v2 = await putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyV2); + ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + + const current = await s3.headObject({ Bucket: bkt.vname, Key: obj.name }).promise(); + assert.strictEqual(current.VersionId, v2.VersionId); // ensure versioning as expected + ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, null, bodyV2); // v2 + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, 'null', bodyBase); + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, v1.VersionId, bodyV1); + void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, v2.VersionId, bodyV2); + ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + + void await s3.putBucketVersioning({ Bucket: bkt.vname, + VersioningConfiguration: { Status: 'Suspended' }, + }).promise(); + + 
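// with versioning suspended, the next PUT overwrites the 'null' version in place
+                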
// should be fine after version suspension
+                void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, null, bodyV2); // v2
+                void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, 'null', bodyBase);
+                void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, v1.VersionId, bodyV1);
+                void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, v2.VersionId, bodyV2);
+                ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise());
+                assert.strictEqual(Versions.length, 3);
+
+                // put a new null version
+                const bodyFinal = `BODY(${obj.name})-final`;
+                void await putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyFinal);
+                void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, null, bodyFinal); // null
+                void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, 'null', bodyFinal);
+                ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise());
+                assert.strictEqual(Versions.length, 3);
+            });
+        }));
+    }));
+
+    it('should prepare encrypted MPU and copy parts from every bucket and object in the matrix without completion', async () => {
+        void await s3.putBucketEncryption({
+            Bucket: mpuCopyBkt,
+            // AES256 because input key is broken for now
+            ServerSideEncryptionConfiguration: hydrateSSEConfig({ algo: 'AES256' }),
+        }).promise();
+        const mpuKey = 'mpucopy';
+        const mpu = await s3.createMultipartUpload(
+            putObjParams(mpuCopyBkt, mpuKey, {}, null)).promise();
+        const copyPartArg = {
+            UploadId: mpu.UploadId,
+            Bucket: mpuCopyBkt,
+            Key: mpuKey,
+        };
+        // For each test-case bucket and object, copy a part
+        const uploadPromises = testCases.reduce((acc, bktConf, bktIdx) => {
+            const bkt = bkts[bktConf.name];
+
+            return acc.concat(testCasesObj.map(async (objConf, objIdx) => {
+                const obj = bkt.objs[objConf.name];
+
+                const partNumber = bktIdx * testCasesObj.length + objIdx + 1;
+                const res = await s3.uploadPartCopy({
+                    ...copyPartArg,
+                    PartNumber: partNumber,
+                    CopySource: `${bkt.name}/${obj.name}`,
+                }).promise();
+
+                return { partNumber, body: obj.body, res: res.CopyPartResult };
+            }));
+        }, []);
+
+        const parts = await Promise.all(uploadPromises);
+
+        // void await s3.completeMultipartUpload({
+        //     UploadId: mpu.UploadId,
+        //     Bucket: mpuCopyBkt,
+        //     Key: mpuKey,
+        //     MultipartUpload: {
+        //         Parts: parts.map(part => ({ PartNumber: part.partNumber, ETag: part.res.ETag })),
+        //     },
+        // }).promise();
+        // const fullBody = parts.reduce((acc, part) => `${acc}${part.body}`, '');
+        // void await assertObjectSSE(mpuCopyBkt, mpuKey, {}, {}, { algo: 'AES256' }, {}, null, fullBody);
+    });
+});
diff --git a/tests/functional/sse-kms-migration/cleanup.js b/tests/functional/sse-kms-migration/cleanup.js
new file mode 100644
index 0000000000..d6e561fe4b
--- /dev/null
+++ b/tests/functional/sse-kms-migration/cleanup.js
@@ -0,0 +1,79 @@
+/* eslint-disable */
+const getConfig = require('../aws-node-sdk/test/support/config');
+const { S3 } = require('aws-sdk');
+const { promisify } = require('util');
+const BucketUtility = require('../aws-node-sdk/lib/utility/bucket-util');
+
+const metadata = require('../../../lib/metadata/wrapper');
+
+// use the file KMS backend to define keys with an arn prefix; if no prefix is wanted, mem is used
+
+// copy part of aws-node-sdk/test/object/encryptionHeaders.js and add more tests
+// around SSE Key prefix and migration
+// always getObject to ensure decryption
+
+const testCases = [
+    {
+        name: 'algo-none',
+        // as the init inserts objects with each encryption
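+        // (each bucket gets objects named for-copy-enc-obj-<case>, including an AES256 one)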
+        // this bucket will have a non mandatory AES256
+    },
+    {
+        name: 'algo-none-del-sse',
+        /** flag to remove non mandatory AES256 SSE from bucket MD beforeEach test */
+        deleteSSE: true,
+    },
+    {
+        name: 'algo-aes256',
+        algo: 'AES256',
+    },
+    {
+        name: 'algo-awskms',
+        algo: 'aws:kms',
+    },
+    {
+        name: 'algo-awskms-key',
+        algo: 'aws:kms',
+        masterKeyId: true,
+    },
+    {
+        name: 'algo-awskms-key-arnprefix',
+        algo: 'aws:kms',
+        masterKeyId: true,
+        arnPrefix: true,
+    },
+];
+
+const config = getConfig('vault', { signatureVersion: 'v4' });
+const s3 = new S3(config);
+const bucketUtil = new BucketUtility('vault');
+
+async function cleanup(Bucket) {
+    try {
+        void await bucketUtil.empty(Bucket);
+        void await s3.deleteBucket({ Bucket }).promise();
+    } catch (e) {
+        console.log('Ignore error for', Bucket, e.toString());
+    }
+}
+
+describe('SSE KMS Cleanup', () => {
+    /** Bucket to test CopyObject from and to */
+    const copyBkt = 'enc-bkt-copy';
+    const mpuCopyBkt = 'enc-bkt-mpu-copy';
+
+    it('Empty and delete buckets for SSE KMS Migration', async () => {
+        console.log('cleanup');
+        void await promisify(metadata.setup.bind(metadata))();
+
+        try {
+            // pre cleanup
+            void await cleanup(copyBkt);
+            void await cleanup(mpuCopyBkt);
+            void await Promise.all(testCases.map(async bktConf => {
+                void await cleanup(`enc-bkt-${bktConf.name}`);
+                return await cleanup(`versioned-enc-bkt-${bktConf.name}`);
+            }));
+        } catch (e) { void e; }
+    });
+});
diff --git a/tests/functional/sse-kms-migration/config.before.json b/tests/functional/sse-kms-migration/config.before.json
new file mode 100644
index 0000000000..841aa341c7
--- /dev/null
+++ b/tests/functional/sse-kms-migration/config.before.json
@@ -0,0 +1,91 @@
+{
+    "port": 8000,
+    "listenOn": [],
+    "metricsPort": 8002,
+    "metricsListenOn": [],
+    "replicationGroupId": "RG001",
+    "restEndpoints": {
+        "localhost": "us-east-1",
+        "127.0.0.1": "us-east-1",
+        "cloudserver-front": "us-east-1",
+        "s3.docker.test": "us-east-1",
+        "127.0.0.2": "us-east-1",
+        "s3.amazonaws.com": "us-east-1"
+    },
+    "websiteEndpoints": ["s3-website-us-east-1.amazonaws.com",
+        "s3-website.us-east-2.amazonaws.com",
+        "s3-website-us-west-1.amazonaws.com",
+        "s3-website-us-west-2.amazonaws.com",
+        "s3-website.ap-south-1.amazonaws.com",
+        "s3-website.ap-northeast-2.amazonaws.com",
+        "s3-website-ap-southeast-1.amazonaws.com",
+        "s3-website-ap-southeast-2.amazonaws.com",
+        "s3-website-ap-northeast-1.amazonaws.com",
+        "s3-website.eu-central-1.amazonaws.com",
+        "s3-website-eu-west-1.amazonaws.com",
+        "s3-website-sa-east-1.amazonaws.com",
+        "s3-website.localhost",
+        "s3-website.scality.test"],
+    "replicationEndpoints": [{
+        "site": "zenko",
+        "servers": ["127.0.0.1:8000"],
+        "default": true
+    }, {
+        "site": "us-east-2",
+        "type": "aws_s3"
+    }],
+    "cdmi": {
+        "host": "localhost",
+        "port": 81,
+        "path": "/dewpoint",
+        "readonly": true
+    },
+    "bucketd": {
+        "bootstrap": ["localhost:9000"]
+    },
+    "vaultd": {
+        "host": "localhost",
+        "port": 8500
+    },
+    "clusters": 10,
+    "log": {
+        "logLevel": "info",
+        "dumpLevel": "error"
+    },
+    "healthChecks": {
+        "allowFrom": ["127.0.0.1/8", "::1"]
+    },
+    "metadataClient": {
+        "host": "localhost",
+        "port": 9990
+    },
+    "dataClient": {
+        "host": "localhost",
+        "port": 9991
+    },
+    "metadataDaemon": {
+        "bindAddress": "localhost",
+        "port": 9990
+    },
+    "dataDaemon": {
+        "bindAddress": "localhost",
+        "port": 9991
+    },
+    "recordLog": {
+        "enabled": false,
+        "recordLogName": "s3-recordlog"
+    },
+    "requests": {
+        "viaProxy": false,
+        "trustedProxyCIDRs": [],
+        
"extractClientIPFromHeader": "" + }, + "bucketNotificationDestinations": [ + { + "resource": "target1", + "type": "dummy", + "host": "localhost:6000" + } + ], + "defaultEncryptionKeyPerAccount": true +} diff --git a/tests/functional/sse-kms-migration/config.sseMigration.hideArn.json b/tests/functional/sse-kms-migration/config.sseMigration.hideArn.json new file mode 100644 index 0000000000..498f189fed --- /dev/null +++ b/tests/functional/sse-kms-migration/config.sseMigration.hideArn.json @@ -0,0 +1,105 @@ +{ + "port": 8000, + "listenOn": [], + "metricsPort": 8002, + "metricsListenOn": [], + "replicationGroupId": "RG001", + "restEndpoints": { + "localhost": "us-east-1", + "127.0.0.1": "us-east-1", + "cloudserver-front": "us-east-1", + "s3.docker.test": "us-east-1", + "127.0.0.2": "us-east-1", + "s3.amazonaws.com": "us-east-1" + }, + "websiteEndpoints": ["s3-website-us-east-1.amazonaws.com", + "s3-website.us-east-2.amazonaws.com", + "s3-website-us-west-1.amazonaws.com", + "s3-website-us-west-2.amazonaws.com", + "s3-website.ap-south-1.amazonaws.com", + "s3-website.ap-northeast-2.amazonaws.com", + "s3-website-ap-southeast-1.amazonaws.com", + "s3-website-ap-southeast-2.amazonaws.com", + "s3-website-ap-northeast-1.amazonaws.com", + "s3-website.eu-central-1.amazonaws.com", + "s3-website-eu-west-1.amazonaws.com", + "s3-website-sa-east-1.amazonaws.com", + "s3-website.localhost", + "s3-website.scality.test"], + "replicationEndpoints": [{ + "site": "zenko", + "servers": ["127.0.0.1:8000"], + "default": true + }, { + "site": "us-east-2", + "type": "aws_s3" + }], + "cdmi": { + "host": "localhost", + "port": 81, + "path": "/dewpoint", + "readonly": true + }, + "bucketd": { + "bootstrap": ["localhost:9000"] + }, + "vaultd": { + "host": "localhost", + "port": 8500 + }, + "clusters": 10, + "log": { + "logLevel": "debug", + "dumpLevel": "error" + }, + "healthChecks": { + "allowFrom": ["127.0.0.1/8", "::1"] + }, + "metadataClient": { + "host": "localhost", + "port": 9990 + }, + "dataClient": { + "host": "localhost", + "port": 9991 + }, + "metadataDaemon": { + "bindAddress": "localhost", + "port": 9990 + }, + "dataDaemon": { + "bindAddress": "localhost", + "port": 9991 + }, + "recordLog": { + "enabled": false, + "recordLogName": "s3-recordlog" + }, + "requests": { + "viaProxy": false, + "trustedProxyCIDRs": [], + "extractClientIPFromHeader": "" + }, + "bucketNotificationDestinations": [ + { + "resource": "target1", + "type": "dummy", + "host": "localhost:6000" + } + ], + "defaultEncryptionKeyPerAccount": true, + "kmsHideScalityArn": true, + "sseMigration": { + "previousKeyType": "internal", + "previousKeyProtocol": "file", + "previousKeyProvider": "scality" + }, + "kmsAWS": { + "noAwsArn": true, + "providerName": "local", + "region": "us-east-1", + "endpoint": "http://0:8080", + "ak": "456", + "sk": "123" + } +} diff --git a/tests/functional/sse-kms-migration/config.sseMigration.showArn.json b/tests/functional/sse-kms-migration/config.sseMigration.showArn.json new file mode 100644 index 0000000000..e271e297f9 --- /dev/null +++ b/tests/functional/sse-kms-migration/config.sseMigration.showArn.json @@ -0,0 +1,105 @@ +{ + "port": 8000, + "listenOn": [], + "metricsPort": 8002, + "metricsListenOn": [], + "replicationGroupId": "RG001", + "restEndpoints": { + "localhost": "us-east-1", + "127.0.0.1": "us-east-1", + "cloudserver-front": "us-east-1", + "s3.docker.test": "us-east-1", + "127.0.0.2": "us-east-1", + "s3.amazonaws.com": "us-east-1" + }, + "websiteEndpoints": ["s3-website-us-east-1.amazonaws.com", + 
"s3-website.us-east-2.amazonaws.com", + "s3-website-us-west-1.amazonaws.com", + "s3-website-us-west-2.amazonaws.com", + "s3-website.ap-south-1.amazonaws.com", + "s3-website.ap-northeast-2.amazonaws.com", + "s3-website-ap-southeast-1.amazonaws.com", + "s3-website-ap-southeast-2.amazonaws.com", + "s3-website-ap-northeast-1.amazonaws.com", + "s3-website.eu-central-1.amazonaws.com", + "s3-website-eu-west-1.amazonaws.com", + "s3-website-sa-east-1.amazonaws.com", + "s3-website.localhost", + "s3-website.scality.test"], + "replicationEndpoints": [{ + "site": "zenko", + "servers": ["127.0.0.1:8000"], + "default": true + }, { + "site": "us-east-2", + "type": "aws_s3" + }], + "cdmi": { + "host": "localhost", + "port": 81, + "path": "/dewpoint", + "readonly": true + }, + "bucketd": { + "bootstrap": ["localhost:9000"] + }, + "vaultd": { + "host": "localhost", + "port": 8500 + }, + "clusters": 10, + "log": { + "logLevel": "info", + "dumpLevel": "error" + }, + "healthChecks": { + "allowFrom": ["127.0.0.1/8", "::1"] + }, + "metadataClient": { + "host": "localhost", + "port": 9990 + }, + "dataClient": { + "host": "localhost", + "port": 9991 + }, + "metadataDaemon": { + "bindAddress": "localhost", + "port": 9990 + }, + "dataDaemon": { + "bindAddress": "localhost", + "port": 9991 + }, + "recordLog": { + "enabled": false, + "recordLogName": "s3-recordlog" + }, + "requests": { + "viaProxy": false, + "trustedProxyCIDRs": [], + "extractClientIPFromHeader": "" + }, + "bucketNotificationDestinations": [ + { + "resource": "target1", + "type": "dummy", + "host": "localhost:6000" + } + ], + "defaultEncryptionKeyPerAccount": true, + "kmsHideScalityArn": false, + "sseMigration": { + "previousKeyType": "internal", + "previousKeyProtocol": "file", + "previousKeyProvider": "scality" + }, + "kmsAWS": { + "noAwsArn": true, + "providerName": "local", + "region": "us-east-1", + "endpoint": "http://0:8080", + "ak": "456", + "sk": "123" + } +} diff --git a/tests/functional/sse-kms-migration/migration.js b/tests/functional/sse-kms-migration/migration.js new file mode 100644 index 0000000000..fe4b7a65d1 --- /dev/null +++ b/tests/functional/sse-kms-migration/migration.js @@ -0,0 +1,746 @@ +/* eslint-disable */ +const getConfig = require('../aws-node-sdk/test/support/config'); +const { S3 } = require('aws-sdk'); +const kms = require('../../../lib/kms/wrapper'); +const filekms = require('../../../lib/kms/file/backend'); +const { promisify } = require('util'); +const BucketInfo = require('arsenal').models.BucketInfo; +const { DummyRequestLogger } = require('../../unit/helpers'); +const BucketUtility = require('../aws-node-sdk/lib/utility/bucket-util'); +const assert = require('assert'); +const metadata = require('../../../lib/metadata/wrapper'); +const crypto = require('crypto'); +const log = new DummyRequestLogger(); +const { config } = require('../../../lib/Config'); +const { getKeyIdFromArn } = require('arsenal/build/lib/network/KMSInterface'); + +// use file to defined key in arn prefix, if no prefix mem is used + +// copy part of aws-node-sdk/test/object/encryptionHeaders.js and add more tests +// around SSE Key prefix and migration +// always getObject to ensure decryption + +function getKey(key) { + return config.kmsHideScalityArn ? 
getKeyIdFromArn(key) : key;
+}
+
+const testCases = [
+    {
+        name: 'algo-none',
+        // as the init inserts objects with each encryption
+        // this bucket will have a non mandatory AES256
+    },
+    {
+        name: 'algo-none-del-sse',
+        /** flag to remove non mandatory AES256 SSE from bucket MD beforeEach test */
+        deleteSSE: true,
+    },
+    {
+        name: 'algo-aes256',
+        algo: 'AES256',
+    },
+    {
+        name: 'algo-awskms',
+        algo: 'aws:kms',
+    },
+    {
+        name: 'algo-awskms-key',
+        algo: 'aws:kms',
+        masterKeyId: true,
+    },
+    {
+        name: 'algo-awskms-key-arnprefix',
+        algo: 'aws:kms',
+        masterKeyId: true,
+        arnPrefix: true,
+    },
+];
+const testCasesObj = testCases.filter(tc => !tc.deleteSSE);
+
+const s3config = getConfig('vault', { signatureVersion: 'v4' });
+const s3 = new S3(s3config);
+const bucketUtil = new BucketUtility('vault');
+
+kms.client._supportsDefaultKeyPerAccount = false; // To generate keys without vault account side effect
+
+// Fix for before migration run
+// if (!kms.arnPrefix) kms.arnPrefix = '';
+
+const fileKmsPrefix = filekms.backend.arnPrefix;
+
+function hydrateSSEConfig({ algo: SSEAlgorithm, masterKeyId: KMSMasterKeyID }) {
+    // stringify and parse to strip undefined values
+    return JSON.parse(JSON.stringify({ Rules: [{
+        ApplyServerSideEncryptionByDefault: {
+            SSEAlgorithm,
+            KMSMasterKeyID,
+        },
+    }] }));
+}
+
+function putObjParams(Bucket, Key, sseConfig, kmsKeyId) {
+    return {
+        Bucket,
+        Key,
+        ...(sseConfig.algo && {
+            ServerSideEncryption: sseConfig.algo,
+            ...(sseConfig.masterKeyId && {
+                SSEKMSKeyId: kmsKeyId,
+            }),
+        }),
+    };
+}
+
+const getBucketMD = promisify(metadata.getBucket.bind(metadata));
+const getObjectMD = promisify(metadata.getObjectMD.bind(metadata));
+const updateBucketMD = promisify(metadata.updateBucket.bind(metadata));
+
+async function getBucketSSE(Bucket) {
+    const sse = await s3.getBucketEncryption({ Bucket }).promise();
+    return sse
+        .ServerSideEncryptionConfiguration
+        .Rules[0]
+        .ApplyServerSideEncryptionByDefault;
+}
+
+async function getObjectMDSSE(Bucket, Key) {
+    // TODO: handle VersionId?
+    const objMD = await getObjectMD(Bucket, Key, {}, log);
+
+    const sse = objMD['x-amz-server-side-encryption'];
+    const key = objMD['x-amz-server-side-encryption-aws-kms-key-id'];
+
+    return {
+        ServerSideEncryption: sse,
+        SSEKMSKeyId: key,
+    };
+}
+
+async function putEncryptedObject(Bucket, Key, sseConfig, kmsKeyId, Body) {
+    return s3.putObject({
+        ...putObjParams(Bucket, Key, sseConfig, kmsKeyId),
+        Body,
+    }).promise();
+}
+
+async function assertObjectSSEMigrationFILE(Bucket, Key, objConf, obj, bktConf, bkt, VersionId, Body) {
+    const sseMD = await getObjectMDSSE(Bucket, Key);
+    const head = await s3.headObject({ Bucket, Key, VersionId }).promise();
+    const sseMDMigrated = await getObjectMDSSE(Bucket, Key);
+    const expectedKey = `${sseMD.SSEKMSKeyId && sseMD.SSEKMSKeyId.startsWith('arn:scality:kms')
+        ? 
'' : fileKmsPrefix}${sseMD.SSEKMSKeyId}`;
+
+    if (sseMD.SSEKMSKeyId) {
+        // assert.doesNotMatch(sseMD.SSEKMSKeyId, /^arn:scality:kms/);
+    }
+
+    // obj precedence over bkt
+    assert.strictEqual(head.ServerSideEncryption, (objConf.algo || bktConf.algo));
+
+    if (sseMDMigrated.SSEKMSKeyId) {
+        assert.strictEqual(sseMDMigrated.SSEKMSKeyId, expectedKey);
+    }
+
+    if (obj.kmsKey) {
+        assert.strictEqual(head.SSEKMSKeyId, getKey(expectedKey));
+    } else if (objConf.algo !== 'AES256' && bkt.kmsKey) {
+        assert.strictEqual(head.SSEKMSKeyId, getKey(expectedKey));
+    } else if (head.ServerSideEncryption === 'aws:kms') {
+        // We differ from aws behavior and always return a
+        // masterKeyId even when not explicitly configured.
+        assert.strictEqual(head.SSEKMSKeyId, getKey(expectedKey));
+    } else {
+        assert.strictEqual(head.SSEKMSKeyId, undefined);
+    }
+
+    // always verify GetObject as well to ensure accurate decryption
+    const get = await s3.getObject({ Bucket, Key, ...(VersionId && { VersionId }) }).promise();
+    assert.strictEqual(get.Body.toString(), Body);
+}
+
+async function assertObjectSSEMigration(Bucket, Key, objConf, obj, bktConf, bkt, VersionId, Body) {
+    const sseMD = await getObjectMDSSE(Bucket, Key);
+    const head = await s3.headObject({ Bucket, Key, VersionId }).promise();
+    const sseMDMigrated = await getObjectMDSSE(Bucket, Key);
+    const expectedKey = `${sseMD.SSEKMSKeyId && sseMD.SSEKMSKeyId.startsWith('arn:scality:kms')
+        ? '' : kms.arnPrefix}${sseMD.SSEKMSKeyId}`;
+
+    if (sseMD.SSEKMSKeyId) {
+        // assert.doesNotMatch(sseMD.SSEKMSKeyId, /^arn:scality:kms/);
+    }
+
+    // obj precedence over bkt
+    assert.strictEqual(head.ServerSideEncryption, (objConf.algo || bktConf.algo));
+
+    if (sseMDMigrated.SSEKMSKeyId) {
+        assert.strictEqual(sseMDMigrated.SSEKMSKeyId, expectedKey);
+    }
+
+    if (obj.kmsKey) {
+        assert.strictEqual(head.SSEKMSKeyId, getKey(expectedKey));
+    } else if (objConf.algo !== 'AES256' && bkt.kmsKey) {
+        assert.strictEqual(head.SSEKMSKeyId, getKey(expectedKey));
+    } else if (head.ServerSideEncryption === 'aws:kms') {
+        // We differ from aws behavior and always return a
+        // masterKeyId even when not explicitly configured.
+        assert.strictEqual(head.SSEKMSKeyId, getKey(expectedKey));
+    } else {
+        assert.strictEqual(head.SSEKMSKeyId, undefined);
+    }
+
+    // always verify GetObject as well to ensure accurate decryption
+    const get = await s3.getObject({ Bucket, Key, ...(VersionId && { VersionId }) }).promise();
+    assert.strictEqual(get.Body.toString(), Body);
+}
+
+/** for kms createBucketKey */
+const bucketInfo = new BucketInfo('enc-bucket-test', 'OwnerId',
+    'OwnerDisplayName', new Date().toJSON());
+
+describe('SSE KMS migration', () => {
+    /** Bucket to test CopyObject from and to */
+    const copyBkt = 'enc-bkt-copy';
+    const copyObj = 'copy-obj';
+    let copyKmsKey;
+    const bkts = {};
+    const mpuCopyBkt = 'enc-bkt-mpu-copy';
+
+    this.checkInitBucket = async function checkInitBucket(bktConf) {
+        const bkt = {
+            name: `enc-bkt-${bktConf.name}`,
+            /** versioned bucket name */
+            vname: `versioned-enc-bkt-${bktConf.name}`,
+            kmsKeyInfo: null,
+            kmsKey: null,
+            /** for use as copy source, include an object with each encryption */
+            objs: {},
+        };
+        bkts[bktConf.name] = bkt;
+        if (bktConf.algo && bktConf.masterKeyId) {
+            bkt.kmsKeyInfo = await promisify(kms.createBucketKey)(bucketInfo, log);
+            bkt.kmsKey = bktConf.arnPrefix
+                ? 
bkt.kmsKeyInfo.masterKeyArn
+                : bkt.kmsKeyInfo.masterKeyId;
+        }
+        void await s3.headBucket({ Bucket: bkt.name }).promise();
+        void await s3.headBucket({ Bucket: bkt.vname }).promise();
+        if (bktConf.algo) {
+            // bucket encryption will be asserted in bucket test
+            const bktSSE = await getBucketSSE(bkt.name);
+            assert.strictEqual(bktSSE.SSEAlgorithm, bktConf.algo);
+            if (bktSSE.KMSMasterKeyID) {
+                // assert.doesNotMatch(bktSSE.KMSMasterKeyID, /^arn:scality:kms/);
+            }
+
+            const vbktSSE = await getBucketSSE(bkt.vname);
+            assert.strictEqual(vbktSSE.SSEAlgorithm, bktConf.algo);
+            if (vbktSSE.KMSMasterKeyID) {
+                // assert.doesNotMatch(vbktSSE.KMSMasterKeyID, /^arn:scality:kms/);
+            }
+        }
+
+        // Put an object for each SSE conf in each bucket
+        void await Promise.all(testCases.map(async objConf => {
+            const obj = {
+                name: `for-copy-enc-obj-${objConf.name}`,
+                kmsKeyInfo: null,
+                kmsKey: null,
+                body: `BODY(for-copy-enc-obj-${objConf.name})`,
+            };
+            bkt.objs[objConf.name] = obj;
+            if (objConf.algo && objConf.masterKeyId) {
+                obj.kmsKeyInfo = await promisify(kms.createBucketKey)(bucketInfo, log);
+                obj.kmsKey = objConf.arnPrefix
+                    ? obj.kmsKeyInfo.masterKeyArn
+                    : obj.kmsKeyInfo.masterKeyId;
+            }
+            const objSSE = await getObjectMDSSE(bkt.name, obj.name);
+            assert.strictEqual(objSSE.ServerSideEncryption, objConf.algo || bktConf.algo || '');
+            // assert.doesNotMatch(objSSE.SSEKMSKeyId, /^arn:scality:kms/);
+            return undefined;
+        }));
+    };
+
+    before(async () => {
+        copyKmsKey = (await promisify(kms.createBucketKey)(bucketInfo, log)).masterKeyArn;
+        void await promisify(metadata.setup.bind(metadata))();
+
+        void await s3.headBucket({ Bucket: copyBkt }).promise();
+        void await s3.headBucket({ Bucket: mpuCopyBkt }).promise();
+        const copySSE = await s3.getBucketEncryption({ Bucket: copyBkt }).promise();
+        const { SSEAlgorithm, KMSMasterKeyID } = copySSE
+            .ServerSideEncryptionConfiguration.Rules[0].ApplyServerSideEncryptionByDefault;
+        assert.strictEqual(SSEAlgorithm, 'aws:kms');
+        // assert.doesNotMatch(KMSMasterKeyID, /^arn:scality:kms/);
+
+        // Check that the before-run prepared every bucket with 1 object (for copy)
+        void await Promise.all(testCases.map(async bktConf => this.checkInitBucket(bktConf)));
+    });
+
+    testCases.forEach(bktConf => describe(`bucket enc-bkt-${bktConf.name}`, () => {
+        let bkt = bkts[bktConf.name];
+
+        before(() => {
+            bkt = bkts[bktConf.name];
+        });
+
+        if (bktConf.deleteSSE) {
+            beforeEach(async () => {
+                const bucketMD = await getBucketMD(bkt.name, log);
+                if (bucketMD.getServerSideEncryption()) {
+                    bucketMD.setServerSideEncryption(null);
+                    void await updateBucketMD(bucketMD.getName(), bucketMD, log);
+                }
+            });
+        }
+
+        if (!bktConf.algo) {
+            it('GetBucketEncryption should return ServerSideEncryptionConfigurationNotFoundError', async () => {
+                void await assert.rejects(s3.getBucketEncryption({ Bucket: bkt.name }).promise(), err => {
+                    assert.strictEqual(err.code, 'ServerSideEncryptionConfigurationNotFoundError');
+                    return true;
+                });
+            });
+
+            if (!bktConf.deleteSSE) {
+                it('should have non mandatory SSE in bucket MD as test init put an object with AES256', async () => {
+                    const bucketMD = await getBucketMD(bkt.name, log);
+                    const sseMD = bucketMD.getServerSideEncryption();
+                    assert.strictEqual(sseMD.mandatory, false);
+                    assert.strictEqual(sseMD.algorithm, 'AES256');
+                    assert.doesNotMatch(sseMD.masterKeyId, /^arn:scality:kms/);
+                });
+            }
+        } else {
+            it('ensure old SSE KMS key setup', async () => {
+                const bucketMD = await getBucketMD(bkt.name, log);
+                const sseMD = 
bucketMD.getServerSideEncryption(); + const sseS3 = await getBucketSSE(bkt.name); + + assert.strictEqual(sseS3.SSEAlgorithm, bktConf.algo); + assert.strictEqual(sseMD.algorithm, bktConf.algo); + if (!bktConf.masterKeyId) { + // AES256 or aws:kms without keyId + assert.doesNotMatch(sseMD.masterKeyId, /^arn:scality:kms/); + } + if (bktConf.masterKeyId) { + // arn prefixed even if not prefixed in input + assert.doesNotMatch(sseMD.configuredMasterKeyId, /^arn:scality:kms/); + assert.doesNotMatch(sseS3.KMSMasterKeyID, /^arn:scality:kms/); + } + }); + } + + testCasesObj.forEach(objConf => it(`should have pre uploaded object with SSE ${objConf.name}`, async () => { + const obj = bkt.objs[objConf.name]; + void await assertObjectSSEMigrationFILE(bkt.name, obj.name, objConf, obj, bktConf, bkt, null, obj.body); + })); + + testCasesObj.forEach(objConf => describe(`object enc-obj-${objConf.name}`, () => { + const obj = { + name: `enc-obj-${objConf.name}`, + kmsKeyInfo: null, + kmsKey: null, + body: `BODY(enc-obj-${objConf.name})`, + }; + /** to be used as source of copy */ + let objForCopy; + + before(async () => { + if (objConf.algo && objConf.masterKeyId) { + obj.kmsKeyInfo = await promisify(kms.createBucketKey)(bucketInfo, log); + obj.kmsKey = objConf.arnPrefix + ? obj.kmsKeyInfo.masterKeyArn + : obj.kmsKeyInfo.masterKeyId; + } + objForCopy = bkt.objs[objConf.name]; + }); + + it(`should PutObject ${obj.name} overriding bucket SSE`, async () => { + void await putEncryptedObject(bkt.name, obj.name, objConf, obj.kmsKey, obj.body); + // void await assertObjectSSE(bkt.name, obj.name, objConf, obj, bktConf, bkt, null, obj.body); + }); + + // TODO S3C-9996 Fix MPU & SSE to unskip this + // Should as well fix the output of CreateMPU and UploadPart to include the SSE + // and validate the SSE output here and check with listPart & listMultipartUploads as well + const optionalSkip = objConf.algo || bktConf.masterKeyId || (!bktConf.algo && !bktConf.deleteSSE) + ? 
it.skip
+                : it;
+            optionalSkip('should migrate completed MPU', async () => {
+                const mpuKey = `${obj.name}-mpu`;
+
+                const fullBody = `${obj.body}-MPU1${obj.body}-MPU2`;
+                void await assertObjectSSEMigrationFILE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody);
+            });
+
+            optionalSkip('should migrate completed MPU that had copy', async () => {
+                const mpuKey = `${obj.name}-mpucopy`;
+                const fullBody = `BODY(copy)${obj.body}-MPU2`;
+                void await assertObjectSSEMigrationFILE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody);
+            });
+
+            optionalSkip('should migrate completed MPU that had byte range copy', async () => {
+                const mpuKey = `${obj.name}-mpucopyrange`;
+                const fullBody = 'copyBODY';
+                void await assertObjectSSEMigrationFILE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody);
+            });
+            const mpus = {};
+            before('retrieve MPUs', async () => {
+                const listed = await s3.listMultipartUploads({ Bucket: bkt.name }).promise();
+                assert.strictEqual(listed.IsTruncated, false, 'Too many MPUs, need to loop on pagination');
+                for (const mpu of listed.Uploads) {
+                    mpus[mpu.Key] = mpu.UploadId;
+                }
+            });
+
+            optionalSkip('should complete the empty encrypted MPU prepared before migration', async () => {
+                const mpuKey = `${obj.name}-migration-mpu-empty`;
+                const uploadId = mpus[mpuKey];
+                assert(uploadId, 'Missing MPU, it should have been prepared before');
+
+                const existingParts = await s3.listParts({
+                    Bucket: bkt.name, Key: mpuKey, UploadId: uploadId }).promise();
+                const partCount = (existingParts.Parts || []).length || 0;
+                assert.strictEqual(existingParts.IsTruncated, false, 'Too many parts, need to loop on pagination');
+                assert.strictEqual(partCount, 0);
+
+                const part1 = await s3.uploadPart({
+                    UploadId: uploadId,
+                    Bucket: bkt.name,
+                    Body: `${obj.body}-MPU1`,
+                    Key: mpuKey,
+                    PartNumber: 1,
+                }).promise();
+                // console.log('part1', part1);
+                const part2 = await s3.uploadPart({
+                    UploadId: uploadId,
+                    Bucket: bkt.name,
+                    Body: `${obj.body}-MPU2`,
+                    Key: mpuKey,
+                    PartNumber: 2,
+                }).promise();
+                // console.log('part2', part2);
+                const complete = await s3.completeMultipartUpload({
+                    UploadId: uploadId,
+                    Bucket: bkt.name,
+                    Key: mpuKey,
+                    MultipartUpload: {
+                        Parts: [
+                            { PartNumber: 1, ETag: part1.ETag },
+                            { PartNumber: 2, ETag: part2.ETag },
+                        ],
+                    },
+                }).promise();
+                // console.log('complete', complete);
+                const fullBody = `${obj.body}-MPU1${obj.body}-MPU2`;
+                void await assertObjectSSEMigrationFILE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody);
+            });
+
+            optionalSkip('should put 2 encrypted parts and complete the encrypted MPU prepared before migration', async () => {
+                const mpuKey = `${obj.name}-migration-mpu`;
+                const uploadId = mpus[mpuKey];
+                assert(uploadId, 'Missing MPU, it should have been prepared before');
+
+                const existingParts = await s3.listParts({
+                    Bucket: bkt.name, Key: mpuKey, UploadId: uploadId }).promise();
+                const partCount = (existingParts.Parts || []).length || 0;
+                assert.strictEqual(existingParts.IsTruncated, false, 'Too many parts, need to loop on pagination');
+                assert.strictEqual(partCount, 2);
+                // console.log('existingParts', existingParts);
+
+                const part1 = await s3.uploadPart({
+                    UploadId: uploadId,
+                    Bucket: bkt.name,
+                    Body: `${obj.body}-MPU1`,
+                    Key: mpuKey,
+                    PartNumber: partCount + 1,
+                }).promise();
+                // console.log('part1', part1);
+                const part2 = await s3.uploadPart({
+                    UploadId: uploadId,
+                    Bucket: bkt.name,
+                    Body: `${obj.body}-MPU2`,
+                    Key: mpuKey,
+                    PartNumber: partCount + 2,
+                }).promise();
+                // 
console.log('part2', part2);
+                const complete = await s3.completeMultipartUpload({
+                    UploadId: uploadId,
+                    Bucket: bkt.name,
+                    Key: mpuKey,
+                    MultipartUpload: {
+                        Parts: [
+                            ...existingParts.Parts.map(part => ({ PartNumber: part.PartNumber, ETag: part.ETag })),
+                            { PartNumber: partCount + 1, ETag: part1.ETag },
+                            { PartNumber: partCount + 2, ETag: part2.ETag },
+                        ],
+                    },
+                }).promise();
+                // console.log('complete', complete);
+                const fullBody = `${obj.body}-MPU1${obj.body}-MPU2`.repeat(2);
+                void await assertObjectSSEMigrationFILE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody);
+            });
+
+            optionalSkip('should copy an encrypted part and complete the encrypted MPU prepared before migration', async () => {
+                const mpuKey = `${obj.name}-migration-mpucopy`;
+                const uploadId = mpus[mpuKey];
+                assert(uploadId, 'Missing MPU, it should have been prepared before');
+
+                const existingParts = await s3.listParts({
+                    Bucket: bkt.name, Key: mpuKey, UploadId: uploadId }).promise();
+                const partCount = (existingParts.Parts || []).length || 0;
+                assert.strictEqual(existingParts.IsTruncated, false, 'Too many parts, need to loop on pagination');
+                assert.strictEqual(partCount, 2);
+                // console.log('existingParts', existingParts);
+
+                const part1 = await s3.uploadPartCopy({
+                    UploadId: uploadId,
+                    Bucket: bkt.name,
+                    Key: mpuKey,
+                    PartNumber: partCount + 1,
+                    CopySource: `${copyBkt}/${copyObj}`,
+                }).promise();
+                // console.log('part1', part1);
+                const part2 = await s3.uploadPart({
+                    UploadId: uploadId,
+                    Bucket: bkt.name,
+                    Body: `${obj.body}-MPU2`,
+                    Key: mpuKey,
+                    PartNumber: partCount + 2,
+                }).promise();
+                // console.log('part2', part2);
+
+                const complete = await s3.completeMultipartUpload({
+                    UploadId: uploadId,
+                    Bucket: bkt.name,
+                    Key: mpuKey,
+                    MultipartUpload: {
+                        Parts: [
+                            ...existingParts.Parts.map(part => ({ PartNumber: part.PartNumber, ETag: part.ETag })),
+                            { PartNumber: partCount + 1, ETag: part1.ETag },
+                            { PartNumber: partCount + 2, ETag: part2.ETag },
+                        ],
+                    },
+                }).promise();
+                // console.log('complete', complete);
+                const fullBody = `BODY(copy)${obj.body}-MPU2`.repeat(2);
+                void await assertObjectSSEMigrationFILE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody);
+            });
+
+            optionalSkip('should copy encrypted range parts and complete the encrypted MPU prepared before migration', async () => {
+                const mpuKey = `${obj.name}-migration-mpucopyrange`;
+                const uploadId = mpus[mpuKey];
+                assert(uploadId, 'Missing MPU, it should have been prepared before');
+
+                const existingParts = await s3.listParts({
+                    Bucket: bkt.name, Key: mpuKey, UploadId: uploadId }).promise();
+                const partCount = (existingParts.Parts || []).length || 0;
+                assert.strictEqual(existingParts.IsTruncated, false, 'Too many parts, need to loop on pagination');
+                assert.strictEqual(partCount, 2);
+
+                // source body is "BODY(copy)"
+                const part1 = await s3.uploadPartCopy({
+                    UploadId: uploadId,
+                    Bucket: bkt.name,
+                    Key: mpuKey,
+                    PartNumber: partCount + 1,
+                    CopySource: `${copyBkt}/${copyObj}`,
+                    CopySourceRange: 'bytes=5-8', // copy
+                }).promise();
+                // console.log('part1', part1);
+                const part2 = await s3.uploadPartCopy({
+                    UploadId: uploadId,
+                    Bucket: bkt.name,
+                    Key: mpuKey,
+                    PartNumber: partCount + 2,
+                    CopySource: `${copyBkt}/${copyObj}`,
+                    CopySourceRange: 'bytes=0-3', // BODY
+                }).promise();
+                // console.log('part2', part2);
+                const complete = await s3.completeMultipartUpload({
+                    UploadId: uploadId,
+                    Bucket: bkt.name,
+                    Key: mpuKey,
+                    MultipartUpload: {
+                        Parts: [
+                            
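// parts 1-2 were range-copied before the migration, parts 3-4 just above
+                            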
...existingParts.Parts.map(part => ({ PartNumber: part.PartNumber, ETag: part.ETag })), + { PartNumber: partCount + 1, ETag: part1.ETag }, + { PartNumber: partCount + 2, ETag: part2.ETag }, + ], + }, + }).promise(); + // console.log('complete', complete); + const fullBody = 'copyBODY'.repeat(2); + void await assertObjectSSEMigrationFILE(bkt.name, mpuKey, objConf, obj, bktConf, bkt, null, fullBody); + }); + + it(`should CopyObject ${obj.name} into encrypted destination bucket`, async () => { + // if SSE not provided it uses bucket SSE + const source = `${bkt.name}/${objForCopy.name}`; + const copied = await s3.copyObject({ + Bucket: copyBkt, + Key: source, + CopySource: source, + }).promise(); + + assert.strictEqual(copied.ServerSideEncryption, 'aws:kms'); + // TODO FIX return SSEKMSKeyId on CopyObject and test it + + const head = await s3.headObject({ Bucket: copyBkt, Key: source }).promise(); + // hardcoded SSE for copy bucket + assert.strictEqual(head.ServerSideEncryption, 'aws:kms'); + if (config.kmsHideScalityArn) { + assert.doesNotMatch(head.SSEKMSKeyId, /^arn:scality:kms/); + } else { + assert.match(head.SSEKMSKeyId, /^arn:scality:kms/); + } + + const get = await s3.getObject({ Bucket: copyBkt, Key: source }).promise(); + assert.strictEqual(get.Body.toString(), objForCopy.body); + }); + + it(`should CopyObject ${obj.name} into same bucket with SSE config`, async () => { + // if SSE not provided it uses bucket SSE + const copyKey = `${obj.name}-copy`; + const copied = await s3.copyObject({ + ...putObjParams(bkt.name, copyKey, objConf, obj.kmsKey), + CopySource: `${bkt.name}/${obj.name}`, + }).promise(); + // console.log('copied', copied); + + assert.strictEqual(copied.ServerSideEncryption, (objConf.algo || bktConf.algo)); + // TODO FIX return SSEKMSKeyId on CopyObject and test it + + void await assertObjectSSEMigration(bkt.name, copyKey, objConf, obj, bktConf, bkt, null, obj.body); + }); + + it(`should CopyObject from encrypted destination into ${obj.name}`, async () => { + // if SSE not provided it uses bucket SSE + const source = `${copyBkt}/${copyObj}`; + const copyKey = `${obj.name}-copy-from`; + const copied = await s3.copyObject({ + ...putObjParams(bkt.name, copyKey, objConf, obj.kmsKey), + CopySource: source, + }).promise(); + // console.log('copied', copied); + + assert.strictEqual(copied.ServerSideEncryption, (objConf.algo || bktConf.algo)); + // TODO FIX return SSEKMSKeyId on CopyObject and test it + + void await assertObjectSSEMigration(bkt.name, copyKey, objConf, obj, bktConf, bkt, null, 'BODY(copy)'); + }); + + // it(`should PutObject versioned with SSE ${obj.name}`, async () => { + // // ensure versioned bucket is empty + // void await bucketUtil.empty(bkt.vname); + // let { Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise(); + // // regularly count versioned objects + // assert.strictEqual(Versions.length, 0); + + // const bodyBase = `BODY(${obj.name})-base`; + // void await putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyBase); + // void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, null, bodyBase); + // ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + // assert.strictEqual(Versions.length, 1); + + // void await s3.putBucketVersioning({ Bucket: bkt.vname, + // VersioningConfiguration: { Status: 'Enabled' }, + // }).promise(); + + // const bodyV1 = `BODY(${obj.name})-v1`; + // const v1 = await putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyV1); + // const 
bodyV2 = `BODY(${obj.name})-v2`;
+            // const v2 = await putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyV2);
+            // ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise());
+            // assert.strictEqual(Versions.length, 3);
+
+            // const current = await s3.headObject({ Bucket: bkt.vname, Key: obj.name }).promise();
+            // assert.strictEqual(current.VersionId, v2.VersionId); // ensure versioning as expected
+            // ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise());
+            // assert.strictEqual(Versions.length, 3);
+
+            // void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, null, bodyV2); // v2
+            // void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, 'null', bodyBase);
+            // void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, v1.VersionId, bodyV1);
+            // void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, v2.VersionId, bodyV2);
+            // ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise());
+            // assert.strictEqual(Versions.length, 3);
+
+            // void await s3.putBucketVersioning({ Bucket: bkt.vname,
+            //     VersioningConfiguration: { Status: 'Suspended' },
+            // }).promise();
+
+            // // should be fine after version suspension
+            // void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, null, bodyV2); // v2
+            // void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, 'null', bodyBase);
+            // void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, v1.VersionId, bodyV1);
+            // void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, v2.VersionId, bodyV2);
+            // ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise());
+            // assert.strictEqual(Versions.length, 3);
+
+            // // put a new null version
+            // const bodyFinal = `BODY(${obj.name})-final`;
+            // void await putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyFinal);
+            // void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, null, bodyFinal); // null
+            // void await assertObjectSSE(bkt.vname, obj.name, objConf, obj, bktConf, bkt, 'null', bodyFinal);
+            // ({ Versions } = await s3.listObjectVersions({ Bucket: bkt.vname }).promise());
+            // assert.strictEqual(Versions.length, 3);
+            // });
+        }));
+    }));
+
+    it('should copy parts from every bucket and object in the matrix and complete the MPU prepared before migration', async () => {
+        const mpuKey = 'mpucopy';
+        const listed = await s3.listMultipartUploads({ Bucket: mpuCopyBkt }).promise();
+        assert.strictEqual(listed.IsTruncated, false, 'Too many MPUs, need to loop on pagination');
+        assert.strictEqual(listed.Uploads.length, 1, 'There should be only one MPU for global copy');
+        const uploadId = listed.Uploads[0].UploadId;
+        const copyPartArg = {
+            UploadId: uploadId,
+            Bucket: mpuCopyBkt,
+            Key: mpuKey,
+        };
+
+        const existingParts = await s3.listParts(copyPartArg).promise();
+        const partCount = (existingParts.Parts || []).length || 0;
+        assert.strictEqual(existingParts.IsTruncated, false, 'Too many parts, need to loop on pagination');
+        assert.strictEqual(partCount, testCases.length * testCasesObj.length);
+
+        // For each test-case bucket and object, copy a part
+        const uploadPromises = testCases.reduce((acc, bktConf, bktIdx) => {
+            const bkt = bkts[bktConf.name];
+
+            return acc.concat(testCasesObj.map(async (objConf, objIdx) => {
+                const obj = bkt.objs[objConf.name];
+
+                const partNumber = partCount + bktIdx * testCasesObj.length
+                    
+ objIdx + 1; + const res = await s3.uploadPartCopy({ + ...copyPartArg, + PartNumber: partNumber, + CopySource: `${bkt.name}/${obj.name}`, + }).promise(); + + return { partNumber, body: obj.body, res: res.CopyPartResult }; + })); + }, []); + + const parts = await Promise.all(uploadPromises); + // console.log('parts', parts); + + const complete = await s3.completeMultipartUpload({ + UploadId: uploadId, + Bucket: mpuCopyBkt, + Key: mpuKey, + MultipartUpload: { + Parts: [ + ...existingParts.Parts.map(part => ({ PartNumber: part.PartNumber, ETag: part.ETag })), + ...parts.map(part => ({ PartNumber: part.partNumber, ETag: part.res.ETag })), + ], + }, + }).promise(); + // console.log('complete', complete); + const fullBody = parts.reduce((acc, part) => `${acc}${part.body}`, '').repeat(2); + void await assertObjectSSEMigrationFILE(mpuCopyBkt, mpuKey, {}, {}, { algo: 'AES256' }, {}, null, fullBody); + }); +});
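+
+// Illustrative summary (an assumption drawn from the asserts above, not itself asserted):
+// object and bucket MD may store the SSE key either bare or already prefixed with
+// 'arn:scality:kms'; expectedKey in the assert helpers normalizes it to the prefixed
+// form, and getKey() strips the prefix again when config.kmsHideScalityArn is set,
+// which is what HeadObject is expected to return in each mode.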