From b89907b3450fd272f5ba154ddbe7b85f85dca815 Mon Sep 17 00:00:00 2001 From: Maha Benzekri Date: Tue, 30 Sep 2025 14:09:58 +0200 Subject: [PATCH 01/12] delete operation related tests migration Issue: CLDSRV-724 --- .../test/multipleBackend/delete/delete.js | 231 +++++++++--------- .../delete/deleteAwsVersioning.js | 97 +++++--- .../multipleBackend/delete/deleteAzure.js | 106 ++++---- .../test/multipleBackend/delete/deleteGcp.js | 35 +-- 4 files changed, 266 insertions(+), 203 deletions(-) diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/delete/delete.js b/tests/functional/aws-node-sdk/test/multipleBackend/delete/delete.js index 4cea3c7eb4..752eb3aed7 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/delete/delete.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/delete/delete.js @@ -1,4 +1,10 @@ const assert = require('assert'); +const { + CreateBucketCommand, + PutObjectCommand, + DeleteObjectCommand, + GetObjectCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); @@ -26,133 +32,128 @@ describeSkipIfNotMultiple('Multiple backend delete', () => { let bucketUtil; let s3; - before(() => { + before(async () => { process.stdout.write('Creating bucket\n'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }) - .then(() => { - process.stdout.write('Putting object to mem\n'); - const params = { Bucket: bucket, Key: memObject, Body: body, - Metadata: { 'scal-location-constraint': memLocation } }; - return s3.putObject(params).promise(); - }) - .then(() => { - process.stdout.write('Putting object to file\n'); - const params = { Bucket: bucket, Key: fileObject, Body: body, - Metadata: { 'scal-location-constraint': fileLocation } }; - return 
s3.putObject(params).promise(); - }) - .then(() => { - process.stdout.write('Putting object to AWS\n'); - const params = { Bucket: bucket, Key: awsObject, Body: body, - Metadata: { 'scal-location-constraint': awsLocation } }; - return s3.putObject(params).promise(); - }) - .then(() => { - process.stdout.write('Putting 0-byte object to AWS\n'); - const params = { Bucket: bucket, Key: emptyObject, - Metadata: { 'scal-location-constraint': awsLocation } }; - return s3.putObject(params).promise(); - }) - .then(() => { - process.stdout.write('Putting large object to AWS\n'); - const params = { Bucket: bucket, Key: bigObject, - Body: bigBody, - Metadata: { 'scal-location-constraint': awsLocation } }; - return s3.putObject(params).promise(); - }) - .then(() => { - process.stdout.write('Putting object to AWS\n'); - const params = { Bucket: bucket, Key: mismatchObject, - Body: body, Metadata: - { 'scal-location-constraint': awsLocationMismatch } }; - return s3.putObject(params).promise(); - }) - .catch(err => { - process.stdout.write(`Error putting objects: ${err}\n`); - throw err; - }); + + await s3.send(new CreateBucketCommand({ Bucket: bucket })); + + process.stdout.write('Putting object to mem\n'); + await s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: memObject, + Body: body, + Metadata: { 'scal-location-constraint': memLocation } + })); + + process.stdout.write('Putting object to file\n'); + await s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: fileObject, + Body: body, + Metadata: { 'scal-location-constraint': fileLocation } + })); + + process.stdout.write('Putting object to AWS\n'); + await s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: awsObject, + Body: body, + Metadata: { 'scal-location-constraint': awsLocation } + })); + + process.stdout.write('Putting 0-byte object to AWS\n'); + await s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: emptyObject, + Metadata: { 'scal-location-constraint': awsLocation } + })); + + 
process.stdout.write('Putting big object to AWS\n'); + await s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: bigObject, + Body: bigBody, + Metadata: { 'scal-location-constraint': awsLocation } + })); }); - after(() => { + + after(async () => { + process.stdout.write('Emptying bucket\n'); + await bucketUtil.empty(bucket); process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket) - .catch(err => { - process.stdout.write(`Error deleting bucket: ${err}\n`); - throw err; - }); + await bucketUtil.deleteOne(bucket); }); - it('should delete object from mem', done => { - s3.deleteObject({ Bucket: bucket, Key: memObject }, err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); - s3.getObject({ Bucket: bucket, Key: memObject }, err => { - assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + - 'error but got success'); - done(); - }); - }); + it('should delete object from mem', async () => { + await s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: memObject })); + + try { + await s3.send(new GetObjectCommand({ Bucket: bucket, Key: memObject })); + throw new Error('Expected NoSuchKey error but got success'); + } catch (err) { + assert.strictEqual(err.code, 'NoSuchKey'); + } }); - it('should delete object from file', done => { - s3.deleteObject({ Bucket: bucket, Key: fileObject }, err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); - s3.getObject({ Bucket: bucket, Key: fileObject }, err => { - assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + - 'error but got success'); - done(); - }); - }); + + it('should delete object from file', async () => { + await s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: fileObject })); + + try { + await s3.send(new GetObjectCommand({ Bucket: bucket, Key: fileObject })); + throw new Error('Expected NoSuchKey error but got success'); + } catch (err) { + assert.strictEqual(err.code, 'NoSuchKey'); + } }); - 
it('should delete object from AWS', done => { - s3.deleteObject({ Bucket: bucket, Key: awsObject }, err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); - s3.getObject({ Bucket: bucket, Key: awsObject }, err => { - assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + - 'error but got success'); - done(); - }); - }); + + it('should delete an object from AWS', async () => { + await s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: awsObject })); + + try { + await s3.send(new GetObjectCommand({ Bucket: bucket, Key: awsObject })); + throw new Error('Expected NoSuchKey error but got success'); + } catch (err) { + assert.strictEqual(err.code, 'NoSuchKey'); + } }); - it('should delete 0-byte object from AWS', done => { - s3.deleteObject({ Bucket: bucket, Key: emptyObject }, err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); - s3.getObject({ Bucket: bucket, Key: emptyObject }, err => { - assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + - 'error but got success'); - done(); - }); - }); + + it('should delete 0-byte object from AWS', async () => { + await s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: emptyObject })); + + try { + await s3.send(new GetObjectCommand({ Bucket: bucket, Key: emptyObject })); + throw new Error('Expected NoSuchKey error but got success'); + } catch (err) { + assert.strictEqual(err.code, 'NoSuchKey'); + } }); - it('should delete large object from AWS', done => { - s3.deleteObject({ Bucket: bucket, Key: bigObject }, err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); - s3.getObject({ Bucket: bucket, Key: bigObject }, err => { - assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + - 'error but got success'); - done(); - }); - }); + + it('should delete large object from AWS', async () => { + await s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: bigObject })); + + try { + await 
s3.send(new GetObjectCommand({ Bucket: bucket, Key: bigObject })); + throw new Error('Expected NoSuchKey error but got success'); + } catch (err) { + assert.strictEqual(err.code, 'NoSuchKey'); + } }); - it('should delete object from AWS location with bucketMatch set to ' + - 'false', done => { - s3.deleteObject({ Bucket: bucket, Key: mismatchObject }, err => { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); - s3.getObject({ Bucket: bucket, Key: mismatchObject }, err => { - assert.strictEqual(err.code, 'NoSuchKey', - 'Expected error but got success'); - done(); - }); - }); + + it('should return an InvalidLocationConstraint ' + + 'error for mismatch location', async () => { + try { + await s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: mismatchObject, + Body: body, + Metadata: { 'scal-location-constraint': awsLocationMismatch } + })); + throw new Error('Expected InvalidLocationConstraint error but got success'); + } catch (err) { + assert.strictEqual(err.code, 'InvalidLocationConstraint'); + } }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAwsVersioning.js b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAwsVersioning.js index 9445721922..6173cdafa2 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAwsVersioning.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAwsVersioning.js @@ -1,6 +1,13 @@ const assert = require('assert'); const async = require('async'); const { errors } = require('arsenal'); +const { + CreateBucketCommand, + DeleteObjectCommand, + DeleteObjectsCommand, + GetObjectCommand, + PutObjectCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); @@ -63,18 +70,23 @@ function _assertDeleteResult(result, resultType, requestVersionId) { function delAndAssertResult(s3, params, cb) { const { bucket, key, 
versionId, resultType, resultError } = params; - return s3.deleteObject({ Bucket: bucket, Key: key, VersionId: - versionId }, (err, result) => { + return s3.send(new DeleteObjectCommand({ + Bucket: bucket, + Key: key, + VersionId: versionId + })).then(result => { if (resultError) { - assert(err, `expected ${resultError} but found no error`); - assert.strictEqual(err.code, resultError); - assert.strictEqual(err.statusCode, errors[resultError].code); - return cb(null); + assert.fail(`expected ${resultError} but got success`); } - assert.strictEqual(err, null, 'Expected success ' + - `deleting object, got error ${err}`); _assertDeleteResult(result, resultType, versionId); return cb(null, result.VersionId); + }).catch(err => { + if (resultError) { + assert.strictEqual(err.name, resultError); + assert.strictEqual(err.$metadata.httpStatusCode, errors[resultError].code); + return cb(null); + } + return cb(err); }); } @@ -89,18 +101,23 @@ function delObjectsAndAssertResult(s3, params, cb) { ], Quiet: false, }; - return s3.deleteObjects({ Bucket: bucket, Delete: deleteParams }, (err, res) => { + return s3.send(new DeleteObjectsCommand({ + Bucket: bucket, + Delete: deleteParams + })).then(res => { if (resultError) { - assert(err, `expected ${resultError} but found no error`); - assert.strictEqual(err.code, resultError); - assert.strictEqual(err.statusCode, errors[resultError].code); - return cb(null); + assert.fail(`expected ${resultError} but got success`); } - assert.strictEqual(err, null, 'Expected success ' + - `deleting object, got error ${err}`); const result = res.Deleted[0]; _assertDeleteResult(result, resultType, versionId); return cb(null, result.VersionId); + }).catch(err => { + if (resultError) { + assert.strictEqual(err.name, resultError); + assert.strictEqual(err.$metadata.httpStatusCode, errors[resultError].code); + return cb(null); + } + return cb(err); }); } @@ -120,19 +137,25 @@ function _deleteDeleteMarkers(s3, bucket, key, deleteMarkerVids, cb) { function 
_getAssertDeleted(s3, params, cb) { const { key, versionId, errorCode } = params; - return s3.getObject({ Bucket: bucket, Key: key, VersionId: versionId }, - err => { - assert.strictEqual(err.code, errorCode); - assert.strictEqual(err.statusCode, 404); - return cb(); - }); + return s3.send(new GetObjectCommand({ + Bucket: bucket, + Key: key, + VersionId: versionId + })).then(() => { + assert.fail('Expected error but got success'); + }).catch(err => { + assert.strictEqual(err.name, errorCode); + assert.strictEqual(err.$metadata.httpStatusCode, 404); + return cb(); + }); } +// Update AWS S3 direct calls function _awsGetAssertDeleted(params, cb) { const { key, versionId, errorCode } = params; return getAwsRetry({ key, versionId }, 0, err => { - assert.strictEqual(err.code, errorCode); - assert.strictEqual(err.statusCode, 404); + assert.strictEqual(err.name, errorCode); + assert.strictEqual(err.$metadata.httpStatusCode, 404); return cb(); }); } @@ -147,7 +170,7 @@ describeSkipIfNotMultiple('AWS backend delete object w. versioning: ' + process.stdout.write('Creating bucket\n'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() + return s3.send(new CreateBucketCommand({ Bucket: bucket })) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; @@ -476,11 +499,17 @@ describeSkipIfNotMultiple('AWS backend delete object w. 
versioning: ' + (s3vid, next) => awsGetLatestVerId(key, someBody, (err, awsVid) => next(err, s3vid, awsVid)), // put an object in AWS - (s3vid, awsVid, next) => awsS3.putObject({ Bucket: awsBucket, - Key: key }, err => next(err, s3vid, awsVid)), + (s3vid, awsVid, next) => awsS3.send(new PutObjectCommand({ + Bucket: awsBucket, + Key: key + })).then(() => next(null, s3vid, awsVid)) + .catch(err => next(err)), // create a delete marker in AWS - (s3vid, awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key }, err => next(err, s3vid, awsVid)), + (s3vid, awsVid, next) => awsS3.send(new DeleteObjectCommand({ + Bucket: awsBucket, + Key: key + })).then(() => next(null, s3vid, awsVid)) + .catch(err => next(err)), // delete original version in s3 (s3vid, awsVid, next) => delAndAssertResult(s3, { bucket, key, versionId: s3vid, resultType: deleteVersion }, @@ -504,8 +533,12 @@ describeSkipIfNotMultiple('AWS backend delete object w. versioning: ' + (s3vid, next) => awsGetLatestVerId(key, someBody, (err, awsVid) => next(err, s3vid, awsVid)), // delete the object in AWS - (s3vid, awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key, VersionId: awsVid }, err => next(err, s3vid)), + (s3vid, awsVid, next) => awsS3.send(new DeleteObjectCommand({ + Bucket: awsBucket, + Key: key, + VersionId: awsVid + })).then(() => next(null, s3vid)) + .catch(err => next(err)), // then try to delete in S3 (s3vid, next) => delAndAssertResult(s3, { bucket, key, versionId: s3vid, resultType: deleteVersion }, @@ -533,7 +566,7 @@ describeSkipIfNotMultiple('AWS backend delete object w. 
versioning: ' + process.stdout.write('Creating bucket\n'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket(createBucketParams).promise() + return s3.send(new CreateBucketCommand(createBucketParams)) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; @@ -609,7 +642,7 @@ describeSkipIfNotMultiple('AWS backend delete multiple objects w. versioning: ' process.stdout.write('Creating bucket\n'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() + return s3.send(new CreateBucketCommand({ Bucket: bucket })) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js index 22e9d150fb..68cdae5af3 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js @@ -1,6 +1,11 @@ const assert = require('assert'); const async = require('async'); - +const { CreateBucketCommand, + PutObjectCommand, + DeleteObjectCommand, + GetObjectCommand, + CreateMultipartUploadCommand, + AbortMultipartUploadCommand } = require('@aws-sdk/client-s3'); const BucketUtility = require('../../../lib/utility/bucket-util'); const withV4 = require('../../support/withV4'); const { @@ -36,7 +41,7 @@ function testSuite() { process.stdout.write('Creating bucket'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: azureContainerName }).promise() + return s3.send(new CreateBucketCommand({ Bucket: azureContainerName })) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; @@ -60,24 +65,22 @@ function testSuite() { const keyName = uniqName(keyObject); describe(`${key.describe} size`, () => { 
before(done => { - s3.putObject({ + s3.send(new PutObjectCommand({ Bucket: azureContainerName, Key: keyName, Body: key.body, Metadata: { 'scal-location-constraint': azureLocation, }, - }, done); + })).then(() => done()); }); it(`should delete an ${key.describe} object from Azure`, done => { - s3.deleteObject({ + s3.send(new DeleteObjectCommand({ Bucket: azureContainerName, Key: keyName, - }, err => { - assert.equal(err, null, 'Expected success ' + - `but got error ${err}`); + })).then(() => { setTimeout(() => azureClient.getContainerClient(azureContainerName) .getProperties(keyName) .then(() => assert.fail('Expected error'), err => { @@ -85,6 +88,9 @@ function testSuite() { assert.strictEqual(err.code, 'NotFound'); return done(); }), azureTimeout); + }).catch(err => { + assert.equal(err, null, 'Expected success ' + + `but got error ${err}`); }); }); }); @@ -94,23 +100,21 @@ function testSuite() { () => { beforeEach(function beforeF(done) { this.currentTest.azureObject = uniqName(keyObject); - s3.putObject({ + s3.send(new PutObjectCommand({ Bucket: azureContainerName, Key: this.currentTest.azureObject, Body: normalBody, Metadata: { 'scal-location-constraint': azureLocationMismatch, }, - }, done); + })).then(() => done()); }); it('should delete object', function itF(done) { - s3.deleteObject({ + s3.send(new DeleteObjectCommand({ Bucket: azureContainerName, Key: this.test.azureObject, - }, err => { - assert.equal(err, null, 'Expected success ' + - `but got error ${err}`); + })).then(() => { setTimeout(() => azureClient.getContainerClient(azureContainerName) .getProperties(`${azureContainerName}/${this.test.azureObject}`) @@ -119,6 +123,9 @@ function testSuite() { assert.strictEqual(err.code, 'NotFound'); return done(); }), azureTimeout); + }).catch(err => { + assert.equal(err, null, 'Expected success ' + + `but got error ${err}`); }); }); }); @@ -126,33 +133,37 @@ function testSuite() { describe('returning no error', () => { beforeEach(function beF(done) { 
this.currentTest.azureObject = uniqName(keyObject); - s3.putObject({ + s3.send(new PutObjectCommand({ Bucket: azureContainerName, Key: this.currentTest.azureObject, Body: normalBody, Metadata: { 'scal-location-constraint': azureLocation, }, - }, err => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); + })).then(() => { azureClient.getContainerClient(azureContainerName) .deleteBlob(this.currentTest.azureObject).then(done, err => { assert.equal(err, null, 'Expected success but got ' + `error ${err}`); done(err); }); + }).catch(err => { + assert.equal(err, null, 'Expected success but got ' + + `error ${err}`); + done(); }); }); it('should return no error on deleting an object deleted ' + 'from Azure', function itF(done) { - s3.deleteObject({ + s3.send(new DeleteObjectCommand({ Bucket: azureContainerName, Key: this.test.azureObject, - }, err => { + })).then(() => { + done(); + }).catch(err => { assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); + `error ${err}`); done(); }); }); @@ -161,32 +172,37 @@ function testSuite() { describe('Versioning:: ', () => { beforeEach(function beF(done) { this.currentTest.azureObject = uniqName(keyObject); - s3.putObject({ + s3.send(new PutObjectCommand({ Bucket: azureContainerName, Key: this.currentTest.azureObject, Body: normalBody, Metadata: { 'scal-location-constraint': azureLocation, }, - }, done); + })).then(() => done()); }); it('should not delete object when deleting a non-existing ' + 'version from Azure', function itF(done) { async.waterfall([ - next => s3.deleteObject({ + next => s3.send(new DeleteObjectCommand({ Bucket: azureContainerName, Key: this.test.azureObject, VersionId: nonExistingId, - }, err => next(err)), - next => s3.getObject({ + })).then(() => next()) + .catch(err => { + next(err); + }), + next => s3.send(new GetObjectCommand({ Bucket: azureContainerName, Key: this.test.azureObject, - }, (err, res) => { - assert.equal(err, null, 'getObject: Expected success ' + 
- `but got error ${err}`); + })).then(res => { assert.deepStrictEqual(res.Body, normalBody); - return next(err); + return next(); + }).catch(err => { + assert.equal(err, null, 'getObject: Expected success ' + + `but got error ${err}`); + next(err); }), next => azureClient.getContainerClient(azureContainerName) .getBlobClient(this.test.azureObject) @@ -211,39 +227,47 @@ function testSuite() { Body: normalBody, Metadata: { 'scal-location-constraint': azureLocation }, }; - s3.putObject(params, err => { - assert.equal(err, null, 'Err putting object to Azure: ' + - `${err}`); + s3.send(new PutObjectCommand(params)).then(() => { const params = { Bucket: azureContainerName, Key: this.currentTest.key, Metadata: { 'scal-location-constraint': azureLocation }, }; - s3.createMultipartUpload(params, (err, res) => { - assert.equal(err, null, 'Err initiating MPU on ' + - `Azure: ${err}`); + s3.send(new CreateMultipartUploadCommand(params)).then(res => { this.currentTest.uploadId = res.UploadId; setTimeout(() => done(), azureTimeout); + }).catch(err => { + assert.equal(err, null, 'Err initiating MPU on ' + + `Azure: ${err}`); + done(); }); + }).catch(err => { + assert.equal(err, null, 'Err putting object to Azure: ' + + `${err}`); + done(); }); }); afterEach(function afF(done) { - s3.abortMultipartUpload({ + s3.send(new AbortMultipartUploadCommand({ Bucket: azureContainerName, Key: this.currentTest.key, UploadId: this.currentTest.uploadId, - }, err => { - assert.equal(err, null, `Err aborting MPU: ${err}`); + })).then(() => { setTimeout(() => done(), azureTimeout); + }).catch(err => { + assert.equal(err, null, `Err aborting MPU: ${err}`); + done(); }); }); it('should return InternalError', function itFn(done) { - s3.deleteObject({ + s3.send(new DeleteObjectCommand({ Bucket: azureContainerName, Key: this.test.key, - }, err => { + })).then(() => { + done(); + }).catch(err => { assert.strictEqual(err.code, 'MPUinProgress'); done(); }); diff --git 
a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteGcp.js b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteGcp.js index d8efe4c81e..a3c962e8ac 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteGcp.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteGcp.js @@ -1,5 +1,8 @@ const assert = require('assert'); - +const { CreateBucketCommand, + PutObjectCommand, + DeleteObjectCommand, + GetObjectCommand } = require('@aws-sdk/client-s3'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); const { @@ -28,7 +31,7 @@ function testSuite() { process.stdout.write('Creating bucket\n'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() + return s3.send(new CreateBucketCommand({ Bucket: bucket })) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; @@ -36,27 +39,27 @@ function testSuite() { process.stdout.write('Putting object to GCP\n'); const params = { Bucket: bucket, Key: gcpObject, Body: body, Metadata: { 'scal-location-constraint': gcpLocation } }; - return s3.putObject(params).promise(); + return s3.send(new PutObjectCommand(params)); }) .then(() => { process.stdout.write('Putting 0-byte object to GCP\n'); const params = { Bucket: bucket, Key: emptyObject, Metadata: { 'scal-location-constraint': gcpLocation } }; - return s3.putObject(params).promise(); + return s3.send(new PutObjectCommand(params)); }) .then(() => { process.stdout.write('Putting large object to GCP\n'); const params = { Bucket: bucket, Key: bigObject, Body: bigBody, Metadata: { 'scal-location-constraint': gcpLocation } }; - return s3.putObject(params).promise(); + return s3.send(new PutObjectCommand(params)); }) .then(() => { process.stdout.write('Putting object to GCP\n'); const params = { Bucket: bucket, Key: mismatchObject, Body: body, Metadata: { 
'scal-location-constraint': gcpLocationMismatch } }; - return s3.putObject(params).promise(); + return s3.send(new PutObjectCommand(params)); }) .catch(err => { process.stdout.write(`Error putting objects: ${err}\n`); @@ -93,22 +96,24 @@ function testSuite() { ]; deleteTests.forEach(test => { const { msg, Bucket, Key } = test; - it(msg, done => s3.deleteObject({ Bucket, Key }, err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); - s3.getObject({ Bucket, Key }, err => { + it(msg, done => s3.send(new DeleteObjectCommand({ Bucket, Key })) + .then(() => s3.send(new GetObjectCommand({ Bucket, Key })) + .then(() => { + assert.fail('Expected error but got success'); + }).catch(err => { assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + 'error but got success'); - done(); - }); - })); + return done(); + }))); }); it('should return success if the object does not exist', - done => s3.deleteObject({ Bucket: bucket, Key: 'noop' }, err => { + done => s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: 'noop' })).then(() => { + assert.fail('Expected error but got success'); + }).catch(err => { assert.strictEqual(err, null, `Expected success, got error ${JSON.stringify(err)}`); - done(); + return done(); })); }); }); From a63a47a38913885c3b869dcc35c95b09ac4d676a Mon Sep 17 00:00:00 2001 From: Maha Benzekri Date: Tue, 30 Sep 2025 14:25:08 +0200 Subject: [PATCH 02/12] get operation related tests migration Issue: CLDSRV-724 --- .../test/multipleBackend/get/get.js | 415 +++++++++++------- .../multipleBackend/get/getAwsVersioning.js | 216 +++++---- .../test/multipleBackend/get/getAzure.js | 81 ++-- .../test/multipleBackend/get/getGcp.js | 47 +- 4 files changed, 482 insertions(+), 277 deletions(-) diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/get/get.js b/tests/functional/aws-node-sdk/test/multipleBackend/get/get.js index 23d8688b49..93d4b5c2b1 100644 --- 
a/tests/functional/aws-node-sdk/test/multipleBackend/get/get.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/get/get.js @@ -1,5 +1,13 @@ const assert = require('assert'); const async = require('async'); +const { + CreateBucketCommand, + GetObjectCommand, + PutObjectCommand, + CreateMultipartUploadCommand, + UploadPartCommand, + CompleteMultipartUploadCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); const { @@ -35,7 +43,8 @@ describe('Multiple backend get object', function testSuite() { process.stdout.write('Creating bucket'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() + const command = new CreateBucketCommand({ Bucket: bucket }); + return s3.send(command) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; @@ -60,21 +69,31 @@ describe('Multiple backend get object', function testSuite() { it.skip('should return an error to get request without a valid ' + 'bucket name', done => { - s3.getObject({ Bucket: '', Key: 'somekey' }, err => { - assert.notEqual(err, null, - 'Expected failure but got success'); - assert.strictEqual(err.code, 'MethodNotAllowed'); - done(); - }); + const command = new GetObjectCommand({ Bucket: '', Key: 'somekey' }); + s3.send(command) + .then(() => { + done(new Error('Expected failure but got success')); + }) + .catch(err => { + assert.notEqual(err, null, + 'Expected failure but got success'); + assert.strictEqual(err.name, 'MethodNotAllowed'); + done(); + }); }); it('should return NoSuchKey error when no such object', done => { - s3.getObject({ Bucket: bucket, Key: 'nope' }, err => { - assert.notEqual(err, null, - 'Expected failure but got success'); - assert.strictEqual(err.code, 'NoSuchKey'); - done(); - }); + const command = new GetObjectCommand({ Bucket: bucket, Key: 'nope' }); + s3.send(command) + 
.then(() => { + done(new Error('Expected failure but got success')); + }) + .catch(err => { + assert.notEqual(err, null, + 'Expected failure but got success'); + assert.strictEqual(err.name, 'NoSuchKey'); + done(); + }); }); describeSkipIfNotMultiple('Complete MPU then get object on AWS ' + @@ -85,46 +104,67 @@ describe('Multiple backend get object', function testSuite() { s3 = bucketUtil.s3; async.waterfall([ - next => s3.createMultipartUpload({ - Bucket: bucket, Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': awsLocation, - } }, (err, res) => next(err, res.UploadId)), - (uploadId, next) => s3.uploadPart({ - Bucket: bucket, - Key: this.currentTest.key, - PartNumber: 1, - UploadId: uploadId, - Body: 'helloworld' }, (err, res) => next(err, uploadId, - res.ETag)), - (uploadId, eTag, next) => s3.completeMultipartUpload({ - Bucket: bucket, - Key: this.currentTest.key, - MultipartUpload: { - Parts: [ - { - ETag: eTag, - PartNumber: 1, - }, - ], - }, - UploadId: uploadId, - }, err => next(err)), + next => { + const command = new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': awsLocation } + }); + s3.send(command) + .then(res => next(null, res.UploadId)) + .catch(err => next(err)); + }, + (uploadId, next) => { + const command = new UploadPartCommand({ + Bucket: bucket, + Key: this.currentTest.key, + PartNumber: 1, + UploadId: uploadId, + Body: 'helloworld' + }); + s3.send(command) + .then(res => next(null, uploadId, res.ETag)) + .catch(err => next(err)); + }, + (uploadId, eTag, next) => { + const command = new CompleteMultipartUploadCommand({ + Bucket: bucket, + Key: this.currentTest.key, + MultipartUpload: { + Parts: [ + { + ETag: eTag, + PartNumber: 1, + }, + ], + }, + UploadId: uploadId, + }); + s3.send(command) + .then(() => next()) + .catch(err => next(err)); + }, ], done); }); it('should get object from MPU on AWS ' + 'location with bucketMatch: true ', function it(done) { 
- s3.getObject({ + const command = new GetObjectCommand({ Bucket: bucket, Key: this.test.key, - }, (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - assert.strictEqual(res.ContentLength, 10); - assert.strictEqual(res.Body.toString(), 'helloworld'); - assert.deepStrictEqual(res.Metadata, - { 'scal-location-constraint': awsLocation }); - return done(err); }); + s3.send(command) + .then(res => { + assert.strictEqual(res.ContentLength, 10); + assert.strictEqual(res.Body.toString(), 'helloworld'); + assert.deepStrictEqual(res.Metadata, + { 'scal-location-constraint': awsLocation }); + done(); + }) + .catch(err => { + assert.equal(err, null, 'Expected success but got ' + + `error ${err}`); + done(err); + }); }); }); @@ -136,47 +176,67 @@ describe('Multiple backend get object', function testSuite() { s3 = bucketUtil.s3; async.waterfall([ - next => s3.createMultipartUpload({ - Bucket: bucket, Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': - awsLocationMismatch, - } }, (err, res) => next(err, res.UploadId)), - (uploadId, next) => s3.uploadPart({ - Bucket: bucket, - Key: this.currentTest.key, - PartNumber: 1, - UploadId: uploadId, - Body: 'helloworld' }, (err, res) => next(err, uploadId, - res.ETag)), - (uploadId, eTag, next) => s3.completeMultipartUpload({ - Bucket: bucket, - Key: this.currentTest.key, - MultipartUpload: { - Parts: [ - { - ETag: eTag, - PartNumber: 1, - }, - ], - }, - UploadId: uploadId, - }, err => next(err)), + next => { + const command = new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': awsLocationMismatch } + }); + s3.send(command) + .then(res => next(null, res.UploadId)) + .catch(err => next(err)); + }, + (uploadId, next) => { + const command = new UploadPartCommand({ + Bucket: bucket, + Key: this.currentTest.key, + PartNumber: 1, + UploadId: uploadId, + Body: 'helloworld' + }); + s3.send(command) + .then(res => 
next(null, uploadId, res.ETag)) + .catch(err => next(err)); + }, + (uploadId, eTag, next) => { + const command = new CompleteMultipartUploadCommand({ + Bucket: bucket, + Key: this.currentTest.key, + MultipartUpload: { + Parts: [ + { + ETag: eTag, + PartNumber: 1, + }, + ], + }, + UploadId: uploadId, + }); + s3.send(command) + .then(() => next()) + .catch(err => next(err)); + }, ], done); }); it('should get object from MPU on AWS ' + 'location with bucketMatch: false ', function it(done) { - s3.getObject({ + const command = new GetObjectCommand({ Bucket: bucket, Key: this.test.key, - }, (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - assert.strictEqual(res.ContentLength, 10); - assert.strictEqual(res.Body.toString(), 'helloworld'); - assert.deepStrictEqual(res.Metadata, - { 'scal-location-constraint': awsLocationMismatch }); - return done(err); }); + s3.send(command) + .then(res => { + assert.strictEqual(res.ContentLength, 10); + assert.strictEqual(res.Body.toString(), 'helloworld'); + assert.deepStrictEqual(res.Metadata, + { 'scal-location-constraint': awsLocationMismatch }); + done(); + }) + .catch(err => { + assert.equal(err, null, 'Expected success but got ' + + `error ${err}`); + done(err); + }); }); }); @@ -184,50 +244,60 @@ describe('Multiple backend get object', function testSuite() { '(mem/file/AWS)', () => { before(() => { process.stdout.write('Putting object to mem\n'); - return s3.putObject({ Bucket: bucket, Key: memObject, + const memCommand = new PutObjectCommand({ + Bucket: bucket, + Key: memObject, Body: body, Metadata: { 'scal-location-constraint': memLocation }, - }).promise() + }); + return s3.send(memCommand) .then(() => { process.stdout.write('Putting object to file\n'); - return s3.putObject({ Bucket: bucket, + const fileCommand = new PutObjectCommand({ + Bucket: bucket, Key: fileObject, Body: body, - Metadata: - { 'scal-location-constraint': fileLocation }, - }).promise(); + Metadata: { 
'scal-location-constraint': fileLocation }, + }); + return s3.send(fileCommand); }) .then(() => { process.stdout.write('Putting object to AWS\n'); - return s3.putObject({ Bucket: bucket, Key: awsObject, + const awsCommand = new PutObjectCommand({ + Bucket: bucket, + Key: awsObject, Body: body, - Metadata: { - 'scal-location-constraint': awsLocation }, - }).promise(); + Metadata: { 'scal-location-constraint': awsLocation }, + }); + return s3.send(awsCommand); }) .then(() => { process.stdout.write('Putting 0-byte object to mem\n'); - return s3.putObject({ Bucket: bucket, + const emptyCommand = new PutObjectCommand({ + Bucket: bucket, Key: emptyObject, - Metadata: - { 'scal-location-constraint': memLocation }, - }).promise(); + Metadata: { 'scal-location-constraint': memLocation }, + }); + return s3.send(emptyCommand); }) .then(() => { process.stdout.write('Putting 0-byte object to AWS\n'); - return s3.putObject({ Bucket: bucket, + const emptyAwsCommand = new PutObjectCommand({ + Bucket: bucket, Key: emptyAwsObject, - Metadata: { - 'scal-location-constraint': awsLocation }, - }).promise(); + Metadata: { 'scal-location-constraint': awsLocation }, + }); + return s3.send(emptyAwsCommand); }) .then(() => { process.stdout.write('Putting large object to AWS\n'); - return s3.putObject({ Bucket: bucket, - Key: bigObject, Body: bigBody, - Metadata: { - 'scal-location-constraint': awsLocation }, - }).promise(); + const bigCommand = new PutObjectCommand({ + Bucket: bucket, + Key: bigObject, + Body: bigBody, + Metadata: { 'scal-location-constraint': awsLocation }, + }); + return s3.send(bigCommand); }) .catch(err => { process.stdout.write(`Error putting objects: ${err}\n`); @@ -235,90 +305,131 @@ describe('Multiple backend get object', function testSuite() { }); }); it('should get an object from mem', done => { - s3.getObject({ Bucket: bucket, Key: memObject }, (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - 
assert.strictEqual(res.ETag, `"${correctMD5}"`); - done(); - }); + const command = new GetObjectCommand({ Bucket: bucket, Key: memObject }); + s3.send(command) + .then(res => { + assert.strictEqual(res.ETag, `"${correctMD5}"`); + done(); + }) + .catch(err => { + assert.equal(err, null, 'Expected success but got ' + + `error ${err}`); + done(err); + }); }); it('should get a 0-byte object from mem', done => { - s3.getObject({ Bucket: bucket, Key: emptyObject }, - (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - assert.strictEqual(res.ETag, `"${emptyMD5}"`); - done(); - }); + const command = new GetObjectCommand({ Bucket: bucket, Key: emptyObject }); + s3.send(command) + .then(res => { + assert.strictEqual(res.ETag, `"${emptyMD5}"`); + done(); + }) + .catch(err => { + assert.equal(err, null, 'Expected success but got ' + + `error ${err}`); + done(err); + }); }); it('should get a 0-byte object from AWS', done => { - s3.getObject({ Bucket: bucket, Key: emptyAwsObject }, - (err, res) => { - assert.equal(err, null, 'Expected success but got error ' + - `error ${err}`); - assert.strictEqual(res.ETag, `"${emptyMD5}"`); - done(); - }); + const command = new GetObjectCommand({ Bucket: bucket, Key: emptyAwsObject }); + s3.send(command) + .then(res => { + assert.strictEqual(res.ETag, `"${emptyMD5}"`); + done(); + }) + .catch(err => { + assert.equal(err, null, 'Expected success but got error ' + + `error ${err}`); + done(err); + }); }); it('should get an object from file', done => { - s3.getObject({ Bucket: bucket, Key: fileObject }, - (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); + const command = new GetObjectCommand({ Bucket: bucket, Key: fileObject }); + s3.send(command) + .then(res => { assert.strictEqual(res.ETag, `"${correctMD5}"`); done(); + }) + .catch(err => { + assert.equal(err, null, 'Expected success but got ' + + `error ${err}`); + done(err); }); }); it('should get an object 
from AWS', done => { - s3.getObject({ Bucket: bucket, Key: awsObject }, - (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); + const command = new GetObjectCommand({ Bucket: bucket, Key: awsObject }); + s3.send(command) + .then(res => { assert.strictEqual(res.ETag, `"${correctMD5}"`); done(); + }) + .catch(err => { + assert.equal(err, null, 'Expected success but got ' + + `error ${err}`); + done(err); }); }); it('should get a large object from AWS', done => { - s3.getObject({ Bucket: bucket, Key: bigObject }, - (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); + const command = new GetObjectCommand({ Bucket: bucket, Key: bigObject }); + s3.send(command) + .then(res => { assert.strictEqual(res.ETag, `"${bigMD5}"`); done(); + }) + .catch(err => { + assert.equal(err, null, 'Expected success but got ' + + `error ${err}`); + done(err); }); }); it('should get an object using range query from AWS', done => { - s3.getObject({ Bucket: bucket, Key: bigObject, - Range: 'bytes=0-9' }, - (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); + const command = new GetObjectCommand({ + Bucket: bucket, + Key: bigObject, + Range: 'bytes=0-9' + }); + s3.send(command) + .then(res => { assert.strictEqual(res.ContentLength, 10); assert.strictEqual(res.ContentRange, `bytes 0-9/${bigBodyLen}`); assert.strictEqual(res.ETag, `"${bigMD5}"`); done(); + }) + .catch(err => { + assert.equal(err, null, 'Expected success but got ' + + `error ${err}`); + done(err); }); }); }); - describeSkipIfNotMultiple('with bucketMatch set to false', () => { beforeEach(done => { - s3.putObject({ Bucket: bucket, Key: mismatchObject, Body: body, - Metadata: { 'scal-location-constraint': awsLocationMismatch } }, - err => { - assert.equal(err, null, `Err putting object: ${err}`); - done(); + const command = new PutObjectCommand({ + Bucket: bucket, + Key: mismatchObject, + Body: body, + Metadata: { 
'scal-location-constraint': awsLocationMismatch } }); + s3.send(command) + .then(() => done()) + .catch(err => { + assert.equal(err, null, `Err putting object: ${err}`); + done(err); + }); }); it('should get an object from AWS', done => { - s3.getObject({ Bucket: bucket, Key: mismatchObject }, - (err, res) => { - assert.equal(err, null, `Error getting object: ${err}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - done(); - }); + const command = new GetObjectCommand({ Bucket: bucket, Key: mismatchObject }); + s3.send(command) + .then(res => { + assert.strictEqual(res.ETag, `"${correctMD5}"`); + done(); + }) + .catch(err => { + assert.equal(err, null, `Error getting object: ${err}`); + done(err); + }); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/get/getAwsVersioning.js b/tests/functional/aws-node-sdk/test/multipleBackend/get/getAwsVersioning.js index fd17cef45e..5f514e2bca 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/get/getAwsVersioning.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/get/getAwsVersioning.js @@ -1,6 +1,10 @@ const assert = require('assert'); const async = require('async'); const withV4 = require('../../support/withV4'); +const { GetObjectCommand, + PutObjectCommand, + CreateBucketCommand, + DeleteObjectCommand } = require('@aws-sdk/client-s3'); const BucketUtility = require('../../../lib/utility/bucket-util'); const { awsS3, @@ -22,17 +26,26 @@ const bucket = `getawsversioning${genUniqID()}`; function getAndAssertVersions(s3, bucket, key, versionIds, expectedData, cb) { async.mapSeries(versionIds, (versionId, next) => { - s3.getObject({ Bucket: bucket, Key: key, - VersionId: versionId }, next); + s3.send(new GetObjectCommand({ Bucket: bucket, Key: key, + VersionId: versionId })).then(async result => { + const resultBody = await result.Body.transformToString(); + next(null, { + VersionId: result.VersionId, + Body: resultBody + }); + }) + .catch(err => { + next(err); + }); }, (err, 
results) => { - assert.strictEqual(err, null, 'Expected success ' + - `getting object, got error ${err}`); + if (err) { + return cb(err); + } const resultIds = results.map(result => result.VersionId); - const resultData = results.map(result => - result.Body.toString()); + const resultData = results.map(result => result.Body); assert.deepStrictEqual(resultIds, versionIds); assert.deepStrictEqual(resultData, expectedData); - cb(); + return cb(); }); } @@ -47,7 +60,7 @@ function testSuite() { process.stdout.write('Creating bucket'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() + return s3.send(new CreateBucketCommand({ Bucket: bucket })) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; @@ -71,28 +84,30 @@ function testSuite() { it('should not return version ids when versioning has not been ' + 'configured via CloudServer', done => { const key = `somekey-${genUniqID()}`; - s3.putObject({ Bucket: bucket, Key: key, Body: someBody, - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, data) => { - assert.strictEqual(err, null, 'Expected success ' + - `putting object, got error ${err}`); + s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: someBody, + Metadata: { 'scal-location-constraint': awsLocation } })).then(data => { assert.strictEqual(data.VersionId, undefined); getAndAssertResult(s3, { bucket, key, body: someBody, expectedVersionId: false }, done); + }).catch(err => { + assert.strictEqual(err, null, 'Expected success ' + + `putting object, got error ${err}`); + done(); }); }); it('should not return version ids when versioning has not been ' + 'configured via CloudServer, even when version id specified', done => { const key = `somekey-${genUniqID()}`; - s3.putObject({ Bucket: bucket, Key: key, Body: someBody, - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, data) => { - assert.strictEqual(err, null, 'Expected 
success ' + - `putting object, got error ${err}`); + s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: someBody, + Metadata: { 'scal-location-constraint': awsLocation } })).then(data => { assert.strictEqual(data.VersionId, undefined); getAndAssertResult(s3, { bucket, key, body: someBody, versionId: 'null', expectedVersionId: false }, done); + }).catch(err => { + assert.strictEqual(err, null, 'Expected success ' + + `putting object, got error ${err}`); + done(); }); }); @@ -100,9 +115,13 @@ function testSuite() { 'has been configured via CloudServer', done => { const key = `somekey-${genUniqID()}`; async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key, Body: someBody, - Metadata: { 'scal-location-constraint': awsLocation } }, - err => next(err)), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: someBody, + Metadata: { 'scal-location-constraint': awsLocation } })).then(() => next()) + .catch(err => { + assert.strictEqual(err, null, 'Expected success ' + + `putting object, got error ${err}`); + next(err); + }), next => enableVersioning(s3, bucket, next), // get with version id specified next => getAndAssertResult(s3, { bucket, key, body: someBody, @@ -133,13 +152,21 @@ function testSuite() { const key = `somekey-${genUniqID()}`; const data = ['data1', 'data2']; async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key, Body: data[0], - Metadata: { 'scal-location-constraint': awsLocation } }, - err => next(err)), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: data[0], + Metadata: { 'scal-location-constraint': awsLocation } })).then(() => next()) + .catch(err => { + assert.strictEqual(err, null, 'Expected success ' + + `putting object, got error ${err}`); + next(err); + }), next => suspendVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key, Body: data[1], - Metadata: { 'scal-location-constraint': awsLocation } }, - err => next(err)), + next => s3.send(new 
PutObjectCommand({ Bucket: bucket, Key: key, Body: data[1], + Metadata: { 'scal-location-constraint': awsLocation } })).then(() => next()) + .catch(err => { + assert.strictEqual(err, null, 'Expected success ' + + `putting object, got error ${err}`); + next(err); + }), // get latest version next => getAndAssertResult(s3, { bucket, key, body: data[1], expectedVersionId: 'null' }, next), @@ -155,23 +182,32 @@ function testSuite() { const data = [...Array(3).keys()].map(i => `data${i}`); let firstVersionId; async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key, Body: data[0], - Metadata: { 'scal-location-constraint': awsLocation } }, - err => next(err)), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: data[0], + Metadata: { 'scal-location-constraint': awsLocation } })).then(() => next()) + .catch(err => { + assert.strictEqual(err, null, 'Expected success ' + + `putting object, got error ${err}`); + next(err); + }), next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key, Body: data[1], - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, result) => { - assert.strictEqual(err, null, 'Expected success ' + - `putting object, got error ${err}`); + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: data[1], + Metadata: { 'scal-location-constraint': awsLocation } })).then(result => { assert.notEqual(result.VersionId, 'null'); firstVersionId = result.VersionId; next(); + }).catch(err => { + assert.strictEqual(err, null, 'Expected success ' + + `putting object, got error ${err}`); + next(err); }), next => suspendVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key, Body: data[3], - Metadata: { 'scal-location-constraint': awsLocation } }, - err => next(err)), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: data[3], + Metadata: { 'scal-location-constraint': awsLocation } })).then(() => next()) + .catch(err => { + 
assert.strictEqual(err, null, 'Expected success ' + + `putting object, got error ${err}`); + next(err); + }), // get latest version next => getAndAssertResult(s3, { bucket, key, body: data[3], expectedVersionId: 'null' }, next), @@ -191,9 +227,11 @@ function testSuite() { const data = [...Array(5).keys()].map(i => i.toString()); const versionIds = ['null']; async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key, Body: data[0], - Metadata: { 'scal-location-constraint': awsLocation } }, - err => next(err)), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: data[0], + Metadata: { 'scal-location-constraint': awsLocation } })).then(() => next()) + .catch(err => { + next(err); + }), next => putVersionsToAws(s3, bucket, key, data.slice(1), next), (ids, next) => { versionIds.push(...ids); @@ -210,9 +248,11 @@ function testSuite() { const data = [...Array(5).keys()].map(i => i.toString()); const versionIds = ['null']; async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key, Body: data[0], - Metadata: { 'scal-location-constraint': awsLocation } }, - err => next(err)), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: data[0], + Metadata: { 'scal-location-constraint': awsLocation } })).then(() => next()) + .catch(err => { + next(err); + }), next => putVersionsToAws(s3, bucket, key, data.slice(1), next), (ids, next) => { versionIds.push(...ids); @@ -276,9 +316,11 @@ function testSuite() { const key = `somekey-${genUniqID()}`; async.waterfall([ next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key, Body: someBody, - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, res) => next(err, res.VersionId)), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: someBody, + Metadata: { 'scal-location-constraint': awsLocation } })).then(res => next(null, res.VersionId)) + .catch(err => { + next(err); + }), // create a delete marker in AWS 
(versionId, next) => awsS3.deleteObject({ Bucket: awsBucket, Key: key }, err => next(err, versionId)), @@ -293,12 +335,17 @@ function testSuite() { const key = `somekey-${genUniqID()}`; async.waterfall([ next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key, Body: someBody, - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, res) => next(err, res.VersionId)), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: someBody, + Metadata: { 'scal-location-constraint': awsLocation } })).then(res => next(null, res.VersionId)) + .catch(err => { + next(err); + }), // put an object in AWS - (versionId, next) => awsS3.putObject({ Bucket: awsBucket, - Key: key }, err => next(err, versionId)), + (versionId, next) => awsS3.send(new PutObjectCommand({ Bucket: awsBucket, + Key: key })).then(() => next(null, versionId)) + .catch(err => { + next(err); + }), (versionId, next) => getAndAssertResult(s3, { bucket, key, body: someBody, expectedVersionId: versionId }, next), ], done); @@ -310,19 +357,27 @@ function testSuite() { const key = `somekey-${genUniqID()}`; async.waterfall([ next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key, Body: someBody, - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, res) => next(err, res.VersionId)), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: someBody, + Metadata: { 'scal-location-constraint': awsLocation } })).then(res => next(null, res.VersionId)) + .catch(err => { + next(err); + }), // get the latest version id in aws - (s3vid, next) => awsS3.getObject({ Bucket: awsBucket, - Key: key }, (err, res) => next(err, s3vid, res.VersionId)), - (s3VerId, awsVerId, next) => awsS3.deleteObject({ - Bucket: awsBucket, Key: key, VersionId: awsVerId }, - err => next(err, s3VerId)), - (s3VerId, next) => s3.getObject({ Bucket: bucket, Key: key }, - err => { - assert.strictEqual(err.code, 'LocationNotFound'); 
- assert.strictEqual(err.statusCode, 424); + (s3vid, next) => awsS3.send(new GetObjectCommand({ Bucket: awsBucket, + Key: key })).then(res => next(null, s3vid, res.VersionId)) + .catch(err => { + next(err); + }), + (s3VerId, awsVerId, next) => awsS3.send(new DeleteObjectCommand({ + Bucket: awsBucket, Key: key, VersionId: awsVerId })).then(() => next(null, s3VerId)) + .catch(err => { + next(err); + }), + (s3VerId, next) => s3.send(new GetObjectCommand({ Bucket: bucket, Key: key })) + .then(res => next(null, s3VerId, res.VersionId)) + .catch(err => { + assert.strictEqual(err.name, 'LocationNotFound'); + assert.strictEqual(err.$metadata.httpStatusCode, 424); next(); }), ], done); @@ -334,19 +389,28 @@ function testSuite() { const key = `somekey-${genUniqID()}`; async.waterfall([ next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key, Body: someBody, - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, res) => next(err, res.VersionId)), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: someBody, + Metadata: { 'scal-location-constraint': awsLocation } })).then(res => next(null, res.VersionId)) + .catch(err => { + next(err); + }), // get the latest version id in aws - (s3vid, next) => awsS3.getObject({ Bucket: awsBucket, - Key: key }, (err, res) => next(err, s3vid, res.VersionId)), - (s3VerId, awsVerId, next) => awsS3.deleteObject({ - Bucket: awsBucket, Key: key, VersionId: awsVerId }, - err => next(err, s3VerId)), - (s3VerId, next) => s3.getObject({ Bucket: bucket, Key: key, - VersionId: s3VerId }, err => { - assert.strictEqual(err.code, 'LocationNotFound'); - assert.strictEqual(err.statusCode, 424); + (s3vid, next) => awsS3.send(new GetObjectCommand({ Bucket: awsBucket, + Key: key })).then(res => next(null, s3vid, res.VersionId)) + .catch(err => { + next(err); + }), + (s3VerId, awsVerId, next) => awsS3.send(new DeleteObjectCommand({ + Bucket: awsBucket, Key: key, VersionId: awsVerId })).then(() 
=> next(null, s3VerId)) + .catch(err => { + next(err); + }), + (s3VerId, next) => s3.send(new GetObjectCommand({ Bucket: bucket, Key: key, + VersionId: s3VerId })).then(() => { + next(); + }).catch(err => { + assert.strictEqual(err.name, 'LocationNotFound'); + assert.strictEqual(err.$metadata.httpStatusCode, 424); next(); }), ], done); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/get/getAzure.js b/tests/functional/aws-node-sdk/test/multipleBackend/get/getAzure.js index d38a1e7069..b874096de5 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/get/getAzure.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/get/getAzure.js @@ -1,4 +1,7 @@ const assert = require('assert'); +const { CreateBucketCommand, + PutObjectCommand, + GetObjectCommand } = require('@aws-sdk/client-s3'); const BucketUtility = require('../../../lib/utility/bucket-util'); const withV4 = require('../../support/withV4'); @@ -31,7 +34,7 @@ function testSuite() { process.stdout.write('Creating bucket'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: azureContainerName }).promise() + return s3.send(new CreateBucketCommand({ Bucket: azureContainerName })) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; @@ -56,26 +59,30 @@ function testSuite() { const testKey = `${key.name}-${Date.now()}`; before(done => { setTimeout(() => { - s3.putObject({ + s3.send(new PutObjectCommand({ Bucket: azureContainerName, Key: testKey, Body: key.body, Metadata: { 'scal-location-constraint': azureLocation, }, - }, done); + })).then(() => done()) + .catch(err => { + done(err); + }); }, azureTimeout); }); it(`should get an ${key.describe} object from Azure`, done => { - s3.getObject({ Bucket: azureContainerName, Key: - testKey }, - (err, res) => { - assert.equal(err, null, 'Expected success ' + - `but got error ${err}`); - assert.strictEqual(res.ETag, `"${key.MD5}"`); - done(); - }); + s3.send(new 
GetObjectCommand({ Bucket: azureContainerName, Key: + testKey })).then(res => { + assert.strictEqual(res.ETag, `"${key.MD5}"`); + done(); + }).catch(err => { + assert.equal(err, null, 'Expected success ' + + `but got error ${err}`); + done(err); + }); }); }); }); @@ -83,44 +90,53 @@ function testSuite() { describe('with range', () => { const azureObject = uniqName(keyObject); before(done => { - s3.putObject({ + s3.send(new PutObjectCommand({ Bucket: azureContainerName, Key: azureObject, Body: '0123456789', Metadata: { 'scal-location-constraint': azureLocation, }, - }, done); + })).then(() => done()) + .catch(err => { + done(err); + }); }); it('should get an object with body 012345 with "bytes=0-5"', done => { - s3.getObject({ + s3.send(new GetObjectCommand({ Bucket: azureContainerName, Key: azureObject, Range: 'bytes=0-5', - }, (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); + })).then(async res => { + const body = await res.Body.transformToString(); assert.equal(res.ContentLength, 6); assert.strictEqual(res.ContentRange, 'bytes 0-5/10'); - assert.strictEqual(res.Body.toString(), '012345'); + assert.strictEqual(body, '012345'); done(); + }).catch(err => { + assert.equal(err, null, 'Expected success but got ' + + `error ${err}`); + done(err); }); }); it('should get an object with body 456789 with "bytes=4-"', done => { - s3.getObject({ + s3.send(new GetObjectCommand({ Bucket: azureContainerName, Key: azureObject, Range: 'bytes=4-', - }, (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); + })).then(async res => { + const body = await res.Body.transformToString(); assert.equal(res.ContentLength, 6); assert.strictEqual(res.ContentRange, 'bytes 4-9/10'); - assert.strictEqual(res.Body.toString(), '456789'); + assert.strictEqual(body, '456789'); done(); + }).catch(err => { + assert.equal(err, null, 'Expected success but got ' + + `error ${err}`); + done(err); }); }); }); @@ -128,33 +144,38 
@@ function testSuite() { describe('returning error', () => { const azureObject = uniqName(keyObject); before(done => { - s3.putObject({ + s3.send(new PutObjectCommand({ Bucket: azureContainerName, Key: azureObject, Body: normalBody, Metadata: { 'scal-location-constraint': azureLocation, }, - }, err => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); + })).then(() => { azureClient.getContainerClient(azureContainerName) .deleteBlob(azureObject).then(done, err => { assert.equal(err, null, 'Expected success but got ' + `error ${err}`); done(err); }); + }) + .catch(err => { + assert.equal(err, null, 'Expected success but got ' + + `error ${err}`); + done(err); }); }); it('should return an error on get done to object deleted ' + 'from Azure', done => { - s3.getObject({ + s3.send(new GetObjectCommand({ Bucket: azureContainerName, Key: azureObject, - }, err => { - assert.strictEqual(err.code, 'LocationNotFound'); + })).then(() => { done(); + }).catch(err => { + assert.strictEqual(err.name, 'LocationNotFound'); + done(err); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/get/getGcp.js b/tests/functional/aws-node-sdk/test/multipleBackend/get/getGcp.js index 28234c78e9..fdb3f585de 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/get/getGcp.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/get/getGcp.js @@ -1,5 +1,8 @@ const assert = require('assert'); const withV4 = require('../../support/withV4'); +const { PutObjectCommand, + GetObjectCommand, + CreateBucketCommand } = require('@aws-sdk/client-s3'); const BucketUtility = require('../../../lib/utility/bucket-util'); const { describeSkipIfNotMultipleOrCeph, @@ -29,7 +32,7 @@ describe('Multiple backend get object', function testSuite() { process.stdout.write('Creating bucket'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() + return s3.send(new CreateBucketCommand({ 
Bucket: bucket })) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; @@ -53,28 +56,28 @@ describe('Multiple backend get object', function testSuite() { describeSkipIfNotMultipleOrCeph('with objects in GCP', () => { before(() => { process.stdout.write('Putting object to GCP\n'); - return s3.putObject({ Bucket: bucket, Key: gcpObject, + return s3.send(new PutObjectCommand({ Bucket: bucket, Key: gcpObject, Body: body, Metadata: { 'scal-location-constraint': gcpLocation }, - }).promise() + }) .then(() => { process.stdout.write('Putting 0-byte object to GCP\n'); - return s3.putObject({ Bucket: bucket, + return s3.send(new PutObjectCommand({ Bucket: bucket, Key: emptyGcpObject, Metadata: { 'scal-location-constraint': gcpLocation }, - }).promise(); + })); }) .then(() => { process.stdout.write('Putting large object to GCP\n'); - return s3.putObject({ Bucket: bucket, + return s3.send(new PutObjectCommand({ Bucket: bucket, Key: bigObject, Body: bigBody, Metadata: { 'scal-location-constraint': gcpLocation }, - }).promise(); + })); }) .catch(err => { process.stdout.write(`Error putting objects: ${err}\n`); throw err; - }); + })); }); const getTests = [ @@ -108,16 +111,18 @@ describe('Multiple backend get object', function testSuite() { const { Bucket, Key, range, size } = test.input; const { MD5, contentRange } = test.output; it(test.msg, done => { - s3.getObject({ Bucket, Key, Range: range }, - (err, res) => { - assert.equal(err, null, - `Expected success but got error ${err}`); + s3.send(new GetObjectCommand({ Bucket, Key, Range: range })).then(res => { if (range) { assert.strictEqual(res.ContentLength, size); assert.strictEqual(res.ContentRange, contentRange); } assert.strictEqual(res.ETag, `"${MD5}"`); done(); + }) + .catch(err => { + assert.equal(err, null, + `Expected success but got error ${err}`); + done(err); }); }); }); @@ -125,20 +130,24 @@ describe('Multiple backend get object', function testSuite() { 
describeSkipIfNotMultipleOrCeph('with bucketMatch set to false', () => { beforeEach(done => { - s3.putObject({ Bucket: bucket, Key: mismatchObject, Body: body, - Metadata: { 'scal-location-constraint': gcpLocationMismatch } }, - err => { - assert.equal(err, null, `Err putting object: ${err}`); + s3.send(new PutObjectCommand({ Bucket: bucket, Key: mismatchObject, Body: body, + Metadata: { 'scal-location-constraint': gcpLocationMismatch } })).then(() => { done(); + }) + .catch(err => { + assert.equal(err, null, `Err putting object: ${err}`); + done(err); }); }); it('should get an object from GCP', done => { - s3.getObject({ Bucket: bucket, Key: mismatchObject }, - (err, res) => { - assert.equal(err, null, `Error getting object: ${err}`); + s3.send(new GetObjectCommand({ Bucket: bucket, Key: mismatchObject })).then(res => { assert.strictEqual(res.ETag, `"${correctMD5}"`); done(); + }) + .catch(err => { + assert.equal(err, null, `Error getting object: ${err}`); + done(err); }); }); }); From e166ad87123ad2caad7a3bef047b87b1fedc66be Mon Sep 17 00:00:00 2001 From: Maha Benzekri Date: Tue, 30 Sep 2025 14:32:33 +0200 Subject: [PATCH 03/12] put operation related tests migration Issue: CLDSRV-724 --- .../test/multipleBackend/put/put.js | 572 ++++++++---------- .../test/multipleBackend/put/putAzure.js | 244 ++++---- 2 files changed, 410 insertions(+), 406 deletions(-) diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/put/put.js b/tests/functional/aws-node-sdk/test/multipleBackend/put/put.js index 7def7262f1..4b6c41dd41 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/put/put.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/put/put.js @@ -1,6 +1,12 @@ -const { promisify } = require('util'); const assert = require('assert'); -const async = require('async'); +const { + PutObjectCommand, + GetObjectCommand, + HeadObjectCommand, + PutBucketVersioningCommand, + CreateBucketCommand, + GetBucketLocationCommand, +} = 
require('@aws-sdk/client-s3'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); @@ -24,98 +30,89 @@ const bigAWSMD5 = 'a7d414b9133d6483d9a1c4e04e856e3b-2'; let bucketUtil; let s3; -const retryTimeout = 10000; - -function getAwsSuccess(key, awsMD5, location, cb) { - return getAwsRetry({ key }, 0, (err, res) => { - assert.strictEqual(err, null, 'Expected success, got error ' + - `on direct AWS call: ${err}`); - if (location === awsLocationEncryption) { - // doesn't check ETag because it's different - // with every PUT with encryption - assert.strictEqual(res.ServerSideEncryption, 'AES256'); - } - if (process.env.ENABLE_KMS_ENCRYPTION !== 'true') { - assert.strictEqual(res.ETag, `"${awsMD5}"`); - } - assert.strictEqual(res.Metadata['scal-location-constraint'], - location); - return cb(res); +async function getAwsSuccess(key, awsMD5, location) { + return new Promise((resolve, reject) => { + getAwsRetry({ key }, 0, (err, res) => { + if (err) { + reject(new Error(`Expected success, got error on direct AWS call: ${err}`)); + return; + } + + if (location === awsLocationEncryption) { + // doesn't check ETag because it's different + // with every PUT with encryption + assert.strictEqual(res.ServerSideEncryption, 'AES256'); + } + if (process.env.ENABLE_KMS_ENCRYPTION !== 'true') { + assert.strictEqual(res.ETag, `"${awsMD5}"`); + } + assert.strictEqual(res.Metadata['scal-location-constraint'], + location); + resolve(res); + }); }); } -function getAwsError(key, expectedError, cb) { - return getAwsRetry({ key }, 0, err => { - assert.notStrictEqual(err, undefined, - 'Expected error but did not find one'); - assert.strictEqual(err.code, expectedError, - `Expected error code ${expectedError} but got ${err.code}`); - cb(); +async function getAwsError(key, expectedError) { + return new Promise((resolve, reject) => { + getAwsRetry({ key }, 0, err => { + try { + assert.notStrictEqual(err, undefined, + 'Expected error but 
did not find one'); + assert.strictEqual(err.name, expectedError); + resolve(); + } catch (assertionError) { + reject(assertionError); + } + }); }); } -function awsGetCheck(objectKey, s3MD5, awsMD5, location, cb) { - process.stdout.write('Getting object\n'); - s3.getObject({ Bucket: bucket, Key: objectKey }, - function s3GetCallback(err, res) { - if (err && err.code === 'NetworkingError') { - return setTimeout(() => { - process.stdout.write('Getting object retry\n'); - s3.getObject({ Bucket: bucket, Key: objectKey }, s3GetCallback); - }, retryTimeout); - } - assert.strictEqual(err, null, 'Expected success, got error ' + - `on call to AWS through S3: ${err}`); - assert.strictEqual(res.ETag, `"${s3MD5}"`); - assert.strictEqual(res.Metadata['scal-location-constraint'], - location); - process.stdout.write('Getting object from AWS\n'); - return getAwsSuccess(objectKey, awsMD5, location, cb); - }); +async function awsGetCheck(objectKey, s3MD5, awsMD5, location) { + const res = await s3.send(new GetObjectCommand({ Bucket: bucket, Key: objectKey })); + assert.strictEqual(res.ETag, `"${s3MD5}"`); + + if (process.env.ENABLE_KMS_ENCRYPTION === 'true') { + assert.strictEqual(res.ServerSideEncryption, 'AES256'); + } + + process.stdout.write('Getting object from AWS\n'); + return await getAwsSuccess(objectKey, awsMD5, location); } -describe('MultipleBackend put object', function testSuite() { + +describeSkipIfNotMultiple('MultipleBackend put object', function testSuite() { this.timeout(250000); withV4(sigCfg => { - beforeEach(() => { + beforeEach(async () => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; process.stdout.write('Creating bucket\n'); - s3.createBucketPromise = promisify(s3.createBucket); + if (process.env.ENABLE_KMS_ENCRYPTION === 'true') { - s3.createBucketPromise = createEncryptedBucketPromise; + await createEncryptedBucketPromise({ Bucket: bucket }); + } else { + await s3.send(new CreateBucketCommand({ Bucket: bucket })); } - return 
s3.createBucketPromise({ Bucket: bucket }) - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); }); - afterEach(() => { + afterEach(async () => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + await bucketUtil.empty(bucket); + await bucketUtil.deleteOne(bucket); }); // aws-sdk now (v2.363.0) returns 'UriParameterError' error it.skip('should return an error to put request without a valid ' + 'bucket name', - done => { + async () => { const key = `somekey-${genUniqID()}`; - s3.putObject({ Bucket: '', Key: key }, err => { - assert.notEqual(err, null, - 'Expected failure but got success'); + try { + await s3.send(new PutObjectCommand({ Bucket: '', Key: key })); + throw new Error('Expected failure but got success'); + } catch (err) { assert.strictEqual(err.code, 'MethodNotAllowed'); - done(); - }); + } }); describeSkipIfNotMultiple('with set location from "x-amz-meta-scal-' + @@ -125,56 +122,51 @@ describe('MultipleBackend put object', function testSuite() { } it('should return an error to put request without a valid ' + - 'location constraint', done => { + 'location constraint', async () => { const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, Key: key, Body: body, Metadata: { 'scal-location-constraint': 'fail-region' } }; - s3.putObject(params, err => { - assert.notEqual(err, null, 'Expected failure but got ' + - 'success'); + try { + await s3.send(new PutObjectCommand(params)); + throw new Error('Expected failure but got success'); + } catch (err) { assert.strictEqual(err.code, 'InvalidArgument'); - done(); - }); + } }); - it('should put an object to mem', done => { + it('should put an object to mem', async () => { const key = `somekey-${genUniqID()}`; const params = { Bucket: 
bucket, Key: key, Body: body, Metadata: { 'scal-location-constraint': memLocation }, }; - s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { - assert.strictEqual(err, null, 'Expected success, ' + - `got error ${err}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - done(); - }); + + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + const res = await s3.send(new GetObjectCommand({ Bucket: bucket, Key: key })); + assert.strictEqual(res.ETag, `"${correctMD5}"`); }); - it('should put a 0-byte object to mem', done => { + it('should put a 0-byte object to mem', async () => { const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, Key: key, Metadata: { 'scal-location-constraint': memLocation }, }; - s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - s3.getObject({ Bucket: bucket, Key: key }, - (err, res) => { - assert.strictEqual(err, null, 'Expected success, ' + - `got error ${err}`); - assert.strictEqual(res.ETag, `"${emptyMD5}"`); - done(); - }); + + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + const res = await s3.send(new GetObjectCommand({ Bucket: bucket, Key: key })); + assert.strictEqual(res.ETag, `"${emptyMD5}"`); }); - it('should put only metadata to mem with mdonly header', done => { + it('should put only metadata to mem with mdonly header', async () => { const key = `mdonly-${genUniqID()}`; const b64 = Buffer.from(correctMD5, 'hex').toString('base64'); const params = { Bucket: bucket, Key: key, @@ -183,19 +175,17 @@ describe('MultipleBackend put object', function testSuite() { 
'md5chksum': b64, 'size': body.length.toString(), } }; - s3.putObject(params, err => { - assert.strictEqual(err, null, `Unexpected err: ${err}`); - s3.headObject({ Bucket: bucket, Key: key }, - (err, res) => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - getAwsError(key, 'NoSuchKey', () => done()); - }); + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + const res = await s3.send(new HeadObjectCommand({ Bucket: bucket, Key: key })); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + await getAwsError(key, 'NoSuchKey'); }); - it('should put actual object with body and mdonly header', done => { + it('should put actual object with body and mdonly header', async () => { const key = `mdonly-${genUniqID()}`; const b64 = Buffer.from(correctMD5, 'hex').toString('base64'); const params = { Bucket: bucket, Key: key, Body: body, @@ -204,20 +194,17 @@ describe('MultipleBackend put object', function testSuite() { 'md5chksum': b64, 'size': body.length.toString(), } }; - s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { - assert.strictEqual(err, null, 'Expected success, ' + - `got error ${err}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - awsGetCheck(key, correctMD5, correctMD5, awsLocation, - () => done()); - }); + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + const res = await s3.send(new GetObjectCommand({ Bucket: bucket, Key: key })); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + await awsGetCheck(key, correctMD5, correctMD5, awsLocation); }); - it('should put 0-byte normally 
with mdonly header', done => { + it('should put 0-byte normally with mdonly header', async () => { const key = `mdonly-${genUniqID()}`; const b64 = Buffer.from(emptyMD5, 'hex').toString('base64'); const params = { Bucket: bucket, Key: key, @@ -226,196 +213,188 @@ describe('MultipleBackend put object', function testSuite() { 'md5chksum': b64, 'size': '0', } }; - s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - awsGetCheck(key, emptyMD5, emptyMD5, awsLocation, - () => done()); + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + await awsGetCheck(key, emptyMD5, emptyMD5, awsLocation); }); - it('should put a 0-byte object to AWS', done => { + it('should put a 0-byte object to AWS', async () => { const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, Key: key, Metadata: { 'scal-location-constraint': awsLocation }, }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return awsGetCheck(key, emptyMD5, emptyMD5, awsLocation, - () => done()); + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + await awsGetCheck(key, emptyMD5, emptyMD5, awsLocation); }); - it('should put an object to file', done => { + it('should put an object to file', async () => { const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, Key: key, Body: body, Metadata: { 'scal-location-constraint': fileLocation }, }; - s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { - assert.strictEqual(err, null, 'Expected success, ' + - `got error ${err}`); - 
assert.strictEqual(res.ETag, `"${correctMD5}"`); - done(); - }); + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + const res = await s3.send(new GetObjectCommand({ Bucket: bucket, Key: key })); + assert.strictEqual(res.ETag, `"${correctMD5}"`); }); - it('should put an object to AWS', done => { + it('should put an object to AWS', async () => { const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, Key: key, Body: body, Metadata: { 'scal-location-constraint': awsLocation } }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return awsGetCheck(key, correctMD5, correctMD5, awsLocation, - () => done()); + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + await awsGetCheck(key, correctMD5, correctMD5, awsLocation); }); it('should encrypt body only if bucket encrypted putting ' + 'object to AWS', - done => { + async () => { const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, Key: key, Body: body, Metadata: { 'scal-location-constraint': awsLocation } }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return getAwsSuccess(key, correctMD5, awsLocation, - () => done()); + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + await getAwsSuccess(key, correctMD5, awsLocation); }); - it('should put an object to AWS with encryption', done => { + it('should put an object to AWS with encryption', async () => { // Test refuses to skip using itSkipCeph so just mark it passed if (isCEPH) { - return 
done(); + return; } const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, Key: key, Body: body, Metadata: { 'scal-location-constraint': awsLocationEncryption } }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return awsGetCheck(key, correctMD5, correctMD5, - awsLocationEncryption, () => done()); + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + await awsGetCheck(key, correctMD5, correctMD5, + awsLocationEncryption); }); it('should return a version id putting object to ' + - 'to AWS with versioning enabled', done => { + 'to AWS with versioning enabled', async () => { const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, Key: key, Body: body, Metadata: { 'scal-location-constraint': awsLocation } }; - async.waterfall([ - next => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningEnabled, - }, err => next(err)), - next => s3.putObject(params, (err, res) => { - assert.strictEqual(err, null, 'Expected success ' + - `putting object, got error ${err}`); - assert(res.VersionId); - next(null, res.ETag); - }), - (eTag, next) => getAwsSuccess(key, correctMD5, awsLocation, - () => next()), - ], done); + await s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: versioningEnabled, + })); + const res = await s3.send(new PutObjectCommand(params)); + assert.ok(res.VersionId); + await getAwsSuccess(key, correctMD5, awsLocation); }); - it('should put a large object to AWS', done => { + it('should put a large object to AWS', async () => { const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, Key: key, Body: bigBody, Metadata: { 'scal-location-constraint': awsLocation } }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected sucess, ' 
+ - `got error ${err}`); - return awsGetCheck(key, bigS3MD5, bigAWSMD5, awsLocation, - () => done()); + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + await awsGetCheck(key, bigS3MD5, bigAWSMD5, awsLocation); }); it('should put objects with same key to AWS ' + - 'then file, and object should only be present in file', done => { + 'then file, and object should only be present in file', async () => { const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, Key: key, Body: body, Metadata: { 'scal-location-constraint': awsLocation } }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - params.Metadata = - { 'scal-location-constraint': fileLocation }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return s3.getObject({ Bucket: bucket, Key: key }, - (err, res) => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - assert.strictEqual( - res.Metadata['scal-location-constraint'], - fileLocation); - return getAwsError(key, 'NoSuchKey', done); - }); - }); + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); + }); + params.Metadata = + { 'scal-location-constraint': fileLocation }; + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + const res = await s3.send(new GetObjectCommand({ Bucket: bucket, Key: key })); + assert.strictEqual( + res.Metadata['scal-location-constraint'], + fileLocation); + return await getAwsError(key, 'NoSuchKey'); }); it('should put objects with same key to file ' + - 
'then AWS, and object should only be present on AWS', done => { + 'then AWS, and object should only be present on AWS', async () => { const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, Key: key, Body: body, Metadata: { 'scal-location-constraint': fileLocation } }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - params.Metadata = { - 'scal-location-constraint': awsLocation }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return awsGetCheck(key, correctMD5, correctMD5, - awsLocation, () => done()); - }); + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + params.Metadata = { + 'scal-location-constraint': awsLocation }; + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); + }); + await awsGetCheck(key, correctMD5, correctMD5, + awsLocation); }); it('should put two objects to AWS with same ' + - 'key, and newest object should be returned', done => { + 'key, and newest object should be returned', async () => { const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, Key: key, Body: body, Metadata: { 'scal-location-constraint': awsLocation, 'unique-header': 'first object' } }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - params.Metadata = { 'scal-location-constraint': awsLocation, + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); + }); + params.Metadata = { 'scal-location-constraint': awsLocation, 'unique-header': 'second 
object' }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return awsGetCheck(key, correctMD5, correctMD5, - awsLocation, result => { - assert.strictEqual(result.Metadata - ['unique-header'], 'second object'); - done(); - }); - }); + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + const result = await awsGetCheck(key, correctMD5, correctMD5, + awsLocation); + assert.strictEqual(result.Metadata + ['unique-header'], + 'second object'); }); }); }); @@ -429,9 +408,9 @@ describeSkipIfNotMultiple('MultipleBackend put object based on bucket location', s3 = bucketUtil.s3; }); - afterEach(() => { + afterEach(async () => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) + await bucketUtil.empty(bucket) .then(() => { process.stdout.write('Deleting bucket\n'); return bucketUtil.deleteOne(bucket); @@ -443,72 +422,60 @@ }); it('should put an object to mem with no location header', - done => { + async () => { process.stdout.write('Creating bucket\n'); - return s3.createBucket({ Bucket: bucket, + await s3.send(new CreateBucketCommand({ Bucket: bucket, CreateBucketConfiguration: { LocationConstraint: memLocation, }, - }, err => { - assert.equal(err, null, `Error creating bucket: ${err}`); - process.stdout.write('Putting object\n'); - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, Body: body }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { - assert.strictEqual(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - done();
- }); - }); + })); + process.stdout.write('Putting object\n'); + const key = `somekey-${genUniqID()}`; + const params = { Bucket: bucket, Key: key, Body: body }; + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + const res = await s3.send(new GetObjectCommand({ Bucket: bucket, Key: key })); + assert.strictEqual(res.ETag, `"${correctMD5}"`); }); - it('should put an object to file with no location header', done => { + it('should put an object to file with no location header', async () => { process.stdout.write('Creating bucket\n'); - return s3.createBucket({ Bucket: bucket, + await s3.send(new CreateBucketCommand({ Bucket: bucket, CreateBucketConfiguration: { LocationConstraint: fileLocation, }, - }, err => { - assert.equal(err, null, `Error creating bucket: ${err}`); - process.stdout.write('Putting object\n'); - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, Body: body }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { - assert.strictEqual(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - done(); - }); - }); + })); + process.stdout.write('Putting object\n'); + const key = `somekey-${genUniqID()}`; + const params = { Bucket: bucket, Key: key, Body: body }; + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + const res = await s3.send(new GetObjectCommand({ Bucket: bucket, Key: key })); + assert.strictEqual(res.ETag, `"${correctMD5}"`); }); - it('should put an object to AWS with no location header', done => { + it('should put an object 
to AWS with no location header', async () => { process.stdout.write('Creating bucket\n'); - return s3.createBucket({ Bucket: bucket, + await s3.send(new CreateBucketCommand({ Bucket: bucket, CreateBucketConfiguration: { LocationConstraint: awsLocation, }, - }, err => { - assert.equal(err, null, `Error creating bucket: ${err}`); - process.stdout.write('Putting object\n'); - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, Body: body }; - return s3.putObject(params, err => { - assert.equal(err, null, - `Expected success, got error ${err}`); - return awsGetCheck(key, correctMD5, correctMD5, undefined, - () => done()); - }); + })); + process.stdout.write('Putting object\n'); + const key = `somekey-${genUniqID()}`; + const params = { Bucket: bucket, Key: key, Body: body }; + await s3.send(new PutObjectCommand(params)).then(() => { + process.stdout.write('Putting object succeeded\n'); + }).catch(err => { + throw new Error(`Expected success, got error: ${err}`); }); + await awsGetCheck(key, correctMD5, correctMD5, undefined); }); }); }); @@ -532,38 +499,33 @@ describe('MultipleBackend put based on request endpoint', () => { }); }); - it('should create bucket in corresponding backend', done => { + it('should create bucket in corresponding backend', async () => { process.stdout.write('Creating bucket'); - const request = s3.createBucket({ Bucket: bucket }); - request.on('build', () => { - request.httpRequest.body = ''; - }); - request.send(err => { - assert.strictEqual(err, null, `Error creating bucket: ${err}`); - const key = `somekey-${genUniqID()}`; - s3.putObject({ Bucket: bucket, Key: key, Body: body }, err => { - assert.strictEqual(err, null, 'Expected succes, ' + - `got error ${JSON.stringify(err)}`); - const host = request.service.endpoint.hostname; - let endpoint = config.restEndpoints[host]; - // s3 returns '' for us-east-1 - if (endpoint === 'us-east-1') { - endpoint = ''; - } - s3.getBucketLocation({ Bucket: bucket }, (err, 
data) => { - assert.strictEqual(err, null, 'Expected succes, ' + - `got error ${JSON.stringify(err)}`); - assert.strictEqual(data.LocationConstraint, endpoint); - s3.getObject({ Bucket: bucket, Key: key }, - (err, res) => { - assert.strictEqual(err, null, 'Expected succes, ' + - `got error ${JSON.stringify(err)}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - done(); - }); - }); - }); - }); + + // Create bucket using AWS SDK v3 + await s3.send(new CreateBucketCommand({ Bucket: bucket })); + + const key = `somekey-${genUniqID()}`; + + await s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: body + })); + const locationData = await s3.send(new GetBucketLocationCommand({ Bucket: bucket })); + const objectData = await s3.send(new GetObjectCommand({ + Bucket: bucket, + Key: key + })); + const host = s3.config.endpoint.hostname; + let endpoint = config.restEndpoints[host]; + // s3 returns '' for us-east-1 + if (endpoint === 'us-east-1') { + endpoint = ''; + } + + assert.strictEqual(locationData.LocationConstraint, endpoint); + assert.strictEqual(objectData.ETag, `"${correctMD5}"`); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/put/putAzure.js b/tests/functional/aws-node-sdk/test/multipleBackend/put/putAzure.js index 4f8e590e5c..248b056af1 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/put/putAzure.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/put/putAzure.js @@ -1,6 +1,11 @@ const assert = require('assert'); const async = require('async'); - +const { CreateBucketCommand, + PutObjectCommand, + GetObjectCommand, + CreateMultipartUploadCommand, + AbortMultipartUploadCommand, + PutBucketVersioningCommand } = require('@aws-sdk/client-s3'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); const { @@ -35,16 +40,14 @@ let bucketUtil; let s3; function azureGetCheck(objectKey, azureMD5, azureMetadata, cb) { - 
azureClient.getContainerClient(azureContainerName).getProperties(objectKey).then(res => { - const resMD5 = convertMD5(res.contentSettings.contentMD5); - assert.strictEqual(resMD5, azureMD5); - assert.deepStrictEqual(res.metadata, azureMetadata); - return cb(); - }, err => { - assert.strictEqual(err, null, 'Expected success, got error ' + - `on call to Azure: ${err}`); - return cb(); - }); + azureClient.getContainerClient(azureContainerName).getBlobClient(objectKey).getProperties() + .then(res => { + const resMD5 = convertMD5(res.contentSettings.contentMD5); + assert.strictEqual(resMD5, azureMD5); + assert.deepStrictEqual(res.metadata, azureMetadata); + return cb(); + }) + .catch(err => cb(err)); } describeSkipIfNotMultipleOrCeph('MultipleBackend put object to AZURE', function @@ -70,12 +73,16 @@ describeF() { }); }); describe('with bucket location header', () => { - beforeEach(done => - s3.createBucket({ Bucket: azureContainerName, - CreateBucketConfiguration: { - LocationConstraint: azureLocation, - }, - }, done)); + beforeEach(done => { + s3.send(new CreateBucketCommand({ + Bucket: azureContainerName, + CreateBucketConfiguration: { + LocationConstraint: azureLocation, + }, + })) + .then(() => done()) + .catch(done); + }); it('should return a NotImplemented error if try to put ' + 'versioning to bucket with Azure location', done => { @@ -85,10 +92,14 @@ describeF() { Status: 'Enabled', }, }; - s3.putBucketVersioning(params, err => { - assert.strictEqual(err.code, 'NotImplemented'); - done(); - }); + s3.send(new PutBucketVersioningCommand(params)) + .then(() => { + done(new Error('Expected NotImplemented error')); + }) + .catch(err => { + assert.strictEqual(err.name, 'NotImplemented'); + done(); + }); }); it('should put an object to Azure, with no object location ' + @@ -99,8 +110,11 @@ describeF() { Body: normalBody, }; async.waterfall([ - next => s3.putObject(params, err => setTimeout(() => - next(err), azureTimeout)), + next => { + s3.send(new 
PutObjectCommand(params)) + .then(() => setTimeout(() => next(), azureTimeout)) + .catch(next); + }, next => azureGetCheck(this.test.keyName, normalMD5, {}, next), ], done); @@ -109,7 +123,7 @@ describeF() { describe('with no bucket location header', () => { beforeEach(() => - s3.createBucket({ Bucket: azureContainerName }).promise() + s3.send(new CreateBucketCommand({ Bucket: azureContainerName })) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; @@ -124,14 +138,14 @@ describeF() { Metadata: { 'scal-location-constraint': azureLocation }, Body: key.body, }; - s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - setTimeout(() => - azureGetCheck(this.test.keyName, - key.MD5, azureMetadata, - () => done()), azureTimeout); - }); + s3.send(new PutObjectCommand(params)) + .then(() => { + setTimeout(() => + azureGetCheck(this.test.keyName, + key.MD5, azureMetadata, + () => done()), azureTimeout); + }) + .catch(done); }); }); @@ -149,15 +163,15 @@ describeF() { scal_location_constraint: azureLocationMismatch, /* eslint-enable camelcase */ }; - s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - setTimeout(() => - azureGetCheck( - `${azureContainerName}/${this.test.keyName}`, - normalMD5, azureMetadataMismatch, - () => done()), azureTimeout); - }); + s3.send(new PutObjectCommand(params)) + .then(() => { + setTimeout(() => + azureGetCheck( + `${azureContainerName}/${this.test.keyName}`, + normalMD5, azureMetadataMismatch, + () => done()), azureTimeout); + }) + .catch(done); }); it('should return error ServiceUnavailable putting an invalid ' + @@ -168,30 +182,37 @@ describeF() { Metadata: { 'scal-location-constraint': azureLocation }, Body: normalBody, }; - s3.putObject(params, err => { - assert.strictEqual(err.code, 'ServiceUnavailable'); - done(); - }); + s3.send(new PutObjectCommand(params)) + .then(() => { + done(new Error('Expected 
ServiceUnavailable error')); + }) + .catch(err => { + assert.strictEqual(err.name, 'ServiceUnavailable'); + done(); + }); }); it('should return error NotImplemented putting a ' + 'version to Azure', function itF(done) { - s3.putBucketVersioning({ + s3.send(new PutBucketVersioningCommand({ Bucket: azureContainerName, VersioningConfiguration: versioningEnabled, - }, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - const params = { Bucket: azureContainerName, - Key: this.test.keyName, - Body: normalBody, - Metadata: { 'scal-location-constraint': - azureLocation } }; - s3.putObject(params, err => { - assert.strictEqual(err.code, 'NotImplemented'); + })) + .then(() => { + const params = { Bucket: azureContainerName, + Key: this.test.keyName, + Body: normalBody, + Metadata: { 'scal-location-constraint': + azureLocation } }; + return s3.send(new PutObjectCommand(params)); + }) + .then(() => { + done(new Error('Expected NotImplemented error')); + }) + .catch(err => { + assert.strictEqual(err.name, 'NotImplemented'); done(); }); - }); }); it('should put two objects to Azure with same ' + @@ -202,11 +223,16 @@ describeF() { Metadata: { 'scal-location-constraint': azureLocation }, }; async.waterfall([ - next => s3.putObject(params, err => next(err)), + next => { + s3.send(new PutObjectCommand(params)) + .then(() => next()) + .catch(next); + }, next => { params.Body = normalBody; - s3.putObject(params, err => setTimeout(() => - next(err), azureTimeout)); + s3.send(new PutObjectCommand(params)) + .then(() => setTimeout(() => next(), azureTimeout)) + .catch(next); }, next => { setTimeout(() => { @@ -226,32 +252,42 @@ describeF() { Body: normalBody, Metadata: { 'scal-location-constraint': azureLocation } }; async.waterfall([ - next => s3.putObject(params, err => next(err)), + next => { + s3.send(new PutObjectCommand(params)) + .then(() => next()) + .catch(next); + }, next => { params.Metadata = { 'scal-location-constraint': fileLocation }; - 
s3.putObject(params, err => setTimeout(() => - next(err), azureTimeout)); + s3.send(new PutObjectCommand(params)) + .then(() => setTimeout(() => next(), azureTimeout)) + .catch(next); + }, + next => { + s3.send(new GetObjectCommand({ + Bucket: azureContainerName, + Key: this.test.keyName, + })) + .then(res => { + assert.strictEqual( + res.Metadata['scal-location-constraint'], + fileLocation); + next(); + }) + .catch(next); + }, + next => { + azureClient.getContainerClient(azureContainerName) + .getBlobClient(this.test.keyName).getProperties() + .then(() => { + next(new Error('Expected NotFound error')); + }) + .catch(err => { + assert.strictEqual(err.name, 'NotFound'); + next(); + }); }, - next => s3.getObject({ - Bucket: azureContainerName, - Key: this.test.keyName, - }, (err, res) => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - assert.strictEqual( - res.Metadata['scal-location-constraint'], - fileLocation); - next(); - }), - next => azureClient.getContainerClient(azureContainerName) - .getProperties(this.test.keyName).then(() => { - assert.fail('unexpected success'); - next(); - }, err => { - assert.strictEqual(err.code, 'NotFound'); - next(); - }), ], done); }); @@ -263,13 +299,13 @@ describeF() { Body: normalBody, Metadata: { 'scal-location-constraint': fileLocation } }; async.waterfall([ - next => s3.putObject(params, err => next(err)), + next => s3.send(new PutObjectCommand(params)).then(() => next()), next => { params.Metadata = { 'scal-location-constraint': azureLocation, }; - s3.putObject(params, err => setTimeout(() => - next(err), azureTimeout)); + s3.send(new PutObjectCommand(params)).then(() => setTimeout(() => + next(), azureTimeout)); }, next => azureGetCheck(this.test.keyName, normalMD5, azureMetadata, next), @@ -278,37 +314,43 @@ describeF() { describe('with ongoing MPU with same key name', () => { beforeEach(function beFn(done) { - s3.createMultipartUpload({ + s3.send(new CreateMultipartUploadCommand({ Bucket: 
azureContainerName, Key: this.currentTest.keyName, Metadata: { 'scal-location-constraint': azureLocation }, - }, (err, res) => { - assert.equal(err, null, `Err creating MPU: ${err}`); - this.currentTest.uploadId = res.UploadId; - done(); - }); + })) + .then(res => { + this.currentTest.uploadId = res.UploadId; + done(); + }) + .catch(done); }); afterEach(function afFn(done) { - s3.abortMultipartUpload({ + s3.send(new AbortMultipartUploadCommand({ Bucket: azureContainerName, Key: this.currentTest.keyName, UploadId: this.currentTest.uploadId, - }, err => { - assert.equal(err, null, `Err aborting MPU: ${err}`); - done(); - }); + })) + .then(() => { + done(); + }) + .catch(done); }); it('should return ServiceUnavailable', function itFn(done) { - s3.putObject({ + s3.send(new PutObjectCommand({ Bucket: azureContainerName, Key: this.test.keyName, Metadata: { 'scal-location-constraint': azureLocation }, - }, err => { - assert.strictEqual(err.code, 'ServiceUnavailable'); - done(); - }); + })) + .then(() => { + done(new Error('Expected ServiceUnavailable error')); + }) + .catch(err => { + assert.strictEqual(err.name, 'ServiceUnavailable'); + done(); + }); }); }); }); From 3a97fe6fb77b133a6b701440336f110592a957e6 Mon Sep 17 00:00:00 2001 From: Maha Benzekri Date: Tue, 30 Sep 2025 14:33:03 +0200 Subject: [PATCH 04/12] config adaptation to migration Issue: CLDSRV-724 --- tests/functional/aws-node-sdk/test/support/awsConfig.js | 3 ++- tests/functional/aws-node-sdk/test/support/config.js | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/functional/aws-node-sdk/test/support/awsConfig.js b/tests/functional/aws-node-sdk/test/support/awsConfig.js index 6ec415bc6c..5ce78505b5 100644 --- a/tests/functional/aws-node-sdk/test/support/awsConfig.js +++ b/tests/functional/aws-node-sdk/test/support/awsConfig.js @@ -5,7 +5,7 @@ const { config } = require('../../../../../lib/Config'); const https = require('https'); const http = require('http'); -function 
getAwsCredentials(profile, credFile) { +function getAwsCredentials(profile, credFile = '/.aws/credentials') { const filename = path.join(process.env.HOME, credFile); try { @@ -25,6 +25,7 @@ function getRealAwsConfig(location) { const useHTTPS = config.locationConstraints[location].details.https; const proto = useHTTPS ? 'https' : 'http'; const params = { + region: 'us-east-1', endpoint: gcpEndpoint ? `${proto}://${gcpEndpoint}` : `${proto}://${awsEndpoint}`, }; diff --git a/tests/functional/aws-node-sdk/test/support/config.js b/tests/functional/aws-node-sdk/test/support/config.js index f49795356f..4d75aa6c7f 100644 --- a/tests/functional/aws-node-sdk/test/support/config.js +++ b/tests/functional/aws-node-sdk/test/support/config.js @@ -76,7 +76,7 @@ function _getMemConfig(profile, config) { } function _getAwsConfig(profile, config) { - const credentials = getAwsCredentials(profile, '/.aws/scality'); + const credentials = getAwsCredentials(profile); const awsConfig = Object.assign({} , DEFAULT_GLOBAL_OPTIONS, DEFAULT_AWS_OPTIONS From 71e33328d172082d273da35a2aadd5e8bd72abdc Mon Sep 17 00:00:00 2001 From: Maha Benzekri Date: Tue, 30 Sep 2025 14:33:55 +0200 Subject: [PATCH 05/12] route related tests migration Issue: CLDSRV-724 --- tests/multipleBackend/routes/routeBackbeat.js | 1382 +++++++++++++---- .../routes/routeBackbeatForReplication.js | 913 +++++++---- 2 files changed, 1617 insertions(+), 678 deletions(-) diff --git a/tests/multipleBackend/routes/routeBackbeat.js b/tests/multipleBackend/routes/routeBackbeat.js index d6e8f4af1a..0cf4de2dce 100644 --- a/tests/multipleBackend/routes/routeBackbeat.js +++ b/tests/multipleBackend/routes/routeBackbeat.js @@ -1,4 +1,19 @@ const assert = require('assert'); +const { + CreateBucketCommand, + DeleteBucketCommand, + PutBucketVersioningCommand, + PutObjectCommand, + DeleteObjectCommand, + HeadObjectCommand, + GetObjectCommand, + GetObjectTaggingCommand, + PutObjectTaggingCommand, + PutBucketEncryptionCommand, + 
ListObjectVersionsCommand, + CreateMultipartUploadCommand, + ListMultipartUploadsCommand, +} = require('@aws-sdk/client-s3'); const async = require('async'); const crypto = require('crypto'); const { v4: uuidv4 } = require('uuid'); @@ -23,7 +38,6 @@ const { } = require('../../functional/aws-node-sdk/test/multipleBackend/utils'); const { getCredentials } = require('../../functional/aws-node-sdk/test/support/credentials'); const { config } = require('../../../lib/Config'); - const azureClient = getAzureClient(); const containerName = getAzureContainerName(azureLocation); @@ -130,26 +144,34 @@ const nonVersionedTestMd = { }; function checkObjectData(s3, bucket, objectKey, dataValue, done) { - s3.getObject({ + s3.send(new GetObjectCommand({ Bucket: bucket, Key: objectKey, - }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.Body.toString(), dataValue); - done(); - }); + })).then(async data => { + try { + const body = await data.Body.transformToString(); + assert.strictEqual(body, dataValue); + return done(); + } catch (err) { + return done(err); + } + }).catch(err => done(err)); } function checkVersionData(s3, bucket, objectKey, versionId, dataValue, done) { - return s3.getObject({ + return s3.send(new GetObjectCommand({ Bucket: bucket, Key: objectKey, VersionId: versionId, - }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.Body.toString(), dataValue); - return done(); - }); + })).then(async data => { + try { + const body = await data.Body.transformToString(); + assert.strictEqual(body, dataValue); + return done(); + } catch (err) { + return done(err); + } + }).catch(err => done(err)); } function updateStorageClass(data, storageClass) { @@ -180,11 +202,12 @@ describeSkipIfNotMultipleOrCeph('backbeat DELETE routes', () => { it('abort MPU', done => { const awsKey = 'backbeat-mpu-test'; async.waterfall([ - next => - awsClient.createMultipartUpload({ + next => { + awsClient.send(new CreateMultipartUploadCommand({ Bucket: awsBucket, 
Key: awsKey, - }, next), + })).then(response => next(null, response)).catch(err => next(err)); + }, (response, next) => { const { UploadId } = response; makeBackbeatRequest({ @@ -205,16 +228,16 @@ describeSkipIfNotMultipleOrCeph('backbeat DELETE routes', () => { assert.deepStrictEqual(JSON.parse(response.body), {}); return next(null, UploadId); }); - }, (UploadId, next) => - awsClient.listMultipartUploads({ + }, (UploadId, next) => { + awsClient.send(new ListMultipartUploadsCommand({ Bucket: awsBucket, - }, (err, response) => { - assert.ifError(err); + })).then(response => { const hasOngoingUpload = response.Uploads.some(upload => (upload === UploadId)); assert(!hasOngoingUpload); return next(); - }), + }).catch(err => next(err)); + }, ], err => { assert.ifError(err); done(); @@ -247,64 +270,63 @@ describe('backbeat routes', () => { const VERSION_SUSPENDED_BUCKET = generateUniqueBucketName(VERSION_SUSPENDED_BUCKET_PREFIX, suffix); before(done => { - bucketUtil = new BucketUtility( - 'default', { signatureVersion: 'v4' }); + bucketUtil = new BucketUtility('default', {}); s3 = bucketUtil.s3; bucketUtil.emptyManyIfExists([TEST_BUCKET, TEST_ENCRYPTED_BUCKET, NONVERSIONED_BUCKET, VERSION_SUSPENDED_BUCKET]) - .then(() => s3.createBucket({ Bucket: TEST_BUCKET }).promise()) - .then(() => s3.putBucketVersioning( - { - Bucket: TEST_BUCKET, - VersioningConfiguration: { Status: 'Enabled' }, - }).promise()) - .then(() => s3.createBucket({ - Bucket: NONVERSIONED_BUCKET, - }).promise()) - .then(() => s3.createBucket({ Bucket: VERSION_SUSPENDED_BUCKET }).promise()) - .then(() => s3.putBucketVersioning( - { - Bucket: VERSION_SUSPENDED_BUCKET, - VersioningConfiguration: { Status: 'Suspended' }, - }).promise()) - .then(() => s3.createBucket({ Bucket: TEST_ENCRYPTED_BUCKET }).promise()) - .then(() => s3.putBucketVersioning( - { - Bucket: TEST_ENCRYPTED_BUCKET, - VersioningConfiguration: { Status: 'Enabled' }, - }).promise()) - .then(() => s3.putBucketEncryption( - { - Bucket: 
TEST_ENCRYPTED_BUCKET, - ServerSideEncryptionConfiguration: { - Rules: [ - { - ApplyServerSideEncryptionByDefault: { - SSEAlgorithm: 'AES256', + .then(async () => { + try { + await s3.send(new CreateBucketCommand({ Bucket: TEST_BUCKET })); + await s3.send(new PutBucketVersioningCommand({ + Bucket: TEST_BUCKET, + VersioningConfiguration: { Status: 'Enabled' }, + })); + await s3.send(new CreateBucketCommand({ + Bucket: NONVERSIONED_BUCKET, + })); + await s3.send(new CreateBucketCommand({ Bucket: VERSION_SUSPENDED_BUCKET })); + await s3.send(new PutBucketVersioningCommand({ + Bucket: VERSION_SUSPENDED_BUCKET, + VersioningConfiguration: { Status: 'Suspended' }, + })); + await s3.send(new CreateBucketCommand({ Bucket: TEST_ENCRYPTED_BUCKET })); + await s3.send(new PutBucketVersioningCommand({ + Bucket: TEST_ENCRYPTED_BUCKET, + VersioningConfiguration: { Status: 'Enabled' }, + })); + await s3.send(new PutBucketEncryptionCommand({ + Bucket: TEST_ENCRYPTED_BUCKET, + ServerSideEncryptionConfiguration: { + Rules: [ + { + ApplyServerSideEncryptionByDefault: { + SSEAlgorithm: 'AES256', + }, }, - }, - ], - }, - }).promise()) - .then(() => done()) + ], + }, + })); + done(); + } catch (err) { + done(err); + } + }) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); done(err); }); }); - after(() => - bucketUtil.empty(TEST_BUCKET) - .then(() => s3.deleteBucket({ Bucket: TEST_BUCKET }).promise()) - .then(() => bucketUtil.empty(TEST_ENCRYPTED_BUCKET)) - .then(() => s3.deleteBucket({ Bucket: TEST_ENCRYPTED_BUCKET }).promise()) - .then(() => bucketUtil.empty(NONVERSIONED_BUCKET)) - .then(() => - s3.deleteBucket({ Bucket: NONVERSIONED_BUCKET }).promise()) - .then(() => bucketUtil.empty(VERSION_SUSPENDED_BUCKET)) - .then(() => - s3.deleteBucket({ Bucket: VERSION_SUSPENDED_BUCKET }).promise()) - ); + after(async () => { + await bucketUtil.empty(TEST_BUCKET); + await s3.send(new DeleteBucketCommand({ Bucket: TEST_BUCKET })); + await 
bucketUtil.empty(TEST_ENCRYPTED_BUCKET); + await s3.send(new DeleteBucketCommand({ Bucket: TEST_ENCRYPTED_BUCKET })); + await bucketUtil.empty(NONVERSIONED_BUCKET); + await s3.send(new DeleteBucketCommand({ Bucket: NONVERSIONED_BUCKET })); + await bucketUtil.empty(VERSION_SUSPENDED_BUCKET); + await s3.send(new DeleteBucketCommand({ Bucket: VERSION_SUSPENDED_BUCKET })); + }); describe('null version', () => { let bucket; @@ -328,20 +350,37 @@ describe('backbeat routes', () => { beforeEach(() => { bucket = generateUniqueBucketName(BUCKET_FOR_NULL_VERSION_PREFIX); return bucketUtil.emptyIfExists(bucket) - .then(() => s3.createBucket({ Bucket: bucket }).promise()); + .then(() => s3.send(new CreateBucketCommand({ Bucket: bucket }))); }); - afterEach(() => - bucketUtil.empty(bucket) - .then(() => s3.deleteBucket({ Bucket: bucket }).promise()) + afterEach(() => bucketUtil.empty(bucket) + .then(() => s3.send(new DeleteBucketCommand({ Bucket: bucket }))) ); it('should update metadata of a current null version', done => { let objMD; - return async.series({ - putObject: next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioningSource: next => s3.putBucketVersioning( - { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, next), + async.series({ + putObject: next => { + s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, + enableVersioningSource: next => { + s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Enabled' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -373,8 +412,15 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - headObject: next => 
s3.headObject( - { Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + headObject: next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), getMetadataAfter: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -385,12 +431,17 @@ describe('backbeat routes', () => { }, authCredentials: backbeatAuthCredentials, }, next), - listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next), + listObjectVersions: next => s3.send(new ListObjectVersionsCommand({ + Bucket: bucket, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), }, (err, results) => { if (err) { return done(err); } - const headObjectRes = results.headObject; assert.strictEqual(headObjectRes.VersionId, 'null'); assert.strictEqual(headObjectRes.StorageClass, storageClass); @@ -419,18 +470,39 @@ describe('backbeat routes', () => { let objMD; let expectedVersionId; return async.series({ - putObjectInitial: next => s3.putObject( - { Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioning: next => s3.putBucketVersioning( - { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectAgain: next => s3.putObject( - { Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - expectedVersionId = data.VersionId; - return next(); - }), + putObjectInitial: next => { + s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, + enableVersioning: next => { + s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Enabled' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, + putObjectAgain: next => { + 
s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(data => { + expectedVersionId = data.VersionId; + return next(null, data); + }).catch(err => { + next(err); + }); + }, getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -462,7 +534,17 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - headObject: next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + headObject: next => { + s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, getMetadataAfter: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -473,7 +555,15 @@ describe('backbeat routes', () => { }, authCredentials: backbeatAuthCredentials, }, next), - listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next), + listObjectVersions: next => { + s3.send(new ListObjectVersionsCommand({ + Bucket: bucket, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, }, (err, results) => { if (err) { return done(err); @@ -493,7 +583,7 @@ describe('backbeat routes', () => { const currentVersion = Versions.find(v => v.IsLatest); assertVersionHasNotBeenUpdated(currentVersion, expectedVersionId); - const nonCurrentVersion = Versions.find(v => !v.IsLatest); + const [nonCurrentVersion] = Versions.filter(v => !v.IsLatest); assertVersionIsNullAndUpdated(nonCurrentVersion); return done(); }); @@ -502,12 +592,37 @@ describe('backbeat routes', () => { it('should update metadata of a suspended null version', done => { let objMD; return async.series({ - suspendVersioning: next => s3.putBucketVersioning( - { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, next), - putObject: next => s3.putObject( - { Bucket: bucket, Key: keyName, Body: 
Buffer.from(testData) }, next), - enableVersioning: next => s3.putBucketVersioning( - { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, next), + suspendVersioning: next => { + s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Suspended' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, + putObject: next => { + s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, + enableVersioning: next => { + s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Enabled' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -539,7 +654,17 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - headObject: next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + headObject: next => { + s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, getMetadataAfter: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -550,7 +675,15 @@ describe('backbeat routes', () => { }, authCredentials: backbeatAuthCredentials, }, next), - listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next), + listObjectVersions: next => { + s3.send(new ListObjectVersionsCommand({ + Bucket: bucket, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, }, (err, results) => { if (err) { return done(err); @@ -577,16 +710,49 @@ describe('backbeat routes', () => { it('should update metadata of a suspended null version with internal version id', 
done => { let objMD; return async.series({ - suspendVersioning: next => s3.putBucketVersioning( - { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, next), - putObject: next => s3.putObject( - { Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioning: next => s3.putBucketVersioning( - { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectTagging: next => s3.putObjectTagging({ - Bucket: bucket, Key: keyName, VersionId: 'null', - Tagging: { TagSet: [{ Key: 'key1', Value: 'value1' }] }, - }, next), + suspendVersioning: next => { + s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Suspended' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, + putObject: next => { + s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, + enableVersioning: next => { + s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Enabled' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, + putObjectTagging: next => { + s3.send(new PutObjectTaggingCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + Tagging: { TagSet: [{ Key: 'key1', Value: 'value1' }] }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -618,7 +784,17 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - headObject: next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + headObject: next => { + s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + 
}).catch(err => { + next(err); + }); + }, getMetadataAfter: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -629,7 +805,15 @@ describe('backbeat routes', () => { }, authCredentials: backbeatAuthCredentials, }, next), - listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next), + listObjectVersions: next => { + s3.send(new ListObjectVersionsCommand({ + Bucket: bucket, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, }, (err, results) => { if (err) { return done(err); @@ -647,7 +831,8 @@ describe('backbeat routes', () => { assert.strictEqual(Versions.length, 1); - const [currentVersion] = Versions; + const currentVersion = Versions[0]; + assert(currentVersion.IsLatest); assertVersionIsNullAndUpdated(currentVersion); return done(); }); @@ -655,8 +840,16 @@ describe('backbeat routes', () => { it('should update metadata of a non-version object', done => { let objMD; - return async.series([ - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), + async.series([ + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -688,8 +881,21 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), + next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new ListObjectVersionsCommand({ + Bucket: bucket, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), ], (err, data) => { if (err) { return done(err); @@ -702,7 +908,7 @@ 
describe('backbeat routes', () => { const listObjectVersionsRes = data[4]; const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(DeleteMarkers, undefined); assert.strictEqual(Versions.length, 1); const currentVersion = Versions[0]; @@ -714,10 +920,28 @@ describe('backbeat routes', () => { it('should create a new null version if versioning suspended and no version', done => { let objMD; - return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), + async.series([ + next => { + s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Suspended' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, + next => { + s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -738,7 +962,17 @@ describe('backbeat routes', () => { objMD = result; return next(); }), - next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => { + s3.send(new DeleteObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, next => makeBackbeatRequest({ method: 'PUT', resourceType: 'metadata', @@ -750,8 +984,25 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), + next => { + s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + })).then(result => { + 
next(null, result); + }).catch(err => { + next(err); + }); + }, + next => { + s3.send(new ListObjectVersionsCommand({ + Bucket: bucket + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, ], (err, data) => { if (err) { return done(err); @@ -763,7 +1014,7 @@ describe('backbeat routes', () => { const listObjectVersionsRes = data[6]; const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(DeleteMarkers, undefined); assert.strictEqual(Versions.length, 1); const currentVersion = Versions[0]; @@ -779,9 +1030,23 @@ describe('backbeat routes', () => { itSkipS3C('should create a new null version if versioning suspended and delete marker null version', done => { let objMD; return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Suspended' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -802,7 +1067,14 @@ describe('backbeat routes', () => { objMD = result; return next(); }), - next => s3.deleteObject({ Bucket: bucket, Key: keyName }, next), + next => s3.send(new DeleteObjectCommand({ + Bucket: bucket, + Key: keyName, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), next => makeBackbeatRequest({ method: 'PUT', resourceType: 'metadata', @@ -814,8 +1086,21 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - 
next => s3.headObject({ Bucket: bucket, Key: keyName }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), + next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new ListObjectVersionsCommand({ + Bucket: bucket + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), ], (err, data) => { if (err) { return done(err); @@ -827,7 +1112,7 @@ describe('backbeat routes', () => { const listObjectVersionsRes = data[6]; const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(DeleteMarkers, undefined); assert.strictEqual(Versions.length, 1); const currentVersion = Versions[0]; @@ -841,18 +1126,41 @@ describe('backbeat routes', () => { let expectedVersionId; let objMD; return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Enabled' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(data => { expectedVersionId = data.VersionId; - return next(); + return next(null, data); + }).catch(err => { + next(err); + }), + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Suspended' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + 
next(err); }), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -873,7 +1181,15 @@ describe('backbeat routes', () => { objMD = result; return next(); }), - next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => s3.send(new DeleteObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), next => makeBackbeatRequest({ method: 'PUT', resourceType: 'metadata', @@ -885,8 +1201,19 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), + next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new ListObjectVersionsCommand({ Bucket: bucket })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), ], (err, data) => { if (err) { return done(err); @@ -898,7 +1225,7 @@ describe('backbeat routes', () => { const listObjectVersionsRes = data[8]; const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(DeleteMarkers, undefined); assert.strictEqual(Versions.length, 2); const currentVersion = Versions.find(v => v.IsLatest); @@ -916,9 +1243,23 @@ describe('backbeat routes', () => { it('should update null version with no version id and versioning suspended', done => { let objMD; return async.series([ - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), - next => s3.putBucketVersioning({ Bucket: 
bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Suspended' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -950,8 +1291,21 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), + next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new ListObjectVersionsCommand({ + Bucket: bucket, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), ], (err, data) => { if (err) { return done(err); @@ -962,7 +1316,7 @@ describe('backbeat routes', () => { const listObjectVersionsRes = data[5]; const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(DeleteMarkers, undefined); assert.strictEqual(Versions.length, 1); const currentVersion = Versions[0]; @@ -976,9 +1330,23 @@ describe('backbeat routes', () => { it('should update null version if versioning suspended and null version has a version id', done => { let objMD; return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Suspended' }, + 
})).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1010,8 +1378,22 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), + next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new ListObjectVersionsCommand({ + Bucket: bucket, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), ], (err, data) => { if (err) { return done(err); @@ -1024,7 +1406,7 @@ describe('backbeat routes', () => { const listObjectVersionsRes = data[5]; const { DeleteMarkers, Versions } = listObjectVersionsRes; assert.strictEqual(Versions.length, 1); - assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(DeleteMarkers, undefined); const currentVersion = Versions[0]; assert(currentVersion.IsLatest); @@ -1037,9 +1419,23 @@ describe('backbeat routes', () => { 'put object afterward', done => { let objMD; return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Suspended' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: 
Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1071,9 +1467,40 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new ListObjectVersionsCommand({ + Bucket: bucket, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), ], (err, data) => { if (err) { return done(err); @@ -1085,7 +1512,7 @@ describe('backbeat routes', () => { const listObjectVersionsRes = data[6]; const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(DeleteMarkers, undefined); assert.strictEqual(Versions.length, 1); const currentVersion = Versions[0]; @@ -1100,9 +1527,23 @@ describe('backbeat routes', () => { let objMD; let expectedVersionId; return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), + next => s3.send(new PutBucketVersioningCommand({ + 
Bucket: bucket, + VersioningConfiguration: { Status: 'Suspended' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1134,17 +1575,40 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Enabled' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(data => { expectedVersionId = data.VersionId; - return next(); + return next(null, data); + }).catch(err => { + next(err); + }), + next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new ListObjectVersionsCommand({ + Bucket: bucket, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); }), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), ], (err, data) => { if (err) { return done(err); @@ -1171,18 +1635,41 @@ describe('backbeat routes', () => { let expectedVersionId; let objMD; return async.series([ - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), - next => 
s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Enabled' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(data => { expectedVersionId = data.VersionId; - return next(); + return next(null, data); + }).catch(err => { + next(err); + }), + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Suspended' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); }), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1214,8 +1701,31 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), + next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new ListObjectVersionsCommand({ + Bucket: bucket, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + 
next(null, result); + }).catch(err => { + next(err); + }), ], (err, data) => { if (err) { return done(err); @@ -1227,7 +1737,7 @@ describe('backbeat routes', () => { const listObjectVersionsRes = data[7]; const deleteMarkers = listObjectVersionsRes.DeleteMarkers; - assert.strictEqual(deleteMarkers.length, 0); + assert.strictEqual(deleteMarkers, undefined); const { Versions } = listObjectVersionsRes; assert.strictEqual(Versions.length, 2); @@ -1245,19 +1755,50 @@ describe('backbeat routes', () => { let objMD; let expectedVersionId; return async.series([ - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - expectedVersionId = data.VersionId; - return next(); + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Enabled' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + expectedVersionId = result.VersionId; + return next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Suspended' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new DeleteObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: expectedVersionId, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); }), - next => 
s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: expectedVersionId }, next), next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1289,13 +1830,26 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), + next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new ListObjectVersionsCommand({ + Bucket: bucket, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), ], (err, data) => { if (err) { return done(err); } - const headObjectRes = data[7]; assert.strictEqual(headObjectRes.VersionId, 'null'); assert.strictEqual(headObjectRes.StorageClass, storageClass); @@ -1303,7 +1857,7 @@ describe('backbeat routes', () => { const listObjectVersionsRes = data[8]; const { DeleteMarkers, Versions } = listObjectVersionsRes; assert.strictEqual(Versions.length, 1); - assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(DeleteMarkers, undefined); const currentVersion = Versions[0]; assert(currentVersion.IsLatest); @@ -1317,19 +1871,50 @@ describe('backbeat routes', () => { let objMD; let deletedVersionId; return async.series([ - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: 
Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Enabled' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(data => { deletedVersionId = data.VersionId; - return next(); + return next(null, data); + }).catch(err => { + next(err); + }), + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Suspended' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new DeleteObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: deletedVersionId, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); }), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: deletedVersionId }, next), next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1361,9 +1946,40 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new 
ListObjectVersionsCommand({ + Bucket: bucket, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), ], (err, data) => { if (err) { return done(err); @@ -1375,7 +1991,7 @@ describe('backbeat routes', () => { const listObjectVersionsRes = data[9]; const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(DeleteMarkers, undefined); assert.strictEqual(Versions.length, 1); const currentVersion = Versions[0]; @@ -1391,19 +2007,50 @@ describe('backbeat routes', () => { let deletedVersionId; let expectedVersionId; return async.series([ - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - deletedVersionId = data.VersionId; + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Enabled' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + deletedVersionId = result.VersionId; return next(); + }).catch(err => { + next(err); + }), + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Suspended' }, + })).then(result => { + next(null, result); + }).catch(err => { + 
next(err); + }), + next => s3.send(new DeleteObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: deletedVersionId, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); }), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: deletedVersionId }, next), next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1435,29 +2082,51 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, requestBody: objMD, }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - expectedVersionId = data.VersionId; + next => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: { Status: 'Enabled' }, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: keyName, + Body: Buffer.from(testData), + })).then(result => { + expectedVersionId = result.VersionId; return next(); + }).catch(err => { + next(err); + }), + next => s3.send(new HeadObjectCommand({ + Bucket: bucket, + Key: keyName, + VersionId: 'null', + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }), + next => s3.send(new ListObjectVersionsCommand({ + Bucket: bucket, + })).then(result => { + next(null, result); + }).catch(err => { + next(err); }), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), ], (err, data) => { if (err) { return done(err); } - const headObjectRes = data[9]; assert.strictEqual(headObjectRes.VersionId, 'null'); assert.strictEqual(headObjectRes.StorageClass, 
storageClass); const listObjectVersionsRes = data[10]; const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(DeleteMarkers, undefined); assert.strictEqual(Versions.length, 2); const [currentVersion] = Versions.filter(v => v.IsLatest); @@ -1605,13 +2274,14 @@ describe('backbeat routes', () => { next(); }), next => - s3.headObject({ + s3.send(new HeadObjectCommand({ Bucket: bucket, Key: objectKey, - }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.StorageClass, 'awsbackend'); + })).then(result => { + assert.strictEqual(result.StorageClass, 'awsbackend'); next(); + }).catch(err => { + next(err); }), next => checkObjectData(s3, bucket, objectKey, testData, next), ], done); @@ -1694,16 +2364,17 @@ describe('backbeat routes', () => { return next(); }), next => - awsClient.getObjectTagging({ + awsClient.send(new GetObjectTaggingCommand({ Bucket: awsBucket, Key: awsKey, - }, (err, data) => { - assert.ifError(err); + })).then(data => { assert.deepStrictEqual(data.TagSet, [{ Key: 'Key1', Value: 'Value1' }]); - next(); + next(null, data); + }).catch(err => { + next(err); }), ], done); }); @@ -1723,11 +2394,11 @@ describe('backbeat routes', () => { it(`should PUT metadata and data if ${description} and x-scal-versioning-required is not set`, done => { let objectMd; async.waterfall([ - next => s3.putObject({ + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'sourcekey', - Body: Buffer.from(testData) }, - next), + Body: Buffer.from(testData), + })).then(res => next(null, res)).catch(err => next(err)), (resp, next) => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1931,10 +2602,10 @@ describe('backbeat routes', () => { // check that the object copy referencing the old data // locations is unreadable, confirming that the old // data locations have been deleted - s3.getObject({ + s3.send(new GetObjectCommand({ Bucket: TEST_BUCKET, Key: testKeyOldData, - }, 
err => { + })).catch(err => { assert(err, 'expected error to get object with old data ' + 'locations, got success'); next(); @@ -2021,10 +2692,10 @@ describe('backbeat routes', () => { // check that the object copy referencing the old data // locations is unreadable, confirming that the old // data locations have been deleted - s3.getObject({ + s3.send(new GetObjectCommand({ Bucket: TEST_BUCKET, Key: testKeyOldData, - }, err => { + })).catch(err => { assert(err, 'expected error to get object with old data ' + 'locations, got success'); next(); @@ -2071,13 +2742,15 @@ describe('backbeat routes', () => { }, next => { // check that the object is still readable to make // sure we did not remove the data keys - s3.getObject({ + s3.send(new GetObjectCommand({ Bucket: TEST_BUCKET, Key: testKey, - }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.Body.toString(), testData); + })).then(async data => { + const body = await data.Body.transformToString(); + assert.strictEqual(body, testData); next(); + }).catch(err => { + next(err); }); }], err => { assert.ifError(err); @@ -2149,24 +2822,28 @@ describe('backbeat routes', () => { assert.notStrictEqual(newVersion, testMd.versionId); // give some time for the async deletes to complete, // then check that we can read the latest version - setTimeout(() => s3.getObject({ + setTimeout(() => s3.send(new GetObjectCommand({ Bucket: TEST_BUCKET, Key: testKey, - }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.Body.toString(), testData); + })).then(async data => { + const body = await data.Body.transformToString(); + assert.strictEqual(body, testData); next(); + }).catch(err => { + next(err); }), 1000); }, next => { // check that the previous object version is still readable - s3.getObject({ + s3.send(new GetObjectCommand({ Bucket: TEST_BUCKET, Key: testKey, VersionId: versionIdUtils.encode(testMd.versionId), - }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.Body.toString(), 
testData); + })).then(async data => { + const body = await data.Body.transformToString(); + assert.strictEqual(body, testData); next(); + }).catch(err => { + next(err); }); }], err => { assert.ifError(err); @@ -2453,10 +3130,10 @@ describe('backbeat routes', () => { jsonResponse: true, }, next), next => - awsClient.getObjectTagging({ + awsClient.send(new GetObjectTaggingCommand({ Bucket: awsBucket, Key: awsKey, - }, (err, data) => { + }), (err, data) => { assert.ifError(err); assert.deepStrictEqual(data.TagSet, [{ Key: 'key1', @@ -2539,15 +3216,18 @@ describe('backbeat routes', () => { const testKey = 'batch-delete-test-key'; async.series([ - done => s3.putObject({ - Bucket: TEST_BUCKET, - Key: testKey, - Body: Buffer.from('hello'), - }, (err, data) => { - assert.ifError(err); - versionId = data.VersionId; - done(); - }), + done => { + s3.send(new PutObjectCommand({ + Bucket: TEST_BUCKET, + Key: testKey, + Body: Buffer.from('hello'), + })).then(data => { + versionId = data.VersionId; + done(); + }).catch(err => { + done(err); + }); + }, done => { makeBackbeatRequest({ method: 'GET', @@ -2580,15 +3260,19 @@ describe('backbeat routes', () => { }; makeRequest(options, done); }, - done => s3.getObject({ - Bucket: TEST_BUCKET, - Key: testKey, - }, err => { - // should error out as location shall no longer exist - assert(err); - assert.strictEqual(err.statusCode, 503); - done(); - }), + done => { + s3.send(new GetObjectCommand({ + Bucket: TEST_BUCKET, + Key: testKey, + })).then(() => { + done(new Error('Expected error')); + }).catch(err => { + // should error out as location shall no longer exist + assert(err); + assert.strictEqual(err.$metadata.httpStatusCode, 503); + done(); + }); + }, ], done); }); @@ -2597,15 +3281,18 @@ describe('backbeat routes', () => { const awsKey = `${TEST_BUCKET}/batch-delete-test-key-${makeid(8)}`; async.series([ - done => awsClient.putObject({ - Bucket: awsBucket, - Key: awsKey, - Body: Buffer.from('hello'), - }, (err, data) => { - 
assert.ifError(err); - versionId = data.VersionId; - done(); - }), + done => { + awsClient.send(new PutObjectCommand({ + Bucket: awsBucket, + Key: awsKey, + Body: Buffer.from('hello'), + })).then(data => { + versionId = data.VersionId; + done(); + }).catch(err => { + done(err); + }); + }, done => { const location = [{ key: awsKey, @@ -2625,14 +3312,18 @@ describe('backbeat routes', () => { }; makeRequest(options, done); }, - done => awsClient.getObject({ - Bucket: awsBucket, - Key: awsKey, - }, err => { - // should error out as location shall no longer exist - assert(err); - done(); - }), + done => { + awsClient.send(new GetObjectCommand({ + Bucket: awsBucket, + Key: awsKey, + })).then(() => { + done(new Error('Expected error')); + }).catch(err => { + // should error out as location shall no longer exist + assert(err); + done(); + }); + }, ], done); }); it('should fail with error if given malformed JSON', done => { @@ -2694,11 +3385,16 @@ describe('backbeat routes', () => { 'if-unmodified-since header is not provided', done => { const awsKey = uuidv4(); async.series([ - next => - awsClient.putObject({ + next => { + awsClient.send(new PutObjectCommand({ Bucket: awsBucket, Key: awsKey, - }, next), + })).then(result => { + next(null, result); + }).catch(err => { + next(err); + }); + }, next => makeRequest({ authCredentials: backbeatAuthCredentials, @@ -2721,15 +3417,17 @@ describe('backbeat routes', () => { }), jsonResponse: true, }, next), - next => - awsClient.getObjectTagging({ + next => { + awsClient.send(new GetObjectTaggingCommand({ Bucket: awsBucket, Key: awsKey, - }, (err, data) => { - assert.ifError(err); + })).then(data => { assert.deepStrictEqual(data.TagSet, []); - next(); - }), + next(null, data); + }).catch(err => { + next(err); + }); + }, ], done); }); @@ -2738,10 +3436,10 @@ describe('backbeat routes', () => { const awsKey = uuidv4(); async.series([ next => - awsClient.putObject({ + awsClient.send(new PutObjectCommand({ Bucket: awsBucket, Key: awsKey, 
- }, next), + })).then(result => next(null, result)).catch(err => next(err)), next => makeRequest({ authCredentials: backbeatAuthCredentials, @@ -2766,15 +3464,17 @@ describe('backbeat routes', () => { }), jsonResponse: true, }, next), - next => - awsClient.getObjectTagging({ + next => { + awsClient.send(new GetObjectTaggingCommand({ Bucket: awsBucket, Key: awsKey, - }, (err, data) => { - assert.ifError(err); + })).then(data => { assert.deepStrictEqual(data.TagSet, []); next(); - }), + }).catch(err => { + next(err); + }); + }, ], done); }); @@ -2784,21 +3484,18 @@ describe('backbeat routes', () => { let lastModified; async.series([ next => - awsClient.putObject({ + awsClient.send(new PutObjectCommand({ Bucket: awsBucket, Key: awsKey, - }, next), + })).then(result => next(null, result)).catch(err => next(err)), next => - awsClient.headObject({ + awsClient.send(new HeadObjectCommand({ Bucket: awsBucket, Key: awsKey, - }, (err, data) => { - if (err) { - return next(err); - } + })).then(data => { lastModified = data.LastModified; - return next(); - }), + next(null, data); + }).catch(err => next(err)), next => makeRequest({ authCredentials: backbeatAuthCredentials, @@ -2823,11 +3520,10 @@ describe('backbeat routes', () => { jsonResponse: true, }, next), next => - awsClient.getObjectTagging({ + awsClient.send(new GetObjectTaggingCommand({ Bucket: awsBucket, Key: awsKey, - }, (err, data) => { - assert.ifError(err); + })).then(data => { assert.strictEqual(data.TagSet.length, 2); data.TagSet.forEach(tag => { const { Key, Value } = tag; @@ -2843,7 +3539,10 @@ describe('backbeat routes', () => { Value, 'lifecycle-transition'); } }); - next(); + next(null, data); + }).catch(err => { + assert.ifError(err); + next(err); }), ], done); }); @@ -2860,6 +3559,7 @@ describe('backbeat routes', () => { authCredentials: backbeatAuthCredentials, hostname: ipAddress, port: 8000, + method: 'POST', path: `/_/backbeat/batchdelete/${containerName}/${blob}`, diff --git 
a/tests/multipleBackend/routes/routeBackbeatForReplication.js b/tests/multipleBackend/routes/routeBackbeatForReplication.js index be92f69282..acd77c0618 100644 --- a/tests/multipleBackend/routes/routeBackbeatForReplication.js +++ b/tests/multipleBackend/routes/routeBackbeatForReplication.js @@ -3,6 +3,17 @@ const async = require('async'); const { models } = require('arsenal'); const { ObjectMD } = models; const { v4: uuidv4 } = require('uuid'); +const { + CreateBucketCommand, + DeleteBucketCommand, + PutBucketVersioningCommand, + PutObjectCommand, + DeleteObjectCommand, + HeadObjectCommand, + ListObjectVersionsCommand, + GetObjectTaggingCommand, + PutObjectTaggingCommand, +} = require('@aws-sdk/client-s3'); const { makeBackbeatRequest } = require('../../functional/raw-node/utils/makeRequest'); const BucketUtility = require('../../functional/aws-node-sdk/lib/utility/bucket-util'); @@ -92,16 +103,16 @@ describe(`backbeat routes for replication (${name})`, () => { bucketSource = generateUniqueBucketName('backbeatbucket-replication-source'); bucketDestination = generateUniqueBucketName('backbeatbucket-replication-destination'); await srcBucketUtil.emptyIfExists(bucketSource); - await srcS3.createBucket({ Bucket: bucketSource }).promise(); + await srcS3.send(new CreateBucketCommand({ Bucket: bucketSource })); await dstBucketUtil.emptyIfExists(bucketDestination); - await dstS3.createBucket({ Bucket: bucketDestination }).promise(); + await dstS3.send(new CreateBucketCommand({ Bucket: bucketDestination })); }); afterEach(async () => { await srcBucketUtil.empty(bucketSource); - await srcS3.deleteBucket({ Bucket: bucketSource }).promise(); + await srcS3.send(new DeleteBucketCommand({ Bucket: bucketSource })); await dstBucketUtil.empty(bucketDestination); - await dstS3.deleteBucket({ Bucket: bucketDestination }).promise(); + await dstS3.send(new DeleteBucketCommand({ Bucket: bucketDestination })); }); it('should successfully replicate a version', done => { @@ -109,18 +120,25 
@@ describe(`backbeat routes for replication (${name})`, () => { let versionId; async.series({ - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(() => next()).catch(next), + + putObject: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data=> { + versionId = data.VersionId; + return next(null, data); + }).catch(err => next(err)), + + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -148,9 +166,14 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMD, }, next), - headObject: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: versionId }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + headObject: next => dstS3.send(new HeadObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: versionId + })).then(data => next(null, data)).catch(err => next(err)), + listObjectVersions: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + 
})).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -174,18 +197,25 @@ describe(`backbeat routes for replication (${name})`, () => { let versionId; async.series({ - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObject: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => { + versionId = data.VersionId; + next(null, data); + }).catch(err => next(err)), + + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -231,9 +261,14 @@ describe(`backbeat routes for replication (${name})`, () => { requestBody: result.getSerialized(), }, next); }, - getObjectTagging: next => dstS3.getObjectTagging( - { Bucket: bucketDestination, Key: keyName, VersionId: versionId }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + getObjectTagging: next => dstS3.send(new GetObjectTaggingCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: versionId + })).then(data 
=> next(null, data)).catch(err => next(err)), + listObjectVersions: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -258,18 +293,25 @@ describe(`backbeat routes for replication (${name})`, () => { let versionId; async.series({ - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObject: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => { + versionId = data.VersionId; + next(null, data); + }).catch(err => next(err)), + + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -335,18 +377,25 @@ describe(`backbeat routes for replication (${name})`, () => { let versionId; async.series({ - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: 
Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObject: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => { + versionId = data.VersionId; + return next(null, data); + }).catch(err => next(err)), + + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -387,26 +436,34 @@ describe(`backbeat routes for replication (${name})`, () => { let versionIdCurrent, versionIdNonCurrent; async.series({ - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectNonCurrent: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionIdNonCurrent = data.VersionId; - return next(); - }), - putObjectCurrent: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionIdCurrent = data.VersionId; - return next(); - }), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), + enableVersioningSource: next => srcS3.send(new 
PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObjectNonCurrent: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => { + versionIdNonCurrent = data.VersionId; + return next(null, data); + }).catch(err => next(err)), + + putObjectCurrent: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => { + versionIdCurrent = data.VersionId; + return next(null, data); + }).catch(err => next(err)), + + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + getMetadataNonCurrent: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -462,7 +519,9 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMDNonCurrent, }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + listObjectVersions: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -489,26 +548,33 @@ describe(`backbeat routes for replication (${name})`, () => { let versionIdVersion, versionIdDeleteMarker; async.series({ - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionIdVersion = data.VersionId; - return next(); - }), - deleteObject: next => 
srcS3.deleteObject( - { Bucket: bucketSource, Key: keyName }, (err, data) => { - if (err) { - return next(err); - } - versionIdDeleteMarker = data.VersionId; - return next(); - }), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObject: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => { + versionIdVersion = data.VersionId; + return next(null, data); + }).catch(err => next(err)), + + deleteObject: next => srcS3.send(new DeleteObjectCommand({ + Bucket: bucketSource, + Key: keyName + })).then(data => { + versionIdDeleteMarker = data.VersionId; + return next(null, data); + }).catch(err => next(err)), + + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + getMetadataVersion: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -563,7 +629,9 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMDDeleteMarker, }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + listObjectVersions: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -589,12 +657,19 @@ describe(`backbeat routes for replication (${name})`, () => { let objMD; async.series({ - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, 
Body: Buffer.from(testData) }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), + putObject: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => next(null, data)).catch(err => next(err)), + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -622,8 +697,14 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMD, }, next), - headObject: next => dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + headObject: next => dstS3.send(new HeadObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: 'null' + })).then(data => next(null, data)).catch(err => next(err)), + listObjectVersions: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -649,14 +730,27 @@ describe(`backbeat routes for replication (${name})`, () => { let objMD; async.series({ - suspendVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: 
bucketSource, VersioningConfiguration: { Status: 'Suspended' } }, next), - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), + suspendVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Suspended' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObject: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -684,8 +778,14 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMD, }, next), - headObject: next => dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + headObject: next => dstS3.send(new HeadObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: 'null' + })).then(data => next(null, data)).catch(err => next(err)), + listObjectVersions: next => 
dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -711,12 +811,22 @@ describe(`backbeat routes for replication (${name})`, () => { let objMD; async.series({ - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), + putObject: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -762,8 +872,14 @@ describe(`backbeat routes for replication (${name})`, () => { requestBody: result.getSerialized(), }, next); }, - headObject: next => dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + headObject: next => dstS3.send(new HeadObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: 'null' + })).then(data => next(null, data)).catch(err => next(err)), + listObjectVersions: next => dstS3.send(new 
ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -792,12 +908,22 @@ describe(`backbeat routes for replication (${name})`, () => { let expectedVersionId; async.series({ - putObjectSource: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), + putObjectSource: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -825,16 +951,22 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMD, }, next), - putObjectDestination: next => dstS3.putObject( - { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } + putObjectDestination: next => dstS3.send(new PutObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => { expectedVersionId = data.VersionId; - return next(); - }), - 
headObject: next => dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + return next(null, data); + }).catch(err => next(err)), + headObject: next => dstS3.send(new HeadObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: 'null' + })).then(data => next(null, data)).catch(err => next(err)), + listObjectVersions: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -862,26 +994,34 @@ describe(`backbeat routes for replication (${name})`, () => { let secondVersionId; async.series({ - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectDestination: next => dstS3.putObject( - { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObjectDestination: next => dstS3.send(new PutObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => { firstVersionId = data.VersionId; - return next(); - }), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectSource: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } + return next(null, data); + }).catch(err => next(err)), + + enableVersioningSource: next => srcS3.send(new 
PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObjectSource: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => { secondVersionId = data.VersionId; - return next(); - }), + return next(null, data); + }).catch(err => next(err)), + getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -909,11 +1049,19 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMD, }, next), - headObjectFirstVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: firstVersionId }, next), - headObjectSecondVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: secondVersionId }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + headObjectFirstVersion: next => dstS3.send(new HeadObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: firstVersionId + })).then(data => next(null, data)).catch(err => next(err)), + headObjectSecondVersion: next => dstS3.send(new HeadObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: secondVersionId + })).then(data => next(null, data)).catch(err => next(err)), + listObjectVersions: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -948,20 +1096,31 @@ describe(`backbeat routes for replication (${name})`, () => { let versionId; async.series({ - putObjectDestinationInitial: next => dstS3.putObject( - { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: 
bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectSource: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } + putObjectDestinationInitial: next => dstS3.send(new PutObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObjectSource: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => { versionId = data.VersionId; - return next(); - }), + return next(null, data); + }).catch(err => next(err)), + getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -989,10 +1148,14 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMD, }, next), - headObjectNullVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions( - { Bucket: bucketDestination }, next), + headObjectNullVersion: next => dstS3.send(new HeadObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: 'null' + })).then(data => next(null, data)).catch(err => next(err)), + listObjectVersions: next => 
dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -1022,22 +1185,31 @@ describe(`backbeat routes for replication (${name})`, () => { let versionId; async.series({ - suspendVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Suspended' } }, next), - putObjectDestinationInitial: next => dstS3.putObject( - { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectSource: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } + suspendVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Suspended' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObjectDestinationInitial: next => dstS3.send(new PutObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObjectSource: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => { versionId = data.VersionId; - return next(); - }), + return next(null, data); + }).catch(err => next(err)), + getMetadata: next 
=> makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1065,9 +1237,14 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMD, }, next), - headObjectNullVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + headObjectNullVersion: next => dstS3.send(new HeadObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: 'null' + })).then(data => next(null, data)).catch(err => next(err)), + listObjectVersions: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -1098,10 +1275,17 @@ describe(`backbeat routes for replication (${name})`, () => { let versionId; async.series({ - putObjectDestinationInitial: next => dstS3.putObject( - { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), + putObjectDestinationInitial: next => dstS3.send(new PutObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + getMetadataNullVersion: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1129,16 +1313,20 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMDNull, }, next), - enableVersioningSource: next => 
srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectSource: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObjectSource: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => { versionId = data.VersionId; - return next(); - }), + return next(null, data); + }).catch(err => next(err)), + getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1166,9 +1354,14 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMD, }, next), - headObjectNullVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + headObjectNullVersion: next => dstS3.send(new HeadObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: 'null' + })).then(data => next(null, data)).catch(err => next(err)), + listObjectVersions: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -1206,24 +1399,41 @@ describe(`backbeat routes for replication (${name})`, () => { let versionId; async.series({ - suspendVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Suspended' } }, next), - putObjectDestinationInitial: next => dstS3.putObject( - { Bucket: bucketDestination, Key: 
keyName, Body: Buffer.from(testData) }, next), - putObjectTagging: next => dstS3.putObjectTagging( - { Bucket: bucketDestination, Key: keyName, Tagging: { TagSet: tagSet } }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectSource: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } + suspendVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Suspended' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObjectDestinationInitial: next => dstS3.send(new PutObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => next(null, data)).catch(err => next(err)), + + putObjectTagging: next => dstS3.send(new PutObjectTaggingCommand({ + Bucket: bucketDestination, + Key: keyName, + Tagging: { TagSet: tagSet } + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObjectSource: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => { versionId = data.VersionId; - return next(); - }), + next(null, data); + }).catch(err => next(err)), 
getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1251,11 +1461,19 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMD, }, next), - headObjectNullVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - getObjectTaggingNullVersion: next => dstS3.getObjectTagging( - { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + headObjectNullVersion: next => dstS3.send(new HeadObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: 'null' + })).then(data => next(null, data)).catch(err => next(err)), + getObjectTaggingNullVersion: next => dstS3.send(new GetObjectTaggingCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: 'null' + })).then(data => next(null, data)).catch(err => next(err)), + listObjectVersions: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -1290,10 +1508,17 @@ describe(`backbeat routes for replication (${name})`, () => { let versionId; async.series({ - createNullSoloMasterKey: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), + createNullSoloMasterKey: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, 
data)).catch(err => next(err)), + simulateCrrExistingObjectsGetMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1322,8 +1547,10 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: sourceAuthCredentials, requestBody: objMDNull, }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), replicateNullVersion: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1351,14 +1578,14 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMDNullReplicated, }, next), - putNewVersionSource: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } + putNewVersionSource: next => srcS3.send(new PutObjectCommand({ + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData) + })).then(data => { versionId = data.VersionId; - return next(); - }), + return next(null, data); + }).catch(err => next(err)), simulateMetadataReplicationVersion: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1375,7 +1602,9 @@ describe(`backbeat routes for replication (${name})`, () => { objMDVersion = objectMDWithUpdatedAccountInfo(data, src === dst ? 
null : dstAccountInfo); return next(); }), - listObjectVersionsBeforeReplicate: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + listObjectVersionsBeforeReplicate: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), putReplicatedVersion: next => makeBackbeatRequest({ method: 'PUT', resourceType: 'metadata', @@ -1387,11 +1616,19 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMDVersion, }, next), - checkReplicatedNullVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - checkReplicatedVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: versionId }, next), - listObjectVersionsAfterReplicate: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + checkReplicatedNullVersion: next => dstS3.send(new HeadObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: 'null' + })).then(data => next(null, data)).catch(err => next(err)), + checkReplicatedVersion: next => dstS3.send(new HeadObjectCommand({ + Bucket: bucketDestination, + Key: keyName, + VersionId: versionId + })).then(data => next(null, data)).catch(err => next(err)), + listObjectVersionsAfterReplicate: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -1427,30 +1664,31 @@ describe(`backbeat routes for replication (${name})`, () => { let versionId; async.series({ - enableVersioningDestination: next => dstS3.putBucketVersioning({ + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ Bucket: bucketDestination, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - putObjectDestination: next => dstS3.putObject({ + 
VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObjectDestination: next => dstS3.send(new PutObjectCommand({ Bucket: bucketDestination, Key: keyName, - Body: Buffer.from(testData), - }, (err, data) => { - if (err) { - return next(err); - } + Body: Buffer.from(testData) + })).then(data =>{ versionId = data.VersionId; - return next(); - }), - putObjectSource: next => srcS3.putObject({ + return next(null, data); + }).catch(err => next(err)), + + putObjectSource: next => srcS3.send(new PutObjectCommand({ Bucket: bucketSource, Key: keyName, - Body: Buffer.from(testData), - }, next), - enableVersioningSource: next => srcS3.putBucketVersioning({ + Body: Buffer.from(testData) + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ Bucket: bucketSource, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1474,19 +1712,19 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMD, }, next), - headObjectByVersionId: next => dstS3.headObject({ + headObjectByVersionId: next => dstS3.send(new HeadObjectCommand({ Bucket: bucketDestination, Key: keyName, - VersionId: versionId, - }, next), - headObjectByNullVersionId: next => dstS3.headObject({ + VersionId: versionId + })).then(data => next(null, data)).catch(err => next(err)), + headObjectByNullVersionId: next => dstS3.send(new HeadObjectCommand({ Bucket: bucketDestination, Key: keyName, - VersionId: 'null', - }, next), - listObjectVersions: next => dstS3.listObjectVersions({ - Bucket: bucketDestination, - }, next), + VersionId: 'null' + })).then(data => next(null, data)).catch(err => next(err)), + 
listObjectVersions: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -1518,30 +1756,35 @@ describe(`backbeat routes for replication (${name})`, () => { let objMD; async.series({ - putObjectDestinationInitial: next => dstS3.putObject({ + putObjectDestinationInitial: next => dstS3.send(new PutObjectCommand({ Bucket: bucketDestination, Key: keyName, - Body: Buffer.from(testData), - }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning({ + Body: Buffer.from(testData) + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ Bucket: bucketDestination, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - putObjectSource: next => srcS3.putObject({ + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObjectSource: next => srcS3.send(new PutObjectCommand({ Bucket: bucketSource, Key: keyName, - Body: Buffer.from(testData), - }, next), - enableVersioningSource: next => srcS3.putBucketVersioning({ + Body: Buffer.from(testData) + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ Bucket: bucketSource, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - putObjectTaggingSource: next => srcS3.putObjectTagging({ + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + + putObjectTaggingSource: next => srcS3.send(new PutObjectTaggingCommand({ Bucket: bucketSource, Key: keyName, VersionId: 'null', - Tagging: { TagSet: [{ Key: 'key1', Value: 'value1' }] }, - }, next), + Tagging: { TagSet: [{ Key: 'key1', Value: 'value1' }] } + })).then(data => next(null, data)).catch(err => next(err)), + getMetadata: 
next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -1565,19 +1808,19 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMD, }, next), - headObjectNullVersion: next => dstS3.headObject({ + headObjectNullVersion: next => dstS3.send(new HeadObjectCommand({ Bucket: bucketDestination, Key: keyName, - VersionId: 'null', - }, next), - getObjectTaggingNullVersion: next => dstS3.getObjectTagging({ + VersionId: 'null' + })).then(data => next(null, data)).catch(err => next(err)), + getObjectTaggingNullVersion: next => dstS3.send(new GetObjectTaggingCommand({ Bucket: bucketDestination, Key: keyName, - VersionId: 'null', - }, next), - listObjectVersions: next => dstS3.listObjectVersions({ - Bucket: bucketDestination, - }, next), + VersionId: 'null' + })).then(data => next(null, data)).catch(err => next(err)), + listObjectVersions: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); @@ -1608,31 +1851,29 @@ describe(`backbeat routes for replication (${name})`, () => { let versionId; async.series({ - // === SETUP PHASE === - enableVersioningDestination: next => dstS3.putBucketVersioning({ + enableVersioningDestination: next => dstS3.send(new PutBucketVersioningCommand({ Bucket: bucketDestination, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - putObjectDestination: next => dstS3.putObject({ + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), + putObjectDestination: next => dstS3.send(new PutObjectCommand({ Bucket: bucketDestination, Key: keyName, - Body: Buffer.from(testData), - }, (err, data) => { - if (err) { - return next(err); - } + Body: Buffer.from(testData) + })).then(data => { versionId = data.VersionId; - return next(); - }), - putObjectSource: next => 
srcS3.putObject({ + return next(null, data); + }).catch(err => next(err)), + + putObjectSource: next => srcS3.send(new PutObjectCommand({ Bucket: bucketSource, Key: keyName, - Body: Buffer.from(testData), - }, next), - enableVersioningSource: next => srcS3.putBucketVersioning({ + Body: Buffer.from(testData) + })).then(data => next(null, data)).catch(err => next(err)), + + enableVersioningSource: next => srcS3.send(new PutBucketVersioningCommand({ Bucket: bucketSource, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), + VersioningConfiguration: { Status: 'Enabled' } + })).then(data => next(null, data)).catch(err => next(err)), // === LIFECYCLE SIMULATION PHASE === // Lifecycle Simulation: GET current null version metadata getSourceNullVersionForLifecycle: next => makeBackbeatRequest({ @@ -1647,7 +1888,7 @@ describe(`backbeat routes for replication (${name})`, () => { return next(err); } objMDUpdated = JSON.parse(data.body).Body; - return next(); + return next(null, data); }), // Lifecycle Simulation: Apply lifecycle changes to null version metadata // Lifecycle changes can consist of: @@ -1677,7 +1918,7 @@ describe(`backbeat routes for replication (${name})`, () => { return next(err); } objMDReplicated = objectMDWithUpdatedAccountInfo(data, src === dst ? 
null : dstAccountInfo); - return next(); + return next(null, data); }), // Replication: PUT lifecycled null version to destination replicateLifecycledNullVersionToDestination: next => makeBackbeatRequest({ @@ -1689,25 +1930,23 @@ describe(`backbeat routes for replication (${name})`, () => { authCredentials: destinationAuthCredentials, requestBody: objMDReplicated, }, next), - // === VALIDATION PHASE === - headObjectByVersionId: next => dstS3.headObject({ + headObjectByVersionId: next => dstS3.send(new HeadObjectCommand({ Bucket: bucketDestination, Key: keyName, - VersionId: versionId, - }, next), - headObjectByNullVersion: next => dstS3.headObject({ + VersionId: versionId + })).then(data => next(null, data)).catch(err => next(err)), + headObjectByNullVersion: next => dstS3.send(new HeadObjectCommand({ Bucket: bucketDestination, Key: keyName, - VersionId: 'null', - }, next), - listObjectVersionsDestination: next => dstS3.listObjectVersions({ - Bucket: bucketDestination, - }, next), + VersionId: 'null' + })).then(data => next(null, data)).catch(err => next(err)), + listObjectVersionsDestination: next => dstS3.send(new ListObjectVersionsCommand({ + Bucket: bucketDestination + })).then(data => next(null, data)).catch(err => next(err)), }, (err, results) => { if (err) { return done(err); } - const firstHeadObjectRes = results.headObjectByVersionId; assert.strictEqual(firstHeadObjectRes.VersionId, versionId); From 8c99a99cfd858d0439d16fe2c6fd70a3cc44dcac Mon Sep 17 00:00:00 2001 From: Maha Benzekri Date: Tue, 30 Sep 2025 14:41:30 +0200 Subject: [PATCH 06/12] complete mpu related tests migration Issue: CLDSRV-724 --- .../mpuComplete/azureCompleteMPU.js | 202 ++++++++++-------- .../mpuComplete/mpuAwsVersioning.js | 49 +++-- 2 files changed, 133 insertions(+), 118 deletions(-) diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/azureCompleteMPU.js b/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/azureCompleteMPU.js index 
function getCheck(key, bucketMatch, cb) {
    // Verify the MPU result both through the S3 front end (ETag of the
    // composed object) and directly in the Azure container (blob length).
    // When `bucketMatch` is false, the backend prefixes the blob name with
    // the container name.
    s3.send(new GetObjectCommand({ Bucket: azureContainerName, Key: key }))
        .then(s3Res => {
            assert.strictEqual(s3Res.ETag, `"${s3MD5}"`);
            const azureKey = bucketMatch ? key : `${azureContainerName}/${key}`;
            return azureClient
                .getContainerClient(azureContainerName)
                .getBlobClient(azureKey)
                .getProperties();
        })
        .then(azureRes => {
            assert.strictEqual(expectedContentLength, azureRes.contentLength);
            cb();
        })
        .catch(cb);
}
function mpuSetup(key, location, cb) {
    // Create a multipart upload pinned to `location` and upload two parts
    // (part 1: small body, part 2: 5MB body). Calls back with
    // (uploadId, partArray).
    //
    // NOTE(fix): callers invoke mpuSetup(key, loc, (uploadId, partArray) =>
    // ...). The migrated version did `return cb(err)` on failure, which
    // silently hands the Error object to the caller in the uploadId
    // position. Restore the original fail-fast behavior by asserting on
    // the error instead.
    const partArray = [];
    async.waterfall([
        next => {
            const params = {
                Bucket: azureContainerName,
                Key: key,
                Metadata: { 'scal-location-constraint': location },
            };
            s3.send(new CreateMultipartUploadCommand(params))
                .then(res => {
                    const uploadId = res.UploadId;
                    assert(uploadId);
                    assert.strictEqual(res.Bucket, azureContainerName);
                    assert.strictEqual(res.Key, key);
                    return next(null, uploadId);
                })
                .catch(next);
        },
        (uploadId, next) => {
            const partParams = {
                Bucket: azureContainerName,
                Key: key,
                PartNumber: 1,
                UploadId: uploadId,
                Body: smallBody,
            };
            s3.send(new UploadPartCommand(partParams))
                .then(res => {
                    partArray.push({ ETag: res.ETag, PartNumber: 1 });
                    return next(null, uploadId);
                })
                .catch(next);
        },
        (uploadId, next) => {
            const partParams = {
                Bucket: azureContainerName,
                Key: key,
                PartNumber: 2,
                UploadId: uploadId,
                Body: bigBody,
            };
            s3.send(new UploadPartCommand(partParams))
                .then(res => {
                    partArray.push({ ETag: res.ETag, PartNumber: 2 });
                    return next(null, uploadId);
                })
                .catch(next);
        },
    ], (err, uploadId) => {
        // Fail the test here rather than leaking `err` into the uploadId
        // position of the (uploadId, partArray) callback.
        assert.ifError(err);
        process.stdout.write('Created MPU and put two parts\n');
        cb(uploadId, partArray);
    });
}
=> { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; @@ -149,11 +151,12 @@ function testSuite() { UploadId: uploadId, MultipartUpload: { Parts: partArray }, }; - s3.completeMultipartUpload(params, err => { - assert.equal(err, null, `Err completing MPU: ${err}`); - setTimeout(() => getCheck(this.test.key, true, done), - azureTimeout); - }); + s3.send(new CompleteMultipartUploadCommand(params)) + .then(() => { + setTimeout(() => getCheck(this.test.key, true, done), + azureTimeout); + }) + .catch(done); }); }); @@ -167,24 +170,23 @@ function testSuite() { UploadId: uploadId, MultipartUpload: { Parts: partArray }, }; - s3.completeMultipartUpload(params, err => { - assert.equal(err, null, `Err completing MPU: ${err}`); - setTimeout(() => getCheck(this.test.key, false, done), - azureTimeout); - }); + s3.send(new CompleteMultipartUploadCommand(params)) + .then(() => { + setTimeout(() => getCheck(this.test.key, false, done), + azureTimeout); + }) + .catch(done); }); }); it('should complete an MPU on Azure with same key as object put ' + 'to file', function itFn(done) { const body = Buffer.from('I am a body', 'utf8'); - s3.putObject({ + s3.send(new PutObjectCommand({ Bucket: azureContainerName, Key: this.test.key, Body: body, - Metadata: { 'scal-location-constraint': fileLocation } }, - err => { - assert.equal(err, null, `Err putting object to file: ${err}`); + Metadata: { 'scal-location-constraint': fileLocation } })).then(() => { mpuSetup(this.test.key, azureLocation, (uploadId, partArray) => { const params = { @@ -193,25 +195,24 @@ function testSuite() { UploadId: uploadId, MultipartUpload: { Parts: partArray }, }; - s3.completeMultipartUpload(params, err => { - assert.equal(err, null, `Err completing MPU: ${err}`); - setTimeout(() => getCheck(this.test.key, true, done), - azureTimeout); - }); + s3.send(new CompleteMultipartUploadCommand(params)) + .then(() => { + setTimeout(() => getCheck(this.test.key, true, done), + azureTimeout); + }) + 
.catch(done); }); - }); + }).catch(done); }); it('should complete an MPU on Azure with same key as object put ' + 'to Azure', function itFn(done) { const body = Buffer.from('I am a body', 'utf8'); - s3.putObject({ + s3.send(new PutObjectCommand({ Bucket: azureContainerName, Key: this.test.key, Body: body, - Metadata: { 'scal-location-constraint': azureLocation } }, - err => { - assert.equal(err, null, `Err putting object to Azure: ${err}`); + Metadata: { 'scal-location-constraint': azureLocation } })).then(() => { mpuSetup(this.test.key, azureLocation, (uploadId, partArray) => { const params = { @@ -220,10 +221,13 @@ function testSuite() { UploadId: uploadId, MultipartUpload: { Parts: partArray }, }; - s3.completeMultipartUpload(params, err => { - assert.equal(err, null, `Err completing MPU: ${err}`); + s3.send(new CompleteMultipartUploadCommand(params)).then(() => { + setTimeout(() => getCheck(this.test.key, true, done), azureTimeout); + }).catch(err => { + assert.equal(err, null, `Err completing MPU: ${err}`); + done(err); }); }); }); @@ -232,34 +236,42 @@ function testSuite() { it('should complete an MPU on Azure with same key as object put ' + 'to AWS', function itFn(done) { const body = Buffer.from('I am a body', 'utf8'); - s3.putObject({ + s3.send(new PutObjectCommand({ Bucket: azureContainerName, Key: this.test.key, Body: body, - Metadata: { 'scal-location-constraint': awsLocation } }, - err => { - assert.equal(err, null, `Err putting object to AWS: ${err}`); - mpuSetup(this.test.key, azureLocation, - (uploadId, partArray) => { - const params = { - Bucket: azureContainerName, - Key: this.test.key, - UploadId: uploadId, - MultipartUpload: { Parts: partArray }, - }; - s3.completeMultipartUpload(params, err => { - assert.equal(err, null, `Err completing MPU: ${err}`); - // make sure object is gone from AWS - setTimeout(() => { - this.test.awsClient.getObject({ Bucket: awsBucket, - Key: this.test.key }, err => { - assert.strictEqual(err.code, 'NoSuchKey'); - 
getCheck(this.test.key, true, done); - }); - }, azureTimeout); + Metadata: { 'scal-location-constraint': awsLocation } + })) + .then(() => { + mpuSetup(this.test.key, azureLocation, + (uploadId, partArray) => { + const params = { + Bucket: azureContainerName, + Key: this.test.key, + UploadId: uploadId, + MultipartUpload: { Parts: partArray }, + }; + s3.send(new CompleteMultipartUploadCommand(params)) + .then(() => { + // make sure object is gone from AWS + setTimeout(() => { + this.test.awsClient.send(new GetObjectCommand({ + Bucket: awsBucket, + Key: this.test.key + })) + .then(() => { + done(new Error('Expected NoSuchKey error')); + }) + .catch(err => { + assert.strictEqual(err.name, 'NoSuchKey'); + getCheck(this.test.key, true, done); + }); + }, azureTimeout); + }) + .catch(done); }); - }); - }); + }) + .catch(done); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/mpuAwsVersioning.js b/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/mpuAwsVersioning.js index ff7eea8adf..2da7f142cc 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/mpuAwsVersioning.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/mpuAwsVersioning.js @@ -2,6 +2,9 @@ const assert = require('assert'); const async = require('async'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); +const { CreateBucketCommand, DeleteBucketCommand, + CreateMultipartUploadCommand, UploadPartCommand, + CompleteMultipartUploadCommand, DeleteObjectCommand } = require('@aws-sdk/client-s3'); const { minimumAllowedPartSize } = require('../../../../../../constants'); const { removeAllVersions } = require('../../../lib/utility/versioning-util'); const { @@ -29,13 +32,14 @@ function mpuSetup(s3, key, location, cb) { Key: key, Metadata: { 'scal-location-constraint': location }, }; - s3.createMultipartUpload(params, (err, res) => { - assert.strictEqual(err, null, 
`err creating mpu: ${err}`); + s3.send(new CreateMultipartUploadCommand(params)).then(res => { const uploadId = res.UploadId; assert(uploadId); assert.strictEqual(res.Bucket, bucket); assert.strictEqual(res.Key, key); - next(err, uploadId); + next(null, uploadId); + }).catch(err => { + next(err); }); }, (uploadId, next) => { @@ -46,10 +50,11 @@ function mpuSetup(s3, key, location, cb) { UploadId: uploadId, Body: data[0], }; - s3.uploadPart(partParams, (err, res) => { - assert.strictEqual(err, null, `err uploading part 1: ${err}`); + s3.send(new UploadPartCommand(partParams)).then(res => { partArray.push({ ETag: res.ETag, PartNumber: 1 }); - next(err, uploadId); + next(null, uploadId); + }).catch(err => { + next(err); }); }, (uploadId, next) => { @@ -60,10 +65,11 @@ function mpuSetup(s3, key, location, cb) { UploadId: uploadId, Body: data[1], }; - s3.uploadPart(partParams, (err, res) => { - assert.strictEqual(err, null, `err uploading part 2: ${err}`); + s3.send(new UploadPartCommand(partParams)).then(res => { partArray.push({ ETag: res.ETag, PartNumber: 2 }); - next(err, uploadId); + next(null, uploadId); + }).catch(err => { + next(err); }); }, ], (err, uploadId) => { @@ -75,13 +81,12 @@ function mpuSetup(s3, key, location, cb) { function completeAndAssertMpu(s3, params, cb) { const { bucket, key, uploadId, partArray, expectVersionId, expectedGetVersionId } = params; - s3.completeMultipartUpload({ + s3.send(new CompleteMultipartUploadCommand({ Bucket: bucket, Key: key, UploadId: uploadId, MultipartUpload: { Parts: partArray }, - }, (err, data) => { - assert.strictEqual(err, null, `Err completing MPU: ${err}`); + })).then(data => { if (expectVersionId) { assert.notEqual(data.VersionId, undefined); } else { @@ -90,6 +95,8 @@ function completeAndAssertMpu(s3, params, cb) { const expectedVersionId = expectedGetVersionId || data.VersionId; getAndAssertResult(s3, { bucket, key, body: concattedData, expectedVersionId }, cb); + }).catch(err => { + cb(err); }); } @@ -99,19 
+106,15 @@ function testSuite() { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); const s3 = bucketUtil.s3; - beforeEach(done => s3.createBucket({ + beforeEach(done => s3.send(new CreateBucketCommand({ Bucket: bucket, CreateBucketConfiguration: { LocationConstraint: awsLocation, }, - }, done)); - afterEach(done => { - removeAllVersions({ Bucket: bucket }, err => { - if (err) { - return done(err); - } - return s3.deleteBucket({ Bucket: bucket }, done); - }); + })).then(() => done()).catch(err => done(err))); + afterEach(async () => { + await removeAllVersions({ Bucket: bucket }); + await s3.send(new DeleteBucketCommand({ Bucket: bucket })); }); it('versioning not configured: should not return version id ' + @@ -138,8 +141,8 @@ function testSuite() { (uploadId, partArray, next) => completeAndAssertMpu(s3, { bucket, key, uploadId, partArray, expectVersionId: false }, next), - next => s3.deleteObject({ Bucket: bucket, Key: key, VersionId: - 'null' }, next), + next => s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: key, VersionId: + 'null' })).then(delData => next(null, delData)).catch(next), (delData, next) => getAndAssertResult(s3, { bucket, key, expectedError: 'NoSuchKey' }, next), next => awsGetLatestVerId(key, '', next), From 16de9748feca8fcdf773f0609eced8c18a8f3ec9 Mon Sep 17 00:00:00 2001 From: Maha Benzekri Date: Tue, 30 Sep 2025 14:46:44 +0200 Subject: [PATCH 07/12] multiple backend related tests migration Issue: CLDSRV-724 --- .../test/multipleBackend/unknownEndpoint.js | 63 ++-- .../test/multipleBackend/utils.js | 339 ++++++++++-------- tests/multipleBackend/multipartUpload.js | 46 ++- tests/multipleBackend/objectPutPart.js | 13 +- 4 files changed, 251 insertions(+), 210 deletions(-) diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/unknownEndpoint.js b/tests/functional/aws-node-sdk/test/multipleBackend/unknownEndpoint.js index 834af24875..b443da5da9 100644 --- 
a/tests/functional/aws-node-sdk/test/multipleBackend/unknownEndpoint.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/unknownEndpoint.js @@ -1,4 +1,11 @@ const assert = require('assert'); +const { + CreateBucketCommand, + GetBucketLocationCommand, + PutObjectCommand, + HeadObjectCommand, + GetObjectCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); const config = require('../../../config.json'); @@ -21,27 +28,18 @@ describe('Requests to ip endpoint not in config', () => { s3 = bucketUtil.s3; }); - after(() => { + after(async () => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + await bucketUtil.empty(bucket); + process.stdout.write('Deleting bucket\n'); + await bucketUtil.deleteOne(bucket); }); it('should accept put bucket request ' + 'to IP address endpoint that is not in config using ' + 'path style', - done => { - s3.createBucket({ Bucket: bucket }, err => { - assert.ifError(err); - done(); - }); + async () => { + await s3.send(new CreateBucketCommand({ Bucket: bucket })); }); const itSkipIfE2E = process.env.S3_END_TO_END ? 
it.skip : it; @@ -51,42 +49,25 @@ describe('Requests to ip endpoint not in config', () => { itSkipIfE2E('should show us-east-1 as bucket location since' + 'IP address endpoint was not in config thereby ' + 'defaulting to us-east-1', - done => { - s3.getBucketLocation({ Bucket: bucket }, - (err, res) => { - assert.ifError(err); - // us-east-1 is returned as empty string - assert.strictEqual(res - .LocationConstraint, ''); - done(); - }); + async () => { + const res = await s3.send(new GetBucketLocationCommand({ Bucket: bucket })); + assert.strictEqual(res.LocationConstraint, ''); }); it('should accept put object request ' + 'to IP address endpoint that is not in config using ' + 'path style and use the bucket location for the object', - done => { - s3.putObject({ Bucket: bucket, Key: key, Body: body }, - err => { - assert.ifError(err); - return s3.headObject({ Bucket: bucket, Key: key }, - err => { - assert.ifError(err); - done(); - }); - }); + async () => { + await s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: body })); + await s3.send(new HeadObjectCommand({ Bucket: bucket, Key: key })); }); it('should accept get object request ' + 'to IP address endpoint that is not in config using ' + 'path style', - done => { - s3.getObject({ Bucket: bucket, Key: key }, - (err, res) => { - assert.ifError(err); - assert.strictEqual(res.ETag, expectedETag); - done(); - }); + async () => { + const res = await s3.send(new GetObjectCommand({ Bucket: bucket, Key: key })); + assert.strictEqual(res.ETag, expectedETag); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/utils.js b/tests/functional/aws-node-sdk/test/multipleBackend/utils.js index 0a87530347..a38bf9e36e 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/utils.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/utils.js @@ -1,11 +1,17 @@ const assert = require('assert'); const crypto = require('crypto'); -const { errors, storage } = require('arsenal'); -const 
AWS = require('aws-sdk'); -AWS.config.logger = console; +const { storage } = require('arsenal'); +const { + S3Client, + PutObjectCommand, + GetObjectCommand, + PutBucketVersioningCommand, + PutObjectTaggingCommand, + GetObjectTaggingCommand, + DeleteObjectTaggingCommand, +} = require('@aws-sdk/client-s3'); const { v4: uuidv4 } = require('uuid'); -const async = require('async'); const azure = require('@azure/storage-blob'); const { GCP } = storage.data.external; @@ -49,7 +55,7 @@ let gcpBucketMPU; if (config.backends.data === 'multiple') { if (config.locationConstraints[awsLocation]) { const awsConfig = getRealAwsConfig(awsLocation); - awsS3 = new AWS.S3(awsConfig); + awsS3 = new S3Client(awsConfig); awsBucket = config.locationConstraints[awsLocation].details.bucketName; } else { process.stdout.write(`LocationConstraint for aws '${awsLocation}' not found in ${ @@ -69,16 +75,6 @@ if (config.backends.data === 'multiple') { } -function _assertErrorResult(err, expectedError, desc) { - if (!expectedError) { - assert.strictEqual(err, null, `got error for ${desc}: ${err}`); - return; - } - assert(err, `expected ${expectedError} but found no error`); - assert.strictEqual(err.code, expectedError); - assert.strictEqual(err.statusCode, errors[expectedError].code); -} - const utils = { describeSkipIfNotMultiple, describeSkipIfNotMultipleOrCeph, @@ -216,88 +212,92 @@ utils.expectedETag = (body, getStringified = true) => { return `"${eTagValue}"`; }; -utils.putToAwsBackend = (s3, bucket, key, body, cb) => { - s3.putObject({ Bucket: bucket, Key: key, Body: body, - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, result) => { - cb(err, result.VersionId); - } - ); +utils.putToAwsBackend = async (s3, bucket, key, body) => { + const result = await s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: body, + Metadata: { 'scal-location-constraint': awsLocation } + })); + return result.VersionId; }; -utils.enableVersioning = (s3, bucket, cb) => { - 
s3.putBucketVersioning({ Bucket: bucket, - VersioningConfiguration: versioningEnabled }, err => { - assert.strictEqual(err, null, 'Expected success ' + - `enabling versioning, got error ${err}`); - cb(); - }); +utils.enableVersioning = async (s3, bucket) => { + await s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: versioningEnabled + })); }; -utils.suspendVersioning = (s3, bucket, cb) => { - s3.putBucketVersioning({ Bucket: bucket, - VersioningConfiguration: versioningSuspended }, err => { - assert.strictEqual(err, null, 'Expected success ' + - `enabling versioning, got error ${err}`); - cb(); - }); +utils.suspendVersioning = async (s3, bucket) => { + await s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: versioningSuspended + })); }; -utils.mapToAwsPuts = (s3, bucket, key, dataArray, cb) => { - async.mapSeries(dataArray, (data, next) => { - utils.putToAwsBackend(s3, bucket, key, data, next); - }, (err, results) => { - assert.strictEqual(err, null, 'Expected success ' + - `putting object, got error ${err}`); - cb(null, results); - }); +utils.mapToAwsPuts = async (s3, bucket, key, dataArray) => { + const results = []; + for (const data of dataArray) { + const versionId = await utils.putToAwsBackend(s3, bucket, key, data); + results.push(versionId); + } + return results; }; -utils.putVersionsToAws = (s3, bucket, key, versions, cb) => { - utils.enableVersioning(s3, bucket, () => { - utils.mapToAwsPuts(s3, bucket, key, versions, cb); - }); +utils.putVersionsToAws = async (s3, bucket, key, versions) => { + await utils.enableVersioning(s3, bucket); + return utils.mapToAwsPuts(s3, bucket, key, versions); }; -utils.putNullVersionsToAws = (s3, bucket, key, versions, cb) => { - utils.suspendVersioning(s3, bucket, () => { - utils.mapToAwsPuts(s3, bucket, key, versions, cb); - }); +utils.putNullVersionsToAws = async (s3, bucket, key, versions) => { + await utils.suspendVersioning(s3, bucket); + return 
utils.mapToAwsPuts(s3, bucket, key, versions); }; -utils.getAndAssertResult = (s3, params, cb) => { +utils.getAndAssertResult = async (s3, params) => { const { bucket, key, body, versionId, expectedVersionId, expectedTagCount, expectedError } = params; - s3.getObject({ Bucket: bucket, Key: key, VersionId: versionId }, - (err, data) => { - _assertErrorResult(err, expectedError, 'putting tags'); - if (expectedError) { - return cb(); - } - assert.strictEqual(err, null, 'Expected success ' + - `getting object, got error ${err}`); - if (body) { - assert(data.Body, 'expected object body in response'); - assert.equal(data.Body.length, data.ContentLength, - `received data of length ${data.Body.length} does not ` + - 'equal expected based on ' + - `content length header of ${data.ContentLength}`); - const expectedMD5 = utils.expectedETag(body, false); - const resultMD5 = utils.expectedETag(data.Body, false); - assert.strictEqual(resultMD5, expectedMD5); - } - if (!expectedVersionId) { - assert.strictEqual(data.VersionId, undefined); - } else { - assert.strictEqual(data.VersionId, expectedVersionId); - } - if (expectedTagCount && expectedTagCount === '0') { - assert.strictEqual(data.TagCount, undefined); - } else if (expectedTagCount) { - assert.strictEqual(data.TagCount, parseInt(expectedTagCount, 10)); - } - return cb(); - }); + + try { + const data = await s3.send(new GetObjectCommand({ + Bucket: bucket, + Key: key, + VersionId: versionId + })); + + if (expectedError) { + throw new Error(`Expected error ${expectedError} but got success`); + } + + if (body) { + assert(data.Body, 'expected object body in response'); + assert.equal(data.Body.length, data.ContentLength, + `received data of length ${data.Body.length} does not ` + + 'equal expected based on ' + + `content length header of ${data.ContentLength}`); + const expectedMD5 = utils.expectedETag(body, false); + const resultMD5 = utils.expectedETag(data.Body, false); + assert.strictEqual(resultMD5, expectedMD5); + } + if 
(!expectedVersionId) { + assert.strictEqual(data.VersionId, undefined); + } else { + assert.strictEqual(data.VersionId, expectedVersionId); + } + if (expectedTagCount && expectedTagCount === '0') { + assert.strictEqual(data.TagCount, undefined); + } else if (expectedTagCount) { + assert.strictEqual(data.TagCount, parseInt(expectedTagCount, 10)); + } + return undefined; + } catch (err) { + if (expectedError) { + assert.strictEqual(err.name, expectedError); + return undefined; + } + throw err; + } }; utils.getAwsRetry = (params, retryNumber, assertCb) => { @@ -308,23 +308,29 @@ utils.getAwsRetry = (params, retryNumber, assertCb) => { 2: awsSecondTimeout, }; const maxRetries = 2; - const getObject = awsS3.getObject.bind(awsS3); const timeout = retryTimeout[retryNumber]; - return setTimeout(getObject, timeout, { Bucket: awsBucket, Key: key, - VersionId: versionId }, - (err, res) => { - try { - // note: this will only catch exceptions thrown before an - // asynchronous call - return assertCb(err, res); - } catch (e) { - if (retryNumber !== maxRetries) { - return utils.getAwsRetry(params, retryNumber + 1, - assertCb); - } - throw e; + + const executeGet = async () => { + try { + const res = await awsS3.send(new GetObjectCommand({ + Bucket: awsBucket, + Key: key, + VersionId: versionId + })); + return assertCb(null, res); + } catch (err) { + return assertCb(err); + } + }; + + return setTimeout(() => { + executeGet().catch(e => { + if (retryNumber !== maxRetries) { + return utils.getAwsRetry(params, retryNumber + 1, assertCb); } + throw e; }); + }, timeout); }; utils.awsGetLatestVerId = (key, body, cb) => @@ -351,82 +357,121 @@ function _getTaggingConfig(tags) { }; } -utils.tagging.putTaggingAndAssert = (s3, params, cb) => { - const { bucket, key, tags, versionId, expectedVersionId, - expectedError } = params; +utils.tagging.putTaggingAndAssert = async (s3, params) => { + const { bucket, key, tags, versionId, expectedVersionId, expectedError } = params; const taggingConfig 
= _getTaggingConfig(tags); - return s3.putObjectTagging({ Bucket: bucket, Key: key, VersionId: versionId, - Tagging: taggingConfig }, (err, data) => { - _assertErrorResult(err, expectedError, 'putting tags'); + + try { + const data = await s3.send(new PutObjectTaggingCommand({ + Bucket: bucket, + Key: key, + VersionId: versionId, + Tagging: taggingConfig + })); + if (expectedError) { - return cb(); + throw new Error(`Expected error ${expectedError} but got success`); } - assert.strictEqual(err, null, `got error for putting tags: ${err}`); + if (expectedVersionId) { assert.strictEqual(data.VersionId, expectedVersionId); } else { assert.strictEqual(data.VersionId, undefined); } - return cb(null, data.VersionId); - }); + return data.VersionId; + } catch (err) { + if (expectedError) { + assert.strictEqual(err.name, expectedError); + return undefined; + } + throw err; + } }; -utils.tagging.getTaggingAndAssert = (s3, params, cb) => { +utils.tagging.getTaggingAndAssert = async (s3, params) => { const { bucket, key, expectedTags, versionId, expectedVersionId, expectedError, getObject } = params; - s3.getObjectTagging({ Bucket: bucket, Key: key, VersionId: versionId }, - (err, data) => { - _assertErrorResult(err, expectedError, 'putting tags'); - if (expectedError) { - return cb(); - } - const expectedTagResult = _getTaggingConfig(expectedTags); - const expectedTagCount = `${Object.keys(expectedTags).length}`; - assert.strictEqual(err, null, `got error for putting tags: ${err}`); - if (expectedVersionId) { - assert.strictEqual(data.VersionId, expectedVersionId); - } else { - assert.strictEqual(data.VersionId, undefined); - } - assert.deepStrictEqual(data.TagSet, expectedTagResult.TagSet); - if (getObject === false) { - return process.nextTick(cb, null, data.VersionId); - } - return utils.getAndAssertResult(s3, { bucket, key, versionId, - expectedVersionId, expectedTagCount }, - () => cb(null, data.VersionId)); - }); + + try { + const data = await s3.send(new 
GetObjectTaggingCommand({ + Bucket: bucket, + Key: key, + VersionId: versionId + })); + + if (expectedError) { + throw new Error(`Expected error ${expectedError} but got success`); + } + + const expectedTagResult = _getTaggingConfig(expectedTags); + const expectedTagCount = `${Object.keys(expectedTags).length}`; + + if (expectedVersionId) { + assert.strictEqual(data.VersionId, expectedVersionId); + } else { + assert.strictEqual(data.VersionId, undefined); + } + assert.deepStrictEqual(data.TagSet, expectedTagResult.TagSet); + + if (getObject !== false) { + await utils.getAndAssertResult(s3, { bucket, key, versionId, + expectedVersionId, expectedTagCount }); + } + + return data.VersionId; + } catch (err) { + if (expectedError) { + assert.strictEqual(err.name, expectedError); + return undefined; + } + throw err; + } }; -utils.tagging.delTaggingAndAssert = (s3, params, cb) => { +utils.tagging.delTaggingAndAssert = async (s3, params) => { const { bucket, key, versionId, expectedVersionId, expectedError } = params; - return s3.deleteObjectTagging({ Bucket: bucket, Key: key, - VersionId: versionId }, (err, data) => { - _assertErrorResult(err, expectedError, 'putting tags'); + + try { + const data = await s3.send(new DeleteObjectTaggingCommand({ + Bucket: bucket, + Key: key, + VersionId: versionId + })); + if (expectedError) { - return cb(); + throw new Error(`Expected error ${expectedError} but got success`); } - assert.strictEqual(err, null, `got error for putting tags: ${err}`); + if (expectedVersionId) { assert.strictEqual(data.VersionId, expectedVersionId); } else { assert.strictEqual(data.VersionId, undefined); } - return utils.tagging.getTaggingAndAssert(s3, { bucket, key, versionId, - expectedVersionId, expectedTags: {} }, () => cb()); - }); + + await utils.tagging.getTaggingAndAssert(s3, { + bucket, key, versionId, expectedVersionId, expectedTags: {} + }); + return undefined; + } catch (err) { + if (expectedError) { + assert.strictEqual(err.name, expectedError); + 
return undefined; + } + throw err; + } }; -utils.tagging.awsGetAssertTags = (params, cb) => { +utils.tagging.awsGetAssertTags = async params => { const { key, versionId, expectedTags } = params; const expectedTagResult = _getTaggingConfig(expectedTags); - awsS3.getObjectTagging({ Bucket: awsBucket, Key: key, - VersionId: versionId }, (err, data) => { - assert.strictEqual(err, null, 'got unexpected error getting ' + - `tags directly from AWS: ${err}`); - assert.deepStrictEqual(data.TagSet, expectedTagResult.TagSet); - return cb(); - }); + + const data = await awsS3.send(new GetObjectTaggingCommand({ + Bucket: awsBucket, + Key: key, + VersionId: versionId + })); + + assert.deepStrictEqual(data.TagSet, expectedTagResult.TagSet); }; module.exports = utils; diff --git a/tests/multipleBackend/multipartUpload.js b/tests/multipleBackend/multipartUpload.js index 90a3d9a736..b01c8e2861 100644 --- a/tests/multipleBackend/multipartUpload.js +++ b/tests/multipleBackend/multipartUpload.js @@ -1,6 +1,9 @@ const assert = require('assert'); const async = require('async'); -const AWS = require('aws-sdk'); +const { S3Client, + HeadObjectCommand, + AbortMultipartUploadCommand, + ListPartsCommand } = require('@aws-sdk/client-s3'); const { parseString } = require('xml2js'); const { models } = require('arsenal'); @@ -34,7 +37,7 @@ const fileLocation = 'scality-internal-file'; const awsLocation = 'awsbackend'; const awsLocationMismatch = 'awsbackendmismatch'; const awsConfig = getRealAwsConfig(awsLocation); -const s3 = new AWS.S3(awsConfig); +const s3 = new S3Client(awsConfig); const log = new DummyRequestLogger(); const fakeUploadId = 'fakeuploadid'; @@ -240,15 +243,17 @@ function _getZenkoObjectKey(objectKey) { function assertObjOnBackend(expectedBackend, objectKey, cb) { const zenkoObjectKey = _getZenkoObjectKey(objectKey); return objectGet(authInfo, getObjectGetRequest(zenkoObjectKey), false, log, - (err, result, metaHeaders) => { + async (err, result, metaHeaders) => { 
assert.equal(err, null, `Error getting object on S3: ${err}`); assert.strictEqual(metaHeaders[`x-amz-meta-${locMetaHeader}`], expectedBackend); if (expectedBackend === awsLocation) { - return s3.headObject({ Bucket: awsBucket, Key: objectKey }, - (err, result) => { + return s3.send(new HeadObjectCommand({ Bucket: awsBucket, Key: objectKey })) + .then(result => { + assert.strictEqual(result.Metadata[locMetaHeader], awsLocation); + return cb(); + }).catch(err => { assert.equal(err, null, 'Error on headObject call to AWS: ' + `${err}`); - assert.strictEqual(result.Metadata[locMetaHeader], awsLocation); return cb(); }); } @@ -310,10 +315,13 @@ function putObject(putBackend, objectKey, cb) { function abortMPU(uploadId, awsParams, cb) { const abortParams = Object.assign({ UploadId: uploadId }, awsParams); - s3.abortMultipartUpload(abortParams, err => { + s3.send(new AbortMultipartUploadCommand(abortParams)) + .then(() => { + cb(); + }).catch(err => { assert.equal(err, null, `Error aborting MPU: ${err}`); - cb(); - }); + cb(); + }); } function abortMultipleMpus(backendsInfo, callback) { @@ -492,15 +500,17 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { const objectKey = `key-${Date.now()}`; mpuSetup(awsLocation, objectKey, uploadId => { const delParams = getDeleteParams(objectKey, uploadId); - multipartDelete(authInfo, delParams, log, err => { + multipartDelete(authInfo, delParams, log, async err => { assert.equal(err, null, `Error aborting MPU: ${err}`); - s3.listParts({ + s3.send(new ListPartsCommand({ Bucket: awsBucket, Key: objectKey, UploadId: uploadId, - }, err => { + })).then(() => { + assert.fail('Expected an error listing parts of aborted MPU'); + }).catch(err => { const wantedError = isCEPH ? 
'NoSuchKey' : 'NoSuchUpload'; - assert.strictEqual(err.code, wantedError); + assert.strictEqual(err.name, wantedError); done(); }); }); @@ -512,15 +522,17 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { const objectKey = `key-${Date.now()}`; mpuSetup(awsLocationMismatch, objectKey, uploadId => { const delParams = getDeleteParams(objectKey, uploadId); - multipartDelete(authInfo, delParams, log, err => { + multipartDelete(authInfo, delParams, log, async err => { assert.equal(err, null, `Error aborting MPU: ${err}`); - s3.listParts({ + s3.send(new ListPartsCommand({ Bucket: awsBucket, Key: `${bucketName}/${objectKey}`, UploadId: uploadId, - }, err => { + })).then(() => { + assert.fail('Expected an error listing parts of aborted MPU'); + }).catch(err => { const wantedError = isCEPH ? 'NoSuchKey' : 'NoSuchUpload'; - assert.strictEqual(err.code, wantedError); + assert.strictEqual(err.name, wantedError); done(); }); }); diff --git a/tests/multipleBackend/objectPutPart.js b/tests/multipleBackend/objectPutPart.js index 345266007d..fcbf29ca1d 100644 --- a/tests/multipleBackend/objectPutPart.js +++ b/tests/multipleBackend/objectPutPart.js @@ -2,7 +2,9 @@ const assert = require('assert'); const async = require('async'); const crypto = require('crypto'); const { parseString } = require('xml2js'); -const AWS = require('aws-sdk'); +const { S3Client, + ListPartsCommand, + AbortMultipartUploadCommand } = require('@aws-sdk/client-s3'); const { storage } = require('arsenal'); const { config } = require('../../lib/Config'); @@ -26,7 +28,7 @@ const fileLocation = 'scality-internal-file'; const awsLocation = 'awsbackend'; const awsLocationMismatch = 'awsbackendmismatch'; const awsConfig = getRealAwsConfig(awsLocation); -const s3 = new AWS.S3(awsConfig); +const s3 = new S3Client(awsConfig); const splitter = constants.splitter; const log = new DummyRequestLogger(); @@ -159,13 +161,14 @@ function listAndAbort(uploadId, calculatedHash2, objectName, location, 
done) { Key: objectName, UploadId: uploadId, }; - s3.listParts(params, (err, data) => { - assert.equal(err, null, `Error listing parts: ${err}`); + s3.send(new ListPartsCommand(params)).then(data => { assert.strictEqual(data.Parts.length, 1); if (calculatedHash2) { assert.strictEqual(`"${calculatedHash2}"`, data.Parts[0].ETag); } - s3.abortMultipartUpload(params, err => { + s3.send(new AbortMultipartUploadCommand(params)).then(() => { + done(); + }).catch(err => { assert.equal(err, null, `Error aborting MPU: ${err}. ` + `You must abort MPU with upload ID ${uploadId} manually.`); done(); From e58839dde26f6832308afec866e8586370c61350 Mon Sep 17 00:00:00 2001 From: Maha Benzekri Date: Tue, 30 Sep 2025 14:47:40 +0200 Subject: [PATCH 08/12] acl related tests migration Issue: CLDSRV-724 --- .../multipleBackend/acl/aclAwsVersioning.js | 88 ++++++++++++++----- 1 file changed, 65 insertions(+), 23 deletions(-) diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/acl/aclAwsVersioning.js b/tests/functional/aws-node-sdk/test/multipleBackend/acl/aclAwsVersioning.js index 840051d9ab..130c3a5610 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/acl/aclAwsVersioning.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/acl/aclAwsVersioning.js @@ -1,5 +1,12 @@ const assert = require('assert'); const async = require('async'); +const { + CreateBucketCommand, + PutObjectCommand, + GetObjectAclCommand, + PutObjectAclCommand, +} = require('@aws-sdk/client-s3'); + const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); const constants = require('../../../../../../constants'); @@ -56,27 +63,47 @@ const testAcp = new _AccessControlPolicy(ownerParams); testAcp.addGrantee('Group', constants.publicId, 'READ'); function putObjectAcl(s3, key, versionId, acp, cb) { - s3.putObjectAcl({ Bucket: bucket, Key: key, AccessControlPolicy: acp, - VersionId: versionId }, err => { - assert.strictEqual(err, null, 
'Expected success ' + - `putting object acl, got error ${err}`); - cb(); - }); + const params = { + Bucket: bucket, + Key: key, + AccessControlPolicy: acp, + }; + if (versionId) { + params.VersionId = versionId; + } + + const command = new PutObjectAclCommand(params); + s3.send(command) + .then(() => { + cb(); + }) + .catch(err => { + assert.strictEqual(err, null, 'Expected success ' + + `putting object acl, got error ${err}`); + }); } function putObjectAndAcl(s3, key, body, acp, cb) { - s3.putObject({ Bucket: bucket, Key: key, Body: body }, - (err, putData) => { - assert.strictEqual(err, null, 'Expected success ' + - `putting object, got error ${err}`); - putObjectAcl(s3, key, putData.VersionId, acp, () => - cb(null, putData.VersionId)); + const command = new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: body, }); + + s3.send(command) + .then(putData => { + putObjectAcl(s3, key, putData.VersionId, acp, () => + cb(null, putData.VersionId)); + }) + .catch(err => { + assert.strictEqual(err, null, 'Expected success ' + + `putting object, got error ${err}`); + }); } /** putVersionsWithAclToAws - enable versioning and put multiple versions * followed by putting object acl - * @param {AWS.S3} s3 - aws node sdk s3 instance + * @param {S3Client} s3 - aws sdk v3 s3 client instance * @param {string} key - string * @param {(string[]|Buffer[])} data - array of data to put as objects * @param {_AccessControlPolicy[]} acps - array of _AccessControlPolicy instance @@ -103,19 +130,30 @@ function getObjectAndAssertAcl(s3, params, cb) { = params; getAndAssertResult(s3, { bucket, key, versionId, expectedVersionId, body }, () => { - s3.getObjectAcl({ Bucket: bucket, Key: key, VersionId: versionId }, - (err, data) => { - assert.strictEqual(err, null, 'Expected success ' + - `getting object acl, got error ${err}`); + const aclParams = { + Bucket: bucket, + Key: key, + }; + if (versionId) { + aclParams.VersionId = versionId; + } + + const command = new 
GetObjectAclCommand(aclParams); + s3.send(command) + .then(data => { assert.deepEqual(data, expectedResult); cb(); + }) + .catch(err => { + assert.strictEqual(err, null, 'Expected success ' + + `getting object acl, got error ${err}`); }); }); } /** getObjectsAndAssertAcls - enable versioning and put multiple versions * followed by putting object acl - * @param {AWS.S3} s3 - aws node sdk s3 instance + * @param {S3Client} s3 - aws sdk v3 s3 client instance * @param {string} key - string * @param {string[]} versionIds - array of versionIds to use to get objs & acl * @param {(string[]|Buffer[])} expectedData - array of data expected from gets @@ -150,15 +188,19 @@ function testSuite() { process.stdout.write('Creating bucket'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket, + + const command = new CreateBucketCommand({ + Bucket: bucket, CreateBucketConfiguration: { LocationConstraint: awsLocation, }, - }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; }); + + return s3.send(command) + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); afterEach(() => { From 68c859a10cda7898c18c8e5cd42c7e2817d65cfa Mon Sep 17 00:00:00 2001 From: Maha Benzekri Date: Tue, 30 Sep 2025 14:48:17 +0200 Subject: [PATCH 09/12] listParts related tests migration Issue: CLDSRV-724 --- .../listParts/azureListParts.js | 43 +++++++++++-------- .../multipleBackend/listParts/listPartsGcp.js | 42 ++++++++++-------- 2 files changed, 49 insertions(+), 36 deletions(-) diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/listParts/azureListParts.js b/tests/functional/aws-node-sdk/test/multipleBackend/listParts/azureListParts.js index a4d4596ee3..a6b6bf1cec 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/listParts/azureListParts.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/listParts/azureListParts.js @@ 
-1,4 +1,9 @@ const assert = require('assert'); +const { CreateBucketCommand, + CreateMultipartUploadCommand, + UploadPartCommand, + AbortMultipartUploadCommand, + ListPartsCommand } = require('@aws-sdk/client-s3'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); @@ -21,37 +26,37 @@ describeSkipIfNotMultipleOrCeph('List parts of MPU on Azure data backend', this.currentTest.key = `somekey-${genUniqID()}`; bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: azureContainerName }).promise() - .then(() => s3.createMultipartUpload({ + return s3.send(new CreateBucketCommand({ Bucket: azureContainerName })) + .then(() => s3.send(new CreateMultipartUploadCommand({ Bucket: azureContainerName, Key: this.currentTest.key, Metadata: { 'scal-location-constraint': azureLocation }, - }).promise()) + })) .then(res => { this.currentTest.uploadId = res.UploadId; - return s3.uploadPart({ Bucket: azureContainerName, + return s3.send(new UploadPartCommand({ Bucket: azureContainerName, Key: this.currentTest.key, PartNumber: 1, UploadId: this.currentTest.uploadId, Body: bodyFirstPart, - }).promise(); + })); }).then(res => { this.currentTest.firstEtag = res.ETag; - }).then(() => s3.uploadPart({ Bucket: azureContainerName, + }).then(() => s3.send(new UploadPartCommand({ Bucket: azureContainerName, Key: this.currentTest.key, PartNumber: 2, UploadId: this.currentTest.uploadId, Body: bodySecondPart, - }).promise()).then(res => { + }))).then(res => { this.currentTest.secondEtag = res.ETag; }) .catch(err => { process.stdout.write(`Error in beforeEach: ${err}\n`); throw err; - }); + })); }); afterEach(function afterEachFn() { process.stdout.write('Emptying bucket'); - return s3.abortMultipartUpload({ + return s3.send(new AbortMultipartUploadCommand({ Bucket: azureContainerName, Key: this.currentTest.key, UploadId: this.currentTest.uploadId, - }).promise() + })) .then(() => 
bucketUtil.empty(azureContainerName)) .then(() => { process.stdout.write('Deleting bucket'); @@ -64,12 +69,10 @@ describeSkipIfNotMultipleOrCeph('List parts of MPU on Azure data backend', }); it('should list both parts', function itFn(done) { - s3.listParts({ + s3.send(new ListPartsCommand({ Bucket: azureContainerName, Key: this.test.key, - UploadId: this.test.uploadId }, - (err, data) => { - assert.equal(err, null, `Err listing parts: ${err}`); + UploadId: this.test.uploadId })).then(data => { assert.strictEqual(data.Parts.length, 2); assert.strictEqual(data.Parts[0].PartNumber, 1); assert.strictEqual(data.Parts[0].Size, firstPartSize); @@ -78,21 +81,25 @@ describeSkipIfNotMultipleOrCeph('List parts of MPU on Azure data backend', assert.strictEqual(data.Parts[1].Size, secondPartSize); assert.strictEqual(data.Parts[1].ETag, this.test.secondEtag); done(); + }).catch(err => { + assert.equal(err, null, `Err listing parts: ${err}`); + done(err); }); }); it('should only list the second part', function itFn(done) { - s3.listParts({ + s3.send(new ListPartsCommand({ Bucket: azureContainerName, Key: this.test.key, PartNumberMarker: 1, - UploadId: this.test.uploadId }, - (err, data) => { - assert.equal(err, null, `Err listing parts: ${err}`); + UploadId: this.test.uploadId })).then(data => { assert.strictEqual(data.Parts[0].PartNumber, 2); assert.strictEqual(data.Parts[0].Size, secondPartSize); assert.strictEqual(data.Parts[0].ETag, this.test.secondEtag); done(); + }).catch(err => { + assert.equal(err, null, `Err listing parts: ${err}`); + done(err); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/listParts/listPartsGcp.js b/tests/functional/aws-node-sdk/test/multipleBackend/listParts/listPartsGcp.js index 46edeee0d3..56a34b7ee5 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/listParts/listPartsGcp.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/listParts/listPartsGcp.js @@ -1,5 +1,9 @@ const assert = require('assert'); 
- +const { CreateBucketCommand, + CreateMultipartUploadCommand, + UploadPartCommand, + AbortMultipartUploadCommand, + ListPartsCommand } = require('@aws-sdk/client-s3'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); const { describeSkipIfNotMultipleOrCeph, gcpLocation, genUniqID } @@ -20,23 +24,23 @@ describeSkipIfNotMultipleOrCeph('List parts of MPU on GCP data backend', () => { this.currentTest.key = `somekey-${genUniqID()}`; bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .then(() => s3.createMultipartUpload({ + return s3.send(new CreateBucketCommand({ Bucket: bucket })) + .then(() => s3.send(new CreateMultipartUploadCommand({ Bucket: bucket, Key: this.currentTest.key, Metadata: { 'scal-location-constraint': gcpLocation }, - }).promise()) + }))) .then(res => { this.currentTest.uploadId = res.UploadId; - return s3.uploadPart({ Bucket: bucket, + return s3.send(new UploadPartCommand({ Bucket: bucket, Key: this.currentTest.key, PartNumber: 1, UploadId: this.currentTest.uploadId, Body: bodyFirstPart, - }).promise(); + })); }).then(res => { this.currentTest.firstEtag = res.ETag; - }).then(() => s3.uploadPart({ Bucket: bucket, + }).then(() => s3.send(new UploadPartCommand({ Bucket: bucket, Key: this.currentTest.key, PartNumber: 2, UploadId: this.currentTest.uploadId, Body: bodySecondPart, - }).promise()) + }))) .then(res => { this.currentTest.secondEtag = res.ETag; }) @@ -48,10 +52,10 @@ describeSkipIfNotMultipleOrCeph('List parts of MPU on GCP data backend', () => { afterEach(function afterEachFn() { process.stdout.write('Emptying bucket'); - return s3.abortMultipartUpload({ + return s3.send(new AbortMultipartUploadCommand({ Bucket: bucket, Key: this.currentTest.key, UploadId: this.currentTest.uploadId, - }).promise() + })) .then(() => bucketUtil.empty(bucket)) .then(() => { process.stdout.write('Deleting bucket'); @@ 
-64,12 +68,10 @@ describeSkipIfNotMultipleOrCeph('List parts of MPU on GCP data backend', () => { }); it('should list both parts', function itFn(done) { - s3.listParts({ + s3.send(new ListPartsCommand({ Bucket: bucket, Key: this.test.key, - UploadId: this.test.uploadId }, - (err, data) => { - assert.equal(err, null, `Err listing parts: ${err}`); + UploadId: this.test.uploadId })).then(data => { assert.strictEqual(data.Parts.length, 2); assert.strictEqual(data.Parts[0].PartNumber, 1); assert.strictEqual(data.Parts[0].Size, firstPartSize); @@ -78,21 +80,25 @@ describeSkipIfNotMultipleOrCeph('List parts of MPU on GCP data backend', () => { assert.strictEqual(data.Parts[1].Size, secondPartSize); assert.strictEqual(data.Parts[1].ETag, this.test.secondEtag); done(); + }).catch(err => { + assert.equal(err, null, `Err listing parts: ${err}`); + done(err); }); }); it('should only list the second part', function itFn(done) { - s3.listParts({ + s3.send(new ListPartsCommand({ Bucket: bucket, Key: this.test.key, PartNumberMarker: 1, - UploadId: this.test.uploadId }, - (err, data) => { - assert.equal(err, null, `Err listing parts: ${err}`); + UploadId: this.test.uploadId })).then(data => { assert.strictEqual(data.Parts[0].PartNumber, 2); assert.strictEqual(data.Parts[0].Size, secondPartSize); assert.strictEqual(data.Parts[0].ETag, this.test.secondEtag); done(); + }).catch(err => { + assert.equal(err, null, `Err listing parts: ${err}`); + done(err); }); }); }); From b00b66f8ad9d18f51a0fedd0dc27f62de5684807 Mon Sep 17 00:00:00 2001 From: Maha Benzekri Date: Tue, 30 Sep 2025 14:49:14 +0200 Subject: [PATCH 10/12] objectCopy related tests migration Issue: CLDSRV-724 --- .../multipleBackend/objectCopy/objectCopy.js | 197 +++++++++--------- 1 file changed, 93 insertions(+), 104 deletions(-) diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/objectCopy.js b/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/objectCopy.js index c13ae83c7e..e260dda602 
100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/objectCopy.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/objectCopy.js @@ -1,7 +1,12 @@ -const { promisify } = require('util'); const assert = require('assert'); -const async = require('async'); -const AWS = require('aws-sdk'); +const { + S3Client, + PutObjectCommand, + GetObjectCommand, + CopyObjectCommand, + PutObjectAclCommand, + CreateBucketCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); const constants = require('../../../../../../constants'); @@ -24,7 +29,7 @@ const locMetaHeader = constants.objectLocationConstraintHeader.substring(11); let bucketUtil; let s3; -function putSourceObj(location, isEmptyObj, bucket, cb) { +async function putSourceObj(location, isEmptyObj, bucket) { const key = `somekey-${genUniqID()}`; const sourceParams = { Bucket: bucket, Key: key, Metadata: { @@ -38,32 +43,28 @@ function putSourceObj(location, isEmptyObj, bucket, cb) { sourceParams.Body = body; } process.stdout.write('Putting source object\n'); - s3.putObject(sourceParams, (err, result) => { - assert.equal(err, null, `Error putting source object: ${err}`); - if (isEmptyObj) { - assert.strictEqual(result.ETag, `"${emptyMD5}"`); - } else { - assert.strictEqual(result.ETag, `"${correctMD5}"`); - } - cb(key); - }); + const result = await s3.send(new PutObjectCommand(sourceParams)); + if (isEmptyObj) { + assert.strictEqual(result.ETag, `"${emptyMD5}"`); + } else { + assert.strictEqual(result.ETag, `"${correctMD5}"`); + } + return key; } -function assertGetObjects(sourceKey, sourceBucket, sourceLoc, destKey, -destBucket, destLoc, awsKey, mdDirective, isEmptyObj, awsS3, awsLocation, -callback) { +async function assertGetObjects(sourceKey, sourceBucket, sourceLoc, destKey, +destBucket, destLoc, awsKey, mdDirective, isEmptyObj, awsS3, awsLocation) { const awsBucket = 
config.locationConstraints[awsLocation].details.bucketName; const sourceGetParams = { Bucket: sourceBucket, Key: sourceKey }; const destGetParams = { Bucket: destBucket, Key: destKey }; const awsParams = { Bucket: awsBucket, Key: awsKey }; - async.series([ - cb => s3.getObject(sourceGetParams, cb), - cb => s3.getObject(destGetParams, cb), - cb => awsS3.getObject(awsParams, cb), - ], (err, results) => { - assert.equal(err, null, `Error in assertGetObjects: ${err}`); - const [sourceRes, destRes, awsRes] = results; + + const [sourceRes, destRes, awsRes] = await Promise.all([ + s3.send(new GetObjectCommand(sourceGetParams)), + s3.send(new GetObjectCommand(destGetParams)), + awsS3.send(new GetObjectCommand(awsParams)), + ]); if (isEmptyObj) { assert.strictEqual(sourceRes.ETag, `"${emptyMD5}"`); assert.strictEqual(destRes.ETag, `"${emptyMD5}"`); @@ -100,69 +101,63 @@ callback) { undefined); } } - assert.strictEqual(sourceRes.ContentLength, destRes.ContentLength); - assert.strictEqual(sourceRes.Metadata[locMetaHeader], sourceLoc); - assert.strictEqual(destRes.Metadata[locMetaHeader], destLoc); - callback(); - }); + assert.strictEqual(sourceRes.ContentLength, destRes.ContentLength); + assert.strictEqual(sourceRes.Metadata[locMetaHeader], sourceLoc); + assert.strictEqual(destRes.Metadata[locMetaHeader], destLoc); } describeSkipIfNotMultiple('MultipleBackend object copy: AWS', function testSuite() { this.timeout(250000); withV4(sigCfg => { - beforeEach(() => { + beforeEach(async () => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; process.stdout.write('Creating bucket\n'); - s3.createBucketPromise = promisify(s3.createBucket); + if (process.env.ENABLE_KMS_ENCRYPTION === 'true') { - s3.createBucketPromise = createEncryptedBucketPromise; + await createEncryptedBucketPromise({ Bucket: bucket }); + await createEncryptedBucketPromise({ Bucket: awsServerSideEncryptionbucket }); + await createEncryptedBucketPromise({ Bucket: bucketAws }); + } else { + 
await s3.send(new CreateBucketCommand({ + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: memLocation, + }, + })); + + await s3.send(new CreateBucketCommand({ + Bucket: awsServerSideEncryptionbucket, + CreateBucketConfiguration: { + LocationConstraint: awsLocationEncryption, + }, + })); + + await s3.send(new CreateBucketCommand({ + Bucket: bucketAws, + CreateBucketConfiguration: { + LocationConstraint: awsLocation, + }, + })); } - return s3.createBucketPromise({ Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: memLocation, - }, - }) - .then(() => s3.createBucketPromise({ - Bucket: awsServerSideEncryptionbucket, - CreateBucketConfiguration: { - LocationConstraint: awsLocationEncryption, - }, - })) - .then(() => s3.createBucketPromise({ Bucket: bucketAws, - CreateBucketConfiguration: { - LocationConstraint: awsLocation, - }, - })) - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); }); - afterEach(() => { + afterEach(async () => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => bucketUtil.empty(bucketAws)) - .then(() => bucketUtil.empty(awsServerSideEncryptionbucket)) - .then(() => { - process.stdout.write(`Deleting bucket ${bucket}\n`); - return bucketUtil.deleteOne(bucket); - }) - .then(() => { - process.stdout.write('Deleting bucket ' + - `${awsServerSideEncryptionbucket}\n`); - return bucketUtil.deleteOne(awsServerSideEncryptionbucket); - }) - .then(() => { - process.stdout.write(`Deleting bucket ${bucketAws}\n`); - return bucketUtil.deleteOne(bucketAws); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + await bucketUtil.empty(bucket); + await bucketUtil.empty(bucketAws); + await bucketUtil.empty(awsServerSideEncryptionbucket); + + process.stdout.write(`Deleting bucket ${bucket}\n`); + await bucketUtil.deleteOne(bucket); + + process.stdout.write('Deleting bucket ' + + 
`${awsServerSideEncryptionbucket}\n`); + await bucketUtil.deleteOne(awsServerSideEncryptionbucket); + + process.stdout.write(`Deleting bucket ${bucketAws}\n`); + await bucketUtil.deleteOne(bucketAws); }); it('should copy an object from mem to AWS relying on ' + @@ -484,42 +479,36 @@ function testSuite() { it('should copy an object on AWS to a different AWS location ' + 'with source object READ access', - done => { + async () => { const awsConfig2 = getRealAwsConfig(awsLocation2); - const awsS3Two = new AWS.S3(awsConfig2); + const awsS3Two = new S3Client(awsConfig2); const copyKey = `copyKey-${genUniqID()}`; const awsBucket = config.locationConstraints[awsLocation].details.bucketName; - async.waterfall([ - // giving access to the object on the AWS side - next => putSourceObj(awsLocation, false, bucket, key => - next(null, key)), - (key, next) => awsS3.putObjectAcl( - { Bucket: awsBucket, Key: key, - ACL: 'public-read' }, err => next(err, key)), - (key, next) => { - const copyParams = { - Bucket: bucket, - Key: copyKey, - CopySource: `/${bucket}/${key}`, - MetadataDirective: 'REPLACE', - Metadata: { - 'scal-location-constraint': awsLocation2 }, - }; - process.stdout.write('Copying object\n'); - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success ' + - `but got error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - next(err, key); - }); - }, - (key, next) => - assertGetObjects(key, bucket, awsLocation, copyKey, - bucket, awsLocation2, copyKey, 'REPLACE', false, - awsS3Two, awsLocation2, next), - ], done); + + // giving access to the object on the AWS side + const key = await putSourceObj(awsLocation, false, bucket); + await awsS3.send(new PutObjectAclCommand({ + Bucket: awsBucket, + Key: key, + ACL: 'public-read' + })); + + const copyParams = { + Bucket: bucket, + Key: copyKey, + CopySource: `/${bucket}/${key}`, + MetadataDirective: 'REPLACE', + Metadata: { + 'scal-location-constraint': 
awsLocation2 }, + }; + process.stdout.write('Copying object\n'); + const result = await s3.send(new CopyObjectCommand(copyParams)); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + + await assertGetObjects(key, bucket, awsLocation, copyKey, + bucket, awsLocation2, copyKey, 'REPLACE', false, + awsS3Two, awsLocation2); }); itSkipCeph('should return error AccessDenied copying an object on ' + From 4cc2689e8f3b4340354ecc8c21240b317308ce4f Mon Sep 17 00:00:00 2001 From: Maha Benzekri Date: Tue, 30 Sep 2025 14:49:55 +0200 Subject: [PATCH 11/12] objectTagging related tests migration Issue: CLDSRV-724 --- .../taggingAwsVersioning-putget.js | 172 ++++++++++++++---- 1 file changed, 137 insertions(+), 35 deletions(-) diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-putget.js b/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-putget.js index 9dd74a291f..bf85ed174b 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-putget.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-putget.js @@ -1,4 +1,10 @@ const async = require('async'); +const { + CreateBucketCommand, + PutObjectCommand, + DeleteObjectCommand, + DeleteBucketCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); @@ -30,26 +36,34 @@ function testSuite() { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); const s3 = bucketUtil.s3; - beforeEach(done => s3.createBucket({ - Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: awsLocation, - }, - }, done)); - afterEach(done => { - removeAllVersions({ Bucket: bucket }, err => { - if (err) { - return done(err); - } - return s3.deleteBucket({ Bucket: bucket }, done); + + beforeEach(done => { + const command = new CreateBucketCommand({ 
+ Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: awsLocation, + }, }); + s3.send(command) + .then(() => done()) + .catch(err => done(err)); + }); + + afterEach(async () => { + await removeAllVersions({ Bucket: bucket }); + await s3.send(new DeleteBucketCommand({ Bucket: bucket })); }); it('versioning not configured: should put/get a tag set on the ' + 'latest version if no version is specified', done => { const key = `somekey-${genUniqID()}`; async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => { + const command = new PutObjectCommand({ Bucket: bucket, Key: key }); + s3.send(command) + .then(data => next(null, data)) + .catch(err => next(err)); + }, (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, expectedVersionId: false }, next), (versionId, next) => getTaggingAndAssert(s3, { bucket, key, @@ -63,7 +77,12 @@ function testSuite() { 'specific version if specified (null)', done => { const key = `somekey-${genUniqID()}`; async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => { + const command = new PutObjectCommand({ Bucket: bucket, Key: key }); + s3.send(command) + .then(data => next(null, data)) + .catch(err => next(err)); + }, (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, versionId: 'null', expectedVersionId: false }, next), (versionId, next) => getTaggingAndAssert(s3, { bucket, key, @@ -110,7 +129,12 @@ function testSuite() { const key = `somekey-${genUniqID()}`; async.waterfall([ next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => { + const command = new PutObjectCommand({ Bucket: bucket, Key: key }); + s3.send(command) + .then(data => next(null, data)) + .catch(err => next(err)); + }, (putData, next) => awsGetLatestVerId(key, '', (err, awsVid) => next(err, putData.VersionId, awsVid)), (s3Vid, awsVid, next) => putNullVersionsToAws(s3, bucket, key, @@ -131,7 +155,12 @@ function 
testSuite() { const key = `somekey-${genUniqID()}`; async.waterfall([ next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => { + const command = new PutObjectCommand({ Bucket: bucket, Key: key }); + s3.send(command) + .then(data => next(null, data)) + .catch(err => next(err)); + }, (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, expectedVersionId: putData.VersionId }, next), (versionId, next) => getTaggingAndAssert(s3, { bucket, key, @@ -146,7 +175,12 @@ function testSuite() { const key = `somekey-${genUniqID()}`; async.waterfall([ next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => { + const command = new PutObjectCommand({ Bucket: bucket, Key: key }); + s3.send(command) + .then(data => next(null, data)) + .catch(err => next(err)); + }, (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, versionId: putData.VersionId, expectedVersionId: putData.VersionId }, next), @@ -163,7 +197,12 @@ function testSuite() { const key = `somekey-${genUniqID()}`; async.waterfall([ next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => { + const command = new PutObjectCommand({ Bucket: bucket, Key: key }); + s3.send(command) + .then(data => next(null, data)) + .catch(err => next(err)); + }, (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, versionId: putData.VersionId, expectedVersionId: putData.VersionId }, next), @@ -178,13 +217,25 @@ function testSuite() { const key = `somekey-${genUniqID()}`; async.waterfall([ next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => { + const command = new PutObjectCommand({ Bucket: bucket, Key: key }); + s3.send(command) + .then(data => next(null, data)) + .catch(err => next(err)); + }, (putData, next) => awsGetLatestVerId(key, '', (err, awsVid) => next(err, 
putData.VersionId, awsVid)), // put another version - (s3Vid, awsVid, next) => s3.putObject({ Bucket: bucket, - Key: key, Body: someBody }, - err => next(err, s3Vid, awsVid)), + (s3Vid, awsVid, next) => { + const command = new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: someBody + }); + s3.send(command) + .then(() => next(null, s3Vid, awsVid)) + .catch(err => next(err, s3Vid, awsVid)); + }, (s3Vid, awsVid, next) => putTaggingAndAssert(s3, { bucket, key, tags, versionId: s3Vid, expectedVersionId: s3Vid }, err => next(err, s3Vid, awsVid)), @@ -196,7 +247,6 @@ function testSuite() { ], done); }); - it('versioning suspended then enabled: should put/get a tag set on ' + 'a specific version (null) if specified', done => { const key = `somekey-${genUniqID()}`; @@ -222,12 +272,25 @@ function testSuite() { done => { const key = `somekey-${genUniqID()}`; async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => { + const command = new PutObjectCommand({ Bucket: bucket, Key: key }); + s3.send(command) + .then(data => next(null, data)) + .catch(err => next(err)); + }, (putData, next) => awsGetLatestVerId(key, '', next), (awsVid, next) => putTaggingAndAssert(s3, { bucket, key, tags, expectedVersionId: false }, () => next(null, awsVid)), - (awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key, VersionId: awsVid }, next), + (awsVid, next) => { + const command = new DeleteObjectCommand({ + Bucket: awsBucket, + Key: key, + VersionId: awsVid + }); + awsS3.send(command) + .then(data => next(null, data)) + .catch(err => next(err)); + }, (delData, next) => getTaggingAndAssert(s3, { bucket, key, expectedTags: tags, expectedVersionId: false, getObject: false }, next), @@ -239,10 +302,23 @@ function testSuite() { done => { const key = `somekey-${genUniqID()}`; async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => { + const command = new PutObjectCommand({ Bucket: bucket, Key: key }); + 
s3.send(command) + .then(data => next(null, data)) + .catch(err => next(err)); + }, (putData, next) => awsGetLatestVerId(key, '', next), - (awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key, VersionId: awsVid }, next), + (awsVid, next) => { + const command = new DeleteObjectCommand({ + Bucket: awsBucket, + Key: key, + VersionId: awsVid + }); + awsS3.send(command) + .then(data => next(null, data)) + .catch(err => next(err)); + }, (delData, next) => putTaggingAndAssert(s3, { bucket, key, tags, expectedError: 'ServiceUnavailable' }, next), ], done); @@ -254,14 +330,27 @@ function testSuite() { const key = `somekey-${genUniqID()}`; async.waterfall([ next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => { + const command = new PutObjectCommand({ Bucket: bucket, Key: key }); + s3.send(command) + .then(data => next(null, data)) + .catch(err => next(err)); + }, (putData, next) => awsGetLatestVerId(key, '', (err, awsVid) => next(err, putData.VersionId, awsVid)), (s3Vid, awsVid, next) => putTaggingAndAssert(s3, { bucket, key, tags, versionId: s3Vid, expectedVersionId: s3Vid }, () => next(null, s3Vid, awsVid)), - (s3Vid, awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key, VersionId: awsVid }, err => next(err, s3Vid)), + (s3Vid, awsVid, next) => { + const command = new DeleteObjectCommand({ + Bucket: awsBucket, + Key: key, + VersionId: awsVid + }); + awsS3.send(command) + .then(() => next(null, s3Vid)) + .catch(err => next(err, s3Vid)); + }, (s3Vid, next) => getTaggingAndAssert(s3, { bucket, key, versionId: s3Vid, expectedTags: tags, expectedVersionId: s3Vid, getObject: false }, next), @@ -273,11 +362,24 @@ function testSuite() { done => { const key = `somekey-${genUniqID()}`; async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => { + const command = new PutObjectCommand({ Bucket: bucket, Key: key }); + s3.send(command) + .then(data => next(null, 
data)) + .catch(err => next(err)); + }, (putData, next) => awsGetLatestVerId(key, '', (err, awsVid) => next(err, putData.VersionId, awsVid)), - (s3Vid, awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key, VersionId: awsVid }, err => next(err, s3Vid)), + (s3Vid, awsVid, next) => { + const command = new DeleteObjectCommand({ + Bucket: awsBucket, + Key: key, + VersionId: awsVid + }); + awsS3.send(command) + .then(() => next(null, s3Vid)) + .catch(err => next(err, s3Vid)); + }, (s3Vid, next) => putTaggingAndAssert(s3, { bucket, key, tags, versionId: s3Vid, expectedError: 'ServiceUnavailable' }, next), From d6bfd4b13f6b435a77b247198eb610905a42b17f Mon Sep 17 00:00:00 2001 From: Maha Benzekri Date: Thu, 9 Oct 2025 10:49:15 +0200 Subject: [PATCH 12/12] fixups post pre review --- .../lib/utility/versioning-util.js | 74 ++++++++++--------- .../test/multipleBackend/delete/delete.js | 35 +++++---- .../test/multipleBackend/get/get.js | 8 +- .../mpuComplete/mpuAwsVersioning.js | 11 ++- .../taggingAwsVersioning-delete.js | 59 +++++++++------ .../taggingAwsVersioning-putget.js | 13 +++- .../aws-node-sdk/test/object/copyPart.js | 2 +- 7 files changed, 117 insertions(+), 85 deletions(-) diff --git a/tests/functional/aws-node-sdk/lib/utility/versioning-util.js b/tests/functional/aws-node-sdk/lib/utility/versioning-util.js index d3e29628a3..d1c2fe496e 100644 --- a/tests/functional/aws-node-sdk/lib/utility/versioning-util.js +++ b/tests/functional/aws-node-sdk/lib/utility/versioning-util.js @@ -1,10 +1,16 @@ const async = require('async'); const assert = require('assert'); -const { S3 } = require('aws-sdk'); +const { S3Client, + ListObjectVersionsCommand, + GetObjectCommand, + DeleteObjectsCommand, + PutBucketVersioningCommand, + PutObjectCommand, + DeleteObjectCommand } = require('@aws-sdk/client-s3'); const getConfig = require('../../test/support/config'); -const config = getConfig('default', { signatureVersion: 'v4' }); -const s3 = new S3(config); +const config = 
getConfig('default'); +const s3Client = new S3Client(config); const versioningEnabled = { Status: 'Enabled' }; const versioningSuspended = { Status: 'Suspended' }; @@ -19,28 +25,25 @@ function _deleteVersionList(versionList, bucket, callback) { Key: version.Key, VersionId: version.VersionId }); }); - return s3.deleteObjects(params, callback); + return s3Client.send(new DeleteObjectsCommand(params)).then(() => callback()).catch(err => callback(err)); } function checkOneVersion(s3, bucket, versionId, callback) { - return s3.listObjectVersions({ Bucket: bucket }, - (err, data) => { - if (err) { - callback(err); - } + return s3Client.send(new ListObjectVersionsCommand({ Bucket: bucket })).then(data => { assert.strictEqual(data.Versions.length, 1); if (versionId) { assert.strictEqual(data.Versions[0].VersionId, versionId); } assert.strictEqual(data.DeleteMarkers.length, 0); callback(); - }); + }).catch(err => callback(err)); } function removeAllVersions(params, callback) { const bucket = params.Bucket; async.waterfall([ - cb => s3.listObjectVersions(params, cb), + cb => s3Client.send(new ListObjectVersionsCommand(params)).then(data => + cb(null, data)).catch(err => cb(err)), (data, cb) => _deleteVersionList(data.DeleteMarkers, bucket, err => cb(err, data)), (data, cb) => _deleteVersionList(data.Versions, bucket, @@ -60,17 +63,17 @@ function removeAllVersions(params, callback) { } function suspendVersioning(bucket, callback) { - s3.putBucketVersioning({ + s3Client.send(new PutBucketVersioningCommand({ Bucket: bucket, VersioningConfiguration: versioningSuspended, - }, callback); + })).then(() => callback()).catch(err => callback(err)); } function enableVersioning(bucket, callback) { - s3.putBucketVersioning({ + s3Client.send(new PutBucketVersioningCommand({ Bucket: bucket, VersioningConfiguration: versioningEnabled, - }, callback); + })).then(() => callback()).catch(err => callback(err)); } function enableVersioningThenPutObject(bucket, object, callback) { @@ -78,7 +81,8 
@@ function enableVersioningThenPutObject(bucket, object, callback) { if (err) { callback(err); } - s3.putObject({ Bucket: bucket, Key: object }, callback); + s3Client.send(new PutObjectCommand({ Bucket: bucket, Key: object })).then(() => + callback()).catch(err => callback(err)); }); } @@ -102,33 +106,35 @@ function enableVersioningThenPutObject(bucket, object, callback) { function createDualNullVersion(s3, bucketName, keyName, cb) { async.waterfall([ // put null version - next => s3.putObject({ Bucket: bucketName, Key: keyName }, - err => next(err)), + next => s3Client.send(new PutObjectCommand({ Bucket: bucketName, Key: keyName, Body: null })).then(() => + next()).catch(err => next(err)), next => enableVersioning(bucketName, err => next(err)), // should store null version as separate version before // putting new version - next => s3.putObject({ Bucket: bucketName, Key: keyName }, - (err, data) => { - assert.strictEqual(err, null, - 'Unexpected err putting new version'); - assert(data.VersionId); - next(null, data.VersionId); - }), + next => s3Client.send(new PutObjectCommand({ Bucket: bucketName, Key: keyName })).then(data => { + assert(data.VersionId); + next(null, data.VersionId); + }).catch(err => { + assert.strictEqual(err, null, + 'Unexpected err putting new version'); + next(err); + }), // delete version we just created, master version should be updated // with value of next most recent version: null version previously put - (versionId, next) => s3.deleteObject({ + (versionId, next) => s3Client.send(new DeleteObjectCommand({ Bucket: bucketName, Key: keyName, VersionId: versionId, - }, err => next(err)), + })).then(() => next()).catch(err => next(err)), // getting object should return null version now - next => s3.getObject({ Bucket: bucketName, Key: keyName }, - (err, data) => { - assert.strictEqual(err, null, - 'Unexpected err getting latest version'); - assert.strictEqual(data.VersionId, 'null'); - next(); - }), + next => s3Client.send(new 
GetObjectCommand({ Bucket: bucketName, Key: keyName })).then(data => { + assert.strictEqual(data.VersionId, 'null'); + next(); + }).catch(err => { + assert.strictEqual(err, null, + 'Unexpected err getting latest version'); + next(err); + }), ], err => cb(err)); } diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/delete/delete.js b/tests/functional/aws-node-sdk/test/multipleBackend/delete/delete.js index 752eb3aed7..67f93147d6 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/delete/delete.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/delete/delete.js @@ -70,13 +70,21 @@ describeSkipIfNotMultiple('Multiple backend delete', () => { Metadata: { 'scal-location-constraint': awsLocation } })); - process.stdout.write('Putting big object to AWS\n'); + process.stdout.write('Putting large object to AWS\n'); await s3.send(new PutObjectCommand({ Bucket: bucket, Key: bigObject, Body: bigBody, Metadata: { 'scal-location-constraint': awsLocation } })); + + process.stdout.write('Putting object to AWS\n'); + await s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: mismatchObject, + Body: body, + Metadata: { 'scal-location-constraint': awsLocationMismatch } + })); }); after(async () => { @@ -91,7 +99,7 @@ describeSkipIfNotMultiple('Multiple backend delete', () => { try { await s3.send(new GetObjectCommand({ Bucket: bucket, Key: memObject })); - throw new Error('Expected NoSuchKey error but got success'); + assert.fail('Expected NoSuchKey error but got success'); } catch (err) { assert.strictEqual(err.code, 'NoSuchKey'); } @@ -102,7 +110,7 @@ describeSkipIfNotMultiple('Multiple backend delete', () => { try { await s3.send(new GetObjectCommand({ Bucket: bucket, Key: fileObject })); - throw new Error('Expected NoSuchKey error but got success'); + assert.fail('Expected NoSuchKey error but got success'); } catch (err) { assert.strictEqual(err.code, 'NoSuchKey'); } @@ -113,7 +121,7 @@ describeSkipIfNotMultiple('Multiple backend delete', () => 
{ try { await s3.send(new GetObjectCommand({ Bucket: bucket, Key: awsObject })); - throw new Error('Expected NoSuchKey error but got success'); + assert.fail('Expected NoSuchKey error but got success'); } catch (err) { assert.strictEqual(err.code, 'NoSuchKey'); } @@ -124,7 +132,7 @@ describeSkipIfNotMultiple('Multiple backend delete', () => { try { await s3.send(new GetObjectCommand({ Bucket: bucket, Key: emptyObject })); - throw new Error('Expected NoSuchKey error but got success'); + assert.fail('Expected NoSuchKey error but got success'); } catch (err) { assert.strictEqual(err.code, 'NoSuchKey'); } @@ -135,24 +143,23 @@ describeSkipIfNotMultiple('Multiple backend delete', () => { try { await s3.send(new GetObjectCommand({ Bucket: bucket, Key: bigObject })); - throw new Error('Expected NoSuchKey error but got success'); + assert.fail('Expected NoSuchKey error but got success'); } catch (err) { assert.strictEqual(err.code, 'NoSuchKey'); } }); - it('should return an InvalidLocationConstraint ' + - 'error for mismatch location', async () => { + it('should delete object from AWS location with bucketMatch set to ' + + 'false', async () => { try { - await s3.send(new PutObjectCommand({ + await s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: mismatchObject })); + await s3.send(new GetObjectCommand({ Bucket: bucket, - Key: mismatchObject, - Body: body, - Metadata: { 'scal-location-constraint': awsLocationMismatch } + Key: mismatchObject })); - throw new Error('Expected InvalidLocationConstraint error but got success'); + assert.fail('Expected NoSuchKey error but got success'); } catch (err) { - assert.strictEqual(err.code, 'InvalidLocationConstraint'); + assert.strictEqual(err.code, 'NoSuchKey'); } }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/get/get.js b/tests/functional/aws-node-sdk/test/multipleBackend/get/get.js index 93d4b5c2b1..944a139d20 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/get/get.js +++ 
b/tests/functional/aws-node-sdk/test/multipleBackend/get/get.js @@ -71,9 +71,7 @@ describe('Multiple backend get object', function testSuite() { done => { const command = new GetObjectCommand({ Bucket: '', Key: 'somekey' }); s3.send(command) - .then(() => { - done(new Error('Expected failure but got success')); - }) + .then(() => done(new Error('Expected failure but got success'))) .catch(err => { assert.notEqual(err, null, 'Expected failure but got success'); @@ -85,9 +83,7 @@ describe('Multiple backend get object', function testSuite() { done => { const command = new GetObjectCommand({ Bucket: bucket, Key: 'nope' }); s3.send(command) - .then(() => { - done(new Error('Expected failure but got success')); - }) + .then(() => done(new Error('Expected failure but got success'))) .catch(err => { assert.notEqual(err, null, 'Expected failure but got success'); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/mpuAwsVersioning.js b/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/mpuAwsVersioning.js index 2da7f142cc..766208c85f 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/mpuAwsVersioning.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/mpuAwsVersioning.js @@ -112,9 +112,14 @@ function testSuite() { LocationConstraint: awsLocation, }, })).then(() => done()).catch(err => done(err))); - afterEach(async () => { - await removeAllVersions({ Bucket: bucket }); - await s3.send(new DeleteBucketCommand({ Bucket: bucket })); + afterEach(done => { + removeAllVersions({ Bucket: bucket }, err => { + if (err) { + return done(err); + } + return s3.send(new DeleteBucketCommand({ Bucket: bucket })) + .then(() => done()).catch(done); + }); }); it('versioning not configured: should not return version id ' + diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-delete.js 
b/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-delete.js index bd3456bb86..52a2a91297 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-delete.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-delete.js @@ -15,6 +15,11 @@ const { tagging, genUniqID, } = require('../utils'); +const { PutObjectCommand, + DeleteObjectCommand, + CreateBucketCommand, + DeleteBucketCommand, +} = require('@aws-sdk/client-s3'); const { putTaggingAndAssert, delTaggingAndAssert, awsGetAssertTags } = tagging; const bucket = `awsversioningtagdel${genUniqID()}`; @@ -28,18 +33,19 @@ function testSuite() { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); const s3 = bucketUtil.s3; - beforeEach(done => s3.createBucket({ + beforeEach(done => s3.send(new CreateBucketCommand({ Bucket: bucket, CreateBucketConfiguration: { LocationConstraint: awsLocation, }, - }, done)); + })).then(() => done()).catch(err => done(err))); afterEach(done => { removeAllVersions({ Bucket: bucket }, err => { if (err) { return done(err); } - return s3.deleteBucket({ Bucket: bucket }, done); + return s3.send(new DeleteBucketCommand({ Bucket: bucket })) + .then(() => done()).catch(done); }); }); @@ -47,7 +53,8 @@ function testSuite() { 'latest version if no version is specified', done => { const key = `somekey-${genUniqID()}`; async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key })).then(data => + next(null, data)).catch(err => next(err)), (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, expectedVersionId: false }, next), (versionId, next) => delTaggingAndAssert(s3, { bucket, key, @@ -60,7 +67,8 @@ function testSuite() { 'version if specified (null)', done => { const key = `somekey-${genUniqID()}`; async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key 
}, next), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key })).then(data => + next(null, data)).catch(err => next(err)), (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, versionId: 'null', expectedVersionId: false }, next), (versionId, next) => delTaggingAndAssert(s3, { bucket, key, @@ -103,7 +111,8 @@ function testSuite() { const key = `somekey-${genUniqID()}`; async.waterfall([ next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key })).then(data => + next(null, data)).catch(err => next(err)), (putData, next) => awsGetLatestVerId(key, '', (err, awsVid) => next(err, putData.VersionId, awsVid)), (s3Vid, awsVid, next) => putNullVersionsToAws(s3, bucket, key, @@ -124,7 +133,8 @@ function testSuite() { const key = `somekey-${genUniqID()}`; async.waterfall([ next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key })).then(putData => + next(null, putData)).catch(err => next(err)), (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, expectedVersionId: putData.VersionId }, next), (versionId, next) => delTaggingAndAssert(s3, { bucket, key, @@ -138,7 +148,8 @@ function testSuite() { const key = `somekey-${genUniqID()}`; async.waterfall([ next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key })).then(putData => + next(null, putData)).catch(err => next(err)), (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, versionId: putData.VersionId, expectedVersionId: putData.VersionId }, next), @@ -153,13 +164,14 @@ function testSuite() { const key = `somekey-${genUniqID()}`; async.waterfall([ next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, 
next), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key })).then(putData => + next(null, putData)).catch(err => next(err)), (putData, next) => awsGetLatestVerId(key, '', (err, awsVid) => next(err, putData.VersionId, awsVid)), // put another version - (s3Vid, awsVid, next) => s3.putObject({ Bucket: bucket, - Key: key, Body: someBody }, - err => next(err, s3Vid, awsVid)), + (s3Vid, awsVid, next) => s3.send(new PutObjectCommand({ Bucket: bucket, + Key: key, Body: someBody })).then(() => + next(null, s3Vid, awsVid)).catch(err => next(err, s3Vid, awsVid)), (s3Vid, awsVid, next) => putTaggingAndAssert(s3, { bucket, key, tags, versionId: s3Vid, expectedVersionId: s3Vid }, err => next(err, s3Vid, awsVid)), @@ -196,10 +208,11 @@ function testSuite() { done => { const key = `somekey-${genUniqID()}`; async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key })).then(putData => + next(null, putData)).catch(err => next(err)), (putData, next) => awsGetLatestVerId(key, '', next), - (awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key, VersionId: awsVid }, next), + (awsVid, next) => awsS3.send(new DeleteObjectCommand({ Bucket: awsBucket, + Key: key, VersionId: awsVid })).then(delData => next(null, delData)).catch(err => next(err)), (delData, next) => delTaggingAndAssert(s3, { bucket, key, expectedError: 'ServiceUnavailable' }, next), ], done); @@ -210,15 +223,15 @@ function testSuite() { done => { const key = `somekey-${genUniqID()}`; async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => awsGetLatestVerId(key, '', - (err, awsVid) => next(err, putData.VersionId, awsVid)), - (s3Vid, awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key, VersionId: awsVid }, err => next(err, s3Vid)), - (s3Vid, next) => delTaggingAndAssert(s3, { bucket, key, - versionId: s3Vid, expectedError: 'ServiceUnavailable' }, - next), + 
next => s3.send(new PutObjectCommand({ Bucket: bucket, Key: key })).then(putData => + next(null, putData)).catch(err => next(err)), + (putData, next) => awsGetLatestVerId(key, '', next), + (awsVid, next) => awsS3.send(new DeleteObjectCommand({ Bucket: awsBucket, + Key: key, VersionId: awsVid })).then(delData => next(null, delData)).catch(err => next(err)), + (delData, next) => delTaggingAndAssert(s3, { bucket, key, + expectedError: 'ServiceUnavailable' }, next), ], done); }); + }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-putget.js b/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-putget.js index bf85ed174b..2906707b25 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-putget.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-putget.js @@ -48,10 +48,15 @@ function testSuite() { .then(() => done()) .catch(err => done(err)); }); - - afterEach(async () => { - await removeAllVersions({ Bucket: bucket }); - await s3.send(new DeleteBucketCommand({ Bucket: bucket })); + + afterEach(done => { + removeAllVersions({ Bucket: bucket }, err => { + if (err) { + return done(err); + } + return s3.send(new DeleteBucketCommand({ Bucket: bucket })) + .then(() => done()).catch(done); + }); }); it('versioning not configured: should put/get a tag set on the ' + diff --git a/tests/functional/aws-node-sdk/test/object/copyPart.js b/tests/functional/aws-node-sdk/test/object/copyPart.js index f87780b706..1d48e1ba79 100644 --- a/tests/functional/aws-node-sdk/test/object/copyPart.js +++ b/tests/functional/aws-node-sdk/test/object/copyPart.js @@ -29,7 +29,7 @@ function checkNoError(err) { function checkError(err, code) { assert.notEqual(err, null, 'Expected failure but got success'); - assert.strictEqual(err.code, code); + assert.strictEqual(err.name, code); } describe('Object Part Copy', () => {