diff --git a/lib/api/apiUtils/object/abortMultipartUpload.js b/lib/api/apiUtils/object/abortMultipartUpload.js
index 40e1f00c5f..4f9e3c8cdc 100644
--- a/lib/api/apiUtils/object/abortMultipartUpload.js
+++ b/lib/api/apiUtils/object/abortMultipartUpload.js
@@ -3,8 +3,7 @@ const async = require('async');
 const constants = require('../../../../constants');
 const { data } = require('../../../data/wrapper');
 const locationConstraintCheck = require('../object/locationConstraintCheck');
-const { standardMetadataValidateBucketAndObj } =
-    require('../../../metadata/metadataUtils');
+const metadataUtils = require('../../../metadata/metadataUtils');
 const { validateQuotas } = require('../quotas/quotaUtils');
 const services = require('../../../services');
 const metadata = require('../../../metadata/wrapper');
@@ -30,7 +29,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
     async.waterfall([
         function checkDestBucketVal(next) {
-            standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log,
+            metadataUtils.standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log,
                 (err, destinationBucket, objectMD) => {
                     if (err) {
                         log.error('error validating request', { error: err });
@@ -61,7 +60,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
             });
         },
         function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket, objectMD,
-            next) {
+        next) {
             const location = mpuOverviewObj.controllingLocationConstraint;
             const originalIdentityAuthzResults = request.actionImplicitDenies;
             // eslint-disable-next-line no-param-reassign
@@ -93,13 +92,52 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
                     skipDataDelete);
             });
         },
-        function deleteObjectMetadata(mpuBucket, storedParts, destBucket, objectMD, skipDataDelete, next) {
-            if (!objectMD || metadataValMPUparams.uploadId !== objectMD.uploadId) {
+        // During Abort, we detect whether a previous CompleteMPU call
+        // wrongly created object metadata, e.g. an object version whose
+        // parts are missing. Passing a null objectMD tells the
+        // subsequent steps to skip the cleanup.
+        // An alternative would be to honor the uploadId filter in
+        // standardMetadataValidateBucketAndObj, so that the returned
+        // objMD is guaranteed to carry the right uploadId, but that
+        // filter is not supported by the Metadata backend.
+        function findObjectToCleanup(mpuBucket, storedParts, destBucket,
+            objectMD, skipDataDelete, next) {
+            if (!objectMD) {
+                return next(null, mpuBucket, storedParts, destBucket, null, skipDataDelete);
+            }
+
+            // If objectMD exists and has a matching uploadId, use it directly.
+            // This handles all non-versioned cases, and some versioned ones.
+            if (objectMD.uploadId === uploadId) {
                 return next(null, mpuBucket, storedParts, destBucket, objectMD, skipDataDelete);
             }
-            // In case there has been an error during cleanup after a complete MPU
-            // (e.g. failure to delete MPU MD in shadow bucket),
-            // we need to ensure that the MPU metadata is deleted.
+
+            // If the bucket is not versioned, there are no versions to check:
+            // the uploadId does not match, so skip the cleanup.
+            if (!destBucket.isVersioningEnabled()) {
+                return next(null, mpuBucket, storedParts, destBucket, null, skipDataDelete);
+            }
+
+            // Otherwise, list all versions to find one with a matching uploadId.
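+            // Note: on buckets holding many versions of this key, the lookup
+            // below may need several paginated listing calls before it returns.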
+            return services.findObjectVersionByUploadId(bucketName, objectKey, uploadId, log, (err, foundVersion) => {
+                if (err) {
+                    log.warn('error finding object version by uploadId, proceeding without cleanup', {
+                        error: err,
+                        method: 'abortMultipartUpload.findObjectToCleanup',
+                    });
+                    // On error, continue the abort without an objectMD to clean up.
+                    return next(null, mpuBucket, storedParts, destBucket, null, skipDataDelete);
+                }
+                return next(null, mpuBucket, storedParts, destBucket, foundVersion, skipDataDelete);
+            });
+        },
+        function deleteObjectMetadata(mpuBucket, storedParts, destBucket, objectMD,
+            skipDataDelete, next) {
+            if (!objectMD) {
+                return next(null, mpuBucket, storedParts, destBucket, objectMD, skipDataDelete);
+            }
+
             log.debug('Object has existing metadata, deleting them', {
                 method: 'abortMultipartUpload',
                 bucketName,
@@ -107,15 +145,29 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
                 uploadId,
                 versionId: objectMD.versionId,
             });
-            return metadata.deleteObjectMD(bucketName, objectKey, { versionId: objectMD.versionId }, log, err => {
+            return metadata.deleteObjectMD(bucketName, objectKey, {
+                versionId: objectMD.versionId,
+            }, log, err => {
                 if (err) {
-                    log.error('error deleting object metadata', { error: err });
+                    // Handle concurrent deletion of this object metadata
+                    if (err.is?.NoSuchKey) {
+                        log.debug('object metadata already deleted or does not exist', {
+                            method: 'abortMultipartUpload',
+                            bucketName,
+                            objectKey,
+                            versionId: objectMD.versionId,
+                        });
+                    } else {
+                        log.error('error deleting object metadata', { error: err });
+                    }
                 }
-                return next(err, mpuBucket, storedParts, destBucket, objectMD, skipDataDelete);
+                // Continue regardless of the deletion outcome: the abort must
+                // succeed even though this cleanup is only best-effort.
+                return next(null, mpuBucket, storedParts, destBucket, objectMD, skipDataDelete);
             });
         },
         function deleteData(mpuBucket, storedParts, destBucket, objectMD,
-            skipDataDelete, next) {
+        skipDataDelete, next) {
             if (skipDataDelete) {
                 return next(null, mpuBucket, storedParts, destBucket);
             }
@@ -126,16 +178,18 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
                 return next(null, mpuBucket, storedParts, destBucket);
             }
 
-        if (objectMD && objectMD.location && objectMD.uploadId === metadataValMPUparams.uploadId) {
+        // Add object data locations if they exist
+        if (objectMD?.location) {
             const existingLocations = new Set(locations.map(loc => loc.key));
-            const remainingObjectLocations = objectMD.location.filter(loc => !existingLocations.has(loc.key));
+            const remainingObjectLocations = objectMD
+                .location.filter(loc => !existingLocations.has(loc.key));
             locations.push(...remainingObjectLocations);
         }
 
         return async.eachLimit(locations, 5, (loc, cb) => {
             data.delete(loc, log, err => {
                 if (err) {
-                    log.fatal('delete ObjectPart failed', { err });
+                    log.warn('delete ObjectPart failed', { err });
                 }
                 cb();
             });
diff --git a/lib/services.js b/lib/services.js
index a312137e5e..ac742f1c17 100644
--- a/lib/services.js
+++ b/lib/services.js
@@ -6,6 +6,7 @@ const { errors, s3middleware } = require('arsenal');
 const ObjectMD = require('arsenal').models.ObjectMD;
 const BucketInfo = require('arsenal').models.BucketInfo;
 const ObjectMDArchive = require('arsenal').models.ObjectMDArchive;
+const { versioning } = require('arsenal');
 const acl = require('./metadata/acl');
 const constants = require('../constants');
 const { config } = require('./Config');
@@ -418,6 +419,71 @@ const services = {
         });
     },
 
+    /**
+     * Finds a specific object version by listing the key's versions and
+     * filtering them on uploadId. This is used during MPU abort to clean
+     * up potentially orphaned object metadata.
+     * @param {string} bucketName - The name of the bucket.
+     * @param {string} objectKey - The key of the object.
+     * @param {string} uploadId - The uploadId to search for.
+     * @param {object} log - The logger instance.
+     * @param {function} cb - The callback, called with (err, foundVersion).
+     * @returns {undefined}
+     */
+    findObjectVersionByUploadId(bucketName, objectKey, uploadId, log, cb) {
+        let keyMarker = null;
+        let versionIdMarker = null;
+        let foundVersion = null;
+        let shouldContinue = true;
+
+        return async.whilst(
+            () => shouldContinue && !foundVersion,
+            callback => {
+                const listParams = {
+                    listingType: 'DelimiterVersions',
+                    // To list only this specific key, append the versionId separator
+                    prefix: `${objectKey}${versioning.VersioningConstants.VersionId.Separator}`,
+                    maxKeys: 1000,
+                };
+
+                if (keyMarker) {
+                    listParams.keyMarker = keyMarker;
+                }
+                if (versionIdMarker) {
+                    listParams.versionIdMarker = versionIdMarker;
+                }
+
+                return this.getObjectListing(bucketName, listParams, log, (err, listResponse) => {
+                    if (err) {
+                        log.error('error listing object versions', { error: err });
+                        return callback(err);
+                    }
+
+                    // Check each version in the current batch for a matching uploadId
+                    const matchedVersion = (listResponse.Versions || []).find(version =>
+                        version.key === objectKey &&
+                        version.value &&
+                        version.value.uploadId === uploadId
+                    );
+
+                    if (matchedVersion) {
+                        foundVersion = matchedVersion.value;
+                    }
+
+                    // Set up for the next iteration if needed
+                    if (listResponse.IsTruncated && !foundVersion) {
+                        keyMarker = listResponse.NextKeyMarker;
+                        versionIdMarker = listResponse.NextVersionIdMarker;
+                    } else {
+                        shouldContinue = false;
+                    }
+
+                    return callback();
+                });
+            },
+            err => cb(err, err ? null : foundVersion)
+        );
+    },
+
     /**
      * Gets list of objects ready to be lifecycled
      * @param {object} bucketName - bucket in which objectMetadata is stored
diff --git a/tests/functional/aws-node-sdk/test/object/abortMPU.js b/tests/functional/aws-node-sdk/test/object/abortMPU.js
index 787d9d1304..8374ce5274 100644
--- a/tests/functional/aws-node-sdk/test/object/abortMPU.js
+++ b/tests/functional/aws-node-sdk/test/object/abortMPU.js
@@ -25,18 +25,21 @@ describe('Abort MPU', () => {
             bucketUtil = new BucketUtility('default', sigCfg);
             s3 = bucketUtil.s3;
             return s3.createBucket({ Bucket: bucket }).promise()
-            .then(() => s3.createMultipartUpload({
-                Bucket: bucket, Key: key }).promise())
-            .then(res => {
-                uploadId = res.UploadId;
-                return s3.uploadPart({ Bucket: bucket, Key: key,
-                    PartNumber: 1, UploadId: uploadId, Body: bodyFirstPart,
-                }).promise();
-            })
-            .catch(err => {
-                process.stdout.write(`Error in beforeEach: ${err}\n`);
-                throw err;
-            });
+            .then(() => s3.createMultipartUpload({
+                Bucket: bucket,
+                Key: key,
+            }).promise())
+            .then(res => {
+                uploadId = res.UploadId;
+                return s3.uploadPart({
+                    Bucket: bucket, Key: key,
+                    PartNumber: 1, UploadId: uploadId, Body: bodyFirstPart,
+                }).promise();
+            })
+            .catch(err => {
+                process.stdout.write(`Error in beforeEach: ${err}\n`);
+                throw err;
+            });
         });
 
     afterEach(() =>
         s3.abortMultipartUpload({
             Bucket: bucket,
             Key: key,
             UploadId: uploadId,
         }).promise()
-            .then(() => bucketUtil.empty(bucket))
-            .then(() => bucketUtil.deleteOne(bucket))
+                .then(() => bucketUtil.empty(bucket))
+                .then(() => bucketUtil.deleteOne(bucket))
     );
 
     // aws-sdk now (v2.363.0) returns 'UriParameterError' error
     // this test was not replaced in any other suite
     it.skip('should return InvalidRequest error if aborting without key',
-        done => {
-            s3.abortMultipartUpload({
-                Bucket: bucket,
-                Key: '',
-                UploadId: uploadId },
-            err => {
-                checkError(err, 'InvalidRequest', 'A key must be specified');
-                done();
+            done => {
+                s3.abortMultipartUpload({
+                    Bucket: bucket,
+                    Key: '',
+                    UploadId: uploadId,
+                },
+                err => {
+                    checkError(err, 'InvalidRequest', 'A key must be specified');
+                    done();
+                });
             });
-        });
     });
 });
 
@@ -273,16 +277,218 @@ describe('Abort MPU - No Such Upload', () => {
     afterEach(() => bucketUtil.deleteOne(bucket));
 
     it('should return NoSuchUpload error when aborting non-existent mpu',
-    done => {
-        s3.abortMultipartUpload({
-            Bucket: bucket,
-            Key: key,
-            UploadId: uuidv4().replace(/-/g, '') },
-        err => {
-            assert.notEqual(err, null, 'Expected failure but got success');
-            assert.strictEqual(err.code, 'NoSuchUpload');
-            done();
+        done => {
+            s3.abortMultipartUpload({
+                Bucket: bucket,
+                Key: key,
+                UploadId: uuidv4().replace(/-/g, ''),
+            },
+            err => {
+                assert.notEqual(err, null, 'Expected failure but got success');
+                assert.strictEqual(err.code, 'NoSuchUpload');
+                done();
+            });
         });
+    });
+});
+
+describe('Abort MPU - Versioned Bucket Cleanup', function testSuite() {
+    this.timeout(30000);
+
+    withV4(sigCfg => {
+        let bucketUtil;
+        let s3;
+        const bucketName = `abort-mpu-versioned-${Date.now()}`;
+        const objectKey = 'test-object-with-versions';
+
+        beforeEach(done => {
+            bucketUtil = new BucketUtility('default', sigCfg);
+            s3 = bucketUtil.s3;
+
+            async.series([
+                next => s3.createBucket({ Bucket: bucketName }, next),
+                next => s3.putBucketVersioning({
+                    Bucket: bucketName,
+                    VersioningConfiguration: { Status: 'Enabled' },
+                }, next),
+            ], done);
+        });
+
+        afterEach(async () => {
+            // Clean up all multipart uploads
+            const listMPUResponse = await s3.listMultipartUploads({ Bucket: bucketName }).promise();
+            await Promise.all(listMPUResponse.Uploads.map(upload =>
+                s3.abortMultipartUpload({
+                    Bucket: bucketName,
+                    Key: upload.Key,
+                    UploadId: upload.UploadId,
+                }).promise().catch(err => {
+                    if (err.code !== 'NoSuchUpload') {
+                        throw err;
+                    }
+                }),
+            ));
+
+            // Clean up all object versions
+            const listVersionsResponse = await s3.listObjectVersions({ Bucket: bucketName }).promise();
+            const allObjects = [
+                ...listVersionsResponse.Versions,
+                ...listVersionsResponse.DeleteMarkers,
+            ];
+            await Promise.all(allObjects.map(obj =>
+                s3.deleteObject({
+                    Bucket: bucketName,
+                    Key: obj.Key,
+                    VersionId: obj.VersionId,
+                }).promise()
+            ));
+
+            await bucketUtil.deleteOne(bucketName);
+        });
+
+        it('should handle aborting MPU with many versions of the same object', done => {
+            const numberOfVersions = 5;
+            let currentVersion = 0;
+            let finalUploadId;
+
+            // Create multiple versions of the same object
+            async.whilst(
+                () => currentVersion < numberOfVersions,
+                callback => {
+                    currentVersion++;
+                    const data = Buffer.from(`Version ${currentVersion} data`);
+
+                    async.waterfall([
+                        next => {
+                            s3.createMultipartUpload({
+                                Bucket: bucketName,
+                                Key: objectKey,
+                            }, (err, result) => {
+                                assert.ifError(err);
+                                if (currentVersion === numberOfVersions) {
+                                    finalUploadId = result.UploadId; // Save the last one for aborting
+                                }
+                                next(null, result.UploadId);
+                            });
+                        },
+                        (uploadId, next) => {
+                            s3.uploadPart({
+                                Bucket: bucketName,
+                                Key: objectKey,
+                                PartNumber: 1,
+                                UploadId: uploadId,
+                                Body: data,
+                            }, (err, result) => {
+                                assert.ifError(err);
+                                next(null, uploadId, result.ETag);
+                            });
+                        },
+                        (uploadId, etag, next) => {
+                            if (currentVersion === numberOfVersions) {
+                                // Don't complete the last one - we'll abort it
+                                return next();
+                            }
+
+                            return s3.completeMultipartUpload({
+                                Bucket: bucketName,
+                                Key: objectKey,
+                                UploadId: uploadId,
+                                MultipartUpload: {
+                                    Parts: [{ ETag: etag, PartNumber: 1 }],
+                                },
+                            }, next);
+                        },
+                    ], callback);
+                },
+                err => {
+                    assert.ifError(err);
+
+                    // Now abort the final MPU
+                    s3.abortMultipartUpload({
+                        Bucket: bucketName,
+                        Key: objectKey,
+                        UploadId: finalUploadId,
+                    }, err => {
+                        assert.ifError(err);
+
+                        // Verify we still have the correct number of completed versions
+                        s3.listObjectVersions({ Bucket: bucketName }, (err, data) => {
+                            assert.ifError(err);
+
+                            const objectVersions = data.Versions.filter(v => v.Key === objectKey);
+                            assert.strictEqual(objectVersions.length, numberOfVersions - 1,
+                                `Expected ${numberOfVersions - 1} versions after abort, got ${objectVersions.length}`);
+
+                            done();
+                        });
+                    });
+                }
+            );
+        });
+
+        it('should handle aborting an MPU when the object has no versions', done => {
+            let uploadId;
+            const data = Buffer.from('test data for single MPU abort');
+
+            async.waterfall([
+                // Create and upload part for MPU
+                next => {
+                    s3.createMultipartUpload({
+                        Bucket: bucketName,
+                        Key: objectKey,
+                    }, (err, result) => {
+                        assert.ifError(err);
+                        uploadId = result.UploadId;
+                        next();
+                    });
+                },
+                next => {
+                    s3.uploadPart({
+                        Bucket: bucketName,
+                        Key: objectKey,
+                        PartNumber: 1,
+                        UploadId: uploadId,
+                        Body: data,
+                    }, err => {
+                        assert.ifError(err);
+                        next();
+                    });
+                },
+
+                // Abort the MPU
+                next => {
+                    s3.abortMultipartUpload({
+                        Bucket: bucketName,
+                        Key: objectKey,
+                        UploadId: uploadId,
+                    }, err => {
+                        assert.ifError(err);
+                        next();
+                    });
+                },
+
+                // Verify no object exists
+                next => {
+                    s3.getObject({ Bucket: bucketName, Key: objectKey }, err => {
+                        assert.notEqual(err, null, 'Expected NoSuchKey error');
+                        assert.strictEqual(err.code, 'NoSuchKey');
+                        next();
+                    });
+                },
+
+                // Verify no versions exist
+                next => {
+                    s3.listObjectVersions({ Bucket: bucketName }, (err, data) => {
+                        assert.ifError(err);
+
+                        const objectVersions = data.Versions.filter(v => v.Key === objectKey);
+                        assert.strictEqual(objectVersions.length, 0,
+                            `Expected 0 versions after abort, got ${objectVersions.length}`);
+
+                        next();
+                    });
+                },
+            ], done);
+        });
+    });
+});
diff --git a/tests/unit/api/apiUtils/object/abortMultipartUpload.spec.js b/tests/unit/api/apiUtils/object/abortMultipartUpload.spec.js
new file mode 100644
index 0000000000..89c25a9bf7
--- /dev/null
+++ b/tests/unit/api/apiUtils/object/abortMultipartUpload.spec.js
@@ -0,0 +1,280 @@
+const assert = require('assert');
+const sinon = require('sinon');
+const { parseString } = require('xml2js');
+const { errors } = require('arsenal');
+const async = require('async');
+const crypto = require('crypto');
+
+const abortMultipartUpload = require('../../../../../lib/api/apiUtils/object/abortMultipartUpload');
+const { bucketPut } = require('../../../../../lib/api/bucketPut');
+const initiateMultipartUpload = require('../../../../../lib/api/initiateMultipartUpload');
+const bucketPutVersioning = require('../../../../../lib/api/bucketPutVersioning');
+const objectPutPart = require('../../../../../lib/api/objectPutPart');
+const { data } = require('../../../../../lib/data/wrapper');
+const quotaUtils = require('../../../../../lib/api/apiUtils/quotas/quotaUtils');
+const services = require('../../../../../lib/services');
+const metadata = require('../../../../../lib/metadata/wrapper');
+const metadataUtils = require('../../../../../lib/metadata/metadataUtils');
+const { DummyRequestLogger, makeAuthInfo, cleanup, versioningTestUtils } = require('../../../helpers');
+const DummyRequest = require('../../../DummyRequest');
+
+describe('abortMultipartUpload', () => {
+    const log = new DummyRequestLogger();
+    const authInfo = makeAuthInfo('testCanonicalId');
+    const bucketName = 'test-bucket';
+    const objectKey = 'test-object';
+    const postBody = Buffer.from('I am a part', 'utf8');
+
+    const bucketRequest = new DummyRequest({
+        bucketName,
+        namespace: 'default',
+        headers: { host: `${bucketName}.s3.amazonaws.com` },
+        url: '/',
+    });
+
+    const initiateRequest = new DummyRequest({
+        bucketName,
+        namespace: 'default',
+        objectKey,
+        headers: { host: `${bucketName}.s3.amazonaws.com` },
+        url: `/${objectKey}?uploads`,
+        actionImplicitDenies: false,
+    });
+
+    const abortRequest = new DummyRequest({
+        bucketName,
+        namespace: 'default',
+        objectKey,
+        headers: { host: `${bucketName}.s3.amazonaws.com` },
+        url: `/${objectKey}?uploadId=test-upload-id`,
+        query: { uploadId: 'test-upload-id' },
+        apiMethods: 'multipartDelete',
+        actionImplicitDenies: false,
+        accountQuotas: {},
+    });
+
+    const enableVersioningRequest = versioningTestUtils.createBucketPutVersioningReq(bucketName, 'Enabled');
+
+    beforeEach(() => {
+        cleanup();
+        sinon.stub(data, 'abortMPU').callsFake(
+            (objectKey, uploadId, location, bucketName, request,
+                destBucket, locationConstraintCheckFn, log, callback) => callback(null, false)
+        );
+        sinon.stub(data, 'delete').yields(null);
+        sinon.stub(quotaUtils, 'validateQuotas').yields(null);
+
+        sinon.stub(services, 'metadataValidateMultipart')
+            .yields(null, {
+                getName: () => 'mpu-shadow-bucket',
+                getMdBucketModelVersion: () => 2,
+                isVersioningEnabled: () => true,
+            }, { controllingLocationConstraint: 'us-east-1' });
+
+        sinon.stub(services, 'getMPUparts').yields(null, { Contents: [] });
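+        // The abort path removes the MPU's keys from the shadow bucket
+        // through services.batchDeleteObjectMetadata; stub it out as well
+        // so no real metadata store is needed.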
+        sinon.stub(services, 'batchDeleteObjectMetadata').yields(null);
+    });
+
+    afterEach(() => {
+        sinon.restore();
+    });
+
+    function createBucketAndMPU(versioned, callback) {
+        async.waterfall([
+            next => bucketPut(authInfo, bucketRequest, log, err => next(err)),
+            next => {
+                if (versioned) {
+                    return bucketPutVersioning(authInfo, enableVersioningRequest, log, err => next(err));
+                }
+                return next();
+            },
+            next => initiateMultipartUpload(authInfo, initiateRequest, log, (err, result) => next(err, result)),
+            (result, next) => parseString(result, (err, json) =>
+                next(err, json.InitiateMultipartUploadResult.UploadId[0])),
+        ], callback);
+    }
+
+    describe('basic functionality', () => {
+        it('should successfully abort multipart upload', done => {
+            createBucketAndMPU(false, (err, uploadId) => {
+                assert.ifError(err);
+                abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log, err => {
+                    assert.strictEqual(err, null);
+                    done();
+                }, { ...abortRequest, query: { uploadId } });
+            });
+        });
+
+        it('should return error for non-existent bucket', done => {
+            abortMultipartUpload(authInfo, 'non-existent-bucket', objectKey, 'fake-upload-id', log, err => {
+                assert(err);
+                assert.strictEqual(err.is.NoSuchBucket, true);
+                done();
+            }, abortRequest);
+        });
+
+        it('should return error for non-existent upload', done => {
+            services.metadataValidateMultipart.yields(errors.NoSuchUpload);
+            bucketPut(authInfo, bucketRequest, log, err => {
+                assert.ifError(err);
+                abortMultipartUpload(authInfo, bucketName, objectKey, 'fake-upload-id', log, err => {
+                    assert(err);
+                    assert.strictEqual(err.is.NoSuchUpload, true);
+                    done();
+                }, abortRequest);
+            });
+        });
+
+        it('should return error if data backend fails to abort', done => {
+            const testError = new Error('Data backend abort failed');
+            // Make the data.abortMPU stub fail explicitly, without other side effects.
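+            // callsFake redefines the behavior of the stub created in
+            // beforeEach; sinon.restore() in afterEach reverts it.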
+            data.abortMPU.callsFake((objKey, upId, loc, bucket, req, destB, locCheckFn, log, cb) => {
+                cb(testError);
+            });
+            createBucketAndMPU(false, (err, uploadId) => {
+                assert.ifError(err);
+                abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log, err => {
+                    assert.deepStrictEqual(err, testError);
+                    done();
+                }, { ...abortRequest, query: { uploadId } });
+            });
+        });
+    });
+
+    describe('with multipart upload parts', () => {
+        it('should delete part data when aborting', done => {
+            createBucketAndMPU(false, (err, uploadId) => {
+                assert.ifError(err);
+                const partRequest = new DummyRequest({
+                    bucketName, objectKey, namespace: 'default',
+                    url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`,
+                    headers: { host: `${bucketName}.s3.amazonaws.com` },
+                    query: { partNumber: '1', uploadId },
+                    calculatedHash: crypto.createHash('md5').update(postBody).digest('hex'),
+                }, postBody);
+
+                objectPutPart(authInfo, partRequest, undefined, log, err => {
+                    assert.ifError(err);
+                    services.getMPUparts.yields(null, {
+                        Contents: [{
+                            key: `1${uploadId}`,
+                            value: {
+                                Size: 11,
+                                partLocations: [{ key: 'a-key' }],
+                            },
+                        }],
+                    });
+
+                    abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log, err => {
+                        assert.strictEqual(err, null);
+                        sinon.assert.called(data.delete);
+                        done();
+                    }, { ...abortRequest, query: { uploadId } });
+                });
+            });
+        });
+    });
+
+    describe('orphaned version cleanup', () => {
+        let findObjectVersionStub;
+
+        beforeEach(done => {
+            findObjectVersionStub = sinon.stub(services, 'findObjectVersionByUploadId');
+            bucketPut(authInfo, bucketRequest, log, err => {
+                assert.ifError(err);
+                done();
+            });
+        });
+
+        it('should NOT search for orphans if master object does not exist', done => {
+            bucketPutVersioning(authInfo, enableVersioningRequest, log, err => {
+                assert.ifError(err);
+                abortMultipartUpload(authInfo, bucketName, objectKey, 'any-id', log, err => {
+                    assert.ifError(err);
+                    sinon.assert.notCalled(findObjectVersionStub);
+                    done();
+                }, abortRequest);
+            });
+        });
+
+        it('should NOT search for orphans on non-versioned bucket with mismatched master uploadId', done => {
+            const standardMetadataValidateStub = sinon.stub(metadataUtils, 'standardMetadataValidateBucketAndObj');
+            const mockBucket = {
+                isVersioningEnabled: () => false,
+                getOwner: () => 'testCanonicalId',
+                getName: () => bucketName,
+            };
+            const mockMasterMD = { uploadId: 'master-id' };
+            standardMetadataValidateStub.yields(null, mockBucket, mockMasterMD);
+
+            abortMultipartUpload(authInfo, bucketName, objectKey, 'abort-id', log, err => {
+                assert.ifError(err);
+                sinon.assert.notCalled(findObjectVersionStub);
+                done();
+            }, { ...abortRequest, query: { uploadId: 'abort-id' } });
+        });
+
+        it('should proceed without cleanup if finding object version fails', done => {
+            const testError = new Error('Find version failed');
+            findObjectVersionStub.yields(testError);
+            const deleteObjectMDStub = sinon.stub(metadata, 'deleteObjectMD').yields(null);
+
+            const standardMetadataValidateStub = sinon.stub(metadataUtils, 'standardMetadataValidateBucketAndObj');
+            const mockBucket = {
+                isVersioningEnabled: () => true,
+                getOwner: () => 'testCanonicalId',
+                getName: () => bucketName,
+            };
+            const mockMasterMD = { uploadId: 'master-id' };
+            standardMetadataValidateStub.yields(null, mockBucket, mockMasterMD);
+
+            abortMultipartUpload(authInfo, bucketName, objectKey, 'abort-id', log, err => {
+                assert.ifError(err);
+                sinon.assert.calledOnce(findObjectVersionStub);
+                sinon.assert.notCalled(deleteObjectMDStub);
+                done();
+            }, { ...abortRequest, query: { uploadId: 'abort-id' } });
+        });
+
+        it('should delete the correct orphaned object version', done => {
+            const deleteObjectMDStub = sinon.stub(metadata, 'deleteObjectMD').yields(null);
+            findObjectVersionStub.yields(null, { uploadId: 'abort-id', versionId: 'orphan-vid' });
+
+            const standardMetadataValidateStub = sinon.stub(metadataUtils, 'standardMetadataValidateBucketAndObj');
+            const mockBucket = {
+                isVersioningEnabled: () => true,
+                getOwner: () => 'testCanonicalId',
+                getName: () => bucketName,
+            };
+            const mockMasterMD = { uploadId: 'master-id' };
+            standardMetadataValidateStub.yields(null, mockBucket, mockMasterMD);
+
+            abortMultipartUpload(authInfo, bucketName, objectKey, 'abort-id', log, err => {
+                assert.ifError(err);
+                sinon.assert.calledOnce(deleteObjectMDStub);
+                assert.strictEqual(deleteObjectMDStub.getCall(0).args[2].versionId, 'orphan-vid');
+                done();
+            }, { ...abortRequest, query: { uploadId: 'abort-id' } });
+        });
+
+        it('should proceed if orphaned object version is already deleted (NoSuchKey)', done => {
+            const deleteObjectMDStub = sinon.stub(metadata, 'deleteObjectMD').yields(errors.NoSuchKey);
+            findObjectVersionStub.yields(null, { uploadId: 'abort-id', versionId: 'orphan-vid' });
+
+            const standardMetadataValidateStub = sinon.stub(metadataUtils, 'standardMetadataValidateBucketAndObj');
+            const mockBucket = {
+                isVersioningEnabled: () => true,
+                getOwner: () => 'testCanonicalId',
+                getName: () => bucketName,
+            };
+            const mockMasterMD = { uploadId: 'master-id' };
+            standardMetadataValidateStub.yields(null, mockBucket, mockMasterMD);
+
+            abortMultipartUpload(authInfo, bucketName, objectKey, 'abort-id', log, err => {
+                assert.ifError(err);
+                sinon.assert.calledOnce(deleteObjectMDStub);
+                done();
+            }, { ...abortRequest, query: { uploadId: 'abort-id' } });
+        });
+    });
+});
diff --git a/tests/unit/lib/services.spec.js b/tests/unit/lib/services.spec.js
new file mode 100644
index 0000000000..3c7b28fde7
--- /dev/null
+++ b/tests/unit/lib/services.spec.js
@@ -0,0 +1,157 @@
+const assert = require('assert');
+const sinon = require('sinon');
+const { versioning } = require('arsenal');
+
+const services = require('../../../lib/services');
+const { DummyRequestLogger } = require('../helpers');
+
+const { VersionId } = versioning.VersioningConstants;
+
+describe('services', () => {
+    const log = new DummyRequestLogger();
+    const bucketName = 'test-bucket';
+    const objectKey = 'test-object';
+
+    afterEach(() => {
+        sinon.restore();
+    });
+
+    describe('findObjectVersionByUploadId', () => {
+        let getObjectListingStub;
+
+        beforeEach(() => {
+            getObjectListingStub = sinon.stub(services, 'getObjectListing');
+        });
+
+        it('should call getObjectListing with an optimized prefix', done => {
+            getObjectListingStub.yields(null, { Versions: [], IsTruncated: false });
+
+            services.findObjectVersionByUploadId(bucketName, objectKey, 'any-upload-id', log, err => {
+                assert.ifError(err);
+                sinon.assert.calledOnce(getObjectListingStub);
+
+                const listParams = getObjectListingStub.getCall(0).args[1];
+                const expectedPrefix = `${objectKey}${VersionId.Separator}`;
+                assert.strictEqual(listParams.prefix, expectedPrefix);
+                assert.strictEqual(listParams.listingType, 'DelimiterVersions');
+                done();
+            });
+        });
+
+        it('should handle an error from getObjectListing', done => {
+            const testError = new Error('listing failed');
+            getObjectListingStub.yields(testError);
+
+            services.findObjectVersionByUploadId(bucketName, objectKey, 'any-upload-id', log, err => {
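+                // The listing error is expected to reach the callback unchanged.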
+                assert.deepStrictEqual(err, testError);
+                sinon.assert.calledOnce(getObjectListingStub);
+                done();
+            });
+        });
+
+        it('should return null if no matching version is found', done => {
+            getObjectListingStub.yields(null, { Versions: [], IsTruncated: false });
+
+            services.findObjectVersionByUploadId(bucketName, objectKey, 'any-upload-id', log, (err, foundVersion) => {
+                assert.ifError(err);
+                sinon.assert.calledOnce(getObjectListingStub);
+                assert.strictEqual(foundVersion, null);
+                done();
+            });
+        });
+
+        it('should find a version on the only page of results', done => {
+            const uploadIdToFind = 'the-correct-upload-id';
+            const correctVersionValue = { uploadId: uploadIdToFind, data: 'this is it' };
+            const versions = [
+                { key: objectKey, value: { uploadId: 'some-other-id' } },
+                // Version with a different key but same uploadId to test the key check
+                { key: 'another-object-key', value: { uploadId: uploadIdToFind } },
+                { key: objectKey, value: correctVersionValue },
+            ];
+            getObjectListingStub.yields(null, { Versions: versions, IsTruncated: false });
+
+            services.findObjectVersionByUploadId(bucketName, objectKey, uploadIdToFind, log, (err, foundVersion) => {
+                assert.ifError(err);
+                sinon.assert.calledOnce(getObjectListingStub);
+                assert.deepStrictEqual(foundVersion, correctVersionValue);
+                done();
+            });
+        });
+
+        it('should read all pages if a matching version is not found', done => {
+            getObjectListingStub.onFirstCall().yields(null, {
+                Versions: [{ key: objectKey, value: { uploadId: 'id-page-1' } }],
+                IsTruncated: true,
+                NextKeyMarker: 'key-marker',
+                NextVersionIdMarker: 'version-marker',
+            });
+            getObjectListingStub.onSecondCall().yields(null, {
+                Versions: [{ key: objectKey, value: { uploadId: 'id-page-2' } }],
+                IsTruncated: false,
+            });
+
+            services.findObjectVersionByUploadId(bucketName, objectKey, 'non-existent-upload-id',
+                log, (err, foundVersion) => {
+                    assert.ifError(err);
+                    sinon.assert.calledTwice(getObjectListingStub);
+
+                    const secondCallParams = getObjectListingStub.getCall(1).args[1];
+                    assert.strictEqual(secondCallParams.keyMarker, 'key-marker');
+                    assert.strictEqual(secondCallParams.versionIdMarker, 'version-marker');
+                    assert.strictEqual(foundVersion, null);
+                    done();
+                });
+        });
+
+        it('should find a version on the first page of many and stop listing', done => {
+            const uploadIdToFind = 'the-correct-upload-id';
+            const correctVersionValue = { uploadId: uploadIdToFind, data: 'this is it' };
+            const versions = [{ key: objectKey, value: correctVersionValue }];
+
+            getObjectListingStub.onFirstCall().yields(null, {
+                Versions: versions,
+                IsTruncated: true,
+                NextKeyMarker: 'key-marker',
+                NextVersionIdMarker: 'version-marker',
+            });
+            getObjectListingStub.onSecondCall().yields(new Error('should not have been called'));
+
+            services.findObjectVersionByUploadId(bucketName, objectKey, uploadIdToFind, log, (err, foundVersion) => {
+                assert.ifError(err);
+                sinon.assert.calledOnce(getObjectListingStub);
+                assert.deepStrictEqual(foundVersion, correctVersionValue);
+                done();
+            });
+        });
+
+        it('should find a version on a subsequent page', done => {
+            const uploadIdToFind = 'the-correct-upload-id';
+            const correctVersionValue = { uploadId: uploadIdToFind, data: 'this is it' };
+            const secondPageVersions = [{ key: objectKey, value: correctVersionValue }];
+
+            getObjectListingStub.onFirstCall().yields(null, {
+                Versions: [{ key: objectKey, value: { uploadId: 'some-other-id' } }],
+                IsTruncated: true,
+                NextKeyMarker: 'key-marker',
+                NextVersionIdMarker: 'version-marker',
+            });
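+            // The second page holds the matching version, so listing must stop there.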
+            getObjectListingStub.onSecondCall().yields(null, {
+                Versions: secondPageVersions,
+                IsTruncated: false,
+            });
+
+            services.findObjectVersionByUploadId(bucketName, objectKey, uploadIdToFind, log, (err, foundVersion) => {
+                assert.ifError(err);
+                sinon.assert.calledTwice(getObjectListingStub);
+
+                const secondCallParams = getObjectListingStub.getCall(1).args[1];
+                assert.strictEqual(secondCallParams.keyMarker, 'key-marker');
+                assert.strictEqual(secondCallParams.versionIdMarker, 'version-marker');
+
+                assert.deepStrictEqual(foundVersion, correctVersionValue);
+                done();
+            });
+        });
+    });
+});